commit bad838d8b447d7917ce809f55470346cf84b0bc3 Author: AuxXxilium Date: Fri Jul 1 13:57:55 2022 -0300 first commit diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..df37aa23 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,22 @@ +# top-most EditorConfig file +root = true + +# Unix-style newlines with a newline ending every file +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_size = 2 + +# Tab indentation (no size specified) +[Makefile] +indent_style = tab + +# YAML +[*.yml] +indent_style = space +indent_size = 2 + +[*.{c,h}] +indent_style = space +indent_size = 4 diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..4d2d0c67 --- /dev/null +++ b/.gitignore @@ -0,0 +1,7 @@ +!.gitkeep +.vscode +arpl.img* +/buildroot-2022.02.2 +test.sh +docker/Dockerfile +docker/cache \ No newline at end of file diff --git a/PLATFORMS b/PLATFORMS new file mode 100644 index 00000000..73fe4eec --- /dev/null +++ b/PLATFORMS @@ -0,0 +1,8 @@ +bromolow 3.10.108 +apollolake 4.4.180 +broadwell 4.4.180 +broadwellnk 4.4.180 +denverton 4.4.180 +geminilake 4.4.180 +v1000 4.4.180 +purley 4.4.180 diff --git a/README.md b/README.md new file mode 100644 index 00000000..4100fc6f --- /dev/null +++ b/README.md @@ -0,0 +1 @@ +# arpl diff --git a/TODO b/TODO new file mode 100644 index 00000000..db7b960d --- /dev/null +++ b/TODO @@ -0,0 +1,23 @@ +To do + - Implement online update of the bzimage and ramdisk + +Done: + - Generalize the addon code + - Implement conflict checking between addons *** Used the alternative of listing unneeded modules *** + - Remove MAXDISKS from the model files and add a menu in synoinfo to configure the maximum number of HDs + - Change the builtin modules in the model configs to unneeded modules + - Change the word "extra" to "cmdline" + - Implement display of the models' cmdline and synoinfo + - Add a check in grub to show or hide the boot menu + - Bug in boot.sh: if the user changes the netif_num variable, the script passes the macX values on without considering the new number of interfaces + *** REMOVED the requirement in the LKM to pass these parameters *** + - Bug with MACs: with 2 NICs the MACs can end up swapped, check this + *** Resolved by the previous solution *** + - Check whether the platform will run on the machine by checking the CPU flags + - Implement selection of the LKM version (dev or prod) + - Using TTYD for web access + - Check whether it works well to put mandatory addons such as qjs-dtb in the model configs *** Done another way *** + - Implement maxdisks selection + + +https://kb.synology.com/en-me/DSM/tutorial/What_kind_of_CPU_does_my_NAS_have diff --git a/Taskfile.yaml b/Taskfile.yaml new file mode 100644 index 00000000..bcb6559c --- /dev/null +++ b/Taskfile.yaml @@ -0,0 +1,22 @@ +# https://taskfile.dev + +version: '3' + +tasks: + build-img: + cmds: + - ./img-gen.sh + + build-docker-img: + dir: docker + cmds: + - ./build.sh + + compile-lkms: + cmds: + - ./compile-lkm.sh + + compile-addons: + dir: addons + cmds: + - ./compile-addons.sh {{.CLI_ARGS}} diff --git a/addons/9p/install.sh b/addons/9p/install.sh new file mode 100644 index 00000000..a8d42d95 --- /dev/null +++ b/addons/9p/install.sh @@ -0,0 +1,6 @@ +if [ "${1}" = "rd" ]; then + echo "Installing module for Plan 9 Resource Sharing Support (9P2000)" + ${INSMOD} "/modules/9pnet.ko" + ${INSMOD} "/modules/9pnet_virtio.ko" + ${INSMOD} "/modules/9p.ko" ${PARAMS} +fi diff --git a/addons/9p/manifest.yml b/addons/9p/manifest.yml new file mode
100644 index 00000000..cd4c5807 --- /dev/null +++ b/addons/9p/manifest.yml @@ -0,0 +1,28 @@ +version: 1 +name: 9p +description: "Driver for Plan 9 Resource Sharing Support (9P2000)" +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true diff --git a/addons/9p/src/3.10.108/Makefile b/addons/9p/src/3.10.108/Makefile new file mode 100644 index 00000000..0d269d65 --- /dev/null +++ b/addons/9p/src/3.10.108/Makefile @@ -0,0 +1,36 @@ +obj-m := 9p.o + +9p-objs := \ + vfs_super.o \ + vfs_inode.o \ + vfs_inode_dotl.o \ + vfs_addr.o \ + vfs_file.o \ + vfs_dir.o \ + vfs_dentry.o \ + v9fs.o \ + fid.o \ + xattr.o \ + xattr_user.o + +9p-y += cache.o +9p-n += acl.o + +obj-m += 9pnet.o +obj-m += 9pnet_virtio.o +obj-n += 9pnet_rdma.o + +9pnet-objs := \ + mod.o \ + client.o \ + error.o \ + util.o \ + protocol.o \ + trans_fd.o \ + trans_common.o + +9pnet_virtio-objs := \ + trans_virtio.o + +9pnet_rdma-objs := \ + trans_rdma.o diff --git a/addons/9p/src/3.10.108/acl.c b/addons/9p/src/3.10.108/acl.c new file mode 100644 index 00000000..9686c1f1 --- /dev/null +++ b/addons/9p/src/3.10.108/acl.c @@ -0,0 +1,375 @@ +/* + * Copyright IBM Corporation, 2010 + * Author Aneesh Kumar K.V + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ * + */ + +#include <linux/module.h> +#include <linux/fs.h> +#include <net/9p/9p.h> +#include <net/9p/client.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/posix_acl_xattr.h> +#include "xattr.h" +#include "acl.h" +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" + +static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name) +{ + ssize_t size; + void *value = NULL; + struct posix_acl *acl = NULL; + + size = v9fs_fid_xattr_get(fid, name, NULL, 0); + if (size > 0) { + value = kzalloc(size, GFP_NOFS); + if (!value) + return ERR_PTR(-ENOMEM); + size = v9fs_fid_xattr_get(fid, name, value, size); + if (size > 0) { + acl = posix_acl_from_xattr(&init_user_ns, value, size); + if (IS_ERR(acl)) + goto err_out; + } + } else if (size == -ENODATA || size == 0 || + size == -ENOSYS || size == -EOPNOTSUPP) { + acl = NULL; + } else + acl = ERR_PTR(-EIO); + +err_out: + kfree(value); + return acl; +} + +int v9fs_get_acl(struct inode *inode, struct p9_fid *fid) +{ + int retval = 0; + struct posix_acl *pacl, *dacl; + struct v9fs_session_info *v9ses; + + v9ses = v9fs_inode2v9ses(inode); + if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) || + ((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) { + set_cached_acl(inode, ACL_TYPE_DEFAULT, NULL); + set_cached_acl(inode, ACL_TYPE_ACCESS, NULL); + return 0; + } + /* get the default/access acl values and cache them */ + dacl = __v9fs_get_acl(fid, POSIX_ACL_XATTR_DEFAULT); + pacl = __v9fs_get_acl(fid, POSIX_ACL_XATTR_ACCESS); + + if (!IS_ERR(dacl) && !IS_ERR(pacl)) { + set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl); + set_cached_acl(inode, ACL_TYPE_ACCESS, pacl); + } else + retval = -EIO; + + if (!IS_ERR(dacl)) + posix_acl_release(dacl); + + if (!IS_ERR(pacl)) + posix_acl_release(pacl); + + return retval; +} + +static struct posix_acl *v9fs_get_cached_acl(struct inode *inode, int type) +{ + struct posix_acl *acl; + /* + * 9p Always cache the acl value when + * instantiating the inode (v9fs_inode_from_fid) + */ + acl = get_cached_acl(inode, type); + BUG_ON(acl == ACL_NOT_CACHED); + return acl; +} + +struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type) +{ + struct v9fs_session_info *v9ses; + + v9ses = v9fs_inode2v9ses(inode); + if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) || + ((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) { + /* + * On access = client and acl = on mode get the acl + * values from the server + */ + return NULL; + } + return v9fs_get_cached_acl(inode, type); + +} + +static int v9fs_set_acl(struct p9_fid *fid, int type, struct posix_acl *acl) +{ + int retval; + char *name; + size_t size; + void *buffer; + if (!acl) + return 0; + + /* Set a setxattr request to server */ + size = posix_acl_xattr_size(acl->a_count); + buffer = kmalloc(size, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + retval = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); + if (retval < 0) + goto err_free_out; + switch (type) { + case ACL_TYPE_ACCESS: + name = POSIX_ACL_XATTR_ACCESS; + break; + case ACL_TYPE_DEFAULT: + name = POSIX_ACL_XATTR_DEFAULT; + break; + default: + BUG(); + } + retval = v9fs_fid_xattr_set(fid, name, buffer, size, 0); +err_free_out: + kfree(buffer); + return retval; +} + +int v9fs_acl_chmod(struct inode *inode, struct p9_fid *fid) +{ + int retval = 0; + struct posix_acl *acl; + + if (S_ISLNK(inode->i_mode)) + return -EOPNOTSUPP; + acl = v9fs_get_cached_acl(inode, ACL_TYPE_ACCESS); + if (acl) { + retval = posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); + if (retval) + return retval; + set_cached_acl(inode, ACL_TYPE_ACCESS, acl); + retval = v9fs_set_acl(fid, ACL_TYPE_ACCESS, acl); +
posix_acl_release(acl); + } + return retval; +} + +int v9fs_set_create_acl(struct inode *inode, struct p9_fid *fid, + struct posix_acl *dacl, struct posix_acl *acl) +{ + set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl); + set_cached_acl(inode, ACL_TYPE_ACCESS, acl); + v9fs_set_acl(fid, ACL_TYPE_DEFAULT, dacl); + v9fs_set_acl(fid, ACL_TYPE_ACCESS, acl); + return 0; +} + +void v9fs_put_acl(struct posix_acl *dacl, + struct posix_acl *acl) +{ + posix_acl_release(dacl); + posix_acl_release(acl); +} + +int v9fs_acl_mode(struct inode *dir, umode_t *modep, + struct posix_acl **dpacl, struct posix_acl **pacl) +{ + int retval = 0; + umode_t mode = *modep; + struct posix_acl *acl = NULL; + + if (!S_ISLNK(mode)) { + acl = v9fs_get_cached_acl(dir, ACL_TYPE_DEFAULT); + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (!acl) + mode &= ~current_umask(); + } + if (acl) { + if (S_ISDIR(mode)) + *dpacl = posix_acl_dup(acl); + retval = posix_acl_create(&acl, GFP_NOFS, &mode); + if (retval < 0) + return retval; + if (retval > 0) + *pacl = acl; + else + posix_acl_release(acl); + } + *modep = mode; + return 0; +} + +static int v9fs_remote_get_acl(struct dentry *dentry, const char *name, + void *buffer, size_t size, int type) +{ + char *full_name; + + switch (type) { + case ACL_TYPE_ACCESS: + full_name = POSIX_ACL_XATTR_ACCESS; + break; + case ACL_TYPE_DEFAULT: + full_name = POSIX_ACL_XATTR_DEFAULT; + break; + default: + BUG(); + } + return v9fs_xattr_get(dentry, full_name, buffer, size); +} + +static int v9fs_xattr_get_acl(struct dentry *dentry, const char *name, + void *buffer, size_t size, int type) +{ + struct v9fs_session_info *v9ses; + struct posix_acl *acl; + int error; + + if (strcmp(name, "") != 0) + return -EINVAL; + + v9ses = v9fs_dentry2v9ses(dentry); + /* + * We allow set/get/list of acl when access=client is not specified + */ + if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) + return v9fs_remote_get_acl(dentry, name, buffer, size, type); + + acl = v9fs_get_cached_acl(dentry->d_inode, type); + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (acl == NULL) + return -ENODATA; + error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); + posix_acl_release(acl); + + return error; +} + +static int v9fs_remote_set_acl(struct dentry *dentry, const char *name, + const void *value, size_t size, + int flags, int type) +{ + char *full_name; + + switch (type) { + case ACL_TYPE_ACCESS: + full_name = POSIX_ACL_XATTR_ACCESS; + break; + case ACL_TYPE_DEFAULT: + full_name = POSIX_ACL_XATTR_DEFAULT; + break; + default: + BUG(); + } + return v9fs_xattr_set(dentry, full_name, value, size, flags); +} + + +static int v9fs_xattr_set_acl(struct dentry *dentry, const char *name, + const void *value, size_t size, + int flags, int type) +{ + int retval; + struct posix_acl *acl; + struct v9fs_session_info *v9ses; + struct inode *inode = dentry->d_inode; + + if (strcmp(name, "") != 0) + return -EINVAL; + + v9ses = v9fs_dentry2v9ses(dentry); + /* + * set the attribute on the remote. Without even looking at the + * xattr value. 
We leave it to the server to validate + */ + if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) + return v9fs_remote_set_acl(dentry, name, + value, size, flags, type); + + if (S_ISLNK(inode->i_mode)) + return -EOPNOTSUPP; + if (!inode_owner_or_capable(inode)) + return -EPERM; + if (value) { + /* update the cached acl value */ + acl = posix_acl_from_xattr(&init_user_ns, value, size); + if (IS_ERR(acl)) + return PTR_ERR(acl); + else if (acl) { + retval = posix_acl_valid(acl); + if (retval) + goto err_out; + } + } else + acl = NULL; + + switch (type) { + case ACL_TYPE_ACCESS: + name = POSIX_ACL_XATTR_ACCESS; + if (acl) { + struct iattr iattr; + + retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl); + if (retval) + goto err_out; + if (!acl) { + /* + * ACL can be represented + * by the mode bits. So don't + * update ACL. + */ + value = NULL; + size = 0; + } + iattr.ia_valid = ATTR_MODE; + /* FIXME should we update ctime ? + * What is the following setxattr update the + * mode ? + */ + v9fs_vfs_setattr_dotl(dentry, &iattr); + } + break; + case ACL_TYPE_DEFAULT: + name = POSIX_ACL_XATTR_DEFAULT; + if (!S_ISDIR(inode->i_mode)) { + retval = acl ? -EINVAL : 0; + goto err_out; + } + break; + default: + BUG(); + } + retval = v9fs_xattr_set(dentry, name, value, size, flags); + if (!retval) + set_cached_acl(inode, type, acl); +err_out: + posix_acl_release(acl); + return retval; +} + +const struct xattr_handler v9fs_xattr_acl_access_handler = { + .prefix = POSIX_ACL_XATTR_ACCESS, + .flags = ACL_TYPE_ACCESS, + .get = v9fs_xattr_get_acl, + .set = v9fs_xattr_set_acl, +}; + +const struct xattr_handler v9fs_xattr_acl_default_handler = { + .prefix = POSIX_ACL_XATTR_DEFAULT, + .flags = ACL_TYPE_DEFAULT, + .get = v9fs_xattr_get_acl, + .set = v9fs_xattr_set_acl, +}; diff --git a/addons/9p/src/3.10.108/acl.h b/addons/9p/src/3.10.108/acl.h new file mode 100644 index 00000000..e4f7e882 --- /dev/null +++ b/addons/9p/src/3.10.108/acl.h @@ -0,0 +1,55 @@ +/* + * Copyright IBM Corporation, 2010 + * Author Aneesh Kumar K.V + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + */ +#ifndef FS_9P_ACL_H +#define FS_9P_ACL_H + +#ifdef CONFIG_9P_FS_POSIX_ACL +extern int v9fs_get_acl(struct inode *, struct p9_fid *); +extern struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type); +extern int v9fs_acl_chmod(struct inode *, struct p9_fid *); +extern int v9fs_set_create_acl(struct inode *, struct p9_fid *, + struct posix_acl *, struct posix_acl *); +extern int v9fs_acl_mode(struct inode *dir, umode_t *modep, + struct posix_acl **dpacl, struct posix_acl **pacl); +extern void v9fs_put_acl(struct posix_acl *dacl, struct posix_acl *acl); +#else +#define v9fs_iop_get_acl NULL +static inline int v9fs_get_acl(struct inode *inode, struct p9_fid *fid) +{ + return 0; +} +static inline int v9fs_acl_chmod(struct inode *inode, struct p9_fid *fid) +{ + return 0; +} +static inline int v9fs_set_create_acl(struct inode *inode, + struct p9_fid *fid, + struct posix_acl *dacl, + struct posix_acl *acl) +{ + return 0; +} +static inline void v9fs_put_acl(struct posix_acl *dacl, + struct posix_acl *acl) +{ +} +static inline int v9fs_acl_mode(struct inode *dir, umode_t *modep, + struct posix_acl **dpacl, + struct posix_acl **pacl) +{ + return 0; +} + +#endif +#endif /* FS_9P_XATTR_H */ diff --git a/addons/9p/src/3.10.108/cache.c b/addons/9p/src/3.10.108/cache.c new file mode 100644 index 00000000..a9ea73d6 --- /dev/null +++ b/addons/9p/src/3.10.108/cache.c @@ -0,0 +1,415 @@ +/* + * V9FS cache definitions. + * + * Copyright (C) 2009 by Abhishek Kulkarni + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include <linux/jiffies.h> +#include <linux/file.h> +#include <linux/slab.h> +#include <linux/stat.h> +#include <linux/sched.h> +#include <linux/fs.h> +#include <net/9p/9p.h> + +#include "v9fs.h" +#include "cache.h" + +#define CACHETAG_LEN 11 + +struct fscache_netfs v9fs_cache_netfs = { + .name = "9p", + .version = 0, +}; + +/** + * v9fs_random_cachetag - Generate a random tag to be associated + * with a new cache session. + * + * The value of jiffies is used for a fairly randomly cache tag.
+ */ + +static +int v9fs_random_cachetag(struct v9fs_session_info *v9ses) +{ + v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL); + if (!v9ses->cachetag) + return -ENOMEM; + + return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies); +} + +static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data, + void *buffer, uint16_t bufmax) +{ + struct v9fs_session_info *v9ses; + uint16_t klen = 0; + + v9ses = (struct v9fs_session_info *)cookie_netfs_data; + p9_debug(P9_DEBUG_FSC, "session %p buf %p size %u\n", + v9ses, buffer, bufmax); + + if (v9ses->cachetag) + klen = strlen(v9ses->cachetag); + + if (klen > bufmax) + return 0; + + memcpy(buffer, v9ses->cachetag, klen); + p9_debug(P9_DEBUG_FSC, "cache session tag %s\n", v9ses->cachetag); + return klen; +} + +const struct fscache_cookie_def v9fs_cache_session_index_def = { + .name = "9P.session", + .type = FSCACHE_COOKIE_TYPE_INDEX, + .get_key = v9fs_cache_session_get_key, +}; + +void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses) +{ + /* If no cache session tag was specified, we generate a random one. */ + if (!v9ses->cachetag) + v9fs_random_cachetag(v9ses); + + v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index, + &v9fs_cache_session_index_def, + v9ses); + p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n", + v9ses, v9ses->fscache); +} + +void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses) +{ + p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n", + v9ses, v9ses->fscache); + fscache_relinquish_cookie(v9ses->fscache, 0); + v9ses->fscache = NULL; +} + + +static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data, + void *buffer, uint16_t bufmax) +{ + const struct v9fs_inode *v9inode = cookie_netfs_data; + memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path)); + p9_debug(P9_DEBUG_FSC, "inode %p get key %llu\n", + &v9inode->vfs_inode, v9inode->qid.path); + return sizeof(v9inode->qid.path); +} + +static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data, + uint64_t *size) +{ + const struct v9fs_inode *v9inode = cookie_netfs_data; + *size = i_size_read(&v9inode->vfs_inode); + + p9_debug(P9_DEBUG_FSC, "inode %p get attr %llu\n", + &v9inode->vfs_inode, *size); +} + +static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data, + void *buffer, uint16_t buflen) +{ + const struct v9fs_inode *v9inode = cookie_netfs_data; + memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version)); + p9_debug(P9_DEBUG_FSC, "inode %p get aux %u\n", + &v9inode->vfs_inode, v9inode->qid.version); + return sizeof(v9inode->qid.version); +} + +static enum +fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data, + const void *buffer, + uint16_t buflen) +{ + const struct v9fs_inode *v9inode = cookie_netfs_data; + + if (buflen != sizeof(v9inode->qid.version)) + return FSCACHE_CHECKAUX_OBSOLETE; + + if (memcmp(buffer, &v9inode->qid.version, + sizeof(v9inode->qid.version))) + return FSCACHE_CHECKAUX_OBSOLETE; + + return FSCACHE_CHECKAUX_OKAY; +} + +static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data) +{ + struct v9fs_inode *v9inode = cookie_netfs_data; + struct pagevec pvec; + pgoff_t first; + int loop, nr_pages; + + pagevec_init(&pvec, 0); + first = 0; + + for (;;) { + nr_pages = pagevec_lookup(&pvec, v9inode->vfs_inode.i_mapping, + first, + PAGEVEC_SIZE - pagevec_count(&pvec)); + if (!nr_pages) + break; + + for (loop = 0; loop < nr_pages; loop++) + ClearPageFsCache(pvec.pages[loop]); + + first = pvec.pages[nr_pages - 1]->index + 1; + + 
pvec.nr = nr_pages; + pagevec_release(&pvec); + cond_resched(); + } +} + +const struct fscache_cookie_def v9fs_cache_inode_index_def = { + .name = "9p.inode", + .type = FSCACHE_COOKIE_TYPE_DATAFILE, + .get_key = v9fs_cache_inode_get_key, + .get_attr = v9fs_cache_inode_get_attr, + .get_aux = v9fs_cache_inode_get_aux, + .check_aux = v9fs_cache_inode_check_aux, + .now_uncached = v9fs_cache_inode_now_uncached, +}; + +void v9fs_cache_inode_get_cookie(struct inode *inode) +{ + struct v9fs_inode *v9inode; + struct v9fs_session_info *v9ses; + + if (!S_ISREG(inode->i_mode)) + return; + + v9inode = V9FS_I(inode); + if (v9inode->fscache) + return; + + v9ses = v9fs_inode2v9ses(inode); + v9inode->fscache = fscache_acquire_cookie(v9ses->fscache, + &v9fs_cache_inode_index_def, + v9inode); + + p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n", + inode, v9inode->fscache); +} + +void v9fs_cache_inode_put_cookie(struct inode *inode) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + + if (!v9inode->fscache) + return; + p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n", + inode, v9inode->fscache); + + fscache_relinquish_cookie(v9inode->fscache, 0); + v9inode->fscache = NULL; +} + +void v9fs_cache_inode_flush_cookie(struct inode *inode) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + + if (!v9inode->fscache) + return; + p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n", + inode, v9inode->fscache); + + fscache_relinquish_cookie(v9inode->fscache, 1); + v9inode->fscache = NULL; +} + +void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + struct p9_fid *fid; + + if (!v9inode->fscache) + return; + + spin_lock(&v9inode->fscache_lock); + fid = filp->private_data; + if ((filp->f_flags & O_ACCMODE) != O_RDONLY) + v9fs_cache_inode_flush_cookie(inode); + else + v9fs_cache_inode_get_cookie(inode); + + spin_unlock(&v9inode->fscache_lock); +} + +void v9fs_cache_inode_reset_cookie(struct inode *inode) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + struct v9fs_session_info *v9ses; + struct fscache_cookie *old; + + if (!v9inode->fscache) + return; + + old = v9inode->fscache; + + spin_lock(&v9inode->fscache_lock); + fscache_relinquish_cookie(v9inode->fscache, 1); + + v9ses = v9fs_inode2v9ses(inode); + v9inode->fscache = fscache_acquire_cookie(v9ses->fscache, + &v9fs_cache_inode_index_def, + v9inode); + p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n", + inode, old, v9inode->fscache); + + spin_unlock(&v9inode->fscache_lock); +} + +int __v9fs_fscache_release_page(struct page *page, gfp_t gfp) +{ + struct inode *inode = page->mapping->host; + struct v9fs_inode *v9inode = V9FS_I(inode); + + BUG_ON(!v9inode->fscache); + + return fscache_maybe_release_page(v9inode->fscache, page, gfp); +} + +void __v9fs_fscache_invalidate_page(struct page *page) +{ + struct inode *inode = page->mapping->host; + struct v9fs_inode *v9inode = V9FS_I(inode); + + BUG_ON(!v9inode->fscache); + + if (PageFsCache(page)) { + fscache_wait_on_page_write(v9inode->fscache, page); + BUG_ON(!PageLocked(page)); + fscache_uncache_page(v9inode->fscache, page); + } +} + +static void v9fs_vfs_readpage_complete(struct page *page, void *data, + int error) +{ + if (!error) + SetPageUptodate(page); + + unlock_page(page); +} + +/** + * __v9fs_readpage_from_fscache - read a page from cache + * + * Returns 0 if the pages are in cache and a BIO is submitted, + * 1 if the pages are not in cache and -error otherwise. 
+ */ + +int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page) +{ + int ret; + const struct v9fs_inode *v9inode = V9FS_I(inode); + + p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page); + if (!v9inode->fscache) + return -ENOBUFS; + + ret = fscache_read_or_alloc_page(v9inode->fscache, + page, + v9fs_vfs_readpage_complete, + NULL, + GFP_KERNEL); + switch (ret) { + case -ENOBUFS: + case -ENODATA: + p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret); + return 1; + case 0: + p9_debug(P9_DEBUG_FSC, "BIO submitted\n"); + return ret; + default: + p9_debug(P9_DEBUG_FSC, "ret %d\n", ret); + return ret; + } +} + +/** + * __v9fs_readpages_from_fscache - read multiple pages from cache + * + * Returns 0 if the pages are in cache and a BIO is submitted, + * 1 if the pages are not in cache and -error otherwise. + */ + +int __v9fs_readpages_from_fscache(struct inode *inode, + struct address_space *mapping, + struct list_head *pages, + unsigned *nr_pages) +{ + int ret; + const struct v9fs_inode *v9inode = V9FS_I(inode); + + p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages); + if (!v9inode->fscache) + return -ENOBUFS; + + ret = fscache_read_or_alloc_pages(v9inode->fscache, + mapping, pages, nr_pages, + v9fs_vfs_readpage_complete, + NULL, + mapping_gfp_mask(mapping)); + switch (ret) { + case -ENOBUFS: + case -ENODATA: + p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret); + return 1; + case 0: + BUG_ON(!list_empty(pages)); + BUG_ON(*nr_pages != 0); + p9_debug(P9_DEBUG_FSC, "BIO submitted\n"); + return ret; + default: + p9_debug(P9_DEBUG_FSC, "ret %d\n", ret); + return ret; + } +} + +/** + * __v9fs_readpage_to_fscache - write a page to the cache + * + */ + +void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page) +{ + int ret; + const struct v9fs_inode *v9inode = V9FS_I(inode); + + p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page); + ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL); + p9_debug(P9_DEBUG_FSC, "ret = %d\n", ret); + if (ret != 0) + v9fs_uncache_page(inode, page); +} + +/* + * wait for a page to complete writing to the cache + */ +void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page) +{ + const struct v9fs_inode *v9inode = V9FS_I(inode); + p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page); + if (PageFsCache(page)) + fscache_wait_on_page_write(v9inode->fscache, page); +} diff --git a/addons/9p/src/3.10.108/cache.h b/addons/9p/src/3.10.108/cache.h new file mode 100644 index 00000000..40cc54ce --- /dev/null +++ b/addons/9p/src/3.10.108/cache.h @@ -0,0 +1,139 @@ +/* + * V9FS cache definitions. + * + * Copyright (C) 2009 by Abhishek Kulkarni + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#ifndef _9P_CACHE_H +#ifdef CONFIG_9P_FSCACHE +#include <linux/fscache.h> +#include <linux/spinlock.h> + +extern struct fscache_netfs v9fs_cache_netfs; +extern const struct fscache_cookie_def v9fs_cache_session_index_def; +extern const struct fscache_cookie_def v9fs_cache_inode_index_def; + +extern void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses); +extern void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses); + +extern void v9fs_cache_inode_get_cookie(struct inode *inode); +extern void v9fs_cache_inode_put_cookie(struct inode *inode); +extern void v9fs_cache_inode_flush_cookie(struct inode *inode); +extern void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp); +extern void v9fs_cache_inode_reset_cookie(struct inode *inode); + +extern int __v9fs_cache_register(void); +extern void __v9fs_cache_unregister(void); + +extern int __v9fs_fscache_release_page(struct page *page, gfp_t gfp); +extern void __v9fs_fscache_invalidate_page(struct page *page); +extern int __v9fs_readpage_from_fscache(struct inode *inode, + struct page *page); +extern int __v9fs_readpages_from_fscache(struct inode *inode, + struct address_space *mapping, + struct list_head *pages, + unsigned *nr_pages); +extern void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page); +extern void __v9fs_fscache_wait_on_page_write(struct inode *inode, + struct page *page); + +static inline int v9fs_fscache_release_page(struct page *page, + gfp_t gfp) +{ + return __v9fs_fscache_release_page(page, gfp); +} + +static inline void v9fs_fscache_invalidate_page(struct page *page) +{ + __v9fs_fscache_invalidate_page(page); +} + +static inline int v9fs_readpage_from_fscache(struct inode *inode, + struct page *page) +{ + return __v9fs_readpage_from_fscache(inode, page); +} + +static inline int v9fs_readpages_from_fscache(struct inode *inode, + struct address_space *mapping, + struct list_head *pages, + unsigned *nr_pages) +{ + return __v9fs_readpages_from_fscache(inode, mapping, pages, + nr_pages); +} + +static inline void v9fs_readpage_to_fscache(struct inode *inode, + struct page *page) +{ + if (PageFsCache(page)) + __v9fs_readpage_to_fscache(inode, page); +} + +static inline void v9fs_uncache_page(struct inode *inode, struct page *page) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + fscache_uncache_page(v9inode->fscache, page); + BUG_ON(PageFsCache(page)); +} + +static inline void v9fs_fscache_wait_on_page_write(struct inode *inode, + struct page *page) +{ + return __v9fs_fscache_wait_on_page_write(inode, page); +} + +#else /* CONFIG_9P_FSCACHE */ + +static inline int v9fs_fscache_release_page(struct page *page, + gfp_t gfp) { + return 1; +} + +static inline void v9fs_fscache_invalidate_page(struct page *page) {} + +static inline int v9fs_readpage_from_fscache(struct inode *inode, + struct page *page) +{ + return -ENOBUFS; +} + +static inline int v9fs_readpages_from_fscache(struct inode *inode, + struct address_space *mapping, + struct list_head *pages, + unsigned *nr_pages) +{ + return -ENOBUFS; +} + +static inline void v9fs_readpage_to_fscache(struct inode *inode, + struct page *page) +{} + +static inline void v9fs_uncache_page(struct inode *inode, struct page *page) +{} + +static inline void v9fs_fscache_wait_on_page_write(struct inode *inode, + struct page *page) +{ + return; +} + 
+#endif /* CONFIG_9P_FSCACHE */ +#endif /* _9P_CACHE_H */ diff --git a/addons/9p/src/3.10.108/client.c b/addons/9p/src/3.10.108/client.c new file mode 100644 index 00000000..e191aab9 --- /dev/null +++ b/addons/9p/src/3.10.108/client.c @@ -0,0 +1,2256 @@ +/* + * net/9p/clnt.c + * + * 9P Client + * + * Copyright (C) 2008 by Eric Van Hensbergen + * Copyright (C) 2007 by Latchesar Ionkov + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/idr.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/uaccess.h> +#include <net/9p/9p.h> +#include <linux/parser.h> +#include <net/9p/client.h> +#include <net/9p/transport.h> +#include "protocol.h" + +#define CREATE_TRACE_POINTS +#include <trace/events/9p.h> + +/* + * Client Option Parsing (code inspired by NFS code) + * - a little lazy - parse all client options + */ + +enum { + Opt_msize, + Opt_trans, + Opt_legacy, + Opt_version, + Opt_err, +}; + +static const match_table_t tokens = { + {Opt_msize, "msize=%u"}, + {Opt_legacy, "noextend"}, + {Opt_trans, "trans=%s"}, + {Opt_version, "version=%s"}, + {Opt_err, NULL}, +}; + +inline int p9_is_proto_dotl(struct p9_client *clnt) +{ + return clnt->proto_version == p9_proto_2000L; +} +EXPORT_SYMBOL(p9_is_proto_dotl); + +inline int p9_is_proto_dotu(struct p9_client *clnt) +{ + return clnt->proto_version == p9_proto_2000u; +} +EXPORT_SYMBOL(p9_is_proto_dotu); + +/* + * Some error codes are taken directly from the server replies, + * make sure they are valid.
+ */ +static int safe_errno(int err) +{ + if ((err > 0) || (err < -MAX_ERRNO)) { + p9_debug(P9_DEBUG_ERROR, "Invalid error code %d\n", err); + return -EPROTO; + } + return err; +} + + +/* Interpret mount option for protocol version */ +static int get_protocol_version(char *s) +{ + int version = -EINVAL; + + if (!strcmp(s, "9p2000")) { + version = p9_proto_legacy; + p9_debug(P9_DEBUG_9P, "Protocol version: Legacy\n"); + } else if (!strcmp(s, "9p2000.u")) { + version = p9_proto_2000u; + p9_debug(P9_DEBUG_9P, "Protocol version: 9P2000.u\n"); + } else if (!strcmp(s, "9p2000.L")) { + version = p9_proto_2000L; + p9_debug(P9_DEBUG_9P, "Protocol version: 9P2000.L\n"); + } else + pr_info("Unknown protocol version %s\n", s); + + return version; +} + +/** + * parse_options - parse mount options into client structure + * @opts: options string passed from mount + * @clnt: existing v9fs client information + * + * Return 0 upon success, -ERRNO upon failure + */ + +static int parse_opts(char *opts, struct p9_client *clnt) +{ + char *options, *tmp_options; + char *p; + substring_t args[MAX_OPT_ARGS]; + int option; + char *s; + int ret = 0; + + clnt->proto_version = p9_proto_2000u; + clnt->msize = 8192; + + if (!opts) + return 0; + + tmp_options = kstrdup(opts, GFP_KERNEL); + if (!tmp_options) { + p9_debug(P9_DEBUG_ERROR, + "failed to allocate copy of option string\n"); + return -ENOMEM; + } + options = tmp_options; + + while ((p = strsep(&options, ",")) != NULL) { + int token, r; + if (!*p) + continue; + token = match_token(p, tokens, args); + switch (token) { + case Opt_msize: + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + ret = r; + continue; + } + clnt->msize = option; + break; + case Opt_trans: + s = match_strdup(&args[0]); + if (!s) { + ret = -ENOMEM; + p9_debug(P9_DEBUG_ERROR, + "problem allocating copy of trans arg\n"); + goto free_and_return; + } + clnt->trans_mod = v9fs_get_trans_by_name(s); + if (clnt->trans_mod == NULL) { + pr_info("Could not find request transport: %s\n", + s); + ret = -EINVAL; + kfree(s); + goto free_and_return; + } + kfree(s); + break; + case Opt_legacy: + clnt->proto_version = p9_proto_legacy; + break; + case Opt_version: + s = match_strdup(&args[0]); + if (!s) { + ret = -ENOMEM; + p9_debug(P9_DEBUG_ERROR, + "problem allocating copy of version arg\n"); + goto free_and_return; + } + ret = get_protocol_version(s); + if (ret == -EINVAL) { + kfree(s); + goto free_and_return; + } + kfree(s); + clnt->proto_version = ret; + break; + default: + continue; + } + } + +free_and_return: + kfree(tmp_options); + return ret; +} + +/** + * p9_tag_alloc - lookup/allocate a request by tag + * @c: client session to lookup tag within + * @tag: numeric id for transaction + * + * this is a simple array lookup, but will grow the + * request_slots as necessary to accommodate transaction + * ids which did not previously have a slot. + * + * this code relies on the client spinlock to manage locks, its + * possible we should switch to something else, but I'd rather + * stick with something low-overhead for the common case. 
+ * + */ + +static struct p9_req_t * +p9_tag_alloc(struct p9_client *c, u16 tag, unsigned int max_size) +{ + unsigned long flags; + int row, col; + struct p9_req_t *req; + int alloc_msize = min(c->msize, max_size); + + /* This looks up the original request by tag so we know which + * buffer to read the data into */ + tag++; + + if (tag >= c->max_tag) { + spin_lock_irqsave(&c->lock, flags); + /* check again since original check was outside of lock */ + while (tag >= c->max_tag) { + row = (tag / P9_ROW_MAXTAG); + c->reqs[row] = kcalloc(P9_ROW_MAXTAG, + sizeof(struct p9_req_t), GFP_ATOMIC); + + if (!c->reqs[row]) { + pr_err("Couldn't grow tag array\n"); + spin_unlock_irqrestore(&c->lock, flags); + return ERR_PTR(-ENOMEM); + } + for (col = 0; col < P9_ROW_MAXTAG; col++) { + c->reqs[row][col].status = REQ_STATUS_IDLE; + c->reqs[row][col].tc = NULL; + } + c->max_tag += P9_ROW_MAXTAG; + } + spin_unlock_irqrestore(&c->lock, flags); + } + row = tag / P9_ROW_MAXTAG; + col = tag % P9_ROW_MAXTAG; + + req = &c->reqs[row][col]; + if (!req->tc) { + req->wq = kmalloc(sizeof(wait_queue_head_t), GFP_NOFS); + if (!req->wq) { + pr_err("Couldn't grow tag array\n"); + return ERR_PTR(-ENOMEM); + } + init_waitqueue_head(req->wq); + req->tc = kmalloc(sizeof(struct p9_fcall) + alloc_msize, + GFP_NOFS); + req->rc = kmalloc(sizeof(struct p9_fcall) + alloc_msize, + GFP_NOFS); + if ((!req->tc) || (!req->rc)) { + pr_err("Couldn't grow tag array\n"); + kfree(req->tc); + kfree(req->rc); + kfree(req->wq); + req->tc = req->rc = NULL; + req->wq = NULL; + return ERR_PTR(-ENOMEM); + } + req->tc->capacity = alloc_msize; + req->rc->capacity = alloc_msize; + req->tc->sdata = (char *) req->tc + sizeof(struct p9_fcall); + req->rc->sdata = (char *) req->rc + sizeof(struct p9_fcall); + } + + p9pdu_reset(req->tc); + p9pdu_reset(req->rc); + + req->tc->tag = tag-1; + req->status = REQ_STATUS_ALLOC; + + return &c->reqs[row][col]; +} + +/** + * p9_tag_lookup - lookup a request by tag + * @c: client session to lookup tag within + * @tag: numeric id for transaction + * + */ + +struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag) +{ + int row, col; + + /* This looks up the original request by tag so we know which + * buffer to read the data into */ + tag++; + + if(tag >= c->max_tag) + return NULL; + + row = tag / P9_ROW_MAXTAG; + col = tag % P9_ROW_MAXTAG; + + return &c->reqs[row][col]; +} +EXPORT_SYMBOL(p9_tag_lookup); + +/** + * p9_tag_init - setup tags structure and contents + * @c: v9fs client struct + * + * This initializes the tags structure for each client instance. 
+ * + */ + +static int p9_tag_init(struct p9_client *c) +{ + int err = 0; + + c->tagpool = p9_idpool_create(); + if (IS_ERR(c->tagpool)) { + err = PTR_ERR(c->tagpool); + goto error; + } + err = p9_idpool_get(c->tagpool); /* reserve tag 0 */ + if (err < 0) { + p9_idpool_destroy(c->tagpool); + goto error; + } + c->max_tag = 0; +error: + return err; +} + +/** + * p9_tag_cleanup - cleans up tags structure and reclaims resources + * @c: v9fs client struct + * + * This frees resources associated with the tags structure + * + */ +static void p9_tag_cleanup(struct p9_client *c) +{ + int row, col; + + /* check to insure all requests are idle */ + for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) { + for (col = 0; col < P9_ROW_MAXTAG; col++) { + if (c->reqs[row][col].status != REQ_STATUS_IDLE) { + p9_debug(P9_DEBUG_MUX, + "Attempting to cleanup non-free tag %d,%d\n", + row, col); + /* TODO: delay execution of cleanup */ + return; + } + } + } + + if (c->tagpool) { + p9_idpool_put(0, c->tagpool); /* free reserved tag 0 */ + p9_idpool_destroy(c->tagpool); + } + + /* free requests associated with tags */ + for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) { + for (col = 0; col < P9_ROW_MAXTAG; col++) { + kfree(c->reqs[row][col].wq); + kfree(c->reqs[row][col].tc); + kfree(c->reqs[row][col].rc); + } + kfree(c->reqs[row]); + } + c->max_tag = 0; +} + +/** + * p9_free_req - free a request and clean-up as necessary + * c: client state + * r: request to release + * + */ + +static void p9_free_req(struct p9_client *c, struct p9_req_t *r) +{ + int tag = r->tc->tag; + p9_debug(P9_DEBUG_MUX, "clnt %p req %p tag: %d\n", c, r, tag); + + r->status = REQ_STATUS_IDLE; + if (tag != P9_NOTAG && p9_idpool_check(tag, c->tagpool)) + p9_idpool_put(tag, c->tagpool); +} + +/** + * p9_client_cb - call back from transport to client + * c: client state + * req: request received + * + */ +void p9_client_cb(struct p9_client *c, struct p9_req_t *req) +{ + p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc->tag); + wake_up(req->wq); + p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag); +} +EXPORT_SYMBOL(p9_client_cb); + +/** + * p9_parse_header - parse header arguments out of a packet + * @pdu: packet to parse + * @size: size of packet + * @type: type of request + * @tag: tag of packet + * @rewind: set if we need to rewind offset afterwards + */ + +int +p9_parse_header(struct p9_fcall *pdu, int32_t *size, int8_t *type, int16_t *tag, + int rewind) +{ + int8_t r_type; + int16_t r_tag; + int32_t r_size; + int offset = pdu->offset; + int err; + + pdu->offset = 0; + if (pdu->size == 0) + pdu->size = 7; + + err = p9pdu_readf(pdu, 0, "dbw", &r_size, &r_type, &r_tag); + if (err) + goto rewind_and_exit; + + pdu->size = r_size; + pdu->id = r_type; + pdu->tag = r_tag; + + p9_debug(P9_DEBUG_9P, "<<< size=%d type: %d tag: %d\n", + pdu->size, pdu->id, pdu->tag); + + if (type) + *type = r_type; + if (tag) + *tag = r_tag; + if (size) + *size = r_size; + + +rewind_and_exit: + if (rewind) + pdu->offset = offset; + return err; +} +EXPORT_SYMBOL(p9_parse_header); + +/** + * p9_check_errors - check 9p packet for error return and process it + * @c: current client instance + * @req: request to parse and check for error conditions + * + * returns error code if one is discovered, otherwise returns 0 + * + * this will have to be more complicated if we have multiple + * error packet types + */ + +static int p9_check_errors(struct p9_client *c, struct p9_req_t *req) +{ + int8_t type; + int err; + int ecode; + + err = p9_parse_header(req->rc, NULL, &type, 
NULL, 0); + /* + * dump the response from server + * This should be after check errors which poplulate pdu_fcall. + */ + trace_9p_protocol_dump(c, req->rc); + if (err) { + p9_debug(P9_DEBUG_ERROR, "couldn't parse header %d\n", err); + return err; + } + if (type != P9_RERROR && type != P9_RLERROR) + return 0; + + if (!p9_is_proto_dotl(c)) { + char *ename; + err = p9pdu_readf(req->rc, c->proto_version, "s?d", + &ename, &ecode); + if (err) + goto out_err; + + if (p9_is_proto_dotu(c)) + err = -ecode; + + if (!err || !IS_ERR_VALUE(err)) { + err = p9_errstr2errno(ename, strlen(ename)); + + p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", + -ecode, ename); + } + kfree(ename); + } else { + err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode); + err = -ecode; + + p9_debug(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode); + } + + return err; + +out_err: + p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err); + + return err; +} + +/** + * p9_check_zc_errors - check 9p packet for error return and process it + * @c: current client instance + * @req: request to parse and check for error conditions + * @in_hdrlen: Size of response protocol buffer. + * + * returns error code if one is discovered, otherwise returns 0 + * + * this will have to be more complicated if we have multiple + * error packet types + */ + +static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req, + char *uidata, int in_hdrlen, int kern_buf) +{ + int err; + int ecode; + int8_t type; + char *ename = NULL; + + err = p9_parse_header(req->rc, NULL, &type, NULL, 0); + /* + * dump the response from server + * This should be after parse_header which poplulate pdu_fcall. + */ + trace_9p_protocol_dump(c, req->rc); + if (err) { + p9_debug(P9_DEBUG_ERROR, "couldn't parse header %d\n", err); + return err; + } + + if (type != P9_RERROR && type != P9_RLERROR) + return 0; + + if (!p9_is_proto_dotl(c)) { + /* Error is reported in string format */ + int len; + /* 7 = header size for RERROR; */ + int inline_len = in_hdrlen - 7; + + len = req->rc->size - req->rc->offset; + if (len > (P9_ZC_HDR_SZ - 7)) { + err = -EFAULT; + goto out_err; + } + + ename = &req->rc->sdata[req->rc->offset]; + if (len > inline_len) { + /* We have error in external buffer */ + if (kern_buf) { + memcpy(ename + inline_len, uidata, + len - inline_len); + } else { + err = copy_from_user(ename + inline_len, + uidata, len - inline_len); + if (err) { + err = -EFAULT; + goto out_err; + } + } + } + ename = NULL; + err = p9pdu_readf(req->rc, c->proto_version, "s?d", + &ename, &ecode); + if (err) + goto out_err; + + if (p9_is_proto_dotu(c)) + err = -ecode; + + if (!err || !IS_ERR_VALUE(err)) { + err = p9_errstr2errno(ename, strlen(ename)); + + p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", + -ecode, ename); + } + kfree(ename); + } else { + err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode); + err = -ecode; + + p9_debug(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode); + } + return err; + +out_err: + p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err); + return err; +} + +static struct p9_req_t * +p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...); + +/** + * p9_client_flush - flush (cancel) a request + * @c: client state + * @oldreq: request to cancel + * + * This sents a flush for a particular request and links + * the flush request to the original request. The current + * code only supports a single flush request although the protocol + * allows for multiple flush requests to be sent for a single request. 
+ * + */ + +static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq) +{ + struct p9_req_t *req; + int16_t oldtag; + int err; + + err = p9_parse_header(oldreq->tc, NULL, NULL, &oldtag, 1); + if (err) + return err; + + p9_debug(P9_DEBUG_9P, ">>> TFLUSH tag %d\n", oldtag); + + req = p9_client_rpc(c, P9_TFLUSH, "w", oldtag); + if (IS_ERR(req)) + return PTR_ERR(req); + + + /* if we haven't received a response for oldreq, + remove it from the list. */ + spin_lock(&c->lock); + if (oldreq->status == REQ_STATUS_FLSH) + list_del(&oldreq->req_list); + spin_unlock(&c->lock); + + p9_free_req(c, req); + return 0; +} + +static struct p9_req_t *p9_client_prepare_req(struct p9_client *c, + int8_t type, int req_size, + const char *fmt, va_list ap) +{ + int tag, err; + struct p9_req_t *req; + + p9_debug(P9_DEBUG_MUX, "client %p op %d\n", c, type); + + /* we allow for any status other than disconnected */ + if (c->status == Disconnected) + return ERR_PTR(-EIO); + + /* if status is begin_disconnected we allow only clunk request */ + if ((c->status == BeginDisconnect) && (type != P9_TCLUNK)) + return ERR_PTR(-EIO); + + tag = P9_NOTAG; + if (type != P9_TVERSION) { + tag = p9_idpool_get(c->tagpool); + if (tag < 0) + return ERR_PTR(-ENOMEM); + } + + req = p9_tag_alloc(c, tag, req_size); + if (IS_ERR(req)) + return req; + + /* marshall the data */ + p9pdu_prepare(req->tc, tag, type); + err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap); + if (err) + goto reterr; + p9pdu_finalize(c, req->tc); + trace_9p_client_req(c, type, tag); + return req; +reterr: + p9_free_req(c, req); + return ERR_PTR(err); +} + +/** + * p9_client_rpc - issue a request and wait for a response + * @c: client session + * @type: type of request + * @fmt: protocol format string (see protocol.c) + * + * Returns request structure (which client must free using p9_free_req) + */ + +static struct p9_req_t * +p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) 
+{ + va_list ap; + int sigpending, err; + unsigned long flags; + struct p9_req_t *req; + + va_start(ap, fmt); + req = p9_client_prepare_req(c, type, c->msize, fmt, ap); + va_end(ap); + if (IS_ERR(req)) + return req; + + if (signal_pending(current)) { + sigpending = 1; + clear_thread_flag(TIF_SIGPENDING); + } else + sigpending = 0; + + err = c->trans_mod->request(c, req); + if (err < 0) { + if (err != -ERESTARTSYS && err != -EFAULT) + c->status = Disconnected; + goto reterr; + } +again: + /* Wait for the response */ + err = wait_event_interruptible(*req->wq, + req->status >= REQ_STATUS_RCVD); + + if ((err == -ERESTARTSYS) && (c->status == Connected) + && (type == P9_TFLUSH)) { + sigpending = 1; + clear_thread_flag(TIF_SIGPENDING); + goto again; + } + + if (req->status == REQ_STATUS_ERROR) { + p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err); + err = req->t_err; + } + if ((err == -ERESTARTSYS) && (c->status == Connected)) { + p9_debug(P9_DEBUG_MUX, "flushing\n"); + sigpending = 1; + clear_thread_flag(TIF_SIGPENDING); + + if (c->trans_mod->cancel(c, req)) + p9_client_flush(c, req); + + /* if we received the response anyway, don't signal error */ + if (req->status == REQ_STATUS_RCVD) + err = 0; + } + if (sigpending) { + spin_lock_irqsave(¤t->sighand->siglock, flags); + recalc_sigpending(); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); + } + if (err < 0) + goto reterr; + + err = p9_check_errors(c, req); + trace_9p_client_res(c, type, req->rc->tag, err); + if (!err) + return req; +reterr: + p9_free_req(c, req); + return ERR_PTR(safe_errno(err)); +} + +/** + * p9_client_zc_rpc - issue a request and wait for a response + * @c: client session + * @type: type of request + * @uidata: user bffer that should be ued for zero copy read + * @uodata: user buffer that shoud be user for zero copy write + * @inlen: read buffer size + * @olen: write buffer size + * @hdrlen: reader header size, This is the size of response protocol data + * @fmt: protocol format string (see protocol.c) + * + * Returns request structure (which client must free using p9_free_req) + */ +static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type, + char *uidata, char *uodata, + int inlen, int olen, int in_hdrlen, + int kern_buf, const char *fmt, ...) +{ + va_list ap; + int sigpending, err; + unsigned long flags; + struct p9_req_t *req; + + va_start(ap, fmt); + /* + * We allocate a inline protocol data of only 4k bytes. + * The actual content is passed in zero-copy fashion. 
+ */ + req = p9_client_prepare_req(c, type, P9_ZC_HDR_SZ, fmt, ap); + va_end(ap); + if (IS_ERR(req)) + return req; + + if (signal_pending(current)) { + sigpending = 1; + clear_thread_flag(TIF_SIGPENDING); + } else + sigpending = 0; + + /* If we are called with KERNEL_DS force kern_buf */ + if (segment_eq(get_fs(), KERNEL_DS)) + kern_buf = 1; + + err = c->trans_mod->zc_request(c, req, uidata, uodata, + inlen, olen, in_hdrlen, kern_buf); + if (err < 0) { + if (err == -EIO) + c->status = Disconnected; + if (err != -ERESTARTSYS) + goto reterr; + } + if (req->status == REQ_STATUS_ERROR) { + p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err); + err = req->t_err; + } + if ((err == -ERESTARTSYS) && (c->status == Connected)) { + p9_debug(P9_DEBUG_MUX, "flushing\n"); + sigpending = 1; + clear_thread_flag(TIF_SIGPENDING); + + if (c->trans_mod->cancel(c, req)) + p9_client_flush(c, req); + + /* if we received the response anyway, don't signal error */ + if (req->status == REQ_STATUS_RCVD) + err = 0; + } + if (sigpending) { + spin_lock_irqsave(¤t->sighand->siglock, flags); + recalc_sigpending(); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); + } + if (err < 0) + goto reterr; + + err = p9_check_zc_errors(c, req, uidata, in_hdrlen, kern_buf); + trace_9p_client_res(c, type, req->rc->tag, err); + if (!err) + return req; +reterr: + p9_free_req(c, req); + return ERR_PTR(safe_errno(err)); +} + +static struct p9_fid *p9_fid_create(struct p9_client *clnt) +{ + int ret; + struct p9_fid *fid; + unsigned long flags; + + p9_debug(P9_DEBUG_FID, "clnt %p\n", clnt); + fid = kmalloc(sizeof(struct p9_fid), GFP_KERNEL); + if (!fid) + return ERR_PTR(-ENOMEM); + + ret = p9_idpool_get(clnt->fidpool); + if (ret < 0) { + ret = -ENOSPC; + goto error; + } + fid->fid = ret; + + memset(&fid->qid, 0, sizeof(struct p9_qid)); + fid->mode = -1; + fid->uid = current_fsuid(); + fid->clnt = clnt; + fid->rdir = NULL; + spin_lock_irqsave(&clnt->lock, flags); + list_add(&fid->flist, &clnt->fidlist); + spin_unlock_irqrestore(&clnt->lock, flags); + + return fid; + +error: + kfree(fid); + return ERR_PTR(ret); +} + +static void p9_fid_destroy(struct p9_fid *fid) +{ + struct p9_client *clnt; + unsigned long flags; + + p9_debug(P9_DEBUG_FID, "fid %d\n", fid->fid); + clnt = fid->clnt; + p9_idpool_put(fid->fid, clnt->fidpool); + spin_lock_irqsave(&clnt->lock, flags); + list_del(&fid->flist); + spin_unlock_irqrestore(&clnt->lock, flags); + kfree(fid->rdir); + kfree(fid); +} + +static int p9_client_version(struct p9_client *c) +{ + int err = 0; + struct p9_req_t *req; + char *version; + int msize; + + p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n", + c->msize, c->proto_version); + + switch (c->proto_version) { + case p9_proto_2000L: + req = p9_client_rpc(c, P9_TVERSION, "ds", + c->msize, "9P2000.L"); + break; + case p9_proto_2000u: + req = p9_client_rpc(c, P9_TVERSION, "ds", + c->msize, "9P2000.u"); + break; + case p9_proto_legacy: + req = p9_client_rpc(c, P9_TVERSION, "ds", + c->msize, "9P2000"); + break; + default: + return -EINVAL; + break; + } + + if (IS_ERR(req)) + return PTR_ERR(req); + + err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version); + if (err) { + p9_debug(P9_DEBUG_9P, "version error %d\n", err); + trace_9p_protocol_dump(c, req->rc); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version); + if (!strncmp(version, "9P2000.L", 8)) + c->proto_version = p9_proto_2000L; + else if (!strncmp(version, "9P2000.u", 8)) + c->proto_version = p9_proto_2000u; + else if 
(!strncmp(version, "9P2000", 6)) + c->proto_version = p9_proto_legacy; + else { + err = -EREMOTEIO; + goto error; + } + + if (msize < c->msize) + c->msize = msize; + +error: + kfree(version); + p9_free_req(c, req); + + return err; +} + +struct p9_client *p9_client_create(const char *dev_name, char *options) +{ + int err; + struct p9_client *clnt; + + err = 0; + clnt = kmalloc(sizeof(struct p9_client), GFP_KERNEL); + if (!clnt) + return ERR_PTR(-ENOMEM); + + clnt->trans_mod = NULL; + clnt->trans = NULL; + spin_lock_init(&clnt->lock); + INIT_LIST_HEAD(&clnt->fidlist); + + err = p9_tag_init(clnt); + if (err < 0) + goto free_client; + + err = parse_opts(options, clnt); + if (err < 0) + goto destroy_tagpool; + + if (!clnt->trans_mod) + clnt->trans_mod = v9fs_get_default_trans(); + + if (clnt->trans_mod == NULL) { + err = -EPROTONOSUPPORT; + p9_debug(P9_DEBUG_ERROR, + "No transport defined or default transport\n"); + goto destroy_tagpool; + } + + clnt->fidpool = p9_idpool_create(); + if (IS_ERR(clnt->fidpool)) { + err = PTR_ERR(clnt->fidpool); + goto put_trans; + } + + p9_debug(P9_DEBUG_MUX, "clnt %p trans %p msize %d protocol %d\n", + clnt, clnt->trans_mod, clnt->msize, clnt->proto_version); + + err = clnt->trans_mod->create(clnt, dev_name, options); + if (err) + goto destroy_fidpool; + + if (clnt->msize > clnt->trans_mod->maxsize) + clnt->msize = clnt->trans_mod->maxsize; + + err = p9_client_version(clnt); + if (err) + goto close_trans; + + return clnt; + +close_trans: + clnt->trans_mod->close(clnt); +destroy_fidpool: + p9_idpool_destroy(clnt->fidpool); +put_trans: + v9fs_put_trans(clnt->trans_mod); +destroy_tagpool: + p9_idpool_destroy(clnt->tagpool); +free_client: + kfree(clnt); + return ERR_PTR(err); +} +EXPORT_SYMBOL(p9_client_create); + +void p9_client_destroy(struct p9_client *clnt) +{ + struct p9_fid *fid, *fidptr; + + p9_debug(P9_DEBUG_MUX, "clnt %p\n", clnt); + + if (clnt->trans_mod) + clnt->trans_mod->close(clnt); + + v9fs_put_trans(clnt->trans_mod); + + list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) { + pr_info("Found fid %d not clunked\n", fid->fid); + p9_fid_destroy(fid); + } + + if (clnt->fidpool) + p9_idpool_destroy(clnt->fidpool); + + p9_tag_cleanup(clnt); + + kfree(clnt); +} +EXPORT_SYMBOL(p9_client_destroy); + +void p9_client_disconnect(struct p9_client *clnt) +{ + p9_debug(P9_DEBUG_9P, "clnt %p\n", clnt); + clnt->status = Disconnected; +} +EXPORT_SYMBOL(p9_client_disconnect); + +void p9_client_begin_disconnect(struct p9_client *clnt) +{ + p9_debug(P9_DEBUG_9P, "clnt %p\n", clnt); + clnt->status = BeginDisconnect; +} +EXPORT_SYMBOL(p9_client_begin_disconnect); + +struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, + char *uname, kuid_t n_uname, char *aname) +{ + int err = 0; + struct p9_req_t *req; + struct p9_fid *fid; + struct p9_qid qid; + + + p9_debug(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n", + afid ? afid->fid : -1, uname, aname); + fid = p9_fid_create(clnt); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + fid = NULL; + goto error; + } + + req = p9_client_rpc(clnt, P9_TATTACH, "ddss?u", fid->fid, + afid ? 
afid->fid : P9_NOFID, uname, aname, n_uname); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RATTACH qid %x.%llx.%x\n", + qid.type, (unsigned long long)qid.path, qid.version); + + memmove(&fid->qid, &qid, sizeof(struct p9_qid)); + + p9_free_req(clnt, req); + return fid; + +error: + if (fid) + p9_fid_destroy(fid); + return ERR_PTR(err); +} +EXPORT_SYMBOL(p9_client_attach); + +struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, + char **wnames, int clone) +{ + int err; + struct p9_client *clnt; + struct p9_fid *fid; + struct p9_qid *wqids; + struct p9_req_t *req; + uint16_t nwqids, count; + + err = 0; + wqids = NULL; + clnt = oldfid->clnt; + if (clone) { + fid = p9_fid_create(clnt); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + fid = NULL; + goto error; + } + + fid->uid = oldfid->uid; + } else + fid = oldfid; + + + p9_debug(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n", + oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL); + + req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid, + nwname, wnames); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "R", &nwqids, &wqids); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + goto clunk_fid; + } + p9_free_req(clnt, req); + + p9_debug(P9_DEBUG_9P, "<<< RWALK nwqid %d:\n", nwqids); + + if (nwqids != nwname) { + err = -ENOENT; + goto clunk_fid; + } + + for (count = 0; count < nwqids; count++) + p9_debug(P9_DEBUG_9P, "<<< [%d] %x.%llx.%x\n", + count, wqids[count].type, + (unsigned long long)wqids[count].path, + wqids[count].version); + + if (nwname) + memmove(&fid->qid, &wqids[nwqids - 1], sizeof(struct p9_qid)); + else + fid->qid = oldfid->qid; + + kfree(wqids); + return fid; + +clunk_fid: + kfree(wqids); + p9_client_clunk(fid); + fid = NULL; + +error: + if (fid && (fid != oldfid)) + p9_fid_destroy(fid); + + return ERR_PTR(err); +} +EXPORT_SYMBOL(p9_client_walk); + +int p9_client_open(struct p9_fid *fid, int mode) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + struct p9_qid qid; + int iounit; + + clnt = fid->clnt; + p9_debug(P9_DEBUG_9P, ">>> %s fid %d mode %d\n", + p9_is_proto_dotl(clnt) ? "TLOPEN" : "TOPEN", fid->fid, mode); + err = 0; + + if (fid->mode != -1) + return -EINVAL; + + if (p9_is_proto_dotl(clnt)) + req = p9_client_rpc(clnt, P9_TLOPEN, "dd", fid->fid, mode); + else + req = p9_client_rpc(clnt, P9_TOPEN, "db", fid->fid, mode); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto free_and_error; + } + + p9_debug(P9_DEBUG_9P, "<<< %s qid %x.%llx.%x iounit %x\n", + p9_is_proto_dotl(clnt) ? 
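+/* [Editorial sketch - not in the original commit.] p9_client_walk() above is the wire form of path lookup: TWALK clones oldfid and advances the clone through up to P9_MAXWELEM name components per message. Hypothetical lookup of "a/b" under a root fid: + * + *   char *names[] = { "a", "b" }; + *   struct p9_fid *fid = p9_client_walk(root, 2, names, 1); + * + * clone=1 leaves "root" usable afterwards; the new fid must eventually be released with p9_client_clunk(fid). + */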
"RLOPEN" : "ROPEN", qid.type, + (unsigned long long)qid.path, qid.version, iounit); + + fid->mode = mode; + fid->iounit = iounit; + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_open); + +int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, + kgid_t gid, struct p9_qid *qid) +{ + int err = 0; + struct p9_client *clnt; + struct p9_req_t *req; + int iounit; + + p9_debug(P9_DEBUG_9P, + ">>> TLCREATE fid %d name %s flags %d mode %d gid %d\n", + ofid->fid, name, flags, mode, + from_kgid(&init_user_ns, gid)); + clnt = ofid->clnt; + + if (ofid->mode != -1) + return -EINVAL; + + req = p9_client_rpc(clnt, P9_TLCREATE, "dsddg", ofid->fid, name, flags, + mode, gid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", qid, &iounit); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto free_and_error; + } + + p9_debug(P9_DEBUG_9P, "<<< RLCREATE qid %x.%llx.%x iounit %x\n", + qid->type, + (unsigned long long)qid->path, + qid->version, iounit); + + ofid->mode = mode; + ofid->iounit = iounit; + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_create_dotl); + +int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, + char *extension) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + struct p9_qid qid; + int iounit; + + p9_debug(P9_DEBUG_9P, ">>> TCREATE fid %d name %s perm %d mode %d\n", + fid->fid, name, perm, mode); + err = 0; + clnt = fid->clnt; + + if (fid->mode != -1) + return -EINVAL; + + req = p9_client_rpc(clnt, P9_TCREATE, "dsdb?s", fid->fid, name, perm, + mode, extension); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto free_and_error; + } + + p9_debug(P9_DEBUG_9P, "<<< RCREATE qid %x.%llx.%x iounit %x\n", + qid.type, + (unsigned long long)qid.path, + qid.version, iounit); + + fid->mode = mode; + fid->iounit = iounit; + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_fcreate); + +int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, kgid_t gid, + struct p9_qid *qid) +{ + int err = 0; + struct p9_client *clnt; + struct p9_req_t *req; + + p9_debug(P9_DEBUG_9P, ">>> TSYMLINK dfid %d name %s symtgt %s\n", + dfid->fid, name, symtgt); + clnt = dfid->clnt; + + req = p9_client_rpc(clnt, P9_TSYMLINK, "dssg", dfid->fid, name, symtgt, + gid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto free_and_error; + } + + p9_debug(P9_DEBUG_9P, "<<< RSYMLINK qid %x.%llx.%x\n", + qid->type, (unsigned long long)qid->path, qid->version); + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_symlink); + +int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, char *newname) +{ + struct p9_client *clnt; + struct p9_req_t *req; + + p9_debug(P9_DEBUG_9P, ">>> TLINK dfid %d oldfid %d newname %s\n", + dfid->fid, oldfid->fid, newname); + clnt = dfid->clnt; + req = p9_client_rpc(clnt, P9_TLINK, "dds", dfid->fid, oldfid->fid, + newname); + if (IS_ERR(req)) + return PTR_ERR(req); + + p9_debug(P9_DEBUG_9P, "<<< RLINK\n"); + p9_free_req(clnt, req); + return 0; +} +EXPORT_SYMBOL(p9_client_link); + +int p9_client_fsync(struct 
p9_fid *fid, int datasync) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + p9_debug(P9_DEBUG_9P, ">>> TFSYNC fid %d datasync:%d\n", + fid->fid, datasync); + err = 0; + clnt = fid->clnt; + + req = p9_client_rpc(clnt, P9_TFSYNC, "dd", fid->fid, datasync); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RFSYNC fid %d\n", fid->fid); + + p9_free_req(clnt, req); + +error: + return err; +} +EXPORT_SYMBOL(p9_client_fsync); + +int p9_client_clunk(struct p9_fid *fid) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + int retries = 0; + + if (!fid) { + pr_warn("%s (%d): Trying to clunk with NULL fid\n", + __func__, task_pid_nr(current)); + dump_stack(); + return 0; + } + +again: + p9_debug(P9_DEBUG_9P, ">>> TCLUNK fid %d (try %d)\n", fid->fid, + retries); + err = 0; + clnt = fid->clnt; + + req = p9_client_rpc(clnt, P9_TCLUNK, "d", fid->fid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RCLUNK fid %d\n", fid->fid); + + p9_free_req(clnt, req); +error: + /* + * Fid is not valid even after a failed clunk + * If interrupted, retry once then give up and + * leak fid until umount. + */ + if (err == -ERESTARTSYS) { + if (retries++ == 0) + goto again; + } else + p9_fid_destroy(fid); + return err; +} +EXPORT_SYMBOL(p9_client_clunk); + +int p9_client_remove(struct p9_fid *fid) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + p9_debug(P9_DEBUG_9P, ">>> TREMOVE fid %d\n", fid->fid); + err = 0; + clnt = fid->clnt; + + req = p9_client_rpc(clnt, P9_TREMOVE, "d", fid->fid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RREMOVE fid %d\n", fid->fid); + + p9_free_req(clnt, req); +error: + if (err == -ERESTARTSYS) + p9_client_clunk(fid); + else + p9_fid_destroy(fid); + return err; +} +EXPORT_SYMBOL(p9_client_remove); + +int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags) +{ + int err = 0; + struct p9_req_t *req; + struct p9_client *clnt; + + p9_debug(P9_DEBUG_9P, ">>> TUNLINKAT fid %d %s %d\n", + dfid->fid, name, flags); + + clnt = dfid->clnt; + req = p9_client_rpc(clnt, P9_TUNLINKAT, "dsd", dfid->fid, name, flags); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RUNLINKAT fid %d %s\n", dfid->fid, name); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_unlinkat); + +int +p9_client_read(struct p9_fid *fid, char *data, char __user *udata, u64 offset, + u32 count) +{ + char *dataptr; + int kernel_buf = 0; + struct p9_req_t *req; + struct p9_client *clnt; + int err, rsize, non_zc = 0; + + + p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", + fid->fid, (unsigned long long) offset, count); + err = 0; + clnt = fid->clnt; + + rsize = fid->iounit; + if (!rsize || rsize > clnt->msize-P9_IOHDRSZ) + rsize = clnt->msize - P9_IOHDRSZ; + + if (count < rsize) + rsize = count; + + /* Don't bother zerocopy for small IO (< 1024) */ + if (clnt->trans_mod->zc_request && rsize > 1024) { + char *indata; + if (data) { + kernel_buf = 1; + indata = data; + } else + indata = (__force char *)udata; + /* + * response header len is 11 + * PDU Header(7) + IO Size (4) + */ + req = p9_client_zc_rpc(clnt, P9_TREAD, indata, NULL, rsize, 0, + 11, kernel_buf, "dqd", fid->fid, + offset, rsize); + } else { + non_zc = 1; + req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset, + rsize); + } + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = 
p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto free_and_error; + } + + p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count); + + if (non_zc) { + if (data) { + memmove(data, dataptr, count); + } else { + err = copy_to_user(udata, dataptr, count); + if (err) { + err = -EFAULT; + goto free_and_error; + } + } + } + p9_free_req(clnt, req); + return count; + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_read); + +int +p9_client_write(struct p9_fid *fid, char *data, const char __user *udata, + u64 offset, u32 count) +{ + int err, rsize; + int kernel_buf = 0; + struct p9_client *clnt; + struct p9_req_t *req; + + p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %d\n", + fid->fid, (unsigned long long) offset, count); + err = 0; + clnt = fid->clnt; + + rsize = fid->iounit; + if (!rsize || rsize > clnt->msize-P9_IOHDRSZ) + rsize = clnt->msize - P9_IOHDRSZ; + + if (count < rsize) + rsize = count; + + /* Don't bother zerocopy for small IO (< 1024) */ + if (clnt->trans_mod->zc_request && rsize > 1024) { + char *odata; + if (data) { + kernel_buf = 1; + odata = data; + } else + odata = (char *)udata; + req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, odata, 0, rsize, + P9_ZC_HDR_SZ, kernel_buf, "dqd", + fid->fid, offset, rsize); + } else { + if (data) + req = p9_client_rpc(clnt, P9_TWRITE, "dqD", fid->fid, + offset, rsize, data); + else + req = p9_client_rpc(clnt, P9_TWRITE, "dqU", fid->fid, + offset, rsize, udata); + } + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto free_and_error; + } + + p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count); + + p9_free_req(clnt, req); + return count; + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_write); + +struct p9_wstat *p9_client_stat(struct p9_fid *fid) +{ + int err; + struct p9_client *clnt; + struct p9_wstat *ret = kmalloc(sizeof(struct p9_wstat), GFP_KERNEL); + struct p9_req_t *req; + u16 ignored; + + p9_debug(P9_DEBUG_9P, ">>> TSTAT fid %d\n", fid->fid); + + if (!ret) + return ERR_PTR(-ENOMEM); + + err = 0; + clnt = fid->clnt; + + req = p9_client_rpc(clnt, P9_TSTAT, "d", fid->fid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "wS", &ignored, ret); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + goto error; + } + + p9_debug(P9_DEBUG_9P, + "<<< RSTAT sz=%x type=%x dev=%x qid=%x.%llx.%x\n" + "<<< mode=%8.8x atime=%8.8x mtime=%8.8x length=%llx\n" + "<<< name=%s uid=%s gid=%s muid=%s extension=(%s)\n" + "<<< uid=%d gid=%d n_muid=%d\n", + ret->size, ret->type, ret->dev, ret->qid.type, + (unsigned long long)ret->qid.path, ret->qid.version, ret->mode, + ret->atime, ret->mtime, (unsigned long long)ret->length, + ret->name, ret->uid, ret->gid, ret->muid, ret->extension, + from_kuid(&init_user_ns, ret->n_uid), + from_kgid(&init_user_ns, ret->n_gid), + from_kuid(&init_user_ns, ret->n_muid)); + + p9_free_req(clnt, req); + return ret; + +error: + kfree(ret); + return ERR_PTR(err); +} +EXPORT_SYMBOL(p9_client_stat); + +struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid, + u64 request_mask) +{ + int err; + struct p9_client *clnt; + struct p9_stat_dotl *ret = kmalloc(sizeof(struct p9_stat_dotl), + GFP_KERNEL); + struct p9_req_t *req; + + 
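+/* [Editorial sketch - not in the original commit.] p9_client_read() above returns short counts whenever rsize clamps a request (and it skips zero-copy for IO under 1024 bytes), so callers loop until satisfied. Hypothetical kernel-buffer reader, assuming "buf" and "len" come from the caller: + * + *   u64 off = 0; + *   int n = 0; + * + *   while (off < len && (n = p9_client_read(fid, buf + off, NULL, off, len - off)) > 0) + *           off += n; + *   if (n < 0) + *           return n;       error from the server or transport + */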
p9_debug(P9_DEBUG_9P, ">>> TGETATTR fid %d, request_mask %lld\n", + fid->fid, request_mask); + + if (!ret) + return ERR_PTR(-ENOMEM); + + err = 0; + clnt = fid->clnt; + + req = p9_client_rpc(clnt, P9_TGETATTR, "dq", fid->fid, request_mask); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "A", ret); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + goto error; + } + + p9_debug(P9_DEBUG_9P, + "<<< RGETATTR st_result_mask=%lld\n" + "<<< qid=%x.%llx.%x\n" + "<<< st_mode=%8.8x st_nlink=%llu\n" + "<<< st_uid=%d st_gid=%d\n" + "<<< st_rdev=%llx st_size=%llx st_blksize=%llu st_blocks=%llu\n" + "<<< st_atime_sec=%lld st_atime_nsec=%lld\n" + "<<< st_mtime_sec=%lld st_mtime_nsec=%lld\n" + "<<< st_ctime_sec=%lld st_ctime_nsec=%lld\n" + "<<< st_btime_sec=%lld st_btime_nsec=%lld\n" + "<<< st_gen=%lld st_data_version=%lld", + ret->st_result_mask, ret->qid.type, ret->qid.path, + ret->qid.version, ret->st_mode, ret->st_nlink, + from_kuid(&init_user_ns, ret->st_uid), + from_kgid(&init_user_ns, ret->st_gid), + ret->st_rdev, ret->st_size, ret->st_blksize, + ret->st_blocks, ret->st_atime_sec, ret->st_atime_nsec, + ret->st_mtime_sec, ret->st_mtime_nsec, ret->st_ctime_sec, + ret->st_ctime_nsec, ret->st_btime_sec, ret->st_btime_nsec, + ret->st_gen, ret->st_data_version); + + p9_free_req(clnt, req); + return ret; + +error: + kfree(ret); + return ERR_PTR(err); +} +EXPORT_SYMBOL(p9_client_getattr_dotl); + +static int p9_client_statsize(struct p9_wstat *wst, int proto_version) +{ + int ret; + + /* NOTE: size shouldn't include its own length */ + /* size[2] type[2] dev[4] qid[13] */ + /* mode[4] atime[4] mtime[4] length[8]*/ + /* name[s] uid[s] gid[s] muid[s] */ + ret = 2+4+13+4+4+4+8+2+2+2+2; + + if (wst->name) + ret += strlen(wst->name); + if (wst->uid) + ret += strlen(wst->uid); + if (wst->gid) + ret += strlen(wst->gid); + if (wst->muid) + ret += strlen(wst->muid); + + if ((proto_version == p9_proto_2000u) || + (proto_version == p9_proto_2000L)) { + ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */ + if (wst->extension) + ret += strlen(wst->extension); + } + + return ret; +} + +int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = fid->clnt; + wst->size = p9_client_statsize(wst, clnt->proto_version); + p9_debug(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid); + p9_debug(P9_DEBUG_9P, + " sz=%x type=%x dev=%x qid=%x.%llx.%x\n" + " mode=%8.8x atime=%8.8x mtime=%8.8x length=%llx\n" + " name=%s uid=%s gid=%s muid=%s extension=(%s)\n" + " uid=%d gid=%d n_muid=%d\n", + wst->size, wst->type, wst->dev, wst->qid.type, + (unsigned long long)wst->qid.path, wst->qid.version, wst->mode, + wst->atime, wst->mtime, (unsigned long long)wst->length, + wst->name, wst->uid, wst->gid, wst->muid, wst->extension, + from_kuid(&init_user_ns, wst->n_uid), + from_kgid(&init_user_ns, wst->n_gid), + from_kuid(&init_user_ns, wst->n_muid)); + + req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RWSTAT fid %d\n", fid->fid); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_wstat); + +int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = fid->clnt; + p9_debug(P9_DEBUG_9P, ">>> TSETATTR fid %d\n", fid->fid); + 
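>> TGETATTR">
+/* [Editorial note - worked example, not in the original commit.] p9_client_statsize() above prices out the legacy stat layout: the fixed fields cost 2+4+13+4+4+4+8 = 39 bytes, the four string length prefixes add 8 more (47 total), and each string then contributes strlen() bytes. A stat with name="f", uid="u", gid="g", muid="" is 47 + 3 = 50 bytes, or 64 under 9P2000.u (the extension prefix plus n_uid/n_gid/n_muid add 2+4+4+4 = 14). TWSTAT then sends wst->size + 2 because the stat blob is itself length-prefixed on the wire. */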
p9_debug(P9_DEBUG_9P, + " valid=%x mode=%x uid=%d gid=%d size=%lld\n" + " atime_sec=%lld atime_nsec=%lld\n" + " mtime_sec=%lld mtime_nsec=%lld\n", + p9attr->valid, p9attr->mode, + from_kuid(&init_user_ns, p9attr->uid), + from_kgid(&init_user_ns, p9attr->gid), + p9attr->size, p9attr->atime_sec, p9attr->atime_nsec, + p9attr->mtime_sec, p9attr->mtime_nsec); + + req = p9_client_rpc(clnt, P9_TSETATTR, "dI", fid->fid, p9attr); + + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RSETATTR fid %d\n", fid->fid); + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_setattr); + +int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = fid->clnt; + + p9_debug(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid); + + req = p9_client_rpc(clnt, P9_TSTATFS, "d", fid->fid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "ddqqqqqqd", &sb->type, + &sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail, + &sb->files, &sb->ffree, &sb->fsid, &sb->namelen); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RSTATFS fid %d type 0x%lx bsize %ld " + "blocks %llu bfree %llu bavail %llu files %llu ffree %llu " + "fsid %llu namelen %ld\n", + fid->fid, (long unsigned int)sb->type, (long int)sb->bsize, + sb->blocks, sb->bfree, sb->bavail, sb->files, sb->ffree, + sb->fsid, (long int)sb->namelen); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_statfs); + +int p9_client_rename(struct p9_fid *fid, + struct p9_fid *newdirfid, const char *name) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = fid->clnt; + + p9_debug(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n", + fid->fid, newdirfid->fid, name); + + req = p9_client_rpc(clnt, P9_TRENAME, "dds", fid->fid, + newdirfid->fid, name); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_rename); + +int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name, + struct p9_fid *newdirfid, const char *new_name) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = olddirfid->clnt; + + p9_debug(P9_DEBUG_9P, ">>> TRENAMEAT olddirfid %d old name %s" + " newdirfid %d new name %s\n", olddirfid->fid, old_name, + newdirfid->fid, new_name); + + req = p9_client_rpc(clnt, P9_TRENAMEAT, "dsds", olddirfid->fid, + old_name, newdirfid->fid, new_name); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RRENAMEAT newdirfid %d new name %s\n", + newdirfid->fid, new_name); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_renameat); + +/* + * An xattrwalk without @attr_name gives the fid for the lisxattr namespace + */ +struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid, + const char *attr_name, u64 *attr_size) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + struct p9_fid *attr_fid; + + err = 0; + clnt = file_fid->clnt; + attr_fid = p9_fid_create(clnt); + if (IS_ERR(attr_fid)) { + err = PTR_ERR(attr_fid); + attr_fid = NULL; + goto error; + } + p9_debug(P9_DEBUG_9P, + ">>> TXATTRWALK file_fid %d, attr_fid %d name %s\n", + file_fid->fid, attr_fid->fid, attr_name); + + 
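+/* [Editorial sketch - not in the original commit.] Reading an extended attribute composes three primitives from this file: walk to the attribute to get a dedicated fid, learn its size, then read through that fid and clunk it. Hypothetical caller ("user.foo" and "buf" are illustrative): + * + *   u64 size; + *   struct p9_fid *attr = p9_client_xattrwalk(fid, "user.foo", &size); + * + *   if (!IS_ERR(attr)) { + *           p9_client_read(attr, buf, NULL, 0, size); + *           p9_client_clunk(attr); + *   } + */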
req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds", + file_fid->fid, attr_fid->fid, attr_name); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + err = p9pdu_readf(req->rc, clnt->proto_version, "q", attr_size); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + goto clunk_fid; + } + p9_free_req(clnt, req); + p9_debug(P9_DEBUG_9P, "<<< RXATTRWALK fid %d size %llu\n", + attr_fid->fid, *attr_size); + return attr_fid; +clunk_fid: + p9_client_clunk(attr_fid); + attr_fid = NULL; +error: + if (attr_fid && (attr_fid != file_fid)) + p9_fid_destroy(attr_fid); + + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(p9_client_xattrwalk); + +int p9_client_xattrcreate(struct p9_fid *fid, const char *name, + u64 attr_size, int flags) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + p9_debug(P9_DEBUG_9P, + ">>> TXATTRCREATE fid %d name %s size %lld flag %d\n", + fid->fid, name, (long long)attr_size, flags); + err = 0; + clnt = fid->clnt; + req = p9_client_rpc(clnt, P9_TXATTRCREATE, "dsqd", + fid->fid, name, attr_size, flags); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RXATTRCREATE fid %d\n", fid->fid); + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL_GPL(p9_client_xattrcreate); + +int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) +{ + int err, rsize, non_zc = 0; + struct p9_client *clnt; + struct p9_req_t *req; + char *dataptr; + + p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n", + fid->fid, (unsigned long long) offset, count); + + err = 0; + clnt = fid->clnt; + + rsize = fid->iounit; + if (!rsize || rsize > clnt->msize-P9_READDIRHDRSZ) + rsize = clnt->msize - P9_READDIRHDRSZ; + + if (count < rsize) + rsize = count; + + /* Don't bother zerocopy for small IO (< 1024) */ + if (clnt->trans_mod->zc_request && rsize > 1024) { + /* + * response header len is 11 + * PDU Header(7) + IO Size (4) + */ + req = p9_client_zc_rpc(clnt, P9_TREADDIR, data, NULL, rsize, 0, + 11, 1, "dqd", fid->fid, offset, rsize); + } else { + non_zc = 1; + req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid, + offset, rsize); + } + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto free_and_error; + } + if (rsize < count) { + pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize); + count = rsize; + } + + p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count); + + if (non_zc) + memmove(data, dataptr, count); + + p9_free_req(clnt, req); + return count; + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_readdir); + +int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode, + dev_t rdev, kgid_t gid, struct p9_qid *qid) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + err = 0; + clnt = fid->clnt; + p9_debug(P9_DEBUG_9P, ">>> TMKNOD fid %d name %s mode %d major %d " + "minor %d\n", fid->fid, name, mode, MAJOR(rdev), MINOR(rdev)); + req = p9_client_rpc(clnt, P9_TMKNOD, "dsdddg", fid->fid, name, mode, + MAJOR(rdev), MINOR(rdev), gid); + if (IS_ERR(req)) + return PTR_ERR(req); + + err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type, + (unsigned long long)qid->path, qid->version); + +error: + p9_free_req(clnt, req); + return err; 
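+/* [Editorial sketch - not in the original commit.] p9_client_readdir() above fills a caller-supplied buffer with packed dirents; p9dirent_read() in protocol.c unpacks them one record at a time. Hypothetical listing loop, assuming "buf"/"buflen" from the caller: + * + *   int n = p9_client_readdir(fid, buf, buflen, 0); + *   int off = 0; + *   struct p9_dirent d; + * + *   if (n < 0) + *           return n; + *   while (off < n) { + *           off += p9dirent_read(clnt, buf + off, n - off, &d); + *           pr_info("entry: %s\n", d.d_name); + *   } + */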
+ +} +EXPORT_SYMBOL(p9_client_mknod_dotl); + +int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode, + kgid_t gid, struct p9_qid *qid) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + err = 0; + clnt = fid->clnt; + p9_debug(P9_DEBUG_9P, ">>> TMKDIR fid %d name %s mode %d gid %d\n", + fid->fid, name, mode, from_kgid(&init_user_ns, gid)); + req = p9_client_rpc(clnt, P9_TMKDIR, "dsdg", fid->fid, name, mode, + gid); + if (IS_ERR(req)) + return PTR_ERR(req); + + err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type, + (unsigned long long)qid->path, qid->version); + +error: + p9_free_req(clnt, req); + return err; + +} +EXPORT_SYMBOL(p9_client_mkdir_dotl); + +int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + err = 0; + clnt = fid->clnt; + p9_debug(P9_DEBUG_9P, ">>> TLOCK fid %d type %i flags %d " + "start %lld length %lld proc_id %d client_id %s\n", + fid->fid, flock->type, flock->flags, flock->start, + flock->length, flock->proc_id, flock->client_id); + + req = p9_client_rpc(clnt, P9_TLOCK, "dbdqqds", fid->fid, flock->type, + flock->flags, flock->start, flock->length, + flock->proc_id, flock->client_id); + + if (IS_ERR(req)) + return PTR_ERR(req); + + err = p9pdu_readf(req->rc, clnt->proto_version, "b", status); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status); +error: + p9_free_req(clnt, req); + return err; + +} +EXPORT_SYMBOL(p9_client_lock_dotl); + +int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + err = 0; + clnt = fid->clnt; + p9_debug(P9_DEBUG_9P, ">>> TGETLOCK fid %d, type %i start %lld " + "length %lld proc_id %d client_id %s\n", fid->fid, glock->type, + glock->start, glock->length, glock->proc_id, glock->client_id); + + req = p9_client_rpc(clnt, P9_TGETLOCK, "dbqqds", fid->fid, glock->type, + glock->start, glock->length, glock->proc_id, glock->client_id); + + if (IS_ERR(req)) + return PTR_ERR(req); + + err = p9pdu_readf(req->rc, clnt->proto_version, "bqqds", &glock->type, + &glock->start, &glock->length, &glock->proc_id, + &glock->client_id); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld " + "proc_id %d client_id %s\n", glock->type, glock->start, + glock->length, glock->proc_id, glock->client_id); +error: + p9_free_req(clnt, req); + return err; +} +EXPORT_SYMBOL(p9_client_getlock_dotl); + +int p9_client_readlink(struct p9_fid *fid, char **target) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + err = 0; + clnt = fid->clnt; + p9_debug(P9_DEBUG_9P, ">>> TREADLINK fid %d\n", fid->fid); + + req = p9_client_rpc(clnt, P9_TREADLINK, "d", fid->fid); + if (IS_ERR(req)) + return PTR_ERR(req); + + err = p9pdu_readf(req->rc, clnt->proto_version, "s", target); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target); +error: + p9_free_req(clnt, req); + return err; +} +EXPORT_SYMBOL(p9_client_readlink); diff --git a/addons/9p/src/3.10.108/error.c b/addons/9p/src/3.10.108/error.c new file mode 100644 index 00000000..126fd0dc --- /dev/null +++ b/addons/9p/src/3.10.108/error.c @@ 
-0,0 +1,247 @@ +/* + * linux/fs/9p/error.c + * + * Error string handling + * + * Plan 9 uses error strings, Unix uses error numbers. These functions + * try to help manage that and provide for dynamically adding error + * mappings. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/list.h> +#include <linux/jhash.h> +#include <linux/errno.h> +#include <net/9p/9p.h> + +/** + * struct errormap - map string errors from Plan 9 to Linux numeric ids + * @name: string sent over 9P + * @val: numeric id most closely representing @name + * @namelen: length of string + * @list: hash-table list for string lookup + */ +struct errormap { + char *name; + int val; + + int namelen; + struct hlist_node list; +}; + +#define ERRHASHSZ 32 +static struct hlist_head hash_errmap[ERRHASHSZ]; + +/* FixMe - reduce to a reasonable size */ +static struct errormap errmap[] = { + {"Operation not permitted", EPERM}, + {"wstat prohibited", EPERM}, + {"No such file or directory", ENOENT}, + {"directory entry not found", ENOENT}, + {"file not found", ENOENT}, + {"Interrupted system call", EINTR}, + {"Input/output error", EIO}, + {"No such device or address", ENXIO}, + {"Argument list too long", E2BIG}, + {"Bad file descriptor", EBADF}, + {"Resource temporarily unavailable", EAGAIN}, + {"Cannot allocate memory", ENOMEM}, + {"Permission denied", EACCES}, + {"Bad address", EFAULT}, + {"Block device required", ENOTBLK}, + {"Device or resource busy", EBUSY}, + {"File exists", EEXIST}, + {"Invalid cross-device link", EXDEV}, + {"No such device", ENODEV}, + {"Not a directory", ENOTDIR}, + {"Is a directory", EISDIR}, + {"Invalid argument", EINVAL}, + {"Too many open files in system", ENFILE}, + {"Too many open files", EMFILE}, + {"Text file busy", ETXTBSY}, + {"File too large", EFBIG}, + {"No space left on device", ENOSPC}, + {"Illegal seek", ESPIPE}, + {"Read-only file system", EROFS}, + {"Too many links", EMLINK}, + {"Broken pipe", EPIPE}, + {"Numerical argument out of domain", EDOM}, + {"Numerical result out of range", ERANGE}, + {"Resource deadlock avoided", EDEADLK}, + {"File name too long", ENAMETOOLONG}, + {"No locks available", ENOLCK}, + {"Function not implemented", ENOSYS}, + {"Directory not empty", ENOTEMPTY}, + {"Too many levels of symbolic links", ELOOP}, + {"No message of desired type", ENOMSG}, + {"Identifier removed", EIDRM}, + {"No data available", ENODATA}, + {"Machine is not on the network", ENONET}, + {"Package not installed", ENOPKG}, + {"Object is remote", EREMOTE}, + {"Link has been severed", ENOLINK}, + {"Communication error on send", ECOMM}, + {"Protocol error", EPROTO}, + {"Bad message", EBADMSG}, + {"File descriptor in bad state", EBADFD}, + {"Streams pipe error", ESTRPIPE}, + {"Too many users", EUSERS}, + {"Socket operation on non-socket", ENOTSOCK}, + {"Message too long", EMSGSIZE}, +
{"Protocol not available", ENOPROTOOPT}, + {"Protocol not supported", EPROTONOSUPPORT}, + {"Socket type not supported", ESOCKTNOSUPPORT}, + {"Operation not supported", EOPNOTSUPP}, + {"Protocol family not supported", EPFNOSUPPORT}, + {"Network is down", ENETDOWN}, + {"Network is unreachable", ENETUNREACH}, + {"Network dropped connection on reset", ENETRESET}, + {"Software caused connection abort", ECONNABORTED}, + {"Connection reset by peer", ECONNRESET}, + {"No buffer space available", ENOBUFS}, + {"Transport endpoint is already connected", EISCONN}, + {"Transport endpoint is not connected", ENOTCONN}, + {"Cannot send after transport endpoint shutdown", ESHUTDOWN}, + {"Connection timed out", ETIMEDOUT}, + {"Connection refused", ECONNREFUSED}, + {"Host is down", EHOSTDOWN}, + {"No route to host", EHOSTUNREACH}, + {"Operation already in progress", EALREADY}, + {"Operation now in progress", EINPROGRESS}, + {"Is a named type file", EISNAM}, + {"Remote I/O error", EREMOTEIO}, + {"Disk quota exceeded", EDQUOT}, +/* errors from fossil, vacfs, and u9fs */ + {"fid unknown or out of range", EBADF}, + {"permission denied", EACCES}, + {"file does not exist", ENOENT}, + {"authentication failed", ECONNREFUSED}, + {"bad offset in directory read", ESPIPE}, + {"bad use of fid", EBADF}, + {"wstat can't convert between files and directories", EPERM}, + {"directory is not empty", ENOTEMPTY}, + {"file exists", EEXIST}, + {"file already exists", EEXIST}, + {"file or directory already exists", EEXIST}, + {"fid already in use", EBADF}, + {"file in use", ETXTBSY}, + {"i/o error", EIO}, + {"file already open for I/O", ETXTBSY}, + {"illegal mode", EINVAL}, + {"illegal name", ENAMETOOLONG}, + {"not a directory", ENOTDIR}, + {"not a member of proposed group", EPERM}, + {"not owner", EACCES}, + {"only owner can change group in wstat", EACCES}, + {"read only file system", EROFS}, + {"no access to special file", EPERM}, + {"i/o count too large", EIO}, + {"unknown group", EINVAL}, + {"unknown user", EINVAL}, + {"bogus wstat buffer", EPROTO}, + {"exclusive use file already open", EAGAIN}, + {"corrupted directory entry", EIO}, + {"corrupted file entry", EIO}, + {"corrupted block label", EIO}, + {"corrupted meta data", EIO}, + {"illegal offset", EINVAL}, + {"illegal path element", ENOENT}, + {"root of file system is corrupted", EIO}, + {"corrupted super block", EIO}, + {"protocol botch", EPROTO}, + {"file system is full", ENOSPC}, + {"file is in use", EAGAIN}, + {"directory entry is not allocated", ENOENT}, + {"file is read only", EROFS}, + {"file has been removed", EIDRM}, + {"only support truncation to zero length", EPERM}, + {"cannot remove root", EPERM}, + {"file too big", EFBIG}, + {"venti i/o error", EIO}, + /* these are not errors */ + {"u9fs rhostsauth: no authentication required", 0}, + {"u9fs authnone: no authentication required", 0}, + {NULL, -1} +}; + +/** + * p9_error_init - preload mappings into hash list + * + */ + +int p9_error_init(void) +{ + struct errormap *c; + int bucket; + + /* initialize hash table */ + for (bucket = 0; bucket < ERRHASHSZ; bucket++) + INIT_HLIST_HEAD(&hash_errmap[bucket]); + + /* load initial error map into hash table */ + for (c = errmap; c->name != NULL; c++) { + c->namelen = strlen(c->name); + bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ; + INIT_HLIST_NODE(&c->list); + hlist_add_head(&c->list, &hash_errmap[bucket]); + } + + return 1; +} +EXPORT_SYMBOL(p9_error_init); + +/** + * errstr2errno - convert error string to error number + * @errstr: error string + * @len: length of 
error string + * + */ + +int p9_errstr2errno(char *errstr, int len) +{ + int errno; + struct errormap *c; + int bucket; + + errno = 0; + c = NULL; + bucket = jhash(errstr, len, 0) % ERRHASHSZ; + hlist_for_each_entry(c, &hash_errmap[bucket], list) { + if (c->namelen == len && !memcmp(c->name, errstr, len)) { + errno = c->val; + break; + } + } + + if (errno == 0) { + /* TODO: if error isn't found, add it dynamically */ + errstr[len] = 0; + pr_err("%s: server reported unknown error %s\n", + __func__, errstr); + errno = ESERVERFAULT; + } + + return -errno; +} +EXPORT_SYMBOL(p9_errstr2errno); diff --git a/addons/9p/src/3.10.108/fid.c b/addons/9p/src/3.10.108/fid.c new file mode 100644 index 00000000..d51ec9fa --- /dev/null +++ b/addons/9p/src/3.10.108/fid.c @@ -0,0 +1,306 @@ +/* + * V9FS FID Management + * + * Copyright (C) 2007 by Latchesar Ionkov + * Copyright (C) 2005, 2006 by Eric Van Hensbergen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/idr.h> +#include <net/9p/9p.h> +#include <net/9p/client.h> + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" + +/** + * v9fs_fid_add - add a fid to a dentry + * @dentry: dentry that the fid is being added to + * @fid: fid to add + * + */ + +static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid) +{ + hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata); +} + +void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid) +{ + spin_lock(&dentry->d_lock); + __add_fid(dentry, fid); + spin_unlock(&dentry->d_lock); +} + +/** + * v9fs_fid_find - retrieve a fid that belongs to the specified uid + * @dentry: dentry to look for fid in + * @uid: return fid that belongs to the specified user + * @any: if non-zero, return any fid associated with the dentry + * + */ + +static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any) +{ + struct p9_fid *fid, *ret; + + p9_debug(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n", + dentry->d_name.name, dentry, from_kuid(&init_user_ns, uid), + any); + ret = NULL; + /* we'll recheck under lock if there's anything to look in */ + if (dentry->d_fsdata) { + struct hlist_head *h = (struct hlist_head *)&dentry->d_fsdata; + spin_lock(&dentry->d_lock); + hlist_for_each_entry(fid, h, dlist) { + if (any || uid_eq(fid->uid, uid)) { + ret = fid; + break; + } + } + spin_unlock(&dentry->d_lock); + } + + return ret; +} + +/* + * We need to hold v9ses->rename_sem as long as we hold references + * to returned path array. Array element contain pointers to + * dentry names.
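+ * + * [Editorial note - not in the original commit.] Holding rename_sem is what keeps the borrowed d_name.name pointers below stable: a concurrent rename could otherwise free or swap a component string while the multipath TWALK built from this array is still being transmitted.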
+ */ +static int build_path_from_dentry(struct v9fs_session_info *v9ses, + struct dentry *dentry, char ***names) +{ + int n = 0, i; + char **wnames; + struct dentry *ds; + + for (ds = dentry; !IS_ROOT(ds); ds = ds->d_parent) + n++; + + wnames = kmalloc(sizeof(char *) * n, GFP_KERNEL); + if (!wnames) + goto err_out; + + for (ds = dentry, i = (n-1); i >= 0; i--, ds = ds->d_parent) + wnames[i] = (char *)ds->d_name.name; + + *names = wnames; + return n; +err_out: + return -ENOMEM; +} + +static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, + kuid_t uid, int any) +{ + struct dentry *ds; + char **wnames, *uname; + int i, n, l, clone, access; + struct v9fs_session_info *v9ses; + struct p9_fid *fid, *old_fid = NULL; + + v9ses = v9fs_dentry2v9ses(dentry); + access = v9ses->flags & V9FS_ACCESS_MASK; + fid = v9fs_fid_find(dentry, uid, any); + if (fid) + return fid; + /* + * we don't have a matching fid. To do a TWALK we need + * parent fid. We need to prevent rename when we want to + * look at the parent. + */ + down_read(&v9ses->rename_sem); + ds = dentry->d_parent; + fid = v9fs_fid_find(ds, uid, any); + if (fid) { + /* Found the parent fid do a lookup with that */ + fid = p9_client_walk(fid, 1, (char **)&dentry->d_name.name, 1); + goto fid_out; + } + up_read(&v9ses->rename_sem); + + /* start from the root and try to do a lookup */ + fid = v9fs_fid_find(dentry->d_sb->s_root, uid, any); + if (!fid) { + /* the user is not attached to the fs yet */ + if (access == V9FS_ACCESS_SINGLE) + return ERR_PTR(-EPERM); + + if (v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) + uname = NULL; + else + uname = v9ses->uname; + + fid = p9_client_attach(v9ses->clnt, NULL, uname, uid, + v9ses->aname); + if (IS_ERR(fid)) + return fid; + + v9fs_fid_add(dentry->d_sb->s_root, fid); + } + /* If we are root ourself just return that */ + if (dentry->d_sb->s_root == dentry) + return fid; + /* + * Do a multipath walk with attached root. + * When walking parent we need to make sure we + * don't have a parallel rename happening + */ + down_read(&v9ses->rename_sem); + n = build_path_from_dentry(v9ses, dentry, &wnames); + if (n < 0) { + fid = ERR_PTR(n); + goto err_out; + } + clone = 1; + i = 0; + while (i < n) { + l = min(n - i, P9_MAXWELEM); + /* + * We need to hold rename lock when doing a multipath + * walk to ensure none of the patch component change + */ + fid = p9_client_walk(fid, l, &wnames[i], clone); + if (IS_ERR(fid)) { + if (old_fid) { + /* + * If we fail, clunk fid which are mapping + * to path component and not the last component + * of the path. + */ + p9_client_clunk(old_fid); + } + kfree(wnames); + goto err_out; + } + old_fid = fid; + i += l; + clone = 0; + } + kfree(wnames); +fid_out: + if (!IS_ERR(fid)) { + spin_lock(&dentry->d_lock); + if (d_unhashed(dentry)) { + spin_unlock(&dentry->d_lock); + p9_client_clunk(fid); + fid = ERR_PTR(-ENOENT); + } else { + __add_fid(dentry, fid); + spin_unlock(&dentry->d_lock); + } + } +err_out: + up_read(&v9ses->rename_sem); + return fid; +} + +/** + * v9fs_fid_lookup - lookup for a fid, try to walk if not found + * @dentry: dentry to look for fid in + * + * Look for a fid in the specified dentry for the current user. + * If no fid is found, try to create one walking from a fid from the parent + * dentry (if it has one), or the root dentry. If the user haven't accessed + * the fs yet, attach now and walk from the root. 
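+ * + * [Editorial note - not in the original commit.] The access mode decides how widely a fid may be shared: with V9FS_ACCESS_ANY a single fid serves every user, while ACCESS_USER/ACCESS_CLIENT key fids by kuid, so two users listing the same directory produce two TATTACHes and two disjoint fid spaces.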
+ */ + +struct p9_fid *v9fs_fid_lookup(struct dentry *dentry) +{ + kuid_t uid; + int any, access; + struct v9fs_session_info *v9ses; + + v9ses = v9fs_dentry2v9ses(dentry); + access = v9ses->flags & V9FS_ACCESS_MASK; + switch (access) { + case V9FS_ACCESS_SINGLE: + case V9FS_ACCESS_USER: + case V9FS_ACCESS_CLIENT: + uid = current_fsuid(); + any = 0; + break; + + case V9FS_ACCESS_ANY: + uid = v9ses->uid; + any = 1; + break; + + default: + uid = INVALID_UID; + any = 0; + break; + } + return v9fs_fid_lookup_with_uid(dentry, uid, any); +} + +struct p9_fid *v9fs_fid_clone(struct dentry *dentry) +{ + struct p9_fid *fid, *ret; + + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return fid; + + ret = p9_client_walk(fid, 0, NULL, 1); + return ret; +} + +static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, kuid_t uid) +{ + struct p9_fid *fid, *ret; + + fid = v9fs_fid_lookup_with_uid(dentry, uid, 0); + if (IS_ERR(fid)) + return fid; + + ret = p9_client_walk(fid, 0, NULL, 1); + return ret; +} + +struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) +{ + int err; + struct p9_fid *fid; + + fid = v9fs_fid_clone_with_uid(dentry, GLOBAL_ROOT_UID); + if (IS_ERR(fid)) + goto error_out; + /* + * writeback fid will only be used to write back the + * dirty pages. We always request for the open fid in read-write + * mode so that a partial page write which result in page + * read can work. + */ + err = p9_client_open(fid, O_RDWR); + if (err < 0) { + p9_client_clunk(fid); + fid = ERR_PTR(err); + goto error_out; + } +error_out: + return fid; +} diff --git a/addons/9p/src/3.10.108/fid.h b/addons/9p/src/3.10.108/fid.h new file mode 100644 index 00000000..2b6787fc --- /dev/null +++ b/addons/9p/src/3.10.108/fid.h @@ -0,0 +1,30 @@ +/* + * V9FS FID Management + * + * Copyright (C) 2005 by Eric Van Hensbergen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ +#ifndef FS_9P_FID_H +#define FS_9P_FID_H +#include <linux/list.h> + +struct p9_fid *v9fs_fid_lookup(struct dentry *dentry); +struct p9_fid *v9fs_fid_clone(struct dentry *dentry); +void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid); +struct p9_fid *v9fs_writeback_fid(struct dentry *dentry); +#endif diff --git a/addons/9p/src/3.10.108/mod.c b/addons/9p/src/3.10.108/mod.c new file mode 100644 index 00000000..6ab36aea --- /dev/null +++ b/addons/9p/src/3.10.108/mod.c @@ -0,0 +1,201 @@ +/* + * net/9p/9p.c + * + * 9P entry point + * + * Copyright (C) 2007 by Latchesar Ionkov + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/moduleparam.h> +#include <net/9p/9p.h> +#include <linux/fs.h> +#include <linux/parser.h> +#include <net/9p/client.h> +#include <net/9p/transport.h> +#include <linux/list.h> +#include <linux/spinlock.h> + +#ifdef CONFIG_NET_9P_DEBUG +unsigned int p9_debug_level = 0; /* feature-rific global debug level */ +EXPORT_SYMBOL(p9_debug_level); +module_param_named(debug, p9_debug_level, uint, 0); +MODULE_PARM_DESC(debug, "9P debugging level"); + +void _p9_debug(enum p9_debug_flags level, const char *func, + const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + if ((p9_debug_level & level) != level) + return; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (level == P9_DEBUG_9P) + pr_notice("(%8.8d) %pV", task_pid_nr(current), &vaf); + else + pr_notice("-- %s (%d): %pV", func, task_pid_nr(current), &vaf); + + va_end(args); +} +EXPORT_SYMBOL(_p9_debug); +#endif + +/* + * Dynamic Transport Registration Routines + * + */ + +static DEFINE_SPINLOCK(v9fs_trans_lock); +static LIST_HEAD(v9fs_trans_list); + +/** + * v9fs_register_trans - register a new transport with 9p + * @m: structure describing the transport module and entry points + * + */ +void v9fs_register_trans(struct p9_trans_module *m) +{ + spin_lock(&v9fs_trans_lock); + list_add_tail(&m->list, &v9fs_trans_list); + spin_unlock(&v9fs_trans_lock); +} +EXPORT_SYMBOL(v9fs_register_trans); + +/** + * v9fs_unregister_trans - unregister a 9p transport + * @m: the transport to remove + * + */ +void v9fs_unregister_trans(struct p9_trans_module *m) +{ + spin_lock(&v9fs_trans_lock); + list_del_init(&m->list); + spin_unlock(&v9fs_trans_lock); +} +EXPORT_SYMBOL(v9fs_unregister_trans); + +/** + * v9fs_get_trans_by_name - get transport with the matching name + * @name: string identifying transport + * + */ +struct p9_trans_module *v9fs_get_trans_by_name(char *s) +{ + struct p9_trans_module *t, *found = NULL; + + spin_lock(&v9fs_trans_lock); + + list_for_each_entry(t, &v9fs_trans_list, list) + if (strcmp(t->name, s) == 0 && + try_module_get(t->owner)) { + found = t; + break; + } + + spin_unlock(&v9fs_trans_lock); + return found; +} +EXPORT_SYMBOL(v9fs_get_trans_by_name); + +/** + * v9fs_get_default_trans - get the default transport + * + */ + +struct p9_trans_module *v9fs_get_default_trans(void) +{ + struct p9_trans_module *t, *found = NULL; + + spin_lock(&v9fs_trans_lock); + + list_for_each_entry(t, &v9fs_trans_list, list) + if (t->def && try_module_get(t->owner)) { + found = t; + break; + } + + if (!found) + list_for_each_entry(t, &v9fs_trans_list, list) + if (try_module_get(t->owner)) { + found = t; + break; + } + + spin_unlock(&v9fs_trans_lock); + return found; +} +EXPORT_SYMBOL(v9fs_get_default_trans); + +/** + * v9fs_put_trans - put trans + * @m: transport to put + * + */ +void v9fs_put_trans(struct p9_trans_module *m) +{ + if (m) + module_put(m->owner); +} + +/** + * init_p9 - Initialize module + * + */ +static int __init init_p9(void) +{ + int ret = 0; + + p9_error_init(); + pr_info("Installing 9P2000 support\n"); + p9_trans_fd_init(); + + return ret; +} + +/** + * exit_p9 - shutdown module + * + */ + +static void __exit exit_p9(void) +{ + pr_info("Unloading 9P2000 support\n"); + + p9_trans_fd_exit(); +} + +module_init(init_p9) +module_exit(exit_p9) + +MODULE_AUTHOR("Latchesar Ionkov 
"); +MODULE_AUTHOR("Eric Van Hensbergen "); +MODULE_AUTHOR("Ron Minnich "); +MODULE_LICENSE("GPL"); diff --git a/addons/9p/src/3.10.108/protocol.c b/addons/9p/src/3.10.108/protocol.c new file mode 100644 index 00000000..ab9127ec --- /dev/null +++ b/addons/9p/src/3.10.108/protocol.c @@ -0,0 +1,636 @@ +/* + * net/9p/protocol.c + * + * 9P Protocol Support Code + * + * Copyright (C) 2008 by Eric Van Hensbergen + * + * Base on code from Anthony Liguori + * Copyright (C) 2008 by IBM, Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "protocol.h" + +#include + +static int +p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...); + +void p9stat_free(struct p9_wstat *stbuf) +{ + kfree(stbuf->name); + kfree(stbuf->uid); + kfree(stbuf->gid); + kfree(stbuf->muid); + kfree(stbuf->extension); +} +EXPORT_SYMBOL(p9stat_free); + +size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size) +{ + size_t len = min(pdu->size - pdu->offset, size); + memcpy(data, &pdu->sdata[pdu->offset], len); + pdu->offset += len; + return size - len; +} + +static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size) +{ + size_t len = min(pdu->capacity - pdu->size, size); + memcpy(&pdu->sdata[pdu->size], data, len); + pdu->size += len; + return size - len; +} + +static size_t +pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size) +{ + size_t len = min(pdu->capacity - pdu->size, size); + if (copy_from_user(&pdu->sdata[pdu->size], udata, len)) + len = 0; + + pdu->size += len; + return size - len; +} + +/* + b - int8_t + w - int16_t + d - int32_t + q - int64_t + s - string + u - numeric uid + g - numeric gid + S - stat + Q - qid + D - data blob (int32_t size followed by void *, results are not freed) + T - array of strings (int16_t count, followed by strings) + R - array of qids (int16_t count, followed by qids) + A - stat for 9p2000.L (p9_stat_dotl) + ? 
- if optional = 1, continue parsing +*/ + +static int +p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt, + va_list ap) +{ + const char *ptr; + int errcode = 0; + + for (ptr = fmt; *ptr; ptr++) { + switch (*ptr) { + case 'b':{ + int8_t *val = va_arg(ap, int8_t *); + if (pdu_read(pdu, val, sizeof(*val))) { + errcode = -EFAULT; + break; + } + } + break; + case 'w':{ + int16_t *val = va_arg(ap, int16_t *); + __le16 le_val; + if (pdu_read(pdu, &le_val, sizeof(le_val))) { + errcode = -EFAULT; + break; + } + *val = le16_to_cpu(le_val); + } + break; + case 'd':{ + int32_t *val = va_arg(ap, int32_t *); + __le32 le_val; + if (pdu_read(pdu, &le_val, sizeof(le_val))) { + errcode = -EFAULT; + break; + } + *val = le32_to_cpu(le_val); + } + break; + case 'q':{ + int64_t *val = va_arg(ap, int64_t *); + __le64 le_val; + if (pdu_read(pdu, &le_val, sizeof(le_val))) { + errcode = -EFAULT; + break; + } + *val = le64_to_cpu(le_val); + } + break; + case 's':{ + char **sptr = va_arg(ap, char **); + uint16_t len; + + errcode = p9pdu_readf(pdu, proto_version, + "w", &len); + if (errcode) + break; + + *sptr = kmalloc(len + 1, GFP_NOFS); + if (*sptr == NULL) { + errcode = -EFAULT; + break; + } + if (pdu_read(pdu, *sptr, len)) { + errcode = -EFAULT; + kfree(*sptr); + *sptr = NULL; + } else + (*sptr)[len] = 0; + } + break; + case 'u': { + kuid_t *uid = va_arg(ap, kuid_t *); + __le32 le_val; + if (pdu_read(pdu, &le_val, sizeof(le_val))) { + errcode = -EFAULT; + break; + } + *uid = make_kuid(&init_user_ns, + le32_to_cpu(le_val)); + } break; + case 'g': { + kgid_t *gid = va_arg(ap, kgid_t *); + __le32 le_val; + if (pdu_read(pdu, &le_val, sizeof(le_val))) { + errcode = -EFAULT; + break; + } + *gid = make_kgid(&init_user_ns, + le32_to_cpu(le_val)); + } break; + case 'Q':{ + struct p9_qid *qid = + va_arg(ap, struct p9_qid *); + + errcode = p9pdu_readf(pdu, proto_version, "bdq", + &qid->type, &qid->version, + &qid->path); + } + break; + case 'S':{ + struct p9_wstat *stbuf = + va_arg(ap, struct p9_wstat *); + + memset(stbuf, 0, sizeof(struct p9_wstat)); + stbuf->n_uid = stbuf->n_muid = INVALID_UID; + stbuf->n_gid = INVALID_GID; + + errcode = + p9pdu_readf(pdu, proto_version, + "wwdQdddqssss?sugu", + &stbuf->size, &stbuf->type, + &stbuf->dev, &stbuf->qid, + &stbuf->mode, &stbuf->atime, + &stbuf->mtime, &stbuf->length, + &stbuf->name, &stbuf->uid, + &stbuf->gid, &stbuf->muid, + &stbuf->extension, + &stbuf->n_uid, &stbuf->n_gid, + &stbuf->n_muid); + if (errcode) + p9stat_free(stbuf); + } + break; + case 'D':{ + uint32_t *count = va_arg(ap, uint32_t *); + void **data = va_arg(ap, void **); + + errcode = + p9pdu_readf(pdu, proto_version, "d", count); + if (!errcode) { + *count = + min_t(uint32_t, *count, + pdu->size - pdu->offset); + *data = &pdu->sdata[pdu->offset]; + } + } + break; + case 'T':{ + uint16_t *nwname = va_arg(ap, uint16_t *); + char ***wnames = va_arg(ap, char ***); + + errcode = p9pdu_readf(pdu, proto_version, + "w", nwname); + if (!errcode) { + *wnames = + kmalloc(sizeof(char *) * *nwname, + GFP_NOFS); + if (!*wnames) + errcode = -ENOMEM; + } + + if (!errcode) { + int i; + + for (i = 0; i < *nwname; i++) { + errcode = + p9pdu_readf(pdu, + proto_version, + "s", + &(*wnames)[i]); + if (errcode) + break; + } + } + + if (errcode) { + if (*wnames) { + int i; + + for (i = 0; i < *nwname; i++) + kfree((*wnames)[i]); + } + kfree(*wnames); + *wnames = NULL; + } + } + break; + case 'R':{ + int16_t *nwqid = va_arg(ap, int16_t *); + struct p9_qid **wqids = + va_arg(ap, struct p9_qid **); + + *wqids = NULL; 
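+/* [Editorial note - illustrative only, not in the original commit.] The single-letter format language documented above is how every message in this module is packed and unpacked; the client code simply pairs a write format with the matching read format. A TREAD round trip, for example: + * + *   p9pdu_writef(pdu, ver, "dqd", fid->fid, offset, rsize);    <- request + *   p9pdu_readf(req->rc, ver, "D", &count, &dataptr);          <- reply + * + * "d"/"q" are little-endian 32/64-bit integers, and "D" hands back a counted blob that points into the receive buffer rather than copying it. */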
+ + errcode = + p9pdu_readf(pdu, proto_version, "w", nwqid); + if (!errcode) { + *wqids = + kmalloc(*nwqid * + sizeof(struct p9_qid), + GFP_NOFS); + if (*wqids == NULL) + errcode = -ENOMEM; + } + + if (!errcode) { + int i; + + for (i = 0; i < *nwqid; i++) { + errcode = + p9pdu_readf(pdu, + proto_version, + "Q", + &(*wqids)[i]); + if (errcode) + break; + } + } + + if (errcode) { + kfree(*wqids); + *wqids = NULL; + } + } + break; + case 'A': { + struct p9_stat_dotl *stbuf = + va_arg(ap, struct p9_stat_dotl *); + + memset(stbuf, 0, sizeof(struct p9_stat_dotl)); + errcode = + p9pdu_readf(pdu, proto_version, + "qQdugqqqqqqqqqqqqqqq", + &stbuf->st_result_mask, + &stbuf->qid, + &stbuf->st_mode, + &stbuf->st_uid, &stbuf->st_gid, + &stbuf->st_nlink, + &stbuf->st_rdev, &stbuf->st_size, + &stbuf->st_blksize, &stbuf->st_blocks, + &stbuf->st_atime_sec, + &stbuf->st_atime_nsec, + &stbuf->st_mtime_sec, + &stbuf->st_mtime_nsec, + &stbuf->st_ctime_sec, + &stbuf->st_ctime_nsec, + &stbuf->st_btime_sec, + &stbuf->st_btime_nsec, + &stbuf->st_gen, + &stbuf->st_data_version); + } + break; + case '?': + if ((proto_version != p9_proto_2000u) && + (proto_version != p9_proto_2000L)) + return 0; + break; + default: + BUG(); + break; + } + + if (errcode) + break; + } + + return errcode; +} + +int +p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, + va_list ap) +{ + const char *ptr; + int errcode = 0; + + for (ptr = fmt; *ptr; ptr++) { + switch (*ptr) { + case 'b':{ + int8_t val = va_arg(ap, int); + if (pdu_write(pdu, &val, sizeof(val))) + errcode = -EFAULT; + } + break; + case 'w':{ + __le16 val = cpu_to_le16(va_arg(ap, int)); + if (pdu_write(pdu, &val, sizeof(val))) + errcode = -EFAULT; + } + break; + case 'd':{ + __le32 val = cpu_to_le32(va_arg(ap, int32_t)); + if (pdu_write(pdu, &val, sizeof(val))) + errcode = -EFAULT; + } + break; + case 'q':{ + __le64 val = cpu_to_le64(va_arg(ap, int64_t)); + if (pdu_write(pdu, &val, sizeof(val))) + errcode = -EFAULT; + } + break; + case 's':{ + const char *sptr = va_arg(ap, const char *); + uint16_t len = 0; + if (sptr) + len = min_t(size_t, strlen(sptr), + USHRT_MAX); + + errcode = p9pdu_writef(pdu, proto_version, + "w", len); + if (!errcode && pdu_write(pdu, sptr, len)) + errcode = -EFAULT; + } + break; + case 'u': { + kuid_t uid = va_arg(ap, kuid_t); + __le32 val = cpu_to_le32( + from_kuid(&init_user_ns, uid)); + if (pdu_write(pdu, &val, sizeof(val))) + errcode = -EFAULT; + } break; + case 'g': { + kgid_t gid = va_arg(ap, kgid_t); + __le32 val = cpu_to_le32( + from_kgid(&init_user_ns, gid)); + if (pdu_write(pdu, &val, sizeof(val))) + errcode = -EFAULT; + } break; + case 'Q':{ + const struct p9_qid *qid = + va_arg(ap, const struct p9_qid *); + errcode = + p9pdu_writef(pdu, proto_version, "bdq", + qid->type, qid->version, + qid->path); + } break; + case 'S':{ + const struct p9_wstat *stbuf = + va_arg(ap, const struct p9_wstat *); + errcode = + p9pdu_writef(pdu, proto_version, + "wwdQdddqssss?sugu", + stbuf->size, stbuf->type, + stbuf->dev, &stbuf->qid, + stbuf->mode, stbuf->atime, + stbuf->mtime, stbuf->length, + stbuf->name, stbuf->uid, + stbuf->gid, stbuf->muid, + stbuf->extension, stbuf->n_uid, + stbuf->n_gid, stbuf->n_muid); + } break; + case 'D':{ + uint32_t count = va_arg(ap, uint32_t); + const void *data = va_arg(ap, const void *); + + errcode = p9pdu_writef(pdu, proto_version, "d", + count); + if (!errcode && pdu_write(pdu, data, count)) + errcode = -EFAULT; + } + break; + case 'U':{ + int32_t count = va_arg(ap, int32_t); + const char __user *udata 
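+/* [Editorial note - not in the original commit.] "U" is the write-side twin of "D" for userspace memory: it is encoded with pdu_write_u(), which copies via copy_from_user() rather than memcpy(). That is why p9_client_write() in client.c picks the "dqU" format when handed a __user pointer and "dqD" when the data is already in kernel space. */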
= + va_arg(ap, const void __user *); + errcode = p9pdu_writef(pdu, proto_version, "d", + count); + if (!errcode && pdu_write_u(pdu, udata, count)) + errcode = -EFAULT; + } + break; + case 'T':{ + uint16_t nwname = va_arg(ap, int); + const char **wnames = va_arg(ap, const char **); + + errcode = p9pdu_writef(pdu, proto_version, "w", + nwname); + if (!errcode) { + int i; + + for (i = 0; i < nwname; i++) { + errcode = + p9pdu_writef(pdu, + proto_version, + "s", + wnames[i]); + if (errcode) + break; + } + } + } + break; + case 'R':{ + int16_t nwqid = va_arg(ap, int); + struct p9_qid *wqids = + va_arg(ap, struct p9_qid *); + + errcode = p9pdu_writef(pdu, proto_version, "w", + nwqid); + if (!errcode) { + int i; + + for (i = 0; i < nwqid; i++) { + errcode = + p9pdu_writef(pdu, + proto_version, + "Q", + &wqids[i]); + if (errcode) + break; + } + } + } + break; + case 'I':{ + struct p9_iattr_dotl *p9attr = va_arg(ap, + struct p9_iattr_dotl *); + + errcode = p9pdu_writef(pdu, proto_version, + "ddugqqqqq", + p9attr->valid, + p9attr->mode, + p9attr->uid, + p9attr->gid, + p9attr->size, + p9attr->atime_sec, + p9attr->atime_nsec, + p9attr->mtime_sec, + p9attr->mtime_nsec); + } + break; + case '?': + if ((proto_version != p9_proto_2000u) && + (proto_version != p9_proto_2000L)) + return 0; + break; + default: + BUG(); + break; + } + + if (errcode) + break; + } + + return errcode; +} + +int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = p9pdu_vreadf(pdu, proto_version, fmt, ap); + va_end(ap); + + return ret; +} + +static int +p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = p9pdu_vwritef(pdu, proto_version, fmt, ap); + va_end(ap); + + return ret; +} + +int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st) +{ + struct p9_fcall fake_pdu; + int ret; + + fake_pdu.size = len; + fake_pdu.capacity = len; + fake_pdu.sdata = buf; + fake_pdu.offset = 0; + + ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "S", st); + if (ret) { + p9_debug(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret); + trace_9p_protocol_dump(clnt, &fake_pdu); + } + + return ret; +} +EXPORT_SYMBOL(p9stat_read); + +int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type) +{ + pdu->id = type; + return p9pdu_writef(pdu, 0, "dbw", 0, type, tag); +} + +int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu) +{ + int size = pdu->size; + int err; + + pdu->size = 0; + err = p9pdu_writef(pdu, 0, "d", size); + pdu->size = size; + + trace_9p_protocol_dump(clnt, pdu); + p9_debug(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", + pdu->size, pdu->id, pdu->tag); + + return err; +} + +void p9pdu_reset(struct p9_fcall *pdu) +{ + pdu->offset = 0; + pdu->size = 0; +} + +int p9dirent_read(struct p9_client *clnt, char *buf, int len, + struct p9_dirent *dirent) +{ + struct p9_fcall fake_pdu; + int ret; + char *nameptr; + + fake_pdu.size = len; + fake_pdu.capacity = len; + fake_pdu.sdata = buf; + fake_pdu.offset = 0; + + ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid, + &dirent->d_off, &dirent->d_type, &nameptr); + if (ret) { + p9_debug(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret); + trace_9p_protocol_dump(clnt, &fake_pdu); + goto out; + } + + strcpy(dirent->d_name, nameptr); + kfree(nameptr); + +out: + return fake_pdu.offset; +} +EXPORT_SYMBOL(p9dirent_read); diff --git a/addons/9p/src/3.10.108/protocol.h 
b/addons/9p/src/3.10.108/protocol.h new file mode 100644 index 00000000..2cc525fa --- /dev/null +++ b/addons/9p/src/3.10.108/protocol.h @@ -0,0 +1,34 @@
+/*
+ * net/9p/protocol.h
+ *
+ * 9P Protocol Support Code
+ *
+ * Copyright (C) 2008 by Eric Van Hensbergen
+ *
+ * Based on code from Anthony Liguori
+ * Copyright (C) 2008 by IBM, Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ *
+ */
+
+int p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
+		  va_list ap);
+int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
+int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type);
+int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu);
+void p9pdu_reset(struct p9_fcall *pdu);
+size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size);
diff --git a/addons/9p/src/3.10.108/trans_common.c b/addons/9p/src/3.10.108/trans_common.c new file mode 100644 index 00000000..2ee38791 --- /dev/null +++ b/addons/9p/src/3.10.108/trans_common.c @@ -0,0 +1,69 @@
+/*
+ * Copyright IBM Corporation, 2010
+ * Author Venkateswararao Jujjuri
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+#include <linux/scatterlist.h>
+#include "trans_common.h"
+
+/**
+ * p9_release_pages - Release pages after the transaction.
+ */
+void p9_release_pages(struct page **pages, int nr_pages)
+{
+	int i;
+
+	for (i = 0; i < nr_pages; i++)
+		if (pages[i])
+			put_page(pages[i]);
+}
+EXPORT_SYMBOL(p9_release_pages);
+
+/**
+ * p9_nr_pages - Return number of pages needed to accommodate the payload.
+ */
+int p9_nr_pages(char *data, int len)
+{
+	unsigned long start_page, end_page;
+	start_page = (unsigned long)data >> PAGE_SHIFT;
+	end_page = ((unsigned long)data + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	return end_page - start_page;
+}
+EXPORT_SYMBOL(p9_nr_pages);
+
+/**
+ * p9_payload_gup - Translates a user buffer into kernel pages and
+ * pins them for read/write through get_user_pages_fast().
+ * @data: user buffer holding the payload
+ * @nr_pages: in: pages needed for the payload; out: pages actually pinned
+ * @pages: page array to fill in. gup may not return the requested # of pages.
+ * @write: indicates if the pages are pinned for writing.
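+ * Returns 0 on success; if get_user_pages_fast() pins nothing, its
+ * (zero or negative) return value is passed through unchanged.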
+ */ + +int p9_payload_gup(char *data, int *nr_pages, struct page **pages, int write) +{ + int nr_mapped_pages; + + nr_mapped_pages = get_user_pages_fast((unsigned long)data, + *nr_pages, write, pages); + if (nr_mapped_pages <= 0) + return nr_mapped_pages; + + *nr_pages = nr_mapped_pages; + return 0; +} +EXPORT_SYMBOL(p9_payload_gup); diff --git a/addons/9p/src/3.10.108/trans_common.h b/addons/9p/src/3.10.108/trans_common.h new file mode 100644 index 00000000..173bb550 --- /dev/null +++ b/addons/9p/src/3.10.108/trans_common.h @@ -0,0 +1,17 @@ +/* + * Copyright IBM Corporation, 2010 + * Author Venkateswararao Jujjuri + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + */ + +void p9_release_pages(struct page **, int); +int p9_payload_gup(char *, int *, struct page **, int); +int p9_nr_pages(char *, int); diff --git a/addons/9p/src/3.10.108/trans_fd.c b/addons/9p/src/3.10.108/trans_fd.c new file mode 100644 index 00000000..02efb25c --- /dev/null +++ b/addons/9p/src/3.10.108/trans_fd.c @@ -0,0 +1,1090 @@ +/* + * linux/fs/9p/trans_fd.c + * + * Fd transport layer. Includes deprecated socket layer. + * + * Copyright (C) 2006 by Russ Cox + * Copyright (C) 2004-2005 by Latchesar Ionkov + * Copyright (C) 2004-2008 by Eric Van Hensbergen + * Copyright (C) 1997-2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/in.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/ipv6.h>
+#include <linux/kthread.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/un.h>
+#include <linux/uaccess.h>
+#include <linux/inet.h>
+#include <linux/idr.h>
+#include <linux/file.h>
+#include <linux/parser.h>
+#include <linux/slab.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+#include <net/9p/transport.h>
+
+#include <linux/syscalls.h> /* killme */
+
+#define P9_PORT 564
+#define MAX_SOCK_BUF (64*1024)
+#define MAXPOLLWADDR	2
+
+/**
+ * struct p9_fd_opts - per-transport options
+ * @rfd: file descriptor for reading (trans=fd)
+ * @wfd: file descriptor for writing (trans=fd)
+ * @port: port to connect to (trans=tcp)
+ *
+ */
+
+struct p9_fd_opts {
+	int rfd;
+	int wfd;
+	u16 port;
+};
+
+/**
+ * struct p9_trans_fd - transport state
+ * @rd: reference to file to read from
+ * @wr: reference of file to write to
+ * @conn: connection state reference
+ *
+ */
+
+struct p9_trans_fd {
+	struct file *rd;
+	struct file *wr;
+	struct p9_conn *conn;
+};
+
+/*
+ * Option Parsing (code inspired by NFS code)
+ *  - a little lazy - parse all fd-transport options
+ */
+
+enum {
+	/* Options that take integer arguments */
+	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
+};
+
+static const match_table_t tokens = {
+	{Opt_port, "port=%u"},
+	{Opt_rfdno, "rfdno=%u"},
+	{Opt_wfdno, "wfdno=%u"},
+	{Opt_err, NULL},
+};
+
+enum {
+	Rworksched = 1,		/* read work scheduled or running */
+	Rpending = 2,		/* can read */
+	Wworksched = 4,		/* write work scheduled or running */
+	Wpending = 8,		/* can write */
+};
+
+struct p9_poll_wait {
+	struct p9_conn *conn;
+	wait_queue_t wait;
+	wait_queue_head_t *wait_addr;
+};
+
+/**
+ * struct p9_conn - fd mux connection state information
+ * @mux_list: list link for mux to manage multiple connections (?)
+ * @client: reference to client instance for this connection
+ * @err: error state
+ * @req_list: accounting for requests which have been sent
+ * @unsent_req_list: accounting for requests that haven't been sent
+ * @req: current request being processed (if any)
+ * @tmp_buf: temporary buffer to read in header
+ * @rsize: amount to read for current frame
+ * @rpos: read position in current frame
+ * @rbuf: current read buffer
+ * @wpos: write position for current frame
+ * @wsize: amount of data to write for current frame
+ * @wbuf: current write buffer
+ * @poll_pending_link: pending links to be polled per conn
+ * @poll_wait: array of wait_q's for various worker threads
+ * @pt: poll state
+ * @rq: current read work
+ * @wq: current write work
+ * @wsched: ????
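+ *          (bit mask of the Rworksched/Rpending/Wworksched/Wpending
+ *          scheduling flags defined above)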
+ * + */ + +struct p9_conn { + struct list_head mux_list; + struct p9_client *client; + int err; + struct list_head req_list; + struct list_head unsent_req_list; + struct p9_req_t *req; + char tmp_buf[7]; + int rsize; + int rpos; + char *rbuf; + int wpos; + int wsize; + char *wbuf; + struct list_head poll_pending_link; + struct p9_poll_wait poll_wait[MAXPOLLWADDR]; + poll_table pt; + struct work_struct rq; + struct work_struct wq; + unsigned long wsched; +}; + +static void p9_poll_workfn(struct work_struct *work); + +static DEFINE_SPINLOCK(p9_poll_lock); +static LIST_HEAD(p9_poll_pending_list); +static DECLARE_WORK(p9_poll_work, p9_poll_workfn); + +static void p9_mux_poll_stop(struct p9_conn *m) +{ + unsigned long flags; + int i; + + for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) { + struct p9_poll_wait *pwait = &m->poll_wait[i]; + + if (pwait->wait_addr) { + remove_wait_queue(pwait->wait_addr, &pwait->wait); + pwait->wait_addr = NULL; + } + } + + spin_lock_irqsave(&p9_poll_lock, flags); + list_del_init(&m->poll_pending_link); + spin_unlock_irqrestore(&p9_poll_lock, flags); +} + +/** + * p9_conn_cancel - cancel all pending requests with error + * @m: mux data + * @err: error code + * + */ + +static void p9_conn_cancel(struct p9_conn *m, int err) +{ + struct p9_req_t *req, *rtmp; + unsigned long flags; + LIST_HEAD(cancel_list); + + p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); + + spin_lock_irqsave(&m->client->lock, flags); + + if (m->err) { + spin_unlock_irqrestore(&m->client->lock, flags); + return; + } + + m->err = err; + + list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { + req->status = REQ_STATUS_ERROR; + if (!req->t_err) + req->t_err = err; + list_move(&req->req_list, &cancel_list); + } + list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { + req->status = REQ_STATUS_ERROR; + if (!req->t_err) + req->t_err = err; + list_move(&req->req_list, &cancel_list); + } + spin_unlock_irqrestore(&m->client->lock, flags); + + list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { + p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req); + list_del(&req->req_list); + p9_client_cb(m->client, req); + } +} + +static int +p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt) +{ + int ret, n; + struct p9_trans_fd *ts = NULL; + + if (client && client->status == Connected) + ts = client->trans; + + if (!ts) + return -EREMOTEIO; + + if (!ts->rd->f_op || !ts->rd->f_op->poll) + return -EIO; + + if (!ts->wr->f_op || !ts->wr->f_op->poll) + return -EIO; + + ret = ts->rd->f_op->poll(ts->rd, pt); + if (ret < 0) + return ret; + + if (ts->rd != ts->wr) { + n = ts->wr->f_op->poll(ts->wr, pt); + if (n < 0) + return n; + ret = (ret & ~POLLOUT) | (n & ~POLLIN); + } + + return ret; +} + +/** + * p9_fd_read- read from a fd + * @client: client instance + * @v: buffer to receive data into + * @len: size of receive buffer + * + */ + +static int p9_fd_read(struct p9_client *client, void *v, int len) +{ + int ret; + struct p9_trans_fd *ts = NULL; + + if (client && client->status != Disconnected) + ts = client->trans; + + if (!ts) + return -EREMOTEIO; + + if (!(ts->rd->f_flags & O_NONBLOCK)) + p9_debug(P9_DEBUG_ERROR, "blocking read ...\n"); + + ret = kernel_read(ts->rd, ts->rd->f_pos, v, len); + if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) + client->status = Disconnected; + return ret; +} + +/** + * p9_read_work - called when there is some data to be read from a transport + * @work: container of work to be done + * + */ + +static void p9_read_work(struct 
work_struct *work) +{ + int n, err; + struct p9_conn *m; + + m = container_of(work, struct p9_conn, rq); + + if (m->err < 0) + return; + + p9_debug(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos); + + if (!m->rbuf) { + m->rbuf = m->tmp_buf; + m->rpos = 0; + m->rsize = 7; /* start by reading header */ + } + + clear_bit(Rpending, &m->wsched); + p9_debug(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n", + m, m->rpos, m->rsize, m->rsize-m->rpos); + err = p9_fd_read(m->client, m->rbuf + m->rpos, + m->rsize - m->rpos); + p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err); + if (err == -EAGAIN) { + goto end_clear; + } + + if (err <= 0) + goto error; + + m->rpos += err; + + if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */ + u16 tag; + p9_debug(P9_DEBUG_TRANS, "got new header\n"); + + n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */ + if (n >= m->client->msize) { + p9_debug(P9_DEBUG_ERROR, + "requested packet size too big: %d\n", n); + err = -EIO; + goto error; + } + + tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */ + p9_debug(P9_DEBUG_TRANS, + "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag); + + m->req = p9_tag_lookup(m->client, tag); + if (!m->req || (m->req->status != REQ_STATUS_SENT && + m->req->status != REQ_STATUS_FLSH)) { + p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n", + tag); + err = -EIO; + goto error; + } + + if (m->req->rc == NULL) { + m->req->rc = kmalloc(sizeof(struct p9_fcall) + + m->client->msize, GFP_NOFS); + if (!m->req->rc) { + m->req = NULL; + err = -ENOMEM; + goto error; + } + } + m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall); + memcpy(m->rbuf, m->tmp_buf, m->rsize); + m->rsize = n; + } + + /* not an else because some packets (like clunk) have no payload */ + if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */ + p9_debug(P9_DEBUG_TRANS, "got new packet\n"); + spin_lock(&m->client->lock); + if (m->req->status != REQ_STATUS_ERROR) + m->req->status = REQ_STATUS_RCVD; + list_del(&m->req->req_list); + spin_unlock(&m->client->lock); + p9_client_cb(m->client, m->req); + m->rbuf = NULL; + m->rpos = 0; + m->rsize = 0; + m->req = NULL; + } + +end_clear: + clear_bit(Rworksched, &m->wsched); + + if (!list_empty(&m->req_list)) { + if (test_and_clear_bit(Rpending, &m->wsched)) + n = POLLIN; + else + n = p9_fd_poll(m->client, NULL); + + if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) { + p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m); + schedule_work(&m->rq); + } + } + + return; +error: + p9_conn_cancel(m, err); + clear_bit(Rworksched, &m->wsched); +} + +/** + * p9_fd_write - write to a socket + * @client: client instance + * @v: buffer to send data from + * @len: size of send buffer + * + */ + +static int p9_fd_write(struct p9_client *client, void *v, int len) +{ + int ret; + mm_segment_t oldfs; + struct p9_trans_fd *ts = NULL; + + if (client && client->status != Disconnected) + ts = client->trans; + + if (!ts) + return -EREMOTEIO; + + if (!(ts->wr->f_flags & O_NONBLOCK)) + p9_debug(P9_DEBUG_ERROR, "blocking write ...\n"); + + oldfs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ + ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos); + set_fs(oldfs); + + if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) + client->status = Disconnected; + return ret; +} + +/** + * p9_write_work - called when a transport can send some data + * @work: container for work to be done + * + */ + +static void p9_write_work(struct work_struct 
*work) +{ + int n, err; + struct p9_conn *m; + struct p9_req_t *req; + + m = container_of(work, struct p9_conn, wq); + + if (m->err < 0) { + clear_bit(Wworksched, &m->wsched); + return; + } + + if (!m->wsize) { + spin_lock(&m->client->lock); + if (list_empty(&m->unsent_req_list)) { + clear_bit(Wworksched, &m->wsched); + spin_unlock(&m->client->lock); + return; + } + + req = list_entry(m->unsent_req_list.next, struct p9_req_t, + req_list); + req->status = REQ_STATUS_SENT; + p9_debug(P9_DEBUG_TRANS, "move req %p\n", req); + list_move_tail(&req->req_list, &m->req_list); + + m->wbuf = req->tc->sdata; + m->wsize = req->tc->size; + m->wpos = 0; + spin_unlock(&m->client->lock); + } + + p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", + m, m->wpos, m->wsize); + clear_bit(Wpending, &m->wsched); + err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos); + p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err); + if (err == -EAGAIN) + goto end_clear; + + + if (err < 0) + goto error; + else if (err == 0) { + err = -EREMOTEIO; + goto error; + } + + m->wpos += err; + if (m->wpos == m->wsize) + m->wpos = m->wsize = 0; + +end_clear: + clear_bit(Wworksched, &m->wsched); + + if (m->wsize || !list_empty(&m->unsent_req_list)) { + if (test_and_clear_bit(Wpending, &m->wsched)) + n = POLLOUT; + else + n = p9_fd_poll(m->client, NULL); + + if ((n & POLLOUT) && + !test_and_set_bit(Wworksched, &m->wsched)) { + p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m); + schedule_work(&m->wq); + } + } + + return; + +error: + p9_conn_cancel(m, err); + clear_bit(Wworksched, &m->wsched); +} + +static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key) +{ + struct p9_poll_wait *pwait = + container_of(wait, struct p9_poll_wait, wait); + struct p9_conn *m = pwait->conn; + unsigned long flags; + + spin_lock_irqsave(&p9_poll_lock, flags); + if (list_empty(&m->poll_pending_link)) + list_add_tail(&m->poll_pending_link, &p9_poll_pending_list); + spin_unlock_irqrestore(&p9_poll_lock, flags); + + schedule_work(&p9_poll_work); + return 1; +} + +/** + * p9_pollwait - add poll task to the wait queue + * @filp: file pointer being polled + * @wait_address: wait_q to block on + * @p: poll state + * + * called by files poll operation to add v9fs-poll task to files wait queue + */ + +static void +p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p) +{ + struct p9_conn *m = container_of(p, struct p9_conn, pt); + struct p9_poll_wait *pwait = NULL; + int i; + + for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) { + if (m->poll_wait[i].wait_addr == NULL) { + pwait = &m->poll_wait[i]; + break; + } + } + + if (!pwait) { + p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n"); + return; + } + + pwait->conn = m; + pwait->wait_addr = wait_address; + init_waitqueue_func_entry(&pwait->wait, p9_pollwake); + add_wait_queue(wait_address, &pwait->wait); +} + +/** + * p9_conn_create - allocate and initialize the per-session mux data + * @client: client instance + * + * Note: Creates the polling task if this is the first session. 
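+ * (in this version polling is actually driven by the shared p9_poll_work
+ * work item declared above, not by a dedicated task)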
+ */ + +static struct p9_conn *p9_conn_create(struct p9_client *client) +{ + int n; + struct p9_conn *m; + + p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize); + m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL); + if (!m) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&m->mux_list); + m->client = client; + + INIT_LIST_HEAD(&m->req_list); + INIT_LIST_HEAD(&m->unsent_req_list); + INIT_WORK(&m->rq, p9_read_work); + INIT_WORK(&m->wq, p9_write_work); + INIT_LIST_HEAD(&m->poll_pending_link); + init_poll_funcptr(&m->pt, p9_pollwait); + + n = p9_fd_poll(client, &m->pt); + if (n & POLLIN) { + p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m); + set_bit(Rpending, &m->wsched); + } + + if (n & POLLOUT) { + p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); + set_bit(Wpending, &m->wsched); + } + + return m; +} + +/** + * p9_poll_mux - polls a mux and schedules read or write works if necessary + * @m: connection to poll + * + */ + +static void p9_poll_mux(struct p9_conn *m) +{ + int n; + + if (m->err < 0) + return; + + n = p9_fd_poll(m->client, NULL); + if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) { + p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n); + if (n >= 0) + n = -ECONNRESET; + p9_conn_cancel(m, n); + } + + if (n & POLLIN) { + set_bit(Rpending, &m->wsched); + p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m); + if (!test_and_set_bit(Rworksched, &m->wsched)) { + p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m); + schedule_work(&m->rq); + } + } + + if (n & POLLOUT) { + set_bit(Wpending, &m->wsched); + p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); + if ((m->wsize || !list_empty(&m->unsent_req_list)) && + !test_and_set_bit(Wworksched, &m->wsched)) { + p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m); + schedule_work(&m->wq); + } + } +} + +/** + * p9_fd_request - send 9P request + * The function can sleep until the request is scheduled for sending. + * The function can be interrupted. Return from the function is not + * a guarantee that the request is sent successfully. 
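+ * Completion is reported asynchronously: p9_read_work() looks the reply
+ * up by tag and signals the requester through p9_client_cb().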
+ * + * @client: client instance + * @req: request to be sent + * + */ + +static int p9_fd_request(struct p9_client *client, struct p9_req_t *req) +{ + int n; + struct p9_trans_fd *ts = client->trans; + struct p9_conn *m = ts->conn; + + p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", + m, current, req->tc, req->tc->id); + if (m->err < 0) + return m->err; + + spin_lock(&client->lock); + req->status = REQ_STATUS_UNSENT; + list_add_tail(&req->req_list, &m->unsent_req_list); + spin_unlock(&client->lock); + + if (test_and_clear_bit(Wpending, &m->wsched)) + n = POLLOUT; + else + n = p9_fd_poll(m->client, NULL); + + if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) + schedule_work(&m->wq); + + return 0; +} + +static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req) +{ + int ret = 1; + + p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); + + spin_lock(&client->lock); + + if (req->status == REQ_STATUS_UNSENT) { + list_del(&req->req_list); + req->status = REQ_STATUS_FLSHD; + ret = 0; + } else if (req->status == REQ_STATUS_SENT) + req->status = REQ_STATUS_FLSH; + + spin_unlock(&client->lock); + + return ret; +} + +/** + * parse_opts - parse mount options into p9_fd_opts structure + * @params: options string passed from mount + * @opts: fd transport-specific structure to parse options into + * + * Returns 0 upon success, -ERRNO upon failure + */ + +static int parse_opts(char *params, struct p9_fd_opts *opts) +{ + char *p; + substring_t args[MAX_OPT_ARGS]; + int option; + char *options, *tmp_options; + + opts->port = P9_PORT; + opts->rfd = ~0; + opts->wfd = ~0; + + if (!params) + return 0; + + tmp_options = kstrdup(params, GFP_KERNEL); + if (!tmp_options) { + p9_debug(P9_DEBUG_ERROR, + "failed to allocate copy of option string\n"); + return -ENOMEM; + } + options = tmp_options; + + while ((p = strsep(&options, ",")) != NULL) { + int token; + int r; + if (!*p) + continue; + token = match_token(p, tokens, args); + if (token != Opt_err) { + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + continue; + } + } + switch (token) { + case Opt_port: + opts->port = option; + break; + case Opt_rfdno: + opts->rfd = option; + break; + case Opt_wfdno: + opts->wfd = option; + break; + default: + continue; + } + } + + kfree(tmp_options); + return 0; +} + +static int p9_fd_open(struct p9_client *client, int rfd, int wfd) +{ + struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd), + GFP_KERNEL); + if (!ts) + return -ENOMEM; + + ts->rd = fget(rfd); + ts->wr = fget(wfd); + if (!ts->rd || !ts->wr) { + if (ts->rd) + fput(ts->rd); + if (ts->wr) + fput(ts->wr); + kfree(ts); + return -EIO; + } + + client->trans = ts; + client->status = Connected; + + return 0; +} + +static int p9_socket_open(struct p9_client *client, struct socket *csocket) +{ + struct p9_trans_fd *p; + struct file *file; + int ret; + + p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL); + if (!p) + return -ENOMEM; + + csocket->sk->sk_allocation = GFP_NOIO; + file = sock_alloc_file(csocket, 0, NULL); + if (IS_ERR(file)) { + pr_err("%s (%d): failed to map fd\n", + __func__, task_pid_nr(current)); + sock_release(csocket); + kfree(p); + return PTR_ERR(file); + } + + get_file(file); + p->wr = p->rd = file; + client->trans = p; + client->status = Connected; + + p->rd->f_flags |= O_NONBLOCK; + + p->conn = p9_conn_create(client); + if (IS_ERR(p->conn)) { + ret = PTR_ERR(p->conn); + p->conn = NULL; + kfree(p); + sockfd_put(csocket); + 
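+		/* drop the second reference too: sock_alloc_file() and
+		 * get_file() above each took one on the socket's file */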
+		sockfd_put(csocket);
+		return ret;
+	}
+	return 0;
+}
+
+/**
+ * p9_conn_destroy - cancels all pending requests and frees mux resources
+ * @m: mux to destroy
+ *
+ */
+
+static void p9_conn_destroy(struct p9_conn *m)
+{
+	p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n",
+		 m, m->mux_list.prev, m->mux_list.next);
+
+	p9_mux_poll_stop(m);
+	cancel_work_sync(&m->rq);
+	cancel_work_sync(&m->wq);
+
+	p9_conn_cancel(m, -ECONNRESET);
+
+	m->client = NULL;
+	kfree(m);
+}
+
+/**
+ * p9_fd_close - shutdown file descriptor transport
+ * @client: client instance
+ *
+ */
+
+static void p9_fd_close(struct p9_client *client)
+{
+	struct p9_trans_fd *ts;
+
+	if (!client)
+		return;
+
+	ts = client->trans;
+	if (!ts)
+		return;
+
+	client->status = Disconnected;
+
+	p9_conn_destroy(ts->conn);
+
+	if (ts->rd)
+		fput(ts->rd);
+	if (ts->wr)
+		fput(ts->wr);
+
+	kfree(ts);
+}
+
+/*
+ * stolen from NFS - maybe should be made a generic function?
+ */
+static inline int valid_ipaddr4(const char *buf)
+{
+	int rc, count, in[4];
+
+	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
+	if (rc != 4)
+		return -EINVAL;
+	for (count = 0; count < 4; count++) {
+		if (in[count] > 255)
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static int
+p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
+{
+	int err;
+	struct socket *csocket;
+	struct sockaddr_in sin_server;
+	struct p9_fd_opts opts;
+
+	err = parse_opts(args, &opts);
+	if (err < 0)
+		return err;
+
+	if (valid_ipaddr4(addr) < 0)
+		return -EINVAL;
+
+	csocket = NULL;
+
+	sin_server.sin_family = AF_INET;
+	sin_server.sin_addr.s_addr = in_aton(addr);
+	sin_server.sin_port = htons(opts.port);
+	err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_INET,
+			    SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
+	if (err) {
+		pr_err("%s (%d): problem creating socket\n",
+		       __func__, task_pid_nr(current));
+		return err;
+	}
+
+	err = csocket->ops->connect(csocket,
+				    (struct sockaddr *)&sin_server,
+				    sizeof(struct sockaddr_in), 0);
+	if (err < 0) {
+		pr_err("%s (%d): problem connecting socket to %s\n",
+		       __func__, task_pid_nr(current), addr);
+		sock_release(csocket);
+		return err;
+	}
+
+	return p9_socket_open(client, csocket);
+}
+
+static int
+p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
+{
+	int err;
+	struct socket *csocket;
+	struct sockaddr_un sun_server;
+
+	csocket = NULL;
+
+	if (strlen(addr) >= UNIX_PATH_MAX) {
+		pr_err("%s (%d): address too long: %s\n",
+		       __func__, task_pid_nr(current), addr);
+		return -ENAMETOOLONG;
+	}
+
+	sun_server.sun_family = PF_UNIX;
+	strcpy(sun_server.sun_path, addr);
+	err = __sock_create(read_pnet(&current->nsproxy->net_ns), PF_UNIX,
+			    SOCK_STREAM, 0, &csocket, 1);
+	if (err < 0) {
+		pr_err("%s (%d): problem creating socket\n",
+		       __func__, task_pid_nr(current));
+
+		return err;
+	}
+	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
+			sizeof(struct sockaddr_un) - 1, 0);
+	if (err < 0) {
+		pr_err("%s (%d): problem connecting socket: %s: %d\n",
+		       __func__, task_pid_nr(current), addr, err);
+		sock_release(csocket);
+		return err;
+	}
+
+	return p9_socket_open(client, csocket);
+}
+
+static int
+p9_fd_create(struct p9_client *client, const char *addr, char *args)
+{
+	int err;
+	struct p9_fd_opts opts;
+	struct p9_trans_fd *p;
+
+	parse_opts(args, &opts);
+
+	if (opts.rfd == ~0 || opts.wfd == ~0) {
+		pr_err("Insufficient options for proto=fd\n");
+		return -ENOPROTOOPT;
+	}
+
+	err = p9_fd_open(client, opts.rfd, opts.wfd);
+	if (err < 0)
+		return err;
+
+	p = (struct p9_trans_fd *)
client->trans;
+	p->conn = p9_conn_create(client);
+	if (IS_ERR(p->conn)) {
+		err = PTR_ERR(p->conn);
+		p->conn = NULL;
+		fput(p->rd);
+		fput(p->wr);
+		return err;
+	}
+
+	return 0;
+}
+
+static struct p9_trans_module p9_tcp_trans = {
+	.name = "tcp",
+	.maxsize = MAX_SOCK_BUF,
+	.def = 1,
+	.create = p9_fd_create_tcp,
+	.close = p9_fd_close,
+	.request = p9_fd_request,
+	.cancel = p9_fd_cancel,
+	.owner = THIS_MODULE,
+};
+
+static struct p9_trans_module p9_unix_trans = {
+	.name = "unix",
+	.maxsize = MAX_SOCK_BUF,
+	.def = 0,
+	.create = p9_fd_create_unix,
+	.close = p9_fd_close,
+	.request = p9_fd_request,
+	.cancel = p9_fd_cancel,
+	.owner = THIS_MODULE,
+};
+
+static struct p9_trans_module p9_fd_trans = {
+	.name = "fd",
+	.maxsize = MAX_SOCK_BUF,
+	.def = 0,
+	.create = p9_fd_create,
+	.close = p9_fd_close,
+	.request = p9_fd_request,
+	.cancel = p9_fd_cancel,
+	.owner = THIS_MODULE,
+};
+
+/**
+ * p9_poll_workfn - poll worker work function
+ * @work: work item for the shared p9_poll_work
+ *
+ * polls all v9fs transports for new events and queues the appropriate
+ * work to the work queue
+ *
+ */
+
+static void p9_poll_workfn(struct work_struct *work)
+{
+	unsigned long flags;
+
+	p9_debug(P9_DEBUG_TRANS, "start %p\n", current);
+
+	spin_lock_irqsave(&p9_poll_lock, flags);
+	while (!list_empty(&p9_poll_pending_list)) {
+		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
+							struct p9_conn,
+							poll_pending_link);
+		list_del_init(&conn->poll_pending_link);
+		spin_unlock_irqrestore(&p9_poll_lock, flags);
+
+		p9_poll_mux(conn);
+
+		spin_lock_irqsave(&p9_poll_lock, flags);
+	}
+	spin_unlock_irqrestore(&p9_poll_lock, flags);
+
+	p9_debug(P9_DEBUG_TRANS, "finish\n");
+}
+
+int p9_trans_fd_init(void)
+{
+	v9fs_register_trans(&p9_tcp_trans);
+	v9fs_register_trans(&p9_unix_trans);
+	v9fs_register_trans(&p9_fd_trans);
+
+	return 0;
+}
+
+void p9_trans_fd_exit(void)
+{
+	flush_work(&p9_poll_work);
+	v9fs_unregister_trans(&p9_tcp_trans);
+	v9fs_unregister_trans(&p9_unix_trans);
+	v9fs_unregister_trans(&p9_fd_trans);
+}
diff --git a/addons/9p/src/3.10.108/trans_rdma.c b/addons/9p/src/3.10.108/trans_rdma.c new file mode 100644 index 00000000..2c69ddd6 --- /dev/null +++ b/addons/9p/src/3.10.108/trans_rdma.c @@ -0,0 +1,719 @@
+/*
+ * linux/fs/9p/trans_rdma.c
+ *
+ * RDMA transport layer based on the trans_fd.c implementation.
+ *
+ * Copyright (C) 2008 by Tom Tucker
+ * Copyright (C) 2006 by Russ Cox
+ * Copyright (C) 2004-2005 by Latchesar Ionkov
+ * Copyright (C) 2004-2008 by Eric Van Hensbergen
+ * Copyright (C) 1997-2002 by Ron Minnich
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/in.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/ipv6.h>
+#include <linux/kthread.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/un.h>
+#include <linux/uaccess.h>
+#include <linux/inet.h>
+#include <linux/idr.h>
+#include <linux/file.h>
+#include <linux/parser.h>
+#include <linux/semaphore.h>
+#include <linux/slab.h>
+#include <net/9p/9p.h>
+#include <net/9p/client.h>
+#include <net/9p/transport.h>
+#include <rdma/ib_verbs.h>
+#include <rdma/rdma_cm.h>
+
+#define P9_PORT			5640
+#define P9_RDMA_SQ_DEPTH	32
+#define P9_RDMA_RQ_DEPTH	32
+#define P9_RDMA_SEND_SGE	4
+#define P9_RDMA_RECV_SGE	4
+#define P9_RDMA_IRD		0
+#define P9_RDMA_ORD		0
+#define P9_RDMA_TIMEOUT		30000		/* 30 seconds */
+#define P9_RDMA_MAXSIZE		(4*4096)	/* Min SGE is 4, so we can
+						 * safely advertise a maxsize
+						 * of 64k */
+
+/**
+ * struct p9_trans_rdma - RDMA transport instance
+ *
+ * @state: tracks the transport state machine for connection setup and tear down
+ * @cm_id: The RDMA CM ID
+ * @pd: Protection Domain pointer
+ * @qp: Queue Pair pointer
+ * @cq: Completion Queue pointer
+ * @dma_mr: DMA Memory Region pointer
+ * @lkey: The local access only memory region key
+ * @timeout: Number of uSecs to wait for connection management events
+ * @sq_depth: The depth of the Send Queue
+ * @sq_sem: Semaphore for the SQ
+ * @rq_depth: The depth of the Receive Queue.
+ * @rq_count: Count of requests in the Receive Queue.
+ * @addr: The remote peer's address
+ * @req_lock: Protects the active request list
+ * @cm_done: Completion event for connection management tracking
+ */
+struct p9_trans_rdma {
+	enum {
+		P9_RDMA_INIT,
+		P9_RDMA_ADDR_RESOLVED,
+		P9_RDMA_ROUTE_RESOLVED,
+		P9_RDMA_CONNECTED,
+		P9_RDMA_FLUSHING,
+		P9_RDMA_CLOSING,
+		P9_RDMA_CLOSED,
+	} state;
+	struct rdma_cm_id *cm_id;
+	struct ib_pd *pd;
+	struct ib_qp *qp;
+	struct ib_cq *cq;
+	struct ib_mr *dma_mr;
+	u32 lkey;
+	long timeout;
+	int sq_depth;
+	struct semaphore sq_sem;
+	int rq_depth;
+	atomic_t rq_count;
+	struct sockaddr_in addr;
+	spinlock_t req_lock;
+
+	struct completion cm_done;
+};
+
+/**
+ * p9_rdma_context - Keeps track of in-process WR
+ *
+ * @wc_op: The original WR op for when the CQE completes in error.
+ * @busa: Bus address to unmap when the WR completes
+ * @req: Keeps track of requests (send)
+ * @rc: Keeps track of replies (receive)
+ */
+struct p9_rdma_req;
+struct p9_rdma_context {
+	enum ib_wc_opcode wc_op;
+	dma_addr_t busa;
+	union {
+		struct p9_req_t *req;
+		struct p9_fcall *rc;
+	};
+};
+
+/**
+ * p9_rdma_opts - Collection of mount options
+ * @port: port of connection
+ * @sq_depth: The requested depth of the SQ. This really doesn't need
+ * to be any deeper than the number of threads used in the client
+ * @rq_depth: The depth of the RQ.
Should be greater than or equal to SQ depth + * @timeout: Time to wait in msecs for CM events + */ +struct p9_rdma_opts { + short port; + int sq_depth; + int rq_depth; + long timeout; +}; + +/* + * Option Parsing (code inspired by NFS code) + */ +enum { + /* Options that take integer arguments */ + Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout, Opt_err, +}; + +static match_table_t tokens = { + {Opt_port, "port=%u"}, + {Opt_sq_depth, "sq=%u"}, + {Opt_rq_depth, "rq=%u"}, + {Opt_timeout, "timeout=%u"}, + {Opt_err, NULL}, +}; + +/** + * parse_opts - parse mount options into rdma options structure + * @params: options string passed from mount + * @opts: rdma transport-specific structure to parse options into + * + * Returns 0 upon success, -ERRNO upon failure + */ +static int parse_opts(char *params, struct p9_rdma_opts *opts) +{ + char *p; + substring_t args[MAX_OPT_ARGS]; + int option; + char *options, *tmp_options; + + opts->port = P9_PORT; + opts->sq_depth = P9_RDMA_SQ_DEPTH; + opts->rq_depth = P9_RDMA_RQ_DEPTH; + opts->timeout = P9_RDMA_TIMEOUT; + + if (!params) + return 0; + + tmp_options = kstrdup(params, GFP_KERNEL); + if (!tmp_options) { + p9_debug(P9_DEBUG_ERROR, + "failed to allocate copy of option string\n"); + return -ENOMEM; + } + options = tmp_options; + + while ((p = strsep(&options, ",")) != NULL) { + int token; + int r; + if (!*p) + continue; + token = match_token(p, tokens, args); + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + continue; + } + switch (token) { + case Opt_port: + opts->port = option; + break; + case Opt_sq_depth: + opts->sq_depth = option; + break; + case Opt_rq_depth: + opts->rq_depth = option; + break; + case Opt_timeout: + opts->timeout = option; + break; + default: + continue; + } + } + /* RQ must be at least as large as the SQ */ + opts->rq_depth = max(opts->rq_depth, opts->sq_depth); + kfree(tmp_options); + return 0; +} + +static int +p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) +{ + struct p9_client *c = id->context; + struct p9_trans_rdma *rdma = c->trans; + switch (event->event) { + case RDMA_CM_EVENT_ADDR_RESOLVED: + BUG_ON(rdma->state != P9_RDMA_INIT); + rdma->state = P9_RDMA_ADDR_RESOLVED; + break; + + case RDMA_CM_EVENT_ROUTE_RESOLVED: + BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED); + rdma->state = P9_RDMA_ROUTE_RESOLVED; + break; + + case RDMA_CM_EVENT_ESTABLISHED: + BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED); + rdma->state = P9_RDMA_CONNECTED; + break; + + case RDMA_CM_EVENT_DISCONNECTED: + if (rdma) + rdma->state = P9_RDMA_CLOSED; + if (c) + c->status = Disconnected; + break; + + case RDMA_CM_EVENT_TIMEWAIT_EXIT: + break; + + case RDMA_CM_EVENT_ADDR_CHANGE: + case RDMA_CM_EVENT_ROUTE_ERROR: + case RDMA_CM_EVENT_DEVICE_REMOVAL: + case RDMA_CM_EVENT_MULTICAST_JOIN: + case RDMA_CM_EVENT_MULTICAST_ERROR: + case RDMA_CM_EVENT_REJECTED: + case RDMA_CM_EVENT_CONNECT_REQUEST: + case RDMA_CM_EVENT_CONNECT_RESPONSE: + case RDMA_CM_EVENT_CONNECT_ERROR: + case RDMA_CM_EVENT_ADDR_ERROR: + case RDMA_CM_EVENT_UNREACHABLE: + c->status = Disconnected; + rdma_disconnect(rdma->cm_id); + break; + default: + BUG(); + } + complete(&rdma->cm_done); + return 0; +} + +static void +handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma, + struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len) +{ + struct p9_req_t *req; + int err = 0; + int16_t tag; + + req = NULL; + ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize, + 
DMA_FROM_DEVICE); + + if (status != IB_WC_SUCCESS) + goto err_out; + + err = p9_parse_header(c->rc, NULL, NULL, &tag, 1); + if (err) + goto err_out; + + req = p9_tag_lookup(client, tag); + if (!req) + goto err_out; + + req->rc = c->rc; + req->status = REQ_STATUS_RCVD; + p9_client_cb(client, req); + + return; + + err_out: + p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status); + rdma->state = P9_RDMA_FLUSHING; + client->status = Disconnected; +} + +static void +handle_send(struct p9_client *client, struct p9_trans_rdma *rdma, + struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len) +{ + ib_dma_unmap_single(rdma->cm_id->device, + c->busa, c->req->tc->size, + DMA_TO_DEVICE); +} + +static void qp_event_handler(struct ib_event *event, void *context) +{ + p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n", + event->event, context); +} + +static void cq_comp_handler(struct ib_cq *cq, void *cq_context) +{ + struct p9_client *client = cq_context; + struct p9_trans_rdma *rdma = client->trans; + int ret; + struct ib_wc wc; + + ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP); + while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) { + struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id; + + switch (c->wc_op) { + case IB_WC_RECV: + atomic_dec(&rdma->rq_count); + handle_recv(client, rdma, c, wc.status, wc.byte_len); + break; + + case IB_WC_SEND: + handle_send(client, rdma, c, wc.status, wc.byte_len); + up(&rdma->sq_sem); + break; + + default: + pr_err("unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n", + c->wc_op, wc.opcode, wc.status); + break; + } + kfree(c); + } +} + +static void cq_event_handler(struct ib_event *e, void *v) +{ + p9_debug(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v); +} + +static void rdma_destroy_trans(struct p9_trans_rdma *rdma) +{ + if (!rdma) + return; + + if (rdma->dma_mr && !IS_ERR(rdma->dma_mr)) + ib_dereg_mr(rdma->dma_mr); + + if (rdma->qp && !IS_ERR(rdma->qp)) + ib_destroy_qp(rdma->qp); + + if (rdma->pd && !IS_ERR(rdma->pd)) + ib_dealloc_pd(rdma->pd); + + if (rdma->cq && !IS_ERR(rdma->cq)) + ib_destroy_cq(rdma->cq); + + if (rdma->cm_id && !IS_ERR(rdma->cm_id)) + rdma_destroy_id(rdma->cm_id); + + kfree(rdma); +} + +static int +post_recv(struct p9_client *client, struct p9_rdma_context *c) +{ + struct p9_trans_rdma *rdma = client->trans; + struct ib_recv_wr wr, *bad_wr; + struct ib_sge sge; + + c->busa = ib_dma_map_single(rdma->cm_id->device, + c->rc->sdata, client->msize, + DMA_FROM_DEVICE); + if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) + goto error; + + sge.addr = c->busa; + sge.length = client->msize; + sge.lkey = rdma->lkey; + + wr.next = NULL; + c->wc_op = IB_WC_RECV; + wr.wr_id = (unsigned long) c; + wr.sg_list = &sge; + wr.num_sge = 1; + return ib_post_recv(rdma->qp, &wr, &bad_wr); + + error: + p9_debug(P9_DEBUG_ERROR, "EIO\n"); + return -EIO; +} + +static int rdma_request(struct p9_client *client, struct p9_req_t *req) +{ + struct p9_trans_rdma *rdma = client->trans; + struct ib_send_wr wr, *bad_wr; + struct ib_sge sge; + int err = 0; + unsigned long flags; + struct p9_rdma_context *c = NULL; + struct p9_rdma_context *rpl_context = NULL; + + /* Allocate an fcall for the reply */ + rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS); + if (!rpl_context) { + err = -ENOMEM; + goto err_close; + } + + /* + * If the request has a buffer, steal it, otherwise + * allocate a new one. 
Typically, requests should already
+	 * have receive buffers allocated and just swap them around
+	 */
+	if (!req->rc) {
+		req->rc = kmalloc(sizeof(struct p9_fcall)+client->msize,
+				  GFP_NOFS);
+		if (req->rc) {
+			req->rc->sdata = (char *) req->rc +
+						sizeof(struct p9_fcall);
+			req->rc->capacity = client->msize;
+		}
+	}
+	rpl_context->rc = req->rc;
+	if (!rpl_context->rc) {
+		err = -ENOMEM;
+		goto err_free2;
+	}
+
+	/*
+	 * Post a receive buffer for this request. We need to ensure
+	 * there is a reply buffer available for every outstanding
+	 * request. A flushed request can result in no reply for an
+	 * outstanding request, so we must keep a count to avoid
+	 * overflowing the RQ.
+	 */
+	if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
+		err = post_recv(client, rpl_context);
+		if (err)
+			goto err_free1;
+	} else
+		atomic_dec(&rdma->rq_count);
+
+	/* remove posted receive buffer from request structure */
+	req->rc = NULL;
+
+	/* Post the request */
+	c = kmalloc(sizeof *c, GFP_NOFS);
+	if (!c) {
+		err = -ENOMEM;
+		goto err_free1;
+	}
+	c->req = req;
+
+	c->busa = ib_dma_map_single(rdma->cm_id->device,
+				    c->req->tc->sdata, c->req->tc->size,
+				    DMA_TO_DEVICE);
+	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
+		goto error;
+
+	sge.addr = c->busa;
+	sge.length = c->req->tc->size;
+	sge.lkey = rdma->lkey;
+
+	wr.next = NULL;
+	c->wc_op = IB_WC_SEND;
+	wr.wr_id = (unsigned long) c;
+	wr.opcode = IB_WR_SEND;
+	wr.send_flags = IB_SEND_SIGNALED;
+	wr.sg_list = &sge;
+	wr.num_sge = 1;
+
+	if (down_interruptible(&rdma->sq_sem))
+		goto error;
+
+	return ib_post_send(rdma->qp, &wr, &bad_wr);
+
+ error:
+	kfree(c);
+	kfree(rpl_context->rc);
+	kfree(rpl_context);
+	p9_debug(P9_DEBUG_ERROR, "EIO\n");
+	return -EIO;
+ err_free1:
+	kfree(rpl_context->rc);
+ err_free2:
+	kfree(rpl_context);
+ err_close:
+	spin_lock_irqsave(&rdma->req_lock, flags);
+	if (rdma->state < P9_RDMA_CLOSING) {
+		rdma->state = P9_RDMA_CLOSING;
+		spin_unlock_irqrestore(&rdma->req_lock, flags);
+		rdma_disconnect(rdma->cm_id);
+	} else
+		spin_unlock_irqrestore(&rdma->req_lock, flags);
+	return err;
+}
+
+static void rdma_close(struct p9_client *client)
+{
+	struct p9_trans_rdma *rdma;
+
+	if (!client)
+		return;
+
+	rdma = client->trans;
+	if (!rdma)
+		return;
+
+	client->status = Disconnected;
+	rdma_disconnect(rdma->cm_id);
+	rdma_destroy_trans(rdma);
+}
+
+/**
+ * alloc_rdma - Allocate and initialize the rdma transport structure
+ * @opts: Mount options structure
+ */
+static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
+{
+	struct p9_trans_rdma *rdma;
+
+	rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL);
+	if (!rdma)
+		return NULL;
+
+	rdma->sq_depth = opts->sq_depth;
+	rdma->rq_depth = opts->rq_depth;
+	rdma->timeout = opts->timeout;
+	spin_lock_init(&rdma->req_lock);
+	init_completion(&rdma->cm_done);
+	sema_init(&rdma->sq_sem, rdma->sq_depth);
+	atomic_set(&rdma->rq_count, 0);
+
+	return rdma;
+}
+
+/* it's not clear to me we can do anything after send has been posted */
+static int rdma_cancel(struct p9_client *client, struct p9_req_t *req)
+{
+	return 1;
+}
+
+/**
+ * rdma_create_trans - Transport method for creating a transport instance
+ * @client: client instance
+ * @addr: IP address string
+ * @args: Mount options string
+ */
+static int
+rdma_create_trans(struct p9_client *client, const char *addr, char *args)
+{
+	int err;
+	struct p9_rdma_opts opts;
+	struct p9_trans_rdma *rdma;
+	struct rdma_conn_param conn_param;
+	struct ib_qp_init_attr qp_attr;
+	struct ib_device_attr devattr;
+
+	/*
Parse the transport specific mount options */ + err = parse_opts(args, &opts); + if (err < 0) + return err; + + /* Create and initialize the RDMA transport structure */ + rdma = alloc_rdma(&opts); + if (!rdma) + return -ENOMEM; + + /* Create the RDMA CM ID */ + rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP, + IB_QPT_RC); + if (IS_ERR(rdma->cm_id)) + goto error; + + /* Associate the client with the transport */ + client->trans = rdma; + + /* Resolve the server's address */ + rdma->addr.sin_family = AF_INET; + rdma->addr.sin_addr.s_addr = in_aton(addr); + rdma->addr.sin_port = htons(opts.port); + err = rdma_resolve_addr(rdma->cm_id, NULL, + (struct sockaddr *)&rdma->addr, + rdma->timeout); + if (err) + goto error; + err = wait_for_completion_interruptible(&rdma->cm_done); + if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED)) + goto error; + + /* Resolve the route to the server */ + err = rdma_resolve_route(rdma->cm_id, rdma->timeout); + if (err) + goto error; + err = wait_for_completion_interruptible(&rdma->cm_done); + if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED)) + goto error; + + /* Query the device attributes */ + err = ib_query_device(rdma->cm_id->device, &devattr); + if (err) + goto error; + + /* Create the Completion Queue */ + rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler, + cq_event_handler, client, + opts.sq_depth + opts.rq_depth + 1, 0); + if (IS_ERR(rdma->cq)) + goto error; + ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP); + + /* Create the Protection Domain */ + rdma->pd = ib_alloc_pd(rdma->cm_id->device); + if (IS_ERR(rdma->pd)) + goto error; + + /* Cache the DMA lkey in the transport */ + rdma->dma_mr = NULL; + if (devattr.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY) + rdma->lkey = rdma->cm_id->device->local_dma_lkey; + else { + rdma->dma_mr = ib_get_dma_mr(rdma->pd, IB_ACCESS_LOCAL_WRITE); + if (IS_ERR(rdma->dma_mr)) + goto error; + rdma->lkey = rdma->dma_mr->lkey; + } + + /* Create the Queue Pair */ + memset(&qp_attr, 0, sizeof qp_attr); + qp_attr.event_handler = qp_event_handler; + qp_attr.qp_context = client; + qp_attr.cap.max_send_wr = opts.sq_depth; + qp_attr.cap.max_recv_wr = opts.rq_depth; + qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE; + qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE; + qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; + qp_attr.qp_type = IB_QPT_RC; + qp_attr.send_cq = rdma->cq; + qp_attr.recv_cq = rdma->cq; + err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr); + if (err) + goto error; + rdma->qp = rdma->cm_id->qp; + + /* Request a connection */ + memset(&conn_param, 0, sizeof(conn_param)); + conn_param.private_data = NULL; + conn_param.private_data_len = 0; + conn_param.responder_resources = P9_RDMA_IRD; + conn_param.initiator_depth = P9_RDMA_ORD; + err = rdma_connect(rdma->cm_id, &conn_param); + if (err) + goto error; + err = wait_for_completion_interruptible(&rdma->cm_done); + if (err || (rdma->state != P9_RDMA_CONNECTED)) + goto error; + + client->status = Connected; + + return 0; + +error: + rdma_destroy_trans(rdma); + return -ENOTCONN; +} + +static struct p9_trans_module p9_rdma_trans = { + .name = "rdma", + .maxsize = P9_RDMA_MAXSIZE, + .def = 0, + .owner = THIS_MODULE, + .create = rdma_create_trans, + .close = rdma_close, + .request = rdma_request, + .cancel = rdma_cancel, +}; + +/** + * p9_trans_rdma_init - Register the 9P RDMA transport driver + */ +static int __init p9_trans_rdma_init(void) +{ + v9fs_register_trans(&p9_rdma_trans); + return 0; +} + +static void __exit p9_trans_rdma_exit(void) +{ + 
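+	/* stop advertising the rdma transport to new 9p mounts */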
v9fs_unregister_trans(&p9_rdma_trans);
+}
+
+module_init(p9_trans_rdma_init);
+module_exit(p9_trans_rdma_exit);
+
+MODULE_AUTHOR("Tom Tucker ");
+MODULE_DESCRIPTION("RDMA Transport for 9P");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/addons/9p/src/3.10.108/trans_virtio.c b/addons/9p/src/3.10.108/trans_virtio.c new file mode 100644 index 00000000..c76a4388 --- /dev/null +++ b/addons/9p/src/3.10.108/trans_virtio.c @@ -0,0 +1,730 @@
+/*
+ * The Virtio 9p transport driver
+ *
+ * This is a block based transport driver based on the lguest block driver
+ * code.
+ *
+ * Copyright (C) 2007, 2008 Eric Van Hensbergen, IBM Corporation
+ *
+ * Based on virtio console driver
+ * Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/in.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/ipv6.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/un.h>
+#include <linux/uaccess.h>
+#include <linux/inet.h>
+#include <linux/idr.h>
+#include <linux/file.h>
+#include <linux/highmem.h>
+#include <linux/slab.h>
+#include <net/9p/9p.h>
+#include <linux/parser.h>
+#include <net/9p/client.h>
+#include <net/9p/transport.h>
+#include <linux/scatterlist.h>
+#include <linux/swap.h>
+#include <linux/virtio.h>
+#include <linux/virtio_9p.h>
+#include "trans_common.h"
+
+#define VIRTQUEUE_NUM	128
+
+/* a single mutex to manage channel initialization and attachment */
+static DEFINE_MUTEX(virtio_9p_lock);
+static DECLARE_WAIT_QUEUE_HEAD(vp_wq);
+static atomic_t vp_pinned = ATOMIC_INIT(0);
+
+/**
+ * struct virtio_chan - per-instance transport information
+ * @initialized: whether the channel is initialized
+ * @inuse: whether the channel is in use
+ * @lock: protects multiple elements within this structure
+ * @client: client instance
+ * @vdev: virtio dev associated with this channel
+ * @vq: virtio queue associated with this channel
+ * @sg: scatter gather list which is used to pack a request (protected?)
+ *
+ * We keep all per-channel information in a structure.
+ * This structure is allocated within the devices dev->mem space.
+ * A pointer to the structure will get put in the transport private.
+ *
+ */
+
+struct virtio_chan {
+	bool inuse;
+
+	spinlock_t lock;
+
+	struct p9_client *client;
+	struct virtio_device *vdev;
+	struct virtqueue *vq;
+	int ring_bufs_avail;
+	wait_queue_head_t *vc_wq;
+	/* This is global limit. Since we don't have a global structure,
+	 * will be placing it in each channel.
+	 */
+	unsigned long p9_max_pages;
+	/* Scatterlist: can be too big for stack. */
+	struct scatterlist sg[VIRTQUEUE_NUM];
+
+	int tag_len;
+	/*
+	 * tag name to identify a mount Non-null terminated
+	 */
+	char *tag;
+
+	struct list_head chan_list;
+};
+
+static struct list_head virtio_chan_list;
+
+/* How many bytes left in this page. */
+static unsigned int rest_of_page(void *data)
+{
+	return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE);
+}
+
+/**
+ * p9_virtio_close - reclaim resources of a channel
+ * @client: client instance
+ *
+ * This reclaims a channel by freeing its resources and
+ * resetting its inuse flag.
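+ * The channel stays on virtio_chan_list so a later mount can reuse it.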
+ * + */ + +static void p9_virtio_close(struct p9_client *client) +{ + struct virtio_chan *chan = client->trans; + + mutex_lock(&virtio_9p_lock); + if (chan) + chan->inuse = false; + mutex_unlock(&virtio_9p_lock); +} + +/** + * req_done - callback which signals activity from the server + * @vq: virtio queue activity was received on + * + * This notifies us that the server has triggered some activity + * on the virtio channel - most likely a response to request we + * sent. Figure out which requests now have responses and wake up + * those threads. + * + * Bugs: could do with some additional sanity checking, but appears to work. + * + */ + +static void req_done(struct virtqueue *vq) +{ + struct virtio_chan *chan = vq->vdev->priv; + struct p9_fcall *rc; + unsigned int len; + struct p9_req_t *req; + unsigned long flags; + + p9_debug(P9_DEBUG_TRANS, ": request done\n"); + + while (1) { + spin_lock_irqsave(&chan->lock, flags); + rc = virtqueue_get_buf(chan->vq, &len); + if (rc == NULL) { + spin_unlock_irqrestore(&chan->lock, flags); + break; + } + chan->ring_bufs_avail = 1; + spin_unlock_irqrestore(&chan->lock, flags); + /* Wakeup if anyone waiting for VirtIO ring space. */ + wake_up(chan->vc_wq); + p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc); + p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag); + req = p9_tag_lookup(chan->client, rc->tag); + req->status = REQ_STATUS_RCVD; + p9_client_cb(chan->client, req); + } +} + +/** + * pack_sg_list - pack a scatter gather list from a linear buffer + * @sg: scatter/gather list to pack into + * @start: which segment of the sg_list to start at + * @limit: maximum segment to pack data to + * @data: data to pack into scatter/gather list + * @count: amount of data to pack into the scatter/gather list + * + * sg_lists have multiple segments of various sizes. This will pack + * arbitrary data into an existing scatter gather list, segmenting the + * data as necessary within constraints. + * + */ + +static int pack_sg_list(struct scatterlist *sg, int start, + int limit, char *data, int count) +{ + int s; + int index = start; + + while (count) { + s = rest_of_page(data); + if (s > count) + s = count; + BUG_ON(index > limit); + /* Make sure we don't terminate early. */ + sg_unmark_end(&sg[index]); + sg_set_buf(&sg[index++], data, s); + count -= s; + data += s; + } + if (index-start) + sg_mark_end(&sg[index - 1]); + return index-start; +} + +/* We don't currently allow canceling of virtio requests */ +static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req) +{ + return 1; +} + +/** + * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer, + * this takes a list of pages. + * @sg: scatter/gather list to pack into + * @start: which segment of the sg_list to start at + * @pdata: a list of pages to add into sg. + * @nr_pages: number of pages to pack into the scatter/gather list + * @data: data to pack into scatter/gather list + * @count: amount of data to pack into the scatter/gather list + */ +static int +pack_sg_list_p(struct scatterlist *sg, int start, int limit, + struct page **pdata, int nr_pages, char *data, int count) +{ + int i = 0, s; + int data_off; + int index = start; + + BUG_ON(nr_pages > (limit - start)); + /* + * if the first page doesn't start at + * page boundary find the offset + */ + data_off = offset_in_page(data); + while (nr_pages) { + s = rest_of_page(data); + if (s > count) + s = count; + /* Make sure we don't terminate early. 
*/ + sg_unmark_end(&sg[index]); + sg_set_page(&sg[index++], pdata[i++], s, data_off); + data_off = 0; + data += s; + count -= s; + nr_pages--; + } + + if (index-start) + sg_mark_end(&sg[index - 1]); + return index - start; +} + +/** + * p9_virtio_request - issue a request + * @client: client instance issuing the request + * @req: request to be issued + * + */ + +static int +p9_virtio_request(struct p9_client *client, struct p9_req_t *req) +{ + int err; + int in, out, out_sgs, in_sgs; + unsigned long flags; + struct virtio_chan *chan = client->trans; + struct scatterlist *sgs[2]; + + p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n"); + + req->status = REQ_STATUS_SENT; +req_retry: + spin_lock_irqsave(&chan->lock, flags); + + out_sgs = in_sgs = 0; + /* Handle out VirtIO ring buffers */ + out = pack_sg_list(chan->sg, 0, + VIRTQUEUE_NUM, req->tc->sdata, req->tc->size); + if (out) + sgs[out_sgs++] = chan->sg; + + in = pack_sg_list(chan->sg, out, + VIRTQUEUE_NUM, req->rc->sdata, req->rc->capacity); + if (in) + sgs[out_sgs + in_sgs++] = chan->sg + out; + + err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc, + GFP_ATOMIC); + if (err < 0) { + if (err == -ENOSPC) { + chan->ring_bufs_avail = 0; + spin_unlock_irqrestore(&chan->lock, flags); + err = wait_event_interruptible(*chan->vc_wq, + chan->ring_bufs_avail); + if (err == -ERESTARTSYS) + return err; + + p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n"); + goto req_retry; + } else { + spin_unlock_irqrestore(&chan->lock, flags); + p9_debug(P9_DEBUG_TRANS, + "virtio rpc add_sgs returned failure\n"); + return -EIO; + } + } + virtqueue_kick(chan->vq); + spin_unlock_irqrestore(&chan->lock, flags); + + p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n"); + return 0; +} + +static int p9_get_mapped_pages(struct virtio_chan *chan, + struct page **pages, char *data, + int nr_pages, int write, int kern_buf) +{ + int err; + if (!kern_buf) { + /* + * We allow only p9_max_pages pinned. 
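The -ENOSPC path in p9_virtio_request() above is a bounded-ring producer pattern: when the virtqueue is full, the submitter sleeps until the completion callback (req_done) marks ring space available and wakes the queue. A hedged pthread analogue of that handshake, with all names invented for the sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ring_wq = PTHREAD_COND_INITIALIZER;
static int ring_bufs_avail = 1;
static int free_slots = 1; /* one-slot "virtqueue" */

static int try_enqueue(void) /* fails like -ENOSPC when the ring is full */
{
    if (free_slots == 0)
        return -1;
    free_slots--;
    return 0;
}

static void submit(void)
{
    pthread_mutex_lock(&lock);
    while (try_enqueue() < 0) {     /* the goto req_retry loop */
        ring_bufs_avail = 0;
        while (!ring_bufs_avail)    /* stands in for wait_event_interruptible() */
            pthread_cond_wait(&ring_wq, &lock);
    }
    pthread_mutex_unlock(&lock);
    puts("request queued");
}

/* Completion side, mirroring req_done(): free a slot, mark avail, wake. */
static void *completer(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    free_slots++;
    ring_bufs_avail = 1;
    pthread_cond_signal(&ring_wq);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void) /* build with -lpthread */
{
    pthread_t t;

    submit();                           /* takes the only slot */
    pthread_create(&t, NULL, completer, NULL);
    submit();                           /* blocks until completer frees a slot */
    pthread_join(t, NULL);
    return 0;
}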
We wait for the + * other zc request to finish here + */ + if (atomic_read(&vp_pinned) >= chan->p9_max_pages) { + err = wait_event_interruptible(vp_wq, + (atomic_read(&vp_pinned) < chan->p9_max_pages)); + if (err == -ERESTARTSYS) + return err; + } + err = p9_payload_gup(data, &nr_pages, pages, write); + if (err < 0) + return err; + atomic_add(nr_pages, &vp_pinned); + } else { + /* kernel buffer, no need to pin pages */ + int s, index = 0; + int count = nr_pages; + while (nr_pages) { + s = rest_of_page(data); + if (is_vmalloc_addr(data)) + pages[index++] = vmalloc_to_page(data); + else + pages[index++] = kmap_to_page(data); + data += s; + nr_pages--; + } + nr_pages = count; + } + return nr_pages; +} + +/** + * p9_virtio_zc_request - issue a zero copy request + * @client: client instance issuing the request + * @req: request to be issued + * @uidata: user buffer that should be used for zero copy read + * @uodata: user buffer that should be used for zero copy write + * @inlen: read buffer size + * @outlen: write buffer size + * @in_hdr_len: read header size; this is the size of the response protocol data + * + */ +static int +p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req, + char *uidata, char *uodata, int inlen, + int outlen, int in_hdr_len, int kern_buf) +{ + int in, out, err, out_sgs, in_sgs; + unsigned long flags; + int in_nr_pages = 0, out_nr_pages = 0; + struct page **in_pages = NULL, **out_pages = NULL; + struct virtio_chan *chan = client->trans; + struct scatterlist *sgs[4]; + + p9_debug(P9_DEBUG_TRANS, "virtio request\n"); + + if (uodata) { + out_nr_pages = p9_nr_pages(uodata, outlen); + out_pages = kmalloc(sizeof(struct page *) * out_nr_pages, + GFP_NOFS); + if (!out_pages) { + err = -ENOMEM; + goto err_out; + } + out_nr_pages = p9_get_mapped_pages(chan, out_pages, uodata, + out_nr_pages, 0, kern_buf); + if (out_nr_pages < 0) { + err = out_nr_pages; + kfree(out_pages); + out_pages = NULL; + goto err_out; + } + } + if (uidata) { + in_nr_pages = p9_nr_pages(uidata, inlen); + in_pages = kmalloc(sizeof(struct page *) * in_nr_pages, + GFP_NOFS); + if (!in_pages) { + err = -ENOMEM; + goto err_out; + } + in_nr_pages = p9_get_mapped_pages(chan, in_pages, uidata, + in_nr_pages, 1, kern_buf); + if (in_nr_pages < 0) { + err = in_nr_pages; + kfree(in_pages); + in_pages = NULL; + goto err_out; + } + } + req->status = REQ_STATUS_SENT; +req_retry_pinned: + spin_lock_irqsave(&chan->lock, flags); + + out_sgs = in_sgs = 0; + + /* out data */ + out = pack_sg_list(chan->sg, 0, + VIRTQUEUE_NUM, req->tc->sdata, req->tc->size); + + if (out) + sgs[out_sgs++] = chan->sg; + + if (out_pages) { + sgs[out_sgs++] = chan->sg + out; + out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, + out_pages, out_nr_pages, uodata, outlen); + } + + /* + * Take care of in data + * For example, TREAD has 11. + * 11 is the read/write header = PDU Header(7) + IO Size (4). + * Arrange in such a way that server places header in the + * allocated memory and payload onto the user buffer.
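Before any of the pinning above can happen, the zero-copy path needs the page count covering the user buffer; p9_nr_pages() lives in trans_common.c (not in this hunk), but the arithmetic it performs is essentially the following, again assuming a 4096-byte page:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL /* assumed */

/* Pages spanned by [data, data + len): the first-page offset matters. */
static int nr_pages(uintptr_t data, int len)
{
    uintptr_t first = data / PAGE_SIZE;
    uintptr_t last = (data + len - 1) / PAGE_SIZE;

    return (int)(last - first + 1);
}

int main(void)
{
    /* 4096 bytes starting 1 byte into a page still spans two pages. */
    printf("%d\n", nr_pages(4097, 4096)); /* prints 2 */
    return 0;
}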
+ */ + in = pack_sg_list(chan->sg, out, + VIRTQUEUE_NUM, req->rc->sdata, in_hdr_len); + if (in) + sgs[out_sgs + in_sgs++] = chan->sg + out; + + if (in_pages) { + sgs[out_sgs + in_sgs++] = chan->sg + out + in; + in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM, + in_pages, in_nr_pages, uidata, inlen); + } + + BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs)); + err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc, + GFP_ATOMIC); + if (err < 0) { + if (err == -ENOSPC) { + chan->ring_bufs_avail = 0; + spin_unlock_irqrestore(&chan->lock, flags); + err = wait_event_interruptible(*chan->vc_wq, + chan->ring_bufs_avail); + if (err == -ERESTARTSYS) + goto err_out; + + p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n"); + goto req_retry_pinned; + } else { + spin_unlock_irqrestore(&chan->lock, flags); + p9_debug(P9_DEBUG_TRANS, + "virtio rpc add_sgs returned failure\n"); + err = -EIO; + goto err_out; + } + } + virtqueue_kick(chan->vq); + spin_unlock_irqrestore(&chan->lock, flags); + p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n"); + err = wait_event_interruptible(*req->wq, + req->status >= REQ_STATUS_RCVD); + /* + * Non kernel buffers are pinned, unpin them + */ +err_out: + if (!kern_buf) { + if (in_pages) { + p9_release_pages(in_pages, in_nr_pages); + atomic_sub(in_nr_pages, &vp_pinned); + } + if (out_pages) { + p9_release_pages(out_pages, out_nr_pages); + atomic_sub(out_nr_pages, &vp_pinned); + } + /* wakeup anybody waiting for slots to pin pages */ + wake_up(&vp_wq); + } + kfree(in_pages); + kfree(out_pages); + return err; +} + +static ssize_t p9_mount_tag_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct virtio_chan *chan; + struct virtio_device *vdev; + + vdev = dev_to_virtio(dev); + chan = vdev->priv; + + return snprintf(buf, chan->tag_len + 1, "%s", chan->tag); +} + +static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL); + +/** + * p9_virtio_probe - probe for existence of 9P virtio channels + * @vdev: virtio device to probe + * + * This probes for existing virtio channels. + * + */ + +static int p9_virtio_probe(struct virtio_device *vdev) +{ + __u16 tag_len; + char *tag; + int err; + struct virtio_chan *chan; + + chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL); + if (!chan) { + pr_err("Failed to allocate virtio 9P channel\n"); + err = -ENOMEM; + goto fail; + } + + chan->vdev = vdev; + + /* We expect one virtqueue, for requests. 
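Once probe registers the mount_tag attribute shown above, user space can read it to learn which tag to hand to mount(2). A sketch of that lookup; the sysfs path is an assumption and the virtioN device name differs per machine:

#include <stdio.h>

int main(void)
{
    /* Hypothetical device path; adjust virtio0 to the actual device. */
    const char *path = "/sys/bus/virtio/devices/virtio0/mount_tag";
    char tag[64] = {0};
    FILE *f = fopen(path, "r");

    if (!f) {
        perror("fopen");
        return 1;
    }
    if (fgets(tag, sizeof(tag), f))
        /* This tag is what p9_virtio_create() matches against devname,
         * e.g. mount -t 9p -o trans=virtio <tag> /mnt */
        printf("9p mount tag: %s\n", tag);
    fclose(f);
    return 0;
}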
*/ + chan->vq = virtio_find_single_vq(vdev, req_done, "requests"); + if (IS_ERR(chan->vq)) { + err = PTR_ERR(chan->vq); + goto out_free_vq; + } + chan->vq->vdev->priv = chan; + spin_lock_init(&chan->lock); + + sg_init_table(chan->sg, VIRTQUEUE_NUM); + + chan->inuse = false; + if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) { + vdev->config->get(vdev, + offsetof(struct virtio_9p_config, tag_len), + &tag_len, sizeof(tag_len)); + } else { + err = -EINVAL; + goto out_free_vq; + } + tag = kmalloc(tag_len, GFP_KERNEL); + if (!tag) { + err = -ENOMEM; + goto out_free_vq; + } + vdev->config->get(vdev, offsetof(struct virtio_9p_config, tag), + tag, tag_len); + chan->tag = tag; + chan->tag_len = tag_len; + err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); + if (err) { + goto out_free_tag; + } + chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL); + if (!chan->vc_wq) { + err = -ENOMEM; + goto out_free_tag; + } + init_waitqueue_head(chan->vc_wq); + chan->ring_bufs_avail = 1; + /* Ceiling limit to avoid denial of service attacks */ + chan->p9_max_pages = nr_free_buffer_pages()/4; + + mutex_lock(&virtio_9p_lock); + list_add_tail(&chan->chan_list, &virtio_chan_list); + mutex_unlock(&virtio_9p_lock); + + /* Let udev rules use the new mount_tag attribute. */ + kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); + + return 0; + +out_free_tag: + kfree(tag); +out_free_vq: + vdev->config->del_vqs(vdev); + kfree(chan); +fail: + return err; +} + + +/** + * p9_virtio_create - allocate a new virtio channel + * @client: client instance invoking this transport + * @devname: string identifying the channel to connect to (unused) + * @args: args passed from sys_mount() for per-transport options (unused) + * + * This sets up a transport channel for 9p communication. Right now + * we only match the first available channel, but eventually we could look up + * alternate channels by matching devname versus a virtio_config entry. + * We use a simple reference count mechanism to ensure that only a single + * mount has a channel open at a time.
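The matching rule in p9_virtio_create() (just below) requires the device name to equal the channel tag exactly, not merely share a prefix, because chan->tag is a non-null-terminated byte string. A condensed sketch of that comparison:

#include <stdio.h>
#include <string.h>

/* Tag equality as used when matching a channel: same length AND same bytes. */
static int tag_matches(const char *devname, const char *tag, int tag_len)
{
    return strlen(devname) == (size_t)tag_len &&
           !strncmp(devname, tag, tag_len);
}

int main(void)
{
    const char tag[4] = {'s', 'h', 'r', '0'}; /* no terminator, like chan->tag */

    printf("%d\n", tag_matches("shr0", tag, 4)); /* 1: exact match */
    printf("%d\n", tag_matches("shr", tag, 4));  /* 0: prefix only */
    return 0;
}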
+ * + */ + +static int +p9_virtio_create(struct p9_client *client, const char *devname, char *args) +{ + struct virtio_chan *chan; + int ret = -ENOENT; + int found = 0; + + mutex_lock(&virtio_9p_lock); + list_for_each_entry(chan, &virtio_chan_list, chan_list) { + if (!strncmp(devname, chan->tag, chan->tag_len) && + strlen(devname) == chan->tag_len) { + if (!chan->inuse) { + chan->inuse = true; + found = 1; + break; + } + ret = -EBUSY; + } + } + mutex_unlock(&virtio_9p_lock); + + if (!found) { + pr_err("no channels available\n"); + return ret; + } + + client->trans = (void *)chan; + client->status = Connected; + chan->client = client; + + return 0; +} + +/** + * p9_virtio_remove - clean up resources associated with a virtio device + * @vdev: virtio device to remove + * + */ + +static void p9_virtio_remove(struct virtio_device *vdev) +{ + struct virtio_chan *chan = vdev->priv; + + if (chan->inuse) + p9_virtio_close(chan->client); + vdev->config->del_vqs(vdev); + + mutex_lock(&virtio_9p_lock); + list_del(&chan->chan_list); + mutex_unlock(&virtio_9p_lock); + sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); + kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); + kfree(chan->tag); + kfree(chan->vc_wq); + kfree(chan); + +} + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { + VIRTIO_9P_MOUNT_TAG, +}; + +/* The standard "struct lguest_driver": */ +static struct virtio_driver p9_virtio_drv = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = p9_virtio_probe, + .remove = p9_virtio_remove, +}; + +static struct p9_trans_module p9_virtio_trans = { + .name = "virtio", + .create = p9_virtio_create, + .close = p9_virtio_close, + .request = p9_virtio_request, + .zc_request = p9_virtio_zc_request, + .cancel = p9_virtio_cancel, + /* + * We leave one entry for input and one entry for response + * headers. We also skip one more entry to accommodate addresses + * that are not at a page boundary, which can result in an extra + * page in zero copy. + */ + .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3), + .def = 0, + .owner = THIS_MODULE, +}; + +/* The standard init function */ +static int __init p9_virtio_init(void) +{ + INIT_LIST_HEAD(&virtio_chan_list); + + v9fs_register_trans(&p9_virtio_trans); + return register_virtio_driver(&p9_virtio_drv); +} + +static void __exit p9_virtio_cleanup(void) +{ + unregister_virtio_driver(&p9_virtio_drv); + v9fs_unregister_trans(&p9_virtio_trans); +} + +module_init(p9_virtio_init); +module_exit(p9_virtio_cleanup); + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_AUTHOR("Eric Van Hensbergen "); +MODULE_DESCRIPTION("Virtio 9p Transport"); +MODULE_LICENSE("GPL"); diff --git a/addons/9p/src/3.10.108/util.c b/addons/9p/src/3.10.108/util.c new file mode 100644 index 00000000..59f278e6 --- /dev/null +++ b/addons/9p/src/3.10.108/util.c @@ -0,0 +1,141 @@ +/* + * net/9p/util.c + * + * This file contains some helper functions + * + * Copyright (C) 2007 by Latchesar Ionkov + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * struct p9_idpool - per-connection accounting for tag idpool + * @lock: protects the pool + * @pool: idr to allocate tag id from + * + */ + +struct p9_idpool { + spinlock_t lock; + struct idr pool; +}; + +/** + * p9_idpool_create - create a new per-connection id pool + * + */ + +struct p9_idpool *p9_idpool_create(void) +{ + struct p9_idpool *p; + + p = kmalloc(sizeof(struct p9_idpool), GFP_KERNEL); + if (!p) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&p->lock); + idr_init(&p->pool); + + return p; +} +EXPORT_SYMBOL(p9_idpool_create); + +/** + * p9_idpool_destroy - create a new per-connection id pool + * @p: idpool to destroy + */ + +void p9_idpool_destroy(struct p9_idpool *p) +{ + idr_destroy(&p->pool); + kfree(p); +} +EXPORT_SYMBOL(p9_idpool_destroy); + +/** + * p9_idpool_get - allocate numeric id from pool + * @p: pool to allocate from + * + * Bugs: This seems to be an awful generic function, should it be in idr.c with + * the lock included in struct idr? + */ + +int p9_idpool_get(struct p9_idpool *p) +{ + int i; + unsigned long flags; + + idr_preload(GFP_NOFS); + spin_lock_irqsave(&p->lock, flags); + + /* no need to store exactly p, we just need something non-null */ + i = idr_alloc(&p->pool, p, 0, 0, GFP_NOWAIT); + + spin_unlock_irqrestore(&p->lock, flags); + idr_preload_end(); + if (i < 0) + return -1; + + p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", i, p); + return i; +} +EXPORT_SYMBOL(p9_idpool_get); + +/** + * p9_idpool_put - release numeric id from pool + * @id: numeric id which is being released + * @p: pool to release id into + * + * Bugs: This seems to be an awful generic function, should it be in idr.c with + * the lock included in struct idr? + */ + +void p9_idpool_put(int id, struct p9_idpool *p) +{ + unsigned long flags; + + p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", id, p); + + spin_lock_irqsave(&p->lock, flags); + idr_remove(&p->pool, id); + spin_unlock_irqrestore(&p->lock, flags); +} +EXPORT_SYMBOL(p9_idpool_put); + +/** + * p9_idpool_check - check if the specified id is available + * @id: id to check + * @p: pool to check + */ + +int p9_idpool_check(int id, struct p9_idpool *p) +{ + return idr_find(&p->pool, id) != NULL; +} +EXPORT_SYMBOL(p9_idpool_check); + diff --git a/addons/9p/src/3.10.108/v9fs.c b/addons/9p/src/3.10.108/v9fs.c new file mode 100644 index 00000000..58e6cbce --- /dev/null +++ b/addons/9p/src/3.10.108/v9fs.c @@ -0,0 +1,677 @@ +/* + * linux/fs/9p/v9fs.c + * + * This file contains functions assisting in mapping VFS to 9P2000 + * + * Copyright (C) 2004-2008 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "cache.h" + +static DEFINE_SPINLOCK(v9fs_sessionlist_lock); +static LIST_HEAD(v9fs_sessionlist); +struct kmem_cache *v9fs_inode_cache; + +/* + * Option Parsing (code inspired by NFS code) + * NOTE: each transport will parse its own options + */ + +enum { + /* Options that take integer arguments */ + Opt_debug, Opt_dfltuid, Opt_dfltgid, Opt_afid, + /* String options */ + Opt_uname, Opt_remotename, Opt_trans, Opt_cache, Opt_cachetag, + /* Options that take no arguments */ + Opt_nodevmap, + /* Cache options */ + Opt_cache_loose, Opt_fscache, + /* Access options */ + Opt_access, Opt_posixacl, + /* Error token */ + Opt_err +}; + +static const match_table_t tokens = { + {Opt_debug, "debug=%x"}, + {Opt_dfltuid, "dfltuid=%u"}, + {Opt_dfltgid, "dfltgid=%u"}, + {Opt_afid, "afid=%u"}, + {Opt_uname, "uname=%s"}, + {Opt_remotename, "aname=%s"}, + {Opt_nodevmap, "nodevmap"}, + {Opt_cache, "cache=%s"}, + {Opt_cache_loose, "loose"}, + {Opt_fscache, "fscache"}, + {Opt_cachetag, "cachetag=%s"}, + {Opt_access, "access=%s"}, + {Opt_posixacl, "posixacl"}, + {Opt_err, NULL} +}; + +/* Interpret mount options for cache mode */ +static int get_cache_mode(char *s) +{ + int version = -EINVAL; + + if (!strcmp(s, "loose")) { + version = CACHE_LOOSE; + p9_debug(P9_DEBUG_9P, "Cache mode: loose\n"); + } else if (!strcmp(s, "fscache")) { + version = CACHE_FSCACHE; + p9_debug(P9_DEBUG_9P, "Cache mode: fscache\n"); + } else if (!strcmp(s, "none")) { + version = CACHE_NONE; + p9_debug(P9_DEBUG_9P, "Cache mode: none\n"); + } else + pr_info("Unknown Cache mode %s\n", s); + return version; +} + +/** + * v9fs_parse_options - parse mount options into session structure + * @v9ses: existing v9fs session information + * + * Return 0 upon success, -ERRNO upon failure. 
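The parser that follows walks a comma-separated option string with strsep() and dispatches on tokens. A user-space miniature of that loop, handling only two toy options (the option names here mirror the real ones, but the dispatch is simplified and does not use the kernel's match_token()):

#define _DEFAULT_SOURCE /* for strsep()/strdup() on glibc */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    /* Work on a copy, as the kernel code does with kstrdup(). */
    char *tmp = strdup("debug=1,cache=loose,nodevmap");
    char *options = tmp, *p;

    if (!tmp)
        return 1;
    while ((p = strsep(&options, ",")) != NULL) {
        if (!*p)
            continue; /* skip empty fields such as "a,,b" */
        if (!strncmp(p, "cache=", 6))
            printf("cache mode: %s\n", p + 6);
        else if (!strcmp(p, "nodevmap"))
            printf("device mapping disabled\n");
        else
            printf("unhandled option: %s\n", p);
    }
    free(tmp);
    return 0;
}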
+ */ + +static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) +{ + char *options, *tmp_options; + substring_t args[MAX_OPT_ARGS]; + char *p; + int option = 0; + char *s, *e; + int ret = 0; + + /* setup defaults */ + v9ses->afid = ~0; + v9ses->debug = 0; + v9ses->cache = CACHE_NONE; +#ifdef CONFIG_9P_FSCACHE + v9ses->cachetag = NULL; +#endif + + if (!opts) + return 0; + + tmp_options = kstrdup(opts, GFP_KERNEL); + if (!tmp_options) { + ret = -ENOMEM; + goto fail_option_alloc; + } + options = tmp_options; + + while ((p = strsep(&options, ",")) != NULL) { + int token, r; + if (!*p) + continue; + token = match_token(p, tokens, args); + switch (token) { + case Opt_debug: + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + ret = r; + continue; + } + v9ses->debug = option; +#ifdef CONFIG_NET_9P_DEBUG + p9_debug_level = option; +#endif + break; + + case Opt_dfltuid: + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + ret = r; + continue; + } + v9ses->dfltuid = make_kuid(current_user_ns(), option); + if (!uid_valid(v9ses->dfltuid)) { + p9_debug(P9_DEBUG_ERROR, + "uid field, but not a uid?\n"); + ret = -EINVAL; + continue; + } + break; + case Opt_dfltgid: + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + ret = r; + continue; + } + v9ses->dfltgid = make_kgid(current_user_ns(), option); + if (!gid_valid(v9ses->dfltgid)) { + p9_debug(P9_DEBUG_ERROR, + "gid field, but not a gid?\n"); + ret = -EINVAL; + continue; + } + break; + case Opt_afid: + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + ret = r; + continue; + } + v9ses->afid = option; + break; + case Opt_uname: + kfree(v9ses->uname); + v9ses->uname = match_strdup(&args[0]); + if (!v9ses->uname) { + ret = -ENOMEM; + goto free_and_return; + } + break; + case Opt_remotename: + kfree(v9ses->aname); + v9ses->aname = match_strdup(&args[0]); + if (!v9ses->aname) { + ret = -ENOMEM; + goto free_and_return; + } + break; + case Opt_nodevmap: + v9ses->nodev = 1; + break; + case Opt_cache_loose: + v9ses->cache = CACHE_LOOSE; + break; + case Opt_fscache: + v9ses->cache = CACHE_FSCACHE; + break; + case Opt_cachetag: +#ifdef CONFIG_9P_FSCACHE + v9ses->cachetag = match_strdup(&args[0]); +#endif + break; + case Opt_cache: + s = match_strdup(&args[0]); + if (!s) { + ret = -ENOMEM; + p9_debug(P9_DEBUG_ERROR, + "problem allocating copy of cache arg\n"); + goto free_and_return; + } + ret = get_cache_mode(s); + if (ret == -EINVAL) { + kfree(s); + goto free_and_return; + } + + v9ses->cache = ret; + kfree(s); + break; + + case Opt_access: + s = match_strdup(&args[0]); + if (!s) { + ret = -ENOMEM; + p9_debug(P9_DEBUG_ERROR, + "problem allocating copy of access arg\n"); + goto free_and_return; + } + + v9ses->flags &= ~V9FS_ACCESS_MASK; + if (strcmp(s, "user") == 0) + v9ses->flags |= V9FS_ACCESS_USER; + else if (strcmp(s, "any") == 0) + v9ses->flags |= V9FS_ACCESS_ANY; + else if (strcmp(s, "client") == 0) { + v9ses->flags |= V9FS_ACCESS_CLIENT; + } else { + uid_t uid; + v9ses->flags |= V9FS_ACCESS_SINGLE; + uid = simple_strtoul(s, &e, 10); + if (*e != '\0') { + ret = -EINVAL; + pr_info("Unknown access argument %s\n", + s); + kfree(s); + goto free_and_return; + } + v9ses->uid = make_kuid(current_user_ns(), uid); + if (!uid_valid(v9ses->uid)) { + ret = -EINVAL; + pr_info("Uknown uid %s\n", 
s); + kfree(s); + goto free_and_return; + } + } + + kfree(s); + break; + + case Opt_posixacl: +#ifdef CONFIG_9P_FS_POSIX_ACL + v9ses->flags |= V9FS_POSIX_ACL; +#else + p9_debug(P9_DEBUG_ERROR, + "Not defined CONFIG_9P_FS_POSIX_ACL. Ignoring posixacl option\n"); +#endif + break; + + default: + continue; + } + } + +free_and_return: + kfree(tmp_options); +fail_option_alloc: + return ret; +} + +/** + * v9fs_session_init - initialize session + * @v9ses: session information structure + * @dev_name: device being mounted + * @data: options + * + */ + +struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses, + const char *dev_name, char *data) +{ + int retval = -EINVAL; + struct p9_fid *fid; + int rc; + + v9ses->uname = kstrdup(V9FS_DEFUSER, GFP_KERNEL); + if (!v9ses->uname) + return ERR_PTR(-ENOMEM); + + v9ses->aname = kstrdup(V9FS_DEFANAME, GFP_KERNEL); + if (!v9ses->aname) { + kfree(v9ses->uname); + return ERR_PTR(-ENOMEM); + } + init_rwsem(&v9ses->rename_sem); + + rc = bdi_setup_and_register(&v9ses->bdi, "9p", BDI_CAP_MAP_COPY); + if (rc) { + kfree(v9ses->aname); + kfree(v9ses->uname); + return ERR_PTR(rc); + } + + spin_lock(&v9fs_sessionlist_lock); + list_add(&v9ses->slist, &v9fs_sessionlist); + spin_unlock(&v9fs_sessionlist_lock); + + v9ses->uid = INVALID_UID; + v9ses->dfltuid = V9FS_DEFUID; + v9ses->dfltgid = V9FS_DEFGID; + + v9ses->clnt = p9_client_create(dev_name, data); + if (IS_ERR(v9ses->clnt)) { + retval = PTR_ERR(v9ses->clnt); + v9ses->clnt = NULL; + p9_debug(P9_DEBUG_ERROR, "problem initializing 9p client\n"); + goto error; + } + + v9ses->flags = V9FS_ACCESS_USER; + + if (p9_is_proto_dotl(v9ses->clnt)) { + v9ses->flags = V9FS_ACCESS_CLIENT; + v9ses->flags |= V9FS_PROTO_2000L; + } else if (p9_is_proto_dotu(v9ses->clnt)) { + v9ses->flags |= V9FS_PROTO_2000U; + } + + rc = v9fs_parse_options(v9ses, data); + if (rc < 0) { + retval = rc; + goto error; + } + + v9ses->maxdata = v9ses->clnt->msize - P9_IOHDRSZ; + + if (!v9fs_proto_dotl(v9ses) && + ((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)) { + /* + * We support ACCESS_CLIENT only for dotl. + * Fall back to ACCESS_USER + */ + v9ses->flags &= ~V9FS_ACCESS_MASK; + v9ses->flags |= V9FS_ACCESS_USER; + } + /*FIXME !! */ + /* for legacy mode, fall back to V9FS_ACCESS_ANY */ + if (!(v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) && + ((v9ses->flags&V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)) { + + v9ses->flags &= ~V9FS_ACCESS_MASK; + v9ses->flags |= V9FS_ACCESS_ANY; + v9ses->uid = INVALID_UID; + } + if (!v9fs_proto_dotl(v9ses) || + !((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)) { + /* + * We support ACL checks on clinet only if the protocol is + * 9P2000.L and access is V9FS_ACCESS_CLIENT. 
+ */ + v9ses->flags &= ~V9FS_ACL_MASK; + } + + fid = p9_client_attach(v9ses->clnt, NULL, v9ses->uname, INVALID_UID, + v9ses->aname); + if (IS_ERR(fid)) { + retval = PTR_ERR(fid); + fid = NULL; + p9_debug(P9_DEBUG_ERROR, "cannot attach\n"); + goto error; + } + + if ((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_SINGLE) + fid->uid = v9ses->uid; + else + fid->uid = INVALID_UID; + +#ifdef CONFIG_9P_FSCACHE + /* register the session for caching */ + v9fs_cache_session_get_cookie(v9ses); +#endif + + return fid; + +error: + bdi_destroy(&v9ses->bdi); + return ERR_PTR(retval); +} + +/** + * v9fs_session_close - shutdown a session + * @v9ses: session information structure + * + */ + +void v9fs_session_close(struct v9fs_session_info *v9ses) +{ + if (v9ses->clnt) { + p9_client_destroy(v9ses->clnt); + v9ses->clnt = NULL; + } + +#ifdef CONFIG_9P_FSCACHE + if (v9ses->fscache) { + v9fs_cache_session_put_cookie(v9ses); + kfree(v9ses->cachetag); + } +#endif + kfree(v9ses->uname); + kfree(v9ses->aname); + + bdi_destroy(&v9ses->bdi); + + spin_lock(&v9fs_sessionlist_lock); + list_del(&v9ses->slist); + spin_unlock(&v9fs_sessionlist_lock); +} + +/** + * v9fs_session_cancel - terminate a session + * @v9ses: session to terminate + * + * mark transport as disconnected and cancel all pending requests. + */ + +void v9fs_session_cancel(struct v9fs_session_info *v9ses) { + p9_debug(P9_DEBUG_ERROR, "cancel session %p\n", v9ses); + p9_client_disconnect(v9ses->clnt); +} + +/** + * v9fs_session_begin_cancel - Begin terminate of a session + * @v9ses: session to terminate + * + * After this call we don't allow any request other than clunk. + */ + +void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses) +{ + p9_debug(P9_DEBUG_ERROR, "begin cancel session %p\n", v9ses); + p9_client_begin_disconnect(v9ses->clnt); +} + +extern int v9fs_error_init(void); + +static struct kobject *v9fs_kobj; + +#ifdef CONFIG_9P_FSCACHE +/** + * caches_show - list caches associated with a session + * + * Returns the size of buffer written. 
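caches_show() (below) appends one line per session while shrinking the remaining limit after each write. The same accumulate-and-clamp pattern in plain C, writing at buf + count so successive lines land after one another:

#include <stdio.h>

int main(void)
{
    const char *tags[] = { "cache-a", "cache-b", "cache-c" };
    char buf[64];
    int count = 0, limit = (int)sizeof(buf);

    /* Append one line per tag, tracking how much room remains. */
    for (int i = 0; i < 3; i++) {
        int n = snprintf(buf + count, (size_t)limit, "%s\n", tags[i]);

        if (n < 0 || n >= limit)
            break; /* encoding error, or buffer exhausted */
        count += n;
        limit -= n;
    }
    fputs(buf, stdout);
    printf("bytes written: %d\n", count);
    return 0;
}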
+ */ + +static ssize_t caches_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + ssize_t n = 0, count = 0, limit = PAGE_SIZE; + struct v9fs_session_info *v9ses; + + spin_lock(&v9fs_sessionlist_lock); + list_for_each_entry(v9ses, &v9fs_sessionlist, slist) { + if (v9ses->cachetag) { + n = snprintf(buf, limit, "%s\n", v9ses->cachetag); + if (n < 0) { + count = n; + break; + } + + count += n; + limit -= n; + } + } + + spin_unlock(&v9fs_sessionlist_lock); + return count; +} + +static struct kobj_attribute v9fs_attr_cache = __ATTR_RO(caches); +#endif /* CONFIG_9P_FSCACHE */ + +static struct attribute *v9fs_attrs[] = { +#ifdef CONFIG_9P_FSCACHE + &v9fs_attr_cache.attr, +#endif + NULL, +}; + +static struct attribute_group v9fs_attr_group = { + .attrs = v9fs_attrs, +}; + +/** + * v9fs_sysfs_init - Initialize the v9fs sysfs interface + * + */ + +static int v9fs_sysfs_init(void) +{ + v9fs_kobj = kobject_create_and_add("9p", fs_kobj); + if (!v9fs_kobj) + return -ENOMEM; + + if (sysfs_create_group(v9fs_kobj, &v9fs_attr_group)) { + kobject_put(v9fs_kobj); + return -ENOMEM; + } + + return 0; +} + +/** + * v9fs_sysfs_cleanup - Unregister the v9fs sysfs interface + * + */ + +static void v9fs_sysfs_cleanup(void) +{ + sysfs_remove_group(v9fs_kobj, &v9fs_attr_group); + kobject_put(v9fs_kobj); +} + +static void v9fs_inode_init_once(void *foo) +{ + struct v9fs_inode *v9inode = (struct v9fs_inode *)foo; +#ifdef CONFIG_9P_FSCACHE + v9inode->fscache = NULL; +#endif + memset(&v9inode->qid, 0, sizeof(v9inode->qid)); + inode_init_once(&v9inode->vfs_inode); +} + +/** + * v9fs_init_inode_cache - initialize a cache for 9P + * Returns 0 on success. + */ +static int v9fs_init_inode_cache(void) +{ + v9fs_inode_cache = kmem_cache_create("v9fs_inode_cache", + sizeof(struct v9fs_inode), + 0, (SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD), + v9fs_inode_init_once); + if (!v9fs_inode_cache) + return -ENOMEM; + + return 0; +} + +/** + * v9fs_destroy_inode_cache - destroy the cache of 9P inode + * + */ +static void v9fs_destroy_inode_cache(void) +{ + /* + * Make sure all delayed rcu free inodes are flushed before we + * destroy cache. 
+ */ + rcu_barrier(); + kmem_cache_destroy(v9fs_inode_cache); +} + +static int v9fs_cache_register(void) +{ + int ret; + ret = v9fs_init_inode_cache(); + if (ret < 0) + return ret; +#ifdef CONFIG_9P_FSCACHE + return fscache_register_netfs(&v9fs_cache_netfs); +#else + return ret; +#endif +} + +static void v9fs_cache_unregister(void) +{ + v9fs_destroy_inode_cache(); +#ifdef CONFIG_9P_FSCACHE + fscache_unregister_netfs(&v9fs_cache_netfs); +#endif +} + +/** + * init_v9fs - Initialize module + * + */ + +static int __init init_v9fs(void) +{ + int err; + pr_info("Installing v9fs 9p2000 file system support\n"); + /* TODO: Setup list of registered trasnport modules */ + + err = v9fs_cache_register(); + if (err < 0) { + pr_err("Failed to register v9fs for caching\n"); + return err; + } + + err = v9fs_sysfs_init(); + if (err < 0) { + pr_err("Failed to register with sysfs\n"); + goto out_cache; + } + err = register_filesystem(&v9fs_fs_type); + if (err < 0) { + pr_err("Failed to register filesystem\n"); + goto out_sysfs_cleanup; + } + + return 0; + +out_sysfs_cleanup: + v9fs_sysfs_cleanup(); + +out_cache: + v9fs_cache_unregister(); + + return err; +} + +/** + * exit_v9fs - shutdown module + * + */ + +static void __exit exit_v9fs(void) +{ + v9fs_sysfs_cleanup(); + v9fs_cache_unregister(); + unregister_filesystem(&v9fs_fs_type); +} + +module_init(init_v9fs) +module_exit(exit_v9fs) + +MODULE_AUTHOR("Latchesar Ionkov "); +MODULE_AUTHOR("Eric Van Hensbergen "); +MODULE_AUTHOR("Ron Minnich "); +MODULE_LICENSE("GPL"); diff --git a/addons/9p/src/3.10.108/v9fs.h b/addons/9p/src/3.10.108/v9fs.h new file mode 100644 index 00000000..a8e127c8 --- /dev/null +++ b/addons/9p/src/3.10.108/v9fs.h @@ -0,0 +1,227 @@ +/* + * V9FS definitions. + * + * Copyright (C) 2004-2008 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ +#ifndef FS_9P_V9FS_H +#define FS_9P_V9FS_H + +#include + +/** + * enum p9_session_flags - option flags for each 9P session + * @V9FS_PROTO_2000U: whether or not to use 9P2000.u extensions + * @V9FS_PROTO_2000L: whether or not to use 9P2000.l extensions + * @V9FS_ACCESS_SINGLE: only the mounting user can access the hierarchy + * @V9FS_ACCESS_USER: a new attach will be issued for every user (default) + * @V9FS_ACCESS_CLIENT: Just like user, but access check is performed on client. 
+ * @V9FS_ACCESS_ANY: use a single attach for all users + * @V9FS_ACCESS_MASK: bit mask of different ACCESS options + * @V9FS_POSIX_ACL: POSIX ACLs are enforced + * + * Session flags reflect options selected by users at mount time + */ +#define V9FS_ACCESS_ANY (V9FS_ACCESS_SINGLE | \ + V9FS_ACCESS_USER | \ + V9FS_ACCESS_CLIENT) +#define V9FS_ACCESS_MASK V9FS_ACCESS_ANY +#define V9FS_ACL_MASK V9FS_POSIX_ACL + +enum p9_session_flags { + V9FS_PROTO_2000U = 0x01, + V9FS_PROTO_2000L = 0x02, + V9FS_ACCESS_SINGLE = 0x04, + V9FS_ACCESS_USER = 0x08, + V9FS_ACCESS_CLIENT = 0x10, + V9FS_POSIX_ACL = 0x20 +}; + +/* possible values of ->cache */ +/** + * enum p9_cache_modes - user specified cache preferences + * @CACHE_NONE: do not cache data, dentries, or directory contents (default) + * @CACHE_LOOSE: cache data, dentries, and directory contents w/no consistency + * + * eventually support loose, tight, time, session, default always none + */ + +enum p9_cache_modes { + CACHE_NONE, + CACHE_LOOSE, + CACHE_FSCACHE, +}; + +/** + * struct v9fs_session_info - per-instance session information + * @flags: session options of type &p9_session_flags + * @nodev: set to 1 to disable device mapping + * @debug: debug level + * @afid: authentication handle + * @cache: cache mode of type &p9_cache_modes + * @cachetag: the tag of the cache associated with this session + * @fscache: session cookie associated with FS-Cache + * @options: copy of options string given by user + * @uname: string user name to mount hierarchy as + * @aname: mount specifier for remote hierarchy + * @maxdata: maximum data to be sent/recvd per protocol message + * @dfltuid: default numeric userid to mount hierarchy as + * @dfltgid: default numeric groupid to mount hierarchy as + * @uid: if %V9FS_ACCESS_SINGLE, the numeric uid which mounted the hierarchy + * @clnt: reference to 9P network client instantiated for this session + * @slist: reference to list of registered 9p sessions + * + * This structure holds state for each session instance established during + * a sys_mount() . + * + * Bugs: there seems to be a lot of state which could be condensed and/or + * removed. 
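To see how these masks compose: V9FS_ACCESS_ANY is the union of the three access bits, V9FS_ACCESS_MASK equals that union, and the active policy is extracted with a masked compare. A tiny sketch using the same bit values as the enum above:

#include <stdio.h>

/* Same bit values as enum p9_session_flags. */
#define V9FS_PROTO_2000U   0x01
#define V9FS_PROTO_2000L   0x02
#define V9FS_ACCESS_SINGLE 0x04
#define V9FS_ACCESS_USER   0x08
#define V9FS_ACCESS_CLIENT 0x10

#define V9FS_ACCESS_ANY  (V9FS_ACCESS_SINGLE | V9FS_ACCESS_USER | V9FS_ACCESS_CLIENT)
#define V9FS_ACCESS_MASK V9FS_ACCESS_ANY

int main(void)
{
    unsigned char flags = V9FS_PROTO_2000L | V9FS_ACCESS_USER;

    /* Masked compare: exactly the USER bit set within the access field. */
    if ((flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)
        puts("access=user");

    /* Switching policy clears the whole access field first. */
    flags &= (unsigned char)~V9FS_ACCESS_MASK;
    flags |= V9FS_ACCESS_ANY; /* all three bits set means "any" */
    if ((flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_ANY)
        puts("access=any");
    return 0;
}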
+ */ + +struct v9fs_session_info { + /* options */ + unsigned char flags; + unsigned char nodev; + unsigned short debug; + unsigned int afid; + unsigned int cache; +#ifdef CONFIG_9P_FSCACHE + char *cachetag; + struct fscache_cookie *fscache; +#endif + + char *uname; /* user name to mount as */ + char *aname; /* name of remote hierarchy being mounted */ + unsigned int maxdata; /* max data for client interface */ + kuid_t dfltuid; /* default uid/muid for legacy support */ + kgid_t dfltgid; /* default gid for legacy support */ + kuid_t uid; /* if ACCESS_SINGLE, the uid that has access */ + struct p9_client *clnt; /* 9p client */ + struct list_head slist; /* list of sessions registered with v9fs */ + struct backing_dev_info bdi; + struct rw_semaphore rename_sem; +}; + +/* cache_validity flags */ +#define V9FS_INO_INVALID_ATTR 0x01 + +struct v9fs_inode { +#ifdef CONFIG_9P_FSCACHE + spinlock_t fscache_lock; + struct fscache_cookie *fscache; +#endif + struct p9_qid qid; + unsigned int cache_validity; + struct p9_fid *writeback_fid; + struct mutex v_mutex; + struct inode vfs_inode; +}; + +static inline struct v9fs_inode *V9FS_I(const struct inode *inode) +{ + return container_of(inode, struct v9fs_inode, vfs_inode); +} + +struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *, + char *); +extern void v9fs_session_close(struct v9fs_session_info *v9ses); +extern void v9fs_session_cancel(struct v9fs_session_info *v9ses); +extern void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses); +extern struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, + unsigned int flags); +extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d); +extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d); +extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry); +extern void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, + void *p); +extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses, + struct p9_fid *fid, + struct super_block *sb, int new); +extern const struct inode_operations v9fs_dir_inode_operations_dotl; +extern const struct inode_operations v9fs_file_inode_operations_dotl; +extern const struct inode_operations v9fs_symlink_inode_operations_dotl; +extern struct inode *v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, + struct p9_fid *fid, + struct super_block *sb, int new); + +/* other default globals */ +#define V9FS_PORT 564 +#define V9FS_DEFUSER "nobody" +#define V9FS_DEFANAME "" +#define V9FS_DEFUID KUIDT_INIT(-2) +#define V9FS_DEFGID KGIDT_INIT(-2) + +static inline struct v9fs_session_info *v9fs_inode2v9ses(struct inode *inode) +{ + return (inode->i_sb->s_fs_info); +} + +static inline struct v9fs_session_info *v9fs_dentry2v9ses(struct dentry *dentry) +{ + return dentry->d_sb->s_fs_info; +} + +static inline int v9fs_proto_dotu(struct v9fs_session_info *v9ses) +{ + return v9ses->flags & V9FS_PROTO_2000U; +} + +static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses) +{ + return v9ses->flags & V9FS_PROTO_2000L; +} + +/** + * v9fs_get_inode_from_fid - Helper routine to populate an inode by + * issuing a attribute request + * @v9ses: session information + * @fid: fid to issue attribute request for + * @sb: superblock on which to create inode + * + */ +static inline struct inode * +v9fs_get_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, + struct super_block *sb) +{ + if (v9fs_proto_dotl(v9ses)) + return 
v9fs_inode_from_fid_dotl(v9ses, fid, sb, 0); + else + return v9fs_inode_from_fid(v9ses, fid, sb, 0); +} + +/** + * v9fs_get_new_inode_from_fid - Helper routine to populate an inode by + * issuing a attribute request + * @v9ses: session information + * @fid: fid to issue attribute request for + * @sb: superblock on which to create inode + * + */ +static inline struct inode * +v9fs_get_new_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, + struct super_block *sb) +{ + if (v9fs_proto_dotl(v9ses)) + return v9fs_inode_from_fid_dotl(v9ses, fid, sb, 1); + else + return v9fs_inode_from_fid(v9ses, fid, sb, 1); +} + +#endif diff --git a/addons/9p/src/3.10.108/v9fs_vfs.h b/addons/9p/src/3.10.108/v9fs_vfs.h new file mode 100644 index 00000000..dc95a252 --- /dev/null +++ b/addons/9p/src/3.10.108/v9fs_vfs.h @@ -0,0 +1,88 @@ +/* + * V9FS VFS extensions. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ +#ifndef FS_9P_V9FS_VFS_H +#define FS_9P_V9FS_VFS_H + +/* plan9 semantics are that created files are implicitly opened. + * But linux semantics are that you call create, then open. + * the plan9 approach is superior as it provides an atomic + * open. + * we track the create fid here. When the file is opened, if fidopen is + * non-zero, we use the fid and can skip some steps. + * there may be a better way to do this, but I don't know it. + * one BAD way is to clunk the fid on create, then open it again: + * you lose the atomicity of file open + */ + +/* special case: + * unlink calls remove, which is an implicit clunk. So we have to track + * that kind of thing so that we don't try to clunk a dead fid. 
+ */ +#define P9_LOCK_TIMEOUT (30*HZ) + +extern struct file_system_type v9fs_fs_type; +extern const struct address_space_operations v9fs_addr_operations; +extern const struct file_operations v9fs_file_operations; +extern const struct file_operations v9fs_file_operations_dotl; +extern const struct file_operations v9fs_dir_operations; +extern const struct file_operations v9fs_dir_operations_dotl; +extern const struct dentry_operations v9fs_dentry_operations; +extern const struct dentry_operations v9fs_cached_dentry_operations; +extern const struct file_operations v9fs_cached_file_operations; +extern const struct file_operations v9fs_cached_file_operations_dotl; +extern struct kmem_cache *v9fs_inode_cache; + +struct inode *v9fs_alloc_inode(struct super_block *sb); +void v9fs_destroy_inode(struct inode *inode); +struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t); +int v9fs_init_inode(struct v9fs_session_info *v9ses, + struct inode *inode, umode_t mode, dev_t); +void v9fs_evict_inode(struct inode *inode); +ino_t v9fs_qid2ino(struct p9_qid *qid); +void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *); +void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *); +int v9fs_dir_release(struct inode *inode, struct file *filp); +int v9fs_file_open(struct inode *inode, struct file *file); +void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat); +int v9fs_uflags2omode(int uflags, int extended); + +ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64); +ssize_t v9fs_fid_readn(struct p9_fid *, char *, char __user *, u32, u64); +void v9fs_blank_wstat(struct p9_wstat *wstat); +int v9fs_vfs_setattr_dotl(struct dentry *, struct iattr *); +int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, + int datasync); +ssize_t v9fs_file_write_internal(struct inode *, struct p9_fid *, + const char __user *, size_t, loff_t *, int); +int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode); +int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode); +static inline void v9fs_invalidate_inode_attr(struct inode *inode) +{ + struct v9fs_inode *v9inode; + v9inode = V9FS_I(inode); + v9inode->cache_validity |= V9FS_INO_INVALID_ATTR; + return; +} + +int v9fs_open_to_dotl_flags(int flags); +#endif diff --git a/addons/9p/src/3.10.108/vfs_addr.c b/addons/9p/src/3.10.108/vfs_addr.c new file mode 100644 index 00000000..055562c5 --- /dev/null +++ b/addons/9p/src/3.10.108/vfs_addr.c @@ -0,0 +1,353 @@ +/* + * linux/fs/9p/vfs_addr.c + * + * This file contains vfs address (mmap) ops for 9P2000. + * + * Copyright (C) 2005 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "cache.h" +#include "fid.h" + +/** + * v9fs_fid_readpage - read an entire page in from 9P + * + * @fid: fid being read + * @page: structure to page + * + */ +static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page) +{ + int retval; + loff_t offset; + char *buffer; + struct inode *inode; + + inode = page->mapping->host; + p9_debug(P9_DEBUG_VFS, "\n"); + + BUG_ON(!PageLocked(page)); + + retval = v9fs_readpage_from_fscache(inode, page); + if (retval == 0) + return retval; + + buffer = kmap(page); + offset = page_offset(page); + + retval = v9fs_fid_readn(fid, buffer, NULL, PAGE_CACHE_SIZE, offset); + if (retval < 0) { + v9fs_uncache_page(inode, page); + goto done; + } + + memset(buffer + retval, 0, PAGE_CACHE_SIZE - retval); + flush_dcache_page(page); + SetPageUptodate(page); + + v9fs_readpage_to_fscache(inode, page); + retval = 0; + +done: + kunmap(page); + unlock_page(page); + return retval; +} + +/** + * v9fs_vfs_readpage - read an entire page in from 9P + * + * @filp: file being read + * @page: structure to page + * + */ + +static int v9fs_vfs_readpage(struct file *filp, struct page *page) +{ + return v9fs_fid_readpage(filp->private_data, page); +} + +/** + * v9fs_vfs_readpages - read a set of pages from 9P + * + * @filp: file being read + * @mapping: the address space + * @pages: list of pages to read + * @nr_pages: count of pages to read + * + */ + +static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages) +{ + int ret = 0; + struct inode *inode; + + inode = mapping->host; + p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp); + + ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages); + if (ret == 0) + return ret; + + ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp); + p9_debug(P9_DEBUG_VFS, " = %d\n", ret); + return ret; +} + +/** + * v9fs_release_page - release the private state associated with a page + * + * Returns 1 if the page can be released, false otherwise. 
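v9fs_fid_readpage() above zero-fills whatever part of the page a short read did not cover before marking it uptodate, so stale data never leaks into the page cache. The same tail-fill in miniature, with the page size assumed to be 4096:

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096 /* assumed */

int main(void)
{
    char page[PAGE_SIZE];
    int retval = 1500; /* pretend the 9p read returned 1500 bytes */

    memset(page, 'x', sizeof(page));              /* stale contents */
    /* ... a real read would fill page[0..retval) here ... */
    memset(page + retval, 0, PAGE_SIZE - retval); /* the tail-fill */

    printf("byte %d is %d, byte %d is %d\n",
           retval - 1, page[retval - 1], retval, page[retval]);
    return 0;
}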
+ */ + +static int v9fs_release_page(struct page *page, gfp_t gfp) +{ + if (PagePrivate(page)) + return 0; + return v9fs_fscache_release_page(page, gfp); +} + +/** + * v9fs_invalidate_page - Invalidate a page completely or partially + * + * @page: structure to page + * @offset: offset in the page + */ + +static void v9fs_invalidate_page(struct page *page, unsigned long offset) +{ + /* + * If called with zero offset, we should release + * the private state associated with the page + */ + if (offset == 0) + v9fs_fscache_invalidate_page(page); +} + +static int v9fs_vfs_writepage_locked(struct page *page) +{ + char *buffer; + int retval, len; + loff_t offset, size; + mm_segment_t old_fs; + struct v9fs_inode *v9inode; + struct inode *inode = page->mapping->host; + + v9inode = V9FS_I(inode); + size = i_size_read(inode); + if (page->index == size >> PAGE_CACHE_SHIFT) + len = size & ~PAGE_CACHE_MASK; + else + len = PAGE_CACHE_SIZE; + + set_page_writeback(page); + + buffer = kmap(page); + offset = page_offset(page); + + old_fs = get_fs(); + set_fs(get_ds()); + /* We should have writeback_fid always set */ + BUG_ON(!v9inode->writeback_fid); + + retval = v9fs_file_write_internal(inode, + v9inode->writeback_fid, + (__force const char __user *)buffer, + len, &offset, 0); + if (retval > 0) + retval = 0; + + set_fs(old_fs); + kunmap(page); + end_page_writeback(page); + return retval; +} + +static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc) +{ + int retval; + + retval = v9fs_vfs_writepage_locked(page); + if (retval < 0) { + if (retval == -EAGAIN) { + redirty_page_for_writepage(wbc, page); + retval = 0; + } else { + SetPageError(page); + mapping_set_error(page->mapping, retval); + } + } else + retval = 0; + + unlock_page(page); + return retval; +} + +/** + * v9fs_launder_page - Writeback a dirty page + * Returns 0 on success. + */ + +static int v9fs_launder_page(struct page *page) +{ + int retval; + struct inode *inode = page->mapping->host; + + v9fs_fscache_wait_on_page_write(inode, page); + if (clear_page_dirty_for_io(page)) { + retval = v9fs_vfs_writepage_locked(page); + if (retval) + return retval; + } + return 0; +} + +/** + * v9fs_direct_IO - 9P address space operation for direct I/O + * @rw: direction (read or write) + * @iocb: target I/O control block + * @iov: array of vectors that define I/O buffer + * @pos: offset in file to begin the operation + * @nr_segs: size of iovec array + * + * The presence of v9fs_direct_IO() in the address space ops vector + * allows open() O_DIRECT flags which would have failed otherwise. + * + * In the non-cached mode, we shunt off direct read and write requests before + * the VFS gets them, so this method should never be called. + * + * Direct IO is not 'yet' supported in the cached mode. Hence when + * this routine is called through generic_file_aio_read(), the read/write fails + * with an error.
+ * + */ +static ssize_t +v9fs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, + loff_t pos, unsigned long nr_segs) +{ + /* + * FIXME + * Now that we do caching with cache mode enabled, We need + * to support direct IO + */ + p9_debug(P9_DEBUG_VFS, "v9fs_direct_IO: v9fs_direct_IO (%s) off/no(%lld/%lu) EINVAL\n", + iocb->ki_filp->f_path.dentry->d_name.name, + (long long)pos, nr_segs); + + return -EINVAL; +} + +static int v9fs_write_begin(struct file *filp, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) +{ + int retval = 0; + struct page *page; + struct v9fs_inode *v9inode; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + struct inode *inode = mapping->host; + + v9inode = V9FS_I(inode); +start: + page = grab_cache_page_write_begin(mapping, index, flags); + if (!page) { + retval = -ENOMEM; + goto out; + } + BUG_ON(!v9inode->writeback_fid); + if (PageUptodate(page)) + goto out; + + if (len == PAGE_CACHE_SIZE) + goto out; + + retval = v9fs_fid_readpage(v9inode->writeback_fid, page); + page_cache_release(page); + if (!retval) + goto start; +out: + *pagep = page; + return retval; +} + +static int v9fs_write_end(struct file *filp, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) +{ + loff_t last_pos = pos + copied; + struct inode *inode = page->mapping->host; + + if (unlikely(copied < len)) { + /* + * zero out the rest of the area + */ + unsigned from = pos & (PAGE_CACHE_SIZE - 1); + + zero_user(page, from + copied, len - copied); + flush_dcache_page(page); + } + + if (!PageUptodate(page)) + SetPageUptodate(page); + /* + * No need to use i_size_read() here, the i_size + * cannot change under us because we hold the i_mutex. + */ + if (last_pos > inode->i_size) { + inode_add_bytes(inode, last_pos - inode->i_size); + i_size_write(inode, last_pos); + } + set_page_dirty(page); + unlock_page(page); + page_cache_release(page); + + return copied; +} + + +const struct address_space_operations v9fs_addr_operations = { + .readpage = v9fs_vfs_readpage, + .readpages = v9fs_vfs_readpages, + .set_page_dirty = __set_page_dirty_nobuffers, + .writepage = v9fs_vfs_writepage, + .write_begin = v9fs_write_begin, + .write_end = v9fs_write_end, + .releasepage = v9fs_release_page, + .invalidatepage = v9fs_invalidate_page, + .launder_page = v9fs_launder_page, + .direct_IO = v9fs_direct_IO, +}; diff --git a/addons/9p/src/3.10.108/vfs_dentry.c b/addons/9p/src/3.10.108/vfs_dentry.c new file mode 100644 index 00000000..f039b104 --- /dev/null +++ b/addons/9p/src/3.10.108/vfs_dentry.c @@ -0,0 +1,139 @@ +/* + * linux/fs/9p/vfs_dentry.c + * + * This file contians vfs dentry ops for the 9P2000 protocol. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" + +/** + * v9fs_dentry_delete - called when dentry refcount equals 0 + * @dentry: dentry in question + * + * By returning 1 here we should remove caching of unused + * dentry components. + * + */ + +static int v9fs_dentry_delete(const struct dentry *dentry) +{ + p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n", + dentry->d_name.name, dentry); + + return 1; +} + +/** + * v9fs_cached_dentry_delete - called when dentry refcount equals 0 + * @dentry: dentry in question + * + */ +static int v9fs_cached_dentry_delete(const struct dentry *dentry) +{ + p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n", + dentry->d_name.name, dentry); + + /* Don't cache negative dentries */ + if (!dentry->d_inode) + return 1; + return 0; +} + +/** + * v9fs_dentry_release - called when dentry is going to be freed + * @dentry: dentry that is being released + * + */ + +static void v9fs_dentry_release(struct dentry *dentry) +{ + struct hlist_node *p, *n; + p9_debug(P9_DEBUG_VFS, " dentry: %s (%p)\n", + dentry->d_name.name, dentry); + hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata) + p9_client_clunk(hlist_entry(p, struct p9_fid, dlist)); + dentry->d_fsdata = NULL; +} + +static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags) +{ + struct p9_fid *fid; + struct inode *inode; + struct v9fs_inode *v9inode; + + if (flags & LOOKUP_RCU) + return -ECHILD; + + inode = dentry->d_inode; + if (!inode) + goto out_valid; + + v9inode = V9FS_I(inode); + if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) { + int retval; + struct v9fs_session_info *v9ses; + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + v9ses = v9fs_inode2v9ses(inode); + if (v9fs_proto_dotl(v9ses)) + retval = v9fs_refresh_inode_dotl(fid, inode); + else + retval = v9fs_refresh_inode(fid, inode); + if (retval == -ENOENT) + return 0; + if (retval < 0) + return retval; + } +out_valid: + return 1; +} + +const struct dentry_operations v9fs_cached_dentry_operations = { + .d_revalidate = v9fs_lookup_revalidate, + .d_weak_revalidate = v9fs_lookup_revalidate, + .d_delete = v9fs_cached_dentry_delete, + .d_release = v9fs_dentry_release, +}; + +const struct dentry_operations v9fs_dentry_operations = { + .d_delete = v9fs_dentry_delete, + .d_release = v9fs_dentry_release, +}; diff --git a/addons/9p/src/3.10.108/vfs_dir.c b/addons/9p/src/3.10.108/vfs_dir.c new file mode 100644 index 00000000..be1e34ad --- /dev/null +++ b/addons/9p/src/3.10.108/vfs_dir.c @@ -0,0 +1,269 @@ +/* + * linux/fs/9p/vfs_dir.c + * + * This file contains vfs directory ops for the 9P2000 protocol. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" + +/** + * struct p9_rdir - readdir accounting + * @mutex: mutex protecting readdir + * @head: start offset of current dirread buffer + * @tail: end offset of current dirread buffer + * @buf: dirread buffer + * + * private structure for keeping track of readdir + * allocated on demand + */ + +struct p9_rdir { + int head; + int tail; + uint8_t buf[]; +}; + +/** + * dt_type - return file type + * @mistat: mistat structure + * + */ + +static inline int dt_type(struct p9_wstat *mistat) +{ + unsigned long perm = mistat->mode; + int rettype = DT_REG; + + if (perm & P9_DMDIR) + rettype = DT_DIR; + if (perm & P9_DMSYMLINK) + rettype = DT_LNK; + + return rettype; +} + +static void p9stat_init(struct p9_wstat *stbuf) +{ + stbuf->name = NULL; + stbuf->uid = NULL; + stbuf->gid = NULL; + stbuf->muid = NULL; + stbuf->extension = NULL; +} + +/** + * v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir + * @filp: opened file structure + * @buflen: Length in bytes of buffer to allocate + * + */ + +static struct p9_rdir *v9fs_alloc_rdir_buf(struct file *filp, int buflen) +{ + struct p9_fid *fid = filp->private_data; + if (!fid->rdir) + fid->rdir = kzalloc(sizeof(struct p9_rdir) + buflen, GFP_KERNEL); + return fid->rdir; +} + +/** + * v9fs_dir_readdir - read a directory + * @filp: opened file structure + * @dirent: directory structure ??? + * @filldir: function to populate directory structure ??? 
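Two details of the readdir path below are worth illustrating: dt_type() maps 9P permission bits to dirent types, and the legacy readdir advances through its buffer in steps of reclen = st.size + 2, because the wire format prefixes each stat record with a 2-byte size field not counted in st.size itself. A small sketch of both rules; the P9_DM* values are taken from the 9P2000.u protocol definitions:

#define _DEFAULT_SOURCE /* for DT_* constants in <dirent.h> on glibc */
#include <dirent.h>
#include <stdio.h>

/* 9P2000.u mode bits, as tested by dt_type() above. */
#define P9_DMDIR     0x80000000u
#define P9_DMSYMLINK 0x02000000u

static int dt_type(unsigned long perm)
{
    int rettype = DT_REG;

    if (perm & P9_DMDIR)
        rettype = DT_DIR;
    if (perm & P9_DMSYMLINK)
        rettype = DT_LNK;
    return rettype;
}

int main(void)
{
    printf("dir -> %d, symlink -> %d, file -> %d\n",
           dt_type(P9_DMDIR | 0755), dt_type(P9_DMSYMLINK | 0777),
           dt_type(0644));

    /* Record stepping: a hypothetical 61-byte stat record occupies
     * 61 + 2 bytes in the buffer because of the size prefix. */
    unsigned short st_size = 61;
    int reclen = st_size + 2;

    printf("next record starts %d bytes further on\n", reclen);
    return 0;
}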
+ * + */ + +static int v9fs_dir_readdir(struct file *filp, void *dirent, filldir_t filldir) +{ + int over; + struct p9_wstat st; + int err = 0; + struct p9_fid *fid; + int buflen; + int reclen = 0; + struct p9_rdir *rdir; + + p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); + fid = filp->private_data; + + buflen = fid->clnt->msize - P9_IOHDRSZ; + + rdir = v9fs_alloc_rdir_buf(filp, buflen); + if (!rdir) + return -ENOMEM; + + while (1) { + if (rdir->tail == rdir->head) { + err = v9fs_file_readn(filp, rdir->buf, NULL, + buflen, filp->f_pos); + if (err <= 0) + return err; + + rdir->head = 0; + rdir->tail = err; + } + while (rdir->head < rdir->tail) { + p9stat_init(&st); + err = p9stat_read(fid->clnt, rdir->buf + rdir->head, + rdir->tail - rdir->head, &st); + if (err) { + p9_debug(P9_DEBUG_VFS, "returned %d\n", err); + p9stat_free(&st); + return -EIO; + } + reclen = st.size+2; + + over = filldir(dirent, st.name, strlen(st.name), + filp->f_pos, v9fs_qid2ino(&st.qid), dt_type(&st)); + + p9stat_free(&st); + + if (over) + return 0; + + rdir->head += reclen; + filp->f_pos += reclen; + } + } +} + +/** + * v9fs_dir_readdir_dotl - read a directory + * @filp: opened file structure + * @dirent: buffer to fill dirent structures + * @filldir: function to populate dirent structures + * + */ +static int v9fs_dir_readdir_dotl(struct file *filp, void *dirent, + filldir_t filldir) +{ + int over; + int err = 0; + struct p9_fid *fid; + int buflen; + struct p9_rdir *rdir; + struct p9_dirent curdirent; + u64 oldoffset = 0; + + p9_debug(P9_DEBUG_VFS, "name %s\n", filp->f_path.dentry->d_name.name); + fid = filp->private_data; + + buflen = fid->clnt->msize - P9_READDIRHDRSZ; + + rdir = v9fs_alloc_rdir_buf(filp, buflen); + if (!rdir) + return -ENOMEM; + + while (1) { + if (rdir->tail == rdir->head) { + err = p9_client_readdir(fid, rdir->buf, buflen, + filp->f_pos); + if (err <= 0) + return err; + + rdir->head = 0; + rdir->tail = err; + } + + while (rdir->head < rdir->tail) { + + err = p9dirent_read(fid->clnt, rdir->buf + rdir->head, + rdir->tail - rdir->head, + &curdirent); + if (err < 0) { + p9_debug(P9_DEBUG_VFS, "returned %d\n", err); + return -EIO; + } + + /* d_off in dirent structure tracks the offset into + * the next dirent in the dir. However, filldir() + * expects offset into the current dirent. Hence + * while calling filldir send the offset from the + * previous dirent structure. + */ + over = filldir(dirent, curdirent.d_name, + strlen(curdirent.d_name), + oldoffset, v9fs_qid2ino(&curdirent.qid), + curdirent.d_type); + oldoffset = curdirent.d_off; + + if (over) + return 0; + + filp->f_pos = curdirent.d_off; + rdir->head += err; + } + } +} + + +/** + * v9fs_dir_release - close a directory + * @inode: inode of the directory + * @filp: file pointer to a directory + * + */ + +int v9fs_dir_release(struct inode *inode, struct file *filp) +{ + struct p9_fid *fid; + + fid = filp->private_data; + p9_debug(P9_DEBUG_VFS, "inode: %p filp: %p fid: %d\n", + inode, filp, fid ? 
fid->fid : -1);
+	if (fid)
+		p9_client_clunk(fid);
+	return 0;
+}
+
+const struct file_operations v9fs_dir_operations = {
+	.read = generic_read_dir,
+	.llseek = generic_file_llseek,
+	.readdir = v9fs_dir_readdir,
+	.open = v9fs_file_open,
+	.release = v9fs_dir_release,
+};
+
+const struct file_operations v9fs_dir_operations_dotl = {
+	.read = generic_read_dir,
+	.llseek = generic_file_llseek,
+	.readdir = v9fs_dir_readdir_dotl,
+	.open = v9fs_file_open,
+	.release = v9fs_dir_release,
+	.fsync = v9fs_file_fsync_dotl,
+};
diff --git a/addons/9p/src/3.10.108/vfs_file.c b/addons/9p/src/3.10.108/vfs_file.c
new file mode 100644
index 00000000..d384a8b7
--- /dev/null
+++ b/addons/9p/src/3.10.108/vfs_file.c
@@ -0,0 +1,790 @@
+/*
+ * linux/fs/9p/vfs_file.c
+ *
+ * This file contains vfs file ops for 9P2000.
+ *
+ * Copyright (C) 2004 by Eric Van Hensbergen
+ * Copyright (C) 2002 by Ron Minnich
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to:
+ * Free Software Foundation
+ * 51 Franklin Street, Fifth Floor
+ * Boston, MA 02111-1301 USA
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "v9fs.h"
+#include "v9fs_vfs.h"
+#include "fid.h"
+#include "cache.h"
+
+static const struct vm_operations_struct v9fs_file_vm_ops;
+
+/**
+ * v9fs_file_open - open a file (or directory)
+ * @inode: inode to be opened
+ * @file: file being opened
+ *
+ */
+
+int v9fs_file_open(struct inode *inode, struct file *file)
+{
+	int err;
+	struct v9fs_inode *v9inode;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *fid;
+	int omode;
+
+	p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file);
+	v9inode = V9FS_I(inode);
+	v9ses = v9fs_inode2v9ses(inode);
+	if (v9fs_proto_dotl(v9ses))
+		omode = v9fs_open_to_dotl_flags(file->f_flags);
+	else
+		omode = v9fs_uflags2omode(file->f_flags,
+					  v9fs_proto_dotu(v9ses));
+	fid = file->private_data;
+	if (!fid) {
+		fid = v9fs_fid_clone(file->f_path.dentry);
+		if (IS_ERR(fid))
+			return PTR_ERR(fid);
+
+		err = p9_client_open(fid, omode);
+		if (err < 0) {
+			p9_client_clunk(fid);
+			return err;
+		}
+		if ((file->f_flags & O_APPEND) &&
+		    (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)))
+			generic_file_llseek(file, 0, SEEK_END);
+	}
+
+	file->private_data = fid;
+	mutex_lock(&v9inode->v_mutex);
+	if (v9ses->cache && !v9inode->writeback_fid &&
+	    ((file->f_flags & O_ACCMODE) != O_RDONLY)) {
+		/*
+		 * clone a fid and add it to writeback_fid
+		 * we do it during open time instead of
+		 * page dirty time via write_begin/page_mkwrite
+		 * because we want write after unlink usecase
+		 * to work.
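+		 * The cloned fid stays attached to the inode and is
+		 * only clunked when v9fs_evict_inode() tears the
+		 * inode down.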
+		 */
+		fid = v9fs_writeback_fid(file->f_path.dentry);
+		if (IS_ERR(fid)) {
+			err = PTR_ERR(fid);
+			mutex_unlock(&v9inode->v_mutex);
+			goto out_error;
+		}
+		v9inode->writeback_fid = (void *) fid;
+	}
+	mutex_unlock(&v9inode->v_mutex);
+#ifdef CONFIG_9P_FSCACHE
+	if (v9ses->cache)
+		v9fs_cache_inode_set_cookie(inode, file);
+#endif
+	return 0;
+out_error:
+	p9_client_clunk(file->private_data);
+	file->private_data = NULL;
+	return err;
+}
+
+/**
+ * v9fs_file_lock - lock a file (or directory)
+ * @filp: file to be locked
+ * @cmd: lock command
+ * @fl: file lock structure
+ *
+ * Bugs: this looks like a local-only lock, we should extend into 9P
+ * by using open exclusive
+ */
+
+static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+	int res = 0;
+	struct inode *inode = file_inode(filp);
+
+	p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl);
+
+	/* No mandatory locks */
+	if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
+		return -ENOLCK;
+
+	if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) {
+		filemap_write_and_wait(inode->i_mapping);
+		invalidate_mapping_pages(&inode->i_data, 0, -1);
+	}
+
+	return res;
+}
+
+static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl)
+{
+	struct p9_flock flock;
+	struct p9_fid *fid;
+	uint8_t status;
+	int res = 0;
+	unsigned char fl_type;
+
+	fid = filp->private_data;
+	BUG_ON(fid == NULL);
+
+	if ((fl->fl_flags & FL_POSIX) != FL_POSIX)
+		BUG();
+
+	res = posix_lock_file_wait(filp, fl);
+	if (res < 0)
+		goto out;
+
+	/* convert posix lock to p9 tlock args */
+	memset(&flock, 0, sizeof(flock));
+	/* map the lock type */
+	switch (fl->fl_type) {
+	case F_RDLCK:
+		flock.type = P9_LOCK_TYPE_RDLCK;
+		break;
+	case F_WRLCK:
+		flock.type = P9_LOCK_TYPE_WRLCK;
+		break;
+	case F_UNLCK:
+		flock.type = P9_LOCK_TYPE_UNLCK;
+		break;
+	}
+	flock.start = fl->fl_start;
+	if (fl->fl_end == OFFSET_MAX)
+		flock.length = 0;
+	else
+		flock.length = fl->fl_end - fl->fl_start + 1;
+	flock.proc_id = fl->fl_pid;
+	flock.client_id = utsname()->nodename;
+	if (IS_SETLKW(cmd))
+		flock.flags = P9_LOCK_FLAGS_BLOCK;
+
+	/*
+	 * if it's a blocking request and we get P9_LOCK_BLOCKED as the
+	 * status for the lock request, keep on trying
+	 */
+	for (;;) {
+		res = p9_client_lock_dotl(fid, &flock, &status);
+		if (res < 0)
+			break;
+
+		if (status != P9_LOCK_BLOCKED)
+			break;
+		if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd))
+			break;
+		if (schedule_timeout_interruptible(P9_LOCK_TIMEOUT) != 0)
+			break;
+	}
+
+	/* map 9p status to VFS status */
+	switch (status) {
+	case P9_LOCK_SUCCESS:
+		res = 0;
+		break;
+	case P9_LOCK_BLOCKED:
+		res = -EAGAIN;
+		break;
+	case P9_LOCK_ERROR:
+	case P9_LOCK_GRACE:
+		res = -ENOLCK;
+		break;
+	default:
+		BUG();
+	}
+
+	/*
+	 * in case the server returned an error for the lock request,
+	 * revert it locally
+	 */
+	if (res < 0 && fl->fl_type != F_UNLCK) {
+		fl_type = fl->fl_type;
+		fl->fl_type = F_UNLCK;
+		res = posix_lock_file_wait(filp, fl);
+		fl->fl_type = fl_type;
+	}
+out:
+	return res;
+}
+
+static int v9fs_file_getlock(struct file *filp, struct file_lock *fl)
+{
+	struct p9_getlock glock;
+	struct p9_fid *fid;
+	int res = 0;
+
+	fid = filp->private_data;
+	BUG_ON(fid == NULL);
+
+	posix_test_lock(filp, fl);
+	/*
+	 * if we have a conflicting lock locally, no need to validate
+	 * with server
+	 */
+	if (fl->fl_type != F_UNLCK)
+		return res;
+
+	/* convert posix lock to p9 tgetlock args */
+	memset(&glock, 0, sizeof(glock));
+	glock.type = P9_LOCK_TYPE_UNLCK;
+	glock.start = fl->fl_start;
+	if (fl->fl_end
== OFFSET_MAX) + glock.length = 0; + else + glock.length = fl->fl_end - fl->fl_start + 1; + glock.proc_id = fl->fl_pid; + glock.client_id = utsname()->nodename; + + res = p9_client_getlock_dotl(fid, &glock); + if (res < 0) + return res; + /* map 9p lock type to os lock type */ + switch (glock.type) { + case P9_LOCK_TYPE_RDLCK: + fl->fl_type = F_RDLCK; + break; + case P9_LOCK_TYPE_WRLCK: + fl->fl_type = F_WRLCK; + break; + case P9_LOCK_TYPE_UNLCK: + fl->fl_type = F_UNLCK; + break; + } + if (glock.type != P9_LOCK_TYPE_UNLCK) { + fl->fl_start = glock.start; + if (glock.length == 0) + fl->fl_end = OFFSET_MAX; + else + fl->fl_end = glock.start + glock.length - 1; + fl->fl_pid = glock.proc_id; + } + return res; +} + +/** + * v9fs_file_lock_dotl - lock a file (or directory) + * @filp: file to be locked + * @cmd: lock command + * @fl: file lock structure + * + */ + +static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl) +{ + struct inode *inode = file_inode(filp); + int ret = -ENOLCK; + + p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n", + filp, cmd, fl, filp->f_path.dentry->d_name.name); + + /* No mandatory locks */ + if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) + goto out_err; + + if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { + filemap_write_and_wait(inode->i_mapping); + invalidate_mapping_pages(&inode->i_data, 0, -1); + } + + if (IS_SETLK(cmd) || IS_SETLKW(cmd)) + ret = v9fs_file_do_lock(filp, cmd, fl); + else if (IS_GETLK(cmd)) + ret = v9fs_file_getlock(filp, fl); + else + ret = -EINVAL; +out_err: + return ret; +} + +/** + * v9fs_file_flock_dotl - lock a file + * @filp: file to be locked + * @cmd: lock command + * @fl: file lock structure + * + */ + +static int v9fs_file_flock_dotl(struct file *filp, int cmd, + struct file_lock *fl) +{ + struct inode *inode = file_inode(filp); + int ret = -ENOLCK; + + p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %s\n", + filp, cmd, fl, filp->f_path.dentry->d_name.name); + + /* No mandatory locks */ + if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) + goto out_err; + + if (!(fl->fl_flags & FL_FLOCK)) + goto out_err; + + if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { + filemap_write_and_wait(inode->i_mapping); + invalidate_mapping_pages(&inode->i_data, 0, -1); + } + /* Convert flock to posix lock */ + fl->fl_owner = (fl_owner_t)filp; + fl->fl_start = 0; + fl->fl_end = OFFSET_MAX; + fl->fl_flags |= FL_POSIX; + fl->fl_flags ^= FL_FLOCK; + + if (IS_SETLK(cmd) | IS_SETLKW(cmd)) + ret = v9fs_file_do_lock(filp, cmd, fl); + else + ret = -EINVAL; +out_err: + return ret; +} + +/** + * v9fs_fid_readn - read from a fid + * @fid: fid to read + * @data: data buffer to read data into + * @udata: user data buffer to read data into + * @count: size of buffer + * @offset: offset at which to read data + * + */ +ssize_t +v9fs_fid_readn(struct p9_fid *fid, char *data, char __user *udata, u32 count, + u64 offset) +{ + int n, total, size; + + p9_debug(P9_DEBUG_VFS, "fid %d offset %llu count %d\n", + fid->fid, (long long unsigned)offset, count); + n = 0; + total = 0; + size = fid->iounit ? 
fid->iounit : fid->clnt->msize - P9_IOHDRSZ; + do { + n = p9_client_read(fid, data, udata, offset, count); + if (n <= 0) + break; + + if (data) + data += n; + if (udata) + udata += n; + + offset += n; + count -= n; + total += n; + } while (count > 0 && n == size); + + if (n < 0) + total = n; + + return total; +} + +/** + * v9fs_file_readn - read from a file + * @filp: file pointer to read + * @data: data buffer to read data into + * @udata: user data buffer to read data into + * @count: size of buffer + * @offset: offset at which to read data + * + */ +ssize_t +v9fs_file_readn(struct file *filp, char *data, char __user *udata, u32 count, + u64 offset) +{ + return v9fs_fid_readn(filp->private_data, data, udata, count, offset); +} + +/** + * v9fs_file_read - read from a file + * @filp: file pointer to read + * @udata: user data buffer to read data into + * @count: size of buffer + * @offset: offset at which to read data + * + */ + +static ssize_t +v9fs_file_read(struct file *filp, char __user *udata, size_t count, + loff_t * offset) +{ + int ret; + struct p9_fid *fid; + size_t size; + + p9_debug(P9_DEBUG_VFS, "count %zu offset %lld\n", count, *offset); + fid = filp->private_data; + + size = fid->iounit ? fid->iounit : fid->clnt->msize - P9_IOHDRSZ; + if (count > size) + ret = v9fs_file_readn(filp, NULL, udata, count, *offset); + else + ret = p9_client_read(fid, NULL, udata, *offset, count); + + if (ret > 0) + *offset += ret; + + return ret; +} + +ssize_t +v9fs_file_write_internal(struct inode *inode, struct p9_fid *fid, + const char __user *data, size_t count, + loff_t *offset, int invalidate) +{ + int n; + loff_t i_size; + size_t total = 0; + struct p9_client *clnt; + loff_t origin = *offset; + unsigned long pg_start, pg_end; + + p9_debug(P9_DEBUG_VFS, "data %p count %d offset %x\n", + data, (int)count, (int)*offset); + + clnt = fid->clnt; + do { + n = p9_client_write(fid, NULL, data+total, origin+total, count); + if (n <= 0) + break; + count -= n; + total += n; + } while (count > 0); + + if (invalidate && (total > 0)) { + pg_start = origin >> PAGE_CACHE_SHIFT; + pg_end = (origin + total - 1) >> PAGE_CACHE_SHIFT; + if (inode->i_mapping && inode->i_mapping->nrpages) + invalidate_inode_pages2_range(inode->i_mapping, + pg_start, pg_end); + *offset += total; + i_size = i_size_read(inode); + if (*offset > i_size) { + inode_add_bytes(inode, *offset - i_size); + i_size_write(inode, *offset); + } + } + if (n < 0) + return n; + + return total; +} + +/** + * v9fs_file_write - write to a file + * @filp: file pointer to write + * @data: data buffer to write data from + * @count: size of buffer + * @offset: offset at which to write data + * + */ +static ssize_t +v9fs_file_write(struct file *filp, const char __user * data, + size_t count, loff_t *offset) +{ + ssize_t retval = 0; + loff_t origin = *offset; + + + retval = generic_write_checks(filp, &origin, &count, 0); + if (retval) + goto out; + + retval = -EINVAL; + if ((ssize_t) count < 0) + goto out; + retval = 0; + if (!count) + goto out; + + retval = v9fs_file_write_internal(file_inode(filp), + filp->private_data, + data, count, &origin, 1); + /* update offset on successful write */ + if (retval > 0) + *offset = origin; +out: + return retval; +} + + +static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end, + int datasync) +{ + struct p9_fid *fid; + struct inode *inode = filp->f_mapping->host; + struct p9_wstat wstat; + int retval; + + retval = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (retval) + return retval; + 
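+	/*
+	 * Legacy 9P has no dedicated fsync request: writing back a blank
+	 * wstat (every field set to its "don't touch" value) asks the
+	 * server to commit the file's contents to stable storage.
+	 */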
+ mutex_lock(&inode->i_mutex); + p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync); + + fid = filp->private_data; + v9fs_blank_wstat(&wstat); + + retval = p9_client_wstat(fid, &wstat); + mutex_unlock(&inode->i_mutex); + + return retval; +} + +int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, + int datasync) +{ + struct p9_fid *fid; + struct inode *inode = filp->f_mapping->host; + int retval; + + retval = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (retval) + return retval; + + mutex_lock(&inode->i_mutex); + p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync); + + fid = filp->private_data; + + retval = p9_client_fsync(fid, datasync); + mutex_unlock(&inode->i_mutex); + + return retval; +} + +static int +v9fs_file_mmap(struct file *file, struct vm_area_struct *vma) +{ + int retval; + + retval = generic_file_mmap(file, vma); + if (!retval) + vma->vm_ops = &v9fs_file_vm_ops; + + return retval; +} + +static int +v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct v9fs_inode *v9inode; + struct page *page = vmf->page; + struct file *filp = vma->vm_file; + struct inode *inode = file_inode(filp); + + + p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n", + page, (unsigned long)filp->private_data); + + /* Update file times before taking page lock */ + file_update_time(filp); + + v9inode = V9FS_I(inode); + /* make sure the cache has finished storing the page */ + v9fs_fscache_wait_on_page_write(inode, page); + BUG_ON(!v9inode->writeback_fid); + lock_page(page); + if (page->mapping != inode->i_mapping) + goto out_unlock; + wait_for_stable_page(page); + + return VM_FAULT_LOCKED; +out_unlock: + unlock_page(page); + return VM_FAULT_NOPAGE; +} + +static ssize_t +v9fs_direct_read(struct file *filp, char __user *udata, size_t count, + loff_t *offsetp) +{ + loff_t size, offset; + struct inode *inode; + struct address_space *mapping; + + offset = *offsetp; + mapping = filp->f_mapping; + inode = mapping->host; + if (!count) + return 0; + size = i_size_read(inode); + if (offset < size) + filemap_write_and_wait_range(mapping, offset, + offset + count - 1); + + return v9fs_file_read(filp, udata, count, offsetp); +} + +/** + * v9fs_cached_file_read - read from a file + * @filp: file pointer to read + * @udata: user data buffer to read data into + * @count: size of buffer + * @offset: offset at which to read data + * + */ +static ssize_t +v9fs_cached_file_read(struct file *filp, char __user *data, size_t count, + loff_t *offset) +{ + if (filp->f_flags & O_DIRECT) + return v9fs_direct_read(filp, data, count, offset); + return do_sync_read(filp, data, count, offset); +} + +static ssize_t +v9fs_direct_write(struct file *filp, const char __user * data, + size_t count, loff_t *offsetp) +{ + loff_t offset; + ssize_t retval; + struct inode *inode; + struct address_space *mapping; + + offset = *offsetp; + mapping = filp->f_mapping; + inode = mapping->host; + if (!count) + return 0; + + mutex_lock(&inode->i_mutex); + retval = filemap_write_and_wait_range(mapping, offset, + offset + count - 1); + if (retval) + goto err_out; + /* + * After a write we want buffered reads to be sure to go to disk to get + * the new data. We invalidate clean cached page from the region we're + * about to write. 
We do this *before* the write so that if we fail + * here we fall back to buffered write + */ + if (mapping->nrpages) { + pgoff_t pg_start = offset >> PAGE_CACHE_SHIFT; + pgoff_t pg_end = (offset + count - 1) >> PAGE_CACHE_SHIFT; + + retval = invalidate_inode_pages2_range(mapping, + pg_start, pg_end); + /* + * If a page can not be invalidated, fall back + * to buffered write. + */ + if (retval) { + if (retval == -EBUSY) + goto buff_write; + goto err_out; + } + } + retval = v9fs_file_write(filp, data, count, offsetp); +err_out: + mutex_unlock(&inode->i_mutex); + return retval; + +buff_write: + mutex_unlock(&inode->i_mutex); + return do_sync_write(filp, data, count, offsetp); +} + +/** + * v9fs_cached_file_write - write to a file + * @filp: file pointer to write + * @data: data buffer to write data from + * @count: size of buffer + * @offset: offset at which to write data + * + */ +static ssize_t +v9fs_cached_file_write(struct file *filp, const char __user * data, + size_t count, loff_t *offset) +{ + + if (filp->f_flags & O_DIRECT) + return v9fs_direct_write(filp, data, count, offset); + return do_sync_write(filp, data, count, offset); +} + +static const struct vm_operations_struct v9fs_file_vm_ops = { + .fault = filemap_fault, + .page_mkwrite = v9fs_vm_page_mkwrite, + .remap_pages = generic_file_remap_pages, +}; + + +const struct file_operations v9fs_cached_file_operations = { + .llseek = generic_file_llseek, + .read = v9fs_cached_file_read, + .write = v9fs_cached_file_write, + .aio_read = generic_file_aio_read, + .aio_write = generic_file_aio_write, + .open = v9fs_file_open, + .release = v9fs_dir_release, + .lock = v9fs_file_lock, + .mmap = v9fs_file_mmap, + .fsync = v9fs_file_fsync, +}; + +const struct file_operations v9fs_cached_file_operations_dotl = { + .llseek = generic_file_llseek, + .read = v9fs_cached_file_read, + .write = v9fs_cached_file_write, + .aio_read = generic_file_aio_read, + .aio_write = generic_file_aio_write, + .open = v9fs_file_open, + .release = v9fs_dir_release, + .lock = v9fs_file_lock_dotl, + .flock = v9fs_file_flock_dotl, + .mmap = v9fs_file_mmap, + .fsync = v9fs_file_fsync_dotl, +}; + +const struct file_operations v9fs_file_operations = { + .llseek = generic_file_llseek, + .read = v9fs_file_read, + .write = v9fs_file_write, + .open = v9fs_file_open, + .release = v9fs_dir_release, + .lock = v9fs_file_lock, + .mmap = generic_file_readonly_mmap, + .fsync = v9fs_file_fsync, +}; + +const struct file_operations v9fs_file_operations_dotl = { + .llseek = generic_file_llseek, + .read = v9fs_file_read, + .write = v9fs_file_write, + .open = v9fs_file_open, + .release = v9fs_dir_release, + .lock = v9fs_file_lock_dotl, + .flock = v9fs_file_flock_dotl, + .mmap = generic_file_readonly_mmap, + .fsync = v9fs_file_fsync_dotl, +}; diff --git a/addons/9p/src/3.10.108/vfs_inode.c b/addons/9p/src/3.10.108/vfs_inode.c new file mode 100644 index 00000000..4c7d309e --- /dev/null +++ b/addons/9p/src/3.10.108/vfs_inode.c @@ -0,0 +1,1538 @@ +/* + * linux/fs/9p/vfs_inode.c + * + * This file contains vfs inode ops for the 9P2000 protocol. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" +#include "cache.h" +#include "xattr.h" +#include "acl.h" + +static const struct inode_operations v9fs_dir_inode_operations; +static const struct inode_operations v9fs_dir_inode_operations_dotu; +static const struct inode_operations v9fs_file_inode_operations; +static const struct inode_operations v9fs_symlink_inode_operations; + +/** + * unixmode2p9mode - convert unix mode bits to plan 9 + * @v9ses: v9fs session information + * @mode: mode to convert + * + */ + +static u32 unixmode2p9mode(struct v9fs_session_info *v9ses, umode_t mode) +{ + int res; + res = mode & 0777; + if (S_ISDIR(mode)) + res |= P9_DMDIR; + if (v9fs_proto_dotu(v9ses)) { + if (v9ses->nodev == 0) { + if (S_ISSOCK(mode)) + res |= P9_DMSOCKET; + if (S_ISFIFO(mode)) + res |= P9_DMNAMEDPIPE; + if (S_ISBLK(mode)) + res |= P9_DMDEVICE; + if (S_ISCHR(mode)) + res |= P9_DMDEVICE; + } + + if ((mode & S_ISUID) == S_ISUID) + res |= P9_DMSETUID; + if ((mode & S_ISGID) == S_ISGID) + res |= P9_DMSETGID; + if ((mode & S_ISVTX) == S_ISVTX) + res |= P9_DMSETVTX; + } + return res; +} + +/** + * p9mode2perm- convert plan9 mode bits to unix permission bits + * @v9ses: v9fs session information + * @stat: p9_wstat from which mode need to be derived + * + */ +static int p9mode2perm(struct v9fs_session_info *v9ses, + struct p9_wstat *stat) +{ + int res; + int mode = stat->mode; + + res = mode & S_IALLUGO; + if (v9fs_proto_dotu(v9ses)) { + if ((mode & P9_DMSETUID) == P9_DMSETUID) + res |= S_ISUID; + + if ((mode & P9_DMSETGID) == P9_DMSETGID) + res |= S_ISGID; + + if ((mode & P9_DMSETVTX) == P9_DMSETVTX) + res |= S_ISVTX; + } + return res; +} + +/** + * p9mode2unixmode- convert plan9 mode bits to unix mode bits + * @v9ses: v9fs session information + * @stat: p9_wstat from which mode need to be derived + * @rdev: major number, minor number in case of device files. 
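+ *
+ * Device numbers are recovered from the 9p2000.u extension string,
+ * which has the form "b <major> <minor>" or "c <major> <minor>".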
+ * + */ +static umode_t p9mode2unixmode(struct v9fs_session_info *v9ses, + struct p9_wstat *stat, dev_t *rdev) +{ + int res; + u32 mode = stat->mode; + + *rdev = 0; + res = p9mode2perm(v9ses, stat); + + if ((mode & P9_DMDIR) == P9_DMDIR) + res |= S_IFDIR; + else if ((mode & P9_DMSYMLINK) && (v9fs_proto_dotu(v9ses))) + res |= S_IFLNK; + else if ((mode & P9_DMSOCKET) && (v9fs_proto_dotu(v9ses)) + && (v9ses->nodev == 0)) + res |= S_IFSOCK; + else if ((mode & P9_DMNAMEDPIPE) && (v9fs_proto_dotu(v9ses)) + && (v9ses->nodev == 0)) + res |= S_IFIFO; + else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses)) + && (v9ses->nodev == 0)) { + char type = 0, ext[32]; + int major = -1, minor = -1; + + strncpy(ext, stat->extension, sizeof(ext)); + sscanf(ext, "%c %u %u", &type, &major, &minor); + switch (type) { + case 'c': + res |= S_IFCHR; + break; + case 'b': + res |= S_IFBLK; + break; + default: + p9_debug(P9_DEBUG_ERROR, "Unknown special type %c %s\n", + type, stat->extension); + }; + *rdev = MKDEV(major, minor); + } else + res |= S_IFREG; + + return res; +} + +/** + * v9fs_uflags2omode- convert posix open flags to plan 9 mode bits + * @uflags: flags to convert + * @extended: if .u extensions are active + */ + +int v9fs_uflags2omode(int uflags, int extended) +{ + int ret; + + ret = 0; + switch (uflags&3) { + default: + case O_RDONLY: + ret = P9_OREAD; + break; + + case O_WRONLY: + ret = P9_OWRITE; + break; + + case O_RDWR: + ret = P9_ORDWR; + break; + } + + if (extended) { + if (uflags & O_EXCL) + ret |= P9_OEXCL; + + if (uflags & O_APPEND) + ret |= P9_OAPPEND; + } + + return ret; +} + +/** + * v9fs_blank_wstat - helper function to setup a 9P stat structure + * @wstat: structure to initialize + * + */ + +void +v9fs_blank_wstat(struct p9_wstat *wstat) +{ + wstat->type = ~0; + wstat->dev = ~0; + wstat->qid.type = ~0; + wstat->qid.version = ~0; + *((long long *)&wstat->qid.path) = ~0; + wstat->mode = ~0; + wstat->atime = ~0; + wstat->mtime = ~0; + wstat->length = ~0; + wstat->name = NULL; + wstat->uid = NULL; + wstat->gid = NULL; + wstat->muid = NULL; + wstat->n_uid = INVALID_UID; + wstat->n_gid = INVALID_GID; + wstat->n_muid = INVALID_UID; + wstat->extension = NULL; +} + +/** + * v9fs_alloc_inode - helper function to allocate an inode + * + */ +struct inode *v9fs_alloc_inode(struct super_block *sb) +{ + struct v9fs_inode *v9inode; + v9inode = (struct v9fs_inode *)kmem_cache_alloc(v9fs_inode_cache, + GFP_KERNEL); + if (!v9inode) + return NULL; +#ifdef CONFIG_9P_FSCACHE + v9inode->fscache = NULL; + spin_lock_init(&v9inode->fscache_lock); +#endif + v9inode->writeback_fid = NULL; + v9inode->cache_validity = 0; + mutex_init(&v9inode->v_mutex); + return &v9inode->vfs_inode; +} + +/** + * v9fs_destroy_inode - destroy an inode + * + */ + +static void v9fs_i_callback(struct rcu_head *head) +{ + struct inode *inode = container_of(head, struct inode, i_rcu); + kmem_cache_free(v9fs_inode_cache, V9FS_I(inode)); +} + +void v9fs_destroy_inode(struct inode *inode) +{ + call_rcu(&inode->i_rcu, v9fs_i_callback); +} + +int v9fs_init_inode(struct v9fs_session_info *v9ses, + struct inode *inode, umode_t mode, dev_t rdev) +{ + int err = 0; + + inode_init_owner(inode, NULL, mode); + inode->i_blocks = 0; + inode->i_rdev = rdev; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + inode->i_mapping->a_ops = &v9fs_addr_operations; + + switch (mode & S_IFMT) { + case S_IFIFO: + case S_IFBLK: + case S_IFCHR: + case S_IFSOCK: + if (v9fs_proto_dotl(v9ses)) { + inode->i_op = &v9fs_file_inode_operations_dotl; + } else 
if (v9fs_proto_dotu(v9ses)) { + inode->i_op = &v9fs_file_inode_operations; + } else { + p9_debug(P9_DEBUG_ERROR, + "special files without extended mode\n"); + err = -EINVAL; + goto error; + } + init_special_inode(inode, inode->i_mode, inode->i_rdev); + break; + case S_IFREG: + if (v9fs_proto_dotl(v9ses)) { + inode->i_op = &v9fs_file_inode_operations_dotl; + if (v9ses->cache) + inode->i_fop = + &v9fs_cached_file_operations_dotl; + else + inode->i_fop = &v9fs_file_operations_dotl; + } else { + inode->i_op = &v9fs_file_inode_operations; + if (v9ses->cache) + inode->i_fop = &v9fs_cached_file_operations; + else + inode->i_fop = &v9fs_file_operations; + } + + break; + case S_IFLNK: + if (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)) { + p9_debug(P9_DEBUG_ERROR, + "extended modes used with legacy protocol\n"); + err = -EINVAL; + goto error; + } + + if (v9fs_proto_dotl(v9ses)) + inode->i_op = &v9fs_symlink_inode_operations_dotl; + else + inode->i_op = &v9fs_symlink_inode_operations; + + break; + case S_IFDIR: + inc_nlink(inode); + if (v9fs_proto_dotl(v9ses)) + inode->i_op = &v9fs_dir_inode_operations_dotl; + else if (v9fs_proto_dotu(v9ses)) + inode->i_op = &v9fs_dir_inode_operations_dotu; + else + inode->i_op = &v9fs_dir_inode_operations; + + if (v9fs_proto_dotl(v9ses)) + inode->i_fop = &v9fs_dir_operations_dotl; + else + inode->i_fop = &v9fs_dir_operations; + + break; + default: + p9_debug(P9_DEBUG_ERROR, "BAD mode 0x%hx S_IFMT 0x%x\n", + mode, mode & S_IFMT); + err = -EINVAL; + goto error; + } +error: + return err; + +} + +/** + * v9fs_get_inode - helper function to setup an inode + * @sb: superblock + * @mode: mode to setup inode with + * + */ + +struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev) +{ + int err; + struct inode *inode; + struct v9fs_session_info *v9ses = sb->s_fs_info; + + p9_debug(P9_DEBUG_VFS, "super block: %p mode: %ho\n", sb, mode); + + inode = new_inode(sb); + if (!inode) { + pr_warn("%s (%d): Problem allocating inode\n", + __func__, task_pid_nr(current)); + return ERR_PTR(-ENOMEM); + } + err = v9fs_init_inode(v9ses, inode, mode, rdev); + if (err) { + iput(inode); + return ERR_PTR(err); + } + return inode; +} + +/* +static struct v9fs_fid* +v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry) +{ + int err; + int nfid; + struct v9fs_fid *ret; + struct v9fs_fcall *fcall; + + nfid = v9fs_get_idpool(&v9ses->fidpool); + if (nfid < 0) { + eprintk(KERN_WARNING, "no free fids available\n"); + return ERR_PTR(-ENOSPC); + } + + err = v9fs_t_walk(v9ses, fid, nfid, (char *) dentry->d_name.name, + &fcall); + + if (err < 0) { + if (fcall && fcall->id == RWALK) + goto clunk_fid; + + PRINT_FCALL_ERROR("walk error", fcall); + v9fs_put_idpool(nfid, &v9ses->fidpool); + goto error; + } + + kfree(fcall); + fcall = NULL; + ret = v9fs_fid_create(v9ses, nfid); + if (!ret) { + err = -ENOMEM; + goto clunk_fid; + } + + err = v9fs_fid_insert(ret, dentry); + if (err < 0) { + v9fs_fid_destroy(ret); + goto clunk_fid; + } + + return ret; + +clunk_fid: + v9fs_t_clunk(v9ses, nfid); + +error: + kfree(fcall); + return ERR_PTR(err); +} +*/ + + +/** + * v9fs_clear_inode - release an inode + * @inode: inode to release + * + */ +void v9fs_evict_inode(struct inode *inode) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + + truncate_inode_pages(inode->i_mapping, 0); + clear_inode(inode); + filemap_fdatawrite(inode->i_mapping); + +#ifdef CONFIG_9P_FSCACHE + v9fs_cache_inode_put_cookie(inode); +#endif + /* clunk the fid stashed in writeback_fid */ + if 
(v9inode->writeback_fid) { + p9_client_clunk(v9inode->writeback_fid); + v9inode->writeback_fid = NULL; + } +} + +static int v9fs_test_inode(struct inode *inode, void *data) +{ + int umode; + dev_t rdev; + struct v9fs_inode *v9inode = V9FS_I(inode); + struct p9_wstat *st = (struct p9_wstat *)data; + struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); + + umode = p9mode2unixmode(v9ses, st, &rdev); + /* don't match inode of different type */ + if ((inode->i_mode & S_IFMT) != (umode & S_IFMT)) + return 0; + + /* compare qid details */ + if (memcmp(&v9inode->qid.version, + &st->qid.version, sizeof(v9inode->qid.version))) + return 0; + + if (v9inode->qid.type != st->qid.type) + return 0; + return 1; +} + +static int v9fs_test_new_inode(struct inode *inode, void *data) +{ + return 0; +} + +static int v9fs_set_inode(struct inode *inode, void *data) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + struct p9_wstat *st = (struct p9_wstat *)data; + + memcpy(&v9inode->qid, &st->qid, sizeof(st->qid)); + return 0; +} + +static struct inode *v9fs_qid_iget(struct super_block *sb, + struct p9_qid *qid, + struct p9_wstat *st, + int new) +{ + dev_t rdev; + int retval; + umode_t umode; + unsigned long i_ino; + struct inode *inode; + struct v9fs_session_info *v9ses = sb->s_fs_info; + int (*test)(struct inode *, void *); + + if (new) + test = v9fs_test_new_inode; + else + test = v9fs_test_inode; + + i_ino = v9fs_qid2ino(qid); + inode = iget5_locked(sb, i_ino, test, v9fs_set_inode, st); + if (!inode) + return ERR_PTR(-ENOMEM); + if (!(inode->i_state & I_NEW)) + return inode; + /* + * initialize the inode with the stat info + * FIXME!! we may need support for stale inodes + * later. + */ + inode->i_ino = i_ino; + umode = p9mode2unixmode(v9ses, st, &rdev); + retval = v9fs_init_inode(v9ses, inode, umode, rdev); + if (retval) + goto error; + + v9fs_stat2inode(st, inode, sb); +#ifdef CONFIG_9P_FSCACHE + v9fs_cache_inode_get_cookie(inode); +#endif + unlock_new_inode(inode); + return inode; +error: + iget_failed(inode); + return ERR_PTR(retval); + +} + +struct inode * +v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, + struct super_block *sb, int new) +{ + struct p9_wstat *st; + struct inode *inode = NULL; + + st = p9_client_stat(fid); + if (IS_ERR(st)) + return ERR_CAST(st); + + inode = v9fs_qid_iget(sb, &st->qid, st, new); + p9stat_free(st); + kfree(st); + return inode; +} + +/** + * v9fs_at_to_dotl_flags- convert Linux specific AT flags to + * plan 9 AT flag. 
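+ * Only AT_REMOVEDIR has a 9P2000.L equivalent (P9_DOTL_AT_REMOVEDIR);
+ * any other bits are dropped.
+ *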
+ * @flags: flags to convert
+ */
+static int v9fs_at_to_dotl_flags(int flags)
+{
+	int rflags = 0;
+	if (flags & AT_REMOVEDIR)
+		rflags |= P9_DOTL_AT_REMOVEDIR;
+	return rflags;
+}
+
+/**
+ * v9fs_remove - helper function to remove files and directories
+ * @dir: inode of the parent directory
+ * @dentry: dentry that is being deleted
+ * @flags: removal flags (AT_REMOVEDIR when removing a directory)
+ *
+ */
+
+static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags)
+{
+	struct inode *inode;
+	int retval = -EOPNOTSUPP;
+	struct p9_fid *v9fid, *dfid;
+	struct v9fs_session_info *v9ses;
+
+	p9_debug(P9_DEBUG_VFS, "inode: %p dentry: %p rmdir: %x\n",
+		 dir, dentry, flags);
+
+	v9ses = v9fs_inode2v9ses(dir);
+	inode = dentry->d_inode;
+	dfid = v9fs_fid_lookup(dentry->d_parent);
+	if (IS_ERR(dfid)) {
+		retval = PTR_ERR(dfid);
+		p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", retval);
+		return retval;
+	}
+	if (v9fs_proto_dotl(v9ses))
+		retval = p9_client_unlinkat(dfid, dentry->d_name.name,
+					    v9fs_at_to_dotl_flags(flags));
+	if (retval == -EOPNOTSUPP) {
+		/* Try the one based on path */
+		v9fid = v9fs_fid_clone(dentry);
+		if (IS_ERR(v9fid))
+			return PTR_ERR(v9fid);
+		retval = p9_client_remove(v9fid);
+	}
+	if (!retval) {
+		/*
+		 * directories on unlink should have zero
+		 * link count
+		 */
+		if (flags & AT_REMOVEDIR) {
+			clear_nlink(inode);
+			drop_nlink(dir);
+		} else
+			drop_nlink(inode);
+
+		v9fs_invalidate_inode_attr(inode);
+		v9fs_invalidate_inode_attr(dir);
+	}
+	return retval;
+}
+
+/**
+ * v9fs_create - Create a file
+ * @v9ses: session information
+ * @dir: directory that dentry is being created in
+ * @dentry: dentry that is being created
+ * @extension: 9p2000.u extension string to support devices, etc.
+ * @perm: create permissions
+ * @mode: open mode
+ *
+ */
+static struct p9_fid *
+v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir,
+		struct dentry *dentry, char *extension, u32 perm, u8 mode)
+{
+	int err;
+	char *name;
+	struct p9_fid *dfid, *ofid, *fid;
+	struct inode *inode;
+
+	p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+
+	err = 0;
+	ofid = NULL;
+	fid = NULL;
+	name = (char *) dentry->d_name.name;
+	dfid = v9fs_fid_lookup(dentry->d_parent);
+	if (IS_ERR(dfid)) {
+		err = PTR_ERR(dfid);
+		p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err);
+		return ERR_PTR(err);
+	}
+
+	/* clone a fid to use for creation */
+	ofid = p9_client_walk(dfid, 0, NULL, 1);
+	if (IS_ERR(ofid)) {
+		err = PTR_ERR(ofid);
+		p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err);
+		return ERR_PTR(err);
+	}
+
+	err = p9_client_fcreate(ofid, name, perm, mode, extension);
+	if (err < 0) {
+		p9_debug(P9_DEBUG_VFS, "p9_client_fcreate failed %d\n", err);
+		goto error;
+	}
+
+	if (!(perm & P9_DMLINK)) {
+		/* now walk from the parent so we can get unopened fid */
+		fid = p9_client_walk(dfid, 1, &name, 1);
+		if (IS_ERR(fid)) {
+			err = PTR_ERR(fid);
+			p9_debug(P9_DEBUG_VFS,
+				   "p9_client_walk failed %d\n", err);
+			fid = NULL;
+			goto error;
+		}
+		/*
+		 * instantiate inode and assign the unopened fid to the dentry
+		 */
+		inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			p9_debug(P9_DEBUG_VFS,
+				   "inode creation failed %d\n", err);
+			goto error;
+		}
+		v9fs_fid_add(dentry, fid);
+		d_instantiate(dentry, inode);
+	}
+	return ofid;
+error:
+	if (ofid)
+		p9_client_clunk(ofid);
+
+	if (fid)
+		p9_client_clunk(fid);
+
+	return ERR_PTR(err);
+}
+
+/**
+ * v9fs_vfs_create - VFS hook to create a regular file
+ *
+ * open(.., O_CREAT) is handled in
v9fs_vfs_atomic_open(). This is only called
+ * for mknod(2).
+ *
+ * @dir: directory inode in which the file is being created
+ * @dentry: dentry of the file that is being created
+ * @mode: create permissions
+ *
+ */
+
+static int
+v9fs_vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
+		bool excl)
+{
+	struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
+	u32 perm = unixmode2p9mode(v9ses, mode);
+	struct p9_fid *fid;
+
+	/* P9_OEXCL? */
+	fid = v9fs_create(v9ses, dir, dentry, NULL, perm, P9_ORDWR);
+	if (IS_ERR(fid))
+		return PTR_ERR(fid);
+
+	v9fs_invalidate_inode_attr(dir);
+	p9_client_clunk(fid);
+
+	return 0;
+}
+
+/**
+ * v9fs_vfs_mkdir - VFS mkdir hook to create a directory
+ * @dir: inode of the parent directory
+ * @dentry: dentry of the directory to be created
+ * @mode: mode for new directory
+ *
+ */
+
+static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	int err;
+	u32 perm;
+	struct p9_fid *fid;
+	struct v9fs_session_info *v9ses;
+
+	p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name);
+	err = 0;
+	v9ses = v9fs_inode2v9ses(dir);
+	perm = unixmode2p9mode(v9ses, mode | S_IFDIR);
+	fid = v9fs_create(v9ses, dir, dentry, NULL, perm, P9_OREAD);
+	if (IS_ERR(fid)) {
+		err = PTR_ERR(fid);
+		fid = NULL;
+	} else {
+		inc_nlink(dir);
+		v9fs_invalidate_inode_attr(dir);
+	}
+
+	if (fid)
+		p9_client_clunk(fid);
+
+	return err;
+}
+
+/**
+ * v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode
+ * @dir: inode that is being walked from
+ * @dentry: dentry that is being walked to
+ * @flags: lookup flags
+ *
+ */
+
+struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry,
+				      unsigned int flags)
+{
+	struct dentry *res;
+	struct super_block *sb;
+	struct v9fs_session_info *v9ses;
+	struct p9_fid *dfid, *fid;
+	struct inode *inode;
+	char *name;
+
+	p9_debug(P9_DEBUG_VFS, "dir: %p dentry: (%s) %p flags: %x\n",
+		 dir, dentry->d_name.name, dentry, flags);
+
+	if (dentry->d_name.len > NAME_MAX)
+		return ERR_PTR(-ENAMETOOLONG);
+
+	sb = dir->i_sb;
+	v9ses = v9fs_inode2v9ses(dir);
+	/* We can walk d_parent because we hold the dir->i_mutex */
+	dfid = v9fs_fid_lookup(dentry->d_parent);
+	if (IS_ERR(dfid))
+		return ERR_CAST(dfid);
+
+	name = (char *) dentry->d_name.name;
+	fid = p9_client_walk(dfid, 1, &name, 1);
+	if (IS_ERR(fid)) {
+		if (fid == ERR_PTR(-ENOENT)) {
+			d_add(dentry, NULL);
+			return NULL;
+		}
+		return ERR_CAST(fid);
+	}
+	/*
+	 * Make sure we don't use a wrong inode due to parallel
+	 * unlink. In cached mode, create calls request a new inode,
+	 * but with caching disabled, lookup has to do it here.
+	 */
+	if (v9ses->cache)
+		inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb);
+	else
+		inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb);
+	if (IS_ERR(inode)) {
+		p9_client_clunk(fid);
+		return ERR_CAST(inode);
+	}
+	/*
+	 * If we had a rename on the server and a parallel lookup
+	 * for the new name, then make sure we instantiate with
+	 * the new name. I.e. we look up a/b while on the server
+	 * somebody moved b under k and the client did a parallel
+	 * lookup for k/b.
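+	 * d_materialise_unique() below resolves that race by reusing
+	 * the dentry already attached to the inode.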
+ */ + res = d_materialise_unique(dentry, inode); + if (!res) + v9fs_fid_add(dentry, fid); + else if (!IS_ERR(res)) + v9fs_fid_add(res, fid); + else + p9_client_clunk(fid); + return res; +} + +static int +v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry, + struct file *file, unsigned flags, umode_t mode, + int *opened) +{ + int err; + u32 perm; + struct v9fs_inode *v9inode; + struct v9fs_session_info *v9ses; + struct p9_fid *fid, *inode_fid; + struct dentry *res = NULL; + + if (d_unhashed(dentry)) { + res = v9fs_vfs_lookup(dir, dentry, 0); + if (IS_ERR(res)) + return PTR_ERR(res); + + if (res) + dentry = res; + } + + /* Only creates */ + if (!(flags & O_CREAT) || dentry->d_inode) + return finish_no_open(file, res); + + err = 0; + fid = NULL; + v9ses = v9fs_inode2v9ses(dir); + perm = unixmode2p9mode(v9ses, mode); + fid = v9fs_create(v9ses, dir, dentry, NULL, perm, + v9fs_uflags2omode(flags, + v9fs_proto_dotu(v9ses))); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + fid = NULL; + goto error; + } + + v9fs_invalidate_inode_attr(dir); + v9inode = V9FS_I(dentry->d_inode); + mutex_lock(&v9inode->v_mutex); + if (v9ses->cache && !v9inode->writeback_fid && + ((flags & O_ACCMODE) != O_RDONLY)) { + /* + * clone a fid and add it to writeback_fid + * we do it during open time instead of + * page dirty time via write_begin/page_mkwrite + * because we want write after unlink usecase + * to work. + */ + inode_fid = v9fs_writeback_fid(dentry); + if (IS_ERR(inode_fid)) { + err = PTR_ERR(inode_fid); + mutex_unlock(&v9inode->v_mutex); + goto error; + } + v9inode->writeback_fid = (void *) inode_fid; + } + mutex_unlock(&v9inode->v_mutex); + err = finish_open(file, dentry, generic_file_open, opened); + if (err) + goto error; + + file->private_data = fid; +#ifdef CONFIG_9P_FSCACHE + if (v9ses->cache) + v9fs_cache_inode_set_cookie(dentry->d_inode, file); +#endif + + *opened |= FILE_CREATED; +out: + dput(res); + return err; + +error: + if (fid) + p9_client_clunk(fid); + goto out; +} + +/** + * v9fs_vfs_unlink - VFS unlink hook to delete an inode + * @i: inode that is being unlinked + * @d: dentry that is being unlinked + * + */ + +int v9fs_vfs_unlink(struct inode *i, struct dentry *d) +{ + return v9fs_remove(i, d, 0); +} + +/** + * v9fs_vfs_rmdir - VFS unlink hook to delete a directory + * @i: inode that is being unlinked + * @d: dentry that is being unlinked + * + */ + +int v9fs_vfs_rmdir(struct inode *i, struct dentry *d) +{ + return v9fs_remove(i, d, AT_REMOVEDIR); +} + +/** + * v9fs_vfs_rename - VFS hook to rename an inode + * @old_dir: old dir inode + * @old_dentry: old dentry + * @new_dir: new dir inode + * @new_dentry: new dentry + * + */ + +int +v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) +{ + int retval; + struct inode *old_inode; + struct inode *new_inode; + struct v9fs_session_info *v9ses; + struct p9_fid *oldfid; + struct p9_fid *olddirfid; + struct p9_fid *newdirfid; + struct p9_wstat wstat; + + p9_debug(P9_DEBUG_VFS, "\n"); + retval = 0; + old_inode = old_dentry->d_inode; + new_inode = new_dentry->d_inode; + v9ses = v9fs_inode2v9ses(old_inode); + oldfid = v9fs_fid_lookup(old_dentry); + if (IS_ERR(oldfid)) + return PTR_ERR(oldfid); + + olddirfid = v9fs_fid_clone(old_dentry->d_parent); + if (IS_ERR(olddirfid)) { + retval = PTR_ERR(olddirfid); + goto done; + } + + newdirfid = v9fs_fid_clone(new_dentry->d_parent); + if (IS_ERR(newdirfid)) { + retval = PTR_ERR(newdirfid); + goto clunk_olddir; + } + + 
down_write(&v9ses->rename_sem); + if (v9fs_proto_dotl(v9ses)) { + retval = p9_client_renameat(olddirfid, old_dentry->d_name.name, + newdirfid, new_dentry->d_name.name); + if (retval == -EOPNOTSUPP) + retval = p9_client_rename(oldfid, newdirfid, + new_dentry->d_name.name); + if (retval != -EOPNOTSUPP) + goto clunk_newdir; + } + if (old_dentry->d_parent != new_dentry->d_parent) { + /* + * 9P .u can only handle file rename in the same directory + */ + + p9_debug(P9_DEBUG_ERROR, "old dir and new dir are different\n"); + retval = -EXDEV; + goto clunk_newdir; + } + v9fs_blank_wstat(&wstat); + wstat.muid = v9ses->uname; + wstat.name = (char *) new_dentry->d_name.name; + retval = p9_client_wstat(oldfid, &wstat); + +clunk_newdir: + if (!retval) { + if (new_inode) { + if (S_ISDIR(new_inode->i_mode)) + clear_nlink(new_inode); + else + drop_nlink(new_inode); + } + if (S_ISDIR(old_inode->i_mode)) { + if (!new_inode) + inc_nlink(new_dir); + drop_nlink(old_dir); + } + v9fs_invalidate_inode_attr(old_inode); + v9fs_invalidate_inode_attr(old_dir); + v9fs_invalidate_inode_attr(new_dir); + + /* successful rename */ + d_move(old_dentry, new_dentry); + } + up_write(&v9ses->rename_sem); + p9_client_clunk(newdirfid); + +clunk_olddir: + p9_client_clunk(olddirfid); + +done: + return retval; +} + +/** + * v9fs_vfs_getattr - retrieve file metadata + * @mnt: mount information + * @dentry: file to get attributes on + * @stat: metadata structure to populate + * + */ + +static int +v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat) +{ + int err; + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + struct p9_wstat *st; + + p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry); + err = -EPERM; + v9ses = v9fs_dentry2v9ses(dentry); + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + generic_fillattr(dentry->d_inode, stat); + return 0; + } + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + st = p9_client_stat(fid); + if (IS_ERR(st)) + return PTR_ERR(st); + + v9fs_stat2inode(st, dentry->d_inode, dentry->d_inode->i_sb); + generic_fillattr(dentry->d_inode, stat); + + p9stat_free(st); + kfree(st); + return 0; +} + +/** + * v9fs_vfs_setattr - set file metadata + * @dentry: file whose metadata to set + * @iattr: metadata assignment structure + * + */ + +static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) +{ + int retval; + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + struct p9_wstat wstat; + + p9_debug(P9_DEBUG_VFS, "\n"); + retval = inode_change_ok(dentry->d_inode, iattr); + if (retval) + return retval; + + retval = -EPERM; + v9ses = v9fs_dentry2v9ses(dentry); + fid = v9fs_fid_lookup(dentry); + if(IS_ERR(fid)) + return PTR_ERR(fid); + + v9fs_blank_wstat(&wstat); + if (iattr->ia_valid & ATTR_MODE) + wstat.mode = unixmode2p9mode(v9ses, iattr->ia_mode); + + if (iattr->ia_valid & ATTR_MTIME) + wstat.mtime = iattr->ia_mtime.tv_sec; + + if (iattr->ia_valid & ATTR_ATIME) + wstat.atime = iattr->ia_atime.tv_sec; + + if (iattr->ia_valid & ATTR_SIZE) + wstat.length = iattr->ia_size; + + if (v9fs_proto_dotu(v9ses)) { + if (iattr->ia_valid & ATTR_UID) + wstat.n_uid = iattr->ia_uid; + + if (iattr->ia_valid & ATTR_GID) + wstat.n_gid = iattr->ia_gid; + } + + /* Write all dirty data */ + if (S_ISREG(dentry->d_inode->i_mode)) + filemap_write_and_wait(dentry->d_inode->i_mapping); + + retval = p9_client_wstat(fid, &wstat); + if (retval < 0) + return retval; + + if ((iattr->ia_valid & ATTR_SIZE) && + iattr->ia_size != 
i_size_read(dentry->d_inode))
+		truncate_setsize(dentry->d_inode, iattr->ia_size);
+
+	v9fs_invalidate_inode_attr(dentry->d_inode);
+
+	setattr_copy(dentry->d_inode, iattr);
+	mark_inode_dirty(dentry->d_inode);
+	return 0;
+}
+
+/**
+ * v9fs_stat2inode - populate an inode structure with mistat info
+ * @stat: Plan 9 metadata (mistat) structure
+ * @inode: inode to populate
+ * @sb: superblock of filesystem
+ *
+ */
+
+void
+v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
+	struct super_block *sb)
+{
+	umode_t mode;
+	char ext[32];
+	char tag_name[14];
+	unsigned int i_nlink;
+	struct v9fs_session_info *v9ses = sb->s_fs_info;
+	struct v9fs_inode *v9inode = V9FS_I(inode);
+
+	set_nlink(inode, 1);
+
+	inode->i_atime.tv_sec = stat->atime;
+	inode->i_mtime.tv_sec = stat->mtime;
+	inode->i_ctime.tv_sec = stat->mtime;
+
+	inode->i_uid = v9ses->dfltuid;
+	inode->i_gid = v9ses->dfltgid;
+
+	if (v9fs_proto_dotu(v9ses)) {
+		inode->i_uid = stat->n_uid;
+		inode->i_gid = stat->n_gid;
+	}
+	if ((S_ISREG(inode->i_mode)) || (S_ISDIR(inode->i_mode))) {
+		if (v9fs_proto_dotu(v9ses) && (stat->extension[0] != '\0')) {
+			/*
+			 * Hardlink support was added to the .u extension
+			 * later on, so there can be servers out there that
+			 * don't support it even with .u. Check for a
+			 * non-empty stat->extension before trusting it.
+			 */
+			strncpy(ext, stat->extension, sizeof(ext));
+			/* HARDLINKCOUNT %u */
+			sscanf(ext, "%13s %u", tag_name, &i_nlink);
+			if (!strncmp(tag_name, "HARDLINKCOUNT", 13))
+				set_nlink(inode, i_nlink);
+		}
+	}
+	mode = p9mode2perm(v9ses, stat);
+	mode |= inode->i_mode & ~S_IALLUGO;
+	inode->i_mode = mode;
+	i_size_write(inode, stat->length);
+
+	/* not real number of blocks, but 512 byte ones ... */
+	inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
+	v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
+}
+
+/**
+ * v9fs_qid2ino - convert qid into inode number
+ * @qid: qid to hash
+ *
+ * BUG: potential for inode number collisions?
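+ * The 64-bit qid path is folded into an ino_t below; when ino_t is
+ * only 32 bits wide the two halves are XORed together, so distinct
+ * qids can map to the same inode number.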
+ */ + +ino_t v9fs_qid2ino(struct p9_qid *qid) +{ + u64 path = qid->path + 2; + ino_t i = 0; + + if (sizeof(ino_t) == sizeof(path)) + memcpy(&i, &path, sizeof(ino_t)); + else + i = (ino_t) (path ^ (path >> 32)); + + return i; +} + +/** + * v9fs_readlink - read a symlink's location (internal version) + * @dentry: dentry for symlink + * @buffer: buffer to load symlink location into + * @buflen: length of buffer + * + */ + +static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen) +{ + int retval; + + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + struct p9_wstat *st; + + p9_debug(P9_DEBUG_VFS, " %s\n", dentry->d_name.name); + retval = -EPERM; + v9ses = v9fs_dentry2v9ses(dentry); + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + if (!v9fs_proto_dotu(v9ses)) + return -EBADF; + + st = p9_client_stat(fid); + if (IS_ERR(st)) + return PTR_ERR(st); + + if (!(st->mode & P9_DMSYMLINK)) { + retval = -EINVAL; + goto done; + } + + /* copy extension buffer into buffer */ + retval = min(strlen(st->extension)+1, (size_t)buflen); + memcpy(buffer, st->extension, retval); + + p9_debug(P9_DEBUG_VFS, "%s -> %s (%.*s)\n", + dentry->d_name.name, st->extension, buflen, buffer); + +done: + p9stat_free(st); + kfree(st); + return retval; +} + +/** + * v9fs_vfs_follow_link - follow a symlink path + * @dentry: dentry for symlink + * @nd: nameidata + * + */ + +static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd) +{ + int len = 0; + char *link = __getname(); + + p9_debug(P9_DEBUG_VFS, "%s\n", dentry->d_name.name); + + if (!link) + link = ERR_PTR(-ENOMEM); + else { + len = v9fs_readlink(dentry, link, PATH_MAX); + + if (len < 0) { + __putname(link); + link = ERR_PTR(len); + } else + link[min(len, PATH_MAX-1)] = 0; + } + nd_set_link(nd, link); + + return NULL; +} + +/** + * v9fs_vfs_put_link - release a symlink path + * @dentry: dentry for symlink + * @nd: nameidata + * @p: unused + * + */ + +void +v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) +{ + char *s = nd_get_link(nd); + + p9_debug(P9_DEBUG_VFS, " %s %s\n", + dentry->d_name.name, IS_ERR(s) ? 
"" : s); + if (!IS_ERR(s)) + __putname(s); +} + +/** + * v9fs_vfs_mkspecial - create a special file + * @dir: inode to create special file in + * @dentry: dentry to create + * @mode: mode to create special file + * @extension: 9p2000.u format extension string representing special file + * + */ + +static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry, + u32 perm, const char *extension) +{ + struct p9_fid *fid; + struct v9fs_session_info *v9ses; + + v9ses = v9fs_inode2v9ses(dir); + if (!v9fs_proto_dotu(v9ses)) { + p9_debug(P9_DEBUG_ERROR, "not extended\n"); + return -EPERM; + } + + fid = v9fs_create(v9ses, dir, dentry, (char *) extension, perm, + P9_OREAD); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + v9fs_invalidate_inode_attr(dir); + p9_client_clunk(fid); + return 0; +} + +/** + * v9fs_vfs_symlink - helper function to create symlinks + * @dir: directory inode containing symlink + * @dentry: dentry for symlink + * @symname: symlink data + * + * See Also: 9P2000.u RFC for more information + * + */ + +static int +v9fs_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) +{ + p9_debug(P9_DEBUG_VFS, " %lu,%s,%s\n", + dir->i_ino, dentry->d_name.name, symname); + + return v9fs_vfs_mkspecial(dir, dentry, P9_DMSYMLINK, symname); +} + +/** + * v9fs_vfs_link - create a hardlink + * @old_dentry: dentry for file to link to + * @dir: inode destination for new link + * @dentry: dentry for link + * + */ + +static int +v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir, + struct dentry *dentry) +{ + int retval; + char *name; + struct p9_fid *oldfid; + + p9_debug(P9_DEBUG_VFS, " %lu,%s,%s\n", + dir->i_ino, dentry->d_name.name, old_dentry->d_name.name); + + oldfid = v9fs_fid_clone(old_dentry); + if (IS_ERR(oldfid)) + return PTR_ERR(oldfid); + + name = __getname(); + if (unlikely(!name)) { + retval = -ENOMEM; + goto clunk_fid; + } + + sprintf(name, "%d\n", oldfid->fid); + retval = v9fs_vfs_mkspecial(dir, dentry, P9_DMLINK, name); + __putname(name); + if (!retval) { + v9fs_refresh_inode(oldfid, old_dentry->d_inode); + v9fs_invalidate_inode_attr(dir); + } +clunk_fid: + p9_client_clunk(oldfid); + return retval; +} + +/** + * v9fs_vfs_mknod - create a special file + * @dir: inode destination for new link + * @dentry: dentry for file + * @mode: mode for creation + * @rdev: device associated with special file + * + */ + +static int +v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) +{ + struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); + int retval; + char *name; + u32 perm; + + p9_debug(P9_DEBUG_VFS, " %lu,%s mode: %hx MAJOR: %u MINOR: %u\n", + dir->i_ino, dentry->d_name.name, mode, + MAJOR(rdev), MINOR(rdev)); + + if (!new_valid_dev(rdev)) + return -EINVAL; + + name = __getname(); + if (!name) + return -ENOMEM; + /* build extension */ + if (S_ISBLK(mode)) + sprintf(name, "b %u %u", MAJOR(rdev), MINOR(rdev)); + else if (S_ISCHR(mode)) + sprintf(name, "c %u %u", MAJOR(rdev), MINOR(rdev)); + else if (S_ISFIFO(mode)) + *name = 0; + else if (S_ISSOCK(mode)) + *name = 0; + else { + __putname(name); + return -EINVAL; + } + + perm = unixmode2p9mode(v9ses, mode); + retval = v9fs_vfs_mkspecial(dir, dentry, perm, name); + __putname(name); + + return retval; +} + +int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode) +{ + int umode; + dev_t rdev; + loff_t i_size; + struct p9_wstat *st; + struct v9fs_session_info *v9ses; + + v9ses = v9fs_inode2v9ses(inode); + st = p9_client_stat(fid); + if (IS_ERR(st)) + return PTR_ERR(st); + /* 
+ * Don't update inode if the file type is different + */ + umode = p9mode2unixmode(v9ses, st, &rdev); + if ((inode->i_mode & S_IFMT) != (umode & S_IFMT)) + goto out; + + spin_lock(&inode->i_lock); + /* + * We don't want to refresh inode->i_size, + * because we may have cached data + */ + i_size = inode->i_size; + v9fs_stat2inode(st, inode, inode->i_sb); + if (v9ses->cache) + inode->i_size = i_size; + spin_unlock(&inode->i_lock); +out: + p9stat_free(st); + kfree(st); + return 0; +} + +static const struct inode_operations v9fs_dir_inode_operations_dotu = { + .create = v9fs_vfs_create, + .lookup = v9fs_vfs_lookup, + .atomic_open = v9fs_vfs_atomic_open, + .symlink = v9fs_vfs_symlink, + .link = v9fs_vfs_link, + .unlink = v9fs_vfs_unlink, + .mkdir = v9fs_vfs_mkdir, + .rmdir = v9fs_vfs_rmdir, + .mknod = v9fs_vfs_mknod, + .rename = v9fs_vfs_rename, + .getattr = v9fs_vfs_getattr, + .setattr = v9fs_vfs_setattr, +}; + +static const struct inode_operations v9fs_dir_inode_operations = { + .create = v9fs_vfs_create, + .lookup = v9fs_vfs_lookup, + .atomic_open = v9fs_vfs_atomic_open, + .unlink = v9fs_vfs_unlink, + .mkdir = v9fs_vfs_mkdir, + .rmdir = v9fs_vfs_rmdir, + .mknod = v9fs_vfs_mknod, + .rename = v9fs_vfs_rename, + .getattr = v9fs_vfs_getattr, + .setattr = v9fs_vfs_setattr, +}; + +static const struct inode_operations v9fs_file_inode_operations = { + .getattr = v9fs_vfs_getattr, + .setattr = v9fs_vfs_setattr, +}; + +static const struct inode_operations v9fs_symlink_inode_operations = { + .readlink = generic_readlink, + .follow_link = v9fs_vfs_follow_link, + .put_link = v9fs_vfs_put_link, + .getattr = v9fs_vfs_getattr, + .setattr = v9fs_vfs_setattr, +}; + diff --git a/addons/9p/src/3.10.108/vfs_inode_dotl.c b/addons/9p/src/3.10.108/vfs_inode_dotl.c new file mode 100644 index 00000000..65b21a24 --- /dev/null +++ b/addons/9p/src/3.10.108/vfs_inode_dotl.c @@ -0,0 +1,1033 @@ +/* + * linux/fs/9p/vfs_inode_dotl.c + * + * This file contains vfs inode ops for the 9P2000.L protocol. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" +#include "cache.h" +#include "xattr.h" +#include "acl.h" + +static int +v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode, + dev_t rdev); + +/** + * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a + * new file system object. This checks the S_ISGID to determine the owning + * group of the new file system object. 
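+ * With S_ISGID set on the parent directory the new object inherits
+ * the directory's gid (BSD group semantics); otherwise the caller's
+ * fsgid is used.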
+ */ + +static kgid_t v9fs_get_fsgid_for_create(struct inode *dir_inode) +{ + BUG_ON(dir_inode == NULL); + + if (dir_inode->i_mode & S_ISGID) { + /* set_gid bit is set.*/ + return dir_inode->i_gid; + } + return current_fsgid(); +} + +static int v9fs_test_inode_dotl(struct inode *inode, void *data) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + struct p9_stat_dotl *st = (struct p9_stat_dotl *)data; + + /* don't match inode of different type */ + if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT)) + return 0; + + if (inode->i_generation != st->st_gen) + return 0; + + /* compare qid details */ + if (memcmp(&v9inode->qid.version, + &st->qid.version, sizeof(v9inode->qid.version))) + return 0; + + if (v9inode->qid.type != st->qid.type) + return 0; + return 1; +} + +/* Always get a new inode */ +static int v9fs_test_new_inode_dotl(struct inode *inode, void *data) +{ + return 0; +} + +static int v9fs_set_inode_dotl(struct inode *inode, void *data) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + struct p9_stat_dotl *st = (struct p9_stat_dotl *)data; + + memcpy(&v9inode->qid, &st->qid, sizeof(st->qid)); + inode->i_generation = st->st_gen; + return 0; +} + +static struct inode *v9fs_qid_iget_dotl(struct super_block *sb, + struct p9_qid *qid, + struct p9_fid *fid, + struct p9_stat_dotl *st, + int new) +{ + int retval; + unsigned long i_ino; + struct inode *inode; + struct v9fs_session_info *v9ses = sb->s_fs_info; + int (*test)(struct inode *, void *); + + if (new) + test = v9fs_test_new_inode_dotl; + else + test = v9fs_test_inode_dotl; + + i_ino = v9fs_qid2ino(qid); + inode = iget5_locked(sb, i_ino, test, v9fs_set_inode_dotl, st); + if (!inode) + return ERR_PTR(-ENOMEM); + if (!(inode->i_state & I_NEW)) + return inode; + /* + * initialize the inode with the stat info + * FIXME!! we may need support for stale inodes + * later. 
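+ * The inode came back I_NEW from iget5_locked(), so it is ours to
+ * fill in before unlock_new_inode() publishes it.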
+ */ + inode->i_ino = i_ino; + retval = v9fs_init_inode(v9ses, inode, + st->st_mode, new_decode_dev(st->st_rdev)); + if (retval) + goto error; + + v9fs_stat2inode_dotl(st, inode); +#ifdef CONFIG_9P_FSCACHE + v9fs_cache_inode_get_cookie(inode); +#endif + retval = v9fs_get_acl(inode, fid); + if (retval) + goto error; + + unlock_new_inode(inode); + return inode; +error: + iget_failed(inode); + return ERR_PTR(retval); + +} + +struct inode * +v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, + struct super_block *sb, int new) +{ + struct p9_stat_dotl *st; + struct inode *inode = NULL; + + st = p9_client_getattr_dotl(fid, P9_STATS_BASIC | P9_STATS_GEN); + if (IS_ERR(st)) + return ERR_CAST(st); + + inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st, new); + kfree(st); + return inode; +} + +struct dotl_openflag_map { + int open_flag; + int dotl_flag; +}; + +static int v9fs_mapped_dotl_flags(int flags) +{ + int i; + int rflags = 0; + struct dotl_openflag_map dotl_oflag_map[] = { + { O_CREAT, P9_DOTL_CREATE }, + { O_EXCL, P9_DOTL_EXCL }, + { O_NOCTTY, P9_DOTL_NOCTTY }, + { O_APPEND, P9_DOTL_APPEND }, + { O_NONBLOCK, P9_DOTL_NONBLOCK }, + { O_DSYNC, P9_DOTL_DSYNC }, + { FASYNC, P9_DOTL_FASYNC }, + { O_DIRECT, P9_DOTL_DIRECT }, + { O_LARGEFILE, P9_DOTL_LARGEFILE }, + { O_DIRECTORY, P9_DOTL_DIRECTORY }, + { O_NOFOLLOW, P9_DOTL_NOFOLLOW }, + { O_NOATIME, P9_DOTL_NOATIME }, + { O_CLOEXEC, P9_DOTL_CLOEXEC }, + { O_SYNC, P9_DOTL_SYNC}, + }; + for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) { + if (flags & dotl_oflag_map[i].open_flag) + rflags |= dotl_oflag_map[i].dotl_flag; + } + return rflags; +} + +/** + * v9fs_open_to_dotl_flags- convert Linux specific open flags to + * plan 9 open flag. + * @flags: flags to convert + */ +int v9fs_open_to_dotl_flags(int flags) +{ + int rflags = 0; + + /* + * We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY + * and P9_DOTL_NOACCESS + */ + rflags |= flags & O_ACCMODE; + rflags |= v9fs_mapped_dotl_flags(flags); + + return rflags; +} + +/** + * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol. 
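+ * (Implemented as a mknod with rdev == 0, see below.)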
+ * @dir: directory inode that is being created + * @dentry: dentry that is being deleted + * @mode: create permissions + * + */ + +static int +v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode, + bool excl) +{ + return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0); +} + +static int +v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, + struct file *file, unsigned flags, umode_t omode, + int *opened) +{ + int err = 0; + kgid_t gid; + umode_t mode; + char *name = NULL; + struct p9_qid qid; + struct inode *inode; + struct p9_fid *fid = NULL; + struct v9fs_inode *v9inode; + struct p9_fid *dfid, *ofid, *inode_fid; + struct v9fs_session_info *v9ses; + struct posix_acl *pacl = NULL, *dacl = NULL; + struct dentry *res = NULL; + + if (d_unhashed(dentry)) { + res = v9fs_vfs_lookup(dir, dentry, 0); + if (IS_ERR(res)) + return PTR_ERR(res); + + if (res) + dentry = res; + } + + /* Only creates */ + if (!(flags & O_CREAT)) + return finish_no_open(file, res); + else if (dentry->d_inode) { + if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) + return -EEXIST; + else + return finish_no_open(file, res); + } + + v9ses = v9fs_inode2v9ses(dir); + + name = (char *) dentry->d_name.name; + p9_debug(P9_DEBUG_VFS, "name:%s flags:0x%x mode:0x%hx\n", + name, flags, omode); + + dfid = v9fs_fid_lookup(dentry->d_parent); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + goto out; + } + + /* clone a fid to use for creation */ + ofid = p9_client_walk(dfid, 0, NULL, 1); + if (IS_ERR(ofid)) { + err = PTR_ERR(ofid); + p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); + goto out; + } + + gid = v9fs_get_fsgid_for_create(dir); + + mode = omode; + /* Update mode based on ACL value */ + err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); + if (err) { + p9_debug(P9_DEBUG_VFS, "Failed to get acl values in creat %d\n", + err); + goto error; + } + err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags), + mode, gid, &qid); + if (err < 0) { + p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n", + err); + goto error; + } + v9fs_invalidate_inode_attr(dir); + + /* instantiate inode and assign the unopened fid to the dentry */ + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); + fid = NULL; + goto error; + } + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err); + goto error; + } + /* Now set the ACL based on the default value */ + v9fs_set_create_acl(inode, fid, dacl, pacl); + + v9fs_fid_add(dentry, fid); + d_instantiate(dentry, inode); + + v9inode = V9FS_I(inode); + mutex_lock(&v9inode->v_mutex); + if (v9ses->cache && !v9inode->writeback_fid && + ((flags & O_ACCMODE) != O_RDONLY)) { + /* + * clone a fid and add it to writeback_fid + * we do it during open time instead of + * page dirty time via write_begin/page_mkwrite + * because we want write after unlink usecase + * to work. 
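+ * The fid is stashed on the inode, so later flushes no longer
+ * depend on the (possibly already unlinked) path.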
+ */ + inode_fid = v9fs_writeback_fid(dentry); + if (IS_ERR(inode_fid)) { + err = PTR_ERR(inode_fid); + mutex_unlock(&v9inode->v_mutex); + goto err_clunk_old_fid; + } + v9inode->writeback_fid = (void *) inode_fid; + } + mutex_unlock(&v9inode->v_mutex); + /* Since we are opening a file, assign the open fid to the file */ + err = finish_open(file, dentry, generic_file_open, opened); + if (err) + goto err_clunk_old_fid; + file->private_data = ofid; +#ifdef CONFIG_9P_FSCACHE + if (v9ses->cache) + v9fs_cache_inode_set_cookie(inode, file); +#endif + *opened |= FILE_CREATED; +out: + v9fs_put_acl(dacl, pacl); + dput(res); + return err; + +error: + if (fid) + p9_client_clunk(fid); +err_clunk_old_fid: + if (ofid) + p9_client_clunk(ofid); + goto out; +} + +/** + * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory + * @dir: inode that is being unlinked + * @dentry: dentry that is being unlinked + * @mode: mode for new directory + * + */ + +static int v9fs_vfs_mkdir_dotl(struct inode *dir, + struct dentry *dentry, umode_t omode) +{ + int err; + struct v9fs_session_info *v9ses; + struct p9_fid *fid = NULL, *dfid = NULL; + kgid_t gid; + char *name; + umode_t mode; + struct inode *inode; + struct p9_qid qid; + struct dentry *dir_dentry; + struct posix_acl *dacl = NULL, *pacl = NULL; + + p9_debug(P9_DEBUG_VFS, "name %s\n", dentry->d_name.name); + err = 0; + v9ses = v9fs_inode2v9ses(dir); + + omode |= S_IFDIR; + if (dir->i_mode & S_ISGID) + omode |= S_ISGID; + + dir_dentry = dentry->d_parent; + dfid = v9fs_fid_lookup(dir_dentry); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + dfid = NULL; + goto error; + } + + gid = v9fs_get_fsgid_for_create(dir); + mode = omode; + /* Update mode based on ACL value */ + err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); + if (err) { + p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mkdir %d\n", + err); + goto error; + } + name = (char *) dentry->d_name.name; + err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid); + if (err < 0) + goto error; + + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", + err); + fid = NULL; + goto error; + } + + /* instantiate inode and assign the unopened fid to the dentry */ + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", + err); + goto error; + } + v9fs_fid_add(dentry, fid); + v9fs_set_create_acl(inode, fid, dacl, pacl); + d_instantiate(dentry, inode); + fid = NULL; + err = 0; + } else { + /* + * Not in cached mode. No need to populate + * inode with stat. 
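+ * (a later getattr will query the server directly).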
We need to get an inode + * so that we can set the acl with dentry + */ + inode = v9fs_get_inode(dir->i_sb, mode, 0); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto error; + } + v9fs_set_create_acl(inode, fid, dacl, pacl); + d_instantiate(dentry, inode); + } + inc_nlink(dir); + v9fs_invalidate_inode_attr(dir); +error: + if (fid) + p9_client_clunk(fid); + v9fs_put_acl(dacl, pacl); + return err; +} + +static int +v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat) +{ + int err; + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + struct p9_stat_dotl *st; + + p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry); + err = -EPERM; + v9ses = v9fs_dentry2v9ses(dentry); + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + generic_fillattr(dentry->d_inode, stat); + return 0; + } + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + /* Ask for all the fields in stat structure. Server will return + * whatever it supports + */ + + st = p9_client_getattr_dotl(fid, P9_STATS_ALL); + if (IS_ERR(st)) + return PTR_ERR(st); + + v9fs_stat2inode_dotl(st, dentry->d_inode); + generic_fillattr(dentry->d_inode, stat); + /* Change block size to what the server returned */ + stat->blksize = st->st_blksize; + + kfree(st); + return 0; +} + +/* + * Attribute flags. + */ +#define P9_ATTR_MODE (1 << 0) +#define P9_ATTR_UID (1 << 1) +#define P9_ATTR_GID (1 << 2) +#define P9_ATTR_SIZE (1 << 3) +#define P9_ATTR_ATIME (1 << 4) +#define P9_ATTR_MTIME (1 << 5) +#define P9_ATTR_CTIME (1 << 6) +#define P9_ATTR_ATIME_SET (1 << 7) +#define P9_ATTR_MTIME_SET (1 << 8) + +struct dotl_iattr_map { + int iattr_valid; + int p9_iattr_valid; +}; + +static int v9fs_mapped_iattr_valid(int iattr_valid) +{ + int i; + int p9_iattr_valid = 0; + struct dotl_iattr_map dotl_iattr_map[] = { + { ATTR_MODE, P9_ATTR_MODE }, + { ATTR_UID, P9_ATTR_UID }, + { ATTR_GID, P9_ATTR_GID }, + { ATTR_SIZE, P9_ATTR_SIZE }, + { ATTR_ATIME, P9_ATTR_ATIME }, + { ATTR_MTIME, P9_ATTR_MTIME }, + { ATTR_CTIME, P9_ATTR_CTIME }, + { ATTR_ATIME_SET, P9_ATTR_ATIME_SET }, + { ATTR_MTIME_SET, P9_ATTR_MTIME_SET }, + }; + for (i = 0; i < ARRAY_SIZE(dotl_iattr_map); i++) { + if (iattr_valid & dotl_iattr_map[i].iattr_valid) + p9_iattr_valid |= dotl_iattr_map[i].p9_iattr_valid; + } + return p9_iattr_valid; +} + +/** + * v9fs_vfs_setattr_dotl - set file metadata + * @dentry: file whose metadata to set + * @iattr: metadata assignment structure + * + */ + +int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) +{ + int retval; + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + struct p9_iattr_dotl p9attr; + struct inode *inode = dentry->d_inode; + + p9_debug(P9_DEBUG_VFS, "\n"); + + retval = inode_change_ok(inode, iattr); + if (retval) + return retval; + + p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid); + p9attr.mode = iattr->ia_mode; + p9attr.uid = iattr->ia_uid; + p9attr.gid = iattr->ia_gid; + p9attr.size = iattr->ia_size; + p9attr.atime_sec = iattr->ia_atime.tv_sec; + p9attr.atime_nsec = iattr->ia_atime.tv_nsec; + p9attr.mtime_sec = iattr->ia_mtime.tv_sec; + p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; + + retval = -EPERM; + v9ses = v9fs_dentry2v9ses(dentry); + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + /* Write all dirty data */ + if (S_ISREG(inode->i_mode)) + filemap_write_and_wait(inode->i_mapping); + + retval = p9_client_setattr(fid, &p9attr); + if (retval < 0) + return retval; + + if ((iattr->ia_valid & ATTR_SIZE) && + 
iattr->ia_size != i_size_read(inode)) + truncate_setsize(inode, iattr->ia_size); + + v9fs_invalidate_inode_attr(inode); + setattr_copy(inode, iattr); + mark_inode_dirty(inode); + if (iattr->ia_valid & ATTR_MODE) { + /* We also want to update ACL when we update mode bits */ + retval = v9fs_acl_chmod(inode, fid); + if (retval < 0) + return retval; + } + return 0; +} + +/** + * v9fs_stat2inode_dotl - populate an inode structure with stat info + * @stat: stat structure + * @inode: inode to populate + * @sb: superblock of filesystem + * + */ + +void +v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode) +{ + umode_t mode; + struct v9fs_inode *v9inode = V9FS_I(inode); + + if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) { + inode->i_atime.tv_sec = stat->st_atime_sec; + inode->i_atime.tv_nsec = stat->st_atime_nsec; + inode->i_mtime.tv_sec = stat->st_mtime_sec; + inode->i_mtime.tv_nsec = stat->st_mtime_nsec; + inode->i_ctime.tv_sec = stat->st_ctime_sec; + inode->i_ctime.tv_nsec = stat->st_ctime_nsec; + inode->i_uid = stat->st_uid; + inode->i_gid = stat->st_gid; + set_nlink(inode, stat->st_nlink); + + mode = stat->st_mode & S_IALLUGO; + mode |= inode->i_mode & ~S_IALLUGO; + inode->i_mode = mode; + + i_size_write(inode, stat->st_size); + inode->i_blocks = stat->st_blocks; + } else { + if (stat->st_result_mask & P9_STATS_ATIME) { + inode->i_atime.tv_sec = stat->st_atime_sec; + inode->i_atime.tv_nsec = stat->st_atime_nsec; + } + if (stat->st_result_mask & P9_STATS_MTIME) { + inode->i_mtime.tv_sec = stat->st_mtime_sec; + inode->i_mtime.tv_nsec = stat->st_mtime_nsec; + } + if (stat->st_result_mask & P9_STATS_CTIME) { + inode->i_ctime.tv_sec = stat->st_ctime_sec; + inode->i_ctime.tv_nsec = stat->st_ctime_nsec; + } + if (stat->st_result_mask & P9_STATS_UID) + inode->i_uid = stat->st_uid; + if (stat->st_result_mask & P9_STATS_GID) + inode->i_gid = stat->st_gid; + if (stat->st_result_mask & P9_STATS_NLINK) + set_nlink(inode, stat->st_nlink); + if (stat->st_result_mask & P9_STATS_MODE) { + inode->i_mode = stat->st_mode; + if ((S_ISBLK(inode->i_mode)) || + (S_ISCHR(inode->i_mode))) + init_special_inode(inode, inode->i_mode, + inode->i_rdev); + } + if (stat->st_result_mask & P9_STATS_RDEV) + inode->i_rdev = new_decode_dev(stat->st_rdev); + if (stat->st_result_mask & P9_STATS_SIZE) + i_size_write(inode, stat->st_size); + if (stat->st_result_mask & P9_STATS_BLOCKS) + inode->i_blocks = stat->st_blocks; + } + if (stat->st_result_mask & P9_STATS_GEN) + inode->i_generation = stat->st_gen; + + /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION + * because the inode structure does not have fields for them. + */ + v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR; +} + +static int +v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry, + const char *symname) +{ + int err; + kgid_t gid; + char *name; + struct p9_qid qid; + struct inode *inode; + struct p9_fid *dfid; + struct p9_fid *fid = NULL; + struct v9fs_session_info *v9ses; + + name = (char *) dentry->d_name.name; + p9_debug(P9_DEBUG_VFS, "%lu,%s,%s\n", dir->i_ino, name, symname); + v9ses = v9fs_inode2v9ses(dir); + + dfid = v9fs_fid_lookup(dentry->d_parent); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + return err; + } + + gid = v9fs_get_fsgid_for_create(dir); + + /* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. 
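+ * (contrast the atomic-open path above, which clones a fid first).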
*/ + err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid); + + if (err < 0) { + p9_debug(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err); + goto error; + } + + v9fs_invalidate_inode_attr(dir); + if (v9ses->cache) { + /* Now walk from the parent so we can get an unopened fid. */ + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", + err); + fid = NULL; + goto error; + } + + /* instantiate inode and assign the unopened fid to dentry */ + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", + err); + goto error; + } + v9fs_fid_add(dentry, fid); + d_instantiate(dentry, inode); + fid = NULL; + err = 0; + } else { + /* Not in cached mode. No need to populate inode with stat */ + inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto error; + } + d_instantiate(dentry, inode); + } + +error: + if (fid) + p9_client_clunk(fid); + + return err; +} + +/** + * v9fs_vfs_link_dotl - create a hardlink for dotl + * @old_dentry: dentry for file to link to + * @dir: inode destination for new link + * @dentry: dentry for link + * + */ + +static int +v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir, + struct dentry *dentry) +{ + int err; + char *name; + struct dentry *dir_dentry; + struct p9_fid *dfid, *oldfid; + struct v9fs_session_info *v9ses; + + p9_debug(P9_DEBUG_VFS, "dir ino: %lu, old_name: %s, new_name: %s\n", + dir->i_ino, old_dentry->d_name.name, dentry->d_name.name); + + v9ses = v9fs_inode2v9ses(dir); + dir_dentry = dentry->d_parent; + dfid = v9fs_fid_lookup(dir_dentry); + if (IS_ERR(dfid)) + return PTR_ERR(dfid); + + oldfid = v9fs_fid_lookup(old_dentry); + if (IS_ERR(oldfid)) + return PTR_ERR(oldfid); + + name = (char *) dentry->d_name.name; + + err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name); + + if (err < 0) { + p9_debug(P9_DEBUG_VFS, "p9_client_link failed %d\n", err); + return err; + } + + v9fs_invalidate_inode_attr(dir); + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + /* Get the latest stat info from server. 
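+ * The link bumped st_nlink on the server, so refresh the cached
+ * inode before instantiating the new dentry.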
*/ + struct p9_fid *fid; + fid = v9fs_fid_lookup(old_dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + v9fs_refresh_inode_dotl(fid, old_dentry->d_inode); + } + ihold(old_dentry->d_inode); + d_instantiate(dentry, old_dentry->d_inode); + + return err; +} + +/** + * v9fs_vfs_mknod_dotl - create a special file + * @dir: inode destination for new link + * @dentry: dentry for file + * @mode: mode for creation + * @rdev: device associated with special file + * + */ +static int +v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode, + dev_t rdev) +{ + int err; + kgid_t gid; + char *name; + umode_t mode; + struct v9fs_session_info *v9ses; + struct p9_fid *fid = NULL, *dfid = NULL; + struct inode *inode; + struct p9_qid qid; + struct dentry *dir_dentry; + struct posix_acl *dacl = NULL, *pacl = NULL; + + p9_debug(P9_DEBUG_VFS, " %lu,%s mode: %hx MAJOR: %u MINOR: %u\n", + dir->i_ino, dentry->d_name.name, omode, + MAJOR(rdev), MINOR(rdev)); + + if (!new_valid_dev(rdev)) + return -EINVAL; + + v9ses = v9fs_inode2v9ses(dir); + dir_dentry = dentry->d_parent; + dfid = v9fs_fid_lookup(dir_dentry); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + dfid = NULL; + goto error; + } + + gid = v9fs_get_fsgid_for_create(dir); + mode = omode; + /* Update mode based on ACL value */ + err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); + if (err) { + p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mknod %d\n", + err); + goto error; + } + name = (char *) dentry->d_name.name; + + err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid); + if (err < 0) + goto error; + + v9fs_invalidate_inode_attr(dir); + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", + err); + fid = NULL; + goto error; + } + + /* instantiate inode and assign the unopened fid to the dentry */ + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", + err); + goto error; + } + v9fs_set_create_acl(inode, fid, dacl, pacl); + v9fs_fid_add(dentry, fid); + d_instantiate(dentry, inode); + fid = NULL; + err = 0; + } else { + /* + * Not in cached mode. No need to populate inode with stat. 
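+ * (attributes are re-fetched from the server on demand).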
+ * socket syscall returns a fd, so we need instantiate + */ + inode = v9fs_get_inode(dir->i_sb, mode, rdev); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto error; + } + v9fs_set_create_acl(inode, fid, dacl, pacl); + d_instantiate(dentry, inode); + } +error: + if (fid) + p9_client_clunk(fid); + v9fs_put_acl(dacl, pacl); + return err; +} + +/** + * v9fs_vfs_follow_link_dotl - follow a symlink path + * @dentry: dentry for symlink + * @nd: nameidata + * + */ + +static void * +v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd) +{ + int retval; + struct p9_fid *fid; + char *link = __getname(); + char *target; + + p9_debug(P9_DEBUG_VFS, "%s\n", dentry->d_name.name); + + if (!link) { + link = ERR_PTR(-ENOMEM); + goto ndset; + } + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) { + __putname(link); + link = ERR_CAST(fid); + goto ndset; + } + retval = p9_client_readlink(fid, &target); + if (!retval) { + strcpy(link, target); + kfree(target); + goto ndset; + } + __putname(link); + link = ERR_PTR(retval); +ndset: + nd_set_link(nd, link); + return NULL; +} + +int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode) +{ + loff_t i_size; + struct p9_stat_dotl *st; + struct v9fs_session_info *v9ses; + + v9ses = v9fs_inode2v9ses(inode); + st = p9_client_getattr_dotl(fid, P9_STATS_ALL); + if (IS_ERR(st)) + return PTR_ERR(st); + /* + * Don't update inode if the file type is different + */ + if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT)) + goto out; + + spin_lock(&inode->i_lock); + /* + * We don't want to refresh inode->i_size, + * because we may have cached data + */ + i_size = inode->i_size; + v9fs_stat2inode_dotl(st, inode); + if (v9ses->cache) + inode->i_size = i_size; + spin_unlock(&inode->i_lock); +out: + kfree(st); + return 0; +} + +const struct inode_operations v9fs_dir_inode_operations_dotl = { + .create = v9fs_vfs_create_dotl, + .atomic_open = v9fs_vfs_atomic_open_dotl, + .lookup = v9fs_vfs_lookup, + .link = v9fs_vfs_link_dotl, + .symlink = v9fs_vfs_symlink_dotl, + .unlink = v9fs_vfs_unlink, + .mkdir = v9fs_vfs_mkdir_dotl, + .rmdir = v9fs_vfs_rmdir, + .mknod = v9fs_vfs_mknod_dotl, + .rename = v9fs_vfs_rename, + .getattr = v9fs_vfs_getattr_dotl, + .setattr = v9fs_vfs_setattr_dotl, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .removexattr = generic_removexattr, + .listxattr = v9fs_listxattr, + .get_acl = v9fs_iop_get_acl, +}; + +const struct inode_operations v9fs_file_inode_operations_dotl = { + .getattr = v9fs_vfs_getattr_dotl, + .setattr = v9fs_vfs_setattr_dotl, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .removexattr = generic_removexattr, + .listxattr = v9fs_listxattr, + .get_acl = v9fs_iop_get_acl, +}; + +const struct inode_operations v9fs_symlink_inode_operations_dotl = { + .readlink = generic_readlink, + .follow_link = v9fs_vfs_follow_link_dotl, + .put_link = v9fs_vfs_put_link, + .getattr = v9fs_vfs_getattr_dotl, + .setattr = v9fs_vfs_setattr_dotl, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, + .removexattr = generic_removexattr, + .listxattr = v9fs_listxattr, +}; diff --git a/addons/9p/src/3.10.108/vfs_super.c b/addons/9p/src/3.10.108/vfs_super.c new file mode 100644 index 00000000..2756dcd5 --- /dev/null +++ b/addons/9p/src/3.10.108/vfs_super.c @@ -0,0 +1,368 @@ +/* + * linux/fs/9p/vfs_super.c + * + * This file contians superblock ops for 9P2000. It is intended that + * you mount this file system on directories. 
+ * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" +#include "xattr.h" +#include "acl.h" + +static const struct super_operations v9fs_super_ops, v9fs_super_ops_dotl; + +/** + * v9fs_set_super - set the superblock + * @s: super block + * @data: file system specific data + * + */ + +static int v9fs_set_super(struct super_block *s, void *data) +{ + s->s_fs_info = data; + return set_anon_super(s, data); +} + +/** + * v9fs_fill_super - populate superblock with info + * @sb: superblock + * @v9ses: session information + * @flags: flags propagated from v9fs_mount() + * + */ + +static void +v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses, + int flags, void *data) +{ + sb->s_maxbytes = MAX_LFS_FILESIZE; + sb->s_blocksize_bits = fls(v9ses->maxdata - 1); + sb->s_blocksize = 1 << sb->s_blocksize_bits; + sb->s_magic = V9FS_MAGIC; + if (v9fs_proto_dotl(v9ses)) { + sb->s_op = &v9fs_super_ops_dotl; + sb->s_xattr = v9fs_xattr_handlers; + } else + sb->s_op = &v9fs_super_ops; + sb->s_bdi = &v9ses->bdi; + if (v9ses->cache) + sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE; + + sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME; + if (!v9ses->cache) + sb->s_flags |= MS_SYNCHRONOUS; + +#ifdef CONFIG_9P_FS_POSIX_ACL + if ((v9ses->flags & V9FS_ACL_MASK) == V9FS_POSIX_ACL) + sb->s_flags |= MS_POSIXACL; +#endif + + save_mount_options(sb, data); +} + +/** + * v9fs_mount - mount a superblock + * @fs_type: file system type + * @flags: mount flags + * @dev_name: device name that was mounted + * @data: mount options + * + */ + +static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) +{ + struct super_block *sb = NULL; + struct inode *inode = NULL; + struct dentry *root = NULL; + struct v9fs_session_info *v9ses = NULL; + umode_t mode = S_IRWXUGO | S_ISVTX; + struct p9_fid *fid; + int retval = 0; + + p9_debug(P9_DEBUG_VFS, "\n"); + + v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL); + if (!v9ses) + return ERR_PTR(-ENOMEM); + + fid = v9fs_session_init(v9ses, dev_name, data); + if (IS_ERR(fid)) { + retval = PTR_ERR(fid); + /* + * we need to call session_close to tear down some + * of the data structure setup by session_init + */ + goto close_session; + } + + sb = sget(fs_type, NULL, v9fs_set_super, flags, v9ses); + if (IS_ERR(sb)) { + retval = PTR_ERR(sb); + goto clunk_fid; + } + v9fs_fill_super(sb, v9ses, flags, data); + + if (v9ses->cache) + sb->s_d_op = &v9fs_cached_dentry_operations; + else + sb->s_d_op = &v9fs_dentry_operations; + + inode = v9fs_get_inode(sb, S_IFDIR | mode, 
0); + if (IS_ERR(inode)) { + retval = PTR_ERR(inode); + goto release_sb; + } + + root = d_make_root(inode); + if (!root) { + retval = -ENOMEM; + goto release_sb; + } + sb->s_root = root; + if (v9fs_proto_dotl(v9ses)) { + struct p9_stat_dotl *st = NULL; + st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); + if (IS_ERR(st)) { + retval = PTR_ERR(st); + goto release_sb; + } + root->d_inode->i_ino = v9fs_qid2ino(&st->qid); + v9fs_stat2inode_dotl(st, root->d_inode); + kfree(st); + } else { + struct p9_wstat *st = NULL; + st = p9_client_stat(fid); + if (IS_ERR(st)) { + retval = PTR_ERR(st); + goto release_sb; + } + + root->d_inode->i_ino = v9fs_qid2ino(&st->qid); + v9fs_stat2inode(st, root->d_inode, sb); + + p9stat_free(st); + kfree(st); + } + retval = v9fs_get_acl(inode, fid); + if (retval) + goto release_sb; + v9fs_fid_add(root, fid); + + p9_debug(P9_DEBUG_VFS, " simple set mount, return 0\n"); + return dget(sb->s_root); + +clunk_fid: + p9_client_clunk(fid); +close_session: + v9fs_session_close(v9ses); + kfree(v9ses); + return ERR_PTR(retval); + +release_sb: + /* + * we will do the session_close and root dentry release + * in the below call. But we need to clunk fid, because we haven't + * attached the fid to dentry so it won't get clunked + * automatically. + */ + p9_client_clunk(fid); + deactivate_locked_super(sb); + return ERR_PTR(retval); +} + +/** + * v9fs_kill_super - Kill Superblock + * @s: superblock + * + */ + +static void v9fs_kill_super(struct super_block *s) +{ + struct v9fs_session_info *v9ses = s->s_fs_info; + + p9_debug(P9_DEBUG_VFS, " %p\n", s); + + kill_anon_super(s); + + v9fs_session_cancel(v9ses); + v9fs_session_close(v9ses); + kfree(v9ses); + s->s_fs_info = NULL; + p9_debug(P9_DEBUG_VFS, "exiting kill_super\n"); +} + +static void +v9fs_umount_begin(struct super_block *sb) +{ + struct v9fs_session_info *v9ses; + + v9ses = sb->s_fs_info; + v9fs_session_begin_cancel(v9ses); +} + +static int v9fs_statfs(struct dentry *dentry, struct kstatfs *buf) +{ + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + struct p9_rstatfs rs; + int res; + + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) { + res = PTR_ERR(fid); + goto done; + } + + v9ses = v9fs_dentry2v9ses(dentry); + if (v9fs_proto_dotl(v9ses)) { + res = p9_client_statfs(fid, &rs); + if (res == 0) { + buf->f_type = rs.type; + buf->f_bsize = rs.bsize; + buf->f_blocks = rs.blocks; + buf->f_bfree = rs.bfree; + buf->f_bavail = rs.bavail; + buf->f_files = rs.files; + buf->f_ffree = rs.ffree; + buf->f_fsid.val[0] = rs.fsid & 0xFFFFFFFFUL; + buf->f_fsid.val[1] = (rs.fsid >> 32) & 0xFFFFFFFFUL; + buf->f_namelen = rs.namelen; + } + if (res != -ENOSYS) + goto done; + } + res = simple_statfs(dentry, buf); +done: + return res; +} + +static int v9fs_drop_inode(struct inode *inode) +{ + struct v9fs_session_info *v9ses; + v9ses = v9fs_inode2v9ses(inode); + if (v9ses->cache) + return generic_drop_inode(inode); + /* + * in case of non cached mode always drop the + * the inode because we want the inode attribute + * to always match that on the server. + */ + return 1; +} + +static int v9fs_write_inode(struct inode *inode, + struct writeback_control *wbc) +{ + int ret; + struct p9_wstat wstat; + struct v9fs_inode *v9inode; + /* + * send an fsync request to server irrespective of + * wbc->sync_mode. 
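+ * For plain 9P2000 a blank wstat conventionally asks the server to
+ * commit the file to stable storage, hence v9fs_blank_wstat() below.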
+ */ + p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); + v9inode = V9FS_I(inode); + if (!v9inode->writeback_fid) + return 0; + v9fs_blank_wstat(&wstat); + + ret = p9_client_wstat(v9inode->writeback_fid, &wstat); + if (ret < 0) { + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); + return ret; + } + return 0; +} + +static int v9fs_write_inode_dotl(struct inode *inode, + struct writeback_control *wbc) +{ + int ret; + struct v9fs_inode *v9inode; + /* + * send an fsync request to server irrespective of + * wbc->sync_mode. + */ + p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode); + v9inode = V9FS_I(inode); + if (!v9inode->writeback_fid) + return 0; + ret = p9_client_fsync(v9inode->writeback_fid, 0); + if (ret < 0) { + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); + return ret; + } + return 0; +} + +static const struct super_operations v9fs_super_ops = { + .alloc_inode = v9fs_alloc_inode, + .destroy_inode = v9fs_destroy_inode, + .statfs = simple_statfs, + .evict_inode = v9fs_evict_inode, + .show_options = generic_show_options, + .umount_begin = v9fs_umount_begin, + .write_inode = v9fs_write_inode, +}; + +static const struct super_operations v9fs_super_ops_dotl = { + .alloc_inode = v9fs_alloc_inode, + .destroy_inode = v9fs_destroy_inode, + .statfs = v9fs_statfs, + .drop_inode = v9fs_drop_inode, + .evict_inode = v9fs_evict_inode, + .show_options = generic_show_options, + .umount_begin = v9fs_umount_begin, + .write_inode = v9fs_write_inode_dotl, +}; + +struct file_system_type v9fs_fs_type = { + .name = "9p", + .mount = v9fs_mount, + .kill_sb = v9fs_kill_super, + .owner = THIS_MODULE, + .fs_flags = FS_RENAME_DOES_D_MOVE, +}; +MODULE_ALIAS_FS("9p"); diff --git a/addons/9p/src/3.10.108/xattr.c b/addons/9p/src/3.10.108/xattr.c new file mode 100644 index 00000000..c45e016b --- /dev/null +++ b/addons/9p/src/3.10.108/xattr.c @@ -0,0 +1,175 @@ +/* + * Copyright IBM Corporation, 2010 + * Author Aneesh Kumar K.V + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + */ + +#include +#include +#include +#include +#include + +#include "fid.h" +#include "xattr.h" + +ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name, + void *buffer, size_t buffer_size) +{ + ssize_t retval; + int msize, read_count; + u64 offset = 0, attr_size; + struct p9_fid *attr_fid; + + attr_fid = p9_client_xattrwalk(fid, name, &attr_size); + if (IS_ERR(attr_fid)) { + retval = PTR_ERR(attr_fid); + p9_debug(P9_DEBUG_VFS, "p9_client_attrwalk failed %zd\n", + retval); + attr_fid = NULL; + goto error; + } + if (!buffer_size) { + /* request to get the attr_size */ + retval = attr_size; + goto error; + } + if (attr_size > buffer_size) { + retval = -ERANGE; + goto error; + } + msize = attr_fid->clnt->msize; + while (attr_size) { + if (attr_size > (msize - P9_IOHDRSZ)) + read_count = msize - P9_IOHDRSZ; + else + read_count = attr_size; + read_count = p9_client_read(attr_fid, ((char *)buffer)+offset, + NULL, offset, read_count); + if (read_count < 0) { + /* error in xattr read */ + retval = read_count; + goto error; + } + offset += read_count; + attr_size -= read_count; + } + /* Total read xattr bytes */ + retval = offset; +error: + if (attr_fid) + p9_client_clunk(attr_fid); + return retval; + +} + + +/* + * v9fs_xattr_get() + * + * Copy an extended attribute into the buffer + * provided, or compute the buffer size required. + * Buffer is NULL to compute the size of the buffer required. + * + * Returns a negative error number on failure, or the number of bytes + * used / required on success. + */ +ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name, + void *buffer, size_t buffer_size) +{ + struct p9_fid *fid; + + p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n", + name, buffer_size); + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + return v9fs_fid_xattr_get(fid, name, buffer, buffer_size); +} + +/* + * v9fs_xattr_set() + * + * Create, replace or remove an extended attribute for this inode. Buffer + * is NULL to remove an existing extended attribute, and non-NULL to + * either replace an existing extended attribute, or create a new extended + * attribute. The flags XATTR_REPLACE and XATTR_CREATE + * specify that an extended attribute must exist and must not exist + * previous to the call, respectively. + * + * Returns 0, or a negative error number on failure. 
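+ * Large values are written in msize-sized chunks, so a value bigger
+ * than the transport's negotiated msize still works.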
+ */ +int v9fs_xattr_set(struct dentry *dentry, const char *name, + const void *value, size_t value_len, int flags) +{ + struct p9_fid *fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + return v9fs_fid_xattr_set(fid, name, value, value_len, flags); +} + +int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name, + const void *value, size_t value_len, int flags) +{ + u64 offset = 0; + int retval, msize, write_count; + + p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n", + name, value_len, flags); + + /* Clone it */ + fid = p9_client_walk(fid, 0, NULL, 1); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + /* + * On success fid points to xattr + */ + retval = p9_client_xattrcreate(fid, name, value_len, flags); + if (retval < 0) { + p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n", + retval); + p9_client_clunk(fid); + return retval; + } + msize = fid->clnt->msize; + while (value_len) { + if (value_len > (msize - P9_IOHDRSZ)) + write_count = msize - P9_IOHDRSZ; + else + write_count = value_len; + write_count = p9_client_write(fid, ((char *)value)+offset, + NULL, offset, write_count); + if (write_count < 0) { + /* error in xattr write */ + retval = write_count; + break; + } + offset += write_count; + value_len -= write_count; + } + return p9_client_clunk(fid); +} + +ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) +{ + return v9fs_xattr_get(dentry, NULL, buffer, buffer_size); +} + +const struct xattr_handler *v9fs_xattr_handlers[] = { + &v9fs_xattr_user_handler, +#ifdef CONFIG_9P_FS_POSIX_ACL + &v9fs_xattr_acl_access_handler, + &v9fs_xattr_acl_default_handler, +#endif + NULL +}; diff --git a/addons/9p/src/3.10.108/xattr.h b/addons/9p/src/3.10.108/xattr.h new file mode 100644 index 00000000..eec348a3 --- /dev/null +++ b/addons/9p/src/3.10.108/xattr.h @@ -0,0 +1,35 @@ +/* + * Copyright IBM Corporation, 2010 + * Author Aneesh Kumar K.V + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + */ +#ifndef FS_9P_XATTR_H +#define FS_9P_XATTR_H + +#include +#include +#include + +extern const struct xattr_handler *v9fs_xattr_handlers[]; +extern struct xattr_handler v9fs_xattr_user_handler; +extern const struct xattr_handler v9fs_xattr_acl_access_handler; +extern const struct xattr_handler v9fs_xattr_acl_default_handler; + +extern ssize_t v9fs_fid_xattr_get(struct p9_fid *, const char *, + void *, size_t); +extern ssize_t v9fs_xattr_get(struct dentry *, const char *, + void *, size_t); +extern int v9fs_fid_xattr_set(struct p9_fid *, const char *, + const void *, size_t, int); +extern int v9fs_xattr_set(struct dentry *, const char *, + const void *, size_t, int); +extern ssize_t v9fs_listxattr(struct dentry *, char *, size_t); +#endif /* FS_9P_XATTR_H */ diff --git a/addons/9p/src/3.10.108/xattr_user.c b/addons/9p/src/3.10.108/xattr_user.c new file mode 100644 index 00000000..d0b701b7 --- /dev/null +++ b/addons/9p/src/3.10.108/xattr_user.c @@ -0,0 +1,80 @@ +/* + * Copyright IBM Corporation, 2010 + * Author Aneesh Kumar K.V + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + */ + + +#include +#include +#include +#include +#include "xattr.h" + +static int v9fs_xattr_user_get(struct dentry *dentry, const char *name, + void *buffer, size_t size, int type) +{ + int retval; + char *full_name; + size_t name_len; + size_t prefix_len = XATTR_USER_PREFIX_LEN; + + if (name == NULL) + return -EINVAL; + + if (strcmp(name, "") == 0) + return -EINVAL; + + name_len = strlen(name); + full_name = kmalloc(prefix_len + name_len + 1 , GFP_KERNEL); + if (!full_name) + return -ENOMEM; + memcpy(full_name, XATTR_USER_PREFIX, prefix_len); + memcpy(full_name+prefix_len, name, name_len); + full_name[prefix_len + name_len] = '\0'; + + retval = v9fs_xattr_get(dentry, full_name, buffer, size); + kfree(full_name); + return retval; +} + +static int v9fs_xattr_user_set(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, int type) +{ + int retval; + char *full_name; + size_t name_len; + size_t prefix_len = XATTR_USER_PREFIX_LEN; + + if (name == NULL) + return -EINVAL; + + if (strcmp(name, "") == 0) + return -EINVAL; + + name_len = strlen(name); + full_name = kmalloc(prefix_len + name_len + 1 , GFP_KERNEL); + if (!full_name) + return -ENOMEM; + memcpy(full_name, XATTR_USER_PREFIX, prefix_len); + memcpy(full_name + prefix_len, name, name_len); + full_name[prefix_len + name_len] = '\0'; + + retval = v9fs_xattr_set(dentry, full_name, value, size, flags); + kfree(full_name); + return retval; +} + +struct xattr_handler v9fs_xattr_user_handler = { + .prefix = XATTR_USER_PREFIX, + .get = v9fs_xattr_user_get, + .set = v9fs_xattr_user_set, +}; diff --git a/addons/9p/src/4.4.180/Makefile b/addons/9p/src/4.4.180/Makefile new file mode 100644 index 00000000..2fb0605e --- /dev/null +++ b/addons/9p/src/4.4.180/Makefile @@ -0,0 +1,35 @@ +obj-m := 9p.o + +9p-objs := \ + vfs_super.o \ + vfs_inode.o \ + vfs_inode_dotl.o \ + vfs_addr.o \ + vfs_file.o \ + vfs_dir.o \ + vfs_dentry.o \ + v9fs.o \ + fid.o \ + xattr.o + +9p-y += cache.o +9p-n += acl.o + +obj-m := 9pnet.o +obj-m += 9pnet_virtio.o +obj-n += 9pnet_rdma.o + +9pnet-objs := \ + mod.o \ + 
client.o \ + error.o \ + util.o \ + protocol.o \ + trans_fd.o \ + trans_common.o \ + +9pnet_virtio-objs := \ + trans_virtio.o + +9pnet_rdma-objs := \ + trans_rdma.o diff --git a/addons/9p/src/4.4.180/acl.c b/addons/9p/src/4.4.180/acl.c new file mode 100644 index 00000000..c30c6cea --- /dev/null +++ b/addons/9p/src/4.4.180/acl.c @@ -0,0 +1,338 @@ +/* + * Copyright IBM Corporation, 2010 + * Author Aneesh Kumar K.V + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include "xattr.h" +#include "acl.h" +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" + +static struct posix_acl *__v9fs_get_acl(struct p9_fid *fid, char *name) +{ + ssize_t size; + void *value = NULL; + struct posix_acl *acl = NULL; + + size = v9fs_fid_xattr_get(fid, name, NULL, 0); + if (size > 0) { + value = kzalloc(size, GFP_NOFS); + if (!value) + return ERR_PTR(-ENOMEM); + size = v9fs_fid_xattr_get(fid, name, value, size); + if (size > 0) { + acl = posix_acl_from_xattr(&init_user_ns, value, size); + if (IS_ERR(acl)) + goto err_out; + } + } else if (size == -ENODATA || size == 0 || + size == -ENOSYS || size == -EOPNOTSUPP) { + acl = NULL; + } else + acl = ERR_PTR(-EIO); + +err_out: + kfree(value); + return acl; +} + +int v9fs_get_acl(struct inode *inode, struct p9_fid *fid) +{ + int retval = 0; + struct posix_acl *pacl, *dacl; + struct v9fs_session_info *v9ses; + + v9ses = v9fs_inode2v9ses(inode); + if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) || + ((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) { + set_cached_acl(inode, ACL_TYPE_DEFAULT, NULL); + set_cached_acl(inode, ACL_TYPE_ACCESS, NULL); + return 0; + } + /* get the default/access acl values and cache them */ + dacl = __v9fs_get_acl(fid, POSIX_ACL_XATTR_DEFAULT); + pacl = __v9fs_get_acl(fid, POSIX_ACL_XATTR_ACCESS); + + if (!IS_ERR(dacl) && !IS_ERR(pacl)) { + set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl); + set_cached_acl(inode, ACL_TYPE_ACCESS, pacl); + } else + retval = -EIO; + + if (!IS_ERR(dacl)) + posix_acl_release(dacl); + + if (!IS_ERR(pacl)) + posix_acl_release(pacl); + + return retval; +} + +static struct posix_acl *v9fs_get_cached_acl(struct inode *inode, int type) +{ + struct posix_acl *acl; + /* + * 9p Always cache the acl value when + * instantiating the inode (v9fs_inode_from_fid) + */ + acl = get_cached_acl(inode, type); + BUG_ON(acl == ACL_NOT_CACHED); + return acl; +} + +struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type) +{ + struct v9fs_session_info *v9ses; + + v9ses = v9fs_inode2v9ses(inode); + if (((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) || + ((v9ses->flags & V9FS_ACL_MASK) != V9FS_POSIX_ACL)) { + /* + * On access = client and acl = on mode get the acl + * values from the server + */ + return NULL; + } + return v9fs_get_cached_acl(inode, type); + +} + +static int v9fs_set_acl(struct p9_fid *fid, int type, struct posix_acl *acl) +{ + int retval; + char *name; + size_t size; + void *buffer; + if (!acl) + return 0; + + /* Set a setxattr request to server */ + size = posix_acl_xattr_size(acl->a_count); + buffer = kmalloc(size, GFP_KERNEL); + if (!buffer) + return 
-ENOMEM; + retval = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); + if (retval < 0) + goto err_free_out; + switch (type) { + case ACL_TYPE_ACCESS: + name = POSIX_ACL_XATTR_ACCESS; + break; + case ACL_TYPE_DEFAULT: + name = POSIX_ACL_XATTR_DEFAULT; + break; + default: + BUG(); + } + retval = v9fs_fid_xattr_set(fid, name, buffer, size, 0); +err_free_out: + kfree(buffer); + return retval; +} + +int v9fs_acl_chmod(struct inode *inode, struct p9_fid *fid) +{ + int retval = 0; + struct posix_acl *acl; + + if (S_ISLNK(inode->i_mode)) + return -EOPNOTSUPP; + acl = v9fs_get_cached_acl(inode, ACL_TYPE_ACCESS); + if (acl) { + retval = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); + if (retval) + return retval; + set_cached_acl(inode, ACL_TYPE_ACCESS, acl); + retval = v9fs_set_acl(fid, ACL_TYPE_ACCESS, acl); + posix_acl_release(acl); + } + return retval; +} + +int v9fs_set_create_acl(struct inode *inode, struct p9_fid *fid, + struct posix_acl *dacl, struct posix_acl *acl) +{ + set_cached_acl(inode, ACL_TYPE_DEFAULT, dacl); + set_cached_acl(inode, ACL_TYPE_ACCESS, acl); + v9fs_set_acl(fid, ACL_TYPE_DEFAULT, dacl); + v9fs_set_acl(fid, ACL_TYPE_ACCESS, acl); + return 0; +} + +void v9fs_put_acl(struct posix_acl *dacl, + struct posix_acl *acl) +{ + posix_acl_release(dacl); + posix_acl_release(acl); +} + +int v9fs_acl_mode(struct inode *dir, umode_t *modep, + struct posix_acl **dpacl, struct posix_acl **pacl) +{ + int retval = 0; + umode_t mode = *modep; + struct posix_acl *acl = NULL; + + if (!S_ISLNK(mode)) { + acl = v9fs_get_cached_acl(dir, ACL_TYPE_DEFAULT); + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (!acl) + mode &= ~current_umask(); + } + if (acl) { + if (S_ISDIR(mode)) + *dpacl = posix_acl_dup(acl); + retval = __posix_acl_create(&acl, GFP_NOFS, &mode); + if (retval < 0) + return retval; + if (retval > 0) + *pacl = acl; + else + posix_acl_release(acl); + } + *modep = mode; + return 0; +} + +static int v9fs_xattr_get_acl(const struct xattr_handler *handler, + struct dentry *dentry, const char *name, + void *buffer, size_t size) +{ + struct v9fs_session_info *v9ses; + struct posix_acl *acl; + int error; + + if (strcmp(name, "") != 0) + return -EINVAL; + + v9ses = v9fs_dentry2v9ses(dentry); + /* + * We allow set/get/list of acl when access=client is not specified + */ + if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) + return v9fs_xattr_get(dentry, handler->prefix, buffer, size); + + acl = v9fs_get_cached_acl(d_inode(dentry), handler->flags); + if (IS_ERR(acl)) + return PTR_ERR(acl); + if (acl == NULL) + return -ENODATA; + error = posix_acl_to_xattr(&init_user_ns, acl, buffer, size); + posix_acl_release(acl); + + return error; +} + +static int v9fs_xattr_set_acl(const struct xattr_handler *handler, + struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) +{ + int retval; + struct posix_acl *acl; + struct v9fs_session_info *v9ses; + struct inode *inode = d_inode(dentry); + + if (strcmp(name, "") != 0) + return -EINVAL; + + v9ses = v9fs_dentry2v9ses(dentry); + /* + * set the attribute on the remote. Without even looking at the + * xattr value. 
We leave it to the server to validate + */ + if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) + return v9fs_xattr_set(dentry, handler->prefix, value, size, + flags); + + if (S_ISLNK(inode->i_mode)) + return -EOPNOTSUPP; + if (!inode_owner_or_capable(inode)) + return -EPERM; + if (value) { + /* update the cached acl value */ + acl = posix_acl_from_xattr(&init_user_ns, value, size); + if (IS_ERR(acl)) + return PTR_ERR(acl); + else if (acl) { + retval = posix_acl_valid(acl); + if (retval) + goto err_out; + } + } else + acl = NULL; + + switch (handler->flags) { + case ACL_TYPE_ACCESS: + if (acl) { + struct iattr iattr; + struct posix_acl *old_acl = acl; + + retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl); + if (retval) + goto err_out; + if (!acl) { + /* + * ACL can be represented + * by the mode bits. So don't + * update ACL. + */ + posix_acl_release(old_acl); + value = NULL; + size = 0; + } + iattr.ia_valid = ATTR_MODE; + /* FIXME should we update ctime ? + * What is the following setxattr update the + * mode ? + */ + v9fs_vfs_setattr_dotl(dentry, &iattr); + } + break; + case ACL_TYPE_DEFAULT: + if (!S_ISDIR(inode->i_mode)) { + retval = acl ? -EINVAL : 0; + goto err_out; + } + break; + default: + BUG(); + } + retval = v9fs_xattr_set(dentry, handler->prefix, value, size, flags); + if (!retval) + set_cached_acl(inode, handler->flags, acl); +err_out: + posix_acl_release(acl); + return retval; +} + +const struct xattr_handler v9fs_xattr_acl_access_handler = { + .prefix = POSIX_ACL_XATTR_ACCESS, + .flags = ACL_TYPE_ACCESS, + .get = v9fs_xattr_get_acl, + .set = v9fs_xattr_set_acl, +}; + +const struct xattr_handler v9fs_xattr_acl_default_handler = { + .prefix = POSIX_ACL_XATTR_DEFAULT, + .flags = ACL_TYPE_DEFAULT, + .get = v9fs_xattr_get_acl, + .set = v9fs_xattr_set_acl, +}; diff --git a/addons/9p/src/4.4.180/acl.h b/addons/9p/src/4.4.180/acl.h new file mode 100644 index 00000000..e4f7e882 --- /dev/null +++ b/addons/9p/src/4.4.180/acl.h @@ -0,0 +1,55 @@ +/* + * Copyright IBM Corporation, 2010 + * Author Aneesh Kumar K.V + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
+ * + */ +#ifndef FS_9P_ACL_H +#define FS_9P_ACL_H + +#ifdef CONFIG_9P_FS_POSIX_ACL +extern int v9fs_get_acl(struct inode *, struct p9_fid *); +extern struct posix_acl *v9fs_iop_get_acl(struct inode *inode, int type); +extern int v9fs_acl_chmod(struct inode *, struct p9_fid *); +extern int v9fs_set_create_acl(struct inode *, struct p9_fid *, + struct posix_acl *, struct posix_acl *); +extern int v9fs_acl_mode(struct inode *dir, umode_t *modep, + struct posix_acl **dpacl, struct posix_acl **pacl); +extern void v9fs_put_acl(struct posix_acl *dacl, struct posix_acl *acl); +#else +#define v9fs_iop_get_acl NULL +static inline int v9fs_get_acl(struct inode *inode, struct p9_fid *fid) +{ + return 0; +} +static inline int v9fs_acl_chmod(struct inode *inode, struct p9_fid *fid) +{ + return 0; +} +static inline int v9fs_set_create_acl(struct inode *inode, + struct p9_fid *fid, + struct posix_acl *dacl, + struct posix_acl *acl) +{ + return 0; +} +static inline void v9fs_put_acl(struct posix_acl *dacl, + struct posix_acl *acl) +{ +} +static inline int v9fs_acl_mode(struct inode *dir, umode_t *modep, + struct posix_acl **dpacl, + struct posix_acl **pacl) +{ + return 0; +} + +#endif +#endif /* FS_9P_XATTR_H */ diff --git a/addons/9p/src/4.4.180/cache.c b/addons/9p/src/4.4.180/cache.c new file mode 100644 index 00000000..103ca5e1 --- /dev/null +++ b/addons/9p/src/4.4.180/cache.c @@ -0,0 +1,414 @@ +/* + * V9FS cache definitions. + * + * Copyright (C) 2009 by Abhishek Kulkarni + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "cache.h" + +#define CACHETAG_LEN 11 + +struct fscache_netfs v9fs_cache_netfs = { + .name = "9p", + .version = 0, +}; + +/** + * v9fs_random_cachetag - Generate a random tag to be associated + * with a new cache session. + * + * The value of jiffies is used for a fairly randomly cache tag. 
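+ * (CACHETAG_LEN bounds the scnprintf() below, so the tag always fits
+ * and stays NUL-terminated.)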
+ */ + +static +int v9fs_random_cachetag(struct v9fs_session_info *v9ses) +{ + v9ses->cachetag = kmalloc(CACHETAG_LEN, GFP_KERNEL); + if (!v9ses->cachetag) + return -ENOMEM; + + return scnprintf(v9ses->cachetag, CACHETAG_LEN, "%lu", jiffies); +} + +static uint16_t v9fs_cache_session_get_key(const void *cookie_netfs_data, + void *buffer, uint16_t bufmax) +{ + struct v9fs_session_info *v9ses; + uint16_t klen = 0; + + v9ses = (struct v9fs_session_info *)cookie_netfs_data; + p9_debug(P9_DEBUG_FSC, "session %p buf %p size %u\n", + v9ses, buffer, bufmax); + + if (v9ses->cachetag) + klen = strlen(v9ses->cachetag); + + if (klen > bufmax) + return 0; + + memcpy(buffer, v9ses->cachetag, klen); + p9_debug(P9_DEBUG_FSC, "cache session tag %s\n", v9ses->cachetag); + return klen; +} + +const struct fscache_cookie_def v9fs_cache_session_index_def = { + .name = "9P.session", + .type = FSCACHE_COOKIE_TYPE_INDEX, + .get_key = v9fs_cache_session_get_key, +}; + +void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses) +{ + /* If no cache session tag was specified, we generate a random one. */ + if (!v9ses->cachetag) + v9fs_random_cachetag(v9ses); + + v9ses->fscache = fscache_acquire_cookie(v9fs_cache_netfs.primary_index, + &v9fs_cache_session_index_def, + v9ses, true); + p9_debug(P9_DEBUG_FSC, "session %p get cookie %p\n", + v9ses, v9ses->fscache); +} + +void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses) +{ + p9_debug(P9_DEBUG_FSC, "session %p put cookie %p\n", + v9ses, v9ses->fscache); + fscache_relinquish_cookie(v9ses->fscache, 0); + v9ses->fscache = NULL; +} + + +static uint16_t v9fs_cache_inode_get_key(const void *cookie_netfs_data, + void *buffer, uint16_t bufmax) +{ + const struct v9fs_inode *v9inode = cookie_netfs_data; + memcpy(buffer, &v9inode->qid.path, sizeof(v9inode->qid.path)); + p9_debug(P9_DEBUG_FSC, "inode %p get key %llu\n", + &v9inode->vfs_inode, v9inode->qid.path); + return sizeof(v9inode->qid.path); +} + +static void v9fs_cache_inode_get_attr(const void *cookie_netfs_data, + uint64_t *size) +{ + const struct v9fs_inode *v9inode = cookie_netfs_data; + *size = i_size_read(&v9inode->vfs_inode); + + p9_debug(P9_DEBUG_FSC, "inode %p get attr %llu\n", + &v9inode->vfs_inode, *size); +} + +static uint16_t v9fs_cache_inode_get_aux(const void *cookie_netfs_data, + void *buffer, uint16_t buflen) +{ + const struct v9fs_inode *v9inode = cookie_netfs_data; + memcpy(buffer, &v9inode->qid.version, sizeof(v9inode->qid.version)); + p9_debug(P9_DEBUG_FSC, "inode %p get aux %u\n", + &v9inode->vfs_inode, v9inode->qid.version); + return sizeof(v9inode->qid.version); +} + +static enum +fscache_checkaux v9fs_cache_inode_check_aux(void *cookie_netfs_data, + const void *buffer, + uint16_t buflen) +{ + const struct v9fs_inode *v9inode = cookie_netfs_data; + + if (buflen != sizeof(v9inode->qid.version)) + return FSCACHE_CHECKAUX_OBSOLETE; + + if (memcmp(buffer, &v9inode->qid.version, + sizeof(v9inode->qid.version))) + return FSCACHE_CHECKAUX_OBSOLETE; + + return FSCACHE_CHECKAUX_OKAY; +} + +static void v9fs_cache_inode_now_uncached(void *cookie_netfs_data) +{ + struct v9fs_inode *v9inode = cookie_netfs_data; + struct pagevec pvec; + pgoff_t first; + int loop, nr_pages; + + pagevec_init(&pvec, 0); + first = 0; + + for (;;) { + nr_pages = pagevec_lookup(&pvec, v9inode->vfs_inode.i_mapping, + first, + PAGEVEC_SIZE - pagevec_count(&pvec)); + if (!nr_pages) + break; + + for (loop = 0; loop < nr_pages; loop++) + ClearPageFsCache(pvec.pages[loop]); + + first = pvec.pages[nr_pages - 1]->index + 1; 
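+ /* Advance past the last page seen, then drop the references
+ * pagevec_lookup() took on this batch. */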
+ + pvec.nr = nr_pages; + pagevec_release(&pvec); + cond_resched(); + } +} + +const struct fscache_cookie_def v9fs_cache_inode_index_def = { + .name = "9p.inode", + .type = FSCACHE_COOKIE_TYPE_DATAFILE, + .get_key = v9fs_cache_inode_get_key, + .get_attr = v9fs_cache_inode_get_attr, + .get_aux = v9fs_cache_inode_get_aux, + .check_aux = v9fs_cache_inode_check_aux, + .now_uncached = v9fs_cache_inode_now_uncached, +}; + +void v9fs_cache_inode_get_cookie(struct inode *inode) +{ + struct v9fs_inode *v9inode; + struct v9fs_session_info *v9ses; + + if (!S_ISREG(inode->i_mode)) + return; + + v9inode = V9FS_I(inode); + if (v9inode->fscache) + return; + + v9ses = v9fs_inode2v9ses(inode); + v9inode->fscache = fscache_acquire_cookie(v9ses->fscache, + &v9fs_cache_inode_index_def, + v9inode, true); + + p9_debug(P9_DEBUG_FSC, "inode %p get cookie %p\n", + inode, v9inode->fscache); +} + +void v9fs_cache_inode_put_cookie(struct inode *inode) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + + if (!v9inode->fscache) + return; + p9_debug(P9_DEBUG_FSC, "inode %p put cookie %p\n", + inode, v9inode->fscache); + + fscache_relinquish_cookie(v9inode->fscache, 0); + v9inode->fscache = NULL; +} + +void v9fs_cache_inode_flush_cookie(struct inode *inode) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + + if (!v9inode->fscache) + return; + p9_debug(P9_DEBUG_FSC, "inode %p flush cookie %p\n", + inode, v9inode->fscache); + + fscache_relinquish_cookie(v9inode->fscache, 1); + v9inode->fscache = NULL; +} + +void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + + if (!v9inode->fscache) + return; + + mutex_lock(&v9inode->fscache_lock); + + if ((filp->f_flags & O_ACCMODE) != O_RDONLY) + v9fs_cache_inode_flush_cookie(inode); + else + v9fs_cache_inode_get_cookie(inode); + + mutex_unlock(&v9inode->fscache_lock); +} + +void v9fs_cache_inode_reset_cookie(struct inode *inode) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + struct v9fs_session_info *v9ses; + struct fscache_cookie *old; + + if (!v9inode->fscache) + return; + + old = v9inode->fscache; + + mutex_lock(&v9inode->fscache_lock); + fscache_relinquish_cookie(v9inode->fscache, 1); + + v9ses = v9fs_inode2v9ses(inode); + v9inode->fscache = fscache_acquire_cookie(v9ses->fscache, + &v9fs_cache_inode_index_def, + v9inode, true); + p9_debug(P9_DEBUG_FSC, "inode %p revalidating cookie old %p new %p\n", + inode, old, v9inode->fscache); + + mutex_unlock(&v9inode->fscache_lock); +} + +int __v9fs_fscache_release_page(struct page *page, gfp_t gfp) +{ + struct inode *inode = page->mapping->host; + struct v9fs_inode *v9inode = V9FS_I(inode); + + BUG_ON(!v9inode->fscache); + + return fscache_maybe_release_page(v9inode->fscache, page, gfp); +} + +void __v9fs_fscache_invalidate_page(struct page *page) +{ + struct inode *inode = page->mapping->host; + struct v9fs_inode *v9inode = V9FS_I(inode); + + BUG_ON(!v9inode->fscache); + + if (PageFsCache(page)) { + fscache_wait_on_page_write(v9inode->fscache, page); + BUG_ON(!PageLocked(page)); + fscache_uncache_page(v9inode->fscache, page); + } +} + +static void v9fs_vfs_readpage_complete(struct page *page, void *data, + int error) +{ + if (!error) + SetPageUptodate(page); + + unlock_page(page); +} + +/** + * __v9fs_readpage_from_fscache - read a page from cache + * + * Returns 0 if the pages are in cache and a BIO is submitted, + * 1 if the pages are not in cache and -error otherwise. 
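 *
 * A caller (presumably the readpage path in vfs_addr.c) treats these
 * returns roughly as in this sketch:
 *
 *	ret = v9fs_readpage_from_fscache(inode, page);
 *	if (ret == 0)
 *		return ret;	/* read submitted from the cache */
 *	/* 1 or -error: fall back to fetching the page over 9P */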
+ */ + +int __v9fs_readpage_from_fscache(struct inode *inode, struct page *page) +{ + int ret; + const struct v9fs_inode *v9inode = V9FS_I(inode); + + p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page); + if (!v9inode->fscache) + return -ENOBUFS; + + ret = fscache_read_or_alloc_page(v9inode->fscache, + page, + v9fs_vfs_readpage_complete, + NULL, + GFP_KERNEL); + switch (ret) { + case -ENOBUFS: + case -ENODATA: + p9_debug(P9_DEBUG_FSC, "page/inode not in cache %d\n", ret); + return 1; + case 0: + p9_debug(P9_DEBUG_FSC, "BIO submitted\n"); + return ret; + default: + p9_debug(P9_DEBUG_FSC, "ret %d\n", ret); + return ret; + } +} + +/** + * __v9fs_readpages_from_fscache - read multiple pages from cache + * + * Returns 0 if the pages are in cache and a BIO is submitted, + * 1 if the pages are not in cache and -error otherwise. + */ + +int __v9fs_readpages_from_fscache(struct inode *inode, + struct address_space *mapping, + struct list_head *pages, + unsigned *nr_pages) +{ + int ret; + const struct v9fs_inode *v9inode = V9FS_I(inode); + + p9_debug(P9_DEBUG_FSC, "inode %p pages %u\n", inode, *nr_pages); + if (!v9inode->fscache) + return -ENOBUFS; + + ret = fscache_read_or_alloc_pages(v9inode->fscache, + mapping, pages, nr_pages, + v9fs_vfs_readpage_complete, + NULL, + mapping_gfp_mask(mapping)); + switch (ret) { + case -ENOBUFS: + case -ENODATA: + p9_debug(P9_DEBUG_FSC, "pages/inodes not in cache %d\n", ret); + return 1; + case 0: + BUG_ON(!list_empty(pages)); + BUG_ON(*nr_pages != 0); + p9_debug(P9_DEBUG_FSC, "BIO submitted\n"); + return ret; + default: + p9_debug(P9_DEBUG_FSC, "ret %d\n", ret); + return ret; + } +} + +/** + * __v9fs_readpage_to_fscache - write a page to the cache + * + */ + +void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page) +{ + int ret; + const struct v9fs_inode *v9inode = V9FS_I(inode); + + p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page); + ret = fscache_write_page(v9inode->fscache, page, GFP_KERNEL); + p9_debug(P9_DEBUG_FSC, "ret = %d\n", ret); + if (ret != 0) + v9fs_uncache_page(inode, page); +} + +/* + * wait for a page to complete writing to the cache + */ +void __v9fs_fscache_wait_on_page_write(struct inode *inode, struct page *page) +{ + const struct v9fs_inode *v9inode = V9FS_I(inode); + p9_debug(P9_DEBUG_FSC, "inode %p page %p\n", inode, page); + if (PageFsCache(page)) + fscache_wait_on_page_write(v9inode->fscache, page); +} diff --git a/addons/9p/src/4.4.180/cache.h b/addons/9p/src/4.4.180/cache.h new file mode 100644 index 00000000..247e47e5 --- /dev/null +++ b/addons/9p/src/4.4.180/cache.h @@ -0,0 +1,152 @@ +/* + * V9FS cache definitions. + * + * Copyright (C) 2009 by Abhishek Kulkarni + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to:
+ * Free Software Foundation
+ * 51 Franklin Street, Fifth Floor
+ * Boston, MA 02111-1301 USA
+ *
+ */
+
+#ifndef _9P_CACHE_H
+#define _9P_CACHE_H
+#ifdef CONFIG_9P_FSCACHE
+#include <linux/fscache.h>
+#include <linux/spinlock.h>
+
+extern struct fscache_netfs v9fs_cache_netfs;
+extern const struct fscache_cookie_def v9fs_cache_session_index_def;
+extern const struct fscache_cookie_def v9fs_cache_inode_index_def;
+
+extern void v9fs_cache_session_get_cookie(struct v9fs_session_info *v9ses);
+extern void v9fs_cache_session_put_cookie(struct v9fs_session_info *v9ses);
+
+extern void v9fs_cache_inode_get_cookie(struct inode *inode);
+extern void v9fs_cache_inode_put_cookie(struct inode *inode);
+extern void v9fs_cache_inode_flush_cookie(struct inode *inode);
+extern void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *filp);
+extern void v9fs_cache_inode_reset_cookie(struct inode *inode);
+
+extern int __v9fs_cache_register(void);
+extern void __v9fs_cache_unregister(void);
+
+extern int __v9fs_fscache_release_page(struct page *page, gfp_t gfp);
+extern void __v9fs_fscache_invalidate_page(struct page *page);
+extern int __v9fs_readpage_from_fscache(struct inode *inode,
+        struct page *page);
+extern int __v9fs_readpages_from_fscache(struct inode *inode,
+        struct address_space *mapping,
+        struct list_head *pages,
+        unsigned *nr_pages);
+extern void __v9fs_readpage_to_fscache(struct inode *inode, struct page *page);
+extern void __v9fs_fscache_wait_on_page_write(struct inode *inode,
+        struct page *page);
+
+static inline int v9fs_fscache_release_page(struct page *page,
+        gfp_t gfp)
+{
+    return __v9fs_fscache_release_page(page, gfp);
+}
+
+static inline void v9fs_fscache_invalidate_page(struct page *page)
+{
+    __v9fs_fscache_invalidate_page(page);
+}
+
+static inline int v9fs_readpage_from_fscache(struct inode *inode,
+        struct page *page)
+{
+    return __v9fs_readpage_from_fscache(inode, page);
+}
+
+static inline int v9fs_readpages_from_fscache(struct inode *inode,
+        struct address_space *mapping,
+        struct list_head *pages,
+        unsigned *nr_pages)
+{
+    return __v9fs_readpages_from_fscache(inode, mapping, pages,
+            nr_pages);
+}
+
+static inline void v9fs_readpage_to_fscache(struct inode *inode,
+        struct page *page)
+{
+    if (PageFsCache(page))
+        __v9fs_readpage_to_fscache(inode, page);
+}
+
+static inline void v9fs_uncache_page(struct inode *inode, struct page *page)
+{
+    struct v9fs_inode *v9inode = V9FS_I(inode);
+    fscache_uncache_page(v9inode->fscache, page);
+    BUG_ON(PageFsCache(page));
+}
+
+static inline void v9fs_fscache_wait_on_page_write(struct inode *inode,
+        struct page *page)
+{
+    return __v9fs_fscache_wait_on_page_write(inode, page);
+}
+
+#else /* CONFIG_9P_FSCACHE */
+
+static inline void v9fs_cache_inode_get_cookie(struct inode *inode)
+{
+}
+
+static inline void v9fs_cache_inode_put_cookie(struct inode *inode)
+{
+}
+
+static inline void v9fs_cache_inode_set_cookie(struct inode *inode, struct file *file)
+{
+}
+
+static inline int v9fs_fscache_release_page(struct page *page,
+        gfp_t gfp) {
+    return 1;
+}
+
+static inline void v9fs_fscache_invalidate_page(struct page *page) {}
+
+static inline int v9fs_readpage_from_fscache(struct inode *inode,
+        struct page *page)
+{
+    return -ENOBUFS;
+}
+
+static inline int v9fs_readpages_from_fscache(struct inode *inode,
+        struct address_space *mapping,
+        struct list_head *pages,
+        unsigned *nr_pages)
+{
+    return -ENOBUFS;
+}
+
+static inline void
v9fs_readpage_to_fscache(struct inode *inode,
+        struct page *page)
+{}
+
+static inline void v9fs_uncache_page(struct inode *inode, struct page *page)
+{}
+
+static inline void v9fs_fscache_wait_on_page_write(struct inode *inode,
+        struct page *page)
+{
+    return;
+}
+
+#endif /* CONFIG_9P_FSCACHE */
+#endif /* _9P_CACHE_H */
diff --git a/addons/9p/src/4.4.180/client.c b/addons/9p/src/4.4.180/client.c
new file mode 100644
index 00000000..443db202
--- /dev/null
+++ b/addons/9p/src/4.4.180/client.c
@@ -0,0 +1,2297 @@
+/*
+ * net/9p/clnt.c
+ *
+ * 9P Client
+ *
+ * Copyright (C) 2008 by Eric Van Hensbergen
+ * Copyright (C) 2007 by Latchesar Ionkov
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to:
+ * Free Software Foundation
+ * 51 Franklin Street, Fifth Floor
+ * Boston, MA 02111-1301 USA
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/poll.h>
+#include <linux/idr.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/uio.h>
+#include <net/9p/9p.h>
+#include <linux/parser.h>
+#include <net/9p/client.h>
+#include <net/9p/transport.h>
+#include "protocol.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/9p.h>
+
+/*
+ * Client Option Parsing (code inspired by NFS code)
+ *  - a little lazy - parse all client options
+ */
+
+enum {
+    Opt_msize,
+    Opt_trans,
+    Opt_legacy,
+    Opt_version,
+    Opt_err,
+};
+
+static const match_table_t tokens = {
+    {Opt_msize, "msize=%u"},
+    {Opt_legacy, "noextend"},
+    {Opt_trans, "trans=%s"},
+    {Opt_version, "version=%s"},
+    {Opt_err, NULL},
+};
+
+inline int p9_is_proto_dotl(struct p9_client *clnt)
+{
+    return clnt->proto_version == p9_proto_2000L;
+}
+EXPORT_SYMBOL(p9_is_proto_dotl);
+
+inline int p9_is_proto_dotu(struct p9_client *clnt)
+{
+    return clnt->proto_version == p9_proto_2000u;
+}
+EXPORT_SYMBOL(p9_is_proto_dotu);
+
+/*
+ * Some error codes are taken directly from the server replies,
+ * make sure they are valid.
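 * A positive value, or anything below -MAX_ERRNO, cannot be a valid
 * Linux errno, so safe_errno() below maps such a reply to -EPROTO.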
+ */ +static int safe_errno(int err) +{ + if ((err > 0) || (err < -MAX_ERRNO)) { + p9_debug(P9_DEBUG_ERROR, "Invalid error code %d\n", err); + return -EPROTO; + } + return err; +} + + +/* Interpret mount option for protocol version */ +static int get_protocol_version(char *s) +{ + int version = -EINVAL; + + if (!strcmp(s, "9p2000")) { + version = p9_proto_legacy; + p9_debug(P9_DEBUG_9P, "Protocol version: Legacy\n"); + } else if (!strcmp(s, "9p2000.u")) { + version = p9_proto_2000u; + p9_debug(P9_DEBUG_9P, "Protocol version: 9P2000.u\n"); + } else if (!strcmp(s, "9p2000.L")) { + version = p9_proto_2000L; + p9_debug(P9_DEBUG_9P, "Protocol version: 9P2000.L\n"); + } else + pr_info("Unknown protocol version %s\n", s); + + return version; +} + +/** + * parse_options - parse mount options into client structure + * @opts: options string passed from mount + * @clnt: existing v9fs client information + * + * Return 0 upon success, -ERRNO upon failure + */ + +static int parse_opts(char *opts, struct p9_client *clnt) +{ + char *options, *tmp_options; + char *p; + substring_t args[MAX_OPT_ARGS]; + int option; + char *s; + int ret = 0; + + clnt->proto_version = p9_proto_2000L; + clnt->msize = 8192; + + if (!opts) + return 0; + + tmp_options = kstrdup(opts, GFP_KERNEL); + if (!tmp_options) { + p9_debug(P9_DEBUG_ERROR, + "failed to allocate copy of option string\n"); + return -ENOMEM; + } + options = tmp_options; + + while ((p = strsep(&options, ",")) != NULL) { + int token, r; + if (!*p) + continue; + token = match_token(p, tokens, args); + switch (token) { + case Opt_msize: + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + ret = r; + continue; + } + if (option < 4096) { + p9_debug(P9_DEBUG_ERROR, + "msize should be at least 4k\n"); + ret = -EINVAL; + continue; + } + clnt->msize = option; + break; + case Opt_trans: + s = match_strdup(&args[0]); + if (!s) { + ret = -ENOMEM; + p9_debug(P9_DEBUG_ERROR, + "problem allocating copy of trans arg\n"); + goto free_and_return; + } + clnt->trans_mod = v9fs_get_trans_by_name(s); + if (clnt->trans_mod == NULL) { + pr_info("Could not find request transport: %s\n", + s); + ret = -EINVAL; + kfree(s); + goto free_and_return; + } + kfree(s); + break; + case Opt_legacy: + clnt->proto_version = p9_proto_legacy; + break; + case Opt_version: + s = match_strdup(&args[0]); + if (!s) { + ret = -ENOMEM; + p9_debug(P9_DEBUG_ERROR, + "problem allocating copy of version arg\n"); + goto free_and_return; + } + ret = get_protocol_version(s); + if (ret == -EINVAL) { + kfree(s); + goto free_and_return; + } + kfree(s); + clnt->proto_version = ret; + break; + default: + continue; + } + } + +free_and_return: + kfree(tmp_options); + return ret; +} + +static struct p9_fcall *p9_fcall_alloc(int alloc_msize) +{ + struct p9_fcall *fc; + fc = kmalloc(sizeof(struct p9_fcall) + alloc_msize, GFP_NOFS); + if (!fc) + return NULL; + fc->capacity = alloc_msize; + fc->sdata = (char *) fc + sizeof(struct p9_fcall); + return fc; +} + +/** + * p9_tag_alloc - lookup/allocate a request by tag + * @c: client session to lookup tag within + * @tag: numeric id for transaction + * + * this is a simple array lookup, but will grow the + * request_slots as necessary to accommodate transaction + * ids which did not previously have a slot. + * + * this code relies on the client spinlock to manage locks, its + * possible we should switch to something else, but I'd rather + * stick with something low-overhead for the common case. 
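 *
 * the lookup itself is a two-level table walk: row = tag / P9_ROW_MAXTAG,
 * col = tag % P9_ROW_MAXTAG, so with the usual P9_ROW_MAXTAG of 255 a
 * (shifted) tag of 300 lands in c->reqs[1][45]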
+ * + */ + +static struct p9_req_t * +p9_tag_alloc(struct p9_client *c, u16 tag, unsigned int max_size) +{ + unsigned long flags; + int row, col; + struct p9_req_t *req; + int alloc_msize = min(c->msize, max_size); + + /* This looks up the original request by tag so we know which + * buffer to read the data into */ + tag++; + + if (tag >= c->max_tag) { + spin_lock_irqsave(&c->lock, flags); + /* check again since original check was outside of lock */ + while (tag >= c->max_tag) { + row = (tag / P9_ROW_MAXTAG); + c->reqs[row] = kcalloc(P9_ROW_MAXTAG, + sizeof(struct p9_req_t), GFP_ATOMIC); + + if (!c->reqs[row]) { + pr_err("Couldn't grow tag array\n"); + spin_unlock_irqrestore(&c->lock, flags); + return ERR_PTR(-ENOMEM); + } + for (col = 0; col < P9_ROW_MAXTAG; col++) { + c->reqs[row][col].status = REQ_STATUS_IDLE; + c->reqs[row][col].tc = NULL; + } + c->max_tag += P9_ROW_MAXTAG; + } + spin_unlock_irqrestore(&c->lock, flags); + } + row = tag / P9_ROW_MAXTAG; + col = tag % P9_ROW_MAXTAG; + + req = &c->reqs[row][col]; + if (!req->wq) { + req->wq = kmalloc(sizeof(wait_queue_head_t), GFP_NOFS); + if (!req->wq) + goto grow_failed; + init_waitqueue_head(req->wq); + } + + if (!req->tc) + req->tc = p9_fcall_alloc(alloc_msize); + if (!req->rc) + req->rc = p9_fcall_alloc(alloc_msize); + if (!req->tc || !req->rc) + goto grow_failed; + + p9pdu_reset(req->tc); + p9pdu_reset(req->rc); + + req->tc->tag = tag-1; + req->status = REQ_STATUS_ALLOC; + + return req; + +grow_failed: + pr_err("Couldn't grow tag array\n"); + kfree(req->tc); + kfree(req->rc); + kfree(req->wq); + req->tc = req->rc = NULL; + req->wq = NULL; + return ERR_PTR(-ENOMEM); +} + +/** + * p9_tag_lookup - lookup a request by tag + * @c: client session to lookup tag within + * @tag: numeric id for transaction + * + */ + +struct p9_req_t *p9_tag_lookup(struct p9_client *c, u16 tag) +{ + int row, col; + + /* This looks up the original request by tag so we know which + * buffer to read the data into */ + tag++; + + if(tag >= c->max_tag) + return NULL; + + row = tag / P9_ROW_MAXTAG; + col = tag % P9_ROW_MAXTAG; + + return &c->reqs[row][col]; +} +EXPORT_SYMBOL(p9_tag_lookup); + +/** + * p9_tag_init - setup tags structure and contents + * @c: v9fs client struct + * + * This initializes the tags structure for each client instance. 
+ * + */ + +static int p9_tag_init(struct p9_client *c) +{ + int err = 0; + + c->tagpool = p9_idpool_create(); + if (IS_ERR(c->tagpool)) { + err = PTR_ERR(c->tagpool); + goto error; + } + err = p9_idpool_get(c->tagpool); /* reserve tag 0 */ + if (err < 0) { + p9_idpool_destroy(c->tagpool); + goto error; + } + c->max_tag = 0; +error: + return err; +} + +/** + * p9_tag_cleanup - cleans up tags structure and reclaims resources + * @c: v9fs client struct + * + * This frees resources associated with the tags structure + * + */ +static void p9_tag_cleanup(struct p9_client *c) +{ + int row, col; + + /* check to insure all requests are idle */ + for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) { + for (col = 0; col < P9_ROW_MAXTAG; col++) { + if (c->reqs[row][col].status != REQ_STATUS_IDLE) { + p9_debug(P9_DEBUG_MUX, + "Attempting to cleanup non-free tag %d,%d\n", + row, col); + /* TODO: delay execution of cleanup */ + return; + } + } + } + + if (c->tagpool) { + p9_idpool_put(0, c->tagpool); /* free reserved tag 0 */ + p9_idpool_destroy(c->tagpool); + } + + /* free requests associated with tags */ + for (row = 0; row < (c->max_tag/P9_ROW_MAXTAG); row++) { + for (col = 0; col < P9_ROW_MAXTAG; col++) { + kfree(c->reqs[row][col].wq); + kfree(c->reqs[row][col].tc); + kfree(c->reqs[row][col].rc); + } + kfree(c->reqs[row]); + } + c->max_tag = 0; +} + +/** + * p9_free_req - free a request and clean-up as necessary + * c: client state + * r: request to release + * + */ + +static void p9_free_req(struct p9_client *c, struct p9_req_t *r) +{ + int tag = r->tc->tag; + p9_debug(P9_DEBUG_MUX, "clnt %p req %p tag: %d\n", c, r, tag); + + r->status = REQ_STATUS_IDLE; + if (tag != P9_NOTAG && p9_idpool_check(tag, c->tagpool)) + p9_idpool_put(tag, c->tagpool); +} + +/** + * p9_client_cb - call back from transport to client + * c: client state + * req: request received + * + */ +void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status) +{ + p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc->tag); + + /* + * This barrier is needed to make sure any change made to req before + * the other thread wakes up will indeed be seen by the waiting side. 
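 * (the matching smp_rmb() sits in p9_client_rpc(), right after
 * wait_event_killable() returns)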
+ */ + smp_wmb(); + req->status = status; + + wake_up(req->wq); + p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag); +} +EXPORT_SYMBOL(p9_client_cb); + +/** + * p9_parse_header - parse header arguments out of a packet + * @pdu: packet to parse + * @size: size of packet + * @type: type of request + * @tag: tag of packet + * @rewind: set if we need to rewind offset afterwards + */ + +int +p9_parse_header(struct p9_fcall *pdu, int32_t *size, int8_t *type, int16_t *tag, + int rewind) +{ + int8_t r_type; + int16_t r_tag; + int32_t r_size; + int offset = pdu->offset; + int err; + + pdu->offset = 0; + if (pdu->size == 0) + pdu->size = 7; + + err = p9pdu_readf(pdu, 0, "dbw", &r_size, &r_type, &r_tag); + if (err) + goto rewind_and_exit; + + pdu->size = r_size; + pdu->id = r_type; + pdu->tag = r_tag; + + p9_debug(P9_DEBUG_9P, "<<< size=%d type: %d tag: %d\n", + pdu->size, pdu->id, pdu->tag); + + if (type) + *type = r_type; + if (tag) + *tag = r_tag; + if (size) + *size = r_size; + + +rewind_and_exit: + if (rewind) + pdu->offset = offset; + return err; +} +EXPORT_SYMBOL(p9_parse_header); + +/** + * p9_check_errors - check 9p packet for error return and process it + * @c: current client instance + * @req: request to parse and check for error conditions + * + * returns error code if one is discovered, otherwise returns 0 + * + * this will have to be more complicated if we have multiple + * error packet types + */ + +static int p9_check_errors(struct p9_client *c, struct p9_req_t *req) +{ + int8_t type; + int err; + int ecode; + + err = p9_parse_header(req->rc, NULL, &type, NULL, 0); + /* + * dump the response from server + * This should be after check errors which poplulate pdu_fcall. + */ + trace_9p_protocol_dump(c, req->rc); + if (err) { + p9_debug(P9_DEBUG_ERROR, "couldn't parse header %d\n", err); + return err; + } + if (type != P9_RERROR && type != P9_RLERROR) + return 0; + + if (!p9_is_proto_dotl(c)) { + char *ename; + err = p9pdu_readf(req->rc, c->proto_version, "s?d", + &ename, &ecode); + if (err) + goto out_err; + + if (p9_is_proto_dotu(c)) + err = -ecode; + + if (!err || !IS_ERR_VALUE(err)) { + err = p9_errstr2errno(ename, strlen(ename)); + + p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", + -ecode, ename); + } + kfree(ename); + } else { + err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode); + err = -ecode; + + p9_debug(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode); + } + + return err; + +out_err: + p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err); + + return err; +} + +/** + * p9_check_zc_errors - check 9p packet for error return and process it + * @c: current client instance + * @req: request to parse and check for error conditions + * @in_hdrlen: Size of response protocol buffer. + * + * returns error code if one is discovered, otherwise returns 0 + * + * this will have to be more complicated if we have multiple + * error packet types + */ + +static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req, + struct iov_iter *uidata, int in_hdrlen) +{ + int err; + int ecode; + int8_t type; + char *ename = NULL; + + err = p9_parse_header(req->rc, NULL, &type, NULL, 0); + /* + * dump the response from server + * This should be after parse_header which poplulate pdu_fcall. 
+ */ + trace_9p_protocol_dump(c, req->rc); + if (err) { + p9_debug(P9_DEBUG_ERROR, "couldn't parse header %d\n", err); + return err; + } + + if (type != P9_RERROR && type != P9_RLERROR) + return 0; + + if (!p9_is_proto_dotl(c)) { + /* Error is reported in string format */ + int len; + /* 7 = header size for RERROR; */ + int inline_len = in_hdrlen - 7; + + len = req->rc->size - req->rc->offset; + if (len > (P9_ZC_HDR_SZ - 7)) { + err = -EFAULT; + goto out_err; + } + + ename = &req->rc->sdata[req->rc->offset]; + if (len > inline_len) { + /* We have error in external buffer */ + err = copy_from_iter(ename + inline_len, + len - inline_len, uidata); + if (err != len - inline_len) { + err = -EFAULT; + goto out_err; + } + } + ename = NULL; + err = p9pdu_readf(req->rc, c->proto_version, "s?d", + &ename, &ecode); + if (err) + goto out_err; + + if (p9_is_proto_dotu(c)) + err = -ecode; + + if (!err || !IS_ERR_VALUE(err)) { + err = p9_errstr2errno(ename, strlen(ename)); + + p9_debug(P9_DEBUG_9P, "<<< RERROR (%d) %s\n", + -ecode, ename); + } + kfree(ename); + } else { + err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode); + err = -ecode; + + p9_debug(P9_DEBUG_9P, "<<< RLERROR (%d)\n", -ecode); + } + return err; + +out_err: + p9_debug(P9_DEBUG_ERROR, "couldn't parse error%d\n", err); + return err; +} + +static struct p9_req_t * +p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...); + +/** + * p9_client_flush - flush (cancel) a request + * @c: client state + * @oldreq: request to cancel + * + * This sents a flush for a particular request and links + * the flush request to the original request. The current + * code only supports a single flush request although the protocol + * allows for multiple flush requests to be sent for a single request. 
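 *
 * on the wire the exchange is simply (per the 9P protocol spec):
 *
 *	size[4] Tflush tag[2] oldtag[2]
 *	size[4] Rflush tag[2]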
+ * + */ + +static int p9_client_flush(struct p9_client *c, struct p9_req_t *oldreq) +{ + struct p9_req_t *req; + int16_t oldtag; + int err; + + err = p9_parse_header(oldreq->tc, NULL, NULL, &oldtag, 1); + if (err) + return err; + + p9_debug(P9_DEBUG_9P, ">>> TFLUSH tag %d\n", oldtag); + + req = p9_client_rpc(c, P9_TFLUSH, "w", oldtag); + if (IS_ERR(req)) + return PTR_ERR(req); + + /* + * if we haven't received a response for oldreq, + * remove it from the list + */ + if (oldreq->status == REQ_STATUS_SENT) + if (c->trans_mod->cancelled) + c->trans_mod->cancelled(c, oldreq); + + p9_free_req(c, req); + return 0; +} + +static struct p9_req_t *p9_client_prepare_req(struct p9_client *c, + int8_t type, int req_size, + const char *fmt, va_list ap) +{ + int tag, err; + struct p9_req_t *req; + + p9_debug(P9_DEBUG_MUX, "client %p op %d\n", c, type); + + /* we allow for any status other than disconnected */ + if (c->status == Disconnected) + return ERR_PTR(-EIO); + + /* if status is begin_disconnected we allow only clunk request */ + if ((c->status == BeginDisconnect) && (type != P9_TCLUNK)) + return ERR_PTR(-EIO); + + tag = P9_NOTAG; + if (type != P9_TVERSION) { + tag = p9_idpool_get(c->tagpool); + if (tag < 0) + return ERR_PTR(-ENOMEM); + } + + req = p9_tag_alloc(c, tag, req_size); + if (IS_ERR(req)) + return req; + + /* marshall the data */ + p9pdu_prepare(req->tc, tag, type); + err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap); + if (err) + goto reterr; + p9pdu_finalize(c, req->tc); + trace_9p_client_req(c, type, tag); + return req; +reterr: + p9_free_req(c, req); + return ERR_PTR(err); +} + +/** + * p9_client_rpc - issue a request and wait for a response + * @c: client session + * @type: type of request + * @fmt: protocol format string (see protocol.c) + * + * Returns request structure (which client must free using p9_free_req) + */ + +static struct p9_req_t * +p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...) 
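/*
 * The format string describes the T-message body much like printf: "d"
 * marshals a 32-bit field, "q" a 64-bit field, "s" a 9P string. Two
 * calls taken from elsewhere in this file:
 *
 *	req = p9_client_rpc(clnt, P9_TCLUNK, "d", fid->fid);
 *	req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset, rsize);
 */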
+{
+    va_list ap;
+    int sigpending, err;
+    unsigned long flags;
+    struct p9_req_t *req;
+
+    va_start(ap, fmt);
+    req = p9_client_prepare_req(c, type, c->msize, fmt, ap);
+    va_end(ap);
+    if (IS_ERR(req))
+        return req;
+
+    if (signal_pending(current)) {
+        sigpending = 1;
+        clear_thread_flag(TIF_SIGPENDING);
+    } else
+        sigpending = 0;
+
+    err = c->trans_mod->request(c, req);
+    if (err < 0) {
+        if (err != -ERESTARTSYS && err != -EFAULT)
+            c->status = Disconnected;
+        goto reterr;
+    }
+again:
+    /* Wait for the response */
+    err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
+
+    /*
+     * Make sure our req is coherent with regard to updates in other
+     * threads - echoes to wmb() in the callback
+     */
+    smp_rmb();
+
+    if ((err == -ERESTARTSYS) && (c->status == Connected)
+        && (type == P9_TFLUSH)) {
+        sigpending = 1;
+        clear_thread_flag(TIF_SIGPENDING);
+        goto again;
+    }
+
+    if (req->status == REQ_STATUS_ERROR) {
+        p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+        err = req->t_err;
+    }
+    if ((err == -ERESTARTSYS) && (c->status == Connected)) {
+        p9_debug(P9_DEBUG_MUX, "flushing\n");
+        sigpending = 1;
+        clear_thread_flag(TIF_SIGPENDING);
+
+        if (c->trans_mod->cancel(c, req))
+            p9_client_flush(c, req);
+
+        /* if we received the response anyway, don't signal error */
+        if (req->status == REQ_STATUS_RCVD)
+            err = 0;
+    }
+    if (sigpending) {
+        spin_lock_irqsave(&current->sighand->siglock, flags);
+        recalc_sigpending();
+        spin_unlock_irqrestore(&current->sighand->siglock, flags);
+    }
+    if (err < 0)
+        goto reterr;
+
+    err = p9_check_errors(c, req);
+    trace_9p_client_res(c, type, req->rc->tag, err);
+    if (!err)
+        return req;
+reterr:
+    p9_free_req(c, req);
+    return ERR_PTR(safe_errno(err));
+}
+
+/**
+ * p9_client_zc_rpc - issue a request and wait for a response
+ * @c: client session
+ * @type: type of request
+ * @uidata: destination for zero copy read
+ * @uodata: source for zero copy write
+ * @inlen: read buffer size
+ * @olen: write buffer size
+ * @in_hdrlen: read header size; this is the size of the response protocol data
+ * @fmt: protocol format string (see protocol.c)
+ *
+ * Returns request structure (which client must free using p9_free_req)
+ */
+static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type,
+        struct iov_iter *uidata,
+        struct iov_iter *uodata,
+        int inlen, int olen, int in_hdrlen,
+        const char *fmt, ...)
+{
+    va_list ap;
+    int sigpending, err;
+    unsigned long flags;
+    struct p9_req_t *req;
+
+    va_start(ap, fmt);
+    /*
+     * We allocate an inline protocol buffer of only 4k bytes.
+     * The actual content is passed in zero-copy fashion.
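     * (P9_ZC_HDR_SZ is that inline buffer: only the 9P header and the
     * small fixed fields travel through it, while the payload pages go
     * to the transport for zero-copy transfer.)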
+     */
+    req = p9_client_prepare_req(c, type, P9_ZC_HDR_SZ, fmt, ap);
+    va_end(ap);
+    if (IS_ERR(req))
+        return req;
+
+    if (signal_pending(current)) {
+        sigpending = 1;
+        clear_thread_flag(TIF_SIGPENDING);
+    } else
+        sigpending = 0;
+
+    err = c->trans_mod->zc_request(c, req, uidata, uodata,
+            inlen, olen, in_hdrlen);
+    if (err < 0) {
+        if (err == -EIO)
+            c->status = Disconnected;
+        if (err != -ERESTARTSYS)
+            goto reterr;
+    }
+    if (req->status == REQ_STATUS_ERROR) {
+        p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err);
+        err = req->t_err;
+    }
+    if ((err == -ERESTARTSYS) && (c->status == Connected)) {
+        p9_debug(P9_DEBUG_MUX, "flushing\n");
+        sigpending = 1;
+        clear_thread_flag(TIF_SIGPENDING);
+
+        if (c->trans_mod->cancel(c, req))
+            p9_client_flush(c, req);
+
+        /* if we received the response anyway, don't signal error */
+        if (req->status == REQ_STATUS_RCVD)
+            err = 0;
+    }
+    if (sigpending) {
+        spin_lock_irqsave(&current->sighand->siglock, flags);
+        recalc_sigpending();
+        spin_unlock_irqrestore(&current->sighand->siglock, flags);
+    }
+    if (err < 0)
+        goto reterr;
+
+    err = p9_check_zc_errors(c, req, uidata, in_hdrlen);
+    trace_9p_client_res(c, type, req->rc->tag, err);
+    if (!err)
+        return req;
+reterr:
+    p9_free_req(c, req);
+    return ERR_PTR(safe_errno(err));
+}
+
+static struct p9_fid *p9_fid_create(struct p9_client *clnt)
+{
+    int ret;
+    struct p9_fid *fid;
+    unsigned long flags;
+
+    p9_debug(P9_DEBUG_FID, "clnt %p\n", clnt);
+    fid = kmalloc(sizeof(struct p9_fid), GFP_KERNEL);
+    if (!fid)
+        return ERR_PTR(-ENOMEM);
+
+    ret = p9_idpool_get(clnt->fidpool);
+    if (ret < 0) {
+        ret = -ENOSPC;
+        goto error;
+    }
+    fid->fid = ret;
+
+    memset(&fid->qid, 0, sizeof(struct p9_qid));
+    fid->mode = -1;
+    fid->uid = current_fsuid();
+    fid->clnt = clnt;
+    fid->rdir = NULL;
+    spin_lock_irqsave(&clnt->lock, flags);
+    list_add(&fid->flist, &clnt->fidlist);
+    spin_unlock_irqrestore(&clnt->lock, flags);
+
+    return fid;
+
+error:
+    kfree(fid);
+    return ERR_PTR(ret);
+}
+
+static void p9_fid_destroy(struct p9_fid *fid)
+{
+    struct p9_client *clnt;
+    unsigned long flags;
+
+    p9_debug(P9_DEBUG_FID, "fid %d\n", fid->fid);
+    clnt = fid->clnt;
+    p9_idpool_put(fid->fid, clnt->fidpool);
+    spin_lock_irqsave(&clnt->lock, flags);
+    list_del(&fid->flist);
+    spin_unlock_irqrestore(&clnt->lock, flags);
+    kfree(fid->rdir);
+    kfree(fid);
+}
+
+static int p9_client_version(struct p9_client *c)
+{
+    int err = 0;
+    struct p9_req_t *req;
+    char *version = NULL;
+    int msize;
+
+    p9_debug(P9_DEBUG_9P, ">>> TVERSION msize %d protocol %d\n",
+            c->msize, c->proto_version);
+
+    switch (c->proto_version) {
+    case p9_proto_2000L:
+        req = p9_client_rpc(c, P9_TVERSION, "ds",
+                c->msize, "9P2000.L");
+        break;
+    case p9_proto_2000u:
+        req = p9_client_rpc(c, P9_TVERSION, "ds",
+                c->msize, "9P2000.u");
+        break;
+    case p9_proto_legacy:
+        req = p9_client_rpc(c, P9_TVERSION, "ds",
+                c->msize, "9P2000");
+        break;
+    default:
+        return -EINVAL;
+    }
+
+    if (IS_ERR(req))
+        return PTR_ERR(req);
+
+    err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version);
+    if (err) {
+        p9_debug(P9_DEBUG_9P, "version error %d\n", err);
+        trace_9p_protocol_dump(c, req->rc);
+        goto error;
+    }
+
+    p9_debug(P9_DEBUG_9P, "<<< RVERSION msize %d %s\n", msize, version);
+    if (!strncmp(version, "9P2000.L", 8))
+        c->proto_version = p9_proto_2000L;
+    else if (!strncmp(version, "9P2000.u", 8))
+        c->proto_version = p9_proto_2000u;
+    else if (!strncmp(version, "9P2000", 6))
+        c->proto_version = p9_proto_legacy;
+    else {
+        p9_debug(P9_DEBUG_ERROR,
+                "server returned an
unknown version: %s\n", version); + err = -EREMOTEIO; + goto error; + } + + if (msize < 4096) { + p9_debug(P9_DEBUG_ERROR, + "server returned a msize < 4096: %d\n", msize); + err = -EREMOTEIO; + goto error; + } + if (msize < c->msize) + c->msize = msize; + +error: + kfree(version); + p9_free_req(c, req); + + return err; +} + +struct p9_client *p9_client_create(const char *dev_name, char *options) +{ + int err; + struct p9_client *clnt; + char *client_id; + + err = 0; + clnt = kmalloc(sizeof(struct p9_client), GFP_KERNEL); + if (!clnt) + return ERR_PTR(-ENOMEM); + + clnt->trans_mod = NULL; + clnt->trans = NULL; + + client_id = utsname()->nodename; + memcpy(clnt->name, client_id, strlen(client_id) + 1); + + spin_lock_init(&clnt->lock); + INIT_LIST_HEAD(&clnt->fidlist); + + err = p9_tag_init(clnt); + if (err < 0) + goto free_client; + + err = parse_opts(options, clnt); + if (err < 0) + goto destroy_tagpool; + + if (!clnt->trans_mod) + clnt->trans_mod = v9fs_get_default_trans(); + + if (clnt->trans_mod == NULL) { + err = -EPROTONOSUPPORT; + p9_debug(P9_DEBUG_ERROR, + "No transport defined or default transport\n"); + goto destroy_tagpool; + } + + clnt->fidpool = p9_idpool_create(); + if (IS_ERR(clnt->fidpool)) { + err = PTR_ERR(clnt->fidpool); + goto put_trans; + } + + p9_debug(P9_DEBUG_MUX, "clnt %p trans %p msize %d protocol %d\n", + clnt, clnt->trans_mod, clnt->msize, clnt->proto_version); + + err = clnt->trans_mod->create(clnt, dev_name, options); + if (err) + goto destroy_fidpool; + + if (clnt->msize > clnt->trans_mod->maxsize) + clnt->msize = clnt->trans_mod->maxsize; + + if (clnt->msize < 4096) { + p9_debug(P9_DEBUG_ERROR, + "Please specify a msize of at least 4k\n"); + err = -EINVAL; + goto close_trans; + } + + err = p9_client_version(clnt); + if (err) + goto close_trans; + + return clnt; + +close_trans: + clnt->trans_mod->close(clnt); +destroy_fidpool: + p9_idpool_destroy(clnt->fidpool); +put_trans: + v9fs_put_trans(clnt->trans_mod); +destroy_tagpool: + p9_idpool_destroy(clnt->tagpool); +free_client: + kfree(clnt); + return ERR_PTR(err); +} +EXPORT_SYMBOL(p9_client_create); + +void p9_client_destroy(struct p9_client *clnt) +{ + struct p9_fid *fid, *fidptr; + + p9_debug(P9_DEBUG_MUX, "clnt %p\n", clnt); + + if (clnt->trans_mod) + clnt->trans_mod->close(clnt); + + v9fs_put_trans(clnt->trans_mod); + + list_for_each_entry_safe(fid, fidptr, &clnt->fidlist, flist) { + pr_info("Found fid %d not clunked\n", fid->fid); + p9_fid_destroy(fid); + } + + if (clnt->fidpool) + p9_idpool_destroy(clnt->fidpool); + + p9_tag_cleanup(clnt); + + kfree(clnt); +} +EXPORT_SYMBOL(p9_client_destroy); + +void p9_client_disconnect(struct p9_client *clnt) +{ + p9_debug(P9_DEBUG_9P, "clnt %p\n", clnt); + clnt->status = Disconnected; +} +EXPORT_SYMBOL(p9_client_disconnect); + +void p9_client_begin_disconnect(struct p9_client *clnt) +{ + p9_debug(P9_DEBUG_9P, "clnt %p\n", clnt); + clnt->status = BeginDisconnect; +} +EXPORT_SYMBOL(p9_client_begin_disconnect); + +struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, + char *uname, kuid_t n_uname, char *aname) +{ + int err = 0; + struct p9_req_t *req; + struct p9_fid *fid; + struct p9_qid qid; + + + p9_debug(P9_DEBUG_9P, ">>> TATTACH afid %d uname %s aname %s\n", + afid ? afid->fid : -1, uname, aname); + fid = p9_fid_create(clnt); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + fid = NULL; + goto error; + } + fid->uid = n_uname; + + req = p9_client_rpc(clnt, P9_TATTACH, "ddss?u", fid->fid, + afid ? 
afid->fid : P9_NOFID, uname, aname, n_uname); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RATTACH qid %x.%llx.%x\n", + qid.type, (unsigned long long)qid.path, qid.version); + + memmove(&fid->qid, &qid, sizeof(struct p9_qid)); + + p9_free_req(clnt, req); + return fid; + +error: + if (fid) + p9_fid_destroy(fid); + return ERR_PTR(err); +} +EXPORT_SYMBOL(p9_client_attach); + +struct p9_fid *p9_client_walk(struct p9_fid *oldfid, uint16_t nwname, + char **wnames, int clone) +{ + int err; + struct p9_client *clnt; + struct p9_fid *fid; + struct p9_qid *wqids; + struct p9_req_t *req; + uint16_t nwqids, count; + + err = 0; + wqids = NULL; + clnt = oldfid->clnt; + if (clone) { + fid = p9_fid_create(clnt); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + fid = NULL; + goto error; + } + + fid->uid = oldfid->uid; + } else + fid = oldfid; + + + p9_debug(P9_DEBUG_9P, ">>> TWALK fids %d,%d nwname %ud wname[0] %s\n", + oldfid->fid, fid->fid, nwname, wnames ? wnames[0] : NULL); + + req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid, + nwname, wnames); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "R", &nwqids, &wqids); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + goto clunk_fid; + } + p9_free_req(clnt, req); + + p9_debug(P9_DEBUG_9P, "<<< RWALK nwqid %d:\n", nwqids); + + if (nwqids != nwname) { + err = -ENOENT; + goto clunk_fid; + } + + for (count = 0; count < nwqids; count++) + p9_debug(P9_DEBUG_9P, "<<< [%d] %x.%llx.%x\n", + count, wqids[count].type, + (unsigned long long)wqids[count].path, + wqids[count].version); + + if (nwname) + memmove(&fid->qid, &wqids[nwqids - 1], sizeof(struct p9_qid)); + else + fid->qid = oldfid->qid; + + kfree(wqids); + return fid; + +clunk_fid: + kfree(wqids); + p9_client_clunk(fid); + fid = NULL; + +error: + if (fid && (fid != oldfid)) + p9_fid_destroy(fid); + + return ERR_PTR(err); +} +EXPORT_SYMBOL(p9_client_walk); + +int p9_client_open(struct p9_fid *fid, int mode) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + struct p9_qid qid; + int iounit; + + clnt = fid->clnt; + p9_debug(P9_DEBUG_9P, ">>> %s fid %d mode %d\n", + p9_is_proto_dotl(clnt) ? "TLOPEN" : "TOPEN", fid->fid, mode); + err = 0; + + if (fid->mode != -1) + return -EINVAL; + + if (p9_is_proto_dotl(clnt)) + req = p9_client_rpc(clnt, P9_TLOPEN, "dd", fid->fid, mode); + else + req = p9_client_rpc(clnt, P9_TOPEN, "db", fid->fid, mode); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto free_and_error; + } + + p9_debug(P9_DEBUG_9P, "<<< %s qid %x.%llx.%x iounit %x\n", + p9_is_proto_dotl(clnt) ? 
"RLOPEN" : "ROPEN", qid.type, + (unsigned long long)qid.path, qid.version, iounit); + + fid->mode = mode; + fid->iounit = iounit; + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_open); + +int p9_client_create_dotl(struct p9_fid *ofid, char *name, u32 flags, u32 mode, + kgid_t gid, struct p9_qid *qid) +{ + int err = 0; + struct p9_client *clnt; + struct p9_req_t *req; + int iounit; + + p9_debug(P9_DEBUG_9P, + ">>> TLCREATE fid %d name %s flags %d mode %d gid %d\n", + ofid->fid, name, flags, mode, + from_kgid(&init_user_ns, gid)); + clnt = ofid->clnt; + + if (ofid->mode != -1) + return -EINVAL; + + req = p9_client_rpc(clnt, P9_TLCREATE, "dsddg", ofid->fid, name, flags, + mode, gid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", qid, &iounit); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto free_and_error; + } + + p9_debug(P9_DEBUG_9P, "<<< RLCREATE qid %x.%llx.%x iounit %x\n", + qid->type, + (unsigned long long)qid->path, + qid->version, iounit); + + ofid->mode = mode; + ofid->iounit = iounit; + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_create_dotl); + +int p9_client_fcreate(struct p9_fid *fid, char *name, u32 perm, int mode, + char *extension) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + struct p9_qid qid; + int iounit; + + p9_debug(P9_DEBUG_9P, ">>> TCREATE fid %d name %s perm %d mode %d\n", + fid->fid, name, perm, mode); + err = 0; + clnt = fid->clnt; + + if (fid->mode != -1) + return -EINVAL; + + req = p9_client_rpc(clnt, P9_TCREATE, "dsdb?s", fid->fid, name, perm, + mode, extension); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto free_and_error; + } + + p9_debug(P9_DEBUG_9P, "<<< RCREATE qid %x.%llx.%x iounit %x\n", + qid.type, + (unsigned long long)qid.path, + qid.version, iounit); + + fid->mode = mode; + fid->iounit = iounit; + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_fcreate); + +int p9_client_symlink(struct p9_fid *dfid, char *name, char *symtgt, kgid_t gid, + struct p9_qid *qid) +{ + int err = 0; + struct p9_client *clnt; + struct p9_req_t *req; + + p9_debug(P9_DEBUG_9P, ">>> TSYMLINK dfid %d name %s symtgt %s\n", + dfid->fid, name, symtgt); + clnt = dfid->clnt; + + req = p9_client_rpc(clnt, P9_TSYMLINK, "dssg", dfid->fid, name, symtgt, + gid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto free_and_error; + } + + p9_debug(P9_DEBUG_9P, "<<< RSYMLINK qid %x.%llx.%x\n", + qid->type, (unsigned long long)qid->path, qid->version); + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_symlink); + +int p9_client_link(struct p9_fid *dfid, struct p9_fid *oldfid, char *newname) +{ + struct p9_client *clnt; + struct p9_req_t *req; + + p9_debug(P9_DEBUG_9P, ">>> TLINK dfid %d oldfid %d newname %s\n", + dfid->fid, oldfid->fid, newname); + clnt = dfid->clnt; + req = p9_client_rpc(clnt, P9_TLINK, "dds", dfid->fid, oldfid->fid, + newname); + if (IS_ERR(req)) + return PTR_ERR(req); + + p9_debug(P9_DEBUG_9P, "<<< RLINK\n"); + p9_free_req(clnt, req); + return 0; +} +EXPORT_SYMBOL(p9_client_link); + +int p9_client_fsync(struct 
p9_fid *fid, int datasync) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + p9_debug(P9_DEBUG_9P, ">>> TFSYNC fid %d datasync:%d\n", + fid->fid, datasync); + err = 0; + clnt = fid->clnt; + + req = p9_client_rpc(clnt, P9_TFSYNC, "dd", fid->fid, datasync); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RFSYNC fid %d\n", fid->fid); + + p9_free_req(clnt, req); + +error: + return err; +} +EXPORT_SYMBOL(p9_client_fsync); + +int p9_client_clunk(struct p9_fid *fid) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + int retries = 0; + + if (!fid) { + pr_warn("%s (%d): Trying to clunk with NULL fid\n", + __func__, task_pid_nr(current)); + dump_stack(); + return 0; + } + +again: + p9_debug(P9_DEBUG_9P, ">>> TCLUNK fid %d (try %d)\n", fid->fid, + retries); + err = 0; + clnt = fid->clnt; + + req = p9_client_rpc(clnt, P9_TCLUNK, "d", fid->fid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RCLUNK fid %d\n", fid->fid); + + p9_free_req(clnt, req); +error: + /* + * Fid is not valid even after a failed clunk + * If interrupted, retry once then give up and + * leak fid until umount. + */ + if (err == -ERESTARTSYS) { + if (retries++ == 0) + goto again; + } else + p9_fid_destroy(fid); + return err; +} +EXPORT_SYMBOL(p9_client_clunk); + +int p9_client_remove(struct p9_fid *fid) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + p9_debug(P9_DEBUG_9P, ">>> TREMOVE fid %d\n", fid->fid); + err = 0; + clnt = fid->clnt; + + req = p9_client_rpc(clnt, P9_TREMOVE, "d", fid->fid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RREMOVE fid %d\n", fid->fid); + + p9_free_req(clnt, req); +error: + if (err == -ERESTARTSYS) + p9_client_clunk(fid); + else + p9_fid_destroy(fid); + return err; +} +EXPORT_SYMBOL(p9_client_remove); + +int p9_client_unlinkat(struct p9_fid *dfid, const char *name, int flags) +{ + int err = 0; + struct p9_req_t *req; + struct p9_client *clnt; + + p9_debug(P9_DEBUG_9P, ">>> TUNLINKAT fid %d %s %d\n", + dfid->fid, name, flags); + + clnt = dfid->clnt; + req = p9_client_rpc(clnt, P9_TUNLINKAT, "dsd", dfid->fid, name, flags); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RUNLINKAT fid %d %s\n", dfid->fid, name); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_unlinkat); + +int +p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err) +{ + struct p9_client *clnt = fid->clnt; + struct p9_req_t *req; + int total = 0; + *err = 0; + + p9_debug(P9_DEBUG_9P, ">>> TREAD fid %d offset %llu %d\n", + fid->fid, (unsigned long long) offset, (int)iov_iter_count(to)); + + while (iov_iter_count(to)) { + int count = iov_iter_count(to); + int rsize, non_zc = 0; + char *dataptr; + + rsize = fid->iounit; + if (!rsize || rsize > clnt->msize-P9_IOHDRSZ) + rsize = clnt->msize - P9_IOHDRSZ; + + if (count < rsize) + rsize = count; + + /* Don't bother zerocopy for small IO (< 1024) */ + if (clnt->trans_mod->zc_request && rsize > 1024) { + /* + * response header len is 11 + * PDU Header(7) + IO Size (4) + */ + req = p9_client_zc_rpc(clnt, P9_TREAD, to, NULL, rsize, + 0, 11, "dqd", fid->fid, + offset, rsize); + } else { + non_zc = 1; + req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset, + rsize); + } + if (IS_ERR(req)) { + *err = PTR_ERR(req); + break; + } + + *err = p9pdu_readf(req->rc, clnt->proto_version, + "D", &count, &dataptr); + if (*err) { 
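/* decoding the RREAD reply failed: dump the PDU for tracing and abort the read loop */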
+ trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + break; + } + if (rsize < count) { + pr_err("bogus RREAD count (%d > %d)\n", count, rsize); + count = rsize; + } + + p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count); + if (!count) { + p9_free_req(clnt, req); + break; + } + + if (non_zc) { + int n = copy_to_iter(dataptr, count, to); + total += n; + offset += n; + if (n != count) { + *err = -EFAULT; + p9_free_req(clnt, req); + break; + } + } else { + iov_iter_advance(to, count); + total += count; + offset += count; + } + p9_free_req(clnt, req); + } + return total; +} +EXPORT_SYMBOL(p9_client_read); + +int +p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err) +{ + struct p9_client *clnt = fid->clnt; + struct p9_req_t *req; + int total = 0; + *err = 0; + + p9_debug(P9_DEBUG_9P, ">>> TWRITE fid %d offset %llu count %zd\n", + fid->fid, (unsigned long long) offset, + iov_iter_count(from)); + + while (iov_iter_count(from)) { + int count = iov_iter_count(from); + int rsize = fid->iounit; + if (!rsize || rsize > clnt->msize-P9_IOHDRSZ) + rsize = clnt->msize - P9_IOHDRSZ; + + if (count < rsize) + rsize = count; + + /* Don't bother zerocopy for small IO (< 1024) */ + if (clnt->trans_mod->zc_request && rsize > 1024) { + req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, from, 0, + rsize, P9_ZC_HDR_SZ, "dqd", + fid->fid, offset, rsize); + } else { + req = p9_client_rpc(clnt, P9_TWRITE, "dqV", fid->fid, + offset, rsize, from); + } + if (IS_ERR(req)) { + *err = PTR_ERR(req); + break; + } + + *err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count); + if (*err) { + trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + break; + } + if (rsize < count) { + pr_err("bogus RWRITE count (%d > %d)\n", count, rsize); + count = rsize; + } + + p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count); + + p9_free_req(clnt, req); + iov_iter_advance(from, count); + total += count; + offset += count; + } + return total; +} +EXPORT_SYMBOL(p9_client_write); + +struct p9_wstat *p9_client_stat(struct p9_fid *fid) +{ + int err; + struct p9_client *clnt; + struct p9_wstat *ret = kmalloc(sizeof(struct p9_wstat), GFP_KERNEL); + struct p9_req_t *req; + u16 ignored; + + p9_debug(P9_DEBUG_9P, ">>> TSTAT fid %d\n", fid->fid); + + if (!ret) + return ERR_PTR(-ENOMEM); + + err = 0; + clnt = fid->clnt; + + req = p9_client_rpc(clnt, P9_TSTAT, "d", fid->fid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "wS", &ignored, ret); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + goto error; + } + + p9_debug(P9_DEBUG_9P, + "<<< RSTAT sz=%x type=%x dev=%x qid=%x.%llx.%x\n" + "<<< mode=%8.8x atime=%8.8x mtime=%8.8x length=%llx\n" + "<<< name=%s uid=%s gid=%s muid=%s extension=(%s)\n" + "<<< uid=%d gid=%d n_muid=%d\n", + ret->size, ret->type, ret->dev, ret->qid.type, + (unsigned long long)ret->qid.path, ret->qid.version, ret->mode, + ret->atime, ret->mtime, (unsigned long long)ret->length, + ret->name, ret->uid, ret->gid, ret->muid, ret->extension, + from_kuid(&init_user_ns, ret->n_uid), + from_kgid(&init_user_ns, ret->n_gid), + from_kuid(&init_user_ns, ret->n_muid)); + + p9_free_req(clnt, req); + return ret; + +error: + kfree(ret); + return ERR_PTR(err); +} +EXPORT_SYMBOL(p9_client_stat); + +struct p9_stat_dotl *p9_client_getattr_dotl(struct p9_fid *fid, + u64 request_mask) +{ + int err; + struct p9_client *clnt; + struct p9_stat_dotl *ret = kmalloc(sizeof(struct p9_stat_dotl), + 
GFP_KERNEL); + struct p9_req_t *req; + + p9_debug(P9_DEBUG_9P, ">>> TGETATTR fid %d, request_mask %lld\n", + fid->fid, request_mask); + + if (!ret) + return ERR_PTR(-ENOMEM); + + err = 0; + clnt = fid->clnt; + + req = p9_client_rpc(clnt, P9_TGETATTR, "dq", fid->fid, request_mask); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "A", ret); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + goto error; + } + + p9_debug(P9_DEBUG_9P, + "<<< RGETATTR st_result_mask=%lld\n" + "<<< qid=%x.%llx.%x\n" + "<<< st_mode=%8.8x st_nlink=%llu\n" + "<<< st_uid=%d st_gid=%d\n" + "<<< st_rdev=%llx st_size=%llx st_blksize=%llu st_blocks=%llu\n" + "<<< st_atime_sec=%lld st_atime_nsec=%lld\n" + "<<< st_mtime_sec=%lld st_mtime_nsec=%lld\n" + "<<< st_ctime_sec=%lld st_ctime_nsec=%lld\n" + "<<< st_btime_sec=%lld st_btime_nsec=%lld\n" + "<<< st_gen=%lld st_data_version=%lld", + ret->st_result_mask, ret->qid.type, ret->qid.path, + ret->qid.version, ret->st_mode, ret->st_nlink, + from_kuid(&init_user_ns, ret->st_uid), + from_kgid(&init_user_ns, ret->st_gid), + ret->st_rdev, ret->st_size, ret->st_blksize, + ret->st_blocks, ret->st_atime_sec, ret->st_atime_nsec, + ret->st_mtime_sec, ret->st_mtime_nsec, ret->st_ctime_sec, + ret->st_ctime_nsec, ret->st_btime_sec, ret->st_btime_nsec, + ret->st_gen, ret->st_data_version); + + p9_free_req(clnt, req); + return ret; + +error: + kfree(ret); + return ERR_PTR(err); +} +EXPORT_SYMBOL(p9_client_getattr_dotl); + +static int p9_client_statsize(struct p9_wstat *wst, int proto_version) +{ + int ret; + + /* NOTE: size shouldn't include its own length */ + /* size[2] type[2] dev[4] qid[13] */ + /* mode[4] atime[4] mtime[4] length[8]*/ + /* name[s] uid[s] gid[s] muid[s] */ + ret = 2+4+13+4+4+4+8+2+2+2+2; + + if (wst->name) + ret += strlen(wst->name); + if (wst->uid) + ret += strlen(wst->uid); + if (wst->gid) + ret += strlen(wst->gid); + if (wst->muid) + ret += strlen(wst->muid); + + if ((proto_version == p9_proto_2000u) || + (proto_version == p9_proto_2000L)) { + ret += 2+4+4+4; /* extension[s] n_uid[4] n_gid[4] n_muid[4] */ + if (wst->extension) + ret += strlen(wst->extension); + } + + return ret; +} + +int p9_client_wstat(struct p9_fid *fid, struct p9_wstat *wst) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = fid->clnt; + wst->size = p9_client_statsize(wst, clnt->proto_version); + p9_debug(P9_DEBUG_9P, ">>> TWSTAT fid %d\n", fid->fid); + p9_debug(P9_DEBUG_9P, + " sz=%x type=%x dev=%x qid=%x.%llx.%x\n" + " mode=%8.8x atime=%8.8x mtime=%8.8x length=%llx\n" + " name=%s uid=%s gid=%s muid=%s extension=(%s)\n" + " uid=%d gid=%d n_muid=%d\n", + wst->size, wst->type, wst->dev, wst->qid.type, + (unsigned long long)wst->qid.path, wst->qid.version, wst->mode, + wst->atime, wst->mtime, (unsigned long long)wst->length, + wst->name, wst->uid, wst->gid, wst->muid, wst->extension, + from_kuid(&init_user_ns, wst->n_uid), + from_kgid(&init_user_ns, wst->n_gid), + from_kuid(&init_user_ns, wst->n_muid)); + + req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RWSTAT fid %d\n", fid->fid); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_wstat); + +int p9_client_setattr(struct p9_fid *fid, struct p9_iattr_dotl *p9attr) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = fid->clnt; + 
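    /*
     * "dI" marshals the whole p9_iattr_dotl structure; its valid bitmask
     * tells the server which of the attributes dumped below to apply.
     */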
p9_debug(P9_DEBUG_9P, ">>> TSETATTR fid %d\n", fid->fid); + p9_debug(P9_DEBUG_9P, + " valid=%x mode=%x uid=%d gid=%d size=%lld\n" + " atime_sec=%lld atime_nsec=%lld\n" + " mtime_sec=%lld mtime_nsec=%lld\n", + p9attr->valid, p9attr->mode, + from_kuid(&init_user_ns, p9attr->uid), + from_kgid(&init_user_ns, p9attr->gid), + p9attr->size, p9attr->atime_sec, p9attr->atime_nsec, + p9attr->mtime_sec, p9attr->mtime_nsec); + + req = p9_client_rpc(clnt, P9_TSETATTR, "dI", fid->fid, p9attr); + + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RSETATTR fid %d\n", fid->fid); + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_setattr); + +int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = fid->clnt; + + p9_debug(P9_DEBUG_9P, ">>> TSTATFS fid %d\n", fid->fid); + + req = p9_client_rpc(clnt, P9_TSTATFS, "d", fid->fid); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "ddqqqqqqd", &sb->type, + &sb->bsize, &sb->blocks, &sb->bfree, &sb->bavail, + &sb->files, &sb->ffree, &sb->fsid, &sb->namelen); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RSTATFS fid %d type 0x%lx bsize %ld " + "blocks %llu bfree %llu bavail %llu files %llu ffree %llu " + "fsid %llu namelen %ld\n", + fid->fid, (long unsigned int)sb->type, (long int)sb->bsize, + sb->blocks, sb->bfree, sb->bavail, sb->files, sb->ffree, + sb->fsid, (long int)sb->namelen); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_statfs); + +int p9_client_rename(struct p9_fid *fid, + struct p9_fid *newdirfid, const char *name) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = fid->clnt; + + p9_debug(P9_DEBUG_9P, ">>> TRENAME fid %d newdirfid %d name %s\n", + fid->fid, newdirfid->fid, name); + + req = p9_client_rpc(clnt, P9_TRENAME, "dds", fid->fid, + newdirfid->fid, name); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RRENAME fid %d\n", fid->fid); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_rename); + +int p9_client_renameat(struct p9_fid *olddirfid, const char *old_name, + struct p9_fid *newdirfid, const char *new_name) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + err = 0; + clnt = olddirfid->clnt; + + p9_debug(P9_DEBUG_9P, ">>> TRENAMEAT olddirfid %d old name %s" + " newdirfid %d new name %s\n", olddirfid->fid, old_name, + newdirfid->fid, new_name); + + req = p9_client_rpc(clnt, P9_TRENAMEAT, "dsds", olddirfid->fid, + old_name, newdirfid->fid, new_name); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + p9_debug(P9_DEBUG_9P, "<<< RRENAMEAT newdirfid %d new name %s\n", + newdirfid->fid, new_name); + + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_renameat); + +/* + * An xattrwalk without @attr_name gives the fid for the lisxattr namespace + */ +struct p9_fid *p9_client_xattrwalk(struct p9_fid *file_fid, + const char *attr_name, u64 *attr_size) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + struct p9_fid *attr_fid; + + err = 0; + clnt = file_fid->clnt; + attr_fid = p9_fid_create(clnt); + if (IS_ERR(attr_fid)) { + err = PTR_ERR(attr_fid); + attr_fid = NULL; + goto error; + } + p9_debug(P9_DEBUG_9P, + ">>> TXATTRWALK file_fid %d, attr_fid %d 
name %s\n", + file_fid->fid, attr_fid->fid, attr_name); + + req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds", + file_fid->fid, attr_fid->fid, attr_name); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + err = p9pdu_readf(req->rc, clnt->proto_version, "q", attr_size); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + p9_free_req(clnt, req); + goto clunk_fid; + } + p9_free_req(clnt, req); + p9_debug(P9_DEBUG_9P, "<<< RXATTRWALK fid %d size %llu\n", + attr_fid->fid, *attr_size); + return attr_fid; +clunk_fid: + p9_client_clunk(attr_fid); + attr_fid = NULL; +error: + if (attr_fid && (attr_fid != file_fid)) + p9_fid_destroy(attr_fid); + + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(p9_client_xattrwalk); + +int p9_client_xattrcreate(struct p9_fid *fid, const char *name, + u64 attr_size, int flags) +{ + int err; + struct p9_req_t *req; + struct p9_client *clnt; + + p9_debug(P9_DEBUG_9P, + ">>> TXATTRCREATE fid %d name %s size %lld flag %d\n", + fid->fid, name, (long long)attr_size, flags); + err = 0; + clnt = fid->clnt; + req = p9_client_rpc(clnt, P9_TXATTRCREATE, "dsqd", + fid->fid, name, attr_size, flags); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RXATTRCREATE fid %d\n", fid->fid); + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL_GPL(p9_client_xattrcreate); + +int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset) +{ + int err, rsize, non_zc = 0; + struct p9_client *clnt; + struct p9_req_t *req; + char *dataptr; + struct kvec kv = {.iov_base = data, .iov_len = count}; + struct iov_iter to; + + iov_iter_kvec(&to, READ | ITER_KVEC, &kv, 1, count); + + p9_debug(P9_DEBUG_9P, ">>> TREADDIR fid %d offset %llu count %d\n", + fid->fid, (unsigned long long) offset, count); + + err = 0; + clnt = fid->clnt; + + rsize = fid->iounit; + if (!rsize || rsize > clnt->msize-P9_READDIRHDRSZ) + rsize = clnt->msize - P9_READDIRHDRSZ; + + if (count < rsize) + rsize = count; + + /* Don't bother zerocopy for small IO (< 1024) */ + if (clnt->trans_mod->zc_request && rsize > 1024) { + /* + * response header len is 11 + * PDU Header(7) + IO Size (4) + */ + req = p9_client_zc_rpc(clnt, P9_TREADDIR, &to, NULL, rsize, 0, + 11, "dqd", fid->fid, offset, rsize); + } else { + non_zc = 1; + req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid, + offset, rsize); + } + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto error; + } + + err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto free_and_error; + } + if (rsize < count) { + pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize); + count = rsize; + } + + p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count); + + if (non_zc) + memmove(data, dataptr, count); + + p9_free_req(clnt, req); + return count; + +free_and_error: + p9_free_req(clnt, req); +error: + return err; +} +EXPORT_SYMBOL(p9_client_readdir); + +int p9_client_mknod_dotl(struct p9_fid *fid, char *name, int mode, + dev_t rdev, kgid_t gid, struct p9_qid *qid) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + err = 0; + clnt = fid->clnt; + p9_debug(P9_DEBUG_9P, ">>> TMKNOD fid %d name %s mode %d major %d " + "minor %d\n", fid->fid, name, mode, MAJOR(rdev), MINOR(rdev)); + req = p9_client_rpc(clnt, P9_TMKNOD, "dsdddg", fid->fid, name, mode, + MAJOR(rdev), MINOR(rdev), gid); + if (IS_ERR(req)) + return PTR_ERR(req); + + err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid); + if (err) { + trace_9p_protocol_dump(clnt, 
req->rc); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RMKNOD qid %x.%llx.%x\n", qid->type, + (unsigned long long)qid->path, qid->version); + +error: + p9_free_req(clnt, req); + return err; + +} +EXPORT_SYMBOL(p9_client_mknod_dotl); + +int p9_client_mkdir_dotl(struct p9_fid *fid, char *name, int mode, + kgid_t gid, struct p9_qid *qid) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + err = 0; + clnt = fid->clnt; + p9_debug(P9_DEBUG_9P, ">>> TMKDIR fid %d name %s mode %d gid %d\n", + fid->fid, name, mode, from_kgid(&init_user_ns, gid)); + req = p9_client_rpc(clnt, P9_TMKDIR, "dsdg", fid->fid, name, mode, + gid); + if (IS_ERR(req)) + return PTR_ERR(req); + + err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RMKDIR qid %x.%llx.%x\n", qid->type, + (unsigned long long)qid->path, qid->version); + +error: + p9_free_req(clnt, req); + return err; + +} +EXPORT_SYMBOL(p9_client_mkdir_dotl); + +int p9_client_lock_dotl(struct p9_fid *fid, struct p9_flock *flock, u8 *status) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + err = 0; + clnt = fid->clnt; + p9_debug(P9_DEBUG_9P, ">>> TLOCK fid %d type %i flags %d " + "start %lld length %lld proc_id %d client_id %s\n", + fid->fid, flock->type, flock->flags, flock->start, + flock->length, flock->proc_id, flock->client_id); + + req = p9_client_rpc(clnt, P9_TLOCK, "dbdqqds", fid->fid, flock->type, + flock->flags, flock->start, flock->length, + flock->proc_id, flock->client_id); + + if (IS_ERR(req)) + return PTR_ERR(req); + + err = p9pdu_readf(req->rc, clnt->proto_version, "b", status); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RLOCK status %i\n", *status); +error: + p9_free_req(clnt, req); + return err; + +} +EXPORT_SYMBOL(p9_client_lock_dotl); + +int p9_client_getlock_dotl(struct p9_fid *fid, struct p9_getlock *glock) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + err = 0; + clnt = fid->clnt; + p9_debug(P9_DEBUG_9P, ">>> TGETLOCK fid %d, type %i start %lld " + "length %lld proc_id %d client_id %s\n", fid->fid, glock->type, + glock->start, glock->length, glock->proc_id, glock->client_id); + + req = p9_client_rpc(clnt, P9_TGETLOCK, "dbqqds", fid->fid, glock->type, + glock->start, glock->length, glock->proc_id, glock->client_id); + + if (IS_ERR(req)) + return PTR_ERR(req); + + err = p9pdu_readf(req->rc, clnt->proto_version, "bqqds", &glock->type, + &glock->start, &glock->length, &glock->proc_id, + &glock->client_id); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RGETLOCK type %i start %lld length %lld " + "proc_id %d client_id %s\n", glock->type, glock->start, + glock->length, glock->proc_id, glock->client_id); +error: + p9_free_req(clnt, req); + return err; +} +EXPORT_SYMBOL(p9_client_getlock_dotl); + +int p9_client_readlink(struct p9_fid *fid, char **target) +{ + int err; + struct p9_client *clnt; + struct p9_req_t *req; + + err = 0; + clnt = fid->clnt; + p9_debug(P9_DEBUG_9P, ">>> TREADLINK fid %d\n", fid->fid); + + req = p9_client_rpc(clnt, P9_TREADLINK, "d", fid->fid); + if (IS_ERR(req)) + return PTR_ERR(req); + + err = p9pdu_readf(req->rc, clnt->proto_version, "s", target); + if (err) { + trace_9p_protocol_dump(clnt, req->rc); + goto error; + } + p9_debug(P9_DEBUG_9P, "<<< RREADLINK target %s\n", *target); +error: + p9_free_req(clnt, req); + return err; +} 
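+
+/*
+ * All of the client calls above share one request/response shape:
+ * encode the T-message with p9_client_rpc() using the printf-like
+ * format legend from protocol.c ("d" = int32_t, "q" = int64_t,
+ * "s" = string, "g" = numeric gid, "Q" = qid), decode the matching
+ * R-message payload with p9pdu_readf(), and release the request
+ * slot with p9_free_req().  TMKDIR, for example, encodes "dsdg" as
+ * (dfid, name, mode, gid) and decodes the RMKDIR reply with "Q" to
+ * recover the new directory's qid.
+ */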
+EXPORT_SYMBOL(p9_client_readlink); diff --git a/addons/9p/src/4.4.180/error.c b/addons/9p/src/4.4.180/error.c new file mode 100644 index 00000000..126fd0dc --- /dev/null +++ b/addons/9p/src/4.4.180/error.c @@ -0,0 +1,247 @@ +/* + * linux/fs/9p/error.c + * + * Error string handling + * + * Plan 9 uses error strings, Unix uses error numbers. These functions + * try to help manage that and provide for dynamically adding error + * mappings. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +/** + * struct errormap - map string errors from Plan 9 to Linux numeric ids + * @name: string sent over 9P + * @val: numeric id most closely representing @name + * @namelen: length of string + * @list: hash-table list for string lookup + */ +struct errormap { + char *name; + int val; + + int namelen; + struct hlist_node list; +}; + +#define ERRHASHSZ 32 +static struct hlist_head hash_errmap[ERRHASHSZ]; + +/* FixMe - reduce to a reasonable size */ +static struct errormap errmap[] = { + {"Operation not permitted", EPERM}, + {"wstat prohibited", EPERM}, + {"No such file or directory", ENOENT}, + {"directory entry not found", ENOENT}, + {"file not found", ENOENT}, + {"Interrupted system call", EINTR}, + {"Input/output error", EIO}, + {"No such device or address", ENXIO}, + {"Argument list too long", E2BIG}, + {"Bad file descriptor", EBADF}, + {"Resource temporarily unavailable", EAGAIN}, + {"Cannot allocate memory", ENOMEM}, + {"Permission denied", EACCES}, + {"Bad address", EFAULT}, + {"Block device required", ENOTBLK}, + {"Device or resource busy", EBUSY}, + {"File exists", EEXIST}, + {"Invalid cross-device link", EXDEV}, + {"No such device", ENODEV}, + {"Not a directory", ENOTDIR}, + {"Is a directory", EISDIR}, + {"Invalid argument", EINVAL}, + {"Too many open files in system", ENFILE}, + {"Too many open files", EMFILE}, + {"Text file busy", ETXTBSY}, + {"File too large", EFBIG}, + {"No space left on device", ENOSPC}, + {"Illegal seek", ESPIPE}, + {"Read-only file system", EROFS}, + {"Too many links", EMLINK}, + {"Broken pipe", EPIPE}, + {"Numerical argument out of domain", EDOM}, + {"Numerical result out of range", ERANGE}, + {"Resource deadlock avoided", EDEADLK}, + {"File name too long", ENAMETOOLONG}, + {"No locks available", ENOLCK}, + {"Function not implemented", ENOSYS}, + {"Directory not empty", ENOTEMPTY}, + {"Too many levels of symbolic links", ELOOP}, + {"No message of desired type", ENOMSG}, + {"Identifier removed", EIDRM}, + {"No data available", ENODATA}, + {"Machine is not on the network", ENONET}, + {"Package not installed", ENOPKG}, + {"Object is remote", EREMOTE}, + {"Link has been severed", ENOLINK}, + {"Communication error on send", ECOMM}, + {"Protocol error", EPROTO}, + {"Bad message", 
EBADMSG}, + {"File descriptor in bad state", EBADFD}, + {"Streams pipe error", ESTRPIPE}, + {"Too many users", EUSERS}, + {"Socket operation on non-socket", ENOTSOCK}, + {"Message too long", EMSGSIZE}, + {"Protocol not available", ENOPROTOOPT}, + {"Protocol not supported", EPROTONOSUPPORT}, + {"Socket type not supported", ESOCKTNOSUPPORT}, + {"Operation not supported", EOPNOTSUPP}, + {"Protocol family not supported", EPFNOSUPPORT}, + {"Network is down", ENETDOWN}, + {"Network is unreachable", ENETUNREACH}, + {"Network dropped connection on reset", ENETRESET}, + {"Software caused connection abort", ECONNABORTED}, + {"Connection reset by peer", ECONNRESET}, + {"No buffer space available", ENOBUFS}, + {"Transport endpoint is already connected", EISCONN}, + {"Transport endpoint is not connected", ENOTCONN}, + {"Cannot send after transport endpoint shutdown", ESHUTDOWN}, + {"Connection timed out", ETIMEDOUT}, + {"Connection refused", ECONNREFUSED}, + {"Host is down", EHOSTDOWN}, + {"No route to host", EHOSTUNREACH}, + {"Operation already in progress", EALREADY}, + {"Operation now in progress", EINPROGRESS}, + {"Is a named type file", EISNAM}, + {"Remote I/O error", EREMOTEIO}, + {"Disk quota exceeded", EDQUOT}, +/* errors from fossil, vacfs, and u9fs */ + {"fid unknown or out of range", EBADF}, + {"permission denied", EACCES}, + {"file does not exist", ENOENT}, + {"authentication failed", ECONNREFUSED}, + {"bad offset in directory read", ESPIPE}, + {"bad use of fid", EBADF}, + {"wstat can't convert between files and directories", EPERM}, + {"directory is not empty", ENOTEMPTY}, + {"file exists", EEXIST}, + {"file already exists", EEXIST}, + {"file or directory already exists", EEXIST}, + {"fid already in use", EBADF}, + {"file in use", ETXTBSY}, + {"i/o error", EIO}, + {"file already open for I/O", ETXTBSY}, + {"illegal mode", EINVAL}, + {"illegal name", ENAMETOOLONG}, + {"not a directory", ENOTDIR}, + {"not a member of proposed group", EPERM}, + {"not owner", EACCES}, + {"only owner can change group in wstat", EACCES}, + {"read only file system", EROFS}, + {"no access to special file", EPERM}, + {"i/o count too large", EIO}, + {"unknown group", EINVAL}, + {"unknown user", EINVAL}, + {"bogus wstat buffer", EPROTO}, + {"exclusive use file already open", EAGAIN}, + {"corrupted directory entry", EIO}, + {"corrupted file entry", EIO}, + {"corrupted block label", EIO}, + {"corrupted meta data", EIO}, + {"illegal offset", EINVAL}, + {"illegal path element", ENOENT}, + {"root of file system is corrupted", EIO}, + {"corrupted super block", EIO}, + {"protocol botch", EPROTO}, + {"file system is full", ENOSPC}, + {"file is in use", EAGAIN}, + {"directory entry is not allocated", ENOENT}, + {"file is read only", EROFS}, + {"file has been removed", EIDRM}, + {"only support truncation to zero length", EPERM}, + {"cannot remove root", EPERM}, + {"file too big", EFBIG}, + {"venti i/o error", EIO}, + /* these are not errors */ + {"u9fs rhostsauth: no authentication required", 0}, + {"u9fs authnone: no authentication required", 0}, + {NULL, -1} +}; + +/** + * p9_error_init - preload mappings into hash list + * + */ + +int p9_error_init(void) +{ + struct errormap *c; + int bucket; + + /* initialize hash table */ + for (bucket = 0; bucket < ERRHASHSZ; bucket++) + INIT_HLIST_HEAD(&hash_errmap[bucket]); + + /* load initial error map into hash table */ + for (c = errmap; c->name != NULL; c++) { + c->namelen = strlen(c->name); + bucket = jhash(c->name, c->namelen, 0) % ERRHASHSZ; + INIT_HLIST_NODE(&c->list); + 
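+		/* chain this mapping into the bucket picked by jhash() above */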
hlist_add_head(&c->list, &hash_errmap[bucket]); + } + + return 1; +} +EXPORT_SYMBOL(p9_error_init); + +/** + * errstr2errno - convert error string to error number + * @errstr: error string + * @len: length of error string + * + */ + +int p9_errstr2errno(char *errstr, int len) +{ + int errno; + struct errormap *c; + int bucket; + + errno = 0; + c = NULL; + bucket = jhash(errstr, len, 0) % ERRHASHSZ; + hlist_for_each_entry(c, &hash_errmap[bucket], list) { + if (c->namelen == len && !memcmp(c->name, errstr, len)) { + errno = c->val; + break; + } + } + + if (errno == 0) { + /* TODO: if error isn't found, add it dynamically */ + errstr[len] = 0; + pr_err("%s: server reported unknown error %s\n", + __func__, errstr); + errno = ESERVERFAULT; + } + + return -errno; +} +EXPORT_SYMBOL(p9_errstr2errno); diff --git a/addons/9p/src/4.4.180/fid.c b/addons/9p/src/4.4.180/fid.c new file mode 100644 index 00000000..47db55ae --- /dev/null +++ b/addons/9p/src/4.4.180/fid.c @@ -0,0 +1,306 @@ +/* + * V9FS FID Management + * + * Copyright (C) 2007 by Latchesar Ionkov + * Copyright (C) 2005, 2006 by Eric Van Hensbergen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" + +/** + * v9fs_fid_add - add a fid to a dentry + * @dentry: dentry that the fid is being added to + * @fid: fid to add + * + */ + +static inline void __add_fid(struct dentry *dentry, struct p9_fid *fid) +{ + hlist_add_head(&fid->dlist, (struct hlist_head *)&dentry->d_fsdata); +} + +void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid) +{ + spin_lock(&dentry->d_lock); + __add_fid(dentry, fid); + spin_unlock(&dentry->d_lock); +} + +/** + * v9fs_fid_find - retrieve a fid that belongs to the specified uid + * @dentry: dentry to look for fid in + * @uid: return fid that belongs to the specified user + * @any: if non-zero, return any fid associated with the dentry + * + */ + +static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any) +{ + struct p9_fid *fid, *ret; + + p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p) uid %d any %d\n", + dentry, dentry, from_kuid(&init_user_ns, uid), + any); + ret = NULL; + /* we'll recheck under lock if there's anything to look in */ + if (dentry->d_fsdata) { + struct hlist_head *h = (struct hlist_head *)&dentry->d_fsdata; + spin_lock(&dentry->d_lock); + hlist_for_each_entry(fid, h, dlist) { + if (any || uid_eq(fid->uid, uid)) { + ret = fid; + break; + } + } + spin_unlock(&dentry->d_lock); + } + + return ret; +} + +/* + * We need to hold v9ses->rename_sem as long as we hold references + * to returned path array. Array element contain pointers to + * dentry names. 
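+ * build_path_from_dentry() below walks ->d_parent from the leaf
+ * upward and stores the names in root-to-leaf order, so for a
+ * dentry at a/b/c the array holds {"a", "b", "c"}; a concurrent
+ * rename could otherwise free or change those names while we
+ * still point at them.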
+ */ +static int build_path_from_dentry(struct v9fs_session_info *v9ses, + struct dentry *dentry, char ***names) +{ + int n = 0, i; + char **wnames; + struct dentry *ds; + + for (ds = dentry; !IS_ROOT(ds); ds = ds->d_parent) + n++; + + wnames = kmalloc(sizeof(char *) * n, GFP_KERNEL); + if (!wnames) + goto err_out; + + for (ds = dentry, i = (n-1); i >= 0; i--, ds = ds->d_parent) + wnames[i] = (char *)ds->d_name.name; + + *names = wnames; + return n; +err_out: + return -ENOMEM; +} + +static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, + kuid_t uid, int any) +{ + struct dentry *ds; + char **wnames, *uname; + int i, n, l, clone, access; + struct v9fs_session_info *v9ses; + struct p9_fid *fid, *old_fid = NULL; + + v9ses = v9fs_dentry2v9ses(dentry); + access = v9ses->flags & V9FS_ACCESS_MASK; + fid = v9fs_fid_find(dentry, uid, any); + if (fid) + return fid; + /* + * we don't have a matching fid. To do a TWALK we need + * parent fid. We need to prevent rename when we want to + * look at the parent. + */ + down_read(&v9ses->rename_sem); + ds = dentry->d_parent; + fid = v9fs_fid_find(ds, uid, any); + if (fid) { + /* Found the parent fid do a lookup with that */ + fid = p9_client_walk(fid, 1, (char **)&dentry->d_name.name, 1); + goto fid_out; + } + up_read(&v9ses->rename_sem); + + /* start from the root and try to do a lookup */ + fid = v9fs_fid_find(dentry->d_sb->s_root, uid, any); + if (!fid) { + /* the user is not attached to the fs yet */ + if (access == V9FS_ACCESS_SINGLE) + return ERR_PTR(-EPERM); + + if (v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) + uname = NULL; + else + uname = v9ses->uname; + + fid = p9_client_attach(v9ses->clnt, NULL, uname, uid, + v9ses->aname); + if (IS_ERR(fid)) + return fid; + + v9fs_fid_add(dentry->d_sb->s_root, fid); + } + /* If we are root ourself just return that */ + if (dentry->d_sb->s_root == dentry) + return fid; + /* + * Do a multipath walk with attached root. + * When walking parent we need to make sure we + * don't have a parallel rename happening + */ + down_read(&v9ses->rename_sem); + n = build_path_from_dentry(v9ses, dentry, &wnames); + if (n < 0) { + fid = ERR_PTR(n); + goto err_out; + } + clone = 1; + i = 0; + while (i < n) { + l = min(n - i, P9_MAXWELEM); + /* + * We need to hold rename lock when doing a multipath + * walk to ensure none of the patch component change + */ + fid = p9_client_walk(fid, l, &wnames[i], clone); + if (IS_ERR(fid)) { + if (old_fid) { + /* + * If we fail, clunk fid which are mapping + * to path component and not the last component + * of the path. + */ + p9_client_clunk(old_fid); + } + kfree(wnames); + goto err_out; + } + old_fid = fid; + i += l; + clone = 0; + } + kfree(wnames); +fid_out: + if (!IS_ERR(fid)) { + spin_lock(&dentry->d_lock); + if (d_unhashed(dentry)) { + spin_unlock(&dentry->d_lock); + p9_client_clunk(fid); + fid = ERR_PTR(-ENOENT); + } else { + __add_fid(dentry, fid); + spin_unlock(&dentry->d_lock); + } + } +err_out: + up_read(&v9ses->rename_sem); + return fid; +} + +/** + * v9fs_fid_lookup - lookup for a fid, try to walk if not found + * @dentry: dentry to look for fid in + * + * Look for a fid in the specified dentry for the current user. + * If no fid is found, try to create one walking from a fid from the parent + * dentry (if it has one), or the root dentry. If the user haven't accessed + * the fs yet, attach now and walk from the root. 
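+ * Which uid is used depends on the session access mode: SINGLE,
+ * USER and CLIENT look up a fid owned by current_fsuid(), while
+ * ACCESS_ANY shares a single fid (v9ses->uid) across all users.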
+ */ + +struct p9_fid *v9fs_fid_lookup(struct dentry *dentry) +{ + kuid_t uid; + int any, access; + struct v9fs_session_info *v9ses; + + v9ses = v9fs_dentry2v9ses(dentry); + access = v9ses->flags & V9FS_ACCESS_MASK; + switch (access) { + case V9FS_ACCESS_SINGLE: + case V9FS_ACCESS_USER: + case V9FS_ACCESS_CLIENT: + uid = current_fsuid(); + any = 0; + break; + + case V9FS_ACCESS_ANY: + uid = v9ses->uid; + any = 1; + break; + + default: + uid = INVALID_UID; + any = 0; + break; + } + return v9fs_fid_lookup_with_uid(dentry, uid, any); +} + +struct p9_fid *v9fs_fid_clone(struct dentry *dentry) +{ + struct p9_fid *fid, *ret; + + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return fid; + + ret = p9_client_walk(fid, 0, NULL, 1); + return ret; +} + +static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, kuid_t uid) +{ + struct p9_fid *fid, *ret; + + fid = v9fs_fid_lookup_with_uid(dentry, uid, 0); + if (IS_ERR(fid)) + return fid; + + ret = p9_client_walk(fid, 0, NULL, 1); + return ret; +} + +struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) +{ + int err; + struct p9_fid *fid; + + fid = v9fs_fid_clone_with_uid(dentry, GLOBAL_ROOT_UID); + if (IS_ERR(fid)) + goto error_out; + /* + * writeback fid will only be used to write back the + * dirty pages. We always request for the open fid in read-write + * mode so that a partial page write which result in page + * read can work. + */ + err = p9_client_open(fid, O_RDWR); + if (err < 0) { + p9_client_clunk(fid); + fid = ERR_PTR(err); + goto error_out; + } +error_out: + return fid; +} diff --git a/addons/9p/src/4.4.180/fid.h b/addons/9p/src/4.4.180/fid.h new file mode 100644 index 00000000..2b6787fc --- /dev/null +++ b/addons/9p/src/4.4.180/fid.h @@ -0,0 +1,30 @@ +/* + * V9FS FID Management + * + * Copyright (C) 2005 by Eric Van Hensbergen + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ +#ifndef FS_9P_FID_H +#define FS_9P_FID_H +#include + +struct p9_fid *v9fs_fid_lookup(struct dentry *dentry); +struct p9_fid *v9fs_fid_clone(struct dentry *dentry); +void v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid); +struct p9_fid *v9fs_writeback_fid(struct dentry *dentry); +#endif diff --git a/addons/9p/src/4.4.180/mod.c b/addons/9p/src/4.4.180/mod.c new file mode 100644 index 00000000..6ab36aea --- /dev/null +++ b/addons/9p/src/4.4.180/mod.c @@ -0,0 +1,201 @@ +/* + * net/9p/9p.c + * + * 9P entry point + * + * Copyright (C) 2007 by Latchesar Ionkov + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_NET_9P_DEBUG +unsigned int p9_debug_level = 0; /* feature-rific global debug level */ +EXPORT_SYMBOL(p9_debug_level); +module_param_named(debug, p9_debug_level, uint, 0); +MODULE_PARM_DESC(debug, "9P debugging level"); + +void _p9_debug(enum p9_debug_flags level, const char *func, + const char *fmt, ...) +{ + struct va_format vaf; + va_list args; + + if ((p9_debug_level & level) != level) + return; + + va_start(args, fmt); + + vaf.fmt = fmt; + vaf.va = &args; + + if (level == P9_DEBUG_9P) + pr_notice("(%8.8d) %pV", task_pid_nr(current), &vaf); + else + pr_notice("-- %s (%d): %pV", func, task_pid_nr(current), &vaf); + + va_end(args); +} +EXPORT_SYMBOL(_p9_debug); +#endif + +/* + * Dynamic Transport Registration Routines + * + */ + +static DEFINE_SPINLOCK(v9fs_trans_lock); +static LIST_HEAD(v9fs_trans_list); + +/** + * v9fs_register_trans - register a new transport with 9p + * @m: structure describing the transport module and entry points + * + */ +void v9fs_register_trans(struct p9_trans_module *m) +{ + spin_lock(&v9fs_trans_lock); + list_add_tail(&m->list, &v9fs_trans_list); + spin_unlock(&v9fs_trans_lock); +} +EXPORT_SYMBOL(v9fs_register_trans); + +/** + * v9fs_unregister_trans - unregister a 9p transport + * @m: the transport to remove + * + */ +void v9fs_unregister_trans(struct p9_trans_module *m) +{ + spin_lock(&v9fs_trans_lock); + list_del_init(&m->list); + spin_unlock(&v9fs_trans_lock); +} +EXPORT_SYMBOL(v9fs_unregister_trans); + +/** + * v9fs_get_trans_by_name - get transport with the matching name + * @name: string identifying transport + * + */ +struct p9_trans_module *v9fs_get_trans_by_name(char *s) +{ + struct p9_trans_module *t, *found = NULL; + + spin_lock(&v9fs_trans_lock); + + list_for_each_entry(t, &v9fs_trans_list, list) + if (strcmp(t->name, s) == 0 && + try_module_get(t->owner)) { + found = t; + break; + } + + spin_unlock(&v9fs_trans_lock); + return found; +} +EXPORT_SYMBOL(v9fs_get_trans_by_name); + +/** + * v9fs_get_default_trans - get the default transport + * + */ + +struct p9_trans_module *v9fs_get_default_trans(void) +{ + struct p9_trans_module *t, *found = NULL; + + spin_lock(&v9fs_trans_lock); + + list_for_each_entry(t, &v9fs_trans_list, list) + if (t->def && try_module_get(t->owner)) { + found = t; + break; + } + + if (!found) + list_for_each_entry(t, &v9fs_trans_list, list) + if (try_module_get(t->owner)) { + found = t; + break; + } + + spin_unlock(&v9fs_trans_lock); + return found; +} +EXPORT_SYMBOL(v9fs_get_default_trans); + +/** + * v9fs_put_trans - put trans + * @m: transport to put + * + */ +void v9fs_put_trans(struct p9_trans_module *m) +{ + if (m) + module_put(m->owner); +} + +/** + * init_p9 - Initialize module + * + */ +static int __init init_p9(void) +{ + int ret = 0; + + p9_error_init(); + pr_info("Installing 9P2000 support\n"); + p9_trans_fd_init(); + + return ret; +} + +/** + * exit_p9 - shutdown module + * + */ + +static void __exit exit_p9(void) +{ + pr_info("Unloading 9P2000 support\n"); + + p9_trans_fd_exit(); +} + +module_init(init_p9) +module_exit(exit_p9) + +MODULE_AUTHOR("Latchesar Ionkov 
"); +MODULE_AUTHOR("Eric Van Hensbergen "); +MODULE_AUTHOR("Ron Minnich "); +MODULE_LICENSE("GPL"); diff --git a/addons/9p/src/4.4.180/protocol.c b/addons/9p/src/4.4.180/protocol.c new file mode 100644 index 00000000..7f1b45c0 --- /dev/null +++ b/addons/9p/src/4.4.180/protocol.c @@ -0,0 +1,634 @@ +/* + * net/9p/protocol.c + * + * 9P Protocol Support Code + * + * Copyright (C) 2008 by Eric Van Hensbergen + * + * Base on code from Anthony Liguori + * Copyright (C) 2008 by IBM, Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "protocol.h" + +#include + +static int +p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...); + +void p9stat_free(struct p9_wstat *stbuf) +{ + kfree(stbuf->name); + stbuf->name = NULL; + kfree(stbuf->uid); + stbuf->uid = NULL; + kfree(stbuf->gid); + stbuf->gid = NULL; + kfree(stbuf->muid); + stbuf->muid = NULL; + kfree(stbuf->extension); + stbuf->extension = NULL; +} +EXPORT_SYMBOL(p9stat_free); + +size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size) +{ + size_t len = min(pdu->size - pdu->offset, size); + memcpy(data, &pdu->sdata[pdu->offset], len); + pdu->offset += len; + return size - len; +} + +static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size) +{ + size_t len = min(pdu->capacity - pdu->size, size); + memcpy(&pdu->sdata[pdu->size], data, len); + pdu->size += len; + return size - len; +} + +static size_t +pdu_write_u(struct p9_fcall *pdu, struct iov_iter *from, size_t size) +{ + size_t len = min(pdu->capacity - pdu->size, size); + struct iov_iter i = *from; + if (copy_from_iter(&pdu->sdata[pdu->size], len, &i) != len) + len = 0; + + pdu->size += len; + return size - len; +} + +/* + b - int8_t + w - int16_t + d - int32_t + q - int64_t + s - string + u - numeric uid + g - numeric gid + S - stat + Q - qid + D - data blob (int32_t size followed by void *, results are not freed) + T - array of strings (int16_t count, followed by strings) + R - array of qids (int16_t count, followed by qids) + A - stat for 9p2000.L (p9_stat_dotl) + ? 
- if optional = 1, continue parsing +*/ + +static int +p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt, + va_list ap) +{ + const char *ptr; + int errcode = 0; + + for (ptr = fmt; *ptr; ptr++) { + switch (*ptr) { + case 'b':{ + int8_t *val = va_arg(ap, int8_t *); + if (pdu_read(pdu, val, sizeof(*val))) { + errcode = -EFAULT; + break; + } + } + break; + case 'w':{ + int16_t *val = va_arg(ap, int16_t *); + __le16 le_val; + if (pdu_read(pdu, &le_val, sizeof(le_val))) { + errcode = -EFAULT; + break; + } + *val = le16_to_cpu(le_val); + } + break; + case 'd':{ + int32_t *val = va_arg(ap, int32_t *); + __le32 le_val; + if (pdu_read(pdu, &le_val, sizeof(le_val))) { + errcode = -EFAULT; + break; + } + *val = le32_to_cpu(le_val); + } + break; + case 'q':{ + int64_t *val = va_arg(ap, int64_t *); + __le64 le_val; + if (pdu_read(pdu, &le_val, sizeof(le_val))) { + errcode = -EFAULT; + break; + } + *val = le64_to_cpu(le_val); + } + break; + case 's':{ + char **sptr = va_arg(ap, char **); + uint16_t len; + + errcode = p9pdu_readf(pdu, proto_version, + "w", &len); + if (errcode) + break; + + *sptr = kmalloc(len + 1, GFP_NOFS); + if (*sptr == NULL) { + errcode = -EFAULT; + break; + } + if (pdu_read(pdu, *sptr, len)) { + errcode = -EFAULT; + kfree(*sptr); + *sptr = NULL; + } else + (*sptr)[len] = 0; + } + break; + case 'u': { + kuid_t *uid = va_arg(ap, kuid_t *); + __le32 le_val; + if (pdu_read(pdu, &le_val, sizeof(le_val))) { + errcode = -EFAULT; + break; + } + *uid = make_kuid(&init_user_ns, + le32_to_cpu(le_val)); + } break; + case 'g': { + kgid_t *gid = va_arg(ap, kgid_t *); + __le32 le_val; + if (pdu_read(pdu, &le_val, sizeof(le_val))) { + errcode = -EFAULT; + break; + } + *gid = make_kgid(&init_user_ns, + le32_to_cpu(le_val)); + } break; + case 'Q':{ + struct p9_qid *qid = + va_arg(ap, struct p9_qid *); + + errcode = p9pdu_readf(pdu, proto_version, "bdq", + &qid->type, &qid->version, + &qid->path); + } + break; + case 'S':{ + struct p9_wstat *stbuf = + va_arg(ap, struct p9_wstat *); + + memset(stbuf, 0, sizeof(struct p9_wstat)); + stbuf->n_uid = stbuf->n_muid = INVALID_UID; + stbuf->n_gid = INVALID_GID; + + errcode = + p9pdu_readf(pdu, proto_version, + "wwdQdddqssss?sugu", + &stbuf->size, &stbuf->type, + &stbuf->dev, &stbuf->qid, + &stbuf->mode, &stbuf->atime, + &stbuf->mtime, &stbuf->length, + &stbuf->name, &stbuf->uid, + &stbuf->gid, &stbuf->muid, + &stbuf->extension, + &stbuf->n_uid, &stbuf->n_gid, + &stbuf->n_muid); + if (errcode) + p9stat_free(stbuf); + } + break; + case 'D':{ + uint32_t *count = va_arg(ap, uint32_t *); + void **data = va_arg(ap, void **); + + errcode = + p9pdu_readf(pdu, proto_version, "d", count); + if (!errcode) { + *count = + min_t(uint32_t, *count, + pdu->size - pdu->offset); + *data = &pdu->sdata[pdu->offset]; + } + } + break; + case 'T':{ + uint16_t *nwname = va_arg(ap, uint16_t *); + char ***wnames = va_arg(ap, char ***); + + errcode = p9pdu_readf(pdu, proto_version, + "w", nwname); + if (!errcode) { + *wnames = + kmalloc(sizeof(char *) * *nwname, + GFP_NOFS); + if (!*wnames) + errcode = -ENOMEM; + } + + if (!errcode) { + int i; + + for (i = 0; i < *nwname; i++) { + errcode = + p9pdu_readf(pdu, + proto_version, + "s", + &(*wnames)[i]); + if (errcode) + break; + } + } + + if (errcode) { + if (*wnames) { + int i; + + for (i = 0; i < *nwname; i++) + kfree((*wnames)[i]); + } + kfree(*wnames); + *wnames = NULL; + } + } + break; + case 'R':{ + uint16_t *nwqid = va_arg(ap, uint16_t *); + struct p9_qid **wqids = + va_arg(ap, struct p9_qid **); + + *wqids = 
NULL; + + errcode = + p9pdu_readf(pdu, proto_version, "w", nwqid); + if (!errcode) { + *wqids = + kmalloc(*nwqid * + sizeof(struct p9_qid), + GFP_NOFS); + if (*wqids == NULL) + errcode = -ENOMEM; + } + + if (!errcode) { + int i; + + for (i = 0; i < *nwqid; i++) { + errcode = + p9pdu_readf(pdu, + proto_version, + "Q", + &(*wqids)[i]); + if (errcode) + break; + } + } + + if (errcode) { + kfree(*wqids); + *wqids = NULL; + } + } + break; + case 'A': { + struct p9_stat_dotl *stbuf = + va_arg(ap, struct p9_stat_dotl *); + + memset(stbuf, 0, sizeof(struct p9_stat_dotl)); + errcode = + p9pdu_readf(pdu, proto_version, + "qQdugqqqqqqqqqqqqqqq", + &stbuf->st_result_mask, + &stbuf->qid, + &stbuf->st_mode, + &stbuf->st_uid, &stbuf->st_gid, + &stbuf->st_nlink, + &stbuf->st_rdev, &stbuf->st_size, + &stbuf->st_blksize, &stbuf->st_blocks, + &stbuf->st_atime_sec, + &stbuf->st_atime_nsec, + &stbuf->st_mtime_sec, + &stbuf->st_mtime_nsec, + &stbuf->st_ctime_sec, + &stbuf->st_ctime_nsec, + &stbuf->st_btime_sec, + &stbuf->st_btime_nsec, + &stbuf->st_gen, + &stbuf->st_data_version); + } + break; + case '?': + if ((proto_version != p9_proto_2000u) && + (proto_version != p9_proto_2000L)) + return 0; + break; + default: + BUG(); + break; + } + + if (errcode) + break; + } + + return errcode; +} + +int +p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, + va_list ap) +{ + const char *ptr; + int errcode = 0; + + for (ptr = fmt; *ptr; ptr++) { + switch (*ptr) { + case 'b':{ + int8_t val = va_arg(ap, int); + if (pdu_write(pdu, &val, sizeof(val))) + errcode = -EFAULT; + } + break; + case 'w':{ + __le16 val = cpu_to_le16(va_arg(ap, int)); + if (pdu_write(pdu, &val, sizeof(val))) + errcode = -EFAULT; + } + break; + case 'd':{ + __le32 val = cpu_to_le32(va_arg(ap, int32_t)); + if (pdu_write(pdu, &val, sizeof(val))) + errcode = -EFAULT; + } + break; + case 'q':{ + __le64 val = cpu_to_le64(va_arg(ap, int64_t)); + if (pdu_write(pdu, &val, sizeof(val))) + errcode = -EFAULT; + } + break; + case 's':{ + const char *sptr = va_arg(ap, const char *); + uint16_t len = 0; + if (sptr) + len = min_t(size_t, strlen(sptr), + USHRT_MAX); + + errcode = p9pdu_writef(pdu, proto_version, + "w", len); + if (!errcode && pdu_write(pdu, sptr, len)) + errcode = -EFAULT; + } + break; + case 'u': { + kuid_t uid = va_arg(ap, kuid_t); + __le32 val = cpu_to_le32( + from_kuid(&init_user_ns, uid)); + if (pdu_write(pdu, &val, sizeof(val))) + errcode = -EFAULT; + } break; + case 'g': { + kgid_t gid = va_arg(ap, kgid_t); + __le32 val = cpu_to_le32( + from_kgid(&init_user_ns, gid)); + if (pdu_write(pdu, &val, sizeof(val))) + errcode = -EFAULT; + } break; + case 'Q':{ + const struct p9_qid *qid = + va_arg(ap, const struct p9_qid *); + errcode = + p9pdu_writef(pdu, proto_version, "bdq", + qid->type, qid->version, + qid->path); + } break; + case 'S':{ + const struct p9_wstat *stbuf = + va_arg(ap, const struct p9_wstat *); + errcode = + p9pdu_writef(pdu, proto_version, + "wwdQdddqssss?sugu", + stbuf->size, stbuf->type, + stbuf->dev, &stbuf->qid, + stbuf->mode, stbuf->atime, + stbuf->mtime, stbuf->length, + stbuf->name, stbuf->uid, + stbuf->gid, stbuf->muid, + stbuf->extension, stbuf->n_uid, + stbuf->n_gid, stbuf->n_muid); + } break; + case 'V':{ + uint32_t count = va_arg(ap, uint32_t); + struct iov_iter *from = + va_arg(ap, struct iov_iter *); + errcode = p9pdu_writef(pdu, proto_version, "d", + count); + if (!errcode && pdu_write_u(pdu, from, count)) + errcode = -EFAULT; + } + break; + case 'T':{ + uint16_t nwname = va_arg(ap, int); + const 
char **wnames = va_arg(ap, const char **); + + errcode = p9pdu_writef(pdu, proto_version, "w", + nwname); + if (!errcode) { + int i; + + for (i = 0; i < nwname; i++) { + errcode = + p9pdu_writef(pdu, + proto_version, + "s", + wnames[i]); + if (errcode) + break; + } + } + } + break; + case 'R':{ + uint16_t nwqid = va_arg(ap, int); + struct p9_qid *wqids = + va_arg(ap, struct p9_qid *); + + errcode = p9pdu_writef(pdu, proto_version, "w", + nwqid); + if (!errcode) { + int i; + + for (i = 0; i < nwqid; i++) { + errcode = + p9pdu_writef(pdu, + proto_version, + "Q", + &wqids[i]); + if (errcode) + break; + } + } + } + break; + case 'I':{ + struct p9_iattr_dotl *p9attr = va_arg(ap, + struct p9_iattr_dotl *); + + errcode = p9pdu_writef(pdu, proto_version, + "ddugqqqqq", + p9attr->valid, + p9attr->mode, + p9attr->uid, + p9attr->gid, + p9attr->size, + p9attr->atime_sec, + p9attr->atime_nsec, + p9attr->mtime_sec, + p9attr->mtime_nsec); + } + break; + case '?': + if ((proto_version != p9_proto_2000u) && + (proto_version != p9_proto_2000L)) + return 0; + break; + default: + BUG(); + break; + } + + if (errcode) + break; + } + + return errcode; +} + +int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = p9pdu_vreadf(pdu, proto_version, fmt, ap); + va_end(ap); + + return ret; +} + +static int +p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...) +{ + va_list ap; + int ret; + + va_start(ap, fmt); + ret = p9pdu_vwritef(pdu, proto_version, fmt, ap); + va_end(ap); + + return ret; +} + +int p9stat_read(struct p9_client *clnt, char *buf, int len, struct p9_wstat *st) +{ + struct p9_fcall fake_pdu; + int ret; + + fake_pdu.size = len; + fake_pdu.capacity = len; + fake_pdu.sdata = buf; + fake_pdu.offset = 0; + + ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "S", st); + if (ret) { + p9_debug(P9_DEBUG_9P, "<<< p9stat_read failed: %d\n", ret); + trace_9p_protocol_dump(clnt, &fake_pdu); + return ret; + } + + return fake_pdu.offset; +} +EXPORT_SYMBOL(p9stat_read); + +int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type) +{ + pdu->id = type; + return p9pdu_writef(pdu, 0, "dbw", 0, type, tag); +} + +int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu) +{ + int size = pdu->size; + int err; + + pdu->size = 0; + err = p9pdu_writef(pdu, 0, "d", size); + pdu->size = size; + + trace_9p_protocol_dump(clnt, pdu); + p9_debug(P9_DEBUG_9P, ">>> size=%d type: %d tag: %d\n", + pdu->size, pdu->id, pdu->tag); + + return err; +} + +void p9pdu_reset(struct p9_fcall *pdu) +{ + pdu->offset = 0; + pdu->size = 0; +} + +int p9dirent_read(struct p9_client *clnt, char *buf, int len, + struct p9_dirent *dirent) +{ + struct p9_fcall fake_pdu; + int ret; + char *nameptr; + + fake_pdu.size = len; + fake_pdu.capacity = len; + fake_pdu.sdata = buf; + fake_pdu.offset = 0; + + ret = p9pdu_readf(&fake_pdu, clnt->proto_version, "Qqbs", &dirent->qid, + &dirent->d_off, &dirent->d_type, &nameptr); + if (ret) { + p9_debug(P9_DEBUG_9P, "<<< p9dirent_read failed: %d\n", ret); + trace_9p_protocol_dump(clnt, &fake_pdu); + goto out; + } + + strcpy(dirent->d_name, nameptr); + kfree(nameptr); + +out: + return fake_pdu.offset; +} +EXPORT_SYMBOL(p9dirent_read); diff --git a/addons/9p/src/4.4.180/protocol.h b/addons/9p/src/4.4.180/protocol.h new file mode 100644 index 00000000..2cc525fa --- /dev/null +++ b/addons/9p/src/4.4.180/protocol.h @@ -0,0 +1,34 @@ +/* + * net/9p/protocol.h + * + * 9P Protocol Support Code + * + * Copyright (C) 
2008 by Eric Van Hensbergen + * + * Base on code from Anthony Liguori + * Copyright (C) 2008 by IBM, Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +int p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt, + va_list ap); +int p9pdu_readf(struct p9_fcall *pdu, int proto_version, const char *fmt, ...); +int p9pdu_prepare(struct p9_fcall *pdu, int16_t tag, int8_t type); +int p9pdu_finalize(struct p9_client *clnt, struct p9_fcall *pdu); +void p9pdu_reset(struct p9_fcall *pdu); +size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size); diff --git a/addons/9p/src/4.4.180/trans_common.c b/addons/9p/src/4.4.180/trans_common.c new file mode 100644 index 00000000..38aa6345 --- /dev/null +++ b/addons/9p/src/4.4.180/trans_common.c @@ -0,0 +1,29 @@ +/* + * Copyright IBM Corporation, 2010 + * Author Venkateswararao Jujjuri + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + */ + +#include +#include + +/** + * p9_release_req_pages - Release pages after the transaction. + */ +void p9_release_pages(struct page **pages, int nr_pages) +{ + int i; + + for (i = 0; i < nr_pages; i++) + if (pages[i]) + put_page(pages[i]); +} +EXPORT_SYMBOL(p9_release_pages); diff --git a/addons/9p/src/4.4.180/trans_common.h b/addons/9p/src/4.4.180/trans_common.h new file mode 100644 index 00000000..c43babb3 --- /dev/null +++ b/addons/9p/src/4.4.180/trans_common.h @@ -0,0 +1,15 @@ +/* + * Copyright IBM Corporation, 2010 + * Author Venkateswararao Jujjuri + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + */ + +void p9_release_pages(struct page **, int); diff --git a/addons/9p/src/4.4.180/trans_fd.c b/addons/9p/src/4.4.180/trans_fd.c new file mode 100644 index 00000000..2f68ffda --- /dev/null +++ b/addons/9p/src/4.4.180/trans_fd.c @@ -0,0 +1,1122 @@ +/* + * linux/fs/9p/trans_fd.c + * + * Fd transport layer. Includes deprecated socket layer. 
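+ *
+ * This transport multiplexes 9P messages over a pair of
+ * already-open file descriptors (or one connected socket),
+ * matching replies to requests by tag.  The options parsed below
+ * suggest a typical mount along these (illustrative) lines:
+ *
+ *	mount -t 9p -o trans=fd,rfdno=3,wfdno=4 nodev /mnt/9p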
+ * + * Copyright (C) 2006 by Russ Cox + * Copyright (C) 2004-2005 by Latchesar Ionkov + * Copyright (C) 2004-2008 by Eric Van Hensbergen + * Copyright (C) 1997-2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include /* killme */ + +#define P9_PORT 564 +#define MAX_SOCK_BUF (64*1024) +#define MAXPOLLWADDR 2 + +/** + * struct p9_fd_opts - per-transport options + * @rfd: file descriptor for reading (trans=fd) + * @wfd: file descriptor for writing (trans=fd) + * @port: port to connect to (trans=tcp) + * + */ + +struct p9_fd_opts { + int rfd; + int wfd; + u16 port; + int privport; +}; + +/* + * Option Parsing (code inspired by NFS code) + * - a little lazy - parse all fd-transport options + */ + +enum { + /* Options that take integer arguments */ + Opt_port, Opt_rfdno, Opt_wfdno, Opt_err, + /* Options that take no arguments */ + Opt_privport, +}; + +static const match_table_t tokens = { + {Opt_port, "port=%u"}, + {Opt_rfdno, "rfdno=%u"}, + {Opt_wfdno, "wfdno=%u"}, + {Opt_privport, "privport"}, + {Opt_err, NULL}, +}; + +enum { + Rworksched = 1, /* read work scheduled or running */ + Rpending = 2, /* can read */ + Wworksched = 4, /* write work scheduled or running */ + Wpending = 8, /* can write */ +}; + +struct p9_poll_wait { + struct p9_conn *conn; + wait_queue_t wait; + wait_queue_head_t *wait_addr; +}; + +/** + * struct p9_conn - fd mux connection state information + * @mux_list: list link for mux to manage multiple connections (?) + * @client: reference to client instance for this connection + * @err: error state + * @req_list: accounting for requests which have been sent + * @unsent_req_list: accounting for requests that haven't been sent + * @req: current request being processed (if any) + * @tmp_buf: temporary buffer to read in header + * @rsize: amount to read for current frame + * @rpos: read position in current frame + * @rbuf: current read buffer + * @wpos: write position for current frame + * @wsize: amount of data to write for current frame + * @wbuf: current write buffer + * @poll_pending_link: pending links to be polled per conn + * @poll_wait: array of wait_q's for various worker threads + * @pt: poll state + * @rq: current read work + * @wq: current write work + * @wsched: ???? 
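+ *	    (holds the Rworksched/Rpending/Wworksched/Wpending
+ *	    scheduling bits defined in the enum above)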
+ * + */ + +struct p9_conn { + struct list_head mux_list; + struct p9_client *client; + int err; + struct list_head req_list; + struct list_head unsent_req_list; + struct p9_req_t *req; + char tmp_buf[7]; + int rsize; + int rpos; + char *rbuf; + int wpos; + int wsize; + char *wbuf; + struct list_head poll_pending_link; + struct p9_poll_wait poll_wait[MAXPOLLWADDR]; + poll_table pt; + struct work_struct rq; + struct work_struct wq; + unsigned long wsched; +}; + +/** + * struct p9_trans_fd - transport state + * @rd: reference to file to read from + * @wr: reference of file to write to + * @conn: connection state reference + * + */ + +struct p9_trans_fd { + struct file *rd; + struct file *wr; + struct p9_conn conn; +}; + +static void p9_poll_workfn(struct work_struct *work); + +static DEFINE_SPINLOCK(p9_poll_lock); +static LIST_HEAD(p9_poll_pending_list); +static DECLARE_WORK(p9_poll_work, p9_poll_workfn); + +static unsigned int p9_ipport_resv_min = P9_DEF_MIN_RESVPORT; +static unsigned int p9_ipport_resv_max = P9_DEF_MAX_RESVPORT; + +static void p9_mux_poll_stop(struct p9_conn *m) +{ + unsigned long flags; + int i; + + for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) { + struct p9_poll_wait *pwait = &m->poll_wait[i]; + + if (pwait->wait_addr) { + remove_wait_queue(pwait->wait_addr, &pwait->wait); + pwait->wait_addr = NULL; + } + } + + spin_lock_irqsave(&p9_poll_lock, flags); + list_del_init(&m->poll_pending_link); + spin_unlock_irqrestore(&p9_poll_lock, flags); + + flush_work(&p9_poll_work); +} + +/** + * p9_conn_cancel - cancel all pending requests with error + * @m: mux data + * @err: error code + * + */ + +static void p9_conn_cancel(struct p9_conn *m, int err) +{ + struct p9_req_t *req, *rtmp; + unsigned long flags; + LIST_HEAD(cancel_list); + + p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); + + spin_lock_irqsave(&m->client->lock, flags); + + if (m->err) { + spin_unlock_irqrestore(&m->client->lock, flags); + return; + } + + m->err = err; + + list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { + list_move(&req->req_list, &cancel_list); + } + list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { + list_move(&req->req_list, &cancel_list); + } + spin_unlock_irqrestore(&m->client->lock, flags); + + list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { + p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req); + list_del(&req->req_list); + if (!req->t_err) + req->t_err = err; + p9_client_cb(m->client, req, REQ_STATUS_ERROR); + } +} + +static int +p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt) +{ + int ret, n; + struct p9_trans_fd *ts = NULL; + + if (client && client->status == Connected) + ts = client->trans; + + if (!ts) + return -EREMOTEIO; + + if (!ts->rd->f_op->poll) + return -EIO; + + if (!ts->wr->f_op->poll) + return -EIO; + + ret = ts->rd->f_op->poll(ts->rd, pt); + if (ret < 0) + return ret; + + if (ts->rd != ts->wr) { + n = ts->wr->f_op->poll(ts->wr, pt); + if (n < 0) + return n; + ret = (ret & ~POLLOUT) | (n & ~POLLIN); + } + + return ret; +} + +/** + * p9_fd_read- read from a fd + * @client: client instance + * @v: buffer to receive data into + * @len: size of receive buffer + * + */ + +static int p9_fd_read(struct p9_client *client, void *v, int len) +{ + int ret; + struct p9_trans_fd *ts = NULL; + + if (client && client->status != Disconnected) + ts = client->trans; + + if (!ts) + return -EREMOTEIO; + + if (!(ts->rd->f_flags & O_NONBLOCK)) + p9_debug(P9_DEBUG_ERROR, "blocking read ...\n"); + + ret = kernel_read(ts->rd, 
ts->rd->f_pos, v, len); + if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) + client->status = Disconnected; + return ret; +} + +/** + * p9_read_work - called when there is some data to be read from a transport + * @work: container of work to be done + * + */ + +static void p9_read_work(struct work_struct *work) +{ + int n, err; + struct p9_conn *m; + int status = REQ_STATUS_ERROR; + + m = container_of(work, struct p9_conn, rq); + + if (m->err < 0) + return; + + p9_debug(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos); + + if (!m->rbuf) { + m->rbuf = m->tmp_buf; + m->rpos = 0; + m->rsize = 7; /* start by reading header */ + } + + clear_bit(Rpending, &m->wsched); + p9_debug(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n", + m, m->rpos, m->rsize, m->rsize-m->rpos); + err = p9_fd_read(m->client, m->rbuf + m->rpos, + m->rsize - m->rpos); + p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err); + if (err == -EAGAIN) { + goto end_clear; + } + + if (err <= 0) + goto error; + + m->rpos += err; + + if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */ + u16 tag; + p9_debug(P9_DEBUG_TRANS, "got new header\n"); + + n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */ + if (n >= m->client->msize) { + p9_debug(P9_DEBUG_ERROR, + "requested packet size too big: %d\n", n); + err = -EIO; + goto error; + } + + tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */ + p9_debug(P9_DEBUG_TRANS, + "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag); + + m->req = p9_tag_lookup(m->client, tag); + if (!m->req || (m->req->status != REQ_STATUS_SENT)) { + p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n", + tag); + err = -EIO; + goto error; + } + + if (m->req->rc == NULL) { + m->req->rc = kmalloc(sizeof(struct p9_fcall) + + m->client->msize, GFP_NOFS); + if (!m->req->rc) { + m->req = NULL; + err = -ENOMEM; + goto error; + } + } + m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall); + memcpy(m->rbuf, m->tmp_buf, m->rsize); + m->rsize = n; + } + + /* not an else because some packets (like clunk) have no payload */ + if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */ + p9_debug(P9_DEBUG_TRANS, "got new packet\n"); + spin_lock(&m->client->lock); + if (m->req->status != REQ_STATUS_ERROR) + status = REQ_STATUS_RCVD; + list_del(&m->req->req_list); + spin_unlock(&m->client->lock); + p9_client_cb(m->client, m->req, status); + m->rbuf = NULL; + m->rpos = 0; + m->rsize = 0; + m->req = NULL; + } + +end_clear: + clear_bit(Rworksched, &m->wsched); + + if (!list_empty(&m->req_list)) { + if (test_and_clear_bit(Rpending, &m->wsched)) + n = POLLIN; + else + n = p9_fd_poll(m->client, NULL); + + if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) { + p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m); + schedule_work(&m->rq); + } + } + + return; +error: + p9_conn_cancel(m, err); + clear_bit(Rworksched, &m->wsched); +} + +/** + * p9_fd_write - write to a socket + * @client: client instance + * @v: buffer to send data from + * @len: size of send buffer + * + */ + +static int p9_fd_write(struct p9_client *client, void *v, int len) +{ + int ret; + mm_segment_t oldfs; + struct p9_trans_fd *ts = NULL; + + if (client && client->status != Disconnected) + ts = client->trans; + + if (!ts) + return -EREMOTEIO; + + if (!(ts->wr->f_flags & O_NONBLOCK)) + p9_debug(P9_DEBUG_ERROR, "blocking write ...\n"); + + oldfs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ + ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos); 
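+	/* restore the address limit saved in oldfs before the write */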
+ set_fs(oldfs); + + if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) + client->status = Disconnected; + return ret; +} + +/** + * p9_write_work - called when a transport can send some data + * @work: container for work to be done + * + */ + +static void p9_write_work(struct work_struct *work) +{ + int n, err; + struct p9_conn *m; + struct p9_req_t *req; + + m = container_of(work, struct p9_conn, wq); + + if (m->err < 0) { + clear_bit(Wworksched, &m->wsched); + return; + } + + if (!m->wsize) { + spin_lock(&m->client->lock); + if (list_empty(&m->unsent_req_list)) { + clear_bit(Wworksched, &m->wsched); + spin_unlock(&m->client->lock); + return; + } + + req = list_entry(m->unsent_req_list.next, struct p9_req_t, + req_list); + req->status = REQ_STATUS_SENT; + p9_debug(P9_DEBUG_TRANS, "move req %p\n", req); + list_move_tail(&req->req_list, &m->req_list); + + m->wbuf = req->tc->sdata; + m->wsize = req->tc->size; + m->wpos = 0; + spin_unlock(&m->client->lock); + } + + p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", + m, m->wpos, m->wsize); + clear_bit(Wpending, &m->wsched); + err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos); + p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err); + if (err == -EAGAIN) + goto end_clear; + + + if (err < 0) + goto error; + else if (err == 0) { + err = -EREMOTEIO; + goto error; + } + + m->wpos += err; + if (m->wpos == m->wsize) + m->wpos = m->wsize = 0; + +end_clear: + clear_bit(Wworksched, &m->wsched); + + if (m->wsize || !list_empty(&m->unsent_req_list)) { + if (test_and_clear_bit(Wpending, &m->wsched)) + n = POLLOUT; + else + n = p9_fd_poll(m->client, NULL); + + if ((n & POLLOUT) && + !test_and_set_bit(Wworksched, &m->wsched)) { + p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m); + schedule_work(&m->wq); + } + } + + return; + +error: + p9_conn_cancel(m, err); + clear_bit(Wworksched, &m->wsched); +} + +static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key) +{ + struct p9_poll_wait *pwait = + container_of(wait, struct p9_poll_wait, wait); + struct p9_conn *m = pwait->conn; + unsigned long flags; + + spin_lock_irqsave(&p9_poll_lock, flags); + if (list_empty(&m->poll_pending_link)) + list_add_tail(&m->poll_pending_link, &p9_poll_pending_list); + spin_unlock_irqrestore(&p9_poll_lock, flags); + + schedule_work(&p9_poll_work); + return 1; +} + +/** + * p9_pollwait - add poll task to the wait queue + * @filp: file pointer being polled + * @wait_address: wait_q to block on + * @p: poll state + * + * called by files poll operation to add v9fs-poll task to files wait queue + */ + +static void +p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p) +{ + struct p9_conn *m = container_of(p, struct p9_conn, pt); + struct p9_poll_wait *pwait = NULL; + int i; + + for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) { + if (m->poll_wait[i].wait_addr == NULL) { + pwait = &m->poll_wait[i]; + break; + } + } + + if (!pwait) { + p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n"); + return; + } + + pwait->conn = m; + pwait->wait_addr = wait_address; + init_waitqueue_func_entry(&pwait->wait, p9_pollwake); + add_wait_queue(wait_address, &pwait->wait); +} + +/** + * p9_conn_create - initialize the per-session mux data + * @client: client instance + * + * Note: Creates the polling task if this is the first session. 
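+ * The initial p9_fd_poll() below both primes the Rpending/Wpending
+ * bits and, through init_poll_funcptr(&m->pt, p9_pollwait),
+ * registers this connection on the fd's wait queues so that
+ * p9_pollwake() can schedule the poll work later.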
+ */ + +static void p9_conn_create(struct p9_client *client) +{ + int n; + struct p9_trans_fd *ts = client->trans; + struct p9_conn *m = &ts->conn; + + p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize); + + INIT_LIST_HEAD(&m->mux_list); + m->client = client; + + INIT_LIST_HEAD(&m->req_list); + INIT_LIST_HEAD(&m->unsent_req_list); + INIT_WORK(&m->rq, p9_read_work); + INIT_WORK(&m->wq, p9_write_work); + INIT_LIST_HEAD(&m->poll_pending_link); + init_poll_funcptr(&m->pt, p9_pollwait); + + n = p9_fd_poll(client, &m->pt); + if (n & POLLIN) { + p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m); + set_bit(Rpending, &m->wsched); + } + + if (n & POLLOUT) { + p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); + set_bit(Wpending, &m->wsched); + } +} + +/** + * p9_poll_mux - polls a mux and schedules read or write works if necessary + * @m: connection to poll + * + */ + +static void p9_poll_mux(struct p9_conn *m) +{ + int n; + + if (m->err < 0) + return; + + n = p9_fd_poll(m->client, NULL); + if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) { + p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n); + if (n >= 0) + n = -ECONNRESET; + p9_conn_cancel(m, n); + } + + if (n & POLLIN) { + set_bit(Rpending, &m->wsched); + p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m); + if (!test_and_set_bit(Rworksched, &m->wsched)) { + p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m); + schedule_work(&m->rq); + } + } + + if (n & POLLOUT) { + set_bit(Wpending, &m->wsched); + p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m); + if ((m->wsize || !list_empty(&m->unsent_req_list)) && + !test_and_set_bit(Wworksched, &m->wsched)) { + p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m); + schedule_work(&m->wq); + } + } +} + +/** + * p9_fd_request - send 9P request + * The function can sleep until the request is scheduled for sending. + * The function can be interrupted. Return from the function is not + * a guarantee that the request is sent successfully. + * + * @client: client instance + * @req: request to be sent + * + */ + +static int p9_fd_request(struct p9_client *client, struct p9_req_t *req) +{ + int n; + struct p9_trans_fd *ts = client->trans; + struct p9_conn *m = &ts->conn; + + p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", + m, current, req->tc, req->tc->id); + if (m->err < 0) + return m->err; + + spin_lock(&client->lock); + req->status = REQ_STATUS_UNSENT; + list_add_tail(&req->req_list, &m->unsent_req_list); + spin_unlock(&client->lock); + + if (test_and_clear_bit(Wpending, &m->wsched)) + n = POLLOUT; + else + n = p9_fd_poll(m->client, NULL); + + if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) + schedule_work(&m->wq); + + return 0; +} + +static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req) +{ + int ret = 1; + + p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); + + spin_lock(&client->lock); + + if (req->status == REQ_STATUS_UNSENT) { + list_del(&req->req_list); + req->status = REQ_STATUS_FLSHD; + ret = 0; + } + spin_unlock(&client->lock); + + return ret; +} + +static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req) +{ + p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req); + + /* we haven't received a response for oldreq, + * remove it from the list. 
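+ *
+ * For orientation, the states a request moves through in this
+ * transport: REQ_STATUS_UNSENT (queued on m->unsent_req_list by
+ * p9_fd_request) becomes REQ_STATUS_SENT when p9_write_work moves it
+ * to m->req_list, then REQ_STATUS_RCVD when p9_read_work completes it
+ * via p9_client_cb, or REQ_STATUS_FLSHD if p9_fd_cancel caught it
+ * while still unsent. Here the request was already sent, so we only
+ * drop it from the list.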
+ */ + spin_lock(&client->lock); + list_del(&req->req_list); + spin_unlock(&client->lock); + + return 0; +} + +/** + * parse_opts - parse mount options into p9_fd_opts structure + * @params: options string passed from mount + * @opts: fd transport-specific structure to parse options into + * + * Returns 0 upon success, -ERRNO upon failure + */ + +static int parse_opts(char *params, struct p9_fd_opts *opts) +{ + char *p; + substring_t args[MAX_OPT_ARGS]; + int option; + char *options, *tmp_options; + + opts->port = P9_PORT; + opts->rfd = ~0; + opts->wfd = ~0; + opts->privport = 0; + + if (!params) + return 0; + + tmp_options = kstrdup(params, GFP_KERNEL); + if (!tmp_options) { + p9_debug(P9_DEBUG_ERROR, + "failed to allocate copy of option string\n"); + return -ENOMEM; + } + options = tmp_options; + + while ((p = strsep(&options, ",")) != NULL) { + int token; + int r; + if (!*p) + continue; + token = match_token(p, tokens, args); + if ((token != Opt_err) && (token != Opt_privport)) { + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + continue; + } + } + switch (token) { + case Opt_port: + opts->port = option; + break; + case Opt_rfdno: + opts->rfd = option; + break; + case Opt_wfdno: + opts->wfd = option; + break; + case Opt_privport: + opts->privport = 1; + break; + default: + continue; + } + } + + kfree(tmp_options); + return 0; +} + +static int p9_fd_open(struct p9_client *client, int rfd, int wfd) +{ + struct p9_trans_fd *ts = kzalloc(sizeof(struct p9_trans_fd), + GFP_KERNEL); + if (!ts) + return -ENOMEM; + + ts->rd = fget(rfd); + ts->wr = fget(wfd); + if (!ts->rd || !ts->wr) { + if (ts->rd) + fput(ts->rd); + if (ts->wr) + fput(ts->wr); + kfree(ts); + return -EIO; + } + + client->trans = ts; + client->status = Connected; + + return 0; +} + +static int p9_socket_open(struct p9_client *client, struct socket *csocket) +{ + struct p9_trans_fd *p; + struct file *file; + + p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL); + if (!p) + return -ENOMEM; + + csocket->sk->sk_allocation = GFP_NOIO; + file = sock_alloc_file(csocket, 0, NULL); + if (IS_ERR(file)) { + pr_err("%s (%d): failed to map fd\n", + __func__, task_pid_nr(current)); + sock_release(csocket); + kfree(p); + return PTR_ERR(file); + } + + get_file(file); + p->wr = p->rd = file; + client->trans = p; + client->status = Connected; + + p->rd->f_flags |= O_NONBLOCK; + + p9_conn_create(client); + return 0; +} + +/** + * p9_mux_destroy - cancels all pending requests of mux + * @m: mux to destroy + * + */ + +static void p9_conn_destroy(struct p9_conn *m) +{ + p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n", + m, m->mux_list.prev, m->mux_list.next); + + p9_mux_poll_stop(m); + cancel_work_sync(&m->rq); + cancel_work_sync(&m->wq); + + p9_conn_cancel(m, -ECONNRESET); + + m->client = NULL; +} + +/** + * p9_fd_close - shutdown file descriptor transport + * @client: client instance + * + */ + +static void p9_fd_close(struct p9_client *client) +{ + struct p9_trans_fd *ts; + + if (!client) + return; + + ts = client->trans; + if (!ts) + return; + + client->status = Disconnected; + + p9_conn_destroy(&ts->conn); + + if (ts->rd) + fput(ts->rd); + if (ts->wr) + fput(ts->wr); + + kfree(ts); +} + +/* + * stolen from NFS - maybe should be made a generic function? 
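+ *
+ * For reference, the options recognized by parse_opts() above map to
+ * mount invocations such as the following (addresses, ports and fd
+ * numbers are illustrative values only):
+ *
+ *	mount -t 9p -o trans=tcp,port=564 192.168.1.2 /mnt/9p
+ *	mount -t 9p -o trans=fd,rfdno=3,wfdno=4 nodev /mnt/9p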
+ */ +static inline int valid_ipaddr4(const char *buf) +{ + int rc, count, in[4]; + + rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]); + if (rc != 4) + return -EINVAL; + for (count = 0; count < 4; count++) { + if (in[count] > 255) + return -EINVAL; + } + return 0; +} + +static int p9_bind_privport(struct socket *sock) +{ + struct sockaddr_in cl; + int port, err = -EINVAL; + + memset(&cl, 0, sizeof(cl)); + cl.sin_family = AF_INET; + cl.sin_addr.s_addr = INADDR_ANY; + for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) { + cl.sin_port = htons((ushort)port); + err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl)); + if (err != -EADDRINUSE) + break; + } + return err; +} + + +static int +p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args) +{ + int err; + struct socket *csocket; + struct sockaddr_in sin_server; + struct p9_fd_opts opts; + + err = parse_opts(args, &opts); + if (err < 0) + return err; + + if (addr == NULL || valid_ipaddr4(addr) < 0) + return -EINVAL; + + csocket = NULL; + + sin_server.sin_family = AF_INET; + sin_server.sin_addr.s_addr = in_aton(addr); + sin_server.sin_port = htons(opts.port); + err = __sock_create(current->nsproxy->net_ns, PF_INET, + SOCK_STREAM, IPPROTO_TCP, &csocket, 1); + if (err) { + pr_err("%s (%d): problem creating socket\n", + __func__, task_pid_nr(current)); + return err; + } + + if (opts.privport) { + err = p9_bind_privport(csocket); + if (err < 0) { + pr_err("%s (%d): problem binding to privport\n", + __func__, task_pid_nr(current)); + sock_release(csocket); + return err; + } + } + + err = csocket->ops->connect(csocket, + (struct sockaddr *)&sin_server, + sizeof(struct sockaddr_in), 0); + if (err < 0) { + pr_err("%s (%d): problem connecting socket to %s\n", + __func__, task_pid_nr(current), addr); + sock_release(csocket); + return err; + } + + return p9_socket_open(client, csocket); +} + +static int +p9_fd_create_unix(struct p9_client *client, const char *addr, char *args) +{ + int err; + struct socket *csocket; + struct sockaddr_un sun_server; + + csocket = NULL; + + if (addr == NULL) + return -EINVAL; + + if (strlen(addr) >= UNIX_PATH_MAX) { + pr_err("%s (%d): address too long: %s\n", + __func__, task_pid_nr(current), addr); + return -ENAMETOOLONG; + } + + sun_server.sun_family = PF_UNIX; + strcpy(sun_server.sun_path, addr); + err = __sock_create(current->nsproxy->net_ns, PF_UNIX, + SOCK_STREAM, 0, &csocket, 1); + if (err < 0) { + pr_err("%s (%d): problem creating socket\n", + __func__, task_pid_nr(current)); + + return err; + } + err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server, + sizeof(struct sockaddr_un) - 1, 0); + if (err < 0) { + pr_err("%s (%d): problem connecting socket: %s: %d\n", + __func__, task_pid_nr(current), addr, err); + sock_release(csocket); + return err; + } + + return p9_socket_open(client, csocket); +} + +static int +p9_fd_create(struct p9_client *client, const char *addr, char *args) +{ + int err; + struct p9_fd_opts opts; + + parse_opts(args, &opts); + + if (opts.rfd == ~0 || opts.wfd == ~0) { + pr_err("Insufficient options for proto=fd\n"); + return -ENOPROTOOPT; + } + + err = p9_fd_open(client, opts.rfd, opts.wfd); + if (err < 0) + return err; + + p9_conn_create(client); + + return 0; +} + +static struct p9_trans_module p9_tcp_trans = { + .name = "tcp", + .maxsize = MAX_SOCK_BUF, + .def = 0, + .create = p9_fd_create_tcp, + .close = p9_fd_close, + .request = p9_fd_request, + .cancel = p9_fd_cancel, + .cancelled = p9_fd_cancelled, + .owner = 
THIS_MODULE, +}; + +static struct p9_trans_module p9_unix_trans = { + .name = "unix", + .maxsize = MAX_SOCK_BUF, + .def = 0, + .create = p9_fd_create_unix, + .close = p9_fd_close, + .request = p9_fd_request, + .cancel = p9_fd_cancel, + .cancelled = p9_fd_cancelled, + .owner = THIS_MODULE, +}; + +static struct p9_trans_module p9_fd_trans = { + .name = "fd", + .maxsize = MAX_SOCK_BUF, + .def = 0, + .create = p9_fd_create, + .close = p9_fd_close, + .request = p9_fd_request, + .cancel = p9_fd_cancel, + .cancelled = p9_fd_cancelled, + .owner = THIS_MODULE, +}; + +/** + * p9_poll_workfn - poll worker + * @work: work item used to schedule the polling + * + * polls all v9fs transports for new events and queues the appropriate + * work to the work queue + * + */ + +static void p9_poll_workfn(struct work_struct *work) +{ + unsigned long flags; + + p9_debug(P9_DEBUG_TRANS, "start %p\n", current); + + spin_lock_irqsave(&p9_poll_lock, flags); + while (!list_empty(&p9_poll_pending_list)) { + struct p9_conn *conn = list_first_entry(&p9_poll_pending_list, + struct p9_conn, + poll_pending_link); + list_del_init(&conn->poll_pending_link); + spin_unlock_irqrestore(&p9_poll_lock, flags); + + p9_poll_mux(conn); + + spin_lock_irqsave(&p9_poll_lock, flags); + } + spin_unlock_irqrestore(&p9_poll_lock, flags); + + p9_debug(P9_DEBUG_TRANS, "finish\n"); +} + +int p9_trans_fd_init(void) +{ + v9fs_register_trans(&p9_tcp_trans); + v9fs_register_trans(&p9_unix_trans); + v9fs_register_trans(&p9_fd_trans); + + return 0; +} + +void p9_trans_fd_exit(void) +{ + flush_work(&p9_poll_work); + v9fs_unregister_trans(&p9_tcp_trans); + v9fs_unregister_trans(&p9_unix_trans); + v9fs_unregister_trans(&p9_fd_trans); +} diff --git a/addons/9p/src/4.4.180/trans_rdma.c b/addons/9p/src/4.4.180/trans_rdma.c new file mode 100644 index 00000000..f42550dd --- /dev/null +++ b/addons/9p/src/4.4.180/trans_rdma.c @@ -0,0 +1,784 @@ +/* + * linux/fs/9p/trans_rdma.c + * + * RDMA transport layer based on the trans_fd.c implementation. + * + * Copyright (C) 2008 by Tom Tucker + * Copyright (C) 2006 by Russ Cox + * Copyright (C) 2004-2005 by Latchesar Ionkov + * Copyright (C) 2004-2008 by Eric Van Hensbergen + * Copyright (C) 1997-2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define P9_PORT 5640 +#define P9_RDMA_SQ_DEPTH 32 +#define P9_RDMA_RQ_DEPTH 32 +#define P9_RDMA_SEND_SGE 4 +#define P9_RDMA_RECV_SGE 4 +#define P9_RDMA_IRD 0 +#define P9_RDMA_ORD 0 +#define P9_RDMA_TIMEOUT 30000 /* 30 seconds */ +#define P9_RDMA_MAXSIZE (1024*1024) /* 1MB */ + +/** + * struct p9_trans_rdma - RDMA transport instance + * + * @state: tracks the transport state machine for connection setup and tear down + * @cm_id: The RDMA CM ID + * @pd: Protection Domain pointer + * @qp: Queue Pair pointer + * @cq: Completion Queue pointer + * @dm_mr: DMA Memory Region pointer + * @lkey: The local access only memory region key + * @timeout: Number of uSecs to wait for connection management events + * @sq_depth: The depth of the Send Queue + * @sq_sem: Semaphore for the SQ + * @rq_depth: The depth of the Receive Queue. + * @rq_sem: Semaphore for the RQ + * @excess_rc : Amount of posted Receive Contexts without a pending request. + * See rdma_request() + * @addr: The remote peer's address + * @req_lock: Protects the active request list + * @cm_done: Completion event for connection management tracking + */ +struct p9_trans_rdma { + enum { + P9_RDMA_INIT, + P9_RDMA_ADDR_RESOLVED, + P9_RDMA_ROUTE_RESOLVED, + P9_RDMA_CONNECTED, + P9_RDMA_FLUSHING, + P9_RDMA_CLOSING, + P9_RDMA_CLOSED, + } state; + struct rdma_cm_id *cm_id; + struct ib_pd *pd; + struct ib_qp *qp; + struct ib_cq *cq; + long timeout; + int sq_depth; + struct semaphore sq_sem; + int rq_depth; + struct semaphore rq_sem; + atomic_t excess_rc; + struct sockaddr_in addr; + spinlock_t req_lock; + + struct completion cm_done; +}; + +/** + * p9_rdma_context - Keeps track of in-process WR + * + * @wc_op: The original WR op for when the CQE completes in error. + * @busa: Bus address to unmap when the WR completes + * @req: Keeps track of requests (send) + * @rc: Keepts track of replies (receive) + */ +struct p9_rdma_req; +struct p9_rdma_context { + enum ib_wc_opcode wc_op; + dma_addr_t busa; + union { + struct p9_req_t *req; + struct p9_fcall *rc; + }; +}; + +/** + * p9_rdma_opts - Collection of mount options + * @port: port of connection + * @sq_depth: The requested depth of the SQ. This really doesn't need + * to be any deeper than the number of threads used in the client + * @rq_depth: The depth of the RQ. 
Should be greater than or equal to SQ depth + * @timeout: Time to wait in msecs for CM events + */ +struct p9_rdma_opts { + short port; + int sq_depth; + int rq_depth; + long timeout; + int privport; +}; + +/* + * Option Parsing (code inspired by NFS code) + */ +enum { + /* Options that take integer arguments */ + Opt_port, Opt_rq_depth, Opt_sq_depth, Opt_timeout, + /* Options that take no argument */ + Opt_privport, + Opt_err, +}; + +static match_table_t tokens = { + {Opt_port, "port=%u"}, + {Opt_sq_depth, "sq=%u"}, + {Opt_rq_depth, "rq=%u"}, + {Opt_timeout, "timeout=%u"}, + {Opt_privport, "privport"}, + {Opt_err, NULL}, +}; + +/** + * parse_opts - parse mount options into rdma options structure + * @params: options string passed from mount + * @opts: rdma transport-specific structure to parse options into + * + * Returns 0 upon success, -ERRNO upon failure + */ +static int parse_opts(char *params, struct p9_rdma_opts *opts) +{ + char *p; + substring_t args[MAX_OPT_ARGS]; + int option; + char *options, *tmp_options; + + opts->port = P9_PORT; + opts->sq_depth = P9_RDMA_SQ_DEPTH; + opts->rq_depth = P9_RDMA_RQ_DEPTH; + opts->timeout = P9_RDMA_TIMEOUT; + opts->privport = 0; + + if (!params) + return 0; + + tmp_options = kstrdup(params, GFP_KERNEL); + if (!tmp_options) { + p9_debug(P9_DEBUG_ERROR, + "failed to allocate copy of option string\n"); + return -ENOMEM; + } + options = tmp_options; + + while ((p = strsep(&options, ",")) != NULL) { + int token; + int r; + if (!*p) + continue; + token = match_token(p, tokens, args); + if ((token != Opt_err) && (token != Opt_privport)) { + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + continue; + } + } + switch (token) { + case Opt_port: + opts->port = option; + break; + case Opt_sq_depth: + opts->sq_depth = option; + break; + case Opt_rq_depth: + opts->rq_depth = option; + break; + case Opt_timeout: + opts->timeout = option; + break; + case Opt_privport: + opts->privport = 1; + break; + default: + continue; + } + } + /* RQ must be at least as large as the SQ */ + opts->rq_depth = max(opts->rq_depth, opts->sq_depth); + kfree(tmp_options); + return 0; +} + +static int +p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event) +{ + struct p9_client *c = id->context; + struct p9_trans_rdma *rdma = c->trans; + switch (event->event) { + case RDMA_CM_EVENT_ADDR_RESOLVED: + BUG_ON(rdma->state != P9_RDMA_INIT); + rdma->state = P9_RDMA_ADDR_RESOLVED; + break; + + case RDMA_CM_EVENT_ROUTE_RESOLVED: + BUG_ON(rdma->state != P9_RDMA_ADDR_RESOLVED); + rdma->state = P9_RDMA_ROUTE_RESOLVED; + break; + + case RDMA_CM_EVENT_ESTABLISHED: + BUG_ON(rdma->state != P9_RDMA_ROUTE_RESOLVED); + rdma->state = P9_RDMA_CONNECTED; + break; + + case RDMA_CM_EVENT_DISCONNECTED: + if (rdma) + rdma->state = P9_RDMA_CLOSED; + if (c) + c->status = Disconnected; + break; + + case RDMA_CM_EVENT_TIMEWAIT_EXIT: + break; + + case RDMA_CM_EVENT_ADDR_CHANGE: + case RDMA_CM_EVENT_ROUTE_ERROR: + case RDMA_CM_EVENT_DEVICE_REMOVAL: + case RDMA_CM_EVENT_MULTICAST_JOIN: + case RDMA_CM_EVENT_MULTICAST_ERROR: + case RDMA_CM_EVENT_REJECTED: + case RDMA_CM_EVENT_CONNECT_REQUEST: + case RDMA_CM_EVENT_CONNECT_RESPONSE: + case RDMA_CM_EVENT_CONNECT_ERROR: + case RDMA_CM_EVENT_ADDR_ERROR: + case RDMA_CM_EVENT_UNREACHABLE: + c->status = Disconnected; + rdma_disconnect(rdma->cm_id); + break; + default: + BUG(); + } + complete(&rdma->cm_done); + return 0; +} + +static void +handle_recv(struct p9_client *client, struct 
p9_trans_rdma *rdma, + struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len) +{ + struct p9_req_t *req; + int err = 0; + int16_t tag; + + req = NULL; + ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize, + DMA_FROM_DEVICE); + + if (status != IB_WC_SUCCESS) + goto err_out; + + err = p9_parse_header(c->rc, NULL, NULL, &tag, 1); + if (err) + goto err_out; + + req = p9_tag_lookup(client, tag); + if (!req) + goto err_out; + + /* Check that we have not yet received a reply for this request. + */ + if (unlikely(req->rc)) { + pr_err("Duplicate reply for request %d", tag); + goto err_out; + } + + req->rc = c->rc; + p9_client_cb(client, req, REQ_STATUS_RCVD); + + return; + + err_out: + p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status); + rdma->state = P9_RDMA_FLUSHING; + client->status = Disconnected; +} + +static void +handle_send(struct p9_client *client, struct p9_trans_rdma *rdma, + struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len) +{ + ib_dma_unmap_single(rdma->cm_id->device, + c->busa, c->req->tc->size, + DMA_TO_DEVICE); +} + +static void qp_event_handler(struct ib_event *event, void *context) +{ + p9_debug(P9_DEBUG_ERROR, "QP event %d context %p\n", + event->event, context); +} + +static void cq_comp_handler(struct ib_cq *cq, void *cq_context) +{ + struct p9_client *client = cq_context; + struct p9_trans_rdma *rdma = client->trans; + int ret; + struct ib_wc wc; + + ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP); + while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) { + struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id; + + switch (c->wc_op) { + case IB_WC_RECV: + handle_recv(client, rdma, c, wc.status, wc.byte_len); + up(&rdma->rq_sem); + break; + + case IB_WC_SEND: + handle_send(client, rdma, c, wc.status, wc.byte_len); + up(&rdma->sq_sem); + break; + + default: + pr_err("unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n", + c->wc_op, wc.opcode, wc.status); + break; + } + kfree(c); + } +} + +static void cq_event_handler(struct ib_event *e, void *v) +{ + p9_debug(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v); +} + +static void rdma_destroy_trans(struct p9_trans_rdma *rdma) +{ + if (!rdma) + return; + + if (rdma->qp && !IS_ERR(rdma->qp)) + ib_destroy_qp(rdma->qp); + + if (rdma->pd && !IS_ERR(rdma->pd)) + ib_dealloc_pd(rdma->pd); + + if (rdma->cq && !IS_ERR(rdma->cq)) + ib_destroy_cq(rdma->cq); + + if (rdma->cm_id && !IS_ERR(rdma->cm_id)) + rdma_destroy_id(rdma->cm_id); + + kfree(rdma); +} + +static int +post_recv(struct p9_client *client, struct p9_rdma_context *c) +{ + struct p9_trans_rdma *rdma = client->trans; + struct ib_recv_wr wr, *bad_wr; + struct ib_sge sge; + + c->busa = ib_dma_map_single(rdma->cm_id->device, + c->rc->sdata, client->msize, + DMA_FROM_DEVICE); + if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) + goto error; + + sge.addr = c->busa; + sge.length = client->msize; + sge.lkey = rdma->pd->local_dma_lkey; + + wr.next = NULL; + c->wc_op = IB_WC_RECV; + wr.wr_id = (unsigned long) c; + wr.sg_list = &sge; + wr.num_sge = 1; + return ib_post_recv(rdma->qp, &wr, &bad_wr); + + error: + p9_debug(P9_DEBUG_ERROR, "EIO\n"); + return -EIO; +} + +static int rdma_request(struct p9_client *client, struct p9_req_t *req) +{ + struct p9_trans_rdma *rdma = client->trans; + struct ib_send_wr wr, *bad_wr; + struct ib_sge sge; + int err = 0; + unsigned long flags; + struct p9_rdma_context *c = NULL; + struct p9_rdma_context *rpl_context = NULL; + + /* When an error occurs between posting the recv and 
the send, + * there will be a receive context posted without a pending request. + * Since there is no way to "un-post" it, we remember it and skip + * post_recv() for the next request. + * So here, + * see if we are this `next request' and need to absorb an excess rc. + * If yes, then drop and free our own, and do not recv_post(). + **/ + if (unlikely(atomic_read(&rdma->excess_rc) > 0)) { + if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) { + /* Got one ! */ + kfree(req->rc); + req->rc = NULL; + goto dont_need_post_recv; + } else { + /* We raced and lost. */ + atomic_inc(&rdma->excess_rc); + } + } + + /* Allocate an fcall for the reply */ + rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS); + if (!rpl_context) { + err = -ENOMEM; + goto recv_error; + } + rpl_context->rc = req->rc; + + /* + * Post a receive buffer for this request. We need to ensure + * there is a reply buffer available for every outstanding + * request. A flushed request can result in no reply for an + * outstanding request, so we must keep a count to avoid + * overflowing the RQ. + */ + if (down_interruptible(&rdma->rq_sem)) { + err = -EINTR; + goto recv_error; + } + + err = post_recv(client, rpl_context); + if (err) { + p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n"); + goto recv_error; + } + /* remove posted receive buffer from request structure */ + req->rc = NULL; + +dont_need_post_recv: + /* Post the request */ + c = kmalloc(sizeof *c, GFP_NOFS); + if (!c) { + err = -ENOMEM; + goto send_error; + } + c->req = req; + + c->busa = ib_dma_map_single(rdma->cm_id->device, + c->req->tc->sdata, c->req->tc->size, + DMA_TO_DEVICE); + if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) { + err = -EIO; + goto send_error; + } + + sge.addr = c->busa; + sge.length = c->req->tc->size; + sge.lkey = rdma->pd->local_dma_lkey; + + wr.next = NULL; + c->wc_op = IB_WC_SEND; + wr.wr_id = (unsigned long) c; + wr.opcode = IB_WR_SEND; + wr.send_flags = IB_SEND_SIGNALED; + wr.sg_list = &sge; + wr.num_sge = 1; + + if (down_interruptible(&rdma->sq_sem)) { + err = -EINTR; + goto send_error; + } + + /* Mark request as `sent' *before* we actually send it, + * because doing if after could erase the REQ_STATUS_RCVD + * status in case of a very fast reply. + */ + req->status = REQ_STATUS_SENT; + err = ib_post_send(rdma->qp, &wr, &bad_wr); + if (err) + goto send_error; + + /* Success */ + return 0; + + /* Handle errors that happened during or while preparing the send: */ + send_error: + req->status = REQ_STATUS_ERROR; + kfree(c); + p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err); + + /* Ach. + * We did recv_post(), but not send. We have one recv_post in excess. 
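+ * The counter is drained at the top of rdma_request(): the next
+ * request that observes excess_rc > 0 frees its own reply buffer and
+ * jumps to dont_need_post_recv, reusing the receive context that is
+ * already posted instead of calling post_recv() again.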
+ */ + atomic_inc(&rdma->excess_rc); + return err; + + /* Handle errors that happened during or while preparing post_recv(): */ + recv_error: + kfree(rpl_context); + spin_lock_irqsave(&rdma->req_lock, flags); + if (rdma->state < P9_RDMA_CLOSING) { + rdma->state = P9_RDMA_CLOSING; + spin_unlock_irqrestore(&rdma->req_lock, flags); + rdma_disconnect(rdma->cm_id); + } else + spin_unlock_irqrestore(&rdma->req_lock, flags); + return err; +} + +static void rdma_close(struct p9_client *client) +{ + struct p9_trans_rdma *rdma; + + if (!client) + return; + + rdma = client->trans; + if (!rdma) + return; + + client->status = Disconnected; + rdma_disconnect(rdma->cm_id); + rdma_destroy_trans(rdma); +} + +/** + * alloc_rdma - Allocate and initialize the rdma transport structure + * @opts: Mount options structure + */ +static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts) +{ + struct p9_trans_rdma *rdma; + + rdma = kzalloc(sizeof(struct p9_trans_rdma), GFP_KERNEL); + if (!rdma) + return NULL; + + rdma->sq_depth = opts->sq_depth; + rdma->rq_depth = opts->rq_depth; + rdma->timeout = opts->timeout; + spin_lock_init(&rdma->req_lock); + init_completion(&rdma->cm_done); + sema_init(&rdma->sq_sem, rdma->sq_depth); + sema_init(&rdma->rq_sem, rdma->rq_depth); + atomic_set(&rdma->excess_rc, 0); + + return rdma; +} + +static int rdma_cancel(struct p9_client *client, struct p9_req_t *req) +{ + /* Nothing to do here. + * We will take care of it (if we have to) in rdma_cancelled() + */ + return 1; +} + +/* A request has been fully flushed without a reply. + * That means we have posted one buffer in excess. + */ +static int rdma_cancelled(struct p9_client *client, struct p9_req_t *req) +{ + struct p9_trans_rdma *rdma = client->trans; + atomic_inc(&rdma->excess_rc); + return 0; +} + +static int p9_rdma_bind_privport(struct p9_trans_rdma *rdma) +{ + struct sockaddr_in cl = { + .sin_family = AF_INET, + .sin_addr.s_addr = htonl(INADDR_ANY), + }; + int port, err = -EINVAL; + + for (port = P9_DEF_MAX_RESVPORT; port >= P9_DEF_MIN_RESVPORT; port--) { + cl.sin_port = htons((ushort)port); + err = rdma_bind_addr(rdma->cm_id, (struct sockaddr *)&cl); + if (err != -EADDRINUSE) + break; + } + return err; +} + +/** + * trans_create_rdma - Transport method for creating atransport instance + * @client: client instance + * @addr: IP address string + * @args: Mount options string + */ +static int +rdma_create_trans(struct p9_client *client, const char *addr, char *args) +{ + int err; + struct p9_rdma_opts opts; + struct p9_trans_rdma *rdma; + struct rdma_conn_param conn_param; + struct ib_qp_init_attr qp_attr; + struct ib_cq_init_attr cq_attr = {}; + + if (addr == NULL) + return -EINVAL; + + /* Parse the transport specific mount options */ + err = parse_opts(args, &opts); + if (err < 0) + return err; + + /* Create and initialize the RDMA transport structure */ + rdma = alloc_rdma(&opts); + if (!rdma) + return -ENOMEM; + + /* Create the RDMA CM ID */ + rdma->cm_id = rdma_create_id(&init_net, p9_cm_event_handler, client, + RDMA_PS_TCP, IB_QPT_RC); + if (IS_ERR(rdma->cm_id)) + goto error; + + /* Associate the client with the transport */ + client->trans = rdma; + + /* Bind to a privileged port if we need to */ + if (opts.privport) { + err = p9_rdma_bind_privport(rdma); + if (err < 0) { + pr_err("%s (%d): problem binding to privport: %d\n", + __func__, task_pid_nr(current), -err); + goto error; + } + } + + /* Resolve the server's address */ + rdma->addr.sin_family = AF_INET; + rdma->addr.sin_addr.s_addr = in_aton(addr); + 
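/*
+	 * The calls below walk the connection state machine declared in
+	 * struct p9_trans_rdma: each one posts an asynchronous rdma_cm
+	 * request and then blocks on cm_done until p9_cm_event_handler()
+	 * has advanced the state:
+	 *
+	 *	P9_RDMA_INIT
+	 *	  -> rdma_resolve_addr()  -> P9_RDMA_ADDR_RESOLVED
+	 *	  -> rdma_resolve_route() -> P9_RDMA_ROUTE_RESOLVED
+	 *	  -> rdma_connect()       -> P9_RDMA_CONNECTED
+	 */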
rdma->addr.sin_port = htons(opts.port); + err = rdma_resolve_addr(rdma->cm_id, NULL, + (struct sockaddr *)&rdma->addr, + rdma->timeout); + if (err) + goto error; + err = wait_for_completion_interruptible(&rdma->cm_done); + if (err || (rdma->state != P9_RDMA_ADDR_RESOLVED)) + goto error; + + /* Resolve the route to the server */ + err = rdma_resolve_route(rdma->cm_id, rdma->timeout); + if (err) + goto error; + err = wait_for_completion_interruptible(&rdma->cm_done); + if (err || (rdma->state != P9_RDMA_ROUTE_RESOLVED)) + goto error; + + /* Create the Completion Queue */ + cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1; + rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler, + cq_event_handler, client, + &cq_attr); + if (IS_ERR(rdma->cq)) + goto error; + ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP); + + /* Create the Protection Domain */ + rdma->pd = ib_alloc_pd(rdma->cm_id->device); + if (IS_ERR(rdma->pd)) + goto error; + + /* Create the Queue Pair */ + memset(&qp_attr, 0, sizeof qp_attr); + qp_attr.event_handler = qp_event_handler; + qp_attr.qp_context = client; + qp_attr.cap.max_send_wr = opts.sq_depth; + qp_attr.cap.max_recv_wr = opts.rq_depth; + qp_attr.cap.max_send_sge = P9_RDMA_SEND_SGE; + qp_attr.cap.max_recv_sge = P9_RDMA_RECV_SGE; + qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR; + qp_attr.qp_type = IB_QPT_RC; + qp_attr.send_cq = rdma->cq; + qp_attr.recv_cq = rdma->cq; + err = rdma_create_qp(rdma->cm_id, rdma->pd, &qp_attr); + if (err) + goto error; + rdma->qp = rdma->cm_id->qp; + + /* Request a connection */ + memset(&conn_param, 0, sizeof(conn_param)); + conn_param.private_data = NULL; + conn_param.private_data_len = 0; + conn_param.responder_resources = P9_RDMA_IRD; + conn_param.initiator_depth = P9_RDMA_ORD; + err = rdma_connect(rdma->cm_id, &conn_param); + if (err) + goto error; + err = wait_for_completion_interruptible(&rdma->cm_done); + if (err || (rdma->state != P9_RDMA_CONNECTED)) + goto error; + + client->status = Connected; + + return 0; + +error: + rdma_destroy_trans(rdma); + return -ENOTCONN; +} + +static struct p9_trans_module p9_rdma_trans = { + .name = "rdma", + .maxsize = P9_RDMA_MAXSIZE, + .def = 0, + .owner = THIS_MODULE, + .create = rdma_create_trans, + .close = rdma_close, + .request = rdma_request, + .cancel = rdma_cancel, + .cancelled = rdma_cancelled, +}; + +/** + * p9_trans_rdma_init - Register the 9P RDMA transport driver + */ +static int __init p9_trans_rdma_init(void) +{ + v9fs_register_trans(&p9_rdma_trans); + return 0; +} + +static void __exit p9_trans_rdma_exit(void) +{ + v9fs_unregister_trans(&p9_rdma_trans); +} + +module_init(p9_trans_rdma_init); +module_exit(p9_trans_rdma_exit); + +MODULE_AUTHOR("Tom Tucker "); +MODULE_DESCRIPTION("RDMA Transport for 9P"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/addons/9p/src/4.4.180/trans_virtio.c b/addons/9p/src/4.4.180/trans_virtio.c new file mode 100644 index 00000000..2a15b6aa --- /dev/null +++ b/addons/9p/src/4.4.180/trans_virtio.c @@ -0,0 +1,788 @@ +/* + * The Virtio 9p transport driver + * + * This is a block based transport driver based on the lguest block driver + * code. + * + * Copyright (C) 2007, 2008 Eric Van Hensbergen, IBM Corporation + * + * Based on virtio console driver + * Copyright (C) 2006, 2007 Rusty Russell, IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "trans_common.h" + +#define VIRTQUEUE_NUM 128 + +/* a single mutex to manage channel initialization and attachment */ +static DEFINE_MUTEX(virtio_9p_lock); +static DECLARE_WAIT_QUEUE_HEAD(vp_wq); +static atomic_t vp_pinned = ATOMIC_INIT(0); + +/** + * struct virtio_chan - per-instance transport information + * @initialized: whether the channel is initialized + * @inuse: whether the channel is in use + * @lock: protects multiple elements within this structure + * @client: client instance + * @vdev: virtio dev associated with this channel + * @vq: virtio queue associated with this channel + * @sg: scatter gather list which is used to pack a request (protected?) + * + * We keep all per-channel information in a structure. + * This structure is allocated within the devices dev->mem space. + * A pointer to the structure will get put in the transport private. + * + */ + +struct virtio_chan { + bool inuse; + + spinlock_t lock; + + struct p9_client *client; + struct virtio_device *vdev; + struct virtqueue *vq; + int ring_bufs_avail; + wait_queue_head_t *vc_wq; + /* This is global limit. Since we don't have a global structure, + * will be placing it in each channel. + */ + unsigned long p9_max_pages; + /* Scatterlist: can be too big for stack. */ + struct scatterlist sg[VIRTQUEUE_NUM]; + + int tag_len; + /* + * tag name to identify a mount Non-null terminated + */ + char *tag; + + struct list_head chan_list; +}; + +static struct list_head virtio_chan_list; + +/* How many bytes left in this page. */ +static unsigned int rest_of_page(void *data) +{ + return PAGE_SIZE - ((unsigned long)data % PAGE_SIZE); +} + +/** + * p9_virtio_close - reclaim resources of a channel + * @client: client instance + * + * This reclaims a channel by freeing its resources and + * reseting its inuse flag. + * + */ + +static void p9_virtio_close(struct p9_client *client) +{ + struct virtio_chan *chan = client->trans; + + mutex_lock(&virtio_9p_lock); + if (chan) + chan->inuse = false; + mutex_unlock(&virtio_9p_lock); +} + +/** + * req_done - callback which signals activity from the server + * @vq: virtio queue activity was received on + * + * This notifies us that the server has triggered some activity + * on the virtio channel - most likely a response to request we + * sent. Figure out which requests now have responses and wake up + * those threads. + * + * Bugs: could do with some additional sanity checking, but appears to work. 
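+ * Completion routing in brief: each buffer handed back by
+ * virtqueue_get_buf() is the p9_fcall that was posted with the
+ * request, so its 9P tag can be passed to p9_tag_lookup() to find
+ * the waiting request before p9_client_cb() wakes it.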
+ * + */ + +static void req_done(struct virtqueue *vq) +{ + struct virtio_chan *chan = vq->vdev->priv; + struct p9_fcall *rc; + unsigned int len; + struct p9_req_t *req; + unsigned long flags; + + p9_debug(P9_DEBUG_TRANS, ": request done\n"); + + while (1) { + spin_lock_irqsave(&chan->lock, flags); + rc = virtqueue_get_buf(chan->vq, &len); + if (rc == NULL) { + spin_unlock_irqrestore(&chan->lock, flags); + break; + } + chan->ring_bufs_avail = 1; + spin_unlock_irqrestore(&chan->lock, flags); + /* Wakeup if anyone waiting for VirtIO ring space. */ + wake_up(chan->vc_wq); + p9_debug(P9_DEBUG_TRANS, ": rc %p\n", rc); + p9_debug(P9_DEBUG_TRANS, ": lookup tag %d\n", rc->tag); + req = p9_tag_lookup(chan->client, rc->tag); + p9_client_cb(chan->client, req, REQ_STATUS_RCVD); + } +} + +/** + * pack_sg_list - pack a scatter gather list from a linear buffer + * @sg: scatter/gather list to pack into + * @start: which segment of the sg_list to start at + * @limit: maximum segment to pack data to + * @data: data to pack into scatter/gather list + * @count: amount of data to pack into the scatter/gather list + * + * sg_lists have multiple segments of various sizes. This will pack + * arbitrary data into an existing scatter gather list, segmenting the + * data as necessary within constraints. + * + */ + +static int pack_sg_list(struct scatterlist *sg, int start, + int limit, char *data, int count) +{ + int s; + int index = start; + + while (count) { + s = rest_of_page(data); + if (s > count) + s = count; + BUG_ON(index >= limit); + /* Make sure we don't terminate early. */ + sg_unmark_end(&sg[index]); + sg_set_buf(&sg[index++], data, s); + count -= s; + data += s; + } + if (index-start) + sg_mark_end(&sg[index - 1]); + return index-start; +} + +/* We don't currently allow canceling of virtio requests */ +static int p9_virtio_cancel(struct p9_client *client, struct p9_req_t *req) +{ + return 1; +} + +/** + * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer, + * this takes a list of pages. + * @sg: scatter/gather list to pack into + * @start: which segment of the sg_list to start at + * @pdata: a list of pages to add into sg. + * @nr_pages: number of pages to pack into the scatter/gather list + * @offs: amount of data in the beginning of first page _not_ to pack + * @count: amount of data to pack into the scatter/gather list + */ +static int +pack_sg_list_p(struct scatterlist *sg, int start, int limit, + struct page **pdata, int nr_pages, size_t offs, int count) +{ + int i = 0, s; + int data_off = offs; + int index = start; + + BUG_ON(nr_pages > (limit - start)); + /* + * if the first page doesn't start at + * page boundary find the offset + */ + while (nr_pages) { + s = PAGE_SIZE - data_off; + if (s > count) + s = count; + BUG_ON(index >= limit); + /* Make sure we don't terminate early. 
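+ * A worked example of the split this loop performs, assuming 4 KiB
+ * pages: packing 6000 bytes that begin 3000 bytes into their first
+ * page yields three sg entries of 1096, 4096 and 808 bytes.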
*/ + sg_unmark_end(&sg[index]); + sg_set_page(&sg[index++], pdata[i++], s, data_off); + data_off = 0; + count -= s; + nr_pages--; + } + + if (index-start) + sg_mark_end(&sg[index - 1]); + return index - start; +} + +/** + * p9_virtio_request - issue a request + * @client: client instance issuing the request + * @req: request to be issued + * + */ + +static int +p9_virtio_request(struct p9_client *client, struct p9_req_t *req) +{ + int err; + int in, out, out_sgs, in_sgs; + unsigned long flags; + struct virtio_chan *chan = client->trans; + struct scatterlist *sgs[2]; + + p9_debug(P9_DEBUG_TRANS, "9p debug: virtio request\n"); + + req->status = REQ_STATUS_SENT; +req_retry: + spin_lock_irqsave(&chan->lock, flags); + + out_sgs = in_sgs = 0; + /* Handle out VirtIO ring buffers */ + out = pack_sg_list(chan->sg, 0, + VIRTQUEUE_NUM, req->tc->sdata, req->tc->size); + if (out) + sgs[out_sgs++] = chan->sg; + + in = pack_sg_list(chan->sg, out, + VIRTQUEUE_NUM, req->rc->sdata, req->rc->capacity); + if (in) + sgs[out_sgs + in_sgs++] = chan->sg + out; + + err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc, + GFP_ATOMIC); + if (err < 0) { + if (err == -ENOSPC) { + chan->ring_bufs_avail = 0; + spin_unlock_irqrestore(&chan->lock, flags); + err = wait_event_killable(*chan->vc_wq, + chan->ring_bufs_avail); + if (err == -ERESTARTSYS) + return err; + + p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n"); + goto req_retry; + } else { + spin_unlock_irqrestore(&chan->lock, flags); + p9_debug(P9_DEBUG_TRANS, + "virtio rpc add_sgs returned failure\n"); + return -EIO; + } + } + virtqueue_kick(chan->vq); + spin_unlock_irqrestore(&chan->lock, flags); + + p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n"); + return 0; +} + +static int p9_get_mapped_pages(struct virtio_chan *chan, + struct page ***pages, + struct iov_iter *data, + int count, + size_t *offs, + int *need_drop) +{ + int nr_pages; + int err; + + if (!iov_iter_count(data)) + return 0; + + if (!(data->type & ITER_KVEC)) { + int n; + /* + * We allow only p9_max_pages pinned. 
We wait for the + * other zc request to finish here + */ + if (atomic_read(&vp_pinned) >= chan->p9_max_pages) { + err = wait_event_killable(vp_wq, + (atomic_read(&vp_pinned) < chan->p9_max_pages)); + if (err == -ERESTARTSYS) + return err; + } + n = iov_iter_get_pages_alloc(data, pages, count, offs); + if (n < 0) + return n; + *need_drop = 1; + nr_pages = DIV_ROUND_UP(n + *offs, PAGE_SIZE); + atomic_add(nr_pages, &vp_pinned); + return n; + } else { + /* kernel buffer, no need to pin pages */ + int index; + size_t len; + void *p; + + /* we'd already checked that it's non-empty */ + while (1) { + len = iov_iter_single_seg_count(data); + if (likely(len)) { + p = data->kvec->iov_base + data->iov_offset; + break; + } + iov_iter_advance(data, 0); + } + if (len > count) + len = count; + + nr_pages = DIV_ROUND_UP((unsigned long)p + len, PAGE_SIZE) - + (unsigned long)p / PAGE_SIZE; + + *pages = kmalloc(sizeof(struct page *) * nr_pages, GFP_NOFS); + if (!*pages) + return -ENOMEM; + + *need_drop = 0; + p -= (*offs = (unsigned long)p % PAGE_SIZE); + for (index = 0; index < nr_pages; index++) { + if (is_vmalloc_addr(p)) + (*pages)[index] = vmalloc_to_page(p); + else + (*pages)[index] = kmap_to_page(p); + p += PAGE_SIZE; + } + return len; + } +} + +/** + * p9_virtio_zc_request - issue a zero copy request + * @client: client instance issuing the request + * @req: request to be issued + * @uidata: user buffer that should be used for zero copy read + * @uodata: user buffer that should be used for zero copy write + * @inlen: read buffer size + * @outlen: write buffer size + * @in_hdr_len: read header size; this is the size of the response protocol data + * + */ +static int +p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req, + struct iov_iter *uidata, struct iov_iter *uodata, + int inlen, int outlen, int in_hdr_len) +{ + int in, out, err, out_sgs, in_sgs; + unsigned long flags; + int in_nr_pages = 0, out_nr_pages = 0; + struct page **in_pages = NULL, **out_pages = NULL; + struct virtio_chan *chan = client->trans; + struct scatterlist *sgs[4]; + size_t offs; + int need_drop = 0; + + p9_debug(P9_DEBUG_TRANS, "virtio request\n"); + + if (uodata) { + __le32 sz; + int n = p9_get_mapped_pages(chan, &out_pages, uodata, + outlen, &offs, &need_drop); + if (n < 0) + return n; + out_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE); + if (n != outlen) { + __le32 v = cpu_to_le32(n); + memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4); + outlen = n; + } + /* The size field of the message must include the length of the + * header and the length of the data. We didn't actually know + * the length of the data until this point so add it in now. + */ + sz = cpu_to_le32(req->tc->size + outlen); + memcpy(&req->tc->sdata[0], &sz, sizeof(sz)); + } else if (uidata) { + int n = p9_get_mapped_pages(chan, &in_pages, uidata, + inlen, &offs, &need_drop); + if (n < 0) + return n; + in_nr_pages = DIV_ROUND_UP(n + offs, PAGE_SIZE); + if (n != inlen) { + __le32 v = cpu_to_le32(n); + memcpy(&req->tc->sdata[req->tc->size - 4], &v, 4); + inlen = n; + } + } + req->status = REQ_STATUS_SENT; +req_retry_pinned: + spin_lock_irqsave(&chan->lock, flags); + + out_sgs = in_sgs = 0; + + /* out data */ + out = pack_sg_list(chan->sg, 0, + VIRTQUEUE_NUM, req->tc->sdata, req->tc->size); + + if (out) + sgs[out_sgs++] = chan->sg; + + if (out_pages) { + sgs[out_sgs++] = chan->sg + out; + out += pack_sg_list_p(chan->sg, out, VIRTQUEUE_NUM, + out_pages, out_nr_pages, offs, outlen); + } + + /* + * Take care of in data + * For example TREAD has 11.
+ * 11 is the read/write header = PDU Header(7) + IO Size (4). + * Arrange in such a way that server places header in the + * alloced memory and payload onto the user buffer. + */ + in = pack_sg_list(chan->sg, out, + VIRTQUEUE_NUM, req->rc->sdata, in_hdr_len); + if (in) + sgs[out_sgs + in_sgs++] = chan->sg + out; + + if (in_pages) { + sgs[out_sgs + in_sgs++] = chan->sg + out + in; + in += pack_sg_list_p(chan->sg, out + in, VIRTQUEUE_NUM, + in_pages, in_nr_pages, offs, inlen); + } + + BUG_ON(out_sgs + in_sgs > ARRAY_SIZE(sgs)); + err = virtqueue_add_sgs(chan->vq, sgs, out_sgs, in_sgs, req->tc, + GFP_ATOMIC); + if (err < 0) { + if (err == -ENOSPC) { + chan->ring_bufs_avail = 0; + spin_unlock_irqrestore(&chan->lock, flags); + err = wait_event_killable(*chan->vc_wq, + chan->ring_bufs_avail); + if (err == -ERESTARTSYS) + goto err_out; + + p9_debug(P9_DEBUG_TRANS, "Retry virtio request\n"); + goto req_retry_pinned; + } else { + spin_unlock_irqrestore(&chan->lock, flags); + p9_debug(P9_DEBUG_TRANS, + "virtio rpc add_sgs returned failure\n"); + err = -EIO; + goto err_out; + } + } + virtqueue_kick(chan->vq); + spin_unlock_irqrestore(&chan->lock, flags); + p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n"); + err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD); + /* + * Non kernel buffers are pinned, unpin them + */ +err_out: + if (need_drop) { + if (in_pages) { + p9_release_pages(in_pages, in_nr_pages); + atomic_sub(in_nr_pages, &vp_pinned); + } + if (out_pages) { + p9_release_pages(out_pages, out_nr_pages); + atomic_sub(out_nr_pages, &vp_pinned); + } + /* wakeup anybody waiting for slots to pin pages */ + wake_up(&vp_wq); + } + kfree(in_pages); + kfree(out_pages); + return err; +} + +static ssize_t p9_mount_tag_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct virtio_chan *chan; + struct virtio_device *vdev; + + vdev = dev_to_virtio(dev); + chan = vdev->priv; + + memcpy(buf, chan->tag, chan->tag_len); + buf[chan->tag_len] = 0; + + return chan->tag_len + 1; +} + +static DEVICE_ATTR(mount_tag, 0444, p9_mount_tag_show, NULL); + +/** + * p9_virtio_probe - probe for existence of 9P virtio channels + * @vdev: virtio device to probe + * + * This probes for existing virtio channels. + * + */ + +static int p9_virtio_probe(struct virtio_device *vdev) +{ + __u16 tag_len; + char *tag; + int err; + struct virtio_chan *chan; + + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + + chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL); + if (!chan) { + pr_err("Failed to allocate virtio 9P channel\n"); + err = -ENOMEM; + goto fail; + } + + chan->vdev = vdev; + + /* We expect one virtqueue, for requests. 
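+ * The config space read below follows the virtio 9p layout,
+ * essentially:
+ *
+ *	struct virtio_9p_config {
+ *		__u16 tag_len;
+ *		__u8  tag[];	(tag_len bytes, not NUL-terminated)
+ *	};
+ *
+ * hence the virtio_cread() of tag_len followed by virtio_cread_bytes()
+ * starting at offsetof(struct virtio_9p_config, tag).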
*/ + chan->vq = virtio_find_single_vq(vdev, req_done, "requests"); + if (IS_ERR(chan->vq)) { + err = PTR_ERR(chan->vq); + goto out_free_chan; + } + chan->vq->vdev->priv = chan; + spin_lock_init(&chan->lock); + + sg_init_table(chan->sg, VIRTQUEUE_NUM); + + chan->inuse = false; + if (virtio_has_feature(vdev, VIRTIO_9P_MOUNT_TAG)) { + virtio_cread(vdev, struct virtio_9p_config, tag_len, &tag_len); + } else { + err = -EINVAL; + goto out_free_vq; + } + tag = kmalloc(tag_len, GFP_KERNEL); + if (!tag) { + err = -ENOMEM; + goto out_free_vq; + } + + virtio_cread_bytes(vdev, offsetof(struct virtio_9p_config, tag), + tag, tag_len); + chan->tag = tag; + chan->tag_len = tag_len; + err = sysfs_create_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); + if (err) { + goto out_free_tag; + } + chan->vc_wq = kmalloc(sizeof(wait_queue_head_t), GFP_KERNEL); + if (!chan->vc_wq) { + err = -ENOMEM; + goto out_free_tag; + } + init_waitqueue_head(chan->vc_wq); + chan->ring_bufs_avail = 1; + /* Ceiling limit to avoid denial of service attacks */ + chan->p9_max_pages = nr_free_buffer_pages()/4; + + virtio_device_ready(vdev); + + mutex_lock(&virtio_9p_lock); + list_add_tail(&chan->chan_list, &virtio_chan_list); + mutex_unlock(&virtio_9p_lock); + + /* Let udev rules use the new mount_tag attribute. */ + kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); + + return 0; + +out_free_tag: + kfree(tag); +out_free_vq: + vdev->config->del_vqs(vdev); +out_free_chan: + kfree(chan); +fail: + return err; +} + + +/** + * p9_virtio_create - allocate a new virtio channel + * @client: client instance invoking this transport + * @devname: string identifying the channel to connect to (unused) + * @args: args passed from sys_mount() for per-transport options (unused) + * + * This sets up a transport channel for 9p communication. Right now + * we only match the first available channel, but eventually we couldlook up + * alternate channels by matching devname versus a virtio_config entry. + * We use a simple reference count mechanism to ensure that only a single + * mount has a channel open at a time. + * + */ + +static int +p9_virtio_create(struct p9_client *client, const char *devname, char *args) +{ + struct virtio_chan *chan; + int ret = -ENOENT; + int found = 0; + + if (devname == NULL) + return -EINVAL; + + mutex_lock(&virtio_9p_lock); + list_for_each_entry(chan, &virtio_chan_list, chan_list) { + if (!strncmp(devname, chan->tag, chan->tag_len) && + strlen(devname) == chan->tag_len) { + if (!chan->inuse) { + chan->inuse = true; + found = 1; + break; + } + ret = -EBUSY; + } + } + mutex_unlock(&virtio_9p_lock); + + if (!found) { + pr_err("no channels available\n"); + return ret; + } + + client->trans = (void *)chan; + client->status = Connected; + chan->client = client; + + return 0; +} + +/** + * p9_virtio_remove - clean up resources associated with a virtio device + * @vdev: virtio device to remove + * + */ + +static void p9_virtio_remove(struct virtio_device *vdev) +{ + struct virtio_chan *chan = vdev->priv; + unsigned long warning_time; + + mutex_lock(&virtio_9p_lock); + + /* Remove self from list so we don't get new users. */ + list_del(&chan->chan_list); + warning_time = jiffies; + + /* Wait for existing users to close. 
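+ * The inuse flag set by p9_virtio_create() under virtio_9p_lock is
+ * the only reference held on a channel: p9_virtio_close() clears it
+ * at umount time, and the loop below polls it every 250 ms, warning
+ * after 10 s. (For reference, a channel is claimed by mounting its
+ * tag, e.g. "mount -t 9p -o trans=virtio hostshare /mnt", where
+ * hostshare is an illustrative hypervisor-assigned tag.)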
*/ + while (chan->inuse) { + mutex_unlock(&virtio_9p_lock); + msleep(250); + if (time_after(jiffies, warning_time + 10 * HZ)) { + dev_emerg(&vdev->dev, + "p9_virtio_remove: waiting for device in use.\n"); + warning_time = jiffies; + } + mutex_lock(&virtio_9p_lock); + } + + mutex_unlock(&virtio_9p_lock); + + vdev->config->reset(vdev); + vdev->config->del_vqs(vdev); + + sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); + kobject_uevent(&(vdev->dev.kobj), KOBJ_CHANGE); + kfree(chan->tag); + kfree(chan->vc_wq); + kfree(chan); + +} + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_9P, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { + VIRTIO_9P_MOUNT_TAG, +}; + +/* The standard "struct lguest_driver": */ +static struct virtio_driver p9_virtio_drv = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = p9_virtio_probe, + .remove = p9_virtio_remove, +}; + +static struct p9_trans_module p9_virtio_trans = { + .name = "virtio", + .create = p9_virtio_create, + .close = p9_virtio_close, + .request = p9_virtio_request, + .zc_request = p9_virtio_zc_request, + .cancel = p9_virtio_cancel, + /* + * We leave one entry for input and one entry for response + * headers. We also skip one more entry to accomodate, address + * that are not at page boundary, that can result in an extra + * page in zero copy. + */ + .maxsize = PAGE_SIZE * (VIRTQUEUE_NUM - 3), + .def = 1, + .owner = THIS_MODULE, +}; + +/* The standard init function */ +static int __init p9_virtio_init(void) +{ + INIT_LIST_HEAD(&virtio_chan_list); + + v9fs_register_trans(&p9_virtio_trans); + return register_virtio_driver(&p9_virtio_drv); +} + +static void __exit p9_virtio_cleanup(void) +{ + unregister_virtio_driver(&p9_virtio_drv); + v9fs_unregister_trans(&p9_virtio_trans); +} + +module_init(p9_virtio_init); +module_exit(p9_virtio_cleanup); + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_AUTHOR("Eric Van Hensbergen "); +MODULE_DESCRIPTION("Virtio 9p Transport"); +MODULE_LICENSE("GPL"); diff --git a/addons/9p/src/4.4.180/util.c b/addons/9p/src/4.4.180/util.c new file mode 100644 index 00000000..59f278e6 --- /dev/null +++ b/addons/9p/src/4.4.180/util.c @@ -0,0 +1,141 @@ +/* + * net/9p/util.c + * + * This file contains some helper functions + * + * Copyright (C) 2007 by Latchesar Ionkov + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/** + * struct p9_idpool - per-connection accounting for tag idpool + * @lock: protects the pool + * @pool: idr to allocate tag id from + * + */ + +struct p9_idpool { + spinlock_t lock; + struct idr pool; +}; + +/** + * p9_idpool_create - create a new per-connection id pool + * + */ + +struct p9_idpool *p9_idpool_create(void) +{ + struct p9_idpool *p; + + p = kmalloc(sizeof(struct p9_idpool), GFP_KERNEL); + if (!p) + return ERR_PTR(-ENOMEM); + + spin_lock_init(&p->lock); + idr_init(&p->pool); + + return p; +} +EXPORT_SYMBOL(p9_idpool_create); + +/** + * p9_idpool_destroy - destroy a per-connection id pool + * @p: idpool to destroy + */ + +void p9_idpool_destroy(struct p9_idpool *p) +{ + idr_destroy(&p->pool); + kfree(p); +} +EXPORT_SYMBOL(p9_idpool_destroy); + +/** + * p9_idpool_get - allocate numeric id from pool + * @p: pool to allocate from + * + * Bugs: This seems to be an awful generic function, should it be in idr.c with + * the lock included in struct idr? + */ + +int p9_idpool_get(struct p9_idpool *p) +{ + int i; + unsigned long flags; + + idr_preload(GFP_NOFS); + spin_lock_irqsave(&p->lock, flags); + + /* no need to store exactly p, we just need something non-null */ + i = idr_alloc(&p->pool, p, 0, 0, GFP_NOWAIT); + + spin_unlock_irqrestore(&p->lock, flags); + idr_preload_end(); + if (i < 0) + return -1; + + p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", i, p); + return i; +} +EXPORT_SYMBOL(p9_idpool_get); + +/** + * p9_idpool_put - release numeric id from pool + * @id: numeric id which is being released + * @p: pool to release id into + * + * Bugs: This seems to be an awful generic function, should it be in idr.c with + * the lock included in struct idr? + */ + +void p9_idpool_put(int id, struct p9_idpool *p) +{ + unsigned long flags; + + p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", id, p); + + spin_lock_irqsave(&p->lock, flags); + idr_remove(&p->pool, id); + spin_unlock_irqrestore(&p->lock, flags); +} +EXPORT_SYMBOL(p9_idpool_put); + +/** + * p9_idpool_check - check if the specified id is in use + * @id: id to check + * @p: pool to check + */ + +int p9_idpool_check(int id, struct p9_idpool *p) +{ + return idr_find(&p->pool, id) != NULL; +} +EXPORT_SYMBOL(p9_idpool_check); + diff --git a/addons/9p/src/4.4.180/v9fs.c b/addons/9p/src/4.4.180/v9fs.c new file mode 100644 index 00000000..1e9bb8db --- /dev/null +++ b/addons/9p/src/4.4.180/v9fs.c @@ -0,0 +1,700 @@ +/* + * linux/fs/9p/v9fs.c + * + * This file contains functions assisting in mapping VFS to 9P2000 + * + * Copyright (C) 2004-2008 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "cache.h" + +static DEFINE_SPINLOCK(v9fs_sessionlist_lock); +static LIST_HEAD(v9fs_sessionlist); +struct kmem_cache *v9fs_inode_cache; + +/* + * Option Parsing (code inspired by NFS code) + * NOTE: each transport will parse its own options + */ + +enum { + /* Options that take integer arguments */ + Opt_debug, Opt_dfltuid, Opt_dfltgid, Opt_afid, + /* String options */ + Opt_uname, Opt_remotename, Opt_cache, Opt_cachetag, + /* Options that take no arguments */ + Opt_nodevmap, + /* Cache options */ + Opt_cache_loose, Opt_fscache, Opt_mmap, + /* Access options */ + Opt_access, Opt_posixacl, + /* Lock timeout option */ + Opt_locktimeout, + /* Error token */ + Opt_err +}; + +static const match_table_t tokens = { + {Opt_debug, "debug=%x"}, + {Opt_dfltuid, "dfltuid=%u"}, + {Opt_dfltgid, "dfltgid=%u"}, + {Opt_afid, "afid=%u"}, + {Opt_uname, "uname=%s"}, + {Opt_remotename, "aname=%s"}, + {Opt_nodevmap, "nodevmap"}, + {Opt_cache, "cache=%s"}, + {Opt_cache_loose, "loose"}, + {Opt_fscache, "fscache"}, + {Opt_mmap, "mmap"}, + {Opt_cachetag, "cachetag=%s"}, + {Opt_access, "access=%s"}, + {Opt_posixacl, "posixacl"}, + {Opt_locktimeout, "locktimeout=%u"}, + {Opt_err, NULL} +}; + +/* Interpret mount options for cache mode */ +static int get_cache_mode(char *s) +{ + int version = -EINVAL; + + if (!strcmp(s, "loose")) { + version = CACHE_LOOSE; + p9_debug(P9_DEBUG_9P, "Cache mode: loose\n"); + } else if (!strcmp(s, "fscache")) { + version = CACHE_FSCACHE; + p9_debug(P9_DEBUG_9P, "Cache mode: fscache\n"); + } else if (!strcmp(s, "mmap")) { + version = CACHE_MMAP; + p9_debug(P9_DEBUG_9P, "Cache mode: mmap\n"); + } else if (!strcmp(s, "none")) { + version = CACHE_NONE; + p9_debug(P9_DEBUG_9P, "Cache mode: none\n"); + } else + pr_info("Unknown Cache mode %s\n", s); + return version; +} + +/** + * v9fs_parse_options - parse mount options into session structure + * @v9ses: existing v9fs session information + * + * Return 0 upon success, -ERRNO upon failure. 
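+ *
+ * An example option string as this function receives it, with
+ * illustrative values:
+ *
+ *	debug=4,dfltuid=1000,cache=loose,access=user,posixacl
+ *
+ * strsep() splits on commas, match_token() classifies each piece
+ * against the tokens table above, and match_int()/match_strdup()
+ * extract the argument.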
+ */ + +static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts) +{ + char *options, *tmp_options; + substring_t args[MAX_OPT_ARGS]; + char *p; + int option = 0; + char *s, *e; + int ret = 0; + + /* setup defaults */ + v9ses->afid = ~0; + v9ses->debug = 0; + v9ses->cache = CACHE_NONE; +#ifdef CONFIG_9P_FSCACHE + v9ses->cachetag = NULL; +#endif + v9ses->session_lock_timeout = P9_LOCK_TIMEOUT; + + if (!opts) + return 0; + + tmp_options = kstrdup(opts, GFP_KERNEL); + if (!tmp_options) { + ret = -ENOMEM; + goto fail_option_alloc; + } + options = tmp_options; + + while ((p = strsep(&options, ",")) != NULL) { + int token, r; + if (!*p) + continue; + token = match_token(p, tokens, args); + switch (token) { + case Opt_debug: + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + ret = r; + continue; + } + v9ses->debug = option; +#ifdef CONFIG_NET_9P_DEBUG + p9_debug_level = option; +#endif + break; + + case Opt_dfltuid: + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + ret = r; + continue; + } + v9ses->dfltuid = make_kuid(current_user_ns(), option); + if (!uid_valid(v9ses->dfltuid)) { + p9_debug(P9_DEBUG_ERROR, + "uid field, but not a uid?\n"); + ret = -EINVAL; + continue; + } + break; + case Opt_dfltgid: + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + ret = r; + continue; + } + v9ses->dfltgid = make_kgid(current_user_ns(), option); + if (!gid_valid(v9ses->dfltgid)) { + p9_debug(P9_DEBUG_ERROR, + "gid field, but not a gid?\n"); + ret = -EINVAL; + continue; + } + break; + case Opt_afid: + r = match_int(&args[0], &option); + if (r < 0) { + p9_debug(P9_DEBUG_ERROR, + "integer field, but no integer?\n"); + ret = r; + continue; + } + v9ses->afid = option; + break; + case Opt_uname: + kfree(v9ses->uname); + v9ses->uname = match_strdup(&args[0]); + if (!v9ses->uname) { + ret = -ENOMEM; + goto free_and_return; + } + break; + case Opt_remotename: + kfree(v9ses->aname); + v9ses->aname = match_strdup(&args[0]); + if (!v9ses->aname) { + ret = -ENOMEM; + goto free_and_return; + } + break; + case Opt_nodevmap: + v9ses->nodev = 1; + break; + case Opt_cache_loose: + v9ses->cache = CACHE_LOOSE; + break; + case Opt_fscache: + v9ses->cache = CACHE_FSCACHE; + break; + case Opt_mmap: + v9ses->cache = CACHE_MMAP; + break; + case Opt_cachetag: +#ifdef CONFIG_9P_FSCACHE + v9ses->cachetag = match_strdup(&args[0]); +#endif + break; + case Opt_cache: + s = match_strdup(&args[0]); + if (!s) { + ret = -ENOMEM; + p9_debug(P9_DEBUG_ERROR, + "problem allocating copy of cache arg\n"); + goto free_and_return; + } + ret = get_cache_mode(s); + if (ret == -EINVAL) { + kfree(s); + goto free_and_return; + } + + v9ses->cache = ret; + kfree(s); + break; + + case Opt_access: + s = match_strdup(&args[0]); + if (!s) { + ret = -ENOMEM; + p9_debug(P9_DEBUG_ERROR, + "problem allocating copy of access arg\n"); + goto free_and_return; + } + + v9ses->flags &= ~V9FS_ACCESS_MASK; + if (strcmp(s, "user") == 0) + v9ses->flags |= V9FS_ACCESS_USER; + else if (strcmp(s, "any") == 0) + v9ses->flags |= V9FS_ACCESS_ANY; + else if (strcmp(s, "client") == 0) { + v9ses->flags |= V9FS_ACCESS_CLIENT; + } else { + uid_t uid; + v9ses->flags |= V9FS_ACCESS_SINGLE; + uid = simple_strtoul(s, &e, 10); + if (*e != '\0') { + ret = -EINVAL; + pr_info("Unknown access argument %s\n", + s); + kfree(s); + goto free_and_return; + } + v9ses->uid = 
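+				/* note: access=<uid> selects single-user
+				 * mode; the numeric uid is mapped into the
+				 * caller's user namespace here */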
make_kuid(current_user_ns(), uid);
+				if (!uid_valid(v9ses->uid)) {
+					ret = -EINVAL;
+					pr_info("Unknown uid %s\n", s);
+					kfree(s);
+					goto free_and_return;
+				}
+			}
+
+			kfree(s);
+			break;
+
+		case Opt_posixacl:
+#ifdef CONFIG_9P_FS_POSIX_ACL
+			v9ses->flags |= V9FS_POSIX_ACL;
+#else
+			p9_debug(P9_DEBUG_ERROR,
+				 "Not defined CONFIG_9P_FS_POSIX_ACL. Ignoring posixacl option\n");
+#endif
+			break;
+
+		case Opt_locktimeout:
+			r = match_int(&args[0], &option);
+			if (r < 0) {
+				p9_debug(P9_DEBUG_ERROR,
+					 "integer field, but no integer?\n");
+				ret = r;
+				continue;
+			}
+			if (option < 1) {
+				p9_debug(P9_DEBUG_ERROR,
+					 "locktimeout must be a greater than zero integer.\n");
+				ret = -EINVAL;
+				continue;
+			}
+			v9ses->session_lock_timeout = (long)option * HZ;
+			break;
+
+		default:
+			continue;
+		}
+	}
+
+free_and_return:
+	kfree(tmp_options);
+fail_option_alloc:
+	return ret;
+}
+
+/**
+ * v9fs_session_init - initialize session
+ * @v9ses: session information structure
+ * @dev_name: device being mounted
+ * @data: options
+ *
+ */
+
+struct p9_fid *v9fs_session_init(struct v9fs_session_info *v9ses,
+		  const char *dev_name, char *data)
+{
+	struct p9_fid *fid;
+	int rc = -ENOMEM;
+
+	v9ses->uname = kstrdup(V9FS_DEFUSER, GFP_KERNEL);
+	if (!v9ses->uname)
+		goto err_names;
+
+	v9ses->aname = kstrdup(V9FS_DEFANAME, GFP_KERNEL);
+	if (!v9ses->aname)
+		goto err_names;
+	init_rwsem(&v9ses->rename_sem);
+
+	rc = bdi_setup_and_register(&v9ses->bdi, "9p");
+	if (rc)
+		goto err_names;
+
+	v9ses->uid = INVALID_UID;
+	v9ses->dfltuid = V9FS_DEFUID;
+	v9ses->dfltgid = V9FS_DEFGID;
+
+	v9ses->clnt = p9_client_create(dev_name, data);
+	if (IS_ERR(v9ses->clnt)) {
+		rc = PTR_ERR(v9ses->clnt);
+		p9_debug(P9_DEBUG_ERROR, "problem initializing 9p client\n");
+		goto err_bdi;
+	}
+
+	v9ses->flags = V9FS_ACCESS_USER;
+
+	if (p9_is_proto_dotl(v9ses->clnt)) {
+		v9ses->flags = V9FS_ACCESS_CLIENT;
+		v9ses->flags |= V9FS_PROTO_2000L;
+	} else if (p9_is_proto_dotu(v9ses->clnt)) {
+		v9ses->flags |= V9FS_PROTO_2000U;
+	}
+
+	rc = v9fs_parse_options(v9ses, data);
+	if (rc < 0)
+		goto err_clnt;
+
+	v9ses->maxdata = v9ses->clnt->msize - P9_IOHDRSZ;
+
+	if (!v9fs_proto_dotl(v9ses) &&
+	    ((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)) {
+		/*
+		 * We support ACCESS_CLIENT only for dotl.
+		 * Fall back to ACCESS_USER
+		 */
+		v9ses->flags &= ~V9FS_ACCESS_MASK;
+		v9ses->flags |= V9FS_ACCESS_USER;
+	}
+	/*FIXME !! */
+	/* for legacy mode, fall back to V9FS_ACCESS_ANY */
+	if (!(v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) &&
+	    ((v9ses->flags&V9FS_ACCESS_MASK) == V9FS_ACCESS_USER)) {
+
+		v9ses->flags &= ~V9FS_ACCESS_MASK;
+		v9ses->flags |= V9FS_ACCESS_ANY;
+		v9ses->uid = INVALID_UID;
+	}
+	if (!v9fs_proto_dotl(v9ses) ||
+	    !((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_CLIENT)) {
+		/*
+		 * We support ACL checks on client only if the protocol is
+		 * 9P2000.L and access is V9FS_ACCESS_CLIENT.
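+		 * When that does not hold, the V9FS_ACL_MASK bits are
+		 * cleared just below, so a posixacl mount option is
+		 * silently ignored.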
+ */ + v9ses->flags &= ~V9FS_ACL_MASK; + } + + fid = p9_client_attach(v9ses->clnt, NULL, v9ses->uname, INVALID_UID, + v9ses->aname); + if (IS_ERR(fid)) { + rc = PTR_ERR(fid); + p9_debug(P9_DEBUG_ERROR, "cannot attach\n"); + goto err_clnt; + } + + if ((v9ses->flags & V9FS_ACCESS_MASK) == V9FS_ACCESS_SINGLE) + fid->uid = v9ses->uid; + else + fid->uid = INVALID_UID; + +#ifdef CONFIG_9P_FSCACHE + /* register the session for caching */ + v9fs_cache_session_get_cookie(v9ses); +#endif + spin_lock(&v9fs_sessionlist_lock); + list_add(&v9ses->slist, &v9fs_sessionlist); + spin_unlock(&v9fs_sessionlist_lock); + + return fid; + +err_clnt: + p9_client_destroy(v9ses->clnt); +err_bdi: + bdi_destroy(&v9ses->bdi); +err_names: + kfree(v9ses->uname); + kfree(v9ses->aname); + return ERR_PTR(rc); +} + +/** + * v9fs_session_close - shutdown a session + * @v9ses: session information structure + * + */ + +void v9fs_session_close(struct v9fs_session_info *v9ses) +{ + if (v9ses->clnt) { + p9_client_destroy(v9ses->clnt); + v9ses->clnt = NULL; + } + +#ifdef CONFIG_9P_FSCACHE + if (v9ses->fscache) { + v9fs_cache_session_put_cookie(v9ses); + kfree(v9ses->cachetag); + } +#endif + kfree(v9ses->uname); + kfree(v9ses->aname); + + bdi_destroy(&v9ses->bdi); + + spin_lock(&v9fs_sessionlist_lock); + list_del(&v9ses->slist); + spin_unlock(&v9fs_sessionlist_lock); +} + +/** + * v9fs_session_cancel - terminate a session + * @v9ses: session to terminate + * + * mark transport as disconnected and cancel all pending requests. + */ + +void v9fs_session_cancel(struct v9fs_session_info *v9ses) { + p9_debug(P9_DEBUG_ERROR, "cancel session %p\n", v9ses); + p9_client_disconnect(v9ses->clnt); +} + +/** + * v9fs_session_begin_cancel - Begin terminate of a session + * @v9ses: session to terminate + * + * After this call we don't allow any request other than clunk. + */ + +void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses) +{ + p9_debug(P9_DEBUG_ERROR, "begin cancel session %p\n", v9ses); + p9_client_begin_disconnect(v9ses->clnt); +} + +extern int v9fs_error_init(void); + +static struct kobject *v9fs_kobj; + +#ifdef CONFIG_9P_FSCACHE +/** + * caches_show - list caches associated with a session + * + * Returns the size of buffer written. 
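+ *
+ * Note: this backs the read-only sysfs file /sys/fs/9p/caches (created
+ * in v9fs_sysfs_init() below), which lists the cachetag of every
+ * registered session, one per line.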
+ */ + +static ssize_t caches_show(struct kobject *kobj, + struct kobj_attribute *attr, + char *buf) +{ + ssize_t n = 0, count = 0, limit = PAGE_SIZE; + struct v9fs_session_info *v9ses; + + spin_lock(&v9fs_sessionlist_lock); + list_for_each_entry(v9ses, &v9fs_sessionlist, slist) { + if (v9ses->cachetag) { + n = snprintf(buf, limit, "%s\n", v9ses->cachetag); + if (n < 0) { + count = n; + break; + } + + count += n; + limit -= n; + } + } + + spin_unlock(&v9fs_sessionlist_lock); + return count; +} + +static struct kobj_attribute v9fs_attr_cache = __ATTR_RO(caches); +#endif /* CONFIG_9P_FSCACHE */ + +static struct attribute *v9fs_attrs[] = { +#ifdef CONFIG_9P_FSCACHE + &v9fs_attr_cache.attr, +#endif + NULL, +}; + +static struct attribute_group v9fs_attr_group = { + .attrs = v9fs_attrs, +}; + +/** + * v9fs_sysfs_init - Initialize the v9fs sysfs interface + * + */ + +static int __init v9fs_sysfs_init(void) +{ + v9fs_kobj = kobject_create_and_add("9p", fs_kobj); + if (!v9fs_kobj) + return -ENOMEM; + + if (sysfs_create_group(v9fs_kobj, &v9fs_attr_group)) { + kobject_put(v9fs_kobj); + return -ENOMEM; + } + + return 0; +} + +/** + * v9fs_sysfs_cleanup - Unregister the v9fs sysfs interface + * + */ + +static void v9fs_sysfs_cleanup(void) +{ + sysfs_remove_group(v9fs_kobj, &v9fs_attr_group); + kobject_put(v9fs_kobj); +} + +static void v9fs_inode_init_once(void *foo) +{ + struct v9fs_inode *v9inode = (struct v9fs_inode *)foo; +#ifdef CONFIG_9P_FSCACHE + v9inode->fscache = NULL; +#endif + memset(&v9inode->qid, 0, sizeof(v9inode->qid)); + inode_init_once(&v9inode->vfs_inode); +} + +/** + * v9fs_init_inode_cache - initialize a cache for 9P + * Returns 0 on success. + */ +static int v9fs_init_inode_cache(void) +{ + v9fs_inode_cache = kmem_cache_create("v9fs_inode_cache", + sizeof(struct v9fs_inode), + 0, (SLAB_RECLAIM_ACCOUNT| + SLAB_MEM_SPREAD), + v9fs_inode_init_once); + if (!v9fs_inode_cache) + return -ENOMEM; + + return 0; +} + +/** + * v9fs_destroy_inode_cache - destroy the cache of 9P inode + * + */ +static void v9fs_destroy_inode_cache(void) +{ + /* + * Make sure all delayed rcu free inodes are flushed before we + * destroy cache. 
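+	 * Without this barrier, kmem_cache_destroy() could run while RCU
+	 * callbacks still hold objects from v9fs_inode_cache.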
+	 */
+	rcu_barrier();
+	kmem_cache_destroy(v9fs_inode_cache);
+}
+
+static int v9fs_cache_register(void)
+{
+	int ret;
+	ret = v9fs_init_inode_cache();
+	if (ret < 0)
+		return ret;
+#ifdef CONFIG_9P_FSCACHE
+	ret = fscache_register_netfs(&v9fs_cache_netfs);
+	if (ret < 0)
+		v9fs_destroy_inode_cache();
+#endif
+	return ret;
+}
+
+static void v9fs_cache_unregister(void)
+{
+	v9fs_destroy_inode_cache();
+#ifdef CONFIG_9P_FSCACHE
+	fscache_unregister_netfs(&v9fs_cache_netfs);
+#endif
+}
+
+/**
+ * init_v9fs - Initialize module
+ *
+ */
+
+static int __init init_v9fs(void)
+{
+	int err;
+	pr_info("Installing v9fs 9p2000 file system support\n");
+	/* TODO: Setup list of registered transport modules */
+
+	err = v9fs_cache_register();
+	if (err < 0) {
+		pr_err("Failed to register v9fs for caching\n");
+		return err;
+	}
+
+	err = v9fs_sysfs_init();
+	if (err < 0) {
+		pr_err("Failed to register with sysfs\n");
+		goto out_cache;
+	}
+	err = register_filesystem(&v9fs_fs_type);
+	if (err < 0) {
+		pr_err("Failed to register filesystem\n");
+		goto out_sysfs_cleanup;
+	}
+
+	return 0;
+
+out_sysfs_cleanup:
+	v9fs_sysfs_cleanup();
+
+out_cache:
+	v9fs_cache_unregister();
+
+	return err;
+}
+
+/**
+ * exit_v9fs - shutdown module
+ *
+ */
+
+static void __exit exit_v9fs(void)
+{
+	v9fs_sysfs_cleanup();
+	v9fs_cache_unregister();
+	unregister_filesystem(&v9fs_fs_type);
+}
+
+module_init(init_v9fs)
+module_exit(exit_v9fs)
+
+MODULE_AUTHOR("Latchesar Ionkov <lucho@ionkov.net>");
+MODULE_AUTHOR("Eric Van Hensbergen <ericvh@gmail.com>");
+MODULE_AUTHOR("Ron Minnich <rminnich@lanl.gov>");
+MODULE_LICENSE("GPL");
diff --git a/addons/9p/src/4.4.180/v9fs.h b/addons/9p/src/4.4.180/v9fs.h
new file mode 100644
index 00000000..3775f275
--- /dev/null
+++ b/addons/9p/src/4.4.180/v9fs.h
@@ -0,0 +1,226 @@
+/*
+ * V9FS definitions.
+ *
+ * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
+ * Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to:
+ *  Free Software Foundation
+ *  51 Franklin Street, Fifth Floor
+ *  Boston, MA  02111-1301  USA
+ *
+ */
+#ifndef FS_9P_V9FS_H
+#define FS_9P_V9FS_H
+
+#include <linux/backing-dev.h>
+
+/**
+ * enum p9_session_flags - option flags for each 9P session
+ * @V9FS_PROTO_2000U: whether or not to use 9P2000.u extensions
+ * @V9FS_PROTO_2000L: whether or not to use 9P2000.l extensions
+ * @V9FS_ACCESS_SINGLE: only the mounting user can access the hierarchy
+ * @V9FS_ACCESS_USER: a new attach will be issued for every user (default)
+ * @V9FS_ACCESS_CLIENT: Just like user, but access check is performed on client.
+ * @V9FS_ACCESS_ANY: use a single attach for all users + * @V9FS_ACCESS_MASK: bit mask of different ACCESS options + * @V9FS_POSIX_ACL: POSIX ACLs are enforced + * + * Session flags reflect options selected by users at mount time + */ +#define V9FS_ACCESS_ANY (V9FS_ACCESS_SINGLE | \ + V9FS_ACCESS_USER | \ + V9FS_ACCESS_CLIENT) +#define V9FS_ACCESS_MASK V9FS_ACCESS_ANY +#define V9FS_ACL_MASK V9FS_POSIX_ACL + +enum p9_session_flags { + V9FS_PROTO_2000U = 0x01, + V9FS_PROTO_2000L = 0x02, + V9FS_ACCESS_SINGLE = 0x04, + V9FS_ACCESS_USER = 0x08, + V9FS_ACCESS_CLIENT = 0x10, + V9FS_POSIX_ACL = 0x20 +}; + +/* possible values of ->cache */ +/** + * enum p9_cache_modes - user specified cache preferences + * @CACHE_NONE: do not cache data, dentries, or directory contents (default) + * @CACHE_LOOSE: cache data, dentries, and directory contents w/no consistency + * + * eventually support loose, tight, time, session, default always none + */ + +enum p9_cache_modes { + CACHE_NONE, + CACHE_MMAP, + CACHE_LOOSE, + CACHE_FSCACHE, +}; + +/** + * struct v9fs_session_info - per-instance session information + * @flags: session options of type &p9_session_flags + * @nodev: set to 1 to disable device mapping + * @debug: debug level + * @afid: authentication handle + * @cache: cache mode of type &p9_cache_modes + * @cachetag: the tag of the cache associated with this session + * @fscache: session cookie associated with FS-Cache + * @uname: string user name to mount hierarchy as + * @aname: mount specifier for remote hierarchy + * @maxdata: maximum data to be sent/recvd per protocol message + * @dfltuid: default numeric userid to mount hierarchy as + * @dfltgid: default numeric groupid to mount hierarchy as + * @uid: if %V9FS_ACCESS_SINGLE, the numeric uid which mounted the hierarchy + * @clnt: reference to 9P network client instantiated for this session + * @slist: reference to list of registered 9p sessions + * + * This structure holds state for each session instance established during + * a sys_mount() . + * + * Bugs: there seems to be a lot of state which could be condensed and/or + * removed. 
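+ *
+ * Note: one of these structures exists per mount; the option fields
+ * mirror whatever v9fs_parse_options() extracted from the mount data
+ * string.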
+ */ + +struct v9fs_session_info { + /* options */ + unsigned char flags; + unsigned char nodev; + unsigned short debug; + unsigned int afid; + unsigned int cache; +#ifdef CONFIG_9P_FSCACHE + char *cachetag; + struct fscache_cookie *fscache; +#endif + + char *uname; /* user name to mount as */ + char *aname; /* name of remote hierarchy being mounted */ + unsigned int maxdata; /* max data for client interface */ + kuid_t dfltuid; /* default uid/muid for legacy support */ + kgid_t dfltgid; /* default gid for legacy support */ + kuid_t uid; /* if ACCESS_SINGLE, the uid that has access */ + struct p9_client *clnt; /* 9p client */ + struct list_head slist; /* list of sessions registered with v9fs */ + struct backing_dev_info bdi; + struct rw_semaphore rename_sem; + long session_lock_timeout; /* retry interval for blocking locks */ +}; + +/* cache_validity flags */ +#define V9FS_INO_INVALID_ATTR 0x01 + +struct v9fs_inode { +#ifdef CONFIG_9P_FSCACHE + struct mutex fscache_lock; + struct fscache_cookie *fscache; +#endif + struct p9_qid qid; + unsigned int cache_validity; + struct p9_fid *writeback_fid; + struct mutex v_mutex; + struct inode vfs_inode; +}; + +static inline struct v9fs_inode *V9FS_I(const struct inode *inode) +{ + return container_of(inode, struct v9fs_inode, vfs_inode); +} + +struct p9_fid *v9fs_session_init(struct v9fs_session_info *, const char *, + char *); +extern void v9fs_session_close(struct v9fs_session_info *v9ses); +extern void v9fs_session_cancel(struct v9fs_session_info *v9ses); +extern void v9fs_session_begin_cancel(struct v9fs_session_info *v9ses); +extern struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, + unsigned int flags); +extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d); +extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d); +extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry); +extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses, + struct p9_fid *fid, + struct super_block *sb, int new); +extern const struct inode_operations v9fs_dir_inode_operations_dotl; +extern const struct inode_operations v9fs_file_inode_operations_dotl; +extern const struct inode_operations v9fs_symlink_inode_operations_dotl; +extern struct inode *v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, + struct p9_fid *fid, + struct super_block *sb, int new); + +/* other default globals */ +#define V9FS_PORT 564 +#define V9FS_DEFUSER "nobody" +#define V9FS_DEFANAME "" +#define V9FS_DEFUID KUIDT_INIT(-2) +#define V9FS_DEFGID KGIDT_INIT(-2) + +static inline struct v9fs_session_info *v9fs_inode2v9ses(struct inode *inode) +{ + return (inode->i_sb->s_fs_info); +} + +static inline struct v9fs_session_info *v9fs_dentry2v9ses(struct dentry *dentry) +{ + return dentry->d_sb->s_fs_info; +} + +static inline int v9fs_proto_dotu(struct v9fs_session_info *v9ses) +{ + return v9ses->flags & V9FS_PROTO_2000U; +} + +static inline int v9fs_proto_dotl(struct v9fs_session_info *v9ses) +{ + return v9ses->flags & V9FS_PROTO_2000L; +} + +/** + * v9fs_get_inode_from_fid - Helper routine to populate an inode by + * issuing a attribute request + * @v9ses: session information + * @fid: fid to issue attribute request for + * @sb: superblock on which to create inode + * + */ +static inline struct inode * +v9fs_get_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, + struct super_block *sb) +{ + if (v9fs_proto_dotl(v9ses)) + return 
v9fs_inode_from_fid_dotl(v9ses, fid, sb, 0); + else + return v9fs_inode_from_fid(v9ses, fid, sb, 0); +} + +/** + * v9fs_get_new_inode_from_fid - Helper routine to populate an inode by + * issuing a attribute request + * @v9ses: session information + * @fid: fid to issue attribute request for + * @sb: superblock on which to create inode + * + */ +static inline struct inode * +v9fs_get_new_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, + struct super_block *sb) +{ + if (v9fs_proto_dotl(v9ses)) + return v9fs_inode_from_fid_dotl(v9ses, fid, sb, 1); + else + return v9fs_inode_from_fid(v9ses, fid, sb, 1); +} + +#endif diff --git a/addons/9p/src/4.4.180/v9fs_vfs.h b/addons/9p/src/4.4.180/v9fs_vfs.h new file mode 100644 index 00000000..aaee1e65 --- /dev/null +++ b/addons/9p/src/4.4.180/v9fs_vfs.h @@ -0,0 +1,105 @@ +/* + * V9FS VFS extensions. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ +#ifndef FS_9P_V9FS_VFS_H +#define FS_9P_V9FS_VFS_H + +/* plan9 semantics are that created files are implicitly opened. + * But linux semantics are that you call create, then open. + * the plan9 approach is superior as it provides an atomic + * open. + * we track the create fid here. When the file is opened, if fidopen is + * non-zero, we use the fid and can skip some steps. + * there may be a better way to do this, but I don't know it. + * one BAD way is to clunk the fid on create, then open it again: + * you lose the atomicity of file open + */ + +/* special case: + * unlink calls remove, which is an implicit clunk. So we have to track + * that kind of thing so that we don't try to clunk a dead fid. 
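+ * (in 9P terms, a Tremove both removes the file and clunks the fid,
+ * even on failure, so sending a second Tclunk for that fid would be a
+ * protocol error)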
+ */ +#define P9_LOCK_TIMEOUT (30*HZ) + +/* flags for v9fs_stat2inode() & v9fs_stat2inode_dotl() */ +#define V9FS_STAT2INODE_KEEP_ISIZE 1 + +extern struct file_system_type v9fs_fs_type; +extern const struct address_space_operations v9fs_addr_operations; +extern const struct file_operations v9fs_file_operations; +extern const struct file_operations v9fs_file_operations_dotl; +extern const struct file_operations v9fs_dir_operations; +extern const struct file_operations v9fs_dir_operations_dotl; +extern const struct dentry_operations v9fs_dentry_operations; +extern const struct dentry_operations v9fs_cached_dentry_operations; +extern const struct file_operations v9fs_cached_file_operations; +extern const struct file_operations v9fs_cached_file_operations_dotl; +extern const struct file_operations v9fs_mmap_file_operations; +extern const struct file_operations v9fs_mmap_file_operations_dotl; +extern struct kmem_cache *v9fs_inode_cache; + +struct inode *v9fs_alloc_inode(struct super_block *sb); +void v9fs_destroy_inode(struct inode *inode); +struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t); +int v9fs_init_inode(struct v9fs_session_info *v9ses, + struct inode *inode, umode_t mode, dev_t); +void v9fs_evict_inode(struct inode *inode); +ino_t v9fs_qid2ino(struct p9_qid *qid); +void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, + struct super_block *sb, unsigned int flags); +void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode, + unsigned int flags); +int v9fs_dir_release(struct inode *inode, struct file *filp); +int v9fs_file_open(struct inode *inode, struct file *file); +void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat); +int v9fs_uflags2omode(int uflags, int extended); + +void v9fs_blank_wstat(struct p9_wstat *wstat); +int v9fs_vfs_setattr_dotl(struct dentry *, struct iattr *); +int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, + int datasync); +int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode); +int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode); +static inline void v9fs_invalidate_inode_attr(struct inode *inode) +{ + struct v9fs_inode *v9inode; + v9inode = V9FS_I(inode); + v9inode->cache_validity |= V9FS_INO_INVALID_ATTR; + return; +} + +int v9fs_open_to_dotl_flags(int flags); + +static inline void v9fs_i_size_write(struct inode *inode, loff_t i_size) +{ + /* + * 32-bit need the lock, concurrent updates could break the + * sequences and make i_size_read() loop forever. + * 64-bit updates are atomic and can skip the locking. + */ + if (sizeof(i_size) > sizeof(long)) + spin_lock(&inode->i_lock); + i_size_write(inode, i_size); + if (sizeof(i_size) > sizeof(long)) + spin_unlock(&inode->i_lock); +} +#endif diff --git a/addons/9p/src/4.4.180/vfs_addr.c b/addons/9p/src/4.4.180/vfs_addr.c new file mode 100644 index 00000000..e9e04376 --- /dev/null +++ b/addons/9p/src/4.4.180/vfs_addr.c @@ -0,0 +1,351 @@ +/* + * linux/fs/9p/vfs_addr.c + * + * This file contians vfs address (mmap) ops for 9P2000. + * + * Copyright (C) 2005 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "cache.h" +#include "fid.h" + +/** + * v9fs_fid_readpage - read an entire page in from 9P + * + * @fid: fid being read + * @page: structure to page + * + */ +static int v9fs_fid_readpage(struct p9_fid *fid, struct page *page) +{ + struct inode *inode = page->mapping->host; + struct bio_vec bvec = {.bv_page = page, .bv_len = PAGE_SIZE}; + struct iov_iter to; + int retval, err; + + p9_debug(P9_DEBUG_VFS, "\n"); + + BUG_ON(!PageLocked(page)); + + retval = v9fs_readpage_from_fscache(inode, page); + if (retval == 0) + return retval; + + iov_iter_bvec(&to, ITER_BVEC | READ, &bvec, 1, PAGE_SIZE); + + retval = p9_client_read(fid, page_offset(page), &to, &err); + if (err) { + v9fs_uncache_page(inode, page); + retval = err; + goto done; + } + + zero_user(page, retval, PAGE_SIZE - retval); + flush_dcache_page(page); + SetPageUptodate(page); + + v9fs_readpage_to_fscache(inode, page); + retval = 0; + +done: + unlock_page(page); + return retval; +} + +/** + * v9fs_vfs_readpage - read an entire page in from 9P + * + * @filp: file being read + * @page: structure to page + * + */ + +static int v9fs_vfs_readpage(struct file *filp, struct page *page) +{ + return v9fs_fid_readpage(filp->private_data, page); +} + +/** + * v9fs_vfs_readpages - read a set of pages from 9P + * + * @filp: file being read + * @mapping: the address space + * @pages: list of pages to read + * @nr_pages: count of pages to read + * + */ + +static int v9fs_vfs_readpages(struct file *filp, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages) +{ + int ret = 0; + struct inode *inode; + + inode = mapping->host; + p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, filp); + + ret = v9fs_readpages_from_fscache(inode, mapping, pages, &nr_pages); + if (ret == 0) + return ret; + + ret = read_cache_pages(mapping, pages, (void *)v9fs_vfs_readpage, filp); + p9_debug(P9_DEBUG_VFS, " = %d\n", ret); + return ret; +} + +/** + * v9fs_release_page - release the private state associated with a page + * + * Returns 1 if the page can be released, false otherwise. 
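+ *
+ * Note: pages still carrying private state are refused here; otherwise
+ * the decision is delegated to FS-Cache via v9fs_fscache_release_page().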
+ */ + +static int v9fs_release_page(struct page *page, gfp_t gfp) +{ + if (PagePrivate(page)) + return 0; + return v9fs_fscache_release_page(page, gfp); +} + +/** + * v9fs_invalidate_page - Invalidate a page completely or partially + * + * @page: structure to page + * @offset: offset in the page + */ + +static void v9fs_invalidate_page(struct page *page, unsigned int offset, + unsigned int length) +{ + /* + * If called with zero offset, we should release + * the private state assocated with the page + */ + if (offset == 0 && length == PAGE_CACHE_SIZE) + v9fs_fscache_invalidate_page(page); +} + +static int v9fs_vfs_writepage_locked(struct page *page) +{ + struct inode *inode = page->mapping->host; + struct v9fs_inode *v9inode = V9FS_I(inode); + loff_t size = i_size_read(inode); + struct iov_iter from; + struct bio_vec bvec; + int err, len; + + if (page->index == size >> PAGE_CACHE_SHIFT) + len = size & ~PAGE_CACHE_MASK; + else + len = PAGE_CACHE_SIZE; + + bvec.bv_page = page; + bvec.bv_offset = 0; + bvec.bv_len = len; + iov_iter_bvec(&from, ITER_BVEC | WRITE, &bvec, 1, len); + + /* We should have writeback_fid always set */ + BUG_ON(!v9inode->writeback_fid); + + set_page_writeback(page); + + p9_client_write(v9inode->writeback_fid, page_offset(page), &from, &err); + + end_page_writeback(page); + return err; +} + +static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc) +{ + int retval; + + p9_debug(P9_DEBUG_VFS, "page %p\n", page); + + retval = v9fs_vfs_writepage_locked(page); + if (retval < 0) { + if (retval == -EAGAIN) { + redirty_page_for_writepage(wbc, page); + retval = 0; + } else { + SetPageError(page); + mapping_set_error(page->mapping, retval); + } + } else + retval = 0; + + unlock_page(page); + return retval; +} + +/** + * v9fs_launder_page - Writeback a dirty page + * Returns 0 on success. + */ + +static int v9fs_launder_page(struct page *page) +{ + int retval; + struct inode *inode = page->mapping->host; + + v9fs_fscache_wait_on_page_write(inode, page); + if (clear_page_dirty_for_io(page)) { + retval = v9fs_vfs_writepage_locked(page); + if (retval) + return retval; + } + return 0; +} + +/** + * v9fs_direct_IO - 9P address space operation for direct I/O + * @iocb: target I/O control block + * @pos: offset in file to begin the operation + * + * The presence of v9fs_direct_IO() in the address space ops vector + * allowes open() O_DIRECT flags which would have failed otherwise. + * + * In the non-cached mode, we shunt off direct read and write requests before + * the VFS gets them, so this method should never be called. + * + * Direct IO is not 'yet' supported in the cached mode. Hence when + * this routine is called through generic_file_aio_read(), the read/write fails + * with an error. + * + */ +static ssize_t +v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) +{ + struct file *file = iocb->ki_filp; + ssize_t n; + int err = 0; + if (iov_iter_rw(iter) == WRITE) { + n = p9_client_write(file->private_data, pos, iter, &err); + if (n) { + struct inode *inode = file_inode(file); + loff_t i_size = i_size_read(inode); + if (pos + n > i_size) + inode_add_bytes(inode, pos + n - i_size); + } + } else { + n = p9_client_read(file->private_data, pos, iter, &err); + } + return n ? 
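+		/* report the bytes transferred when any were, otherwise
+		 * surface the error from the 9P client */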
n : err; +} + +static int v9fs_write_begin(struct file *filp, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata) +{ + int retval = 0; + struct page *page; + struct v9fs_inode *v9inode; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + struct inode *inode = mapping->host; + + + p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); + + v9inode = V9FS_I(inode); +start: + page = grab_cache_page_write_begin(mapping, index, flags); + if (!page) { + retval = -ENOMEM; + goto out; + } + BUG_ON(!v9inode->writeback_fid); + if (PageUptodate(page)) + goto out; + + if (len == PAGE_CACHE_SIZE) + goto out; + + retval = v9fs_fid_readpage(v9inode->writeback_fid, page); + page_cache_release(page); + if (!retval) + goto start; +out: + *pagep = page; + return retval; +} + +static int v9fs_write_end(struct file *filp, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata) +{ + loff_t last_pos = pos + copied; + struct inode *inode = page->mapping->host; + + p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); + + if (unlikely(copied < len)) { + /* + * zero out the rest of the area + */ + unsigned from = pos & (PAGE_CACHE_SIZE - 1); + + zero_user(page, from + copied, len - copied); + flush_dcache_page(page); + } + + if (!PageUptodate(page)) + SetPageUptodate(page); + /* + * No need to use i_size_read() here, the i_size + * cannot change under us because we hold the i_mutex. + */ + if (last_pos > inode->i_size) { + inode_add_bytes(inode, last_pos - inode->i_size); + i_size_write(inode, last_pos); + } + set_page_dirty(page); + unlock_page(page); + page_cache_release(page); + + return copied; +} + + +const struct address_space_operations v9fs_addr_operations = { + .readpage = v9fs_vfs_readpage, + .readpages = v9fs_vfs_readpages, + .set_page_dirty = __set_page_dirty_nobuffers, + .writepage = v9fs_vfs_writepage, + .write_begin = v9fs_write_begin, + .write_end = v9fs_write_end, + .releasepage = v9fs_release_page, + .invalidatepage = v9fs_invalidate_page, + .launder_page = v9fs_launder_page, + .direct_IO = v9fs_direct_IO, +}; diff --git a/addons/9p/src/4.4.180/vfs_dentry.c b/addons/9p/src/4.4.180/vfs_dentry.c new file mode 100644 index 00000000..bd456c66 --- /dev/null +++ b/addons/9p/src/4.4.180/vfs_dentry.c @@ -0,0 +1,122 @@ +/* + * linux/fs/9p/vfs_dentry.c + * + * This file contians vfs dentry ops for the 9P2000 protocol. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" + +/** + * v9fs_cached_dentry_delete - called when dentry refcount equals 0 + * @dentry: dentry in question + * + */ +static int v9fs_cached_dentry_delete(const struct dentry *dentry) +{ + p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n", + dentry, dentry); + + /* Don't cache negative dentries */ + if (d_really_is_negative(dentry)) + return 1; + return 0; +} + +/** + * v9fs_dentry_release - called when dentry is going to be freed + * @dentry: dentry that is being release + * + */ + +static void v9fs_dentry_release(struct dentry *dentry) +{ + struct hlist_node *p, *n; + p9_debug(P9_DEBUG_VFS, " dentry: %pd (%p)\n", + dentry, dentry); + hlist_for_each_safe(p, n, (struct hlist_head *)&dentry->d_fsdata) + p9_client_clunk(hlist_entry(p, struct p9_fid, dlist)); + dentry->d_fsdata = NULL; +} + +static int v9fs_lookup_revalidate(struct dentry *dentry, unsigned int flags) +{ + struct p9_fid *fid; + struct inode *inode; + struct v9fs_inode *v9inode; + + if (flags & LOOKUP_RCU) + return -ECHILD; + + inode = d_inode(dentry); + if (!inode) + goto out_valid; + + v9inode = V9FS_I(inode); + if (v9inode->cache_validity & V9FS_INO_INVALID_ATTR) { + int retval; + struct v9fs_session_info *v9ses; + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + v9ses = v9fs_inode2v9ses(inode); + if (v9fs_proto_dotl(v9ses)) + retval = v9fs_refresh_inode_dotl(fid, inode); + else + retval = v9fs_refresh_inode(fid, inode); + if (retval == -ENOENT) + return 0; + if (retval < 0) + return retval; + } +out_valid: + return 1; +} + +const struct dentry_operations v9fs_cached_dentry_operations = { + .d_revalidate = v9fs_lookup_revalidate, + .d_weak_revalidate = v9fs_lookup_revalidate, + .d_delete = v9fs_cached_dentry_delete, + .d_release = v9fs_dentry_release, +}; + +const struct dentry_operations v9fs_dentry_operations = { + .d_delete = always_delete_dentry, + .d_release = v9fs_dentry_release, +}; diff --git a/addons/9p/src/4.4.180/vfs_dir.c b/addons/9p/src/4.4.180/vfs_dir.c new file mode 100644 index 00000000..05769219 --- /dev/null +++ b/addons/9p/src/4.4.180/vfs_dir.c @@ -0,0 +1,248 @@ +/* + * linux/fs/9p/vfs_dir.c + * + * This file contains vfs directory ops for the 9P2000 protocol. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" + +/** + * struct p9_rdir - readdir accounting + * @head: start offset of current dirread buffer + * @tail: end offset of current dirread buffer + * @buf: dirread buffer + * + * private structure for keeping track of readdir + * allocated on demand + */ + +struct p9_rdir { + int head; + int tail; + uint8_t buf[]; +}; + +/** + * dt_type - return file type + * @mistat: mistat structure + * + */ + +static inline int dt_type(struct p9_wstat *mistat) +{ + unsigned long perm = mistat->mode; + int rettype = DT_REG; + + if (perm & P9_DMDIR) + rettype = DT_DIR; + if (perm & P9_DMSYMLINK) + rettype = DT_LNK; + + return rettype; +} + +/** + * v9fs_alloc_rdir_buf - Allocate buffer used for read and readdir + * @filp: opened file structure + * @buflen: Length in bytes of buffer to allocate + * + */ + +static struct p9_rdir *v9fs_alloc_rdir_buf(struct file *filp, int buflen) +{ + struct p9_fid *fid = filp->private_data; + if (!fid->rdir) + fid->rdir = kzalloc(sizeof(struct p9_rdir) + buflen, GFP_KERNEL); + return fid->rdir; +} + +/** + * v9fs_dir_readdir - iterate through a directory + * @file: opened file structure + * @ctx: actor we feed the entries to + * + */ + +static int v9fs_dir_readdir(struct file *file, struct dir_context *ctx) +{ + bool over; + struct p9_wstat st; + int err = 0; + struct p9_fid *fid; + int buflen; + struct p9_rdir *rdir; + struct kvec kvec; + + p9_debug(P9_DEBUG_VFS, "name %pD\n", file); + fid = file->private_data; + + buflen = fid->clnt->msize - P9_IOHDRSZ; + + rdir = v9fs_alloc_rdir_buf(file, buflen); + if (!rdir) + return -ENOMEM; + kvec.iov_base = rdir->buf; + kvec.iov_len = buflen; + + while (1) { + if (rdir->tail == rdir->head) { + struct iov_iter to; + int n; + iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buflen); + n = p9_client_read(file->private_data, ctx->pos, &to, + &err); + if (err) + return err; + if (n == 0) + return 0; + + rdir->head = 0; + rdir->tail = n; + } + while (rdir->head < rdir->tail) { + err = p9stat_read(fid->clnt, rdir->buf + rdir->head, + rdir->tail - rdir->head, &st); + if (err <= 0) { + p9_debug(P9_DEBUG_VFS, "returned %d\n", err); + return -EIO; + } + + over = !dir_emit(ctx, st.name, strlen(st.name), + v9fs_qid2ino(&st.qid), dt_type(&st)); + p9stat_free(&st); + if (over) + return 0; + + rdir->head += err; + ctx->pos += err; + } + } +} + +/** + * v9fs_dir_readdir_dotl - iterate through a directory + * @file: opened file structure + * @ctx: actor we feed the entries to + * + */ +static int v9fs_dir_readdir_dotl(struct file *file, struct dir_context *ctx) +{ + int err = 0; + struct p9_fid *fid; + int buflen; + struct p9_rdir *rdir; + struct p9_dirent curdirent; + + p9_debug(P9_DEBUG_VFS, "name %pD\n", file); + fid = file->private_data; + + buflen = fid->clnt->msize - P9_READDIRHDRSZ; + + rdir = v9fs_alloc_rdir_buf(file, buflen); + if (!rdir) + return -ENOMEM; + + while (1) { + if (rdir->tail == rdir->head) { + err = p9_client_readdir(fid, rdir->buf, buflen, + ctx->pos); + if (err <= 0) + return err; + + rdir->head = 0; + rdir->tail = err; + } + + while (rdir->head < rdir->tail) { + + err = p9dirent_read(fid->clnt, rdir->buf + 
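+					/* decode one wire-format dirent
+					 * from the buffered window */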
rdir->head, + rdir->tail - rdir->head, + &curdirent); + if (err < 0) { + p9_debug(P9_DEBUG_VFS, "returned %d\n", err); + return -EIO; + } + + if (!dir_emit(ctx, curdirent.d_name, + strlen(curdirent.d_name), + v9fs_qid2ino(&curdirent.qid), + curdirent.d_type)) + return 0; + + ctx->pos = curdirent.d_off; + rdir->head += err; + } + } +} + + +/** + * v9fs_dir_release - close a directory + * @inode: inode of the directory + * @filp: file pointer to a directory + * + */ + +int v9fs_dir_release(struct inode *inode, struct file *filp) +{ + struct p9_fid *fid; + + fid = filp->private_data; + p9_debug(P9_DEBUG_VFS, "inode: %p filp: %p fid: %d\n", + inode, filp, fid ? fid->fid : -1); + if (fid) + p9_client_clunk(fid); + return 0; +} + +const struct file_operations v9fs_dir_operations = { + .read = generic_read_dir, + .llseek = generic_file_llseek, + .iterate = v9fs_dir_readdir, + .open = v9fs_file_open, + .release = v9fs_dir_release, +}; + +const struct file_operations v9fs_dir_operations_dotl = { + .read = generic_read_dir, + .llseek = generic_file_llseek, + .iterate = v9fs_dir_readdir_dotl, + .open = v9fs_file_open, + .release = v9fs_dir_release, + .fsync = v9fs_file_fsync_dotl, +}; diff --git a/addons/9p/src/4.4.180/vfs_file.c b/addons/9p/src/4.4.180/vfs_file.c new file mode 100644 index 00000000..373cc505 --- /dev/null +++ b/addons/9p/src/4.4.180/vfs_file.c @@ -0,0 +1,726 @@ +/* + * linux/fs/9p/vfs_file.c + * + * This file contians vfs file ops for 9P2000. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" +#include "cache.h" + +static const struct vm_operations_struct v9fs_file_vm_ops; +static const struct vm_operations_struct v9fs_mmap_file_vm_ops; + +/** + * v9fs_file_open - open a file (or directory) + * @inode: inode to be opened + * @file: file being opened + * + */ + +int v9fs_file_open(struct inode *inode, struct file *file) +{ + int err; + struct v9fs_inode *v9inode; + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + int omode; + + p9_debug(P9_DEBUG_VFS, "inode: %p file: %p\n", inode, file); + v9inode = V9FS_I(inode); + v9ses = v9fs_inode2v9ses(inode); + if (v9fs_proto_dotl(v9ses)) + omode = v9fs_open_to_dotl_flags(file->f_flags); + else + omode = v9fs_uflags2omode(file->f_flags, + v9fs_proto_dotu(v9ses)); + fid = file->private_data; + if (!fid) { + fid = v9fs_fid_clone(file_dentry(file)); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + err = p9_client_open(fid, omode); + if (err < 0) { + p9_client_clunk(fid); + return err; + } + if ((file->f_flags & O_APPEND) && + (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses))) + generic_file_llseek(file, 0, SEEK_END); + } + + file->private_data = fid; + mutex_lock(&v9inode->v_mutex); + if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) && + !v9inode->writeback_fid && + ((file->f_flags & O_ACCMODE) != O_RDONLY)) { + /* + * clone a fid and add it to writeback_fid + * we do it during open time instead of + * page dirty time via write_begin/page_mkwrite + * because we want write after unlink usecase + * to work. 
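+	 * The cloned fid stays attached to the inode as
+	 * v9inode->writeback_fid, so dirty pages can still be flushed
+	 * after the user's own fid has been clunked.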
+ */ + fid = v9fs_writeback_fid(file_dentry(file)); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + mutex_unlock(&v9inode->v_mutex); + goto out_error; + } + v9inode->writeback_fid = (void *) fid; + } + mutex_unlock(&v9inode->v_mutex); + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) + v9fs_cache_inode_set_cookie(inode, file); + return 0; +out_error: + p9_client_clunk(file->private_data); + file->private_data = NULL; + return err; +} + +/** + * v9fs_file_lock - lock a file (or directory) + * @filp: file to be locked + * @cmd: lock command + * @fl: file lock structure + * + * Bugs: this looks like a local only lock, we should extend into 9P + * by using open exclusive + */ + +static int v9fs_file_lock(struct file *filp, int cmd, struct file_lock *fl) +{ + int res = 0; + struct inode *inode = file_inode(filp); + + p9_debug(P9_DEBUG_VFS, "filp: %p lock: %p\n", filp, fl); + + /* No mandatory locks */ + if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) + return -ENOLCK; + + if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { + filemap_write_and_wait(inode->i_mapping); + invalidate_mapping_pages(&inode->i_data, 0, -1); + } + + return res; +} + +static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) +{ + struct p9_flock flock; + struct p9_fid *fid; + uint8_t status = P9_LOCK_ERROR; + int res = 0; + unsigned char fl_type; + struct v9fs_session_info *v9ses; + + fid = filp->private_data; + BUG_ON(fid == NULL); + + if ((fl->fl_flags & FL_POSIX) != FL_POSIX) + BUG(); + + res = locks_lock_file_wait(filp, fl); + if (res < 0) + goto out; + + /* convert posix lock to p9 tlock args */ + memset(&flock, 0, sizeof(flock)); + /* map the lock type */ + switch (fl->fl_type) { + case F_RDLCK: + flock.type = P9_LOCK_TYPE_RDLCK; + break; + case F_WRLCK: + flock.type = P9_LOCK_TYPE_WRLCK; + break; + case F_UNLCK: + flock.type = P9_LOCK_TYPE_UNLCK; + break; + } + flock.start = fl->fl_start; + if (fl->fl_end == OFFSET_MAX) + flock.length = 0; + else + flock.length = fl->fl_end - fl->fl_start + 1; + flock.proc_id = fl->fl_pid; + flock.client_id = fid->clnt->name; + if (IS_SETLKW(cmd)) + flock.flags = P9_LOCK_FLAGS_BLOCK; + + v9ses = v9fs_inode2v9ses(file_inode(filp)); + + /* + * if its a blocked request and we get P9_LOCK_BLOCKED as the status + * for lock request, keep on trying + */ + for (;;) { + res = p9_client_lock_dotl(fid, &flock, &status); + if (res < 0) + goto out_unlock; + + if (status != P9_LOCK_BLOCKED) + break; + if (status == P9_LOCK_BLOCKED && !IS_SETLKW(cmd)) + break; + if (schedule_timeout_interruptible(v9ses->session_lock_timeout) + != 0) + break; + /* + * p9_client_lock_dotl overwrites flock.client_id with the + * server message, free and reuse the client name + */ + if (flock.client_id != fid->clnt->name) { + kfree(flock.client_id); + flock.client_id = fid->clnt->name; + } + } + + /* map 9p status to VFS status */ + switch (status) { + case P9_LOCK_SUCCESS: + res = 0; + break; + case P9_LOCK_BLOCKED: + res = -EAGAIN; + break; + default: + WARN_ONCE(1, "unknown lock status code: %d\n", status); + /* fallthough */ + case P9_LOCK_ERROR: + case P9_LOCK_GRACE: + res = -ENOLCK; + break; + } + +out_unlock: + /* + * incase server returned error for lock request, revert + * it locally + */ + if (res < 0 && fl->fl_type != F_UNLCK) { + fl_type = fl->fl_type; + fl->fl_type = F_UNLCK; + /* Even if this fails we want to return the remote error */ + locks_lock_file_wait(filp, fl); + fl->fl_type = fl_type; + } + if (flock.client_id != fid->clnt->name) + 
kfree(flock.client_id); +out: + return res; +} + +static int v9fs_file_getlock(struct file *filp, struct file_lock *fl) +{ + struct p9_getlock glock; + struct p9_fid *fid; + int res = 0; + + fid = filp->private_data; + BUG_ON(fid == NULL); + + posix_test_lock(filp, fl); + /* + * if we have a conflicting lock locally, no need to validate + * with server + */ + if (fl->fl_type != F_UNLCK) + return res; + + /* convert posix lock to p9 tgetlock args */ + memset(&glock, 0, sizeof(glock)); + glock.type = P9_LOCK_TYPE_UNLCK; + glock.start = fl->fl_start; + if (fl->fl_end == OFFSET_MAX) + glock.length = 0; + else + glock.length = fl->fl_end - fl->fl_start + 1; + glock.proc_id = fl->fl_pid; + glock.client_id = fid->clnt->name; + + res = p9_client_getlock_dotl(fid, &glock); + if (res < 0) + goto out; + /* map 9p lock type to os lock type */ + switch (glock.type) { + case P9_LOCK_TYPE_RDLCK: + fl->fl_type = F_RDLCK; + break; + case P9_LOCK_TYPE_WRLCK: + fl->fl_type = F_WRLCK; + break; + case P9_LOCK_TYPE_UNLCK: + fl->fl_type = F_UNLCK; + break; + } + if (glock.type != P9_LOCK_TYPE_UNLCK) { + fl->fl_start = glock.start; + if (glock.length == 0) + fl->fl_end = OFFSET_MAX; + else + fl->fl_end = glock.start + glock.length - 1; + fl->fl_pid = glock.proc_id; + } +out: + if (glock.client_id != fid->clnt->name) + kfree(glock.client_id); + return res; +} + +/** + * v9fs_file_lock_dotl - lock a file (or directory) + * @filp: file to be locked + * @cmd: lock command + * @fl: file lock structure + * + */ + +static int v9fs_file_lock_dotl(struct file *filp, int cmd, struct file_lock *fl) +{ + struct inode *inode = file_inode(filp); + int ret = -ENOLCK; + + p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n", + filp, cmd, fl, filp); + + /* No mandatory locks */ + if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) + goto out_err; + + if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { + filemap_write_and_wait(inode->i_mapping); + invalidate_mapping_pages(&inode->i_data, 0, -1); + } + + if (IS_SETLK(cmd) || IS_SETLKW(cmd)) + ret = v9fs_file_do_lock(filp, cmd, fl); + else if (IS_GETLK(cmd)) + ret = v9fs_file_getlock(filp, fl); + else + ret = -EINVAL; +out_err: + return ret; +} + +/** + * v9fs_file_flock_dotl - lock a file + * @filp: file to be locked + * @cmd: lock command + * @fl: file lock structure + * + */ + +static int v9fs_file_flock_dotl(struct file *filp, int cmd, + struct file_lock *fl) +{ + struct inode *inode = file_inode(filp); + int ret = -ENOLCK; + + p9_debug(P9_DEBUG_VFS, "filp: %p cmd:%d lock: %p name: %pD\n", + filp, cmd, fl, filp); + + /* No mandatory locks */ + if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) + goto out_err; + + if (!(fl->fl_flags & FL_FLOCK)) + goto out_err; + + if ((IS_SETLK(cmd) || IS_SETLKW(cmd)) && fl->fl_type != F_UNLCK) { + filemap_write_and_wait(inode->i_mapping); + invalidate_mapping_pages(&inode->i_data, 0, -1); + } + /* Convert flock to posix lock */ + fl->fl_flags |= FL_POSIX; + fl->fl_flags ^= FL_FLOCK; + + if (IS_SETLK(cmd) | IS_SETLKW(cmd)) + ret = v9fs_file_do_lock(filp, cmd, fl); + else + ret = -EINVAL; +out_err: + return ret; +} + +/** + * v9fs_file_read - read from a file + * @filp: file pointer to read + * @udata: user data buffer to read data into + * @count: size of buffer + * @offset: offset at which to read data + * + */ + +static ssize_t +v9fs_file_read_iter(struct kiocb *iocb, struct iov_iter *to) +{ + struct p9_fid *fid = iocb->ki_filp->private_data; + int ret, err = 0; + + p9_debug(P9_DEBUG_VFS, "count %zu offset 
%lld\n", + iov_iter_count(to), iocb->ki_pos); + + ret = p9_client_read(fid, iocb->ki_pos, to, &err); + if (!ret) + return err; + + iocb->ki_pos += ret; + return ret; +} + +/** + * v9fs_file_write - write to a file + * @filp: file pointer to write + * @data: data buffer to write data from + * @count: size of buffer + * @offset: offset at which to write data + * + */ +static ssize_t +v9fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) +{ + struct file *file = iocb->ki_filp; + ssize_t retval; + loff_t origin; + int err = 0; + + retval = generic_write_checks(iocb, from); + if (retval <= 0) + return retval; + + origin = iocb->ki_pos; + retval = p9_client_write(file->private_data, iocb->ki_pos, from, &err); + if (retval > 0) { + struct inode *inode = file_inode(file); + loff_t i_size; + unsigned long pg_start, pg_end; + pg_start = origin >> PAGE_CACHE_SHIFT; + pg_end = (origin + retval - 1) >> PAGE_CACHE_SHIFT; + if (inode->i_mapping && inode->i_mapping->nrpages) + invalidate_inode_pages2_range(inode->i_mapping, + pg_start, pg_end); + iocb->ki_pos += retval; + i_size = i_size_read(inode); + if (iocb->ki_pos > i_size) { + inode_add_bytes(inode, iocb->ki_pos - i_size); + /* + * Need to serialize against i_size_write() in + * v9fs_stat2inode() + */ + v9fs_i_size_write(inode, iocb->ki_pos); + } + return retval; + } + return err; +} + +static int v9fs_file_fsync(struct file *filp, loff_t start, loff_t end, + int datasync) +{ + struct p9_fid *fid; + struct inode *inode = filp->f_mapping->host; + struct p9_wstat wstat; + int retval; + + retval = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (retval) + return retval; + + mutex_lock(&inode->i_mutex); + p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync); + + fid = filp->private_data; + v9fs_blank_wstat(&wstat); + + retval = p9_client_wstat(fid, &wstat); + mutex_unlock(&inode->i_mutex); + + return retval; +} + +int v9fs_file_fsync_dotl(struct file *filp, loff_t start, loff_t end, + int datasync) +{ + struct p9_fid *fid; + struct inode *inode = filp->f_mapping->host; + int retval; + + retval = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (retval) + return retval; + + mutex_lock(&inode->i_mutex); + p9_debug(P9_DEBUG_VFS, "filp %p datasync %x\n", filp, datasync); + + fid = filp->private_data; + + retval = p9_client_fsync(fid, datasync); + mutex_unlock(&inode->i_mutex); + + return retval; +} + +static int +v9fs_file_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int retval; + + + retval = generic_file_mmap(filp, vma); + if (!retval) + vma->vm_ops = &v9fs_file_vm_ops; + + return retval; +} + +static int +v9fs_mmap_file_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int retval; + struct inode *inode; + struct v9fs_inode *v9inode; + struct p9_fid *fid; + + inode = file_inode(filp); + v9inode = V9FS_I(inode); + mutex_lock(&v9inode->v_mutex); + if (!v9inode->writeback_fid && + (vma->vm_flags & VM_WRITE)) { + /* + * clone a fid and add it to writeback_fid + * we do it during mmap instead of + * page dirty time via write_begin/page_mkwrite + * because we want write after unlink usecase + * to work. 
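+	 * Same writeback-fid arrangement as in v9fs_file_open();
+	 * v9fs_vm_page_mkwrite() below depends on this fid being present.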
+ */ + fid = v9fs_writeback_fid(file_dentry(filp)); + if (IS_ERR(fid)) { + retval = PTR_ERR(fid); + mutex_unlock(&v9inode->v_mutex); + return retval; + } + v9inode->writeback_fid = (void *) fid; + } + mutex_unlock(&v9inode->v_mutex); + + retval = generic_file_mmap(filp, vma); + if (!retval) + vma->vm_ops = &v9fs_mmap_file_vm_ops; + + return retval; +} + +static int +v9fs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct v9fs_inode *v9inode; + struct page *page = vmf->page; + struct file *filp = vma->vm_file; + struct inode *inode = file_inode(filp); + + + p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n", + page, (unsigned long)filp->private_data); + + /* Update file times before taking page lock */ + file_update_time(filp); + + v9inode = V9FS_I(inode); + /* make sure the cache has finished storing the page */ + v9fs_fscache_wait_on_page_write(inode, page); + BUG_ON(!v9inode->writeback_fid); + lock_page(page); + if (page->mapping != inode->i_mapping) + goto out_unlock; + wait_for_stable_page(page); + + return VM_FAULT_LOCKED; +out_unlock: + unlock_page(page); + return VM_FAULT_NOPAGE; +} + +/** + * v9fs_mmap_file_read - read from a file + * @filp: file pointer to read + * @data: user data buffer to read data into + * @count: size of buffer + * @offset: offset at which to read data + * + */ +static ssize_t +v9fs_mmap_file_read_iter(struct kiocb *iocb, struct iov_iter *to) +{ + /* TODO: Check if there are dirty pages */ + return v9fs_file_read_iter(iocb, to); +} + +/** + * v9fs_mmap_file_write - write to a file + * @filp: file pointer to write + * @data: data buffer to write data from + * @count: size of buffer + * @offset: offset at which to write data + * + */ +static ssize_t +v9fs_mmap_file_write_iter(struct kiocb *iocb, struct iov_iter *from) +{ + /* + * TODO: invalidate mmaps on filp's inode between + * offset and offset+count + */ + return v9fs_file_write_iter(iocb, from); +} + +static void v9fs_mmap_vm_close(struct vm_area_struct *vma) +{ + struct inode *inode; + + struct writeback_control wbc = { + .nr_to_write = LONG_MAX, + .sync_mode = WB_SYNC_ALL, + .range_start = vma->vm_pgoff * PAGE_SIZE, + /* absolute end, byte at end included */ + .range_end = vma->vm_pgoff * PAGE_SIZE + + (vma->vm_end - vma->vm_start - 1), + }; + + + p9_debug(P9_DEBUG_VFS, "9p VMA close, %p, flushing", vma); + + inode = file_inode(vma->vm_file); + + if (!mapping_cap_writeback_dirty(inode->i_mapping)) + wbc.nr_to_write = 0; + + might_sleep(); + sync_inode(inode, &wbc); +} + + +static const struct vm_operations_struct v9fs_file_vm_ops = { + .fault = filemap_fault, + .map_pages = filemap_map_pages, + .page_mkwrite = v9fs_vm_page_mkwrite, +}; + +static const struct vm_operations_struct v9fs_mmap_file_vm_ops = { + .close = v9fs_mmap_vm_close, + .fault = filemap_fault, + .map_pages = filemap_map_pages, + .page_mkwrite = v9fs_vm_page_mkwrite, +}; + + +const struct file_operations v9fs_cached_file_operations = { + .llseek = generic_file_llseek, + .read_iter = generic_file_read_iter, + .write_iter = generic_file_write_iter, + .open = v9fs_file_open, + .release = v9fs_dir_release, + .lock = v9fs_file_lock, + .mmap = v9fs_file_mmap, + .fsync = v9fs_file_fsync, +}; + +const struct file_operations v9fs_cached_file_operations_dotl = { + .llseek = generic_file_llseek, + .read_iter = generic_file_read_iter, + .write_iter = generic_file_write_iter, + .open = v9fs_file_open, + .release = v9fs_dir_release, + .lock = v9fs_file_lock_dotl, + .flock = v9fs_file_flock_dotl, + .mmap = v9fs_file_mmap, + .fsync = 
v9fs_file_fsync_dotl, +}; + +const struct file_operations v9fs_file_operations = { + .llseek = generic_file_llseek, + .read_iter = v9fs_file_read_iter, + .write_iter = v9fs_file_write_iter, + .open = v9fs_file_open, + .release = v9fs_dir_release, + .lock = v9fs_file_lock, + .mmap = generic_file_readonly_mmap, + .fsync = v9fs_file_fsync, +}; + +const struct file_operations v9fs_file_operations_dotl = { + .llseek = generic_file_llseek, + .read_iter = v9fs_file_read_iter, + .write_iter = v9fs_file_write_iter, + .open = v9fs_file_open, + .release = v9fs_dir_release, + .lock = v9fs_file_lock_dotl, + .flock = v9fs_file_flock_dotl, + .mmap = generic_file_readonly_mmap, + .fsync = v9fs_file_fsync_dotl, +}; + +const struct file_operations v9fs_mmap_file_operations = { + .llseek = generic_file_llseek, + .read_iter = v9fs_mmap_file_read_iter, + .write_iter = v9fs_mmap_file_write_iter, + .open = v9fs_file_open, + .release = v9fs_dir_release, + .lock = v9fs_file_lock, + .mmap = v9fs_mmap_file_mmap, + .fsync = v9fs_file_fsync, +}; + +const struct file_operations v9fs_mmap_file_operations_dotl = { + .llseek = generic_file_llseek, + .read_iter = v9fs_mmap_file_read_iter, + .write_iter = v9fs_mmap_file_write_iter, + .open = v9fs_file_open, + .release = v9fs_dir_release, + .lock = v9fs_file_lock_dotl, + .flock = v9fs_file_flock_dotl, + .mmap = v9fs_mmap_file_mmap, + .fsync = v9fs_file_fsync_dotl, +}; diff --git a/addons/9p/src/4.4.180/vfs_inode.c b/addons/9p/src/4.4.180/vfs_inode.c new file mode 100644 index 00000000..2de1505a --- /dev/null +++ b/addons/9p/src/4.4.180/vfs_inode.c @@ -0,0 +1,1462 @@ +/* + * linux/fs/9p/vfs_inode.c + * + * This file contains vfs inode ops for the 9P2000 protocol. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/file.h> +#include <linux/pagemap.h> +#include <linux/stat.h> +#include <linux/string.h> +#include <linux/inet.h> +#include <linux/namei.h> +#include <linux/idr.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/xattr.h> +#include <linux/posix_acl.h> +#include <net/9p/9p.h> +#include <net/9p/client.h> + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" +#include "cache.h" +#include "xattr.h" +#include "acl.h" + +static const struct inode_operations v9fs_dir_inode_operations; +static const struct inode_operations v9fs_dir_inode_operations_dotu; +static const struct inode_operations v9fs_file_inode_operations; +static const struct inode_operations v9fs_symlink_inode_operations; + +/** + * unixmode2p9mode - convert unix mode bits to plan 9 + * @v9ses: v9fs session information + * @mode: mode to convert + * + */ + +static u32 unixmode2p9mode(struct v9fs_session_info *v9ses, umode_t mode) +{ + int res; + res = mode & 0777; + if (S_ISDIR(mode)) + res |= P9_DMDIR; + if (v9fs_proto_dotu(v9ses)) { + if (v9ses->nodev == 0) { + if (S_ISSOCK(mode)) + res |= P9_DMSOCKET; + if (S_ISFIFO(mode)) + res |= P9_DMNAMEDPIPE; + if (S_ISBLK(mode)) + res |= P9_DMDEVICE; + if (S_ISCHR(mode)) + res |= P9_DMDEVICE; + } + + if ((mode & S_ISUID) == S_ISUID) + res |= P9_DMSETUID; + if ((mode & S_ISGID) == S_ISGID) + res |= P9_DMSETGID; + if ((mode & S_ISVTX) == S_ISVTX) + res |= P9_DMSETVTX; + } + return res; +} + +/** + * p9mode2perm - convert plan9 mode bits to unix permission bits + * @v9ses: v9fs session information + * @stat: p9_wstat from which the mode needs to be derived + * + */ +static int p9mode2perm(struct v9fs_session_info *v9ses, + struct p9_wstat *stat) +{ + int res; + int mode = stat->mode; + + res = mode & S_IALLUGO; + if (v9fs_proto_dotu(v9ses)) { + if ((mode & P9_DMSETUID) == P9_DMSETUID) + res |= S_ISUID; + + if ((mode & P9_DMSETGID) == P9_DMSETGID) + res |= S_ISGID; + + if ((mode & P9_DMSETVTX) == P9_DMSETVTX) + res |= S_ISVTX; + } + return res; +} + +/** + * p9mode2unixmode - convert plan9 mode bits to unix mode bits + * @v9ses: v9fs session information + * @stat: p9_wstat from which the mode needs to be derived + * @rdev: major number, minor number in case of device files.
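+ *
+ * For illustration (hypothetical values): a 9P2000.u stat carrying
+ * mode = P9_DMDEVICE | 0660 and extension = "b 8 1" comes back as
+ * S_IFBLK | 0660 with *rdev = MKDEV(8, 1), while a plain stat with
+ * mode = P9_DMDIR | 0755 maps to S_IFDIR | 0755 and *rdev = 0.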
+ * + */ +static umode_t p9mode2unixmode(struct v9fs_session_info *v9ses, + struct p9_wstat *stat, dev_t *rdev) +{ + int res; + u32 mode = stat->mode; + + *rdev = 0; + res = p9mode2perm(v9ses, stat); + + if ((mode & P9_DMDIR) == P9_DMDIR) + res |= S_IFDIR; + else if ((mode & P9_DMSYMLINK) && (v9fs_proto_dotu(v9ses))) + res |= S_IFLNK; + else if ((mode & P9_DMSOCKET) && (v9fs_proto_dotu(v9ses)) + && (v9ses->nodev == 0)) + res |= S_IFSOCK; + else if ((mode & P9_DMNAMEDPIPE) && (v9fs_proto_dotu(v9ses)) + && (v9ses->nodev == 0)) + res |= S_IFIFO; + else if ((mode & P9_DMDEVICE) && (v9fs_proto_dotu(v9ses)) + && (v9ses->nodev == 0)) { + char type = 0, ext[32]; + int major = -1, minor = -1; + + strlcpy(ext, stat->extension, sizeof(ext)); + sscanf(ext, "%c %i %i", &type, &major, &minor); + switch (type) { + case 'c': + res |= S_IFCHR; + break; + case 'b': + res |= S_IFBLK; + break; + default: + p9_debug(P9_DEBUG_ERROR, "Unknown special type %c %s\n", + type, stat->extension); + }; + *rdev = MKDEV(major, minor); + } else + res |= S_IFREG; + + return res; +} + +/** + * v9fs_uflags2omode- convert posix open flags to plan 9 mode bits + * @uflags: flags to convert + * @extended: if .u extensions are active + */ + +int v9fs_uflags2omode(int uflags, int extended) +{ + int ret; + + ret = 0; + switch (uflags&3) { + default: + case O_RDONLY: + ret = P9_OREAD; + break; + + case O_WRONLY: + ret = P9_OWRITE; + break; + + case O_RDWR: + ret = P9_ORDWR; + break; + } + + if (extended) { + if (uflags & O_EXCL) + ret |= P9_OEXCL; + + if (uflags & O_APPEND) + ret |= P9_OAPPEND; + } + + return ret; +} + +/** + * v9fs_blank_wstat - helper function to setup a 9P stat structure + * @wstat: structure to initialize + * + */ + +void +v9fs_blank_wstat(struct p9_wstat *wstat) +{ + wstat->type = ~0; + wstat->dev = ~0; + wstat->qid.type = ~0; + wstat->qid.version = ~0; + *((long long *)&wstat->qid.path) = ~0; + wstat->mode = ~0; + wstat->atime = ~0; + wstat->mtime = ~0; + wstat->length = ~0; + wstat->name = NULL; + wstat->uid = NULL; + wstat->gid = NULL; + wstat->muid = NULL; + wstat->n_uid = INVALID_UID; + wstat->n_gid = INVALID_GID; + wstat->n_muid = INVALID_UID; + wstat->extension = NULL; +} + +/** + * v9fs_alloc_inode - helper function to allocate an inode + * + */ +struct inode *v9fs_alloc_inode(struct super_block *sb) +{ + struct v9fs_inode *v9inode; + v9inode = (struct v9fs_inode *)kmem_cache_alloc(v9fs_inode_cache, + GFP_KERNEL); + if (!v9inode) + return NULL; +#ifdef CONFIG_9P_FSCACHE + v9inode->fscache = NULL; + mutex_init(&v9inode->fscache_lock); +#endif + v9inode->writeback_fid = NULL; + v9inode->cache_validity = 0; + mutex_init(&v9inode->v_mutex); + return &v9inode->vfs_inode; +} + +/** + * v9fs_destroy_inode - destroy an inode + * + */ + +static void v9fs_i_callback(struct rcu_head *head) +{ + struct inode *inode = container_of(head, struct inode, i_rcu); + kmem_cache_free(v9fs_inode_cache, V9FS_I(inode)); +} + +void v9fs_destroy_inode(struct inode *inode) +{ + call_rcu(&inode->i_rcu, v9fs_i_callback); +} + +int v9fs_init_inode(struct v9fs_session_info *v9ses, + struct inode *inode, umode_t mode, dev_t rdev) +{ + int err = 0; + + inode_init_owner(inode, NULL, mode); + inode->i_blocks = 0; + inode->i_rdev = rdev; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + inode->i_mapping->a_ops = &v9fs_addr_operations; + + switch (mode & S_IFMT) { + case S_IFIFO: + case S_IFBLK: + case S_IFCHR: + case S_IFSOCK: + if (v9fs_proto_dotl(v9ses)) { + inode->i_op = &v9fs_file_inode_operations_dotl; + } else if 
(v9fs_proto_dotu(v9ses)) { + inode->i_op = &v9fs_file_inode_operations; + } else { + p9_debug(P9_DEBUG_ERROR, + "special files without extended mode\n"); + err = -EINVAL; + goto error; + } + init_special_inode(inode, inode->i_mode, inode->i_rdev); + break; + case S_IFREG: + if (v9fs_proto_dotl(v9ses)) { + inode->i_op = &v9fs_file_inode_operations_dotl; + if (v9ses->cache == CACHE_LOOSE || + v9ses->cache == CACHE_FSCACHE) + inode->i_fop = + &v9fs_cached_file_operations_dotl; + else if (v9ses->cache == CACHE_MMAP) + inode->i_fop = &v9fs_mmap_file_operations_dotl; + else + inode->i_fop = &v9fs_file_operations_dotl; + } else { + inode->i_op = &v9fs_file_inode_operations; + if (v9ses->cache == CACHE_LOOSE || + v9ses->cache == CACHE_FSCACHE) + inode->i_fop = + &v9fs_cached_file_operations; + else if (v9ses->cache == CACHE_MMAP) + inode->i_fop = &v9fs_mmap_file_operations; + else + inode->i_fop = &v9fs_file_operations; + } + + break; + case S_IFLNK: + if (!v9fs_proto_dotu(v9ses) && !v9fs_proto_dotl(v9ses)) { + p9_debug(P9_DEBUG_ERROR, + "extended modes used with legacy protocol\n"); + err = -EINVAL; + goto error; + } + + if (v9fs_proto_dotl(v9ses)) + inode->i_op = &v9fs_symlink_inode_operations_dotl; + else + inode->i_op = &v9fs_symlink_inode_operations; + + break; + case S_IFDIR: + inc_nlink(inode); + if (v9fs_proto_dotl(v9ses)) + inode->i_op = &v9fs_dir_inode_operations_dotl; + else if (v9fs_proto_dotu(v9ses)) + inode->i_op = &v9fs_dir_inode_operations_dotu; + else + inode->i_op = &v9fs_dir_inode_operations; + + if (v9fs_proto_dotl(v9ses)) + inode->i_fop = &v9fs_dir_operations_dotl; + else + inode->i_fop = &v9fs_dir_operations; + + break; + default: + p9_debug(P9_DEBUG_ERROR, "BAD mode 0x%hx S_IFMT 0x%x\n", + mode, mode & S_IFMT); + err = -EINVAL; + goto error; + } +error: + return err; + +} + +/** + * v9fs_get_inode - helper function to setup an inode + * @sb: superblock + * @mode: mode to setup inode with + * + */ + +struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev) +{ + int err; + struct inode *inode; + struct v9fs_session_info *v9ses = sb->s_fs_info; + + p9_debug(P9_DEBUG_VFS, "super block: %p mode: %ho\n", sb, mode); + + inode = new_inode(sb); + if (!inode) { + pr_warn("%s (%d): Problem allocating inode\n", + __func__, task_pid_nr(current)); + return ERR_PTR(-ENOMEM); + } + err = v9fs_init_inode(v9ses, inode, mode, rdev); + if (err) { + iput(inode); + return ERR_PTR(err); + } + return inode; +} + +/* +static struct v9fs_fid* +v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry) +{ + int err; + int nfid; + struct v9fs_fid *ret; + struct v9fs_fcall *fcall; + + nfid = v9fs_get_idpool(&v9ses->fidpool); + if (nfid < 0) { + eprintk(KERN_WARNING, "no free fids available\n"); + return ERR_PTR(-ENOSPC); + } + + err = v9fs_t_walk(v9ses, fid, nfid, (char *) dentry->d_name.name, + &fcall); + + if (err < 0) { + if (fcall && fcall->id == RWALK) + goto clunk_fid; + + PRINT_FCALL_ERROR("walk error", fcall); + v9fs_put_idpool(nfid, &v9ses->fidpool); + goto error; + } + + kfree(fcall); + fcall = NULL; + ret = v9fs_fid_create(v9ses, nfid); + if (!ret) { + err = -ENOMEM; + goto clunk_fid; + } + + err = v9fs_fid_insert(ret, dentry); + if (err < 0) { + v9fs_fid_destroy(ret); + goto clunk_fid; + } + + return ret; + +clunk_fid: + v9fs_t_clunk(v9ses, nfid); + +error: + kfree(fcall); + return ERR_PTR(err); +} +*/ + + +/** + * v9fs_clear_inode - release an inode + * @inode: inode to release + * + */ +void v9fs_evict_inode(struct inode *inode) +{ + struct 
v9fs_inode *v9inode = V9FS_I(inode); + + truncate_inode_pages_final(&inode->i_data); + clear_inode(inode); + filemap_fdatawrite(&inode->i_data); + + v9fs_cache_inode_put_cookie(inode); + /* clunk the fid stashed in writeback_fid */ + if (v9inode->writeback_fid) { + p9_client_clunk(v9inode->writeback_fid); + v9inode->writeback_fid = NULL; + } +} + +static int v9fs_test_inode(struct inode *inode, void *data) +{ + int umode; + dev_t rdev; + struct v9fs_inode *v9inode = V9FS_I(inode); + struct p9_wstat *st = (struct p9_wstat *)data; + struct v9fs_session_info *v9ses = v9fs_inode2v9ses(inode); + + umode = p9mode2unixmode(v9ses, st, &rdev); + /* don't match inode of different type */ + if ((inode->i_mode & S_IFMT) != (umode & S_IFMT)) + return 0; + + /* compare qid details */ + if (memcmp(&v9inode->qid.version, + &st->qid.version, sizeof(v9inode->qid.version))) + return 0; + + if (v9inode->qid.type != st->qid.type) + return 0; + + if (v9inode->qid.path != st->qid.path) + return 0; + return 1; +} + +static int v9fs_test_new_inode(struct inode *inode, void *data) +{ + return 0; +} + +static int v9fs_set_inode(struct inode *inode, void *data) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + struct p9_wstat *st = (struct p9_wstat *)data; + + memcpy(&v9inode->qid, &st->qid, sizeof(st->qid)); + return 0; +} + +static struct inode *v9fs_qid_iget(struct super_block *sb, + struct p9_qid *qid, + struct p9_wstat *st, + int new) +{ + dev_t rdev; + int retval; + umode_t umode; + unsigned long i_ino; + struct inode *inode; + struct v9fs_session_info *v9ses = sb->s_fs_info; + int (*test)(struct inode *, void *); + + if (new) + test = v9fs_test_new_inode; + else + test = v9fs_test_inode; + + i_ino = v9fs_qid2ino(qid); + inode = iget5_locked(sb, i_ino, test, v9fs_set_inode, st); + if (!inode) + return ERR_PTR(-ENOMEM); + if (!(inode->i_state & I_NEW)) + return inode; + /* + * initialize the inode with the stat info + * FIXME!! we may need support for stale inodes + * later. + */ + inode->i_ino = i_ino; + umode = p9mode2unixmode(v9ses, st, &rdev); + retval = v9fs_init_inode(v9ses, inode, umode, rdev); + if (retval) + goto error; + + v9fs_stat2inode(st, inode, sb, 0); + v9fs_cache_inode_get_cookie(inode); + unlock_new_inode(inode); + return inode; +error: + iget_failed(inode); + return ERR_PTR(retval); + +} + +struct inode * +v9fs_inode_from_fid(struct v9fs_session_info *v9ses, struct p9_fid *fid, + struct super_block *sb, int new) +{ + struct p9_wstat *st; + struct inode *inode = NULL; + + st = p9_client_stat(fid); + if (IS_ERR(st)) + return ERR_CAST(st); + + inode = v9fs_qid_iget(sb, &st->qid, st, new); + p9stat_free(st); + kfree(st); + return inode; +} + +/** + * v9fs_at_to_dotl_flags- convert Linux specific AT flags to + * plan 9 AT flag. 
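+ *
+ * As of this version only AT_REMOVEDIR is translated, e.g.
+ *
+ *     v9fs_at_to_dotl_flags(AT_REMOVEDIR)        == P9_DOTL_AT_REMOVEDIR
+ *     v9fs_at_to_dotl_flags(AT_SYMLINK_NOFOLLOW) == 0
+ *
+ * every other AT_* bit is dropped before the request goes out.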
+ * @flags: flags to convert + */ +static int v9fs_at_to_dotl_flags(int flags) +{ + int rflags = 0; + if (flags & AT_REMOVEDIR) + rflags |= P9_DOTL_AT_REMOVEDIR; + return rflags; +} + +/** + * v9fs_remove - helper function to remove files and directories + * @dir: directory inode that is being deleted + * @dentry: dentry that is being deleted + * @flags: removing a directory + * + */ + +static int v9fs_remove(struct inode *dir, struct dentry *dentry, int flags) +{ + struct inode *inode; + int retval = -EOPNOTSUPP; + struct p9_fid *v9fid, *dfid; + struct v9fs_session_info *v9ses; + + p9_debug(P9_DEBUG_VFS, "inode: %p dentry: %p rmdir: %x\n", + dir, dentry, flags); + + v9ses = v9fs_inode2v9ses(dir); + inode = d_inode(dentry); + dfid = v9fs_fid_lookup(dentry->d_parent); + if (IS_ERR(dfid)) { + retval = PTR_ERR(dfid); + p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", retval); + return retval; + } + if (v9fs_proto_dotl(v9ses)) + retval = p9_client_unlinkat(dfid, dentry->d_name.name, + v9fs_at_to_dotl_flags(flags)); + if (retval == -EOPNOTSUPP) { + /* Try the one based on path */ + v9fid = v9fs_fid_clone(dentry); + if (IS_ERR(v9fid)) + return PTR_ERR(v9fid); + retval = p9_client_remove(v9fid); + } + if (!retval) { + /* + * directories on unlink should have zero + * link count + */ + if (flags & AT_REMOVEDIR) { + clear_nlink(inode); + drop_nlink(dir); + } else + drop_nlink(inode); + + v9fs_invalidate_inode_attr(inode); + v9fs_invalidate_inode_attr(dir); + } + return retval; +} + +/** + * v9fs_create - Create a file + * @v9ses: session information + * @dir: directory that dentry is being created in + * @dentry: dentry that is being created + * @extension: 9p2000.u extension string to support devices, etc. + * @perm: create permissions + * @mode: open mode + * + */ +static struct p9_fid * +v9fs_create(struct v9fs_session_info *v9ses, struct inode *dir, + struct dentry *dentry, char *extension, u32 perm, u8 mode) +{ + int err; + char *name; + struct p9_fid *dfid, *ofid, *fid; + struct inode *inode; + + p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry); + + err = 0; + ofid = NULL; + fid = NULL; + name = (char *) dentry->d_name.name; + dfid = v9fs_fid_lookup(dentry->d_parent); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + return ERR_PTR(err); + } + + /* clone a fid to use for creation */ + ofid = p9_client_walk(dfid, 0, NULL, 1); + if (IS_ERR(ofid)) { + err = PTR_ERR(ofid); + p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); + return ERR_PTR(err); + } + + err = p9_client_fcreate(ofid, name, perm, mode, extension); + if (err < 0) { + p9_debug(P9_DEBUG_VFS, "p9_client_fcreate failed %d\n", err); + goto error; + } + + if (!(perm & P9_DMLINK)) { + /* now walk from the parent so we can get unopened fid */ + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + p9_debug(P9_DEBUG_VFS, + "p9_client_walk failed %d\n", err); + fid = NULL; + goto error; + } + /* + * instantiate inode and assign the unopened fid to the dentry + */ + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + p9_debug(P9_DEBUG_VFS, + "inode creation failed %d\n", err); + goto error; + } + v9fs_fid_add(dentry, fid); + d_instantiate(dentry, inode); + } + return ofid; +error: + if (ofid) + p9_client_clunk(ofid); + + if (fid) + p9_client_clunk(fid); + + return ERR_PTR(err); +} + +/** + * v9fs_vfs_create - VFS hook to create a regular file + * + * open(.., O_CREAT) is handled in 
v9fs_vfs_atomic_open(). This is only called + * for mknod(2). + * + * @dir: directory inode that the file is being created in + * @dentry: dentry for the file that is being created + * @mode: create permissions + * + */ + +static int +v9fs_vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, + bool excl) +{ + struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); + u32 perm = unixmode2p9mode(v9ses, mode); + struct p9_fid *fid; + + /* P9_OEXCL? */ + fid = v9fs_create(v9ses, dir, dentry, NULL, perm, P9_ORDWR); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + v9fs_invalidate_inode_attr(dir); + p9_client_clunk(fid); + + return 0; +} + +/** + * v9fs_vfs_mkdir - VFS mkdir hook to create a directory + * @dir: inode of the parent directory + * @dentry: dentry for the directory being created + * @mode: mode for new directory + * + */ + +static int v9fs_vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) +{ + int err; + u32 perm; + struct p9_fid *fid; + struct v9fs_session_info *v9ses; + + p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry); + err = 0; + v9ses = v9fs_inode2v9ses(dir); + perm = unixmode2p9mode(v9ses, mode | S_IFDIR); + fid = v9fs_create(v9ses, dir, dentry, NULL, perm, P9_OREAD); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + fid = NULL; + } else { + inc_nlink(dir); + v9fs_invalidate_inode_attr(dir); + } + + if (fid) + p9_client_clunk(fid); + + return err; +} + +/** + * v9fs_vfs_lookup - VFS lookup hook to "walk" to a new inode + * @dir: inode that is being walked from + * @dentry: dentry that is being walked to + * @flags: lookup flags (unused) + * + */ + +struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, + unsigned int flags) +{ + struct dentry *res; + struct v9fs_session_info *v9ses; + struct p9_fid *dfid, *fid; + struct inode *inode; + char *name; + + p9_debug(P9_DEBUG_VFS, "dir: %p dentry: (%pd) %p flags: %x\n", + dir, dentry, dentry, flags); + + if (dentry->d_name.len > NAME_MAX) + return ERR_PTR(-ENAMETOOLONG); + + v9ses = v9fs_inode2v9ses(dir); + /* We can walk d_parent because we hold the dir->i_mutex */ + dfid = v9fs_fid_lookup(dentry->d_parent); + if (IS_ERR(dfid)) + return ERR_CAST(dfid); + + name = (char *) dentry->d_name.name; + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + if (fid == ERR_PTR(-ENOENT)) { + d_add(dentry, NULL); + return NULL; + } + return ERR_CAST(fid); + } + /* + * Make sure we don't use a wrong inode due to parallel + * unlink. In cached mode, create calls request a new + * inode. But with cache disabled, lookup should do this. + */ + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) + inode = v9fs_get_inode_from_fid(v9ses, fid, dir->i_sb); + else + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + p9_client_clunk(fid); + return ERR_CAST(inode); + } + /* + * If we had a rename on the server and a parallel lookup + * for the new name, then make sure we instantiate with + * the new name. I.e. a lookup for a/b while, on the server, + * somebody moved b under k and the client did a parallel + * lookup for k/b.
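+ *
+ * d_splice_alias() below resolves that race: it hands back an
+ * already-connected alias when one exists, so (sketch)
+ *
+ *     res = d_splice_alias(inode, dentry);
+ *     if (!res)              - dentry itself was used, bind fid to it
+ *     else if (!IS_ERR(res)) - an alias was chosen, bind fid to res
+ *
+ * which is exactly the fid bookkeeping performed right after this
+ * comment.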
+ */ + res = d_splice_alias(inode, dentry); + if (!res) + v9fs_fid_add(dentry, fid); + else if (!IS_ERR(res)) + v9fs_fid_add(res, fid); + else + p9_client_clunk(fid); + return res; +} + +static int +v9fs_vfs_atomic_open(struct inode *dir, struct dentry *dentry, + struct file *file, unsigned flags, umode_t mode, + int *opened) +{ + int err; + u32 perm; + struct v9fs_inode *v9inode; + struct v9fs_session_info *v9ses; + struct p9_fid *fid, *inode_fid; + struct dentry *res = NULL; + + if (d_unhashed(dentry)) { + res = v9fs_vfs_lookup(dir, dentry, 0); + if (IS_ERR(res)) + return PTR_ERR(res); + + if (res) + dentry = res; + } + + /* Only creates */ + if (!(flags & O_CREAT) || d_really_is_positive(dentry)) + return finish_no_open(file, res); + + err = 0; + + v9ses = v9fs_inode2v9ses(dir); + perm = unixmode2p9mode(v9ses, mode); + fid = v9fs_create(v9ses, dir, dentry, NULL, perm, + v9fs_uflags2omode(flags, + v9fs_proto_dotu(v9ses))); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + fid = NULL; + goto error; + } + + v9fs_invalidate_inode_attr(dir); + v9inode = V9FS_I(d_inode(dentry)); + mutex_lock(&v9inode->v_mutex); + if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) && + !v9inode->writeback_fid && + ((flags & O_ACCMODE) != O_RDONLY)) { + /* + * clone a fid and add it to writeback_fid + * we do it during open time instead of + * page dirty time via write_begin/page_mkwrite + * because we want write after unlink usecase + * to work. + */ + inode_fid = v9fs_writeback_fid(dentry); + if (IS_ERR(inode_fid)) { + err = PTR_ERR(inode_fid); + mutex_unlock(&v9inode->v_mutex); + goto error; + } + v9inode->writeback_fid = (void *) inode_fid; + } + mutex_unlock(&v9inode->v_mutex); + err = finish_open(file, dentry, generic_file_open, opened); + if (err) + goto error; + + file->private_data = fid; + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) + v9fs_cache_inode_set_cookie(d_inode(dentry), file); + + *opened |= FILE_CREATED; +out: + dput(res); + return err; + +error: + if (fid) + p9_client_clunk(fid); + goto out; +} + +/** + * v9fs_vfs_unlink - VFS unlink hook to delete an inode + * @i: inode that is being unlinked + * @d: dentry that is being unlinked + * + */ + +int v9fs_vfs_unlink(struct inode *i, struct dentry *d) +{ + return v9fs_remove(i, d, 0); +} + +/** + * v9fs_vfs_rmdir - VFS unlink hook to delete a directory + * @i: inode that is being unlinked + * @d: dentry that is being unlinked + * + */ + +int v9fs_vfs_rmdir(struct inode *i, struct dentry *d) +{ + return v9fs_remove(i, d, AT_REMOVEDIR); +} + +/** + * v9fs_vfs_rename - VFS hook to rename an inode + * @old_dir: old dir inode + * @old_dentry: old dentry + * @new_dir: new dir inode + * @new_dentry: new dentry + * + */ + +int +v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, + struct inode *new_dir, struct dentry *new_dentry) +{ + int retval; + struct inode *old_inode; + struct inode *new_inode; + struct v9fs_session_info *v9ses; + struct p9_fid *oldfid; + struct p9_fid *olddirfid; + struct p9_fid *newdirfid; + struct p9_wstat wstat; + + p9_debug(P9_DEBUG_VFS, "\n"); + retval = 0; + old_inode = d_inode(old_dentry); + new_inode = d_inode(new_dentry); + v9ses = v9fs_inode2v9ses(old_inode); + oldfid = v9fs_fid_lookup(old_dentry); + if (IS_ERR(oldfid)) + return PTR_ERR(oldfid); + + olddirfid = v9fs_fid_clone(old_dentry->d_parent); + if (IS_ERR(olddirfid)) { + retval = PTR_ERR(olddirfid); + goto done; + } + + newdirfid = v9fs_fid_clone(new_dentry->d_parent); + if (IS_ERR(newdirfid)) { + retval = 
PTR_ERR(newdirfid); + goto clunk_olddir; + } + + down_write(&v9ses->rename_sem); + if (v9fs_proto_dotl(v9ses)) { + retval = p9_client_renameat(olddirfid, old_dentry->d_name.name, + newdirfid, new_dentry->d_name.name); + if (retval == -EOPNOTSUPP) + retval = p9_client_rename(oldfid, newdirfid, + new_dentry->d_name.name); + if (retval != -EOPNOTSUPP) + goto clunk_newdir; + } + if (old_dentry->d_parent != new_dentry->d_parent) { + /* + * 9P .u can only handle file rename in the same directory + */ + + p9_debug(P9_DEBUG_ERROR, "old dir and new dir are different\n"); + retval = -EXDEV; + goto clunk_newdir; + } + v9fs_blank_wstat(&wstat); + wstat.muid = v9ses->uname; + wstat.name = (char *) new_dentry->d_name.name; + retval = p9_client_wstat(oldfid, &wstat); + +clunk_newdir: + if (!retval) { + if (new_inode) { + if (S_ISDIR(new_inode->i_mode)) + clear_nlink(new_inode); + else + drop_nlink(new_inode); + } + if (S_ISDIR(old_inode->i_mode)) { + if (!new_inode) + inc_nlink(new_dir); + drop_nlink(old_dir); + } + v9fs_invalidate_inode_attr(old_inode); + v9fs_invalidate_inode_attr(old_dir); + v9fs_invalidate_inode_attr(new_dir); + + /* successful rename */ + d_move(old_dentry, new_dentry); + } + up_write(&v9ses->rename_sem); + p9_client_clunk(newdirfid); + +clunk_olddir: + p9_client_clunk(olddirfid); + +done: + return retval; +} + +/** + * v9fs_vfs_getattr - retrieve file metadata + * @mnt: mount information + * @dentry: file to get attributes on + * @stat: metadata structure to populate + * + */ + +static int +v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat) +{ + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + struct p9_wstat *st; + + p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry); + v9ses = v9fs_dentry2v9ses(dentry); + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + generic_fillattr(d_inode(dentry), stat); + return 0; + } + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + st = p9_client_stat(fid); + if (IS_ERR(st)) + return PTR_ERR(st); + + v9fs_stat2inode(st, d_inode(dentry), d_inode(dentry)->i_sb, 0); + generic_fillattr(d_inode(dentry), stat); + + p9stat_free(st); + kfree(st); + return 0; +} + +/** + * v9fs_vfs_setattr - set file metadata + * @dentry: file whose metadata to set + * @iattr: metadata assignment structure + * + */ + +static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) +{ + int retval; + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + struct p9_wstat wstat; + + p9_debug(P9_DEBUG_VFS, "\n"); + retval = inode_change_ok(d_inode(dentry), iattr); + if (retval) + return retval; + + retval = -EPERM; + v9ses = v9fs_dentry2v9ses(dentry); + fid = v9fs_fid_lookup(dentry); + if(IS_ERR(fid)) + return PTR_ERR(fid); + + v9fs_blank_wstat(&wstat); + if (iattr->ia_valid & ATTR_MODE) + wstat.mode = unixmode2p9mode(v9ses, iattr->ia_mode); + + if (iattr->ia_valid & ATTR_MTIME) + wstat.mtime = iattr->ia_mtime.tv_sec; + + if (iattr->ia_valid & ATTR_ATIME) + wstat.atime = iattr->ia_atime.tv_sec; + + if (iattr->ia_valid & ATTR_SIZE) + wstat.length = iattr->ia_size; + + if (v9fs_proto_dotu(v9ses)) { + if (iattr->ia_valid & ATTR_UID) + wstat.n_uid = iattr->ia_uid; + + if (iattr->ia_valid & ATTR_GID) + wstat.n_gid = iattr->ia_gid; + } + + /* Write all dirty data */ + if (d_is_reg(dentry)) + filemap_write_and_wait(d_inode(dentry)->i_mapping); + + retval = p9_client_wstat(fid, &wstat); + if (retval < 0) + return retval; + + if ((iattr->ia_valid & ATTR_SIZE) && + iattr->ia_size != 
i_size_read(d_inode(dentry))) + truncate_setsize(d_inode(dentry), iattr->ia_size); + + v9fs_invalidate_inode_attr(d_inode(dentry)); + + setattr_copy(d_inode(dentry), iattr); + mark_inode_dirty(d_inode(dentry)); + return 0; +} + +/** + * v9fs_stat2inode - populate an inode structure with mistat info + * @stat: Plan 9 metadata (mistat) structure + * @inode: inode to populate + * @sb: superblock of filesystem + * @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE) + * + */ + +void +v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode, + struct super_block *sb, unsigned int flags) +{ + umode_t mode; + char ext[32]; + char tag_name[14]; + unsigned int i_nlink; + struct v9fs_session_info *v9ses = sb->s_fs_info; + struct v9fs_inode *v9inode = V9FS_I(inode); + + set_nlink(inode, 1); + + inode->i_atime.tv_sec = stat->atime; + inode->i_mtime.tv_sec = stat->mtime; + inode->i_ctime.tv_sec = stat->mtime; + + inode->i_uid = v9ses->dfltuid; + inode->i_gid = v9ses->dfltgid; + + if (v9fs_proto_dotu(v9ses)) { + inode->i_uid = stat->n_uid; + inode->i_gid = stat->n_gid; + } + if ((S_ISREG(inode->i_mode)) || (S_ISDIR(inode->i_mode))) { + if (v9fs_proto_dotu(v9ses) && (stat->extension[0] != '\0')) { + /* + * Hardlink support got added later to + * the .u extension. So there can be + * servers out there that don't support + * this even with the .u extension. So check + * for a non-NULL stat->extension + */ + strlcpy(ext, stat->extension, sizeof(ext)); + /* HARDLINKCOUNT %u */ + sscanf(ext, "%13s %u", tag_name, &i_nlink); + if (!strncmp(tag_name, "HARDLINKCOUNT", 13)) + set_nlink(inode, i_nlink); + } + } + mode = p9mode2perm(v9ses, stat); + mode |= inode->i_mode & ~S_IALLUGO; + inode->i_mode = mode; + + if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE)) + v9fs_i_size_write(inode, stat->length); + /* not the real number of blocks, but 512-byte ones ... */ + inode->i_blocks = (stat->length + 512 - 1) >> 9; + v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR; +} + +/** + * v9fs_qid2ino - convert qid into inode number + * @qid: qid to hash + * + * BUG: potential for inode number collisions?
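+ *
+ * On a 32-bit ino_t the (offset) 64-bit path is folded as
+ * path ^ (path >> 32), so distinct qids can collide; a contrived
+ * example with hypothetical path values:
+ *
+ *     path = 0x0000000100000000  ->  ino = 0x00000001
+ *     path = 0x0000000000000001  ->  ino = 0x00000001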
+ */ + +ino_t v9fs_qid2ino(struct p9_qid *qid) +{ + u64 path = qid->path + 2; + ino_t i = 0; + + if (sizeof(ino_t) == sizeof(path)) + memcpy(&i, &path, sizeof(ino_t)); + else + i = (ino_t) (path ^ (path >> 32)); + + return i; +} + +/** + * v9fs_vfs_follow_link - follow a symlink path + * @dentry: dentry for symlink + * @cookie: place to pass the data to put_link() + */ + +static const char *v9fs_vfs_follow_link(struct dentry *dentry, void **cookie) +{ + struct v9fs_session_info *v9ses = v9fs_dentry2v9ses(dentry); + struct p9_fid *fid = v9fs_fid_lookup(dentry); + struct p9_wstat *st; + char *res; + + p9_debug(P9_DEBUG_VFS, "%pd\n", dentry); + + if (IS_ERR(fid)) + return ERR_CAST(fid); + + if (!v9fs_proto_dotu(v9ses)) + return ERR_PTR(-EBADF); + + st = p9_client_stat(fid); + if (IS_ERR(st)) + return ERR_CAST(st); + + if (!(st->mode & P9_DMSYMLINK)) { + p9stat_free(st); + kfree(st); + return ERR_PTR(-EINVAL); + } + res = st->extension; + st->extension = NULL; + if (strlen(res) >= PATH_MAX) + res[PATH_MAX - 1] = '\0'; + + p9stat_free(st); + kfree(st); + return *cookie = res; +} + +/** + * v9fs_vfs_mkspecial - create a special file + * @dir: inode to create special file in + * @dentry: dentry to create + * @perm: mode to create special file + * @extension: 9p2000.u format extension string representing special file + * + */ + +static int v9fs_vfs_mkspecial(struct inode *dir, struct dentry *dentry, + u32 perm, const char *extension) +{ + struct p9_fid *fid; + struct v9fs_session_info *v9ses; + + v9ses = v9fs_inode2v9ses(dir); + if (!v9fs_proto_dotu(v9ses)) { + p9_debug(P9_DEBUG_ERROR, "not extended\n"); + return -EPERM; + } + + fid = v9fs_create(v9ses, dir, dentry, (char *) extension, perm, + P9_OREAD); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + v9fs_invalidate_inode_attr(dir); + p9_client_clunk(fid); + return 0; +} + +/** + * v9fs_vfs_symlink - helper function to create symlinks + * @dir: directory inode containing symlink + * @dentry: dentry for symlink + * @symname: symlink data + * + * See Also: 9P2000.u RFC for more information + * + */ + +static int +v9fs_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) +{ + p9_debug(P9_DEBUG_VFS, " %lu,%pd,%s\n", + dir->i_ino, dentry, symname); + + return v9fs_vfs_mkspecial(dir, dentry, P9_DMSYMLINK, symname); +} + +#define U32_MAX_DIGITS 10 + +/** + * v9fs_vfs_link - create a hardlink + * @old_dentry: dentry for file to link to + * @dir: inode destination for new link + * @dentry: dentry for link + * + */ + +static int +v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir, + struct dentry *dentry) +{ + int retval; + char name[1 + U32_MAX_DIGITS + 2]; /* sign + number + \n + \0 */ + struct p9_fid *oldfid; + + p9_debug(P9_DEBUG_VFS, " %lu,%pd,%pd\n", + dir->i_ino, dentry, old_dentry); + + oldfid = v9fs_fid_clone(old_dentry); + if (IS_ERR(oldfid)) + return PTR_ERR(oldfid); + + sprintf(name, "%d\n", oldfid->fid); + retval = v9fs_vfs_mkspecial(dir, dentry, P9_DMLINK, name); + if (!retval) { + v9fs_refresh_inode(oldfid, d_inode(old_dentry)); + v9fs_invalidate_inode_attr(dir); + } + p9_client_clunk(oldfid); + return retval; +} + +/** + * v9fs_vfs_mknod - create a special file + * @dir: inode destination for new link + * @dentry: dentry for file + * @mode: mode for creation + * @rdev: device associated with special file + * + */ + +static int +v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) +{ + struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir); + int retval; + char name[2 + 
U32_MAX_DIGITS + 1 + U32_MAX_DIGITS + 1]; + u32 perm; + + p9_debug(P9_DEBUG_VFS, " %lu,%pd mode: %hx MAJOR: %u MINOR: %u\n", + dir->i_ino, dentry, mode, + MAJOR(rdev), MINOR(rdev)); + + /* build extension */ + if (S_ISBLK(mode)) + sprintf(name, "b %u %u", MAJOR(rdev), MINOR(rdev)); + else if (S_ISCHR(mode)) + sprintf(name, "c %u %u", MAJOR(rdev), MINOR(rdev)); + else + *name = 0; + + perm = unixmode2p9mode(v9ses, mode); + retval = v9fs_vfs_mkspecial(dir, dentry, perm, name); + + return retval; +} + +int v9fs_refresh_inode(struct p9_fid *fid, struct inode *inode) +{ + int umode; + dev_t rdev; + struct p9_wstat *st; + struct v9fs_session_info *v9ses; + unsigned int flags; + + v9ses = v9fs_inode2v9ses(inode); + st = p9_client_stat(fid); + if (IS_ERR(st)) + return PTR_ERR(st); + /* + * Don't update inode if the file type is different + */ + umode = p9mode2unixmode(v9ses, st, &rdev); + if ((inode->i_mode & S_IFMT) != (umode & S_IFMT)) + goto out; + + /* + * We don't want to refresh inode->i_size, + * because we may have cached data + */ + flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ? + V9FS_STAT2INODE_KEEP_ISIZE : 0; + v9fs_stat2inode(st, inode, inode->i_sb, flags); +out: + p9stat_free(st); + kfree(st); + return 0; +} + +static const struct inode_operations v9fs_dir_inode_operations_dotu = { + .create = v9fs_vfs_create, + .lookup = v9fs_vfs_lookup, + .atomic_open = v9fs_vfs_atomic_open, + .symlink = v9fs_vfs_symlink, + .link = v9fs_vfs_link, + .unlink = v9fs_vfs_unlink, + .mkdir = v9fs_vfs_mkdir, + .rmdir = v9fs_vfs_rmdir, + .mknod = v9fs_vfs_mknod, + .rename = v9fs_vfs_rename, + .getattr = v9fs_vfs_getattr, + .setattr = v9fs_vfs_setattr, +}; + +static const struct inode_operations v9fs_dir_inode_operations = { + .create = v9fs_vfs_create, + .lookup = v9fs_vfs_lookup, + .atomic_open = v9fs_vfs_atomic_open, + .unlink = v9fs_vfs_unlink, + .mkdir = v9fs_vfs_mkdir, + .rmdir = v9fs_vfs_rmdir, + .mknod = v9fs_vfs_mknod, + .rename = v9fs_vfs_rename, + .getattr = v9fs_vfs_getattr, + .setattr = v9fs_vfs_setattr, +}; + +static const struct inode_operations v9fs_file_inode_operations = { + .getattr = v9fs_vfs_getattr, + .setattr = v9fs_vfs_setattr, +}; + +static const struct inode_operations v9fs_symlink_inode_operations = { + .readlink = generic_readlink, + .follow_link = v9fs_vfs_follow_link, + .put_link = kfree_put_link, + .getattr = v9fs_vfs_getattr, + .setattr = v9fs_vfs_setattr, +}; + diff --git a/addons/9p/src/4.4.180/vfs_inode_dotl.c b/addons/9p/src/4.4.180/vfs_inode_dotl.c new file mode 100644 index 00000000..7ae67fcc --- /dev/null +++ b/addons/9p/src/4.4.180/vfs_inode_dotl.c @@ -0,0 +1,999 @@ +/* + * linux/fs/9p/vfs_inode_dotl.c + * + * This file contains vfs inode ops for the 9P2000.L protocol. + * + * Copyright (C) 2004 by Eric Van Hensbergen + * Copyright (C) 2002 by Ron Minnich + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/file.h> +#include <linux/pagemap.h> +#include <linux/stat.h> +#include <linux/string.h> +#include <linux/inet.h> +#include <linux/namei.h> +#include <linux/idr.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/xattr.h> +#include <linux/posix_acl.h> +#include <net/9p/9p.h> +#include <net/9p/client.h> + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" +#include "cache.h" +#include "xattr.h" +#include "acl.h" + +static int +v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode, + dev_t rdev); + +/** + * v9fs_get_fsgid_for_create - Helper function to get the gid for creating a + * new file system object. This checks the S_ISGID bit to determine the owning + * group of the new file system object. + */ + +static kgid_t v9fs_get_fsgid_for_create(struct inode *dir_inode) +{ + BUG_ON(dir_inode == NULL); + + if (dir_inode->i_mode & S_ISGID) { + /* set-gid bit is set. */ + return dir_inode->i_gid; + } + return current_fsgid(); +} + +static int v9fs_test_inode_dotl(struct inode *inode, void *data) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + struct p9_stat_dotl *st = (struct p9_stat_dotl *)data; + + /* don't match inode of different type */ + if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT)) + return 0; + + if (inode->i_generation != st->st_gen) + return 0; + + /* compare qid details */ + if (memcmp(&v9inode->qid.version, + &st->qid.version, sizeof(v9inode->qid.version))) + return 0; + + if (v9inode->qid.type != st->qid.type) + return 0; + + if (v9inode->qid.path != st->qid.path) + return 0; + return 1; +} + +/* Always get a new inode */ +static int v9fs_test_new_inode_dotl(struct inode *inode, void *data) +{ + return 0; +} + +static int v9fs_set_inode_dotl(struct inode *inode, void *data) +{ + struct v9fs_inode *v9inode = V9FS_I(inode); + struct p9_stat_dotl *st = (struct p9_stat_dotl *)data; + + memcpy(&v9inode->qid, &st->qid, sizeof(st->qid)); + inode->i_generation = st->st_gen; + return 0; +} + +static struct inode *v9fs_qid_iget_dotl(struct super_block *sb, + struct p9_qid *qid, + struct p9_fid *fid, + struct p9_stat_dotl *st, + int new) +{ + int retval; + unsigned long i_ino; + struct inode *inode; + struct v9fs_session_info *v9ses = sb->s_fs_info; + int (*test)(struct inode *, void *); + + if (new) + test = v9fs_test_new_inode_dotl; + else + test = v9fs_test_inode_dotl; + + i_ino = v9fs_qid2ino(qid); + inode = iget5_locked(sb, i_ino, test, v9fs_set_inode_dotl, st); + if (!inode) + return ERR_PTR(-ENOMEM); + if (!(inode->i_state & I_NEW)) + return inode; + /* + * initialize the inode with the stat info + * FIXME!! we may need support for stale inodes + * later.
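+ *
+ * Note how iget5_locked() above drives inode sharing: with
+ * v9fs_test_inode_dotl() an existing in-core inode whose type, qid
+ * and generation match is reused, while v9fs_test_new_inode_dotl()
+ * always returns 0 and therefore forces a fresh inode even when the
+ * computed i_ino collides.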
+ */ + inode->i_ino = i_ino; + retval = v9fs_init_inode(v9ses, inode, + st->st_mode, new_decode_dev(st->st_rdev)); + if (retval) + goto error; + + v9fs_stat2inode_dotl(st, inode, 0); + v9fs_cache_inode_get_cookie(inode); + retval = v9fs_get_acl(inode, fid); + if (retval) + goto error; + + unlock_new_inode(inode); + return inode; +error: + iget_failed(inode); + return ERR_PTR(retval); + +} + +struct inode * +v9fs_inode_from_fid_dotl(struct v9fs_session_info *v9ses, struct p9_fid *fid, + struct super_block *sb, int new) +{ + struct p9_stat_dotl *st; + struct inode *inode = NULL; + + st = p9_client_getattr_dotl(fid, P9_STATS_BASIC | P9_STATS_GEN); + if (IS_ERR(st)) + return ERR_CAST(st); + + inode = v9fs_qid_iget_dotl(sb, &st->qid, fid, st, new); + kfree(st); + return inode; +} + +struct dotl_openflag_map { + int open_flag; + int dotl_flag; +}; + +static int v9fs_mapped_dotl_flags(int flags) +{ + int i; + int rflags = 0; + struct dotl_openflag_map dotl_oflag_map[] = { + { O_CREAT, P9_DOTL_CREATE }, + { O_EXCL, P9_DOTL_EXCL }, + { O_NOCTTY, P9_DOTL_NOCTTY }, + { O_APPEND, P9_DOTL_APPEND }, + { O_NONBLOCK, P9_DOTL_NONBLOCK }, + { O_DSYNC, P9_DOTL_DSYNC }, + { FASYNC, P9_DOTL_FASYNC }, + { O_DIRECT, P9_DOTL_DIRECT }, + { O_LARGEFILE, P9_DOTL_LARGEFILE }, + { O_DIRECTORY, P9_DOTL_DIRECTORY }, + { O_NOFOLLOW, P9_DOTL_NOFOLLOW }, + { O_NOATIME, P9_DOTL_NOATIME }, + { O_CLOEXEC, P9_DOTL_CLOEXEC }, + { O_SYNC, P9_DOTL_SYNC}, + }; + for (i = 0; i < ARRAY_SIZE(dotl_oflag_map); i++) { + if (flags & dotl_oflag_map[i].open_flag) + rflags |= dotl_oflag_map[i].dotl_flag; + } + return rflags; +} + +/** + * v9fs_open_to_dotl_flags- convert Linux specific open flags to + * plan 9 open flag. + * @flags: flags to convert + */ +int v9fs_open_to_dotl_flags(int flags) +{ + int rflags = 0; + + /* + * We have same bits for P9_DOTL_READONLY, P9_DOTL_WRONLY + * and P9_DOTL_NOACCESS + */ + rflags |= flags & O_ACCMODE; + rflags |= v9fs_mapped_dotl_flags(flags); + + return rflags; +} + +/** + * v9fs_vfs_create_dotl - VFS hook to create files for 9P2000.L protocol. 
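+ *
+ * Flag translation context (illustrative): a creating open such as
+ * open("f", O_CREAT | O_WRONLY | O_EXCL, 0644) is served by
+ * v9fs_vfs_atomic_open_dotl() below, where v9fs_open_to_dotl_flags()
+ * rewrites the flags as
+ *
+ *     P9_DOTL_CREATE | P9_DOTL_WRONLY | P9_DOTL_EXCL
+ *
+ * since the O_ACCMODE bits are bit-identical in 9P2000.L and the rest
+ * go through dotl_oflag_map above.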
+ * @dir: directory inode that is being created + * @dentry: dentry that is being deleted + * @omode: create permissions + * + */ + +static int +v9fs_vfs_create_dotl(struct inode *dir, struct dentry *dentry, umode_t omode, + bool excl) +{ + return v9fs_vfs_mknod_dotl(dir, dentry, omode, 0); +} + +static int +v9fs_vfs_atomic_open_dotl(struct inode *dir, struct dentry *dentry, + struct file *file, unsigned flags, umode_t omode, + int *opened) +{ + int err = 0; + kgid_t gid; + umode_t mode; + char *name = NULL; + struct p9_qid qid; + struct inode *inode; + struct p9_fid *fid = NULL; + struct v9fs_inode *v9inode; + struct p9_fid *dfid, *ofid, *inode_fid; + struct v9fs_session_info *v9ses; + struct posix_acl *pacl = NULL, *dacl = NULL; + struct dentry *res = NULL; + + if (d_unhashed(dentry)) { + res = v9fs_vfs_lookup(dir, dentry, 0); + if (IS_ERR(res)) + return PTR_ERR(res); + + if (res) + dentry = res; + } + + /* Only creates */ + if (!(flags & O_CREAT) || d_really_is_positive(dentry)) + return finish_no_open(file, res); + + v9ses = v9fs_inode2v9ses(dir); + + name = (char *) dentry->d_name.name; + p9_debug(P9_DEBUG_VFS, "name:%s flags:0x%x mode:0x%hx\n", + name, flags, omode); + + dfid = v9fs_fid_lookup(dentry->d_parent); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + goto out; + } + + /* clone a fid to use for creation */ + ofid = p9_client_walk(dfid, 0, NULL, 1); + if (IS_ERR(ofid)) { + err = PTR_ERR(ofid); + p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); + goto out; + } + + gid = v9fs_get_fsgid_for_create(dir); + + mode = omode; + /* Update mode based on ACL value */ + err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); + if (err) { + p9_debug(P9_DEBUG_VFS, "Failed to get acl values in creat %d\n", + err); + goto error; + } + err = p9_client_create_dotl(ofid, name, v9fs_open_to_dotl_flags(flags), + mode, gid, &qid); + if (err < 0) { + p9_debug(P9_DEBUG_VFS, "p9_client_open_dotl failed in creat %d\n", + err); + goto error; + } + v9fs_invalidate_inode_attr(dir); + + /* instantiate inode and assign the unopened fid to the dentry */ + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", err); + fid = NULL; + goto error; + } + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", err); + goto error; + } + /* Now set the ACL based on the default value */ + v9fs_set_create_acl(inode, fid, dacl, pacl); + + v9fs_fid_add(dentry, fid); + d_instantiate(dentry, inode); + + v9inode = V9FS_I(inode); + mutex_lock(&v9inode->v_mutex); + if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) && + !v9inode->writeback_fid && + ((flags & O_ACCMODE) != O_RDONLY)) { + /* + * clone a fid and add it to writeback_fid + * we do it during open time instead of + * page dirty time via write_begin/page_mkwrite + * because we want write after unlink usecase + * to work. 
+ */ + inode_fid = v9fs_writeback_fid(dentry); + if (IS_ERR(inode_fid)) { + err = PTR_ERR(inode_fid); + mutex_unlock(&v9inode->v_mutex); + goto err_clunk_old_fid; + } + v9inode->writeback_fid = (void *) inode_fid; + } + mutex_unlock(&v9inode->v_mutex); + /* Since we are opening a file, assign the open fid to the file */ + err = finish_open(file, dentry, generic_file_open, opened); + if (err) + goto err_clunk_old_fid; + file->private_data = ofid; + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) + v9fs_cache_inode_set_cookie(inode, file); + *opened |= FILE_CREATED; +out: + v9fs_put_acl(dacl, pacl); + dput(res); + return err; + +error: + if (fid) + p9_client_clunk(fid); +err_clunk_old_fid: + if (ofid) + p9_client_clunk(ofid); + goto out; +} + +/** + * v9fs_vfs_mkdir_dotl - VFS mkdir hook to create a directory + * @dir: inode that is being unlinked + * @dentry: dentry that is being unlinked + * @omode: mode for new directory + * + */ + +static int v9fs_vfs_mkdir_dotl(struct inode *dir, + struct dentry *dentry, umode_t omode) +{ + int err; + struct v9fs_session_info *v9ses; + struct p9_fid *fid = NULL, *dfid = NULL; + kgid_t gid; + char *name; + umode_t mode; + struct inode *inode; + struct p9_qid qid; + struct dentry *dir_dentry; + struct posix_acl *dacl = NULL, *pacl = NULL; + + p9_debug(P9_DEBUG_VFS, "name %pd\n", dentry); + err = 0; + v9ses = v9fs_inode2v9ses(dir); + + omode |= S_IFDIR; + if (dir->i_mode & S_ISGID) + omode |= S_ISGID; + + dir_dentry = dentry->d_parent; + dfid = v9fs_fid_lookup(dir_dentry); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + dfid = NULL; + goto error; + } + + gid = v9fs_get_fsgid_for_create(dir); + mode = omode; + /* Update mode based on ACL value */ + err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); + if (err) { + p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mkdir %d\n", + err); + goto error; + } + name = (char *) dentry->d_name.name; + err = p9_client_mkdir_dotl(dfid, name, mode, gid, &qid); + if (err < 0) + goto error; + + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", + err); + fid = NULL; + goto error; + } + + /* instantiate inode and assign the unopened fid to the dentry */ + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", + err); + goto error; + } + v9fs_fid_add(dentry, fid); + v9fs_set_create_acl(inode, fid, dacl, pacl); + d_instantiate(dentry, inode); + fid = NULL; + err = 0; + } else { + /* + * Not in cached mode. No need to populate + * inode with stat. 
We need to get an inode + * so that we can set the acl with dentry + */ + inode = v9fs_get_inode(dir->i_sb, mode, 0); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto error; + } + v9fs_set_create_acl(inode, fid, dacl, pacl); + d_instantiate(dentry, inode); + } + inc_nlink(dir); + v9fs_invalidate_inode_attr(dir); +error: + if (fid) + p9_client_clunk(fid); + v9fs_put_acl(dacl, pacl); + return err; +} + +static int +v9fs_vfs_getattr_dotl(struct vfsmount *mnt, struct dentry *dentry, + struct kstat *stat) +{ + struct v9fs_session_info *v9ses; + struct p9_fid *fid; + struct p9_stat_dotl *st; + + p9_debug(P9_DEBUG_VFS, "dentry: %p\n", dentry); + v9ses = v9fs_dentry2v9ses(dentry); + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + generic_fillattr(d_inode(dentry), stat); + return 0; + } + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + /* Ask for all the fields in stat structure. Server will return + * whatever it supports + */ + + st = p9_client_getattr_dotl(fid, P9_STATS_ALL); + if (IS_ERR(st)) + return PTR_ERR(st); + + v9fs_stat2inode_dotl(st, d_inode(dentry), 0); + generic_fillattr(d_inode(dentry), stat); + /* Change block size to what the server returned */ + stat->blksize = st->st_blksize; + + kfree(st); + return 0; +} + +/* + * Attribute flags. + */ +#define P9_ATTR_MODE (1 << 0) +#define P9_ATTR_UID (1 << 1) +#define P9_ATTR_GID (1 << 2) +#define P9_ATTR_SIZE (1 << 3) +#define P9_ATTR_ATIME (1 << 4) +#define P9_ATTR_MTIME (1 << 5) +#define P9_ATTR_CTIME (1 << 6) +#define P9_ATTR_ATIME_SET (1 << 7) +#define P9_ATTR_MTIME_SET (1 << 8) + +struct dotl_iattr_map { + int iattr_valid; + int p9_iattr_valid; +}; + +static int v9fs_mapped_iattr_valid(int iattr_valid) +{ + int i; + int p9_iattr_valid = 0; + struct dotl_iattr_map dotl_iattr_map[] = { + { ATTR_MODE, P9_ATTR_MODE }, + { ATTR_UID, P9_ATTR_UID }, + { ATTR_GID, P9_ATTR_GID }, + { ATTR_SIZE, P9_ATTR_SIZE }, + { ATTR_ATIME, P9_ATTR_ATIME }, + { ATTR_MTIME, P9_ATTR_MTIME }, + { ATTR_CTIME, P9_ATTR_CTIME }, + { ATTR_ATIME_SET, P9_ATTR_ATIME_SET }, + { ATTR_MTIME_SET, P9_ATTR_MTIME_SET }, + }; + for (i = 0; i < ARRAY_SIZE(dotl_iattr_map); i++) { + if (iattr_valid & dotl_iattr_map[i].iattr_valid) + p9_iattr_valid |= dotl_iattr_map[i].p9_iattr_valid; + } + return p9_iattr_valid; +} + +/** + * v9fs_vfs_setattr_dotl - set file metadata + * @dentry: file whose metadata to set + * @iattr: metadata assignment structure + * + */ + +int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) +{ + int retval; + struct p9_fid *fid; + struct p9_iattr_dotl p9attr; + struct inode *inode = d_inode(dentry); + + p9_debug(P9_DEBUG_VFS, "\n"); + + retval = inode_change_ok(inode, iattr); + if (retval) + return retval; + + p9attr.valid = v9fs_mapped_iattr_valid(iattr->ia_valid); + p9attr.mode = iattr->ia_mode; + p9attr.uid = iattr->ia_uid; + p9attr.gid = iattr->ia_gid; + p9attr.size = iattr->ia_size; + p9attr.atime_sec = iattr->ia_atime.tv_sec; + p9attr.atime_nsec = iattr->ia_atime.tv_nsec; + p9attr.mtime_sec = iattr->ia_mtime.tv_sec; + p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; + + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + /* Write all dirty data */ + if (S_ISREG(inode->i_mode)) + filemap_write_and_wait(inode->i_mapping); + + retval = p9_client_setattr(fid, &p9attr); + if (retval < 0) + return retval; + + if ((iattr->ia_valid & ATTR_SIZE) && + iattr->ia_size != i_size_read(inode)) + truncate_setsize(inode, iattr->ia_size); + + v9fs_invalidate_inode_attr(inode); + 
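+	/*
+	 * The server has accepted the setattr at this point; mirror the
+	 * change into the local inode and mark it dirty so the cached
+	 * attributes match what the server now reports.
+	 */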
setattr_copy(inode, iattr); + mark_inode_dirty(inode); + if (iattr->ia_valid & ATTR_MODE) { + /* We also want to update ACL when we update mode bits */ + retval = v9fs_acl_chmod(inode, fid); + if (retval < 0) + return retval; + } + return 0; +} + +/** + * v9fs_stat2inode_dotl - populate an inode structure with stat info + * @stat: stat structure + * @inode: inode to populate + * @flags: ctrl flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE) + * + */ + +void +v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode, + unsigned int flags) +{ + umode_t mode; + struct v9fs_inode *v9inode = V9FS_I(inode); + + if ((stat->st_result_mask & P9_STATS_BASIC) == P9_STATS_BASIC) { + inode->i_atime.tv_sec = stat->st_atime_sec; + inode->i_atime.tv_nsec = stat->st_atime_nsec; + inode->i_mtime.tv_sec = stat->st_mtime_sec; + inode->i_mtime.tv_nsec = stat->st_mtime_nsec; + inode->i_ctime.tv_sec = stat->st_ctime_sec; + inode->i_ctime.tv_nsec = stat->st_ctime_nsec; + inode->i_uid = stat->st_uid; + inode->i_gid = stat->st_gid; + set_nlink(inode, stat->st_nlink); + + mode = stat->st_mode & S_IALLUGO; + mode |= inode->i_mode & ~S_IALLUGO; + inode->i_mode = mode; + + if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE)) + v9fs_i_size_write(inode, stat->st_size); + inode->i_blocks = stat->st_blocks; + } else { + if (stat->st_result_mask & P9_STATS_ATIME) { + inode->i_atime.tv_sec = stat->st_atime_sec; + inode->i_atime.tv_nsec = stat->st_atime_nsec; + } + if (stat->st_result_mask & P9_STATS_MTIME) { + inode->i_mtime.tv_sec = stat->st_mtime_sec; + inode->i_mtime.tv_nsec = stat->st_mtime_nsec; + } + if (stat->st_result_mask & P9_STATS_CTIME) { + inode->i_ctime.tv_sec = stat->st_ctime_sec; + inode->i_ctime.tv_nsec = stat->st_ctime_nsec; + } + if (stat->st_result_mask & P9_STATS_UID) + inode->i_uid = stat->st_uid; + if (stat->st_result_mask & P9_STATS_GID) + inode->i_gid = stat->st_gid; + if (stat->st_result_mask & P9_STATS_NLINK) + set_nlink(inode, stat->st_nlink); + if (stat->st_result_mask & P9_STATS_MODE) { + inode->i_mode = stat->st_mode; + if ((S_ISBLK(inode->i_mode)) || + (S_ISCHR(inode->i_mode))) + init_special_inode(inode, inode->i_mode, + inode->i_rdev); + } + if (stat->st_result_mask & P9_STATS_RDEV) + inode->i_rdev = new_decode_dev(stat->st_rdev); + if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) && + stat->st_result_mask & P9_STATS_SIZE) + v9fs_i_size_write(inode, stat->st_size); + if (stat->st_result_mask & P9_STATS_BLOCKS) + inode->i_blocks = stat->st_blocks; + } + if (stat->st_result_mask & P9_STATS_GEN) + inode->i_generation = stat->st_gen; + + /* Currently we don't support P9_STATS_BTIME and P9_STATS_DATA_VERSION + * because the inode structure does not have fields for them. + */ + v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR; +} + +static int +v9fs_vfs_symlink_dotl(struct inode *dir, struct dentry *dentry, + const char *symname) +{ + int err; + kgid_t gid; + char *name; + struct p9_qid qid; + struct inode *inode; + struct p9_fid *dfid; + struct p9_fid *fid = NULL; + struct v9fs_session_info *v9ses; + + name = (char *) dentry->d_name.name; + p9_debug(P9_DEBUG_VFS, "%lu,%s,%s\n", dir->i_ino, name, symname); + v9ses = v9fs_inode2v9ses(dir); + + dfid = v9fs_fid_lookup(dentry->d_parent); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + return err; + } + + gid = v9fs_get_fsgid_for_create(dir); + + /* Server doesn't alter fid on TSYMLINK. Hence no need to clone it. 
*/ + err = p9_client_symlink(dfid, name, (char *)symname, gid, &qid); + + if (err < 0) { + p9_debug(P9_DEBUG_VFS, "p9_client_symlink failed %d\n", err); + goto error; + } + + v9fs_invalidate_inode_attr(dir); + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + /* Now walk from the parent so we can get an unopened fid. */ + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", + err); + fid = NULL; + goto error; + } + + /* instantiate inode and assign the unopened fid to dentry */ + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", + err); + goto error; + } + v9fs_fid_add(dentry, fid); + d_instantiate(dentry, inode); + fid = NULL; + err = 0; + } else { + /* Not in cached mode. No need to populate inode with stat */ + inode = v9fs_get_inode(dir->i_sb, S_IFLNK, 0); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + goto error; + } + d_instantiate(dentry, inode); + } + +error: + if (fid) + p9_client_clunk(fid); + + return err; +} + +/** + * v9fs_vfs_link_dotl - create a hardlink for dotl + * @old_dentry: dentry for file to link to + * @dir: inode destination for new link + * @dentry: dentry for link + * + */ + +static int +v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir, + struct dentry *dentry) +{ + int err; + struct dentry *dir_dentry; + struct p9_fid *dfid, *oldfid; + struct v9fs_session_info *v9ses; + + p9_debug(P9_DEBUG_VFS, "dir ino: %lu, old_name: %pd, new_name: %pd\n", + dir->i_ino, old_dentry, dentry); + + v9ses = v9fs_inode2v9ses(dir); + dir_dentry = dentry->d_parent; + dfid = v9fs_fid_lookup(dir_dentry); + if (IS_ERR(dfid)) + return PTR_ERR(dfid); + + oldfid = v9fs_fid_lookup(old_dentry); + if (IS_ERR(oldfid)) + return PTR_ERR(oldfid); + + err = p9_client_link(dfid, oldfid, (char *)dentry->d_name.name); + + if (err < 0) { + p9_debug(P9_DEBUG_VFS, "p9_client_link failed %d\n", err); + return err; + } + + v9fs_invalidate_inode_attr(dir); + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + /* Get the latest stat info from server. 
*/ + struct p9_fid *fid; + fid = v9fs_fid_lookup(old_dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + v9fs_refresh_inode_dotl(fid, d_inode(old_dentry)); + } + ihold(d_inode(old_dentry)); + d_instantiate(dentry, d_inode(old_dentry)); + + return err; +} + +/** + * v9fs_vfs_mknod_dotl - create a special file + * @dir: inode destination for new link + * @dentry: dentry for file + * @omode: mode for creation + * @rdev: device associated with special file + * + */ +static int +v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode, + dev_t rdev) +{ + int err; + kgid_t gid; + char *name; + umode_t mode; + struct v9fs_session_info *v9ses; + struct p9_fid *fid = NULL, *dfid = NULL; + struct inode *inode; + struct p9_qid qid; + struct dentry *dir_dentry; + struct posix_acl *dacl = NULL, *pacl = NULL; + + p9_debug(P9_DEBUG_VFS, " %lu,%pd mode: %hx MAJOR: %u MINOR: %u\n", + dir->i_ino, dentry, omode, + MAJOR(rdev), MINOR(rdev)); + + v9ses = v9fs_inode2v9ses(dir); + dir_dentry = dentry->d_parent; + dfid = v9fs_fid_lookup(dir_dentry); + if (IS_ERR(dfid)) { + err = PTR_ERR(dfid); + p9_debug(P9_DEBUG_VFS, "fid lookup failed %d\n", err); + dfid = NULL; + goto error; + } + + gid = v9fs_get_fsgid_for_create(dir); + mode = omode; + /* Update mode based on ACL value */ + err = v9fs_acl_mode(dir, &mode, &dacl, &pacl); + if (err) { + p9_debug(P9_DEBUG_VFS, "Failed to get acl values in mknod %d\n", + err); + goto error; + } + name = (char *) dentry->d_name.name; + + err = p9_client_mknod_dotl(dfid, name, mode, rdev, gid, &qid); + if (err < 0) + goto error; + + v9fs_invalidate_inode_attr(dir); + fid = p9_client_walk(dfid, 1, &name, 1); + if (IS_ERR(fid)) { + err = PTR_ERR(fid); + p9_debug(P9_DEBUG_VFS, "p9_client_walk failed %d\n", + err); + fid = NULL; + goto error; + } + + /* instantiate inode and assign the unopened fid to the dentry */ + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) { + inode = v9fs_get_new_inode_from_fid(v9ses, fid, dir->i_sb); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); + p9_debug(P9_DEBUG_VFS, "inode creation failed %d\n", + err); + goto error; + } + v9fs_set_create_acl(inode, fid, dacl, pacl); + v9fs_fid_add(dentry, fid); + d_instantiate(dentry, inode); + fid = NULL; + err = 0; + } else { + /* + * Not in cached mode. No need to populate inode with stat. 
+ * socket syscall returns a fd, so we need to instantiate
+ */
+ inode = v9fs_get_inode(dir->i_sb, mode, rdev);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto error;
+ }
+ v9fs_set_create_acl(inode, fid, dacl, pacl);
+ d_instantiate(dentry, inode);
+ }
+error:
+ if (fid)
+ p9_client_clunk(fid);
+ v9fs_put_acl(dacl, pacl);
+ return err;
+}
+
+/**
+ * v9fs_vfs_follow_link_dotl - follow a symlink path
+ * @dentry: dentry for symlink
+ * @cookie: place to pass the data to put_link()
+ */
+
+static const char *
+v9fs_vfs_follow_link_dotl(struct dentry *dentry, void **cookie)
+{
+ struct p9_fid *fid = v9fs_fid_lookup(dentry);
+ char *target;
+ int retval;
+
+ p9_debug(P9_DEBUG_VFS, "%pd\n", dentry);
+
+ if (IS_ERR(fid))
+ return ERR_CAST(fid);
+ retval = p9_client_readlink(fid, &target);
+ if (retval)
+ return ERR_PTR(retval);
+ return *cookie = target;
+}
+
+int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
+{
+ struct p9_stat_dotl *st;
+ struct v9fs_session_info *v9ses;
+ unsigned int flags;
+
+ v9ses = v9fs_inode2v9ses(inode);
+ st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
+ if (IS_ERR(st))
+ return PTR_ERR(st);
+ /*
+ * Don't update inode if the file type is different
+ */
+ if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
+ goto out;
+
+ /*
+ * We don't want to refresh inode->i_size,
+ * because we may have cached data
+ */
+ flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
+ V9FS_STAT2INODE_KEEP_ISIZE : 0;
+ v9fs_stat2inode_dotl(st, inode, flags);
+out:
+ kfree(st);
+ return 0;
+}
+
+const struct inode_operations v9fs_dir_inode_operations_dotl = {
+ .create = v9fs_vfs_create_dotl,
+ .atomic_open = v9fs_vfs_atomic_open_dotl,
+ .lookup = v9fs_vfs_lookup,
+ .link = v9fs_vfs_link_dotl,
+ .symlink = v9fs_vfs_symlink_dotl,
+ .unlink = v9fs_vfs_unlink,
+ .mkdir = v9fs_vfs_mkdir_dotl,
+ .rmdir = v9fs_vfs_rmdir,
+ .mknod = v9fs_vfs_mknod_dotl,
+ .rename = v9fs_vfs_rename,
+ .getattr = v9fs_vfs_getattr_dotl,
+ .setattr = v9fs_vfs_setattr_dotl,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .removexattr = generic_removexattr,
+ .listxattr = v9fs_listxattr,
+ .get_acl = v9fs_iop_get_acl,
+};
+
+const struct inode_operations v9fs_file_inode_operations_dotl = {
+ .getattr = v9fs_vfs_getattr_dotl,
+ .setattr = v9fs_vfs_setattr_dotl,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .removexattr = generic_removexattr,
+ .listxattr = v9fs_listxattr,
+ .get_acl = v9fs_iop_get_acl,
+};
+
+const struct inode_operations v9fs_symlink_inode_operations_dotl = {
+ .readlink = generic_readlink,
+ .follow_link = v9fs_vfs_follow_link_dotl,
+ .put_link = kfree_put_link,
+ .getattr = v9fs_vfs_getattr_dotl,
+ .setattr = v9fs_vfs_setattr_dotl,
+ .setxattr = generic_setxattr,
+ .getxattr = generic_getxattr,
+ .removexattr = generic_removexattr,
+ .listxattr = v9fs_listxattr,
+};
diff --git a/addons/9p/src/4.4.180/vfs_super.c b/addons/9p/src/4.4.180/vfs_super.c
new file mode 100644
index 00000000..ccf935d9
--- /dev/null
+++ b/addons/9p/src/4.4.180/vfs_super.c
@@ -0,0 +1,366 @@
+/*
+ * linux/fs/9p/vfs_super.c
+ *
+ * This file contains superblock ops for 9P2000. It is intended that
+ * you mount this file system on directories.
+ *
+ * Copyright (C) 2004 by Eric Van Hensbergen
+ * Copyright (C) 2002 by Ron Minnich
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to: + * Free Software Foundation + * 51 Franklin Street, Fifth Floor + * Boston, MA 02111-1301 USA + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "v9fs.h" +#include "v9fs_vfs.h" +#include "fid.h" +#include "xattr.h" +#include "acl.h" + +static const struct super_operations v9fs_super_ops, v9fs_super_ops_dotl; + +/** + * v9fs_set_super - set the superblock + * @s: super block + * @data: file system specific data + * + */ + +static int v9fs_set_super(struct super_block *s, void *data) +{ + s->s_fs_info = data; + return set_anon_super(s, data); +} + +/** + * v9fs_fill_super - populate superblock with info + * @sb: superblock + * @v9ses: session information + * @flags: flags propagated from v9fs_mount() + * + */ + +static void +v9fs_fill_super(struct super_block *sb, struct v9fs_session_info *v9ses, + int flags, void *data) +{ + sb->s_maxbytes = MAX_LFS_FILESIZE; + sb->s_blocksize_bits = fls(v9ses->maxdata - 1); + sb->s_blocksize = 1 << sb->s_blocksize_bits; + sb->s_magic = V9FS_MAGIC; + if (v9fs_proto_dotl(v9ses)) { + sb->s_op = &v9fs_super_ops_dotl; + sb->s_xattr = v9fs_xattr_handlers; + } else + sb->s_op = &v9fs_super_ops; + sb->s_bdi = &v9ses->bdi; + if (v9ses->cache) + sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_CACHE_SIZE; + + sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME; + if (!v9ses->cache) + sb->s_flags |= MS_SYNCHRONOUS; + +#ifdef CONFIG_9P_FS_POSIX_ACL + if ((v9ses->flags & V9FS_ACL_MASK) == V9FS_POSIX_ACL) + sb->s_flags |= MS_POSIXACL; +#endif + + save_mount_options(sb, data); +} + +/** + * v9fs_mount - mount a superblock + * @fs_type: file system type + * @flags: mount flags + * @dev_name: device name that was mounted + * @data: mount options + * + */ + +static struct dentry *v9fs_mount(struct file_system_type *fs_type, int flags, + const char *dev_name, void *data) +{ + struct super_block *sb = NULL; + struct inode *inode = NULL; + struct dentry *root = NULL; + struct v9fs_session_info *v9ses = NULL; + umode_t mode = S_IRWXUGO | S_ISVTX; + struct p9_fid *fid; + int retval = 0; + + p9_debug(P9_DEBUG_VFS, "\n"); + + v9ses = kzalloc(sizeof(struct v9fs_session_info), GFP_KERNEL); + if (!v9ses) + return ERR_PTR(-ENOMEM); + + fid = v9fs_session_init(v9ses, dev_name, data); + if (IS_ERR(fid)) { + retval = PTR_ERR(fid); + goto free_session; + } + + sb = sget(fs_type, NULL, v9fs_set_super, flags, v9ses); + if (IS_ERR(sb)) { + retval = PTR_ERR(sb); + goto clunk_fid; + } + v9fs_fill_super(sb, v9ses, flags, data); + + if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) + sb->s_d_op = &v9fs_cached_dentry_operations; + else + sb->s_d_op = &v9fs_dentry_operations; + + inode = v9fs_get_inode(sb, S_IFDIR | mode, 0); + if (IS_ERR(inode)) { + retval = PTR_ERR(inode); + goto release_sb; + } + + root = d_make_root(inode); + if (!root) { + retval = -ENOMEM; + goto release_sb; + } + sb->s_root = root; + if (v9fs_proto_dotl(v9ses)) { + struct p9_stat_dotl *st = NULL; + st = p9_client_getattr_dotl(fid, P9_STATS_BASIC); + if (IS_ERR(st)) { + retval = 
PTR_ERR(st);
+ goto release_sb;
+ }
+ d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
+ v9fs_stat2inode_dotl(st, d_inode(root), 0);
+ kfree(st);
+ } else {
+ struct p9_wstat *st = NULL;
+ st = p9_client_stat(fid);
+ if (IS_ERR(st)) {
+ retval = PTR_ERR(st);
+ goto release_sb;
+ }
+
+ d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
+ v9fs_stat2inode(st, d_inode(root), sb, 0);
+
+ p9stat_free(st);
+ kfree(st);
+ }
+ retval = v9fs_get_acl(inode, fid);
+ if (retval)
+ goto release_sb;
+ v9fs_fid_add(root, fid);
+
+ p9_debug(P9_DEBUG_VFS, " simple set mount, return 0\n");
+ return dget(sb->s_root);
+
+clunk_fid:
+ p9_client_clunk(fid);
+ v9fs_session_close(v9ses);
+free_session:
+ kfree(v9ses);
+ return ERR_PTR(retval);
+
+release_sb:
+ /*
+ * we will do the session_close and root dentry release
+ * in the call below. But we need to clunk fid, because we haven't
+ * attached the fid to dentry so it won't get clunked
+ * automatically.
+ */
+ p9_client_clunk(fid);
+ deactivate_locked_super(sb);
+ return ERR_PTR(retval);
+}
+
+/**
+ * v9fs_kill_super - Kill Superblock
+ * @s: superblock
+ *
+ */
+
+static void v9fs_kill_super(struct super_block *s)
+{
+ struct v9fs_session_info *v9ses = s->s_fs_info;
+
+ p9_debug(P9_DEBUG_VFS, " %p\n", s);
+
+ kill_anon_super(s);
+
+ v9fs_session_cancel(v9ses);
+ v9fs_session_close(v9ses);
+ kfree(v9ses);
+ s->s_fs_info = NULL;
+ p9_debug(P9_DEBUG_VFS, "exiting kill_super\n");
+}
+
+static void
+v9fs_umount_begin(struct super_block *sb)
+{
+ struct v9fs_session_info *v9ses;
+
+ v9ses = sb->s_fs_info;
+ v9fs_session_begin_cancel(v9ses);
+}
+
+static int v9fs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct v9fs_session_info *v9ses;
+ struct p9_fid *fid;
+ struct p9_rstatfs rs;
+ int res;
+
+ fid = v9fs_fid_lookup(dentry);
+ if (IS_ERR(fid)) {
+ res = PTR_ERR(fid);
+ goto done;
+ }
+
+ v9ses = v9fs_dentry2v9ses(dentry);
+ if (v9fs_proto_dotl(v9ses)) {
+ res = p9_client_statfs(fid, &rs);
+ if (res == 0) {
+ buf->f_type = rs.type;
+ buf->f_bsize = rs.bsize;
+ buf->f_blocks = rs.blocks;
+ buf->f_bfree = rs.bfree;
+ buf->f_bavail = rs.bavail;
+ buf->f_files = rs.files;
+ buf->f_ffree = rs.ffree;
+ buf->f_fsid.val[0] = rs.fsid & 0xFFFFFFFFUL;
+ buf->f_fsid.val[1] = (rs.fsid >> 32) & 0xFFFFFFFFUL;
+ buf->f_namelen = rs.namelen;
+ }
+ if (res != -ENOSYS)
+ goto done;
+ }
+ res = simple_statfs(dentry, buf);
+done:
+ return res;
+}
+
+static int v9fs_drop_inode(struct inode *inode)
+{
+ struct v9fs_session_info *v9ses;
+ v9ses = v9fs_inode2v9ses(inode);
+ if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
+ return generic_drop_inode(inode);
+ /*
+ * in case of non cached mode always drop the
+ * inode because we want the inode attributes
+ * to always match those on the server.
+ */
+ return 1;
+}
+
+static int v9fs_write_inode(struct inode *inode,
+ struct writeback_control *wbc)
+{
+ int ret;
+ struct p9_wstat wstat;
+ struct v9fs_inode *v9inode;
+ /*
+ * send an fsync request to server irrespective of
+ * wbc->sync_mode.
+ */
+ p9_debug(P9_DEBUG_VFS, "%s: inode %p\n", __func__, inode);
+ v9inode = V9FS_I(inode);
+ if (!v9inode->writeback_fid)
+ return 0;
+ v9fs_blank_wstat(&wstat);
+
+ ret = p9_client_wstat(v9inode->writeback_fid, &wstat);
+ if (ret < 0) {
+ __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+ return ret;
+ }
+ return 0;
+}
+
+static int v9fs_write_inode_dotl(struct inode *inode,
+ struct writeback_control *wbc)
+{
+ int ret;
+ struct v9fs_inode *v9inode;
+ /*
+ * send an fsync request to server irrespective of
+ * wbc->sync_mode.
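+ * On failure the inode is re-dirtied with I_DIRTY_DATASYNC so that
+ * writeback will retry the fsync later.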
+ */ + v9inode = V9FS_I(inode); + p9_debug(P9_DEBUG_VFS, "%s: inode %p, writeback_fid %p\n", + __func__, inode, v9inode->writeback_fid); + if (!v9inode->writeback_fid) + return 0; + + ret = p9_client_fsync(v9inode->writeback_fid, 0); + if (ret < 0) { + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); + return ret; + } + return 0; +} + +static const struct super_operations v9fs_super_ops = { + .alloc_inode = v9fs_alloc_inode, + .destroy_inode = v9fs_destroy_inode, + .statfs = simple_statfs, + .evict_inode = v9fs_evict_inode, + .show_options = generic_show_options, + .umount_begin = v9fs_umount_begin, + .write_inode = v9fs_write_inode, +}; + +static const struct super_operations v9fs_super_ops_dotl = { + .alloc_inode = v9fs_alloc_inode, + .destroy_inode = v9fs_destroy_inode, + .statfs = v9fs_statfs, + .drop_inode = v9fs_drop_inode, + .evict_inode = v9fs_evict_inode, + .show_options = generic_show_options, + .umount_begin = v9fs_umount_begin, + .write_inode = v9fs_write_inode_dotl, +}; + +struct file_system_type v9fs_fs_type = { + .name = "9p", + .mount = v9fs_mount, + .kill_sb = v9fs_kill_super, + .owner = THIS_MODULE, + .fs_flags = FS_RENAME_DOES_D_MOVE, +}; +MODULE_ALIAS_FS("9p"); diff --git a/addons/9p/src/4.4.180/xattr.c b/addons/9p/src/4.4.180/xattr.c new file mode 100644 index 00000000..f35168ce --- /dev/null +++ b/addons/9p/src/4.4.180/xattr.c @@ -0,0 +1,195 @@ +/* + * Copyright IBM Corporation, 2010 + * Author Aneesh Kumar K.V + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + */ + +#include +#include +#include +#include +#include +#include + +#include "fid.h" +#include "xattr.h" + +ssize_t v9fs_fid_xattr_get(struct p9_fid *fid, const char *name, + void *buffer, size_t buffer_size) +{ + ssize_t retval; + u64 attr_size; + struct p9_fid *attr_fid; + struct kvec kvec = {.iov_base = buffer, .iov_len = buffer_size}; + struct iov_iter to; + int err; + + iov_iter_kvec(&to, READ | ITER_KVEC, &kvec, 1, buffer_size); + + attr_fid = p9_client_xattrwalk(fid, name, &attr_size); + if (IS_ERR(attr_fid)) { + retval = PTR_ERR(attr_fid); + p9_debug(P9_DEBUG_VFS, "p9_client_attrwalk failed %zd\n", + retval); + return retval; + } + if (attr_size > buffer_size) { + if (!buffer_size) /* request to get the attr_size */ + retval = attr_size; + else + retval = -ERANGE; + } else { + iov_iter_truncate(&to, attr_size); + retval = p9_client_read(attr_fid, 0, &to, &err); + if (err) + retval = err; + } + p9_client_clunk(attr_fid); + return retval; +} + + +/* + * v9fs_xattr_get() + * + * Copy an extended attribute into the buffer + * provided, or compute the buffer size required. + * Buffer is NULL to compute the size of the buffer required. + * + * Returns a negative error number on failure, or the number of bytes + * used / required on success. 
+ */ +ssize_t v9fs_xattr_get(struct dentry *dentry, const char *name, + void *buffer, size_t buffer_size) +{ + struct p9_fid *fid; + + p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu\n", + name, buffer_size); + fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + return v9fs_fid_xattr_get(fid, name, buffer, buffer_size); +} + +/* + * v9fs_xattr_set() + * + * Create, replace or remove an extended attribute for this inode. Buffer + * is NULL to remove an existing extended attribute, and non-NULL to + * either replace an existing extended attribute, or create a new extended + * attribute. The flags XATTR_REPLACE and XATTR_CREATE + * specify that an extended attribute must exist and must not exist + * previous to the call, respectively. + * + * Returns 0, or a negative error number on failure. + */ +int v9fs_xattr_set(struct dentry *dentry, const char *name, + const void *value, size_t value_len, int flags) +{ + struct p9_fid *fid = v9fs_fid_lookup(dentry); + if (IS_ERR(fid)) + return PTR_ERR(fid); + return v9fs_fid_xattr_set(fid, name, value, value_len, flags); +} + +int v9fs_fid_xattr_set(struct p9_fid *fid, const char *name, + const void *value, size_t value_len, int flags) +{ + struct kvec kvec = {.iov_base = (void *)value, .iov_len = value_len}; + struct iov_iter from; + int retval, err; + + iov_iter_kvec(&from, WRITE | ITER_KVEC, &kvec, 1, value_len); + + p9_debug(P9_DEBUG_VFS, "name = %s value_len = %zu flags = %d\n", + name, value_len, flags); + + /* Clone it */ + fid = p9_client_walk(fid, 0, NULL, 1); + if (IS_ERR(fid)) + return PTR_ERR(fid); + + /* + * On success fid points to xattr + */ + retval = p9_client_xattrcreate(fid, name, value_len, flags); + if (retval < 0) + p9_debug(P9_DEBUG_VFS, "p9_client_xattrcreate failed %d\n", + retval); + else + p9_client_write(fid, 0, &from, &retval); + err = p9_client_clunk(fid); + if (!retval && err) + retval = err; + return retval; +} + +ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) +{ + return v9fs_xattr_get(dentry, NULL, buffer, buffer_size); +} + +static int v9fs_xattr_handler_get(const struct xattr_handler *handler, + struct dentry *dentry, const char *name, + void *buffer, size_t size) +{ + const char *full_name = xattr_full_name(handler, name); + + if (strcmp(name, "") == 0) + return -EINVAL; + return v9fs_xattr_get(dentry, full_name, buffer, size); +} + +static int v9fs_xattr_handler_set(const struct xattr_handler *handler, + struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) +{ + const char *full_name = xattr_full_name(handler, name); + + if (strcmp(name, "") == 0) + return -EINVAL; + return v9fs_xattr_set(dentry, full_name, value, size, flags); +} + +static struct xattr_handler v9fs_xattr_user_handler = { + .prefix = XATTR_USER_PREFIX, + .get = v9fs_xattr_handler_get, + .set = v9fs_xattr_handler_set, +}; + +static struct xattr_handler v9fs_xattr_trusted_handler = { + .prefix = XATTR_TRUSTED_PREFIX, + .get = v9fs_xattr_handler_get, + .set = v9fs_xattr_handler_set, +}; + +#ifdef CONFIG_9P_FS_SECURITY +static struct xattr_handler v9fs_xattr_security_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .get = v9fs_xattr_handler_get, + .set = v9fs_xattr_handler_set, +}; +#endif + +const struct xattr_handler *v9fs_xattr_handlers[] = { + &v9fs_xattr_user_handler, + &v9fs_xattr_trusted_handler, +#ifdef CONFIG_9P_FS_POSIX_ACL + &v9fs_xattr_acl_access_handler, + &v9fs_xattr_acl_default_handler, +#endif +#ifdef CONFIG_9P_FS_SECURITY + 
&v9fs_xattr_security_handler, +#endif + NULL +}; diff --git a/addons/9p/src/4.4.180/xattr.h b/addons/9p/src/4.4.180/xattr.h new file mode 100644 index 00000000..c63c3bea --- /dev/null +++ b/addons/9p/src/4.4.180/xattr.h @@ -0,0 +1,34 @@ +/* + * Copyright IBM Corporation, 2010 + * Author Aneesh Kumar K.V + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2.1 of the GNU Lesser General Public License + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it would be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + * + */ +#ifndef FS_9P_XATTR_H +#define FS_9P_XATTR_H + +#include +#include +#include + +extern const struct xattr_handler *v9fs_xattr_handlers[]; +extern const struct xattr_handler v9fs_xattr_acl_access_handler; +extern const struct xattr_handler v9fs_xattr_acl_default_handler; + +extern ssize_t v9fs_fid_xattr_get(struct p9_fid *, const char *, + void *, size_t); +extern ssize_t v9fs_xattr_get(struct dentry *, const char *, + void *, size_t); +extern int v9fs_fid_xattr_set(struct p9_fid *, const char *, + const void *, size_t, int); +extern int v9fs_xattr_set(struct dentry *, const char *, + const void *, size_t, int); +extern ssize_t v9fs_listxattr(struct dentry *, char *, size_t); +#endif /* FS_9P_XATTR_H */ diff --git a/addons/acpid/all/addons/acpid.tgz b/addons/acpid/all/addons/acpid.tgz new file mode 100644 index 00000000..84fb7611 Binary files /dev/null and b/addons/acpid/all/addons/acpid.tgz differ diff --git a/addons/acpid/install.sh b/addons/acpid/install.sh new file mode 100644 index 00000000..e7ae16b7 --- /dev/null +++ b/addons/acpid/install.sh @@ -0,0 +1,12 @@ +if [ "${1}" = "sys" ]; then + echo "Installing module and daemon for ACPI button" + if [ ! 
-f /tmpRoot/lib/modules/button.ko ]; then + cp /modules/button.ko /tmpRoot/lib/modules/ + fi + tar -zxvf /addons/acpid.tgz -C /tmpRoot/ + chmod 755 /tmpRoot/usr/sbin/acpid + chmod 644 /tmpRoot/etc/acpi/events/power + chmod 744 /tmpRoot/etc/acpi/power.sh + chmod 744 /tmpRoot/usr/lib/systemd/system/acpid.service + ln -sf /usr/lib/systemd/system/acpid.service /tmpRoot/etc/systemd/system/multi-user.target.wants/acpid.service +fi diff --git a/addons/acpid/manifest.yml b/addons/acpid/manifest.yml new file mode 100644 index 00000000..e22a99a1 --- /dev/null +++ b/addons/acpid/manifest.yml @@ -0,0 +1,36 @@ +version: 1 +name: acpid +description: "Flexible and extensible daemon for delivering ACPI events" +available-for: + bromolow-3.10.108: + install-script: "install.sh" + copy: "all" + modules: true + apollolake-4.4.180: + install-script: "install.sh" + copy: "all" + modules: true + broadwell-4.4.180: + install-script: "install.sh" + copy: "all" + modules: true + broadwellnk-4.4.180: + install-script: "install.sh" + copy: "all" + modules: true + denverton-4.4.180: + install-script: "install.sh" + copy: "all" + modules: true + geminilake-4.4.180: + install-script: "install.sh" + copy: "all" + modules: true + v1000-4.4.180: + install-script: "install.sh" + copy: "all" + modules: true + purley-4.4.180: + install-script: "install.sh" + copy: "all" + modules: false diff --git a/addons/acpid/src/3.10.108/Makefile b/addons/acpid/src/3.10.108/Makefile new file mode 100644 index 00000000..fe863c2b --- /dev/null +++ b/addons/acpid/src/3.10.108/Makefile @@ -0,0 +1,46 @@ +# +# Makefile for the Linux ACPI interpreter +# + +# +# ACPI Boot-Time Table Parsing +# +#obj-$(CONFIG_X86) += blacklist.o + +# Power management related files +#acpi-$(CONFIG_ACPI_SLEEP) += proc.o + + +# +# ACPI Bus and Device Drivers +# +#acpi-$(CONFIG_ACPI_DOCK) += dock.o +#acpi-$(CONFIG_X86_INTEL_LPSS) += acpi_lpss.o +#acpi-$(CONFIG_X86) += acpi_cmos_rtc.o +#acpi-$(CONFIG_DEBUG_FS) += debugfs.o +acpi-$(CONFIG_ACPI_NUMA) += numa.o +#acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o + +# These are (potentially) separate modules + +# IPMI may be used by other drivers, so it has to initialise before them +#obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o + +#obj-$(CONFIG_ACPI_AC) += ac.o +obj-$(CONFIG_ACPI_BUTTON) += button.o +#obj-$(CONFIG_ACPI_FAN) += fan.o +#obj-$(CONFIG_ACPI_VIDEO) += video.o +#obj-$(CONFIG_ACPI_PCI_SLOT) += pci_slot.o +#obj-$(CONFIG_ACPI_CONTAINER) += container.o +#obj-$(CONFIG_ACPI_THERMAL) += thermal.o +#obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o +#obj-$(CONFIG_ACPI_BATTERY) += battery.o +#obj-$(CONFIG_ACPI_SBS) += sbshc.o +#obj-$(CONFIG_ACPI_SBS) += sbs.o +#obj-$(CONFIG_ACPI_HED) += hed.o +#obj-$(CONFIG_ACPI_EC_DEBUGFS) += ec_sys.o +#obj-$(CONFIG_ACPI_CUSTOM_METHOD)+= custom_method.o +#obj-$(CONFIG_ACPI_BGRT) += bgrt.o +#obj-$(CONFIG_ACPI_I2C) += acpi_i2c.o + +#obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o diff --git a/addons/acpid/src/3.10.108/button.c b/addons/acpid/src/3.10.108/button.c new file mode 100644 index 00000000..d2e617b5 --- /dev/null +++ b/addons/acpid/src/3.10.108/button.c @@ -0,0 +1,454 @@ +/* + * button.c - ACPI Button Driver + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of 
the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PREFIX "ACPI: " + +#define ACPI_BUTTON_CLASS "button" +#define ACPI_BUTTON_FILE_INFO "info" +#define ACPI_BUTTON_FILE_STATE "state" +#define ACPI_BUTTON_TYPE_UNKNOWN 0x00 +#define ACPI_BUTTON_NOTIFY_STATUS 0x80 + +#define ACPI_BUTTON_SUBCLASS_POWER "power" +#define ACPI_BUTTON_HID_POWER "PNP0C0C" +#define ACPI_BUTTON_DEVICE_NAME_POWER "Power Button" +#define ACPI_BUTTON_TYPE_POWER 0x01 + +#define ACPI_BUTTON_SUBCLASS_SLEEP "sleep" +#define ACPI_BUTTON_HID_SLEEP "PNP0C0E" +#define ACPI_BUTTON_DEVICE_NAME_SLEEP "Sleep Button" +#define ACPI_BUTTON_TYPE_SLEEP 0x03 + +#define ACPI_BUTTON_SUBCLASS_LID "lid" +#define ACPI_BUTTON_HID_LID "PNP0C0D" +#define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch" +#define ACPI_BUTTON_TYPE_LID 0x05 + +#define _COMPONENT ACPI_BUTTON_COMPONENT +ACPI_MODULE_NAME("button"); + +MODULE_AUTHOR("Paul Diefenbaugh"); +MODULE_DESCRIPTION("ACPI Button Driver"); +MODULE_LICENSE("GPL"); + +static const struct acpi_device_id button_device_ids[] = { + {ACPI_BUTTON_HID_LID, 0}, + {ACPI_BUTTON_HID_SLEEP, 0}, + {ACPI_BUTTON_HID_SLEEPF, 0}, + {ACPI_BUTTON_HID_POWER, 0}, + {ACPI_BUTTON_HID_POWERF, 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, button_device_ids); + +static int acpi_button_add(struct acpi_device *device); +static int acpi_button_remove(struct acpi_device *device); +static void acpi_button_notify(struct acpi_device *device, u32 event); + +#ifdef CONFIG_PM_SLEEP +static int acpi_button_resume(struct device *dev); +#endif +static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume); + +static struct acpi_driver acpi_button_driver = { + .name = "button", + .class = ACPI_BUTTON_CLASS, + .ids = button_device_ids, + .ops = { + .add = acpi_button_add, + .remove = acpi_button_remove, + .notify = acpi_button_notify, + }, + .drv.pm = &acpi_button_pm, +}; + +struct acpi_button { + unsigned int type; + struct input_dev *input; + char phys[32]; /* for input device */ + unsigned long pushed; + bool wakeup_enabled; +}; + +static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); +static struct acpi_device *lid_device; + +/* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +static struct proc_dir_entry *acpi_button_dir; +static struct proc_dir_entry *acpi_lid_dir; + +static int acpi_button_state_seq_show(struct seq_file *seq, void *offset) +{ + struct acpi_device *device = seq->private; + acpi_status status; + unsigned long long state; + + status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state); + seq_printf(seq, "state: %s\n", + ACPI_FAILURE(status) ? "unsupported" : + (state ? 
"open" : "closed")); + return 0; +} + +static int acpi_button_state_open_fs(struct inode *inode, struct file *file) +{ + return single_open(file, acpi_button_state_seq_show, PDE_DATA(inode)); +} + +static const struct file_operations acpi_button_state_fops = { + .owner = THIS_MODULE, + .open = acpi_button_state_open_fs, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int acpi_button_add_fs(struct acpi_device *device) +{ + struct acpi_button *button = acpi_driver_data(device); + struct proc_dir_entry *entry = NULL; + int ret = 0; + + /* procfs I/F for ACPI lid device only */ + if (button->type != ACPI_BUTTON_TYPE_LID) + return 0; + + if (acpi_button_dir || acpi_lid_dir) { + printk(KERN_ERR PREFIX "More than one Lid device found!\n"); + return -EEXIST; + } + + /* create /proc/acpi/button */ + acpi_button_dir = proc_mkdir(ACPI_BUTTON_CLASS, acpi_root_dir); + if (!acpi_button_dir) + return -ENODEV; + + /* create /proc/acpi/button/lid */ + acpi_lid_dir = proc_mkdir(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir); + if (!acpi_lid_dir) { + ret = -ENODEV; + goto remove_button_dir; + } + + /* create /proc/acpi/button/lid/LID/ */ + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), acpi_lid_dir); + if (!acpi_device_dir(device)) { + ret = -ENODEV; + goto remove_lid_dir; + } + + /* create /proc/acpi/button/lid/LID/state */ + entry = proc_create_data(ACPI_BUTTON_FILE_STATE, + S_IRUGO, acpi_device_dir(device), + &acpi_button_state_fops, device); + if (!entry) { + ret = -ENODEV; + goto remove_dev_dir; + } + +done: + return ret; + +remove_dev_dir: + remove_proc_entry(acpi_device_bid(device), + acpi_lid_dir); + acpi_device_dir(device) = NULL; +remove_lid_dir: + remove_proc_entry(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir); +remove_button_dir: + remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir); + goto done; +} + +static int acpi_button_remove_fs(struct acpi_device *device) +{ + struct acpi_button *button = acpi_driver_data(device); + + if (button->type != ACPI_BUTTON_TYPE_LID) + return 0; + + remove_proc_entry(ACPI_BUTTON_FILE_STATE, + acpi_device_dir(device)); + remove_proc_entry(acpi_device_bid(device), + acpi_lid_dir); + acpi_device_dir(device) = NULL; + remove_proc_entry(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir); + remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir); + + return 0; +} + +/* -------------------------------------------------------------------------- + Driver Interface + -------------------------------------------------------------------------- */ +int acpi_lid_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&acpi_lid_notifier, nb); +} +EXPORT_SYMBOL(acpi_lid_notifier_register); + +int acpi_lid_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb); +} +EXPORT_SYMBOL(acpi_lid_notifier_unregister); + +int acpi_lid_open(void) +{ + acpi_status status; + unsigned long long state; + + if (!lid_device) + return -ENODEV; + + status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL, + &state); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return !!state; +} +EXPORT_SYMBOL(acpi_lid_open); + +static int acpi_lid_send_state(struct acpi_device *device) +{ + struct acpi_button *button = acpi_driver_data(device); + unsigned long long state; + acpi_status status; + int ret; + + status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state); + if (ACPI_FAILURE(status)) + return -ENODEV; + + /* input layer checks if event is redundant */ + 
input_report_switch(button->input, SW_LID, !state); + input_sync(button->input); + + if (state) + pm_wakeup_event(&device->dev, 0); + + ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); + if (ret == NOTIFY_DONE) + ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, + device); + if (ret == NOTIFY_DONE || ret == NOTIFY_OK) { + /* + * It is also regarded as success if the notifier_chain + * returns NOTIFY_OK or NOTIFY_DONE. + */ + ret = 0; + } + return ret; +} + +static void acpi_button_notify(struct acpi_device *device, u32 event) +{ + struct acpi_button *button = acpi_driver_data(device); + struct input_dev *input; + + switch (event) { + case ACPI_FIXED_HARDWARE_EVENT: + event = ACPI_BUTTON_NOTIFY_STATUS; + /* fall through */ + case ACPI_BUTTON_NOTIFY_STATUS: + input = button->input; + if (button->type == ACPI_BUTTON_TYPE_LID) { + acpi_lid_send_state(device); + } else { + int keycode = test_bit(KEY_SLEEP, input->keybit) ? + KEY_SLEEP : KEY_POWER; + + input_report_key(input, keycode, 1); + input_sync(input); + input_report_key(input, keycode, 0); + input_sync(input); + + pm_wakeup_event(&device->dev, 0); + } + + acpi_bus_generate_proc_event(device, event, ++button->pushed); + break; + default: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Unsupported event [0x%x]\n", event)); + break; + } +} + +#ifdef CONFIG_PM_SLEEP +static int acpi_button_resume(struct device *dev) +{ + struct acpi_device *device = to_acpi_device(dev); + struct acpi_button *button = acpi_driver_data(device); + + if (button->type == ACPI_BUTTON_TYPE_LID) + return acpi_lid_send_state(device); + return 0; +} +#endif + +static int acpi_button_add(struct acpi_device *device) +{ + struct acpi_button *button; + struct input_dev *input; + const char *hid = acpi_device_hid(device); + char *name, *class; + int error; + + button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL); + if (!button) + return -ENOMEM; + + device->driver_data = button; + + button->input = input = input_allocate_device(); + if (!input) { + error = -ENOMEM; + goto err_free_button; + } + + name = acpi_device_name(device); + class = acpi_device_class(device); + + if (!strcmp(hid, ACPI_BUTTON_HID_POWER) || + !strcmp(hid, ACPI_BUTTON_HID_POWERF)) { + button->type = ACPI_BUTTON_TYPE_POWER; + strcpy(name, ACPI_BUTTON_DEVICE_NAME_POWER); + sprintf(class, "%s/%s", + ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER); + } else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEP) || + !strcmp(hid, ACPI_BUTTON_HID_SLEEPF)) { + button->type = ACPI_BUTTON_TYPE_SLEEP; + strcpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP); + sprintf(class, "%s/%s", + ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP); + } else if (!strcmp(hid, ACPI_BUTTON_HID_LID)) { + button->type = ACPI_BUTTON_TYPE_LID; + strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID); + sprintf(class, "%s/%s", + ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID); + } else { + printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid); + error = -ENODEV; + goto err_free_input; + } + + error = acpi_button_add_fs(device); + if (error) + goto err_free_input; + + snprintf(button->phys, sizeof(button->phys), "%s/button/input0", hid); + + input->name = name; + input->phys = button->phys; + input->id.bustype = BUS_HOST; + input->id.product = button->type; + input->dev.parent = &device->dev; + + switch (button->type) { + case ACPI_BUTTON_TYPE_POWER: + input->evbit[0] = BIT_MASK(EV_KEY); + set_bit(KEY_POWER, input->keybit); + break; + + case ACPI_BUTTON_TYPE_SLEEP: + input->evbit[0] = BIT_MASK(EV_KEY); + set_bit(KEY_SLEEP, input->keybit); + break; + + 
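+ /* the lid is a switch, not a key: report open/closed state via EV_SW */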
case ACPI_BUTTON_TYPE_LID: + input->evbit[0] = BIT_MASK(EV_SW); + set_bit(SW_LID, input->swbit); + break; + } + + error = input_register_device(input); + if (error) + goto err_remove_fs; + if (button->type == ACPI_BUTTON_TYPE_LID) { + acpi_lid_send_state(device); + /* + * This assumes there's only one lid device, or if there are + * more we only care about the last one... + */ + lid_device = device; + } + + if (device->wakeup.flags.valid) { + /* Button's GPE is run-wake GPE */ + acpi_enable_gpe(device->wakeup.gpe_device, + device->wakeup.gpe_number); + if (!device_may_wakeup(&device->dev)) { + device_set_wakeup_enable(&device->dev, true); + button->wakeup_enabled = true; + } + } + + printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); + return 0; + + err_remove_fs: + acpi_button_remove_fs(device); + err_free_input: + input_free_device(input); + err_free_button: + kfree(button); + return error; +} + +static int acpi_button_remove(struct acpi_device *device) +{ + struct acpi_button *button = acpi_driver_data(device); + + if (device->wakeup.flags.valid) { + acpi_disable_gpe(device->wakeup.gpe_device, + device->wakeup.gpe_number); + if (button->wakeup_enabled) + device_set_wakeup_enable(&device->dev, false); + } + + acpi_button_remove_fs(device); + input_unregister_device(button->input); + kfree(button); + return 0; +} + +module_acpi_driver(acpi_button_driver); diff --git a/addons/acpid/src/4.4.180/Makefile b/addons/acpid/src/4.4.180/Makefile new file mode 100644 index 00000000..2fff3349 --- /dev/null +++ b/addons/acpid/src/4.4.180/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ACPI_BUTTON) += button.o diff --git a/addons/acpid/src/4.4.180/Module.symvers b/addons/acpid/src/4.4.180/Module.symvers new file mode 100644 index 00000000..e69de29b diff --git a/addons/acpid/src/4.4.180/button.c b/addons/acpid/src/4.4.180/button.c new file mode 100644 index 00000000..5c3b0918 --- /dev/null +++ b/addons/acpid/src/4.4.180/button.c @@ -0,0 +1,449 @@ +/* + * button.c - ACPI Button Driver + * + * Copyright (C) 2001, 2002 Andy Grover + * Copyright (C) 2001, 2002 Paul Diefenbaugh + * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PREFIX "ACPI: " + +#define ACPI_BUTTON_CLASS "button" +#define ACPI_BUTTON_FILE_INFO "info" +#define ACPI_BUTTON_FILE_STATE "state" +#define ACPI_BUTTON_TYPE_UNKNOWN 0x00 +#define ACPI_BUTTON_NOTIFY_STATUS 0x80 + +#define ACPI_BUTTON_SUBCLASS_POWER "power" +#define ACPI_BUTTON_HID_POWER "PNP0C0C" +#define ACPI_BUTTON_DEVICE_NAME_POWER "Power Button" +#define ACPI_BUTTON_TYPE_POWER 0x01 + +#define ACPI_BUTTON_SUBCLASS_SLEEP "sleep" +#define ACPI_BUTTON_HID_SLEEP "PNP0C0E" +#define ACPI_BUTTON_DEVICE_NAME_SLEEP "Sleep Button" +#define ACPI_BUTTON_TYPE_SLEEP 0x03 + +#define ACPI_BUTTON_SUBCLASS_LID "lid" +#define ACPI_BUTTON_HID_LID "PNP0C0D" +#define ACPI_BUTTON_DEVICE_NAME_LID "Lid Switch" +#define ACPI_BUTTON_TYPE_LID 0x05 + +#define _COMPONENT ACPI_BUTTON_COMPONENT +ACPI_MODULE_NAME("button"); + +MODULE_AUTHOR("Paul Diefenbaugh"); +MODULE_DESCRIPTION("ACPI Button Driver"); +MODULE_LICENSE("GPL"); + +static const struct acpi_device_id button_device_ids[] = { + {ACPI_BUTTON_HID_LID, 0}, + {ACPI_BUTTON_HID_SLEEP, 0}, + {ACPI_BUTTON_HID_SLEEPF, 0}, + {ACPI_BUTTON_HID_POWER, 0}, + {ACPI_BUTTON_HID_POWERF, 0}, + {"", 0}, +}; +MODULE_DEVICE_TABLE(acpi, button_device_ids); + +static int acpi_button_add(struct acpi_device *device); +static int acpi_button_remove(struct acpi_device *device); +static void acpi_button_notify(struct acpi_device *device, u32 event); + +#ifdef CONFIG_PM_SLEEP +static int acpi_button_suspend(struct device *dev); +static int acpi_button_resume(struct device *dev); +#else +#define acpi_button_suspend NULL +#define acpi_button_resume NULL +#endif +static SIMPLE_DEV_PM_OPS(acpi_button_pm, acpi_button_suspend, acpi_button_resume); + +static struct acpi_driver acpi_button_driver = { + .name = "button", + .class = ACPI_BUTTON_CLASS, + .ids = button_device_ids, + .ops = { + .add = acpi_button_add, + .remove = acpi_button_remove, + .notify = acpi_button_notify, + }, + .drv.pm = &acpi_button_pm, +}; + +struct acpi_button { + unsigned int type; + struct input_dev *input; + char phys[32]; /* for input device */ + unsigned long pushed; + bool suspended; +}; + +static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); +static struct acpi_device *lid_device; + +/* -------------------------------------------------------------------------- + FS Interface (/proc) + -------------------------------------------------------------------------- */ + +static struct proc_dir_entry *acpi_button_dir; +static struct proc_dir_entry *acpi_lid_dir; + +static int acpi_button_state_seq_show(struct seq_file *seq, void *offset) +{ + struct acpi_device *device = seq->private; + acpi_status status; + unsigned long long state; + + status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state); + seq_printf(seq, "state: %s\n", + ACPI_FAILURE(status) ? "unsupported" : + (state ? 
"open" : "closed")); + return 0; +} + +static int acpi_button_state_open_fs(struct inode *inode, struct file *file) +{ + return single_open(file, acpi_button_state_seq_show, PDE_DATA(inode)); +} + +static const struct file_operations acpi_button_state_fops = { + .owner = THIS_MODULE, + .open = acpi_button_state_open_fs, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int acpi_button_add_fs(struct acpi_device *device) +{ + struct acpi_button *button = acpi_driver_data(device); + struct proc_dir_entry *entry = NULL; + int ret = 0; + + /* procfs I/F for ACPI lid device only */ + if (button->type != ACPI_BUTTON_TYPE_LID) + return 0; + + if (acpi_button_dir || acpi_lid_dir) { + printk(KERN_ERR PREFIX "More than one Lid device found!\n"); + return -EEXIST; + } + + /* create /proc/acpi/button */ + acpi_button_dir = proc_mkdir(ACPI_BUTTON_CLASS, acpi_root_dir); + if (!acpi_button_dir) + return -ENODEV; + + /* create /proc/acpi/button/lid */ + acpi_lid_dir = proc_mkdir(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir); + if (!acpi_lid_dir) { + ret = -ENODEV; + goto remove_button_dir; + } + + /* create /proc/acpi/button/lid/LID/ */ + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), acpi_lid_dir); + if (!acpi_device_dir(device)) { + ret = -ENODEV; + goto remove_lid_dir; + } + + /* create /proc/acpi/button/lid/LID/state */ + entry = proc_create_data(ACPI_BUTTON_FILE_STATE, + S_IRUGO, acpi_device_dir(device), + &acpi_button_state_fops, device); + if (!entry) { + ret = -ENODEV; + goto remove_dev_dir; + } + +done: + return ret; + +remove_dev_dir: + remove_proc_entry(acpi_device_bid(device), + acpi_lid_dir); + acpi_device_dir(device) = NULL; +remove_lid_dir: + remove_proc_entry(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir); +remove_button_dir: + remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir); + goto done; +} + +static int acpi_button_remove_fs(struct acpi_device *device) +{ + struct acpi_button *button = acpi_driver_data(device); + + if (button->type != ACPI_BUTTON_TYPE_LID) + return 0; + + remove_proc_entry(ACPI_BUTTON_FILE_STATE, + acpi_device_dir(device)); + remove_proc_entry(acpi_device_bid(device), + acpi_lid_dir); + acpi_device_dir(device) = NULL; + remove_proc_entry(ACPI_BUTTON_SUBCLASS_LID, acpi_button_dir); + remove_proc_entry(ACPI_BUTTON_CLASS, acpi_root_dir); + + return 0; +} + +/* -------------------------------------------------------------------------- + Driver Interface + -------------------------------------------------------------------------- */ +int acpi_lid_notifier_register(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&acpi_lid_notifier, nb); +} +EXPORT_SYMBOL(acpi_lid_notifier_register); + +int acpi_lid_notifier_unregister(struct notifier_block *nb) +{ + return blocking_notifier_chain_unregister(&acpi_lid_notifier, nb); +} +EXPORT_SYMBOL(acpi_lid_notifier_unregister); + +int acpi_lid_open(void) +{ + acpi_status status; + unsigned long long state; + + if (!lid_device) + return -ENODEV; + + status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL, + &state); + if (ACPI_FAILURE(status)) + return -ENODEV; + + return !!state; +} +EXPORT_SYMBOL(acpi_lid_open); + +static int acpi_lid_send_state(struct acpi_device *device) +{ + struct acpi_button *button = acpi_driver_data(device); + unsigned long long state; + acpi_status status; + int ret; + + status = acpi_evaluate_integer(device->handle, "_LID", NULL, &state); + if (ACPI_FAILURE(status)) + return -ENODEV; + + /* input layer checks if event is redundant */ + 
input_report_switch(button->input, SW_LID, !state); + input_sync(button->input); + + if (state) + pm_wakeup_event(&device->dev, 0); + + ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); + if (ret == NOTIFY_DONE) + ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, + device); + if (ret == NOTIFY_DONE || ret == NOTIFY_OK) { + /* + * It is also regarded as success if the notifier_chain + * returns NOTIFY_OK or NOTIFY_DONE. + */ + ret = 0; + } + return ret; +} + +static void acpi_button_notify(struct acpi_device *device, u32 event) +{ + struct acpi_button *button = acpi_driver_data(device); + struct input_dev *input; + + switch (event) { + case ACPI_FIXED_HARDWARE_EVENT: + event = ACPI_BUTTON_NOTIFY_STATUS; + /* fall through */ + case ACPI_BUTTON_NOTIFY_STATUS: + input = button->input; + if (button->type == ACPI_BUTTON_TYPE_LID) { + acpi_lid_send_state(device); + } else { + int keycode; + + pm_wakeup_event(&device->dev, 0); + if (button->suspended) + break; + + keycode = test_bit(KEY_SLEEP, input->keybit) ? + KEY_SLEEP : KEY_POWER; + input_report_key(input, keycode, 1); + input_sync(input); + input_report_key(input, keycode, 0); + input_sync(input); + + acpi_bus_generate_netlink_event( + device->pnp.device_class, + dev_name(&device->dev), + event, ++button->pushed); + } + break; + default: + ACPI_DEBUG_PRINT((ACPI_DB_INFO, + "Unsupported event [0x%x]\n", event)); + break; + } +} + +#ifdef CONFIG_PM_SLEEP +static int acpi_button_suspend(struct device *dev) +{ + struct acpi_device *device = to_acpi_device(dev); + struct acpi_button *button = acpi_driver_data(device); + + button->suspended = true; + return 0; +} + +static int acpi_button_resume(struct device *dev) +{ + struct acpi_device *device = to_acpi_device(dev); + struct acpi_button *button = acpi_driver_data(device); + + button->suspended = false; + if (button->type == ACPI_BUTTON_TYPE_LID) + return acpi_lid_send_state(device); + return 0; +} +#endif + +static int acpi_button_add(struct acpi_device *device) +{ + struct acpi_button *button; + struct input_dev *input; + const char *hid = acpi_device_hid(device); + char *name, *class; + int error; + + button = kzalloc(sizeof(struct acpi_button), GFP_KERNEL); + if (!button) + return -ENOMEM; + + device->driver_data = button; + + button->input = input = input_allocate_device(); + if (!input) { + error = -ENOMEM; + goto err_free_button; + } + + name = acpi_device_name(device); + class = acpi_device_class(device); + + if (!strcmp(hid, ACPI_BUTTON_HID_POWER) || + !strcmp(hid, ACPI_BUTTON_HID_POWERF)) { + button->type = ACPI_BUTTON_TYPE_POWER; + strcpy(name, ACPI_BUTTON_DEVICE_NAME_POWER); + sprintf(class, "%s/%s", + ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_POWER); + } else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEP) || + !strcmp(hid, ACPI_BUTTON_HID_SLEEPF)) { + button->type = ACPI_BUTTON_TYPE_SLEEP; + strcpy(name, ACPI_BUTTON_DEVICE_NAME_SLEEP); + sprintf(class, "%s/%s", + ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_SLEEP); + } else if (!strcmp(hid, ACPI_BUTTON_HID_LID)) { + button->type = ACPI_BUTTON_TYPE_LID; + strcpy(name, ACPI_BUTTON_DEVICE_NAME_LID); + sprintf(class, "%s/%s", + ACPI_BUTTON_CLASS, ACPI_BUTTON_SUBCLASS_LID); + } else { + printk(KERN_ERR PREFIX "Unsupported hid [%s]\n", hid); + error = -ENODEV; + goto err_free_input; + } + + error = acpi_button_add_fs(device); + if (error) + goto err_free_input; + + snprintf(button->phys, sizeof(button->phys), "%s/button/input0", hid); + + input->name = name; + input->phys = button->phys; + input->id.bustype = BUS_HOST; + 
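+ /* the ACPI button type doubles as the input product id, letting userspace tell power/sleep/lid devices apart */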
input->id.product = button->type; + input->dev.parent = &device->dev; + + switch (button->type) { + case ACPI_BUTTON_TYPE_POWER: + input_set_capability(input, EV_KEY, KEY_POWER); + break; + + case ACPI_BUTTON_TYPE_SLEEP: + input_set_capability(input, EV_KEY, KEY_SLEEP); + break; + + case ACPI_BUTTON_TYPE_LID: + input_set_capability(input, EV_SW, SW_LID); + break; + } + + error = input_register_device(input); + if (error) + goto err_remove_fs; + if (button->type == ACPI_BUTTON_TYPE_LID) { + acpi_lid_send_state(device); + /* + * This assumes there's only one lid device, or if there are + * more we only care about the last one... + */ + lid_device = device; + } + + printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); + return 0; + + err_remove_fs: + acpi_button_remove_fs(device); + err_free_input: + input_free_device(input); + err_free_button: + kfree(button); + return error; +} + +static int acpi_button_remove(struct acpi_device *device) +{ + struct acpi_button *button = acpi_driver_data(device); + + acpi_button_remove_fs(device); + input_unregister_device(button->input); + kfree(button); + return 0; +} + +module_acpi_driver(acpi_button_driver); diff --git a/addons/acpid/src/4.4.180/modules.order b/addons/acpid/src/4.4.180/modules.order new file mode 100644 index 00000000..e69de29b diff --git a/addons/atl1c/install.sh b/addons/atl1c/install.sh new file mode 100644 index 00000000..035b4149 --- /dev/null +++ b/addons/atl1c/install.sh @@ -0,0 +1,4 @@ +if [ "${1}" = "rd" ]; then + echo "Installing module for Atheros L1C Gigabit Ethernet adapter" + ${INSMOD} "/modules/atl1c.ko" ${PARAMS} +fi diff --git a/addons/atl1c/manifest.yml b/addons/atl1c/manifest.yml new file mode 100644 index 00000000..d2a309c2 --- /dev/null +++ b/addons/atl1c/manifest.yml @@ -0,0 +1,28 @@ +version: 1 +name: atl1c +description: "Driver for Atheros L1C Gigabit Ethernet adapters" +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true diff --git a/addons/atl1c/src/3.10.108/Makefile b/addons/atl1c/src/3.10.108/Makefile new file mode 100644 index 00000000..d6fff141 --- /dev/null +++ b/addons/atl1c/src/3.10.108/Makefile @@ -0,0 +1,2 @@ +obj-m += atl1c.o +atl1c-objs := atl1c_main.o atl1c_hw.o atl1c_ethtool.o diff --git a/addons/atl1c/src/3.10.108/atl1c.h b/addons/atl1c/src/3.10.108/atl1c.h new file mode 100644 index 00000000..0f055652 --- /dev/null +++ b/addons/atl1c/src/3.10.108/atl1c.h @@ -0,0 +1,606 @@ +/* + * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _ATL1C_H_ +#define _ATL1C_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atl1c_hw.h" + +/* Wake Up Filter Control */ +#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ +#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +#define AT_VLAN_TO_TAG(_vlan, _tag) \ + _tag = ((((_vlan) >> 8) & 0xFF) |\ + (((_vlan) & 0xFF) << 8)) + +#define AT_TAG_TO_VLAN(_tag, _vlan) \ + _vlan = ((((_tag) >> 8) & 0xFF) |\ + (((_tag) & 0xFF) << 8)) + +#define SPEED_0 0xffff +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN) +#define MAX_JUMBO_FRAME_SIZE (6*1024) + +#define AT_MAX_RECEIVE_QUEUE 4 +#define AT_DEF_RECEIVE_QUEUE 1 +#define AT_MAX_TRANSMIT_QUEUE 2 + +#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL +#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL + +#define AT_TX_WATCHDOG (5 * HZ) +#define AT_MAX_INT_WORK 5 +#define AT_TWSI_EEPROM_TIMEOUT 100 +#define AT_HW_MAX_IDLE_DELAY 10 +#define AT_SUSPEND_LINK_TIMEOUT 100 + +#define AT_ASPM_L0S_TIMER 6 +#define AT_ASPM_L1_TIMER 12 +#define AT_LCKDET_TIMER 12 + +#define ATL1C_PCIE_L0S_L1_DISABLE 0x01 +#define ATL1C_PCIE_PHY_RESET 0x02 + +#define ATL1C_ASPM_L0s_ENABLE 0x0001 +#define ATL1C_ASPM_L1_ENABLE 0x0002 + +#define AT_REGS_LEN (74 * sizeof(u32)) +#define AT_EEPROM_LEN 512 + +#define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i])) +#define ATL1C_RFD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_rx_free_desc) +#define ATL1C_TPD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_tpd_desc) +#define ATL1C_RRD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_recv_ret_status) + +/* tpd word 1 bit 0:7 General Checksum task offload */ +#define TPD_L4HDR_OFFSET_MASK 0x00FF +#define TPD_L4HDR_OFFSET_SHIFT 0 + +/* tpd word 1 bit 0:7 Large Send task offload (IPv4/IPV6) */ +#define TPD_TCPHDR_OFFSET_MASK 0x00FF +#define TPD_TCPHDR_OFFSET_SHIFT 0 + +/* tpd word 1 bit 0:7 Custom Checksum task offload */ +#define TPD_PLOADOFFSET_MASK 0x00FF +#define TPD_PLOADOFFSET_SHIFT 0 + +/* tpd word 1 bit 8:17 */ +#define TPD_CCSUM_EN_MASK 0x0001 +#define TPD_CCSUM_EN_SHIFT 8 +#define TPD_IP_CSUM_MASK 0x0001 +#define TPD_IP_CSUM_SHIFT 9 +#define TPD_TCP_CSUM_MASK 0x0001 +#define TPD_TCP_CSUM_SHIFT 10 +#define TPD_UDP_CSUM_MASK 0x0001 +#define TPD_UDP_CSUM_SHIFT 11 +#define TPD_LSO_EN_MASK 0x0001 /* TCP Large Send Offload */ +#define TPD_LSO_EN_SHIFT 12 +#define TPD_LSO_VER_MASK 0x0001 +#define TPD_LSO_VER_SHIFT 13 /* 0 : ipv4; 1 : ipv4/ipv6 */ +#define TPD_CON_VTAG_MASK 0x0001 +#define TPD_CON_VTAG_SHIFT 14 +#define TPD_INS_VTAG_MASK 0x0001 +#define TPD_INS_VTAG_SHIFT 15 +#define TPD_IPV4_PACKET_MASK 0x0001 /* valid when LSO VER is 1 */ +#define TPD_IPV4_PACKET_SHIFT 16 +#define TPD_ETH_TYPE_MASK 0x0001 +#define TPD_ETH_TYPE_SHIFT 17 /* 0 : 802.3 frame; 1 : Ethernet */ + +/* 
tpd word 18:25 Custom Checksum task offload */ +#define TPD_CCSUM_OFFSET_MASK 0x00FF +#define TPD_CCSUM_OFFSET_SHIFT 18 +#define TPD_CCSUM_EPAD_MASK 0x0001 +#define TPD_CCSUM_EPAD_SHIFT 30 + +/* tpd word 18:30 Large Send task offload (IPv4/IPV6) */ +#define TPD_MSS_MASK 0x1FFF +#define TPD_MSS_SHIFT 18 + +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 31 + +struct atl1c_tpd_desc { + __le16 buffer_len; /* include 4-byte CRC */ + __le16 vlan_tag; + __le32 word1; + __le64 buffer_addr; +}; + +struct atl1c_tpd_ext_desc { + u32 reservd_0; + __le32 word1; + __le32 pkt_len; + u32 reservd_1; +}; +/* rrs word 0 bit 0:31 */ +#define RRS_RX_CSUM_MASK 0xFFFF +#define RRS_RX_CSUM_SHIFT 0 +#define RRS_RX_RFD_CNT_MASK 0x000F +#define RRS_RX_RFD_CNT_SHIFT 16 +#define RRS_RX_RFD_INDEX_MASK 0x0FFF +#define RRS_RX_RFD_INDEX_SHIFT 20 + +/* rrs flag bit 0:16 */ +#define RRS_HEAD_LEN_MASK 0x00FF +#define RRS_HEAD_LEN_SHIFT 0 +#define RRS_HDS_TYPE_MASK 0x0003 +#define RRS_HDS_TYPE_SHIFT 8 +#define RRS_CPU_NUM_MASK 0x0003 +#define RRS_CPU_NUM_SHIFT 10 +#define RRS_HASH_FLG_MASK 0x000F +#define RRS_HASH_FLG_SHIFT 12 + +#define RRS_HDS_TYPE_HEAD 1 +#define RRS_HDS_TYPE_DATA 2 + +#define RRS_IS_NO_HDS_TYPE(flag) \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == 0) + +#define RRS_IS_HDS_HEAD(flag) \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \ + RRS_HDS_TYPE_HEAD) + +#define RRS_IS_HDS_DATA(flag) \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \ + RRS_HDS_TYPE_DATA) + +/* rrs word 3 bit 0:31 */ +#define RRS_PKT_SIZE_MASK 0x3FFF +#define RRS_PKT_SIZE_SHIFT 0 +#define RRS_ERR_L4_CSUM_MASK 0x0001 +#define RRS_ERR_L4_CSUM_SHIFT 14 +#define RRS_ERR_IP_CSUM_MASK 0x0001 +#define RRS_ERR_IP_CSUM_SHIFT 15 +#define RRS_VLAN_INS_MASK 0x0001 +#define RRS_VLAN_INS_SHIFT 16 +#define RRS_PROT_ID_MASK 0x0007 +#define RRS_PROT_ID_SHIFT 17 +#define RRS_RX_ERR_SUM_MASK 0x0001 +#define RRS_RX_ERR_SUM_SHIFT 20 +#define RRS_RX_ERR_CRC_MASK 0x0001 +#define RRS_RX_ERR_CRC_SHIFT 21 +#define RRS_RX_ERR_FAE_MASK 0x0001 +#define RRS_RX_ERR_FAE_SHIFT 22 +#define RRS_RX_ERR_TRUNC_MASK 0x0001 +#define RRS_RX_ERR_TRUNC_SHIFT 23 +#define RRS_RX_ERR_RUNC_MASK 0x0001 +#define RRS_RX_ERR_RUNC_SHIFT 24 +#define RRS_RX_ERR_ICMP_MASK 0x0001 +#define RRS_RX_ERR_ICMP_SHIFT 25 +#define RRS_PACKET_BCAST_MASK 0x0001 +#define RRS_PACKET_BCAST_SHIFT 26 +#define RRS_PACKET_MCAST_MASK 0x0001 +#define RRS_PACKET_MCAST_SHIFT 27 +#define RRS_PACKET_TYPE_MASK 0x0001 +#define RRS_PACKET_TYPE_SHIFT 28 +#define RRS_FIFO_FULL_MASK 0x0001 +#define RRS_FIFO_FULL_SHIFT 29 +#define RRS_802_3_LEN_ERR_MASK 0x0001 +#define RRS_802_3_LEN_ERR_SHIFT 30 +#define RRS_RXD_UPDATED_MASK 0x0001 +#define RRS_RXD_UPDATED_SHIFT 31 + +#define RRS_ERR_L4_CSUM 0x00004000 +#define RRS_ERR_IP_CSUM 0x00008000 +#define RRS_VLAN_INS 0x00010000 +#define RRS_RX_ERR_SUM 0x00100000 +#define RRS_RX_ERR_CRC 0x00200000 +#define RRS_802_3_LEN_ERR 0x40000000 +#define RRS_RXD_UPDATED 0x80000000 + +#define RRS_PACKET_TYPE_802_3 1 +#define RRS_PACKET_TYPE_ETH 0 +#define RRS_PACKET_IS_ETH(word) \ + ((((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK) == \ + RRS_PACKET_TYPE_ETH) +#define RRS_RXD_IS_VALID(word) \ + ((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1) + +#define RRS_PACKET_PROT_IS_IPV4_ONLY(word) \ + ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 1) +#define RRS_PACKET_PROT_IS_IPV6_ONLY(word) \ + ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 6) + +struct atl1c_recv_ret_status { + __le32 word0; + __le32 rss_hash; 
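+ /* vlan_tag and flag together form rrs word 2 (see the RRS_* masks above) */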
+ __le16 vlan_tag; + __le16 flag; + __le32 word3; +}; + +/* RFD descriptor */ +struct atl1c_rx_free_desc { + __le64 buffer_addr; +}; + +/* DMA Order Settings */ +enum atl1c_dma_order { + atl1c_dma_ord_in = 1, + atl1c_dma_ord_enh = 2, + atl1c_dma_ord_out = 4 +}; + +enum atl1c_dma_rcb { + atl1c_rcb_64 = 0, + atl1c_rcb_128 = 1 +}; + +enum atl1c_mac_speed { + atl1c_mac_speed_0 = 0, + atl1c_mac_speed_10_100 = 1, + atl1c_mac_speed_1000 = 2 +}; + +enum atl1c_dma_req_block { + atl1c_dma_req_128 = 0, + atl1c_dma_req_256 = 1, + atl1c_dma_req_512 = 2, + atl1c_dma_req_1024 = 3, + atl1c_dma_req_2048 = 4, + atl1c_dma_req_4096 = 5 +}; + + +enum atl1c_nic_type { + athr_l1c = 0, + athr_l2c = 1, + athr_l2c_b, + athr_l2c_b2, + athr_l1d, + athr_l1d_2, +}; + +enum atl1c_trans_queue { + atl1c_trans_normal = 0, + atl1c_trans_high = 1 +}; + +struct atl1c_hw_stats { + /* rx */ + unsigned long rx_ok; /* The number of good packet received. */ + unsigned long rx_bcast; /* The number of good broadcast packet received. */ + unsigned long rx_mcast; /* The number of good multicast packet received. */ + unsigned long rx_pause; /* The number of Pause packet received. */ + unsigned long rx_ctrl; /* The number of Control packet received other than Pause frame. */ + unsigned long rx_fcs_err; /* The number of packets with bad FCS. */ + unsigned long rx_len_err; /* The number of packets with mismatch of length field and actual size. */ + unsigned long rx_byte_cnt; /* The number of bytes of good packet received. FCS is NOT included. */ + unsigned long rx_runt; /* The number of packets received that are less than 64 byte long and with good FCS. */ + unsigned long rx_frag; /* The number of packets received that are less than 64 byte long and with bad FCS. */ + unsigned long rx_sz_64; /* The number of good and bad packets received that are 64 byte long. */ + unsigned long rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. */ + unsigned long rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */ + unsigned long rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. */ + unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */ + unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */ + unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */ + unsigned long rx_sz_ov; /* The number of good and bad packets received that are more than MTU size truncated by Selene. */ + unsigned long rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */ + unsigned long rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */ + unsigned long rx_align_err; /* Alignment Error */ + unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */ + unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */ + unsigned long rx_err_addr; /* The number of packets dropped due to address filtering. */ + + /* tx */ + unsigned long tx_ok; /* The number of good packet transmitted. */ + unsigned long tx_bcast; /* The number of good broadcast packet transmitted. */ + unsigned long tx_mcast; /* The number of good multicast packet transmitted. */ + unsigned long tx_pause; /* The number of Pause packet transmitted. 
*/ + unsigned long tx_exc_defer; /* The number of packets transmitted with excessive deferral. */ + unsigned long tx_ctrl; /* The number of packets transmitted is a control frame, excluding Pause frame. */ + unsigned long tx_defer; /* The number of packets transmitted that is deferred. */ + unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */ + unsigned long tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */ + unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */ + unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */ + unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */ + unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */ + unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */ + unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */ + unsigned long tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */ + unsigned long tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */ + unsigned long tx_late_col; /* The number of packets transmitted with late collisions. */ + unsigned long tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */ + unsigned long tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */ + unsigned long tx_rd_eop; /* The number of times that read beyond the EOP into the next frame area when TRD was not written timely */ + unsigned long tx_len_err; /* The number of transmit packets with length field does NOT match the actual frame size. */ + unsigned long tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. */ + unsigned long tx_bcast_byte; /* The byte count of broadcast packet transmitted, excluding FCS. */ + unsigned long tx_mcast_byte; /* The byte count of multicast packet transmitted, excluding FCS. 
*/ +}; + +struct atl1c_hw { + u8 __iomem *hw_addr; /* inner register address */ + struct atl1c_adapter *adapter; + enum atl1c_nic_type nic_type; + enum atl1c_dma_order dma_order; + enum atl1c_dma_rcb rcb_value; + enum atl1c_dma_req_block dmar_block; + + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u16 phy_id1; + u16 phy_id2; + + u32 intr_mask; + + u8 preamble_len; + u16 max_frame_size; + u16 min_frame_size; + + enum atl1c_mac_speed mac_speed; + bool mac_duplex; + bool hibernate; + u16 media_type; +#define MEDIA_TYPE_AUTO_SENSOR 0 +#define MEDIA_TYPE_100M_FULL 1 +#define MEDIA_TYPE_100M_HALF 2 +#define MEDIA_TYPE_10M_FULL 3 +#define MEDIA_TYPE_10M_HALF 4 + + u16 autoneg_advertised; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + u16 tx_imt; /* TX Interrupt Moderator timer ( 2us resolution) */ + u16 rx_imt; /* RX Interrupt Moderator timer ( 2us resolution) */ + u16 ict; /* Interrupt Clear timer (2us resolution) */ + u16 ctrl_flags; +#define ATL1C_INTR_CLEAR_ON_READ 0x0001 +#define ATL1C_INTR_MODRT_ENABLE 0x0002 +#define ATL1C_CMB_ENABLE 0x0004 +#define ATL1C_SMB_ENABLE 0x0010 +#define ATL1C_TXQ_MODE_ENHANCE 0x0020 +#define ATL1C_RX_IPV6_CHKSUM 0x0040 +#define ATL1C_ASPM_L0S_SUPPORT 0x0080 +#define ATL1C_ASPM_L1_SUPPORT 0x0100 +#define ATL1C_ASPM_CTRL_MON 0x0200 +#define ATL1C_HIB_DISABLE 0x0400 +#define ATL1C_APS_MODE_ENABLE 0x0800 +#define ATL1C_LINK_EXT_SYNC 0x1000 +#define ATL1C_CLK_GATING_EN 0x2000 +#define ATL1C_FPGA_VERSION 0x8000 + u16 link_cap_flags; +#define ATL1C_LINK_CAP_1000M 0x0001 + u32 smb_timer; + + u16 rrd_thresh; /* Threshold of number of RRD produced to trigger + interrupt request */ + u16 tpd_thresh; + u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */ + u8 rfd_burst; + u32 base_cpu; + u32 indirect_tab; + u8 mac_addr[ETH_ALEN]; + u8 perm_mac_addr[ETH_ALEN]; + + bool phy_configured; + bool re_autoneg; + bool emi_ca; + bool msi_lnkpatch; /* link patch for specific platforms */ +}; + +/* + * atl1c_ring_header represents a single, contiguous block of DMA space + * mapped for the three descriptor rings (tpd, rfd, rrd) described below + */ +struct atl1c_ring_header { + void *desc; /* virtual address */ + dma_addr_t dma; /* physical address*/ + unsigned int size; /* length in bytes */ +}; + +/* + * atl1c_buffer is wrapper around a pointer to a socket buffer + * so a DMA handle can be stored along with the skb + */ +struct atl1c_buffer { + struct sk_buff *skb; /* socket buffer */ + u16 length; /* rx buffer length */ + u16 flags; /* information of buffer */ +#define ATL1C_BUFFER_FREE 0x0001 +#define ATL1C_BUFFER_BUSY 0x0002 +#define ATL1C_BUFFER_STATE_MASK 0x0003 + +#define ATL1C_PCIMAP_SINGLE 0x0004 +#define ATL1C_PCIMAP_PAGE 0x0008 +#define ATL1C_PCIMAP_TYPE_MASK 0x000C + +#define ATL1C_PCIMAP_TODEVICE 0x0010 +#define ATL1C_PCIMAP_FROMDEVICE 0x0020 +#define ATL1C_PCIMAP_DIRECTION_MASK 0x0030 + dma_addr_t dma; +}; + +#define ATL1C_SET_BUFFER_STATE(buff, state) do { \ + ((buff)->flags) &= ~ATL1C_BUFFER_STATE_MASK; \ + ((buff)->flags) |= (state); \ + } while (0) + +#define ATL1C_SET_PCIMAP_TYPE(buff, type, direction) do { \ + ((buff)->flags) &= ~ATL1C_PCIMAP_TYPE_MASK; \ + ((buff)->flags) |= (type); \ + ((buff)->flags) &= ~ATL1C_PCIMAP_DIRECTION_MASK; \ + ((buff)->flags) |= (direction); \ + } while (0) + +/* transimit packet descriptor (tpd) ring */ +struct atl1c_tpd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* 
descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; /* this is protectd by adapter->tx_lock */ + atomic_t next_to_clean; + struct atl1c_buffer *buffer_info; +}; + +/* receive free descriptor (rfd) ring */ +struct atl1c_rfd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; + u16 next_to_clean; + struct atl1c_buffer *buffer_info; +}; + +/* receive return descriptor (rrd) ring */ +struct atl1c_rrd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; + u16 next_to_clean; +}; + +/* board specific private data structure */ +struct atl1c_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct napi_struct napi; + struct page *rx_page; + unsigned int rx_page_offset; + unsigned int rx_frag_size; + struct atl1c_hw hw; + struct atl1c_hw_stats hw_stats; + struct mii_if_info mii; /* MII interface info */ + u16 rx_buffer_len; + + unsigned long flags; +#define __AT_TESTING 0x0001 +#define __AT_RESETTING 0x0002 +#define __AT_DOWN 0x0003 + unsigned long work_event; +#define ATL1C_WORK_EVENT_RESET 0 +#define ATL1C_WORK_EVENT_LINK_CHANGE 1 + u32 msg_enable; + + bool have_msi; + u32 wol; + u16 link_speed; + u16 link_duplex; + + spinlock_t mdio_lock; + spinlock_t tx_lock; + atomic_t irq_sem; + + struct work_struct common_task; + struct timer_list watchdog_timer; + struct timer_list phy_config_timer; + + /* All Descriptor memory */ + struct atl1c_ring_header ring_header; + struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE]; + struct atl1c_rfd_ring rfd_ring; + struct atl1c_rrd_ring rrd_ring; + u32 bd_number; /* board number;*/ +}; + +#define AT_WRITE_REG(a, reg, value) ( \ + writel((value), ((a)->hw_addr + reg))) + +#define AT_WRITE_FLUSH(a) (\ + readl((a)->hw_addr)) + +#define AT_READ_REG(a, reg, pdata) do { \ + if (unlikely((a)->hibernate)) { \ + readl((a)->hw_addr + reg); \ + *(u32 *)pdata = readl((a)->hw_addr + reg); \ + } else { \ + *(u32 *)pdata = readl((a)->hw_addr + reg); \ + } \ + } while (0) + +#define AT_WRITE_REGB(a, reg, value) (\ + writeb((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGB(a, reg) (\ + readb((a)->hw_addr + reg)) + +#define AT_WRITE_REGW(a, reg, value) (\ + writew((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGW(a, reg, pdata) do { \ + if (unlikely((a)->hibernate)) { \ + readw((a)->hw_addr + reg); \ + *(u16 *)pdata = readw((a)->hw_addr + reg); \ + } else { \ + *(u16 *)pdata = readw((a)->hw_addr + reg); \ + } \ + } while (0) + +#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), (((a)->hw_addr + reg) + ((offset) << 2)))) + +#define AT_READ_REG_ARRAY(a, reg, offset) ( \ + readl(((a)->hw_addr + reg) + ((offset) << 2))) + +extern char atl1c_driver_name[]; +extern char atl1c_driver_version[]; + +extern void atl1c_reinit_locked(struct atl1c_adapter *adapter); +extern s32 atl1c_reset_hw(struct atl1c_hw *hw); +extern void atl1c_set_ethtool_ops(struct net_device *netdev); +#endif /* _ATL1C_H_ */ diff --git a/addons/atl1c/src/3.10.108/atl1c_ethtool.c b/addons/atl1c/src/3.10.108/atl1c_ethtool.c new file mode 100644 index 00000000..859ea844 --- /dev/null +++ b/addons/atl1c/src/3.10.108/atl1c_ethtool.c @@ -0,0 +1,309 @@ +/* + * 
Copyright(c) 2009 - 2009 Atheros Corporation. All rights reserved.
+ *
+ * Derived from Intel e1000 driver
+ * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/slab.h>
+
+#include "atl1c.h"
+
+static int atl1c_get_settings(struct net_device *netdev,
+                              struct ethtool_cmd *ecmd)
+{
+    struct atl1c_adapter *adapter = netdev_priv(netdev);
+    struct atl1c_hw *hw = &adapter->hw;
+
+    ecmd->supported = (SUPPORTED_10baseT_Half |
+                       SUPPORTED_10baseT_Full |
+                       SUPPORTED_100baseT_Half |
+                       SUPPORTED_100baseT_Full |
+                       SUPPORTED_Autoneg |
+                       SUPPORTED_TP);
+    if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M)
+        ecmd->supported |= SUPPORTED_1000baseT_Full;
+
+    ecmd->advertising = ADVERTISED_TP;
+
+    ecmd->advertising |= hw->autoneg_advertised;
+
+    ecmd->port = PORT_TP;
+    ecmd->phy_address = 0;
+    ecmd->transceiver = XCVR_INTERNAL;
+
+    if (adapter->link_speed != SPEED_0) {
+        ethtool_cmd_speed_set(ecmd, adapter->link_speed);
+        if (adapter->link_duplex == FULL_DUPLEX)
+            ecmd->duplex = DUPLEX_FULL;
+        else
+            ecmd->duplex = DUPLEX_HALF;
+    } else {
+        ethtool_cmd_speed_set(ecmd, -1);
+        ecmd->duplex = -1;
+    }
+
+    ecmd->autoneg = AUTONEG_ENABLE;
+    return 0;
+}
+
+static int atl1c_set_settings(struct net_device *netdev,
+                              struct ethtool_cmd *ecmd)
+{
+    struct atl1c_adapter *adapter = netdev_priv(netdev);
+    struct atl1c_hw *hw = &adapter->hw;
+    u16 autoneg_advertised;
+
+    while (test_and_set_bit(__AT_RESETTING, &adapter->flags))
+        msleep(1);
+
+    if (ecmd->autoneg == AUTONEG_ENABLE) {
+        autoneg_advertised = ADVERTISED_Autoneg;
+    } else {
+        u32 speed = ethtool_cmd_speed(ecmd);
+        if (speed == SPEED_1000) {
+            if (ecmd->duplex != DUPLEX_FULL) {
+                if (netif_msg_link(adapter))
+                    dev_warn(&adapter->pdev->dev,
+                             "1000M half is invalid\n");
+                clear_bit(__AT_RESETTING, &adapter->flags);
+                return -EINVAL;
+            }
+            autoneg_advertised = ADVERTISED_1000baseT_Full;
+        } else if (speed == SPEED_100) {
+            if (ecmd->duplex == DUPLEX_FULL)
+                autoneg_advertised = ADVERTISED_100baseT_Full;
+            else
+                autoneg_advertised = ADVERTISED_100baseT_Half;
+        } else {
+            if (ecmd->duplex == DUPLEX_FULL)
+                autoneg_advertised = ADVERTISED_10baseT_Full;
+            else
+                autoneg_advertised = ADVERTISED_10baseT_Half;
+        }
+    }
+
+    if (hw->autoneg_advertised != autoneg_advertised) {
+        hw->autoneg_advertised = autoneg_advertised;
+        if (atl1c_restart_autoneg(hw) != 0) {
+            if (netif_msg_link(adapter))
+                dev_warn(&adapter->pdev->dev,
+                         "ethtool speed/duplex setting failed\n");
+            clear_bit(__AT_RESETTING, &adapter->flags);
+            return -EINVAL;
+        }
+    }
+    clear_bit(__AT_RESETTING, &adapter->flags);
+    return 0;
+}
+
+static u32 atl1c_get_msglevel(struct net_device *netdev)
+{
+    struct atl1c_adapter *adapter = netdev_priv(netdev);
+    return adapter->msg_enable;
+}
+
+static void atl1c_set_msglevel(struct net_device *netdev, u32
data) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int atl1c_get_regs_len(struct net_device *netdev) +{ + return AT_REGS_LEN; +} + +static void atl1c_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, AT_REGS_LEN); + + regs->version = 1; + AT_READ_REG(hw, REG_PM_CTRL, p++); + AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++); + AT_READ_REG(hw, REG_TWSI_CTRL, p++); + AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL, p++); + AT_READ_REG(hw, REG_MASTER_CTRL, p++); + AT_READ_REG(hw, REG_MANUAL_TIMER_INIT, p++); + AT_READ_REG(hw, REG_IRQ_MODRT_TIMER_INIT, p++); + AT_READ_REG(hw, REG_GPHY_CTRL, p++); + AT_READ_REG(hw, REG_LINK_CTRL, p++); + AT_READ_REG(hw, REG_IDLE_STATUS, p++); + AT_READ_REG(hw, REG_MDIO_CTRL, p++); + AT_READ_REG(hw, REG_SERDES, p++); + AT_READ_REG(hw, REG_MAC_CTRL, p++); + AT_READ_REG(hw, REG_MAC_IPG_IFG, p++); + AT_READ_REG(hw, REG_MAC_STA_ADDR, p++); + AT_READ_REG(hw, REG_MAC_STA_ADDR+4, p++); + AT_READ_REG(hw, REG_RX_HASH_TABLE, p++); + AT_READ_REG(hw, REG_RX_HASH_TABLE+4, p++); + AT_READ_REG(hw, REG_RXQ_CTRL, p++); + AT_READ_REG(hw, REG_TXQ_CTRL, p++); + AT_READ_REG(hw, REG_MTU, p++); + AT_READ_REG(hw, REG_WOL_CTRL, p++); + + atl1c_read_phy_reg(hw, MII_BMCR, &phy_data); + regs_buff[AT_REGS_LEN/sizeof(u32) - 2] = (u32) phy_data; + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + regs_buff[AT_REGS_LEN/sizeof(u32) - 1] = (u32) phy_data; +} + +static int atl1c_get_eeprom_len(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (atl1c_check_eeprom_exist(&adapter->hw)) + return AT_EEPROM_LEN; + else + return 0; +} + +static int atl1c_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u32 *eeprom_buff; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EINVAL; + + if (!atl1c_check_eeprom_exist(hw)) /* not exist */ + return -EINVAL; + + eeprom->magic = adapter->pdev->vendor | + (adapter->pdev->device << 16); + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + + eeprom_buff = kmalloc(sizeof(u32) * + (last_dword - first_dword + 1), GFP_KERNEL); + if (eeprom_buff == NULL) + return -ENOMEM; + + for (i = first_dword; i < last_dword; i++) { + if (!atl1c_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) { + kfree(eeprom_buff); + return -EIO; + } + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; + return 0; +} + +static void atl1c_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, atl1c_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = 0; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = atl1c_get_regs_len(netdev); + drvinfo->eedump_len = atl1c_get_eeprom_len(netdev); +} + +static void atl1c_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC | WAKE_PHY; + 
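/* Editor's note (descriptive comment, not in the original file): adapter->wol
 * caches the device-specific AT_WUFC_* wake-up filter bits; the block below
 * simply translates whatever is cached into the generic ethtool WAKE_* flags,
 * even though only WAKE_MAGIC and WAKE_PHY are reported as supported above. */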
wol->wolopts = 0; + + if (adapter->wol & AT_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & AT_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & AT_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & AT_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & AT_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | + WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) + return -EOPNOTSUPP; + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= AT_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= AT_WUFC_LNKC; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int atl1c_nway_reset(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + atl1c_reinit_locked(adapter); + return 0; +} + +static const struct ethtool_ops atl1c_ethtool_ops = { + .get_settings = atl1c_get_settings, + .set_settings = atl1c_set_settings, + .get_drvinfo = atl1c_get_drvinfo, + .get_regs_len = atl1c_get_regs_len, + .get_regs = atl1c_get_regs, + .get_wol = atl1c_get_wol, + .set_wol = atl1c_set_wol, + .get_msglevel = atl1c_get_msglevel, + .set_msglevel = atl1c_set_msglevel, + .nway_reset = atl1c_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = atl1c_get_eeprom_len, + .get_eeprom = atl1c_get_eeprom, +}; + +void atl1c_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops); +} diff --git a/addons/atl1c/src/3.10.108/atl1c_hw.c b/addons/atl1c/src/3.10.108/atl1c_hw.c new file mode 100644 index 00000000..3ef7092e --- /dev/null +++ b/addons/atl1c/src/3.10.108/atl1c_hw.c @@ -0,0 +1,865 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+
+#include "atl1c.h"
+
+/*
+ * check_eeprom_exist
+ * return 1 if the eeprom exists
+ */
+int atl1c_check_eeprom_exist(struct atl1c_hw *hw)
+{
+    u32 data;
+
+    AT_READ_REG(hw, REG_TWSI_DEBUG, &data);
+    if (data & TWSI_DEBUG_DEV_EXIST)
+        return 1;
+
+    AT_READ_REG(hw, REG_MASTER_CTRL, &data);
+    if (data & MASTER_CTRL_OTP_SEL)
+        return 1;
+    return 0;
+}
+
+void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr)
+{
+    u32 value;
+    /*
+     * 00-0B-6A-F6-00-DC
+     * 0: 6AF600DC 1: 000B
+     * low dword
+     */
+    value = mac_addr[2] << 24 |
+            mac_addr[3] << 16 |
+            mac_addr[4] << 8 |
+            mac_addr[5];
+    AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value);
+    /* high dword */
+    value = mac_addr[0] << 8 |
+            mac_addr[1];
+    AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value);
+}
+
+/* read mac address from hardware register */
+static bool atl1c_read_current_addr(struct atl1c_hw *hw, u8 *eth_addr)
+{
+    u32 addr[2];
+
+    AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]);
+    AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]);
+
+    *(u32 *) &eth_addr[2] = htonl(addr[0]);
+    *(u16 *) &eth_addr[0] = htons((u16)addr[1]);
+
+    return is_valid_ether_addr(eth_addr);
+}
+
+/*
+ * atl1c_get_permanent_address
+ * return 0 if a valid MAC address was obtained
+ */
+static int atl1c_get_permanent_address(struct atl1c_hw *hw)
+{
+    u32 i;
+    u32 otp_ctrl_data;
+    u32 twsi_ctrl_data;
+    u16 phy_data;
+    bool raise_vol = false;
+
+    /* MAC-address from BIOS is the 1st priority */
+    if (atl1c_read_current_addr(hw, hw->perm_mac_addr))
+        return 0;
+
+    /* init */
+    AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
+    if (atl1c_check_eeprom_exist(hw)) {
+        if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
+            /* Enable OTP CLK */
+            if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
+                otp_ctrl_data |= OTP_CTRL_CLK_EN;
+                AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+                AT_WRITE_FLUSH(hw);
+                msleep(1);
+            }
+        }
+        /* raise voltage temporarily for l2cb */
+        if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) {
+            atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data);
+            phy_data &= ~ANACTRL_HB_EN;
+            atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data);
+            atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data);
+            phy_data |= VOLT_CTRL_SWLOWEST;
+            atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data);
+            udelay(20);
+            raise_vol = true;
+        }
+
+        AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
+        twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART;
+        AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data);
+        for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) {
+            msleep(10);
+            AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
+            if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0)
+                break;
+        }
+        if (i >= AT_TWSI_EEPROM_TIMEOUT)
+            return -1;
+    }
+    /* Disable OTP_CLK */
+    if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) {
+        otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
+        AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+        msleep(1);
+    }
+    if (raise_vol) {
+        atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data);
+        phy_data |= ANACTRL_HB_EN;
+        atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data);
+        atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data);
+        phy_data &= ~VOLT_CTRL_SWLOWEST;
+        atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data);
+        udelay(20);
+    }
+
+    if (atl1c_read_current_addr(hw, hw->perm_mac_addr))
+        return 0;
+
+    return -1;
+}
+
+bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value)
+{
+    int i;
+    int ret = false;
+    u32 otp_ctrl_data;
+    u32 control;
+    u32 data;
+
+    if (offset & 3)
+        return ret; /* address is not aligned */
+
+    AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
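/* Editor's note (descriptive comment, not in the original file): the sequence
 * below is: temporarily enable the OTP clock if it was gated off, latch the
 * dword-aligned address into REG_EEPROM_CTRL, poll up to 10 x 100us for the
 * EEPROM_CTRL_RW completion bit, then stitch the 16-bit high half from
 * REG_EEPROM_CTRL together with REG_EEPROM_DATA_LO and swab32() the result
 * into host order before restoring the original OTP clock state. */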
+    if (!(otp_ctrl_data & OTP_CTRL_CLK_EN))
+        AT_WRITE_REG(hw, REG_OTP_CTRL,
+                     (otp_ctrl_data | OTP_CTRL_CLK_EN));
+
+    AT_WRITE_REG(hw, REG_EEPROM_DATA_LO, 0);
+    control = (offset & EEPROM_CTRL_ADDR_MASK) << EEPROM_CTRL_ADDR_SHIFT;
+    AT_WRITE_REG(hw, REG_EEPROM_CTRL, control);
+
+    for (i = 0; i < 10; i++) {
+        udelay(100);
+        AT_READ_REG(hw, REG_EEPROM_CTRL, &control);
+        if (control & EEPROM_CTRL_RW)
+            break;
+    }
+    if (control & EEPROM_CTRL_RW) {
+        AT_READ_REG(hw, REG_EEPROM_CTRL, &data);
+        AT_READ_REG(hw, REG_EEPROM_DATA_LO, p_value);
+        data = data & 0xFFFF;
+        *p_value = swab32((data << 16) | (*p_value >> 16));
+        ret = true;
+    }
+    if (!(otp_ctrl_data & OTP_CTRL_CLK_EN))
+        AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+
+    return ret;
+}
+/*
+ * Reads the adapter's MAC address from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ */
+int atl1c_read_mac_addr(struct atl1c_hw *hw)
+{
+    int err = 0;
+
+    err = atl1c_get_permanent_address(hw);
+    if (err)
+        eth_random_addr(hw->perm_mac_addr);
+
+    memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr));
+    return err;
+}
+
+/*
+ * atl1c_hash_mc_addr
+ * purpose
+ *   set the hash value for a multicast address
+ * hash calculation process:
+ *   1. calculate a 32-bit CRC for the multicast address
+ *   2. reverse the CRC bit order (MSB to LSB)
+ */
+u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr)
+{
+    u32 crc32;
+    u32 value = 0;
+    int i;
+
+    crc32 = ether_crc_le(6, mc_addr);
+    for (i = 0; i < 32; i++)
+        value |= (((crc32 >> i) & 1) << (31 - i));
+
+    return value;
+}
+
+/*
+ * Sets the bit in the multicast table corresponding to the hash value.
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ */
+void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value)
+{
+    u32 hash_bit, hash_reg;
+    u32 mta;
+
+    /*
+     * The HASH Table is a register array of 2 32-bit registers.
+     * It is treated like an array of 64 bits. We want to set
+     * bit BitArray[hash_value]. So we figure out what register
+     * the bit is in, read it, OR in the new bit, then write
+     * back the new value. The register is selected by the top
+     * bit of the hash value, and the bit within that register
+     * by the next five bits (30:26) of the value.
+     */
+    hash_reg = (hash_value >> 31) & 0x1;
+    hash_bit = (hash_value >> 26) & 0x1F;
+
+    mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg);
+
+    mta |= (1 << hash_bit);
+
+    AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta);
+}
+
+/*
+ * wait for the MDIO module to become idle
+ * return true:  idle
+ *        false: still busy
+ */
+bool atl1c_wait_mdio_idle(struct atl1c_hw *hw)
+{
+    u32 val;
+    int i;
+
+    for (i = 0; i < MDIO_MAX_AC_TO; i++) {
+        AT_READ_REG(hw, REG_MDIO_CTRL, &val);
+        if (!(val & (MDIO_CTRL_BUSY | MDIO_CTRL_START)))
+            break;
+        udelay(10);
+    }
+
+    return i != MDIO_MAX_AC_TO;
+}
+
+void atl1c_stop_phy_polling(struct atl1c_hw *hw)
+{
+    if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
+        return;
+
+    AT_WRITE_REG(hw, REG_MDIO_CTRL, 0);
+    atl1c_wait_mdio_idle(hw);
+}
+
+void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel)
+{
+    u32 val;
+
+    if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION))
+        return;
+
+    val = MDIO_CTRL_SPRES_PRMBL |
+          FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
+          FIELDX(MDIO_CTRL_REG, 1) |
+          MDIO_CTRL_START |
+          MDIO_CTRL_OP_READ;
+    AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+    atl1c_wait_mdio_idle(hw);
+    val |= MDIO_CTRL_AP_EN;
+    val &= ~MDIO_CTRL_START;
+    AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+    udelay(30);
+}
+
+
+/*
+ * atl1c_read_phy_core
+ * core function to read a PHY register via the MDIO control register.
+ * ext: extension register (see IEEE 802.3)
+ * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0)
+ * reg: reg to read
+ */
+int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
+                        u16 reg, u16 *phy_data)
+{
+    u32 val;
+    u16 clk_sel = MDIO_CTRL_CLK_25_4;
+
+    atl1c_stop_phy_polling(hw);
+
+    *phy_data = 0;
+
+    /* only l2c_b2 & l1d_2 could use slow clock */
+    if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) &&
+        hw->hibernate)
+        clk_sel = MDIO_CTRL_CLK_25_128;
+    if (ext) {
+        val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg);
+        AT_WRITE_REG(hw, REG_MDIO_EXTN, val);
+        val = MDIO_CTRL_SPRES_PRMBL |
+              FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
+              MDIO_CTRL_START |
+              MDIO_CTRL_MODE_EXT |
+              MDIO_CTRL_OP_READ;
+    } else {
+        val = MDIO_CTRL_SPRES_PRMBL |
+              FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
+              FIELDX(MDIO_CTRL_REG, reg) |
+              MDIO_CTRL_START |
+              MDIO_CTRL_OP_READ;
+    }
+    AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+
+    if (!atl1c_wait_mdio_idle(hw))
+        return -1;
+
+    AT_READ_REG(hw, REG_MDIO_CTRL, &val);
+    *phy_data = (u16)FIELD_GETX(val, MDIO_CTRL_DATA);
+
+    atl1c_start_phy_polling(hw, clk_sel);
+
+    return 0;
+}
+
+/*
+ * atl1c_write_phy_core
+ * core function to write a PHY register via the MDIO control register.
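 * (Editor's note, not in the original comment: when ext is true the access
 * goes through REG_MDIO_EXTN, clause-45 style, with a 5-bit device address
 * and a 16-bit register number; when false, the 5-bit register number is
 * packed straight into the MDIO_CTRL_REG field of REG_MDIO_CTRL.)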
+ * ext: extension register (see IEEE 802.3) + * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) + * reg: reg to write + */ +int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, + u16 reg, u16 phy_data) +{ + u32 val; + u16 clk_sel = MDIO_CTRL_CLK_25_4; + + atl1c_stop_phy_polling(hw); + + + /* only l2c_b2 & l1d_2 could use slow clock */ + if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) && + hw->hibernate) + clk_sel = MDIO_CTRL_CLK_25_128; + + if (ext) { + val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg); + AT_WRITE_REG(hw, REG_MDIO_EXTN, val); + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_DATA, phy_data) | + MDIO_CTRL_START | + MDIO_CTRL_MODE_EXT; + } else { + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_DATA, phy_data) | + FIELDX(MDIO_CTRL_REG, reg) | + MDIO_CTRL_START; + } + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + + if (!atl1c_wait_mdio_idle(hw)) + return -1; + + atl1c_start_phy_polling(hw, clk_sel); + + return 0; +} + +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) +{ + return atl1c_read_phy_core(hw, false, 0, reg_addr, phy_data); +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data) +{ + return atl1c_write_phy_core(hw, false, 0, reg_addr, phy_data); +} + +/* read from PHY extension register */ +int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 *phy_data) +{ + return atl1c_read_phy_core(hw, true, dev_addr, reg_addr, phy_data); +} + +/* write to PHY extension register */ +int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 phy_data) +{ + return atl1c_write_phy_core(hw, true, dev_addr, reg_addr, phy_data); +} + +int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) +{ + int err; + + err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr); + if (unlikely(err)) + return err; + else + err = atl1c_read_phy_reg(hw, MII_DBG_DATA, phy_data); + + return err; +} + +int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data) +{ + int err; + + err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr); + if (unlikely(err)) + return err; + else + err = atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data); + + return err; +} + +/* + * Configures PHY autoneg and flow control advertisement settings + * + * hw - Struct containing variables accessed by shared code + */ +static int atl1c_phy_setup_adv(struct atl1c_hw *hw) +{ + u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL; + u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP & + ~GIGA_CR_1000T_SPEED_MASK; + + if (hw->autoneg_advertised & ADVERTISED_10baseT_Half) + mii_adv_data |= ADVERTISE_10HALF; + if (hw->autoneg_advertised & ADVERTISED_10baseT_Full) + mii_adv_data |= ADVERTISE_10FULL; + if (hw->autoneg_advertised & ADVERTISED_100baseT_Half) + mii_adv_data |= ADVERTISE_100HALF; + if (hw->autoneg_advertised & ADVERTISED_100baseT_Full) + mii_adv_data |= ADVERTISE_100FULL; + + if (hw->autoneg_advertised & ADVERTISED_Autoneg) + mii_adv_data |= ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL; + + if 
(hw->link_cap_flags & ATL1C_LINK_CAP_1000M) { + if (hw->autoneg_advertised & ADVERTISED_1000baseT_Half) + mii_giga_ctrl_data |= ADVERTISE_1000HALF; + if (hw->autoneg_advertised & ADVERTISED_1000baseT_Full) + mii_giga_ctrl_data |= ADVERTISE_1000FULL; + if (hw->autoneg_advertised & ADVERTISED_Autoneg) + mii_giga_ctrl_data |= ADVERTISE_1000HALF | + ADVERTISE_1000FULL; + } + + if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 || + atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0) + return -1; + return 0; +} + +void atl1c_phy_disable(struct atl1c_hw *hw) +{ + atl1c_power_saving(hw, 0); +} + + +int atl1c_phy_reset(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + u16 phy_data; + u32 phy_ctrl_data, lpi_ctrl; + int err; + + /* reset PHY core */ + AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl_data); + phy_ctrl_data &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_PHY_IDDQ | + GPHY_CTRL_GATE_25M_EN | GPHY_CTRL_PWDOWN_HW | GPHY_CTRL_CLS); + phy_ctrl_data |= GPHY_CTRL_SEL_ANA_RST; + if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE)) + phy_ctrl_data |= (GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE); + else + phy_ctrl_data &= ~(GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data); + AT_WRITE_FLUSH(hw); + udelay(10); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data | GPHY_CTRL_EXT_RESET); + AT_WRITE_FLUSH(hw); + udelay(10 * GPHY_CTRL_EXT_RST_TO); /* delay 800us */ + + /* switch clock */ + if (hw->nic_type == athr_l2c_b) { + atl1c_read_phy_dbg(hw, MIIDBG_CFGLPSPD, &phy_data); + atl1c_write_phy_dbg(hw, MIIDBG_CFGLPSPD, + phy_data & ~CFGLPSPD_RSTCNT_CLK125SW); + } + + /* tx-half amplitude issue fix */ + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { + atl1c_read_phy_dbg(hw, MIIDBG_CABLE1TH_DET, &phy_data); + phy_data |= CABLE1TH_DET_EN; + atl1c_write_phy_dbg(hw, MIIDBG_CABLE1TH_DET, phy_data); + } + + /* clear bit3 of dbgport 3B to lower voltage */ + if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE)) { + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { + atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data); + phy_data &= ~VOLT_CTRL_SWLOWEST; + atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data); + } + /* power saving config */ + phy_data = + hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 ? 
+ L1D_LEGCYPS_DEF : L1C_LEGCYPS_DEF; + atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS, phy_data); + /* hib */ + atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, + SYSMODCTRL_IECHOADJ_DEF); + } else { + /* disable pws */ + atl1c_read_phy_dbg(hw, MIIDBG_LEGCYPS, &phy_data); + atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS, + phy_data & ~LEGCYPS_EN); + /* disable hibernate */ + atl1c_read_phy_dbg(hw, MIIDBG_HIBNEG, &phy_data); + atl1c_write_phy_dbg(hw, MIIDBG_HIBNEG, + phy_data & HIBNEG_PSHIB_EN); + } + /* disable AZ(EEE) by default */ + if (hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 || + hw->nic_type == athr_l2c_b2) { + AT_READ_REG(hw, REG_LPI_CTRL, &lpi_ctrl); + AT_WRITE_REG(hw, REG_LPI_CTRL, lpi_ctrl & ~LPI_CTRL_EN); + atl1c_write_phy_ext(hw, MIIEXT_ANEG, MIIEXT_LOCAL_EEEADV, 0); + atl1c_write_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL3, + L2CB_CLDCTRL3); + } + + /* other debug port to set */ + atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, ANACTRL_DEF); + atl1c_write_phy_dbg(hw, MIIDBG_SRDSYSMOD, SRDSYSMOD_DEF); + atl1c_write_phy_dbg(hw, MIIDBG_TST10BTCFG, TST10BTCFG_DEF); + /* UNH-IOL test issue, set bit7 */ + atl1c_write_phy_dbg(hw, MIIDBG_TST100BTCFG, + TST100BTCFG_DEF | TST100BTCFG_LITCH_EN); + + /* set phy interrupt mask */ + phy_data = IER_LINK_UP | IER_LINK_DOWN; + err = atl1c_write_phy_reg(hw, MII_IER, phy_data); + if (err) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "Error enable PHY linkChange Interrupt\n"); + return err; + } + return 0; +} + +int atl1c_phy_init(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + int ret_val; + u16 mii_bmcr_data = BMCR_RESET; + + if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) || + (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) { + dev_err(&pdev->dev, "Error get phy ID\n"); + return -1; + } + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + ret_val = atl1c_phy_setup_adv(hw); + if (ret_val) { + if (netif_msg_link(adapter)) + dev_err(&pdev->dev, + "Error Setting up Auto-Negotiation\n"); + return ret_val; + } + mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART; + break; + case MEDIA_TYPE_100M_FULL: + mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX; + break; + case MEDIA_TYPE_100M_HALF: + mii_bmcr_data |= BMCR_SPEED100; + break; + case MEDIA_TYPE_10M_FULL: + mii_bmcr_data |= BMCR_FULLDPLX; + break; + case MEDIA_TYPE_10M_HALF: + break; + default: + if (netif_msg_link(adapter)) + dev_err(&pdev->dev, "Wrong Media type %d\n", + hw->media_type); + return -1; + break; + } + + ret_val = atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); + if (ret_val) + return ret_val; + hw->phy_configured = true; + + return 0; +} + +/* + * Detects the current speed and duplex settings of the hardware. 
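 * (Editor's note, not in the original comment: resolution comes from the
 * vendor PHY status register MII_GIGA_PSSR read below; the speed/duplex
 * bits are only meaningful once GIGA_PSSR_SPD_DPLX_RESOLVED is set, which
 * is why the function returns -1 when that bit is clear.)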
+ * + * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex) +{ + int err; + u16 phy_data; + + /* Read PHY Specific Status Register (17) */ + err = atl1c_read_phy_reg(hw, MII_GIGA_PSSR, &phy_data); + if (err) + return err; + + if (!(phy_data & GIGA_PSSR_SPD_DPLX_RESOLVED)) + return -1; + + switch (phy_data & GIGA_PSSR_SPEED) { + case GIGA_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case GIGA_PSSR_100MBS: + *speed = SPEED_100; + break; + case GIGA_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + return -1; + break; + } + + if (phy_data & GIGA_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; +} + +/* select one link mode to get lower power consumption */ +int atl1c_phy_to_ps_link(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + int ret = 0; + u16 autoneg_advertised = ADVERTISED_10baseT_Half; + u16 save_autoneg_advertised; + u16 phy_data; + u16 mii_lpa_data; + u16 speed = SPEED_0; + u16 duplex = FULL_DUPLEX; + int i; + + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + if (phy_data & BMSR_LSTATUS) { + atl1c_read_phy_reg(hw, MII_LPA, &mii_lpa_data); + if (mii_lpa_data & LPA_10FULL) + autoneg_advertised = ADVERTISED_10baseT_Full; + else if (mii_lpa_data & LPA_10HALF) + autoneg_advertised = ADVERTISED_10baseT_Half; + else if (mii_lpa_data & LPA_100HALF) + autoneg_advertised = ADVERTISED_100baseT_Half; + else if (mii_lpa_data & LPA_100FULL) + autoneg_advertised = ADVERTISED_100baseT_Full; + + save_autoneg_advertised = hw->autoneg_advertised; + hw->phy_configured = false; + hw->autoneg_advertised = autoneg_advertised; + if (atl1c_restart_autoneg(hw) != 0) { + dev_dbg(&pdev->dev, "phy autoneg failed\n"); + ret = -1; + } + hw->autoneg_advertised = save_autoneg_advertised; + + if (mii_lpa_data) { + for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { + mdelay(100); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + if (phy_data & BMSR_LSTATUS) { + if (atl1c_get_speed_and_duplex(hw, &speed, + &duplex) != 0) + dev_dbg(&pdev->dev, + "get speed and duplex failed\n"); + break; + } + } + } + } else { + speed = SPEED_10; + duplex = HALF_DUPLEX; + } + adapter->link_speed = speed; + adapter->link_duplex = duplex; + + return ret; +} + +int atl1c_restart_autoneg(struct atl1c_hw *hw) +{ + int err = 0; + u16 mii_bmcr_data = BMCR_RESET; + + err = atl1c_phy_setup_adv(hw); + if (err) + return err; + mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART; + + return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); +} + +int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + u32 master_ctrl, mac_ctrl, phy_ctrl; + u32 wol_ctrl, speed; + u16 phy_data; + + wol_ctrl = 0; + speed = adapter->link_speed == SPEED_1000 ? 
+ MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100; + + AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl); + AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl); + AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl); + + master_ctrl &= ~MASTER_CTRL_CLK_SEL_DIS; + mac_ctrl = FIELD_SETX(mac_ctrl, MAC_CTRL_SPEED, speed); + mac_ctrl &= ~(MAC_CTRL_DUPLX | MAC_CTRL_RX_EN | MAC_CTRL_TX_EN); + if (adapter->link_duplex == FULL_DUPLEX) + mac_ctrl |= MAC_CTRL_DUPLX; + phy_ctrl &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_CLS); + phy_ctrl |= GPHY_CTRL_SEL_ANA_RST | GPHY_CTRL_HIB_PULSE | + GPHY_CTRL_HIB_EN; + if (!wufc) { /* without WoL */ + master_ctrl |= MASTER_CTRL_CLK_SEL_DIS; + phy_ctrl |= GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PWDOWN_HW; + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl); + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + hw->phy_configured = false; /* re-init PHY when resume */ + return 0; + } + phy_ctrl |= GPHY_CTRL_EXT_RESET; + if (wufc & AT_WUFC_MAG) { + mac_ctrl |= MAC_CTRL_RX_EN | MAC_CTRL_BC_EN; + wol_ctrl |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; + if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V11) + wol_ctrl |= WOL_PATTERN_EN | WOL_PATTERN_PME_EN; + } + if (wufc & AT_WUFC_LNKC) { + wol_ctrl |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; + if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) { + dev_dbg(&pdev->dev, "%s: write phy MII_IER failed.\n", + atl1c_driver_name); + } + } + /* clear PHY interrupt */ + atl1c_read_phy_reg(hw, MII_ISR, &phy_data); + + dev_dbg(&pdev->dev, "%s: suspend MAC=%x,MASTER=%x,PHY=0x%x,WOL=%x\n", + atl1c_driver_name, mac_ctrl, master_ctrl, phy_ctrl, wol_ctrl); + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl); + AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl); + + return 0; +} + + +/* configure phy after Link change Event */ +void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed) +{ + u16 phy_val; + bool adj_thresh = false; + + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 || + hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2) + adj_thresh = true; + + if (link_speed != SPEED_0) { /* link up */ + /* az with brcm, half-amp */ + if (hw->nic_type == athr_l1d_2) { + atl1c_read_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL6, + &phy_val); + phy_val = FIELD_GETX(phy_val, CLDCTRL6_CAB_LEN); + phy_val = phy_val > CLDCTRL6_CAB_LEN_SHORT ? + AZ_ANADECT_LONG : AZ_ANADECT_DEF; + atl1c_write_phy_dbg(hw, MIIDBG_AZ_ANADECT, phy_val); + } + /* threshold adjust */ + if (adj_thresh && link_speed == SPEED_100 && hw->msi_lnkpatch) { + atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB, L1D_MSE16DB_UP); + atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, + L1D_SYSMODCTRL_IECHOADJ_DEF); + } + } else { /* link down */ + if (adj_thresh && hw->msi_lnkpatch) { + atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, + SYSMODCTRL_IECHOADJ_DEF); + atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB, + L1D_MSE16DB_DOWN); + } + } +} diff --git a/addons/atl1c/src/3.10.108/atl1c_hw.h b/addons/atl1c/src/3.10.108/atl1c_hw.h new file mode 100644 index 00000000..21d8c4db --- /dev/null +++ b/addons/atl1c/src/3.10.108/atl1c_hw.h @@ -0,0 +1,1024 @@ +/* + * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. 
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _ATL1C_HW_H_
+#define _ATL1C_HW_H_
+
+#include <linux/types.h>
+#include <linux/mii.h>
+
+#define FIELD_GETX(_x, _name)   ((_x) >> (_name##_SHIFT) & (_name##_MASK))
+#define FIELD_SETX(_x, _name, _v) \
+(((_x) & ~((_name##_MASK) << (_name##_SHIFT))) |\
+(((_v) & (_name##_MASK)) << (_name##_SHIFT)))
+#define FIELDX(_name, _v) (((_v) & (_name##_MASK)) << (_name##_SHIFT))
+
+struct atl1c_adapter;
+struct atl1c_hw;
+
+/* function prototype */
+void atl1c_phy_disable(struct atl1c_hw *hw);
+void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr);
+int atl1c_phy_reset(struct atl1c_hw *hw);
+int atl1c_read_mac_addr(struct atl1c_hw *hw);
+int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex);
+u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr);
+void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value);
+int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data);
+int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data);
+bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value);
+int atl1c_phy_init(struct atl1c_hw *hw);
+int atl1c_check_eeprom_exist(struct atl1c_hw *hw);
+int atl1c_restart_autoneg(struct atl1c_hw *hw);
+int atl1c_phy_to_ps_link(struct atl1c_hw *hw);
+int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc);
+bool atl1c_wait_mdio_idle(struct atl1c_hw *hw);
+void atl1c_stop_phy_polling(struct atl1c_hw *hw);
+void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel);
+int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
+                        u16 reg, u16 *phy_data);
+int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev,
+                         u16 reg, u16 phy_data);
+int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
+                       u16 reg_addr, u16 *phy_data);
+int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr,
+                        u16 reg_addr, u16 phy_data);
+int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data);
+int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data);
+void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
+
+/* hw-ids */
+#define PCI_DEVICE_ID_ATTANSIC_L2C      0x1062
+#define PCI_DEVICE_ID_ATTANSIC_L1C      0x1063
+#define PCI_DEVICE_ID_ATHEROS_L2C_B     0x2060 /* AR8152 v1.1 Fast 10/100 */
+#define PCI_DEVICE_ID_ATHEROS_L2C_B2    0x2062 /* AR8152 v2.0 Fast 10/100 */
+#define PCI_DEVICE_ID_ATHEROS_L1D       0x1073 /* AR8151 v1.0 Gigabit 1000 */
+#define PCI_DEVICE_ID_ATHEROS_L1D_2_0   0x1083 /* AR8151 v2.0 Gigabit 1000 */
+#define L2CB_V10                        0xc0
+#define L2CB_V11                        0xc1
+#define L2CB_V20                        0xc0
+#define L2CB_V21                        0xc1
+
+/* register definition */
+#define REG_DEVICE_CAP                  0x5C
+#define DEVICE_CAP_MAX_PAYLOAD_MASK     0x7
+#define DEVICE_CAP_MAX_PAYLOAD_SHIFT    0
+
+#define DEVICE_CTRL_MAXRRS_MIN          2
+
+#define REG_LINK_CTRL                   0x68
+#define LINK_CTRL_L0S_EN                0x01
+#define LINK_CTRL_L1_EN                 0x02
+#define LINK_CTRL_EXT_SYNC
0x80 + +#define REG_PCIE_IND_ACC_ADDR 0x80 +#define REG_PCIE_IND_ACC_DATA 0x84 + +#define REG_DEV_SERIALNUM_CTRL 0x200 +#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */ +#define REG_DEV_MAC_SEL_SHIFT 0 +#define REG_DEV_SERIAL_NUM_EN_MASK 0x1 +#define REG_DEV_SERIAL_NUM_EN_SHIFT 1 + +#define REG_TWSI_CTRL 0x218 +#define TWSI_CTLR_FREQ_MASK 0x3UL +#define TWSI_CTRL_FREQ_SHIFT 24 +#define TWSI_CTRL_FREQ_100K 0 +#define TWSI_CTRL_FREQ_200K 1 +#define TWSI_CTRL_FREQ_300K 2 +#define TWSI_CTRL_FREQ_400K 3 +#define TWSI_CTRL_LD_EXIST BIT(23) +#define TWSI_CTRL_HW_LDSTAT BIT(12) /* 0:finish,1:in progress */ +#define TWSI_CTRL_SW_LDSTART BIT(11) +#define TWSI_CTRL_LD_OFFSET_MASK 0xFF +#define TWSI_CTRL_LD_OFFSET_SHIFT 0 + +#define REG_PCIE_DEV_MISC_CTRL 0x21C +#define PCIE_DEV_MISC_EXT_PIPE 0x2 +#define PCIE_DEV_MISC_RETRY_BUFDIS 0x1 +#define PCIE_DEV_MISC_SPIROM_EXIST 0x4 +#define PCIE_DEV_MISC_SERDES_ENDIAN 0x8 +#define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10 + +#define REG_PCIE_PHYMISC 0x1000 +#define PCIE_PHYMISC_FORCE_RCV_DET BIT(2) +#define PCIE_PHYMISC_NFTS_MASK 0xFFUL +#define PCIE_PHYMISC_NFTS_SHIFT 16 + +#define REG_PCIE_PHYMISC2 0x1004 +#define PCIE_PHYMISC2_L0S_TH_MASK 0x3UL +#define PCIE_PHYMISC2_L0S_TH_SHIFT 18 +#define L2CB1_PCIE_PHYMISC2_L0S_TH 3 +#define PCIE_PHYMISC2_CDR_BW_MASK 0x3UL +#define PCIE_PHYMISC2_CDR_BW_SHIFT 16 +#define L2CB1_PCIE_PHYMISC2_CDR_BW 3 + +#define REG_TWSI_DEBUG 0x1108 +#define TWSI_DEBUG_DEV_EXIST BIT(29) + +#define REG_DMA_DBG 0x1114 +#define DMA_DBG_VENDOR_MSG BIT(0) + +#define REG_EEPROM_CTRL 0x12C0 +#define EEPROM_CTRL_DATA_HI_MASK 0xFFFF +#define EEPROM_CTRL_DATA_HI_SHIFT 0 +#define EEPROM_CTRL_ADDR_MASK 0x3FF +#define EEPROM_CTRL_ADDR_SHIFT 16 +#define EEPROM_CTRL_ACK 0x40000000 +#define EEPROM_CTRL_RW 0x80000000 + +#define REG_EEPROM_DATA_LO 0x12C4 + +#define REG_OTP_CTRL 0x12F0 +#define OTP_CTRL_CLK_EN BIT(1) + +#define REG_PM_CTRL 0x12F8 +#define PM_CTRL_HOTRST BIT(31) +#define PM_CTRL_MAC_ASPM_CHK BIT(30) /* L0s/L1 dis by MAC based on + * thrghput(setting in 15A0) */ +#define PM_CTRL_SA_DLY_EN BIT(29) +#define PM_CTRL_L0S_BUFSRX_EN BIT(28) +#define PM_CTRL_LCKDET_TIMER_MASK 0xFUL +#define PM_CTRL_LCKDET_TIMER_SHIFT 24 +#define PM_CTRL_LCKDET_TIMER_DEF 0xC +#define PM_CTRL_PM_REQ_TIMER_MASK 0xFUL +#define PM_CTRL_PM_REQ_TIMER_SHIFT 20 /* pm_request_l1 time > @ + * ->L0s not L1 */ +#define PM_CTRL_PM_REQ_TO_DEF 0xF +#define PMCTRL_TXL1_AFTER_L0S BIT(19) /* l1dv2.0+ */ +#define L1D_PMCTRL_L1_ENTRY_TM_MASK 7UL /* l1dv2.0+, 3bits */ +#define L1D_PMCTRL_L1_ENTRY_TM_SHIFT 16 +#define L1D_PMCTRL_L1_ENTRY_TM_DIS 0 +#define L1D_PMCTRL_L1_ENTRY_TM_2US 1 +#define L1D_PMCTRL_L1_ENTRY_TM_4US 2 +#define L1D_PMCTRL_L1_ENTRY_TM_8US 3 +#define L1D_PMCTRL_L1_ENTRY_TM_16US 4 +#define L1D_PMCTRL_L1_ENTRY_TM_24US 5 +#define L1D_PMCTRL_L1_ENTRY_TM_32US 6 +#define L1D_PMCTRL_L1_ENTRY_TM_63US 7 +#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xFUL /* l1C 4bits */ +#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16 +#define L2CB1_PM_CTRL_L1_ENTRY_TM 7 +#define L1C_PM_CTRL_L1_ENTRY_TM 0xF +#define PM_CTRL_RCVR_WT_TIMER BIT(15) /* 1:1us, 0:2ms */ +#define PM_CTRL_CLK_PWM_VER1_1 BIT(14) /* 0:1.0a,1:1.1 */ +#define PM_CTRL_CLK_SWH_L1 BIT(13) /* en pcie clk sw in L1 */ +#define PM_CTRL_ASPM_L0S_EN BIT(12) +#define PM_CTRL_RXL1_AFTER_L0S BIT(11) /* l1dv2.0+ */ +#define L1D_PMCTRL_L0S_TIMER_MASK 7UL /* l1d2.0+, 3bits*/ +#define L1D_PMCTRL_L0S_TIMER_SHIFT 8 +#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xFUL /* l1c, 4bits */ +#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8 +#define PM_CTRL_SERDES_BUFS_RX_L1_EN BIT(7) 
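/* Editor's note: an illustrative helper (not part of the original header)
 * showing how the FIELD_SETX macro defined at the top of this file combines
 * with the MASK/SHIFT pairs above; the function name is invented. */
static inline u32 atl1c_pm_set_l1_entry_timer(u32 pm_ctrl, u32 timer)
{
    /* clears the 4-bit L1 entry timer field, then ORs in the new value,
     * e.g. atl1c_pm_set_l1_entry_timer(pm_ctrl, L1C_PM_CTRL_L1_ENTRY_TM) */
    return FIELD_SETX(pm_ctrl, PM_CTRL_L1_ENTRY_TIMER, timer);
}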
+#define PM_CTRL_SERDES_PD_EX_L1 BIT(6) /* power down serdes rx */ +#define PM_CTRL_SERDES_PLL_L1_EN BIT(5) +#define PM_CTRL_SERDES_L1_EN BIT(4) +#define PM_CTRL_ASPM_L1_EN BIT(3) +#define PM_CTRL_CLK_REQ_EN BIT(2) +#define PM_CTRL_RBER_EN BIT(1) +#define PM_CTRL_SPRSDWER_EN BIT(0) + +#define REG_LTSSM_ID_CTRL 0x12FC +#define LTSSM_ID_EN_WRO 0x1000 + + +/* Selene Master Control Register */ +#define REG_MASTER_CTRL 0x1400 +#define MASTER_CTRL_OTP_SEL BIT(31) +#define MASTER_DEV_NUM_MASK 0x7FUL +#define MASTER_DEV_NUM_SHIFT 24 +#define MASTER_REV_NUM_MASK 0xFFUL +#define MASTER_REV_NUM_SHIFT 16 +#define MASTER_CTRL_INT_RDCLR BIT(14) +#define MASTER_CTRL_CLK_SEL_DIS BIT(12) /* 1:alwys sel pclk from + * serdes, not sw to 25M */ +#define MASTER_CTRL_RX_ITIMER_EN BIT(11) /* IRQ MODURATION FOR RX */ +#define MASTER_CTRL_TX_ITIMER_EN BIT(10) /* MODURATION FOR TX/RX */ +#define MASTER_CTRL_MANU_INT BIT(9) /* SOFT MANUAL INT */ +#define MASTER_CTRL_MANUTIMER_EN BIT(8) +#define MASTER_CTRL_SA_TIMER_EN BIT(7) /* SYS ALIVE TIMER EN */ +#define MASTER_CTRL_OOB_DIS BIT(6) /* OUT OF BOX DIS */ +#define MASTER_CTRL_WAKEN_25M BIT(5) /* WAKE WO. PCIE CLK */ +#define MASTER_CTRL_BERT_START BIT(4) +#define MASTER_PCIE_TSTMOD_MASK 3UL +#define MASTER_PCIE_TSTMOD_SHIFT 2 +#define MASTER_PCIE_RST BIT(1) +#define MASTER_CTRL_SOFT_RST BIT(0) /* RST MAC & DMA */ +#define DMA_MAC_RST_TO 50 + +/* Timer Initial Value Register */ +#define REG_MANUAL_TIMER_INIT 0x1404 + +/* IRQ ModeratorTimer Initial Value Register */ +#define REG_IRQ_MODRT_TIMER_INIT 0x1408 +#define IRQ_MODRT_TIMER_MASK 0xffff +#define IRQ_MODRT_TX_TIMER_SHIFT 0 +#define IRQ_MODRT_RX_TIMER_SHIFT 16 + +#define REG_GPHY_CTRL 0x140C +#define GPHY_CTRL_ADDR_MASK 0x1FUL +#define GPHY_CTRL_ADDR_SHIFT 19 +#define GPHY_CTRL_BP_VLTGSW BIT(18) +#define GPHY_CTRL_100AB_EN BIT(17) +#define GPHY_CTRL_10AB_EN BIT(16) +#define GPHY_CTRL_PHY_PLL_BYPASS BIT(15) +#define GPHY_CTRL_PWDOWN_HW BIT(14) /* affect MAC&PHY, to low pw */ +#define GPHY_CTRL_PHY_PLL_ON BIT(13) /* 1:pll always on, 0:can sw */ +#define GPHY_CTRL_SEL_ANA_RST BIT(12) +#define GPHY_CTRL_HIB_PULSE BIT(11) +#define GPHY_CTRL_HIB_EN BIT(10) +#define GPHY_CTRL_GIGA_DIS BIT(9) +#define GPHY_CTRL_PHY_IDDQ_DIS BIT(8) /* pw on RST */ +#define GPHY_CTRL_PHY_IDDQ BIT(7) /* bit8 affect bit7 while rb */ +#define GPHY_CTRL_LPW_EXIT BIT(6) +#define GPHY_CTRL_GATE_25M_EN BIT(5) +#define GPHY_CTRL_REV_ANEG BIT(4) +#define GPHY_CTRL_ANEG_NOW BIT(3) +#define GPHY_CTRL_LED_MODE BIT(2) +#define GPHY_CTRL_RTL_MODE BIT(1) +#define GPHY_CTRL_EXT_RESET BIT(0) /* 1:out of DSP RST status */ +#define GPHY_CTRL_EXT_RST_TO 80 /* 800us atmost */ +#define GPHY_CTRL_CLS (\ + GPHY_CTRL_LED_MODE |\ + GPHY_CTRL_100AB_EN |\ + GPHY_CTRL_PHY_PLL_ON) + +/* Block IDLE Status Register */ +#define REG_IDLE_STATUS 0x1410 +#define IDLE_STATUS_SFORCE_MASK 0xFUL +#define IDLE_STATUS_SFORCE_SHIFT 14 +#define IDLE_STATUS_CALIB_DONE BIT(13) +#define IDLE_STATUS_CALIB_RES_MASK 0x1FUL +#define IDLE_STATUS_CALIB_RES_SHIFT 8 +#define IDLE_STATUS_CALIBERR_MASK 0xFUL +#define IDLE_STATUS_CALIBERR_SHIFT 4 +#define IDLE_STATUS_TXQ_BUSY BIT(3) +#define IDLE_STATUS_RXQ_BUSY BIT(2) +#define IDLE_STATUS_TXMAC_BUSY BIT(1) +#define IDLE_STATUS_RXMAC_BUSY BIT(0) +#define IDLE_STATUS_MASK (\ + IDLE_STATUS_TXQ_BUSY |\ + IDLE_STATUS_RXQ_BUSY |\ + IDLE_STATUS_TXMAC_BUSY |\ + IDLE_STATUS_RXMAC_BUSY) + +/* MDIO Control Register */ +#define REG_MDIO_CTRL 0x1414 +#define MDIO_CTRL_MODE_EXT BIT(30) +#define MDIO_CTRL_POST_READ BIT(29) +#define MDIO_CTRL_AP_EN BIT(28) 
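/* Editor's note: illustrative helper, not in the original header. A natural
 * use of REG_IDLE_STATUS (defined above) is to poll it until the TX/RX queue
 * and MAC engines all report idle before resetting or reconfiguring the MAC;
 * this sketch is just that test. */
static inline bool atl1c_mac_is_idle(u32 idle_status)
{
    /* usage: AT_READ_REG(hw, REG_IDLE_STATUS, &v); if (atl1c_mac_is_idle(v)) ... */
    return (idle_status & IDLE_STATUS_MASK) == 0;
}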
+#define MDIO_CTRL_BUSY BIT(27) +#define MDIO_CTRL_CLK_SEL_MASK 0x7UL +#define MDIO_CTRL_CLK_SEL_SHIFT 24 +#define MDIO_CTRL_CLK_25_4 0 /* 25MHz divide 4 */ +#define MDIO_CTRL_CLK_25_6 2 +#define MDIO_CTRL_CLK_25_8 3 +#define MDIO_CTRL_CLK_25_10 4 +#define MDIO_CTRL_CLK_25_32 5 +#define MDIO_CTRL_CLK_25_64 6 +#define MDIO_CTRL_CLK_25_128 7 +#define MDIO_CTRL_START BIT(23) +#define MDIO_CTRL_SPRES_PRMBL BIT(22) +#define MDIO_CTRL_OP_READ BIT(21) /* 1:read, 0:write */ +#define MDIO_CTRL_REG_MASK 0x1FUL +#define MDIO_CTRL_REG_SHIFT 16 +#define MDIO_CTRL_DATA_MASK 0xFFFFUL +#define MDIO_CTRL_DATA_SHIFT 0 +#define MDIO_MAX_AC_TO 120 /* 1.2ms timeout for slow clk */ + +/* for extension reg access */ +#define REG_MDIO_EXTN 0x1448 +#define MDIO_EXTN_PORTAD_MASK 0x1FUL +#define MDIO_EXTN_PORTAD_SHIFT 21 +#define MDIO_EXTN_DEVAD_MASK 0x1FUL +#define MDIO_EXTN_DEVAD_SHIFT 16 +#define MDIO_EXTN_REG_MASK 0xFFFFUL +#define MDIO_EXTN_REG_SHIFT 0 + +/* BIST Control and Status Register0 (for the Packet Memory) */ +#define REG_BIST0_CTRL 0x141c +#define BIST0_NOW 0x1 +#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is + * un-repairable because + * it has address decoder + * failure or more than 1 cell + * stuck-to-x failure */ +#define BIST0_FUSE_FLAG 0x4 + +/* BIST Control and Status Register1(for the retry buffer of PCI Express) */ +#define REG_BIST1_CTRL 0x1420 +#define BIST1_NOW 0x1 +#define BIST1_SRAM_FAIL 0x2 +#define BIST1_FUSE_FLAG 0x4 + +/* SerDes Lock Detect Control and Status Register */ +#define REG_SERDES 0x1424 +#define SERDES_PHY_CLK_SLOWDOWN BIT(18) +#define SERDES_MAC_CLK_SLOWDOWN BIT(17) +#define SERDES_SELFB_PLL_MASK 0x3UL +#define SERDES_SELFB_PLL_SHIFT 14 +#define SERDES_PHYCLK_SEL_GTX BIT(13) /* 1:gtx_clk, 0:25M */ +#define SERDES_PCIECLK_SEL_SRDS BIT(12) /* 1:serdes,0:25M */ +#define SERDES_BUFS_RX_EN BIT(11) +#define SERDES_PD_RX BIT(10) +#define SERDES_PLL_EN BIT(9) +#define SERDES_EN BIT(8) +#define SERDES_SELFB_PLL_SEL_CSR BIT(6) /* 0:state-machine,1:csr */ +#define SERDES_SELFB_PLL_CSR_MASK 0x3UL +#define SERDES_SELFB_PLL_CSR_SHIFT 4 +#define SERDES_SELFB_PLL_CSR_4 3 /* 4-12% OV-CLK */ +#define SERDES_SELFB_PLL_CSR_0 2 /* 0-4% OV-CLK */ +#define SERDES_SELFB_PLL_CSR_12 1 /* 12-18% OV-CLK */ +#define SERDES_SELFB_PLL_CSR_18 0 /* 18-25% OV-CLK */ +#define SERDES_VCO_SLOW BIT(3) +#define SERDES_VCO_FAST BIT(2) +#define SERDES_LOCK_DETECT_EN BIT(1) +#define SERDES_LOCK_DETECT BIT(0) + +#define REG_LPI_DECISN_TIMER 0x143C +#define L2CB_LPI_DESISN_TIMER 0x7D00 + +#define REG_LPI_CTRL 0x1440 +#define LPI_CTRL_CHK_DA BIT(31) +#define LPI_CTRL_ENH_TO_MASK 0x1FFFUL +#define LPI_CTRL_ENH_TO_SHIFT 12 +#define LPI_CTRL_ENH_TH_MASK 0x1FUL +#define LPI_CTRL_ENH_TH_SHIFT 6 +#define LPI_CTRL_ENH_EN BIT(5) +#define LPI_CTRL_CHK_RX BIT(4) +#define LPI_CTRL_CHK_STATE BIT(3) +#define LPI_CTRL_GMII BIT(2) +#define LPI_CTRL_TO_PHY BIT(1) +#define LPI_CTRL_EN BIT(0) + +#define REG_LPI_WAIT 0x1444 +#define LPI_WAIT_TIMER_MASK 0xFFFFUL +#define LPI_WAIT_TIMER_SHIFT 0 + +/* MAC Control Register */ +#define REG_MAC_CTRL 0x1480 +#define MAC_CTRL_SPEED_MODE_SW BIT(30) /* 0:phy,1:sw */ +#define MAC_CTRL_HASH_ALG_CRC32 BIT(29) /* 1:legacy,0:lw_5b */ +#define MAC_CTRL_SINGLE_PAUSE_EN BIT(28) +#define MAC_CTRL_DBG BIT(27) +#define MAC_CTRL_BC_EN BIT(26) +#define MAC_CTRL_MC_ALL_EN BIT(25) +#define MAC_CTRL_RX_CHKSUM_EN BIT(24) +#define MAC_CTRL_TX_HUGE BIT(23) +#define MAC_CTRL_DBG_TX_BKPRESURE BIT(22) +#define MAC_CTRL_SPEED_MASK 3UL +#define MAC_CTRL_SPEED_SHIFT 20 +#define MAC_CTRL_SPEED_10_100 1 +#define 
MAC_CTRL_SPEED_1000 2 +#define MAC_CTRL_TX_SIMURST BIT(19) +#define MAC_CTRL_SCNT BIT(17) +#define MAC_CTRL_TX_PAUSE BIT(16) +#define MAC_CTRL_PROMIS_EN BIT(15) +#define MAC_CTRL_RMV_VLAN BIT(14) +#define MAC_CTRL_PRMLEN_MASK 0xFUL +#define MAC_CTRL_PRMLEN_SHIFT 10 +#define MAC_CTRL_HUGE_EN BIT(9) +#define MAC_CTRL_LENCHK BIT(8) +#define MAC_CTRL_PAD BIT(7) +#define MAC_CTRL_ADD_CRC BIT(6) +#define MAC_CTRL_DUPLX BIT(5) +#define MAC_CTRL_LOOPBACK BIT(4) +#define MAC_CTRL_RX_FLOW BIT(3) +#define MAC_CTRL_TX_FLOW BIT(2) +#define MAC_CTRL_RX_EN BIT(1) +#define MAC_CTRL_TX_EN BIT(0) + +/* MAC IPG/IFG Control Register */ +#define REG_MAC_IPG_IFG 0x1484 +#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back + * inter-packet gap. The + * default is 96-bit time */ +#define MAC_IPG_IFG_IPGT_MASK 0x7f +#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to + * enforce in between RX frames */ +#define MAC_IPG_IFG_MIFG_MASK 0xff /* Frame gap below such IFP is dropped */ +#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */ +#define MAC_IPG_IFG_IPGR1_MASK 0x7f +#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */ +#define MAC_IPG_IFG_IPGR2_MASK 0x7f + +/* MAC STATION ADDRESS */ +#define REG_MAC_STA_ADDR 0x1488 + +/* Hash table for multicast address */ +#define REG_RX_HASH_TABLE 0x1490 + +/* MAC Half-Duplex Control Register */ +#define REG_MAC_HALF_DUPLX_CTRL 0x1498 +#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */ +#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff +#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 +#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* No back-off on backpressure, + * immediately start the + * transmission after back pressure */ +#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision based flow control in half-duplex */ +#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf /* mode. 
In unit of 8-bit time */ + +/* Maximum Frame Length Control Register */ +#define REG_MTU 0x149c + +/* Wake-On-Lan control register */ +#define REG_WOL_CTRL 0x14a0 +#define WOL_PT7_MATCH BIT(31) +#define WOL_PT6_MATCH BIT(30) +#define WOL_PT5_MATCH BIT(29) +#define WOL_PT4_MATCH BIT(28) +#define WOL_PT3_MATCH BIT(27) +#define WOL_PT2_MATCH BIT(26) +#define WOL_PT1_MATCH BIT(25) +#define WOL_PT0_MATCH BIT(24) +#define WOL_PT7_EN BIT(23) +#define WOL_PT6_EN BIT(22) +#define WOL_PT5_EN BIT(21) +#define WOL_PT4_EN BIT(20) +#define WOL_PT3_EN BIT(19) +#define WOL_PT2_EN BIT(18) +#define WOL_PT1_EN BIT(17) +#define WOL_PT0_EN BIT(16) +#define WOL_LNKCHG_ST BIT(10) +#define WOL_MAGIC_ST BIT(9) +#define WOL_PATTERN_ST BIT(8) +#define WOL_OOB_EN BIT(6) +#define WOL_LINK_CHG_PME_EN BIT(5) +#define WOL_LINK_CHG_EN BIT(4) +#define WOL_MAGIC_PME_EN BIT(3) +#define WOL_MAGIC_EN BIT(2) +#define WOL_PATTERN_PME_EN BIT(1) +#define WOL_PATTERN_EN BIT(0) + +/* WOL Length ( 2 DWORD ) */ +#define REG_WOL_PTLEN1 0x14A4 +#define WOL_PTLEN1_3_MASK 0xFFUL +#define WOL_PTLEN1_3_SHIFT 24 +#define WOL_PTLEN1_2_MASK 0xFFUL +#define WOL_PTLEN1_2_SHIFT 16 +#define WOL_PTLEN1_1_MASK 0xFFUL +#define WOL_PTLEN1_1_SHIFT 8 +#define WOL_PTLEN1_0_MASK 0xFFUL +#define WOL_PTLEN1_0_SHIFT 0 + +#define REG_WOL_PTLEN2 0x14A8 +#define WOL_PTLEN2_7_MASK 0xFFUL +#define WOL_PTLEN2_7_SHIFT 24 +#define WOL_PTLEN2_6_MASK 0xFFUL +#define WOL_PTLEN2_6_SHIFT 16 +#define WOL_PTLEN2_5_MASK 0xFFUL +#define WOL_PTLEN2_5_SHIFT 8 +#define WOL_PTLEN2_4_MASK 0xFFUL +#define WOL_PTLEN2_4_SHIFT 0 + +/* Internal SRAM Partition Register */ +#define RFDX_HEAD_ADDR_MASK 0x03FF +#define RFDX_HARD_ADDR_SHIFT 0 +#define RFDX_TAIL_ADDR_MASK 0x03FF +#define RFDX_TAIL_ADDR_SHIFT 16 + +#define REG_SRAM_RFD0_INFO 0x1500 +#define REG_SRAM_RFD1_INFO 0x1504 +#define REG_SRAM_RFD2_INFO 0x1508 +#define REG_SRAM_RFD3_INFO 0x150C + +#define REG_RFD_NIC_LEN 0x1510 /* In 8-bytes */ +#define RFD_NIC_LEN_MASK 0x03FF + +#define REG_SRAM_TRD_ADDR 0x1518 +#define TPD_HEAD_ADDR_MASK 0x03FF +#define TPD_HEAD_ADDR_SHIFT 0 +#define TPD_TAIL_ADDR_MASK 0x03FF +#define TPD_TAIL_ADDR_SHIFT 16 + +#define REG_SRAM_TRD_LEN 0x151C /* In 8-bytes */ +#define TPD_NIC_LEN_MASK 0x03FF + +#define REG_SRAM_RXF_ADDR 0x1520 +#define REG_SRAM_RXF_LEN 0x1524 +#define REG_SRAM_TXF_ADDR 0x1528 +#define REG_SRAM_TXF_LEN 0x152C +#define REG_SRAM_TCPH_ADDR 0x1530 +#define REG_SRAM_PKTH_ADDR 0x1532 + +/* + * Load Ptr Register + * Software sets this bit after the initialization of the head and tail */ +#define REG_LOAD_PTR 0x1534 + +/* + * addresses of all descriptors, as well as the following descriptor + * control register, which triggers each function block to load the head + * pointer to prepare for the operation. This bit is then self-cleared + * after one cycle. 
+ */ +#define REG_RX_BASE_ADDR_HI 0x1540 +#define REG_TX_BASE_ADDR_HI 0x1544 +#define REG_RFD0_HEAD_ADDR_LO 0x1550 +#define REG_RFD_RING_SIZE 0x1560 +#define RFD_RING_SIZE_MASK 0x0FFF +#define REG_RX_BUF_SIZE 0x1564 +#define RX_BUF_SIZE_MASK 0xFFFF +#define REG_RRD0_HEAD_ADDR_LO 0x1568 +#define REG_RRD_RING_SIZE 0x1578 +#define RRD_RING_SIZE_MASK 0x0FFF +#define REG_TPD_PRI1_ADDR_LO 0x157C +#define REG_TPD_PRI0_ADDR_LO 0x1580 +#define REG_TPD_RING_SIZE 0x1584 +#define TPD_RING_SIZE_MASK 0xFFFF + +/* TXQ Control Register */ +#define REG_TXQ_CTRL 0x1590 +#define TXQ_TXF_BURST_NUM_MASK 0xFFFFUL +#define TXQ_TXF_BURST_NUM_SHIFT 16 +#define L1C_TXQ_TXF_BURST_PREF 0x200 +#define L2CB_TXQ_TXF_BURST_PREF 0x40 +#define TXQ_CTRL_PEDING_CLR BIT(8) +#define TXQ_CTRL_LS_8023_EN BIT(7) +#define TXQ_CTRL_ENH_MODE BIT(6) +#define TXQ_CTRL_EN BIT(5) +#define TXQ_CTRL_IP_OPTION_EN BIT(4) +#define TXQ_NUM_TPD_BURST_MASK 0xFUL +#define TXQ_NUM_TPD_BURST_SHIFT 0 +#define TXQ_NUM_TPD_BURST_DEF 5 +#define TXQ_CFGV (\ + FIELDX(TXQ_NUM_TPD_BURST, TXQ_NUM_TPD_BURST_DEF) |\ + TXQ_CTRL_ENH_MODE |\ + TXQ_CTRL_LS_8023_EN |\ + TXQ_CTRL_IP_OPTION_EN) +#define L1C_TXQ_CFGV (\ + TXQ_CFGV |\ + FIELDX(TXQ_TXF_BURST_NUM, L1C_TXQ_TXF_BURST_PREF)) +#define L2CB_TXQ_CFGV (\ + TXQ_CFGV |\ + FIELDX(TXQ_TXF_BURST_NUM, L2CB_TXQ_TXF_BURST_PREF)) + + +/* Jumbo packet Threshold for task offload */ +#define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */ +#define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF +#define MAX_TSO_FRAME_SIZE (7*1024) + +#define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */ +#define TXF_WATER_MARK_MASK 0x0FFF +#define TXF_LOW_WATER_MARK_SHIFT 0 +#define TXF_HIGH_WATER_MARK_SHIFT 16 +#define TXQ_CTRL_BURST_MODE_EN 0x80000000 + +#define REG_THRUPUT_MON_CTRL 0x159C +#define THRUPUT_MON_RATE_MASK 0x3 +#define THRUPUT_MON_RATE_SHIFT 0 +#define THRUPUT_MON_EN 0x80 + +/* RXQ Control Register */ +#define REG_RXQ_CTRL 0x15A0 +#define ASPM_THRUPUT_LIMIT_MASK 0x3 +#define ASPM_THRUPUT_LIMIT_SHIFT 0 +#define ASPM_THRUPUT_LIMIT_NO 0x00 +#define ASPM_THRUPUT_LIMIT_1M 0x01 +#define ASPM_THRUPUT_LIMIT_10M 0x02 +#define ASPM_THRUPUT_LIMIT_100M 0x03 +#define IPV6_CHKSUM_CTRL_EN BIT(7) +#define RXQ_RFD_BURST_NUM_MASK 0x003F +#define RXQ_RFD_BURST_NUM_SHIFT 20 +#define RXQ_NUM_RFD_PREF_DEF 8 +#define RSS_MODE_MASK 3UL +#define RSS_MODE_SHIFT 26 +#define RSS_MODE_DIS 0 +#define RSS_MODE_SQSI 1 +#define RSS_MODE_MQSI 2 +#define RSS_MODE_MQMI 3 +#define RSS_NIP_QUEUE_SEL BIT(28) /* 0:q0, 1:table */ +#define RRS_HASH_CTRL_EN BIT(29) +#define RX_CUT_THRU_EN BIT(30) +#define RXQ_CTRL_EN BIT(31) + +#define REG_RFD_FREE_THRESH 0x15A4 +#define RFD_FREE_THRESH_MASK 0x003F +#define RFD_FREE_HI_THRESH_SHIFT 0 +#define RFD_FREE_LO_THRESH_SHIFT 6 + +/* RXF flow control register */ +#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8 +#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0 +#define RXQ_RXF_PAUSE_TH_HI_MASK 0x0FFF +#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16 +#define RXQ_RXF_PAUSE_TH_LO_MASK 0x0FFF + +#define REG_RXD_DMA_CTRL 0x15AC +#define RXD_DMA_THRESH_MASK 0x0FFF /* In 8-bytes */ +#define RXD_DMA_THRESH_SHIFT 0 +#define RXD_DMA_DOWN_TIMER_MASK 0xFFFF +#define RXD_DMA_DOWN_TIMER_SHIFT 16 + +/* DMA Engine Control Register */ +#define REG_DMA_CTRL 0x15C0 +#define DMA_CTRL_SMB_NOW BIT(31) +#define DMA_CTRL_WPEND_CLR BIT(30) +#define DMA_CTRL_RPEND_CLR BIT(29) +#define DMA_CTRL_WDLY_CNT_MASK 0xFUL +#define DMA_CTRL_WDLY_CNT_SHIFT 16 +#define DMA_CTRL_WDLY_CNT_DEF 4 +#define DMA_CTRL_RDLY_CNT_MASK 0x1FUL +#define DMA_CTRL_RDLY_CNT_SHIFT 11 +#define DMA_CTRL_RDLY_CNT_DEF 15 
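+/*
+ * Cross-reference (illustrative note): atl1c_configure_dma() in
+ * atl1c_main.c composes this register from the fields defined here,
+ * roughly:
+ *
+ *   dma = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) |
+ *         DMA_CTRL_RREQ_PRI_DATA |
+ *         FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) |
+ *         FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) |
+ *         FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF);
+ *   AT_WRITE_REG(hw, REG_DMA_CTRL, dma);
+ */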
+#define DMA_CTRL_RREQ_PRI_DATA BIT(10) /* 0:tpd, 1:data */
+#define DMA_CTRL_WREQ_BLEN_MASK 7UL
+#define DMA_CTRL_WREQ_BLEN_SHIFT 7
+#define DMA_CTRL_RREQ_BLEN_MASK 7UL
+#define DMA_CTRL_RREQ_BLEN_SHIFT 4
+#define L1C_CTRL_DMA_RCB_LEN128 BIT(3) /* 0:64bytes,1:128bytes */
+#define DMA_CTRL_RORDER_MODE_MASK 7UL
+#define DMA_CTRL_RORDER_MODE_SHIFT 0
+#define DMA_CTRL_RORDER_MODE_OUT 4
+#define DMA_CTRL_RORDER_MODE_ENHANCE 2
+#define DMA_CTRL_RORDER_MODE_IN 1
+
+/* INT-trigger/SMB Control Register */
+#define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */
+#define SMB_STAT_TIMER_MASK 0xFFFFFF
+#define REG_TINT_TPD_THRESH 0x15C8 /* TPD threshold to trigger interrupt */
+
+/* Mail box */
+#define MB_RFDX_PROD_IDX_MASK 0xFFFF
+#define REG_MB_RFD0_PROD_IDX 0x15E0
+
+#define REG_TPD_PRI1_PIDX 0x15F0 /* 16bit,hi-tpd producer idx */
+#define REG_TPD_PRI0_PIDX 0x15F2 /* 16bit,lo-tpd producer idx */
+#define REG_TPD_PRI1_CIDX 0x15F4 /* 16bit,hi-tpd consumer idx */
+#define REG_TPD_PRI0_CIDX 0x15F6 /* 16bit,lo-tpd consumer idx */
+
+#define REG_MB_RFD01_CONS_IDX 0x15F8
+#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF
+#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000
+
+/* Interrupt Status Register */
+#define REG_ISR 0x1600
+#define ISR_SMB 0x00000001
+#define ISR_TIMER 0x00000002
+/*
+ * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set
+ * in Table 51 Selene Master Control Register (Offset 0x1400).
+ */
+#define ISR_MANUAL 0x00000004
+#define ISR_HW_RXF_OV 0x00000008 /* RXF overflow interrupt */
+#define ISR_RFD0_UR 0x00000010 /* RFD0 under run */
+#define ISR_RFD1_UR 0x00000020
+#define ISR_RFD2_UR 0x00000040
+#define ISR_RFD3_UR 0x00000080
+#define ISR_TXF_UR 0x00000100
+#define ISR_DMAR_TO_RST 0x00000200
+#define ISR_DMAW_TO_RST 0x00000400
+#define ISR_TX_CREDIT 0x00000800
+#define ISR_GPHY 0x00001000
+/* GPHY low power state interrupt */
+#define ISR_GPHY_LPW 0x00002000
+#define ISR_TXQ_TO_RST 0x00004000
+#define ISR_TX_PKT 0x00008000
+#define ISR_RX_PKT_0 0x00010000
+#define ISR_RX_PKT_1 0x00020000
+#define ISR_RX_PKT_2 0x00040000
+#define ISR_RX_PKT_3 0x00080000
+#define ISR_MAC_RX 0x00100000
+#define ISR_MAC_TX 0x00200000
+#define ISR_UR_DETECTED 0x00400000
+#define ISR_FERR_DETECTED 0x00800000
+#define ISR_NFERR_DETECTED 0x01000000
+#define ISR_CERR_DETECTED 0x02000000
+#define ISR_PHY_LINKDOWN 0x04000000
+#define ISR_DIS_INT 0x80000000
+
+/* Interrupt Mask Register */
+#define REG_IMR 0x1604
+
+#define IMR_NORMAL_MASK (\
+ ISR_MANUAL |\
+ ISR_HW_RXF_OV |\
+ ISR_RFD0_UR |\
+ ISR_TXF_UR |\
+ ISR_DMAR_TO_RST |\
+ ISR_TXQ_TO_RST |\
+ ISR_DMAW_TO_RST |\
+ ISR_GPHY |\
+ ISR_TX_PKT |\
+ ISR_RX_PKT_0 |\
+ ISR_GPHY_LPW |\
+ ISR_PHY_LINKDOWN)
+
+#define ISR_RX_PKT (\
+ ISR_RX_PKT_0 |\
+ ISR_RX_PKT_1 |\
+ ISR_RX_PKT_2 |\
+ ISR_RX_PKT_3)
+
+#define ISR_OVER (\
+ ISR_RFD0_UR |\
+ ISR_RFD1_UR |\
+ ISR_RFD2_UR |\
+ ISR_RFD3_UR |\
+ ISR_HW_RXF_OV |\
+ ISR_TXF_UR)
+
+#define ISR_ERROR (\
+ ISR_DMAR_TO_RST |\
+ ISR_TXQ_TO_RST |\
+ ISR_DMAW_TO_RST |\
+ ISR_PHY_LINKDOWN)
+
+#define REG_INT_RETRIG_TIMER 0x1608
+#define INT_RETRIG_TIMER_MASK 0xFFFF
+
+#define REG_MAC_RX_STATUS_BIN 0x1700
+#define REG_MAC_RX_STATUS_END 0x175c
+#define REG_MAC_TX_STATUS_BIN 0x1760
+#define REG_MAC_TX_STATUS_END 0x17c0
+
+#define REG_CLK_GATING_CTRL 0x1814
+#define CLK_GATING_DMAW_EN 0x0001
+#define CLK_GATING_DMAR_EN 0x0002
+#define CLK_GATING_TXQ_EN 0x0004
+#define CLK_GATING_RXQ_EN 0x0008
+#define CLK_GATING_TXMAC_EN 0x0010
+#define CLK_GATING_RXMAC_EN 0x0020
+
+#define CLK_GATING_EN_ALL (CLK_GATING_DMAW_EN |\
+ CLK_GATING_DMAR_EN |\
+ 
CLK_GATING_TXQ_EN |\ + CLK_GATING_RXQ_EN |\ + CLK_GATING_TXMAC_EN|\ + CLK_GATING_RXMAC_EN) + +/* DEBUG ADDR */ +#define REG_DEBUG_DATA0 0x1900 +#define REG_DEBUG_DATA1 0x1904 + +#define L1D_MPW_PHYID1 0xD01C /* V7 */ +#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */ +#define L1D_MPW_PHYID3 0xD01E /* V8 */ + + +/* Autoneg Advertisement Register */ +#define ADVERTISE_DEFAULT_CAP \ + (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM) + +/* 1000BASE-T Control Register */ +#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */ + +#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */ +#define GIGA_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ +#define GIGA_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define GIGA_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define GIGA_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define GIGA_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define GIGA_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ +#define GIGA_CR_1000T_SPEED_MASK 0x0300 +#define GIGA_CR_1000T_DEFAULT_CAP 0x0300 + +/* PHY Specific Status Register */ +#define MII_GIGA_PSSR 0x11 +#define GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define GIGA_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define GIGA_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define GIGA_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define GIGA_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define GIGA_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +/* PHY Interrupt Enable Register */ +#define MII_IER 0x12 +#define IER_LINK_UP 0x0400 +#define IER_LINK_DOWN 0x0800 + +/* PHY Interrupt Status Register */ +#define MII_ISR 0x13 +#define ISR_LINK_UP 0x0400 +#define ISR_LINK_DOWN 0x0800 + +/* Cable-Detect-Test Control Register */ +#define MII_CDTC 0x16 +#define CDTC_EN_OFF 0 /* sc */ +#define CDTC_EN_BITS 1 +#define CDTC_PAIR_OFF 8 +#define CDTC_PAIR_BIT 2 + +/* Cable-Detect-Test Status Register */ +#define MII_CDTS 0x1C +#define CDTS_STATUS_OFF 8 +#define CDTS_STATUS_BITS 2 +#define CDTS_STATUS_NORMAL 0 +#define CDTS_STATUS_SHORT 1 +#define CDTS_STATUS_OPEN 2 +#define CDTS_STATUS_INVALID 3 + +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + +/***************************** debug port *************************************/ + +#define MIIDBG_ANACTRL 0x00 +#define ANACTRL_CLK125M_DELAY_EN 0x8000 +#define ANACTRL_VCO_FAST 0x4000 +#define ANACTRL_VCO_SLOW 0x2000 +#define ANACTRL_AFE_MODE_EN 0x1000 +#define ANACTRL_LCKDET_PHY 0x800 +#define ANACTRL_LCKDET_EN 0x400 +#define ANACTRL_OEN_125M 0x200 +#define ANACTRL_HBIAS_EN 0x100 +#define ANACTRL_HB_EN 0x80 +#define ANACTRL_SEL_HSP 0x40 +#define ANACTRL_CLASSA_EN 0x20 +#define ANACTRL_MANUSWON_SWR_MASK 3U +#define ANACTRL_MANUSWON_SWR_SHIFT 2 +#define ANACTRL_MANUSWON_SWR_2V 0 +#define ANACTRL_MANUSWON_SWR_1P9V 1 +#define ANACTRL_MANUSWON_SWR_1P8V 2 +#define ANACTRL_MANUSWON_SWR_1P7V 3 +#define ANACTRL_MANUSWON_BW3_4M 0x2 +#define ANACTRL_RESTART_CAL 0x1 +#define ANACTRL_DEF 0x02EF + +#define MIIDBG_SYSMODCTRL 0x04 +#define SYSMODCTRL_IECHOADJ_PFMH_PHY 0x8000 +#define SYSMODCTRL_IECHOADJ_BIASGEN 0x4000 +#define SYSMODCTRL_IECHOADJ_PFML_PHY 0x2000 +#define SYSMODCTRL_IECHOADJ_PS_MASK 3U +#define SYSMODCTRL_IECHOADJ_PS_SHIFT 10 +#define SYSMODCTRL_IECHOADJ_PS_40 3 +#define SYSMODCTRL_IECHOADJ_PS_20 2 +#define SYSMODCTRL_IECHOADJ_PS_0 1 +#define SYSMODCTRL_IECHOADJ_10BT_100MV 0x40 /* 1:100mv, 
0:200mv */ +#define SYSMODCTRL_IECHOADJ_HLFAP_MASK 3U +#define SYSMODCTRL_IECHOADJ_HLFAP_SHIFT 4 +#define SYSMODCTRL_IECHOADJ_VDFULBW 0x8 +#define SYSMODCTRL_IECHOADJ_VDBIASHLF 0x4 +#define SYSMODCTRL_IECHOADJ_VDAMPHLF 0x2 +#define SYSMODCTRL_IECHOADJ_VDLANSW 0x1 +#define SYSMODCTRL_IECHOADJ_DEF 0x88BB /* ???? */ + +/* for l1d & l2cb */ +#define SYSMODCTRL_IECHOADJ_CUR_ADD 0x8000 +#define SYSMODCTRL_IECHOADJ_CUR_MASK 7U +#define SYSMODCTRL_IECHOADJ_CUR_SHIFT 12 +#define SYSMODCTRL_IECHOADJ_VOL_MASK 0xFU +#define SYSMODCTRL_IECHOADJ_VOL_SHIFT 8 +#define SYSMODCTRL_IECHOADJ_VOL_17ALL 3 +#define SYSMODCTRL_IECHOADJ_VOL_100M15 1 +#define SYSMODCTRL_IECHOADJ_VOL_10M17 0 +#define SYSMODCTRL_IECHOADJ_BIAS1_MASK 0xFU +#define SYSMODCTRL_IECHOADJ_BIAS1_SHIFT 4 +#define SYSMODCTRL_IECHOADJ_BIAS2_MASK 0xFU +#define SYSMODCTRL_IECHOADJ_BIAS2_SHIFT 0 +#define L1D_SYSMODCTRL_IECHOADJ_DEF 0x4FBB + +#define MIIDBG_SRDSYSMOD 0x05 +#define SRDSYSMOD_LCKDET_EN 0x2000 +#define SRDSYSMOD_PLL_EN 0x800 +#define SRDSYSMOD_SEL_HSP 0x400 +#define SRDSYSMOD_HLFTXDR 0x200 +#define SRDSYSMOD_TXCLK_DELAY_EN 0x100 +#define SRDSYSMOD_TXELECIDLE 0x80 +#define SRDSYSMOD_DEEMP_EN 0x40 +#define SRDSYSMOD_MS_PAD 0x4 +#define SRDSYSMOD_CDR_ADC_VLTG 0x2 +#define SRDSYSMOD_CDR_DAC_1MA 0x1 +#define SRDSYSMOD_DEF 0x2C46 + +#define MIIDBG_CFGLPSPD 0x0A +#define CFGLPSPD_RSTCNT_MASK 3U +#define CFGLPSPD_RSTCNT_SHIFT 14 +#define CFGLPSPD_RSTCNT_CLK125SW 0x2000 + +#define MIIDBG_HIBNEG 0x0B +#define HIBNEG_PSHIB_EN 0x8000 +#define HIBNEG_WAKE_BOTH 0x4000 +#define HIBNEG_ONOFF_ANACHG_SUDEN 0x2000 +#define HIBNEG_HIB_PULSE 0x1000 +#define HIBNEG_GATE_25M_EN 0x800 +#define HIBNEG_RST_80U 0x400 +#define HIBNEG_RST_TIMER_MASK 3U +#define HIBNEG_RST_TIMER_SHIFT 8 +#define HIBNEG_GTX_CLK_DELAY_MASK 3U +#define HIBNEG_GTX_CLK_DELAY_SHIFT 5 +#define HIBNEG_BYPSS_BRKTIMER 0x10 +#define HIBNEG_DEF 0xBC40 + +#define MIIDBG_TST10BTCFG 0x12 +#define TST10BTCFG_INTV_TIMER_MASK 3U +#define TST10BTCFG_INTV_TIMER_SHIFT 14 +#define TST10BTCFG_TRIGER_TIMER_MASK 3U +#define TST10BTCFG_TRIGER_TIMER_SHIFT 12 +#define TST10BTCFG_DIV_MAN_MLT3_EN 0x800 +#define TST10BTCFG_OFF_DAC_IDLE 0x400 +#define TST10BTCFG_LPBK_DEEP 0x4 /* 1:deep,0:shallow */ +#define TST10BTCFG_DEF 0x4C04 + +#define MIIDBG_AZ_ANADECT 0x15 +#define AZ_ANADECT_10BTRX_TH 0x8000 +#define AZ_ANADECT_BOTH_01CHNL 0x4000 +#define AZ_ANADECT_INTV_MASK 0x3FU +#define AZ_ANADECT_INTV_SHIFT 8 +#define AZ_ANADECT_THRESH_MASK 0xFU +#define AZ_ANADECT_THRESH_SHIFT 4 +#define AZ_ANADECT_CHNL_MASK 0xFU +#define AZ_ANADECT_CHNL_SHIFT 0 +#define AZ_ANADECT_DEF 0x3220 +#define AZ_ANADECT_LONG 0xb210 + +#define MIIDBG_MSE16DB 0x18 /* l1d */ +#define L1D_MSE16DB_UP 0x05EA +#define L1D_MSE16DB_DOWN 0x02EA + +#define MIIDBG_LEGCYPS 0x29 +#define LEGCYPS_EN 0x8000 +#define LEGCYPS_DAC_AMP1000_MASK 7U +#define LEGCYPS_DAC_AMP1000_SHIFT 12 +#define LEGCYPS_DAC_AMP100_MASK 7U +#define LEGCYPS_DAC_AMP100_SHIFT 9 +#define LEGCYPS_DAC_AMP10_MASK 7U +#define LEGCYPS_DAC_AMP10_SHIFT 6 +#define LEGCYPS_UNPLUG_TIMER_MASK 7U +#define LEGCYPS_UNPLUG_TIMER_SHIFT 3 +#define LEGCYPS_UNPLUG_DECT_EN 0x4 +#define LEGCYPS_ECNC_PS_EN 0x1 +#define L1D_LEGCYPS_DEF 0x129D +#define L1C_LEGCYPS_DEF 0x36DD + +#define MIIDBG_TST100BTCFG 0x36 +#define TST100BTCFG_NORMAL_BW_EN 0x8000 +#define TST100BTCFG_BADLNK_BYPASS 0x4000 +#define TST100BTCFG_SHORTCABL_TH_MASK 0x3FU +#define TST100BTCFG_SHORTCABL_TH_SHIFT 8 +#define TST100BTCFG_LITCH_EN 0x80 +#define TST100BTCFG_VLT_SW 0x40 +#define TST100BTCFG_LONGCABL_TH_MASK 0x3FU +#define 
TST100BTCFG_LONGCABL_TH_SHIFT 0 +#define TST100BTCFG_DEF 0xE12C + +#define MIIDBG_VOLT_CTRL 0x3B /* only for l2cb 1 & 2 */ +#define VOLT_CTRL_CABLE1TH_MASK 0x1FFU +#define VOLT_CTRL_CABLE1TH_SHIFT 7 +#define VOLT_CTRL_AMPCTRL_MASK 3U +#define VOLT_CTRL_AMPCTRL_SHIFT 5 +#define VOLT_CTRL_SW_BYPASS 0x10 +#define VOLT_CTRL_SWLOWEST 0x8 +#define VOLT_CTRL_DACAMP10_MASK 7U +#define VOLT_CTRL_DACAMP10_SHIFT 0 + +#define MIIDBG_CABLE1TH_DET 0x3E +#define CABLE1TH_DET_EN 0x8000 + + +/******* dev 3 *********/ +#define MIIEXT_PCS 3 + +#define MIIEXT_CLDCTRL3 0x8003 +#define CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000 +#define CLDCTRL3_AZ_DISAMP 0x1000 +#define L2CB_CLDCTRL3 0x4D19 +#define L1D_CLDCTRL3 0xDD19 + +#define MIIEXT_CLDCTRL6 0x8006 +#define CLDCTRL6_CAB_LEN_MASK 0x1FFU +#define CLDCTRL6_CAB_LEN_SHIFT 0 +#define CLDCTRL6_CAB_LEN_SHORT 0x50 + +/********* dev 7 **********/ +#define MIIEXT_ANEG 7 + +#define MIIEXT_LOCAL_EEEADV 0x3C +#define LOCAL_EEEADV_1000BT 0x4 +#define LOCAL_EEEADV_100BT 0x2 + +#define MIIEXT_REMOTE_EEEADV 0x3D +#define REMOTE_EEEADV_1000BT 0x4 +#define REMOTE_EEEADV_100BT 0x2 + +#define MIIEXT_EEE_ANEG 0x8000 +#define EEE_ANEG_1000M 0x4 +#define EEE_ANEG_100M 0x2 + +#endif /*_ATL1C_HW_H_*/ diff --git a/addons/atl1c/src/3.10.108/atl1c_main.c b/addons/atl1c/src/3.10.108/atl1c_main.c new file mode 100644 index 00000000..297c3e5e --- /dev/null +++ b/addons/atl1c/src/3.10.108/atl1c_main.c @@ -0,0 +1,2818 @@ +/* + * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */
+
+#include "atl1c.h"
+
+#define ATL1C_DRV_VERSION "1.0.1.1-NAPI"
+char atl1c_driver_name[] = "atl1c";
+char atl1c_driver_version[] = ATL1C_DRV_VERSION;
+
+/*
+ * atl1c_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)},
+ /* required last entry */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
+
+MODULE_AUTHOR("Jie Yang");
+MODULE_AUTHOR("Qualcomm Atheros Inc., ");
+MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ATL1C_DRV_VERSION);
+
+static int atl1c_stop_mac(struct atl1c_hw *hw);
+static void atl1c_disable_l0s_l1(struct atl1c_hw *hw);
+static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed);
+static void atl1c_start_mac(struct atl1c_adapter *adapter);
+static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter,
+ int *work_done, int work_to_do);
+static int atl1c_up(struct atl1c_adapter *adapter);
+static void atl1c_down(struct atl1c_adapter *adapter);
+static int atl1c_reset_mac(struct atl1c_hw *hw);
+static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter);
+static int atl1c_configure(struct atl1c_adapter *adapter);
+static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter);
+
+static const u16 atl1c_pay_load_size[] = {
+ 128, 256, 512, 1024, 2048, 4096,
+};
+
+
+static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
+static void atl1c_pcie_patch(struct atl1c_hw *hw)
+{
+ u32 mst_data, data;
+
+ /* pclk sel could switch to 25M */
+ AT_READ_REG(hw, REG_MASTER_CTRL, &mst_data);
+ mst_data &= ~MASTER_CTRL_CLK_SEL_DIS;
+ AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data);
+
+ /* WoL/PCIE related settings */
+ if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
+ AT_READ_REG(hw, REG_PCIE_PHYMISC, &data);
+ data |= PCIE_PHYMISC_FORCE_RCV_DET;
+ AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data);
+ } else { /* new dev set bit5 of MASTER */
+ if (!(mst_data & MASTER_CTRL_WAKEN_25M))
+ AT_WRITE_REG(hw, REG_MASTER_CTRL,
+ mst_data | MASTER_CTRL_WAKEN_25M);
+ }
+ /* aspm/PCIE setting only for l2cb 1.0 */
+ if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) {
+ AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data);
+ data = FIELD_SETX(data, PCIE_PHYMISC2_CDR_BW,
+ L2CB1_PCIE_PHYMISC2_CDR_BW);
+ data = FIELD_SETX(data, PCIE_PHYMISC2_L0S_TH,
+ L2CB1_PCIE_PHYMISC2_L0S_TH);
+ AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data);
+ /* extend L1 sync timer */
+ AT_READ_REG(hw, REG_LINK_CTRL, &data);
+ data |= LINK_CTRL_EXT_SYNC;
+ AT_WRITE_REG(hw, REG_LINK_CTRL, data);
+ }
+ /* l2cb 1.x & l1d 1.x */
+ if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) {
+ AT_READ_REG(hw, REG_PM_CTRL, &data);
+ data |= PM_CTRL_L0S_BUFSRX_EN;
+ AT_WRITE_REG(hw, REG_PM_CTRL, data);
+ /* clear vendor msg */
+ AT_READ_REG(hw, REG_DMA_DBG, &data);
+ AT_WRITE_REG(hw, REG_DMA_DBG, data & ~DMA_DBG_VENDOR_MSG);
+ }
+}
+
+/* 
FIXME: no longer needed? */
+/*
+ * atl1c_init_pcie - init PCIE module
+ */
+static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
+{
+ u32 data;
+ u32 pci_cmd;
+ struct pci_dev *pdev = hw->adapter->pdev;
+ int pos;
+
+ AT_READ_REG(hw, PCI_COMMAND, &pci_cmd);
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
+ PCI_COMMAND_IO);
+ AT_WRITE_REG(hw, PCI_COMMAND, pci_cmd);
+
+ /*
+ * Clear any power-saving settings
+ */
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_enable_wake(pdev, PCI_D3cold, 0);
+ /* wol sts read-clear */
+ AT_READ_REG(hw, REG_WOL_CTRL, &data);
+ AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+
+ /*
+ * Mask some pcie error bits
+ */
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
+ pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
+ data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
+ pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+ /* clear error status */
+ pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
+ PCI_EXP_DEVSTA_NFED |
+ PCI_EXP_DEVSTA_FED |
+ PCI_EXP_DEVSTA_CED |
+ PCI_EXP_DEVSTA_URD);
+
+ AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data);
+ data &= ~LTSSM_ID_EN_WRO;
+ AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data);
+
+ atl1c_pcie_patch(hw);
+ if (flag & ATL1C_PCIE_L0S_L1_DISABLE)
+ atl1c_disable_l0s_l1(hw);
+
+ msleep(5);
+}
+
+/**
+ * atl1c_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ */
+static inline void atl1c_irq_enable(struct atl1c_adapter *adapter)
+{
+ if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
+ AT_WRITE_REG(&adapter->hw, REG_ISR, 0x7FFFFFFF);
+ AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
+ AT_WRITE_FLUSH(&adapter->hw);
+ }
+}
+
+/**
+ * atl1c_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ */
+static inline void atl1c_irq_disable(struct atl1c_adapter *adapter)
+{
+ atomic_inc(&adapter->irq_sem);
+ AT_WRITE_REG(&adapter->hw, REG_IMR, 0);
+ AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT);
+ AT_WRITE_FLUSH(&adapter->hw);
+ synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * atl1c_irq_reset - reset the interrupt configuration on the NIC
+ * @adapter: board private structure
+ */
+static inline void atl1c_irq_reset(struct atl1c_adapter *adapter)
+{
+ atomic_set(&adapter->irq_sem, 1);
+ atl1c_irq_enable(adapter);
+}
+
+/*
+ * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads
+ * of the idle status register until the device is actually idle
+ */
+static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl)
+{
+ int timeout;
+ u32 data;
+
+ for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) {
+ AT_READ_REG(hw, REG_IDLE_STATUS, &data);
+ if ((data & modu_ctrl) == 0)
+ return 0;
+ msleep(1);
+ }
+ return data;
+}
+
+/**
+ * atl1c_phy_config - timer callback
+ * @data: pointer to netdev cast into an unsigned long
+ */
+static void atl1c_phy_config(unsigned long data)
+{
+ struct atl1c_adapter *adapter = (struct atl1c_adapter *) data;
+ struct atl1c_hw *hw = &adapter->hw;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->mdio_lock, flags);
+ atl1c_restart_autoneg(hw);
+ spin_unlock_irqrestore(&adapter->mdio_lock, flags);
+}
+
+void atl1c_reinit_locked(struct atl1c_adapter *adapter)
+{
+ WARN_ON(in_interrupt());
+ atl1c_down(adapter);
+ atl1c_up(adapter);
+ clear_bit(__AT_RESETTING, &adapter->flags);
+}
+
+static void atl1c_check_link_status(struct atl1c_adapter *adapter)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ struct net_device *netdev = 
adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+ unsigned long flags;
+ u16 speed, duplex, phy_data;
+
+ spin_lock_irqsave(&adapter->mdio_lock, flags);
+ /* MII_BMSR must be read twice */
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ atl1c_read_phy_reg(hw, MII_BMSR, &phy_data);
+ spin_unlock_irqrestore(&adapter->mdio_lock, flags);
+
+ if ((phy_data & BMSR_LSTATUS) == 0) {
+ /* link down */
+ netif_carrier_off(netdev);
+ hw->hibernate = true;
+ if (atl1c_reset_mac(hw) != 0)
+ if (netif_msg_hw(adapter))
+ dev_warn(&pdev->dev, "reset mac failed\n");
+ atl1c_set_aspm(hw, SPEED_0);
+ atl1c_post_phy_linkchg(hw, SPEED_0);
+ atl1c_reset_dma_ring(adapter);
+ atl1c_configure(adapter);
+ } else {
+ /* Link Up */
+ hw->hibernate = false;
+ spin_lock_irqsave(&adapter->mdio_lock, flags);
+ err = atl1c_get_speed_and_duplex(hw, &speed, &duplex);
+ spin_unlock_irqrestore(&adapter->mdio_lock, flags);
+ if (unlikely(err))
+ return;
+ /* link result is our setting */
+ if (adapter->link_speed != speed ||
+ adapter->link_duplex != duplex) {
+ adapter->link_speed = speed;
+ adapter->link_duplex = duplex;
+ atl1c_set_aspm(hw, speed);
+ atl1c_post_phy_linkchg(hw, speed);
+ atl1c_start_mac(adapter);
+ if (netif_msg_link(adapter))
+ dev_info(&pdev->dev,
+ "%s: %s NIC Link is Up<%d Mbps %s>\n",
+ atl1c_driver_name, netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full Duplex" : "Half Duplex");
+ }
+ if (!netif_carrier_ok(netdev))
+ netif_carrier_on(netdev);
+ }
+}
+
+static void atl1c_link_chg_event(struct atl1c_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ u16 phy_data;
+ u16 link_up;
+
+ spin_lock(&adapter->mdio_lock);
+ atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
+ spin_unlock(&adapter->mdio_lock);
+ link_up = phy_data & BMSR_LSTATUS;
+ /* notify upper layer link down ASAP */
+ if (!link_up) {
+ if (netif_carrier_ok(netdev)) {
+ /* old link state: Up */
+ netif_carrier_off(netdev);
+ if (netif_msg_link(adapter))
+ dev_info(&pdev->dev,
+ "%s: %s NIC Link is Down\n",
+ atl1c_driver_name, netdev->name);
+ adapter->link_speed = SPEED_0;
+ }
+ }
+
+ set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event);
+ schedule_work(&adapter->common_task);
+}
+
+static void atl1c_common_task(struct work_struct *work)
+{
+ struct atl1c_adapter *adapter;
+ struct net_device *netdev;
+
+ adapter = container_of(work, struct atl1c_adapter, common_task);
+ netdev = adapter->netdev;
+
+ if (test_bit(__AT_DOWN, &adapter->flags))
+ return;
+
+ if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) {
+ netif_device_detach(netdev);
+ atl1c_down(adapter);
+ atl1c_up(adapter);
+ netif_device_attach(netdev);
+ }
+
+ if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE,
+ &adapter->work_event)) {
+ atl1c_irq_disable(adapter);
+ atl1c_check_link_status(adapter);
+ atl1c_irq_enable(adapter);
+ }
+}
+
+
+static void atl1c_del_timer(struct atl1c_adapter *adapter)
+{
+ del_timer_sync(&adapter->phy_config_timer);
+}
+
+
+/**
+ * atl1c_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ */
+static void atl1c_tx_timeout(struct net_device *netdev)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
+ schedule_work(&adapter->common_task);
+}
+
+/**
+ * atl1c_set_multi - Multicast and Promiscuous mode set
+ * 
@netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ */
+static void atl1c_set_multi(struct net_device *netdev)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct atl1c_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u32 mac_ctrl_data;
+ u32 hash_value;
+
+ /* Check for Promiscuous and All Multicast modes */
+ AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data);
+
+ if (netdev->flags & IFF_PROMISC) {
+ mac_ctrl_data |= MAC_CTRL_PROMIS_EN;
+ } else if (netdev->flags & IFF_ALLMULTI) {
+ mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
+ mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN;
+ } else {
+ mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
+ }
+
+ AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
+
+ /* clear the old settings from the multicast hash table */
+ AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0);
+ AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
+
+ /* compute the multicast addresses' hash values and put them into the hash table */
+ netdev_for_each_mc_addr(ha, netdev) {
+ hash_value = atl1c_hash_mc_addr(hw, ha->addr);
+ atl1c_hash_set(hw, hash_value);
+ }
+}
+
+static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
+{
+ if (features & NETIF_F_HW_VLAN_CTAG_RX) {
+ /* enable VLAN tag insert/strip */
+ *mac_ctrl_data |= MAC_CTRL_RMV_VLAN;
+ } else {
+ /* disable VLAN tag insert/strip */
+ *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN;
+ }
+}
+
+static void atl1c_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ u32 mac_ctrl_data = 0;
+
+ if (netif_msg_pktdata(adapter))
+ dev_dbg(&pdev->dev, "atl1c_vlan_mode\n");
+
+ atl1c_irq_disable(adapter);
+ AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data);
+ __atl1c_vlan_mode(features, &mac_ctrl_data);
+ AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data);
+ atl1c_irq_enable(adapter);
+}
+
+static void atl1c_restore_vlan(struct atl1c_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (netif_msg_pktdata(adapter))
+ dev_dbg(&pdev->dev, "atl1c_restore_vlan\n");
+ atl1c_vlan_mode(adapter->netdev, adapter->netdev->features);
+}
+
+/**
+ * atl1c_set_mac_addr - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
+{
+ struct atl1c_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
+
+ atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
+
+ return 0;
+}
+
+static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
+ struct net_device *dev)
+{
+ unsigned int head_size;
+ int mtu = dev->mtu;
+
+ adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ? 
+ roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; + + head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + adapter->rx_frag_size = roundup_pow_of_two(head_size); +} + +static netdev_features_t atl1c_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + if (netdev->mtu > MAX_TSO_FRAME_SIZE) + features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + + return features; +} + +static int atl1c_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + atl1c_vlan_mode(netdev, features); + + return 0; +} + +/** + * atl1c_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1c_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + /* Fast Ethernet controller doesn't support jumbo packet */ + if (((hw->nic_type == athr_l2c || + hw->nic_type == athr_l2c_b || + hw->nic_type == athr_l2c_b2) && new_mtu > ETH_DATA_LEN) || + max_frame < ETH_ZLEN + ETH_FCS_LEN || + max_frame > MAX_JUMBO_FRAME_SIZE) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); + return -EINVAL; + } + /* set MTU */ + if (old_mtu != new_mtu && netif_running(netdev)) { + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + netdev->mtu = new_mtu; + adapter->hw.max_frame_size = new_mtu; + atl1c_set_rxbufsize(adapter, netdev); + atl1c_down(adapter); + netdev_update_features(netdev); + atl1c_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); + } + return 0; +} + +/* + * caller should hold mdio_lock + */ +static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1c_read_phy_reg(&adapter->hw, reg_num, &result); + return result; +} + +static void atl1c_mdio_write(struct net_device *netdev, int phy_id, + int reg_num, int val) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + atl1c_write_phy_reg(&adapter->hw, reg_num, val); +} + +static int atl1c_mii_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct mii_ioctl_data *data = if_mii(ifr); + unsigned long flags; + int retval = 0; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 0; + break; + + case SIOCGMIIREG: + if (atl1c_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) { + retval = -EIO; + goto out; + } + break; + + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) { + retval = -EFAULT; + goto out; + } + + dev_dbg(&pdev->dev, " write %x %x", + data->reg_num, data->val_in); + if (atl1c_write_phy_reg(&adapter->hw, + data->reg_num, data->val_in)) { + retval = -EIO; + 
goto out; + } + break; + + default: + retval = -EOPNOTSUPP; + break; + } +out: + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + return retval; +} + +static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atl1c_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * atl1c_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + */ +static int atl1c_alloc_queues(struct atl1c_adapter *adapter) +{ + return 0; +} + +static void atl1c_set_mac_type(struct atl1c_hw *hw) +{ + switch (hw->device_id) { + case PCI_DEVICE_ID_ATTANSIC_L2C: + hw->nic_type = athr_l2c; + break; + case PCI_DEVICE_ID_ATTANSIC_L1C: + hw->nic_type = athr_l1c; + break; + case PCI_DEVICE_ID_ATHEROS_L2C_B: + hw->nic_type = athr_l2c_b; + break; + case PCI_DEVICE_ID_ATHEROS_L2C_B2: + hw->nic_type = athr_l2c_b2; + break; + case PCI_DEVICE_ID_ATHEROS_L1D: + hw->nic_type = athr_l1d; + break; + case PCI_DEVICE_ID_ATHEROS_L1D_2_0: + hw->nic_type = athr_l1d_2; + break; + default: + break; + } +} + +static int atl1c_setup_mac_funcs(struct atl1c_hw *hw) +{ + u32 link_ctrl_data; + + atl1c_set_mac_type(hw); + AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); + + hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE | + ATL1C_TXQ_MODE_ENHANCE; + hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT | + ATL1C_ASPM_L1_SUPPORT; + hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; + + if (hw->nic_type == athr_l1c || + hw->nic_type == athr_l1d || + hw->nic_type == athr_l1d_2) + hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; + return 0; +} + +struct atl1c_platform_patch { + u16 pci_did; + u8 pci_revid; + u16 subsystem_vid; + u16 subsystem_did; + u32 patch_flag; +#define ATL1C_LINK_PATCH 0x1 +}; +static const struct atl1c_platform_patch plats[] = { +{0x2060, 0xC1, 0x1019, 0x8152, 0x1}, +{0x2060, 0xC1, 0x1019, 0x2060, 0x1}, +{0x2060, 0xC1, 0x1019, 0xE000, 0x1}, +{0x2062, 0xC0, 0x1019, 0x8152, 0x1}, +{0x2062, 0xC0, 0x1019, 0x2062, 0x1}, +{0x2062, 0xC0, 0x1458, 0xE000, 0x1}, +{0x2062, 0xC1, 0x1019, 0x8152, 0x1}, +{0x2062, 0xC1, 0x1019, 0x2062, 0x1}, +{0x2062, 0xC1, 0x1458, 0xE000, 0x1}, +{0x2062, 0xC1, 0x1565, 0x2802, 0x1}, +{0x2062, 0xC1, 0x1565, 0x2801, 0x1}, +{0x1073, 0xC0, 0x1019, 0x8151, 0x1}, +{0x1073, 0xC0, 0x1019, 0x1073, 0x1}, +{0x1073, 0xC0, 0x1458, 0xE000, 0x1}, +{0x1083, 0xC0, 0x1458, 0xE000, 0x1}, +{0x1083, 0xC0, 0x1019, 0x8151, 0x1}, +{0x1083, 0xC0, 0x1019, 0x1083, 0x1}, +{0x1083, 0xC0, 0x1462, 0x7680, 0x1}, +{0x1083, 0xC0, 0x1565, 0x2803, 0x1}, +{0}, +}; + +static void atl1c_patch_assign(struct atl1c_hw *hw) +{ + struct pci_dev *pdev = hw->adapter->pdev; + u32 misc_ctrl; + int i = 0; + + hw->msi_lnkpatch = false; + + while (plats[i].pci_did != 0) { + if (plats[i].pci_did == hw->device_id && + plats[i].pci_revid == hw->revision_id && + plats[i].subsystem_vid == hw->subsystem_vendor_id && + plats[i].subsystem_did == hw->subsystem_id) { + if (plats[i].patch_flag & ATL1C_LINK_PATCH) + hw->msi_lnkpatch = true; + } + i++; + } + + if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 && + hw->revision_id == L2CB_V21) { + /* config acess mode */ + pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR, + REG_PCIE_DEV_MISC_CTRL); + pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl); + misc_ctrl &= ~0x100; + pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR, + REG_PCIE_DEV_MISC_CTRL); + pci_write_config_dword(pdev, REG_PCIE_IND_ACC_DATA, misc_ctrl); + } +} +/** + * atl1c_sw_init - Initialize 
general software structures (struct atl1c_adapter) + * @adapter: board private structure to initialize + * + * atl1c_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + */ +static int atl1c_sw_init(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 revision; + + + adapter->wol = 0; + device_set_wakeup_enable(&pdev->dev, false); + adapter->link_speed = SPEED_0; + adapter->link_duplex = FULL_DUPLEX; + adapter->tpd_ring[0].count = 1024; + adapter->rfd_ring.count = 512; + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + pci_read_config_dword(pdev, PCI_CLASS_REVISION, &revision); + hw->revision_id = revision & 0xFF; + /* before link up, we assume hibernate is true */ + hw->hibernate = true; + hw->media_type = MEDIA_TYPE_AUTO_SENSOR; + if (atl1c_setup_mac_funcs(hw) != 0) { + dev_err(&pdev->dev, "set mac function pointers failed\n"); + return -1; + } + atl1c_patch_assign(hw); + + hw->intr_mask = IMR_NORMAL_MASK; + hw->phy_configured = false; + hw->preamble_len = 7; + hw->max_frame_size = adapter->netdev->mtu; + hw->autoneg_advertised = ADVERTISED_Autoneg; + hw->indirect_tab = 0xE4E4E4E4; + hw->base_cpu = 0; + + hw->ict = 50000; /* 100ms */ + hw->smb_timer = 200000; /* 400ms */ + hw->rx_imt = 200; + hw->tx_imt = 1000; + + hw->tpd_burst = 5; + hw->rfd_burst = 8; + hw->dma_order = atl1c_dma_ord_out; + hw->dmar_block = atl1c_dma_req_1024; + + if (atl1c_alloc_queues(adapter)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + /* TODO */ + atl1c_set_rxbufsize(adapter, adapter->netdev); + atomic_set(&adapter->irq_sem, 1); + spin_lock_init(&adapter->mdio_lock); + spin_lock_init(&adapter->tx_lock); + set_bit(__AT_DOWN, &adapter->flags); + + return 0; +} + +static inline void atl1c_clean_buffer(struct pci_dev *pdev, + struct atl1c_buffer *buffer_info, int in_irq) +{ + u16 pci_driection; + if (buffer_info->flags & ATL1C_BUFFER_FREE) + return; + if (buffer_info->dma) { + if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE) + pci_driection = PCI_DMA_FROMDEVICE; + else + pci_driection = PCI_DMA_TODEVICE; + + if (buffer_info->flags & ATL1C_PCIMAP_SINGLE) + pci_unmap_single(pdev, buffer_info->dma, + buffer_info->length, pci_driection); + else if (buffer_info->flags & ATL1C_PCIMAP_PAGE) + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, pci_driection); + } + if (buffer_info->skb) { + if (in_irq) + dev_kfree_skb_irq(buffer_info->skb); + else + dev_kfree_skb(buffer_info->skb); + } + buffer_info->dma = 0; + buffer_info->skb = NULL; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); +} +/** + * atl1c_clean_tx_ring - Free Tx-skb + * @adapter: board private structure + */ +static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + struct atl1c_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + u16 index, ring_count; + + ring_count = tpd_ring->count; + for (index = 0; index < ring_count; index++) { + buffer_info = &tpd_ring->buffer_info[index]; + atl1c_clean_buffer(pdev, buffer_info, 0); + } + + /* Zero out Tx-buffers */ + memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) * + ring_count); + atomic_set(&tpd_ring->next_to_clean, 0); + tpd_ring->next_to_use = 0; 
+}
+
+/**
+ * atl1c_clean_rx_ring - Free rx-reservation skbs
+ * @adapter: board private structure
+ */
+static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
+{
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1c_buffer *buffer_info;
+ struct pci_dev *pdev = adapter->pdev;
+ int j;
+
+ for (j = 0; j < rfd_ring->count; j++) {
+ buffer_info = &rfd_ring->buffer_info[j];
+ atl1c_clean_buffer(pdev, buffer_info, 0);
+ }
+ /* zero out the descriptor ring */
+ memset(rfd_ring->desc, 0, rfd_ring->size);
+ rfd_ring->next_to_clean = 0;
+ rfd_ring->next_to_use = 0;
+ rrd_ring->next_to_use = 0;
+ rrd_ring->next_to_clean = 0;
+}
+
+/*
+ * Read / Write Ptr Initialize:
+ */
+static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
+{
+ struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1c_buffer *buffer_info;
+ int i, j;
+
+ for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
+ tpd_ring[i].next_to_use = 0;
+ atomic_set(&tpd_ring[i].next_to_clean, 0);
+ buffer_info = tpd_ring[i].buffer_info;
+ /* mark every slot of this ring free (index j, not i) */
+ for (j = 0; j < tpd_ring->count; j++)
+ ATL1C_SET_BUFFER_STATE(&buffer_info[j],
+ ATL1C_BUFFER_FREE);
+ }
+ rfd_ring->next_to_use = 0;
+ rfd_ring->next_to_clean = 0;
+ rrd_ring->next_to_use = 0;
+ rrd_ring->next_to_clean = 0;
+ for (j = 0; j < rfd_ring->count; j++) {
+ buffer_info = &rfd_ring->buffer_info[j];
+ ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
+ }
+}
+
+/**
+ * atl1c_free_ring_resources - Free Tx / RX descriptor Resources
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ */
+static void atl1c_free_ring_resources(struct atl1c_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ pci_free_consistent(pdev, adapter->ring_header.size,
+ adapter->ring_header.desc,
+ adapter->ring_header.dma);
+ adapter->ring_header.desc = NULL;
+
+ /* Note: just free tpd_ring.buffer_info;
+ * it contains rfd_ring.buffer_info, do not double free */
+ if (adapter->tpd_ring[0].buffer_info) {
+ kfree(adapter->tpd_ring[0].buffer_info);
+ adapter->tpd_ring[0].buffer_info = NULL;
+ }
+ if (adapter->rx_page) {
+ put_page(adapter->rx_page);
+ adapter->rx_page = NULL;
+ }
+}
+
+/**
+ * atl1c_setup_ring_resources - allocate Tx / RX descriptor resources
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ */
+static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring;
+ struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+ struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring;
+ struct atl1c_ring_header *ring_header = &adapter->ring_header;
+ int size;
+ int i;
+ int count = 0;
+ int rx_desc_count = 0;
+ u32 offset = 0;
+
+ rrd_ring->count = rfd_ring->count;
+ for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++)
+ tpd_ring[i].count = tpd_ring[0].count;
+
+ /* 2 tpd queue, one high priority queue,
+ * another normal priority queue */
+ size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 +
+ rfd_ring->count);
+ tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL);
+ if (unlikely(!tpd_ring->buffer_info))
+ goto err_nomem;
+
+ for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) {
+ tpd_ring[i].buffer_info =
+ (tpd_ring->buffer_info + count);
+ count += tpd_ring[i].count;
+ }
+
+ rfd_ring->buffer_info =
+ (tpd_ring->buffer_info + count);
+ count += 
rfd_ring->count; + rx_desc_count += rfd_ring->count; + + /* + * real ring DMA buffer + * each ring/block may need up to 8 bytes for alignment, hence the + * additional bytes tacked onto the end. + */ + ring_header->size = size = + sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 + + sizeof(struct atl1c_rx_free_desc) * rx_desc_count + + sizeof(struct atl1c_recv_ret_status) * rx_desc_count + + 8 * 4; + + ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, + &ring_header->dma, GFP_KERNEL); + if (unlikely(!ring_header->desc)) { + dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); + goto err_nomem; + } + /* init TPD ring */ + + tpd_ring[0].dma = roundup(ring_header->dma, 8); + offset = tpd_ring[0].dma - ring_header->dma; + for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { + tpd_ring[i].dma = ring_header->dma + offset; + tpd_ring[i].desc = (u8 *) ring_header->desc + offset; + tpd_ring[i].size = + sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count; + offset += roundup(tpd_ring[i].size, 8); + } + /* init RFD ring */ + rfd_ring->dma = ring_header->dma + offset; + rfd_ring->desc = (u8 *) ring_header->desc + offset; + rfd_ring->size = sizeof(struct atl1c_rx_free_desc) * rfd_ring->count; + offset += roundup(rfd_ring->size, 8); + + /* init RRD ring */ + rrd_ring->dma = ring_header->dma + offset; + rrd_ring->desc = (u8 *) ring_header->desc + offset; + rrd_ring->size = sizeof(struct atl1c_recv_ret_status) * + rrd_ring->count; + offset += roundup(rrd_ring->size, 8); + + return 0; + +err_nomem: + kfree(tpd_ring->buffer_info); + return -ENOMEM; +} + +static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) + adapter->tpd_ring; + + /* TPD */ + AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI, + (u32)((tpd_ring[atl1c_trans_normal].dma & + AT_DMA_HI_ADDR_MASK) >> 32)); + /* just enable normal priority TX queue */ + AT_WRITE_REG(hw, REG_TPD_PRI0_ADDR_LO, + (u32)(tpd_ring[atl1c_trans_normal].dma & + AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_TPD_PRI1_ADDR_LO, + (u32)(tpd_ring[atl1c_trans_high].dma & + AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_TPD_RING_SIZE, + (u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK)); + + + /* RFD */ + AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI, + (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32)); + AT_WRITE_REG(hw, REG_RFD0_HEAD_ADDR_LO, + (u32)(rfd_ring->dma & AT_DMA_LO_ADDR_MASK)); + + AT_WRITE_REG(hw, REG_RFD_RING_SIZE, + rfd_ring->count & RFD_RING_SIZE_MASK); + AT_WRITE_REG(hw, REG_RX_BUF_SIZE, + adapter->rx_buffer_len & RX_BUF_SIZE_MASK); + + /* RRD */ + AT_WRITE_REG(hw, REG_RRD0_HEAD_ADDR_LO, + (u32)(rrd_ring->dma & AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_RRD_RING_SIZE, + (rrd_ring->count & RRD_RING_SIZE_MASK)); + + if (hw->nic_type == athr_l2c_b) { + AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L); + AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L); + AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L); + AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L); + AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L); + AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L); + AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/ + AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/ + } + /* Load all of base address above */ + AT_WRITE_REG(hw, REG_LOAD_PTR, 1); +} + +static void atl1c_configure_tx(struct atl1c_adapter *adapter) +{ + 
struct atl1c_hw *hw = &adapter->hw;
+ int max_pay_load;
+ u16 tx_offload_thresh;
+ u32 txq_ctrl_data;
+
+ tx_offload_thresh = MAX_TSO_FRAME_SIZE;
+ AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH,
+ (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK);
+ max_pay_load = pcie_get_readrq(adapter->pdev) >> 8;
+ hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block);
+ /*
+ * if the BIOS changed the DMA read max request size to an invalid
+ * value, restore it to the default
+ */
+ if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) {
+ pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN);
+ hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN;
+ }
+ txq_ctrl_data =
+ hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ?
+ L2CB_TXQ_CFGV : L1C_TXQ_CFGV;
+
+ AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data);
+}
+
+static void atl1c_configure_rx(struct atl1c_adapter *adapter)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ u32 rxq_ctrl_data;
+
+ rxq_ctrl_data = (hw->rfd_burst & RXQ_RFD_BURST_NUM_MASK) <<
+ RXQ_RFD_BURST_NUM_SHIFT;
+
+ if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM)
+ rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN;
+
+ /* aspm for gigabit */
+ if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0)
+ rxq_ctrl_data = FIELD_SETX(rxq_ctrl_data, ASPM_THRUPUT_LIMIT,
+ ASPM_THRUPUT_LIMIT_100M);
+
+ AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data);
+}
+
+static void atl1c_configure_dma(struct atl1c_adapter *adapter)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ u32 dma_ctrl_data;
+
+ dma_ctrl_data = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) |
+ DMA_CTRL_RREQ_PRI_DATA |
+ FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) |
+ FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) |
+ FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF);
+
+ AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
+}
+
+/*
+ * Stop the mac, transmit and receive units
+ * hw - Struct containing variables accessed by shared code
+ * return : 0 or idle status (if error)
+ */
+static int atl1c_stop_mac(struct atl1c_hw *hw)
+{
+ u32 data;
+
+ AT_READ_REG(hw, REG_RXQ_CTRL, &data);
+ data &= ~RXQ_CTRL_EN;
+ AT_WRITE_REG(hw, REG_RXQ_CTRL, data);
+
+ AT_READ_REG(hw, REG_TXQ_CTRL, &data);
+ data &= ~TXQ_CTRL_EN;
+ AT_WRITE_REG(hw, REG_TXQ_CTRL, data);
+
+ atl1c_wait_until_idle(hw, IDLE_STATUS_RXQ_BUSY | IDLE_STATUS_TXQ_BUSY);
+
+ AT_READ_REG(hw, REG_MAC_CTRL, &data);
+ data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN);
+ AT_WRITE_REG(hw, REG_MAC_CTRL, data);
+
+ return (int)atl1c_wait_until_idle(hw,
+ IDLE_STATUS_TXMAC_BUSY | IDLE_STATUS_RXMAC_BUSY);
+}
+
+static void atl1c_start_mac(struct atl1c_adapter *adapter)
+{
+ struct atl1c_hw *hw = &adapter->hw;
+ u32 mac, txq, rxq;
+
+ hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX ? true : false;
+ hw->mac_speed = adapter->link_speed == SPEED_1000 ? 
+ atl1c_mac_speed_1000 : atl1c_mac_speed_10_100; + + AT_READ_REG(hw, REG_TXQ_CTRL, &txq); + AT_READ_REG(hw, REG_RXQ_CTRL, &rxq); + AT_READ_REG(hw, REG_MAC_CTRL, &mac); + + txq |= TXQ_CTRL_EN; + rxq |= RXQ_CTRL_EN; + mac |= MAC_CTRL_TX_EN | MAC_CTRL_TX_FLOW | + MAC_CTRL_RX_EN | MAC_CTRL_RX_FLOW | + MAC_CTRL_ADD_CRC | MAC_CTRL_PAD | + MAC_CTRL_BC_EN | MAC_CTRL_SINGLE_PAUSE_EN | + MAC_CTRL_HASH_ALG_CRC32; + if (hw->mac_duplex) + mac |= MAC_CTRL_DUPLX; + else + mac &= ~MAC_CTRL_DUPLX; + mac = FIELD_SETX(mac, MAC_CTRL_SPEED, hw->mac_speed); + mac = FIELD_SETX(mac, MAC_CTRL_PRMLEN, hw->preamble_len); + + AT_WRITE_REG(hw, REG_TXQ_CTRL, txq); + AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac); +} + +/* + * Reset the transmit and receive units; mask and clear all interrupts. + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +static int atl1c_reset_mac(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + u32 ctrl_data = 0; + + atl1c_stop_mac(hw); + /* + * Issue Soft Reset to the MAC. This will reset the chip's + * transmit, receive, DMA. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + AT_READ_REG(hw, REG_MASTER_CTRL, &ctrl_data); + ctrl_data |= MASTER_CTRL_OOB_DIS; + AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data | MASTER_CTRL_SOFT_RST); + + AT_WRITE_FLUSH(hw); + msleep(10); + /* Wait at least 10ms for All module to be Idle */ + + if (atl1c_wait_until_idle(hw, IDLE_STATUS_MASK)) { + dev_err(&pdev->dev, + "MAC state machine can't be idle since" + " disabled for 10ms second\n"); + return -1; + } + AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data); + + /* driver control speed/duplex */ + AT_READ_REG(hw, REG_MAC_CTRL, &ctrl_data); + AT_WRITE_REG(hw, REG_MAC_CTRL, ctrl_data | MAC_CTRL_SPEED_MODE_SW); + + /* clk switch setting */ + AT_READ_REG(hw, REG_SERDES, &ctrl_data); + switch (hw->nic_type) { + case athr_l2c_b: + ctrl_data &= ~(SERDES_PHY_CLK_SLOWDOWN | + SERDES_MAC_CLK_SLOWDOWN); + AT_WRITE_REG(hw, REG_SERDES, ctrl_data); + break; + case athr_l2c_b2: + case athr_l1d_2: + ctrl_data |= SERDES_PHY_CLK_SLOWDOWN | SERDES_MAC_CLK_SLOWDOWN; + AT_WRITE_REG(hw, REG_SERDES, ctrl_data); + break; + default: + break; + } + + return 0; +} + +static void atl1c_disable_l0s_l1(struct atl1c_hw *hw) +{ + u16 ctrl_flags = hw->ctrl_flags; + + hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT); + atl1c_set_aspm(hw, SPEED_0); + hw->ctrl_flags = ctrl_flags; +} + +/* + * Set ASPM state. + * Enable/disable L0s/L1 depend on link state. + */ +static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed) +{ + u32 pm_ctrl_data; + u32 link_l1_timer; + + AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); + pm_ctrl_data &= ~(PM_CTRL_ASPM_L1_EN | + PM_CTRL_ASPM_L0S_EN | + PM_CTRL_MAC_ASPM_CHK); + /* L1 timer */ + if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { + pm_ctrl_data &= ~PMCTRL_TXL1_AFTER_L0S; + link_l1_timer = + link_speed == SPEED_1000 || link_speed == SPEED_100 ? + L1D_PMCTRL_L1_ENTRY_TM_16US : 1; + pm_ctrl_data = FIELD_SETX(pm_ctrl_data, + L1D_PMCTRL_L1_ENTRY_TM, link_l1_timer); + } else { + link_l1_timer = hw->nic_type == athr_l2c_b ? 
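+            /* the remaining parts use a per-family constant; as in the
+             * branch above, the timer drops to 1 below whenever the
+             * link is not running at 100M/1000M. */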
+ L2CB1_PM_CTRL_L1_ENTRY_TM : L1C_PM_CTRL_L1_ENTRY_TM; + if (link_speed != SPEED_1000 && link_speed != SPEED_100) + link_l1_timer = 1; + pm_ctrl_data = FIELD_SETX(pm_ctrl_data, + PM_CTRL_L1_ENTRY_TIMER, link_l1_timer); + } + + /* L0S/L1 enable */ + if ((hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) && link_speed != SPEED_0) + pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK; + if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) + pm_ctrl_data |= PM_CTRL_ASPM_L1_EN | PM_CTRL_MAC_ASPM_CHK; + + /* l2cb & l1d & l2cb2 & l1d2 */ + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || + hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { + pm_ctrl_data = FIELD_SETX(pm_ctrl_data, + PM_CTRL_PM_REQ_TIMER, PM_CTRL_PM_REQ_TO_DEF); + pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER | + PM_CTRL_SERDES_PD_EX_L1 | + PM_CTRL_CLK_SWH_L1; + pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN | + PM_CTRL_SERDES_PLL_L1_EN | + PM_CTRL_SERDES_BUFS_RX_L1_EN | + PM_CTRL_SA_DLY_EN | + PM_CTRL_HOTRST); + /* disable l0s if link down or l2cb */ + if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b) + pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; + } else { /* l1c */ + pm_ctrl_data = + FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, 0); + if (link_speed != SPEED_0) { + pm_ctrl_data |= PM_CTRL_SERDES_L1_EN | + PM_CTRL_SERDES_PLL_L1_EN | + PM_CTRL_SERDES_BUFS_RX_L1_EN; + pm_ctrl_data &= ~(PM_CTRL_SERDES_PD_EX_L1 | + PM_CTRL_CLK_SWH_L1 | + PM_CTRL_ASPM_L0S_EN | + PM_CTRL_ASPM_L1_EN); + } else { /* link down */ + pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; + pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN | + PM_CTRL_SERDES_PLL_L1_EN | + PM_CTRL_SERDES_BUFS_RX_L1_EN | + PM_CTRL_ASPM_L0S_EN); + } + } + AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); + + return; +} + +/** + * atl1c_configure - Configure Transmit&Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx /Rx unit of the MAC after a reset. + */ +static int atl1c_configure_mac(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + u32 master_ctrl_data = 0; + u32 intr_modrt_data; + u32 data; + + AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); + master_ctrl_data &= ~(MASTER_CTRL_TX_ITIMER_EN | + MASTER_CTRL_RX_ITIMER_EN | + MASTER_CTRL_INT_RDCLR); + /* clear interrupt status */ + AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); + /* Clear any WOL status */ + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + /* set Interrupt Clear Timer + * HW will enable self to assert interrupt event to system after + * waiting x-time for software to notify it accept interrupt. 
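+     * (presumably hw->ict, in 2 us units per atl1c.h, which is written
+     * to REG_INT_RETRIG_TIMER a few lines below)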
+ */ + + data = CLK_GATING_EN_ALL; + if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) { + if (hw->nic_type == athr_l2c_b) + data &= ~CLK_GATING_RXMAC_EN; + } else + data = 0; + AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data); + + AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER, + hw->ict & INT_RETRIG_TIMER_MASK); + + atl1c_configure_des_ring(adapter); + + if (hw->ctrl_flags & ATL1C_INTR_MODRT_ENABLE) { + intr_modrt_data = (hw->tx_imt & IRQ_MODRT_TIMER_MASK) << + IRQ_MODRT_TX_TIMER_SHIFT; + intr_modrt_data |= (hw->rx_imt & IRQ_MODRT_TIMER_MASK) << + IRQ_MODRT_RX_TIMER_SHIFT; + AT_WRITE_REG(hw, REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data); + master_ctrl_data |= + MASTER_CTRL_TX_ITIMER_EN | MASTER_CTRL_RX_ITIMER_EN; + } + + if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ) + master_ctrl_data |= MASTER_CTRL_INT_RDCLR; + + master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN; + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); + + AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, + hw->smb_timer & SMB_STAT_TIMER_MASK); + + /* set MTU */ + AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + + VLAN_HLEN + ETH_FCS_LEN); + + atl1c_configure_tx(adapter); + atl1c_configure_rx(adapter); + atl1c_configure_dma(adapter); + + return 0; +} + +static int atl1c_configure(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int num; + + atl1c_init_ring_ptrs(adapter); + atl1c_set_multi(netdev); + atl1c_restore_vlan(adapter); + + num = atl1c_alloc_rx_buffer(adapter); + if (unlikely(num == 0)) + return -ENOMEM; + + if (atl1c_configure_mac(adapter)) + return -EIO; + + return 0; +} + +static void atl1c_update_hw_stats(struct atl1c_adapter *adapter) +{ + u16 hw_reg_addr = 0; + unsigned long *stats_item = NULL; + u32 data; + + /* update rx status */ + hw_reg_addr = REG_MAC_RX_STATUS_BIN; + stats_item = &adapter->hw_stats.rx_ok; + while (hw_reg_addr <= REG_MAC_RX_STATUS_END) { + AT_READ_REG(&adapter->hw, hw_reg_addr, &data); + *stats_item += data; + stats_item++; + hw_reg_addr += 4; + } +/* update tx status */ + hw_reg_addr = REG_MAC_TX_STATUS_BIN; + stats_item = &adapter->hw_stats.tx_ok; + while (hw_reg_addr <= REG_MAC_TX_STATUS_END) { + AT_READ_REG(&adapter->hw, hw_reg_addr, &data); + *stats_item += data; + stats_item++; + hw_reg_addr += 4; + } +} + +/** + * atl1c_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
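+ * atl1c_update_hw_stats() accumulates them by walking the MAC MIB
+ * counter window (REG_MAC_RX_STATUS_BIN through REG_MAC_RX_STATUS_END,
+ * plus the TX equivalents) in 4-byte steps, one register per field of
+ * struct atl1c_hw_stats, in declaration order.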
+ */ +static struct net_device_stats *atl1c_get_stats(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw_stats *hw_stats = &adapter->hw_stats; + struct net_device_stats *net_stats = &netdev->stats; + + atl1c_update_hw_stats(adapter); + net_stats->rx_packets = hw_stats->rx_ok; + net_stats->tx_packets = hw_stats->tx_ok; + net_stats->rx_bytes = hw_stats->rx_byte_cnt; + net_stats->tx_bytes = hw_stats->tx_byte_cnt; + net_stats->multicast = hw_stats->rx_mcast; + net_stats->collisions = hw_stats->tx_1_col + + hw_stats->tx_2_col * 2 + + hw_stats->tx_late_col + hw_stats->tx_abort_col; + net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + hw_stats->rx_sz_ov + + hw_stats->rx_rrd_ov + hw_stats->rx_align_err; + net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; + net_stats->rx_length_errors = hw_stats->rx_len_err; + net_stats->rx_crc_errors = hw_stats->rx_fcs_err; + net_stats->rx_frame_errors = hw_stats->rx_align_err; + net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; + + net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; + + net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col + + hw_stats->tx_underrun + hw_stats->tx_trunc; + net_stats->tx_fifo_errors = hw_stats->tx_underrun; + net_stats->tx_aborted_errors = hw_stats->tx_abort_col; + net_stats->tx_window_errors = hw_stats->tx_late_col; + + return net_stats; +} + +static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter) +{ + u16 phy_data; + + spin_lock(&adapter->mdio_lock); + atl1c_read_phy_reg(&adapter->hw, MII_ISR, &phy_data); + spin_unlock(&adapter->mdio_lock); +} + +static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + struct atl1c_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); + u16 hw_next_to_clean; + u16 reg; + + reg = type == atl1c_trans_high ? 
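+    /* each of the two TX priority rings has its own hardware consumer
+     * index register; pick the one matching this queue. */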
REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX;
+
+    AT_READ_REGW(&adapter->hw, reg, &hw_next_to_clean);
+
+    while (next_to_clean != hw_next_to_clean) {
+        buffer_info = &tpd_ring->buffer_info[next_to_clean];
+        atl1c_clean_buffer(pdev, buffer_info, 1);
+        if (++next_to_clean == tpd_ring->count)
+            next_to_clean = 0;
+        atomic_set(&tpd_ring->next_to_clean, next_to_clean);
+    }
+
+    if (netif_queue_stopped(adapter->netdev) &&
+        netif_carrier_ok(adapter->netdev)) {
+        netif_wake_queue(adapter->netdev);
+    }
+
+    return true;
+}
+
+/**
+ * atl1c_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ */
+static irqreturn_t atl1c_intr(int irq, void *data)
+{
+    struct net_device *netdev = data;
+    struct atl1c_adapter *adapter = netdev_priv(netdev);
+    struct pci_dev *pdev = adapter->pdev;
+    struct atl1c_hw *hw = &adapter->hw;
+    int max_ints = AT_MAX_INT_WORK;
+    int handled = IRQ_NONE;
+    u32 status;
+    u32 reg_data;
+
+    do {
+        AT_READ_REG(hw, REG_ISR, &reg_data);
+        status = reg_data & hw->intr_mask;
+
+        if (status == 0 || (status & ISR_DIS_INT) != 0) {
+            if (max_ints != AT_MAX_INT_WORK)
+                handled = IRQ_HANDLED;
+            break;
+        }
+        /* link event */
+        if (status & ISR_GPHY)
+            atl1c_clear_phy_int(adapter);
+        /* Ack ISR */
+        AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT);
+        if (status & ISR_RX_PKT) {
+            if (likely(napi_schedule_prep(&adapter->napi))) {
+                hw->intr_mask &= ~ISR_RX_PKT;
+                AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
+                __napi_schedule(&adapter->napi);
+            }
+        }
+        if (status & ISR_TX_PKT)
+            atl1c_clean_tx_irq(adapter, atl1c_trans_normal);
+
+        handled = IRQ_HANDLED;
+        /* check if PCIE PHY Link down */
+        if (status & ISR_ERROR) {
+            if (netif_msg_hw(adapter))
+                dev_err(&pdev->dev,
+                    "atl1c hardware error (status = 0x%x)\n",
+                    status & ISR_ERROR);
+            /* reset MAC */
+            set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
+            schedule_work(&adapter->common_task);
+            return IRQ_HANDLED;
+        }
+
+        if (status & ISR_OVER)
+            if (netif_msg_intr(adapter))
+                dev_warn(&pdev->dev,
+                    "TX/RX overflow (status = 0x%x)\n",
+                    status & ISR_OVER);
+
+        /* link event */
+        if (status & (ISR_GPHY | ISR_MANUAL)) {
+            netdev->stats.tx_carrier_errors++;
+            atl1c_link_chg_event(adapter);
+            break;
+        }
+
+    } while (--max_ints > 0);
+    /* re-enable Interrupt */
+    AT_WRITE_REG(&adapter->hw, REG_ISR, 0);
+    return handled;
+}
+
+static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter,
+        struct sk_buff *skb, struct atl1c_recv_ret_status *prrs)
+{
+    /*
+     * The pid field in RRS is not correct sometimes, so we
+     * cannot figure out if the packet is fragmented or not,
+     * so we tell the KERNEL CHECKSUM_NONE
+     */
+    skb_checksum_none_assert(skb);
+}
+
+static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
+{
+    struct sk_buff *skb;
+    struct page *page;
+
+    if (adapter->rx_frag_size > PAGE_SIZE)
+        return netdev_alloc_skb(adapter->netdev,
+                    adapter->rx_buffer_len);
+
+    page = adapter->rx_page;
+    if (!page) {
+        adapter->rx_page = page = alloc_page(GFP_ATOMIC);
+        if (unlikely(!page))
+            return NULL;
+        adapter->rx_page_offset = 0;
+    }
+
+    skb = build_skb(page_address(page) + adapter->rx_page_offset,
+            adapter->rx_frag_size);
+    if (likely(skb)) {
+        adapter->rx_page_offset += adapter->rx_frag_size;
+        if (adapter->rx_page_offset >= PAGE_SIZE)
+            adapter->rx_page = NULL;
+        else
+            get_page(page);
+    }
+    return skb;
+}
+
+static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter)
+{
+    struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring;
+    struct pci_dev *pdev = adapter->pdev;
+    struct atl1c_buffer
*buffer_info, *next_info; + struct sk_buff *skb; + void *vir_addr = NULL; + u16 num_alloc = 0; + u16 rfd_next_to_use, next_next; + struct atl1c_rx_free_desc *rfd_desc; + dma_addr_t mapping; + + next_next = rfd_next_to_use = rfd_ring->next_to_use; + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + + while (next_info->flags & ATL1C_BUFFER_FREE) { + rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); + + skb = atl1c_alloc_skb(adapter); + if (unlikely(!skb)) { + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, "alloc rx buffer failed\n"); + break; + } + + /* + * Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + vir_addr = skb->data; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + buffer_info->skb = skb; + buffer_info->length = adapter->rx_buffer_len; + mapping = pci_map_single(pdev, vir_addr, + buffer_info->length, + PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(pdev, mapping))) { + dev_kfree_skb(skb); + buffer_info->skb = NULL; + buffer_info->length = 0; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); + netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed"); + break; + } + buffer_info->dma = mapping; + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, + ATL1C_PCIMAP_FROMDEVICE); + rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + rfd_next_to_use = next_next; + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + num_alloc++; + } + + if (num_alloc) { + /* TODO: update mailbox here */ + wmb(); + rfd_ring->next_to_use = rfd_next_to_use; + AT_WRITE_REG(&adapter->hw, REG_MB_RFD0_PROD_IDX, + rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK); + } + + return num_alloc; +} + +static void atl1c_clean_rrd(struct atl1c_rrd_ring *rrd_ring, + struct atl1c_recv_ret_status *rrs, u16 num) +{ + u16 i; + /* the relationship between rrd and rfd is one map one */ + for (i = 0; i < num; i++, rrs = ATL1C_RRD_DESC(rrd_ring, + rrd_ring->next_to_clean)) { + rrs->word3 &= ~RRS_RXD_UPDATED; + if (++rrd_ring->next_to_clean == rrd_ring->count) + rrd_ring->next_to_clean = 0; + } +} + +static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring, + struct atl1c_recv_ret_status *rrs, u16 num) +{ + u16 i; + u16 rfd_index; + struct atl1c_buffer *buffer_info = rfd_ring->buffer_info; + + rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) & + RRS_RX_RFD_INDEX_MASK; + for (i = 0; i < num; i++) { + buffer_info[rfd_index].skb = NULL; + ATL1C_SET_BUFFER_STATE(&buffer_info[rfd_index], + ATL1C_BUFFER_FREE); + if (++rfd_index == rfd_ring->count) + rfd_index = 0; + } + rfd_ring->next_to_clean = rfd_index; +} + +static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, + int *work_done, int work_to_do) +{ + u16 rfd_num, rfd_index; + u16 count = 0; + u16 length; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct sk_buff *skb; + struct atl1c_recv_ret_status *rrs; + struct atl1c_buffer *buffer_info; + + while (1) { + if (*work_done >= work_to_do) + break; + rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean); + if (likely(RRS_RXD_IS_VALID(rrs->word3))) { + rfd_num = (rrs->word0 >> 
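+            /* RRS word0 packs the RX checksum (bits 0-15), the RFD
+             * count (bits 16-19) and the starting RFD index (bits
+             * 20-31); see the RRS_RX_* masks in atl1c.h. */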
RRS_RX_RFD_CNT_SHIFT) & + RRS_RX_RFD_CNT_MASK; + if (unlikely(rfd_num != 1)) + /* TODO support mul rfd*/ + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, + "Multi rfd not support yet!\n"); + goto rrs_checked; + } else { + break; + } +rrs_checked: + atl1c_clean_rrd(rrd_ring, rrs, rfd_num); + if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) { + atl1c_clean_rfd(rfd_ring, rrs, rfd_num); + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, + "wrong packet! rrs word3 is %x\n", + rrs->word3); + continue; + } + + length = le16_to_cpu((rrs->word3 >> RRS_PKT_SIZE_SHIFT) & + RRS_PKT_SIZE_MASK); + /* Good Receive */ + if (likely(rfd_num == 1)) { + rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) & + RRS_RX_RFD_INDEX_MASK; + buffer_info = &rfd_ring->buffer_info[rfd_index]; + pci_unmap_single(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + skb = buffer_info->skb; + } else { + /* TODO */ + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, + "Multi rfd not support yet!\n"); + break; + } + atl1c_clean_rfd(rfd_ring, rrs, rfd_num); + skb_put(skb, length - ETH_FCS_LEN); + skb->protocol = eth_type_trans(skb, netdev); + atl1c_rx_checksum(adapter, skb, rrs); + if (rrs->word3 & RRS_VLAN_INS) { + u16 vlan; + + AT_TAG_TO_VLAN(rrs->vlan_tag, vlan); + vlan = le16_to_cpu(vlan); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); + } + netif_receive_skb(skb); + + (*work_done)++; + count++; + } + if (count) + atl1c_alloc_rx_buffer(adapter); +} + +/** + * atl1c_clean - NAPI Rx polling callback + */ +static int atl1c_clean(struct napi_struct *napi, int budget) +{ + struct atl1c_adapter *adapter = + container_of(napi, struct atl1c_adapter, napi); + int work_done = 0; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(adapter->netdev)) + goto quit_polling; + /* just enable one RXQ */ + atl1c_clean_rx_irq(adapter, &work_done, budget); + + if (work_done < budget) { +quit_polling: + napi_complete(napi); + adapter->hw.intr_mask |= ISR_RX_PKT; + AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); + } + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void atl1c_netpoll(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + atl1c_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + u16 next_to_use = 0; + u16 next_to_clean = 0; + + next_to_clean = atomic_read(&tpd_ring->next_to_clean); + next_to_use = tpd_ring->next_to_use; + + return (u16)(next_to_clean > next_to_use) ? 
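+    /* classic ring arithmetic: the -1 keeps one slot unused so a full
+     * ring can be told apart from an empty one. For example, with
+     * count = 1024, next_to_use = 1020 and next_to_clean = 10 this
+     * yields 1024 + 10 - 1020 - 1 = 13 free descriptors. */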
+ (next_to_clean - next_to_use - 1) : + (tpd_ring->count + next_to_clean - next_to_use - 1); +} + +/* + * get next usable tpd + * Note: should call atl1c_tdp_avail to make sure + * there is enough tpd to use + */ +static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + struct atl1c_tpd_desc *tpd_desc; + u16 next_to_use = 0; + + next_to_use = tpd_ring->next_to_use; + if (++tpd_ring->next_to_use == tpd_ring->count) + tpd_ring->next_to_use = 0; + tpd_desc = ATL1C_TPD_DESC(tpd_ring, next_to_use); + memset(tpd_desc, 0, sizeof(struct atl1c_tpd_desc)); + return tpd_desc; +} + +static struct atl1c_buffer * +atl1c_get_tx_buffer(struct atl1c_adapter *adapter, struct atl1c_tpd_desc *tpd) +{ + struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; + + return &tpd_ring->buffer_info[tpd - + (struct atl1c_tpd_desc *)tpd_ring->desc]; +} + +/* Calculate the transmit packet descript needed*/ +static u16 atl1c_cal_tpd_req(const struct sk_buff *skb) +{ + u16 tpd_req; + u16 proto_hdr_len = 0; + + tpd_req = skb_shinfo(skb)->nr_frags + 1; + + if (skb_is_gso(skb)) { + proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (proto_hdr_len < skb_headlen(skb)) + tpd_req++; + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + tpd_req++; + } + return tpd_req; +} + +static int atl1c_tso_csum(struct atl1c_adapter *adapter, + struct sk_buff *skb, + struct atl1c_tpd_desc **tpd, + enum atl1c_trans_queue type) +{ + struct pci_dev *pdev = adapter->pdev; + u8 hdr_len; + u32 real_len; + unsigned short offload_type; + int err; + + if (skb_is_gso(skb)) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (unlikely(err)) + return -1; + } + offload_type = skb_shinfo(skb)->gso_type; + + if (offload_type & SKB_GSO_TCPV4) { + real_len = (((unsigned char *)ip_hdr(skb) - skb->data) + + ntohs(ip_hdr(skb)->tot_len)); + + if (real_len < skb->len) + pskb_trim(skb, real_len); + + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (unlikely(skb->len == hdr_len)) { + /* only xsum need */ + if (netif_msg_tx_queued(adapter)) + dev_warn(&pdev->dev, + "IPV4 tso with zero data??\n"); + goto check_sum; + } else { + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic( + ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + (*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT; + } + } + + if (offload_type & SKB_GSO_TCPV6) { + struct atl1c_tpd_ext_desc *etpd = + *(struct atl1c_tpd_ext_desc **)(tpd); + + memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc)); + *tpd = atl1c_get_tpd(adapter, type); + ipv6_hdr(skb)->payload_len = 0; + /* check payload == 0 byte ? 
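+             * (payload_len was zeroed above, presumably so the NIC can
+             * fill in per-segment lengths; the checksum seeded below
+             * covers only the IPv6 pseudo-header)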
*/ + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (unlikely(skb->len == hdr_len)) { + /* only xsum need */ + if (netif_msg_tx_queued(adapter)) + dev_warn(&pdev->dev, + "IPV6 tso with zero data??\n"); + goto check_sum; + } else + tcp_hdr(skb)->check = ~csum_ipv6_magic( + &ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + etpd->word1 |= 1 << TPD_LSO_EN_SHIFT; + etpd->word1 |= 1 << TPD_LSO_VER_SHIFT; + etpd->pkt_len = cpu_to_le32(skb->len); + (*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT; + } + + (*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT; + (*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) << + TPD_TCPHDR_OFFSET_SHIFT; + (*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) << + TPD_MSS_SHIFT; + return 0; + } + +check_sum: + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + u8 css, cso; + cso = skb_checksum_start_offset(skb); + + if (unlikely(cso & 0x1)) { + if (netif_msg_tx_err(adapter)) + dev_err(&adapter->pdev->dev, + "payload offset should not an event number\n"); + return -1; + } else { + css = cso + skb->csum_offset; + + (*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) << + TPD_PLOADOFFSET_SHIFT; + (*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) << + TPD_CCSUM_OFFSET_SHIFT; + (*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT; + } + } + return 0; +} + +static void atl1c_tx_rollback(struct atl1c_adapter *adpt, + struct atl1c_tpd_desc *first_tpd, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type]; + struct atl1c_buffer *buffer_info; + struct atl1c_tpd_desc *tpd; + u16 first_index, index; + + first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc; + index = first_index; + while (index != tpd_ring->next_to_use) { + tpd = ATL1C_TPD_DESC(tpd_ring, index); + buffer_info = &tpd_ring->buffer_info[index]; + atl1c_clean_buffer(adpt->pdev, buffer_info, 0); + memset(tpd, 0, sizeof(struct atl1c_tpd_desc)); + if (++index == tpd_ring->count) + index = 0; + } + tpd_ring->next_to_use = first_index; +} + +static int atl1c_tx_map(struct atl1c_adapter *adapter, + struct sk_buff *skb, struct atl1c_tpd_desc *tpd, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_desc *use_tpd = NULL; + struct atl1c_buffer *buffer_info = NULL; + u16 buf_len = skb_headlen(skb); + u16 map_len = 0; + u16 mapped_len = 0; + u16 hdr_len = 0; + u16 nr_frags; + u16 f; + int tso; + + nr_frags = skb_shinfo(skb)->nr_frags; + tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK; + if (tso) { + /* TSO */ + map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + use_tpd = tpd; + + buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); + buffer_info->length = map_len; + buffer_info->dma = pci_map_single(adapter->pdev, + skb->data, hdr_len, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(adapter->pdev, + buffer_info->dma))) + goto err_dma; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, + ATL1C_PCIMAP_TODEVICE); + mapped_len += map_len; + use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + use_tpd->buffer_len = cpu_to_le16(buffer_info->length); + } + + if (mapped_len < buf_len) { + /* mapped_len == 0, means we should use the first tpd, + which is given by caller */ + if (mapped_len == 0) + use_tpd = tpd; + else { + use_tpd = atl1c_get_tpd(adapter, type); + memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); + } + buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); + buffer_info->length = buf_len - mapped_len; + buffer_info->dma = + 
pci_map_single(adapter->pdev, skb->data + mapped_len, + buffer_info->length, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(adapter->pdev, + buffer_info->dma))) + goto err_dma; + + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, + ATL1C_PCIMAP_TODEVICE); + use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + use_tpd->buffer_len = cpu_to_le16(buffer_info->length); + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + + use_tpd = atl1c_get_tpd(adapter, type); + memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); + + buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); + buffer_info->length = skb_frag_size(frag); + buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev, + frag, 0, + buffer_info->length, + DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) + goto err_dma; + + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE, + ATL1C_PCIMAP_TODEVICE); + use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + use_tpd->buffer_len = cpu_to_le16(buffer_info->length); + } + + /* The last tpd */ + use_tpd->word1 |= 1 << TPD_EOP_SHIFT; + /* The last buffer info contain the skb address, + so it will be free after unmap */ + buffer_info->skb = skb; + + return 0; + +err_dma: + buffer_info->dma = 0; + buffer_info->length = 0; + return -1; +} + +static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb, + struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + u16 reg; + + reg = type == atl1c_trans_high ? REG_TPD_PRI1_PIDX : REG_TPD_PRI0_PIDX; + AT_WRITE_REGW(&adapter->hw, reg, tpd_ring->next_to_use); +} + +static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + u16 tpd_req = 1; + struct atl1c_tpd_desc *tpd; + enum atl1c_trans_queue type = atl1c_trans_normal; + + if (test_bit(__AT_DOWN, &adapter->flags)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + tpd_req = atl1c_cal_tpd_req(skb); + if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { + if (netif_msg_pktdata(adapter)) + dev_info(&adapter->pdev->dev, "tx locked\n"); + return NETDEV_TX_LOCKED; + } + + if (atl1c_tpd_avail(adapter, type) < tpd_req) { + /* no enough descriptor, just stop queue */ + netif_stop_queue(netdev); + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_BUSY; + } + + tpd = atl1c_get_tpd(adapter, type); + + /* do TSO and check sum */ + if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) { + spin_unlock_irqrestore(&adapter->tx_lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(vlan_tx_tag_present(skb))) { + u16 vlan = vlan_tx_tag_get(skb); + __le16 tag; + + vlan = cpu_to_le16(vlan); + AT_VLAN_TO_TAG(vlan, tag); + tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT; + tpd->vlan_tag = tag; + } + + if (skb_network_offset(skb) != ETH_HLEN) + tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */ + + if (atl1c_tx_map(adapter, skb, tpd, type) < 0) { + netif_info(adapter, tx_done, adapter->netdev, + "tx-skb droppted due to dma error\n"); + /* roll back tpd/buffer */ + atl1c_tx_rollback(adapter, tpd, type); + spin_unlock_irqrestore(&adapter->tx_lock, flags); + dev_kfree_skb(skb); + } else { + atl1c_tx_queue(adapter, skb, tpd, type); + 
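+        /* atl1c_tx_queue() published tpd_ring->next_to_use to the
+         * REG_TPD_PRIx_PIDX producer index, so the NIC can start
+         * fetching the descriptors built above. */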
spin_unlock_irqrestore(&adapter->tx_lock, flags); + } + + return NETDEV_TX_OK; +} + +static void atl1c_free_irq(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); + + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); +} + +static int atl1c_request_irq(struct atl1c_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int flags = 0; + int err = 0; + + adapter->have_msi = true; + err = pci_enable_msi(adapter->pdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_err(&pdev->dev, + "Unable to allocate MSI interrupt Error: %d\n", + err); + adapter->have_msi = false; + } + + if (!adapter->have_msi) + flags |= IRQF_SHARED; + err = request_irq(adapter->pdev->irq, atl1c_intr, flags, + netdev->name, netdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_err(&pdev->dev, + "Unable to allocate interrupt Error: %d\n", + err); + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); + return err; + } + if (netif_msg_ifup(adapter)) + dev_dbg(&pdev->dev, "atl1c_request_irq OK\n"); + return err; +} + + +static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter) +{ + /* release tx-pending skbs and reset tx/rx ring index */ + atl1c_clean_tx_ring(adapter, atl1c_trans_normal); + atl1c_clean_tx_ring(adapter, atl1c_trans_high); + atl1c_clean_rx_ring(adapter); +} + +static int atl1c_up(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + netif_carrier_off(netdev); + + err = atl1c_configure(adapter); + if (unlikely(err)) + goto err_up; + + err = atl1c_request_irq(adapter); + if (unlikely(err)) + goto err_up; + + atl1c_check_link_status(adapter); + clear_bit(__AT_DOWN, &adapter->flags); + napi_enable(&adapter->napi); + atl1c_irq_enable(adapter); + netif_start_queue(netdev); + return err; + +err_up: + atl1c_clean_rx_ring(adapter); + return err; +} + +static void atl1c_down(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + atl1c_del_timer(adapter); + adapter->work_event = 0; /* clear all event */ + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__AT_DOWN, &adapter->flags); + netif_carrier_off(netdev); + napi_disable(&adapter->napi); + atl1c_irq_disable(adapter); + atl1c_free_irq(adapter); + /* disable ASPM if device inactive */ + atl1c_disable_l0s_l1(&adapter->hw); + /* reset MAC to disable all RX/TX */ + atl1c_reset_mac(&adapter->hw); + msleep(1); + + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; + atl1c_reset_dma_ring(adapter); +} + +/** + * atl1c_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
+ */ +static int atl1c_open(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__AT_TESTING, &adapter->flags)) + return -EBUSY; + + /* allocate rx/tx dma buffer & descriptors */ + err = atl1c_setup_ring_resources(adapter); + if (unlikely(err)) + return err; + + err = atl1c_up(adapter); + if (unlikely(err)) + goto err_up; + + return 0; + +err_up: + atl1c_free_irq(adapter); + atl1c_free_ring_resources(adapter); + atl1c_reset_mac(&adapter->hw); + return err; +} + +/** + * atl1c_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + */ +static int atl1c_close(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + set_bit(__AT_DOWN, &adapter->flags); + cancel_work_sync(&adapter->common_task); + atl1c_down(adapter); + atl1c_free_ring_resources(adapter); + return 0; +} + +static int atl1c_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u32 wufc = adapter->wol; + + atl1c_disable_l0s_l1(hw); + if (netif_running(netdev)) { + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + atl1c_down(adapter); + } + netif_device_detach(netdev); + + if (wufc) + if (atl1c_phy_to_ps_link(hw) != 0) + dev_dbg(&pdev->dev, "phy power saving failed"); + + atl1c_power_saving(hw, wufc); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int atl1c_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); + atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE); + + atl1c_phy_reset(&adapter->hw); + atl1c_reset_mac(&adapter->hw); + atl1c_phy_init(&adapter->hw); + +#if 0 + AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data); + pm_data &= ~PM_CTRLSTAT_PME_EN; + AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data); +#endif + + netif_device_attach(netdev); + if (netif_running(netdev)) + atl1c_up(adapter); + + return 0; +} +#endif + +static void atl1c_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + atl1c_suspend(&pdev->dev); + pci_wake_from_d3(pdev, adapter->wol); + pci_set_power_state(pdev, PCI_D3hot); +} + +static const struct net_device_ops atl1c_netdev_ops = { + .ndo_open = atl1c_open, + .ndo_stop = atl1c_close, + .ndo_validate_addr = eth_validate_addr, + .ndo_start_xmit = atl1c_xmit_frame, + .ndo_set_mac_address = atl1c_set_mac_addr, + .ndo_set_rx_mode = atl1c_set_multi, + .ndo_change_mtu = atl1c_change_mtu, + .ndo_fix_features = atl1c_fix_features, + .ndo_set_features = atl1c_set_features, + .ndo_do_ioctl = atl1c_ioctl, + .ndo_tx_timeout = atl1c_tx_timeout, + .ndo_get_stats = atl1c_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = atl1c_netpoll, +#endif +}; + +static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev) +{ 
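+    /* binds the netdev to the PCI device, installs atl1c_netdev_ops
+     * and advertises the checksum/TSO/VLAN offloads listed below. */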
+ SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + netdev->netdev_ops = &atl1c_netdev_ops; + netdev->watchdog_timeo = AT_TX_WATCHDOG; + atl1c_set_ethtool_ops(netdev); + + /* TODO: add when ready */ + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_TSO | + NETIF_F_TSO6; + netdev->features = netdev->hw_features | + NETIF_F_HW_VLAN_CTAG_TX; + return 0; +} + +/** + * atl1c_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl1c_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl1c_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + */ +static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl1c_adapter *adapter; + static int cards_found; + + int err = 0; + + /* enable device (incl. PCI PM wakeup and hotplug setup) */ + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + return err; + } + + /* + * The atl1c chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. + */ + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || + (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { + dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); + goto err_dma; + } + + err = pci_request_regions(pdev, atl1c_driver_name); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_pci_reg; + } + + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct atl1c_adapter)); + if (netdev == NULL) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + err = atl1c_init_netdev(netdev, pdev); + if (err) { + dev_err(&pdev->dev, "init netdevice failed\n"); + goto err_init_netdev; + } + adapter = netdev_priv(netdev); + adapter->bd_number = cards_found; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.adapter = adapter; + adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg); + adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + if (!adapter->hw.hw_addr) { + err = -EIO; + dev_err(&pdev->dev, "cannot map device registers\n"); + goto err_ioremap; + } + + /* init mii data */ + adapter->mii.dev = netdev; + adapter->mii.mdio_read = atl1c_mdio_read; + adapter->mii.mdio_write = atl1c_mdio_write; + adapter->mii.phy_id_mask = 0x1f; + adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK; + netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64); + setup_timer(&adapter->phy_config_timer, atl1c_phy_config, + (unsigned long)adapter); + /* setup the private structure */ + err = atl1c_sw_init(adapter); + if (err) { + dev_err(&pdev->dev, "net device private data init failed\n"); + goto err_sw_init; + } + atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE); + + /* Init GPHY as early as possible due to power saving issue */ + atl1c_phy_reset(&adapter->hw); + + err = atl1c_reset_mac(&adapter->hw); + if (err) { + err = -EIO; + goto err_reset; + } + + /* reset the controller to + * put the device in a known good starting state 
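+     * (the MAC was soft-reset just above; atl1c_phy_init() below
+     * completes the PHY side of this bring-up)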
*/ + err = atl1c_phy_init(&adapter->hw); + if (err) { + err = -EIO; + goto err_reset; + } + if (atl1c_read_mac_addr(&adapter->hw)) { + /* got a random MAC address, set NET_ADDR_RANDOM to netdev */ + netdev->addr_assign_type = NET_ADDR_RANDOM; + } + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + if (netif_msg_probe(adapter)) + dev_dbg(&pdev->dev, "mac address : %pM\n", + adapter->hw.mac_addr); + + atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); + INIT_WORK(&adapter->common_task, atl1c_common_task); + adapter->work_event = 0; + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "register netdevice failed\n"); + goto err_register; + } + + if (netif_msg_probe(adapter)) + dev_info(&pdev->dev, "version %s\n", ATL1C_DRV_VERSION); + cards_found++; + return 0; + +err_reset: +err_register: +err_sw_init: + iounmap(adapter->hw.hw_addr); +err_init_netdev: +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * atl1c_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * atl1c_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void atl1c_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + unregister_netdev(netdev); + /* restore permanent address */ + atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.perm_mac_addr); + atl1c_phy_disable(&adapter->hw); + + iounmap(adapter->hw.hw_addr); + + pci_release_regions(pdev); + pci_disable_device(pdev); + free_netdev(netdev); +} + +/** + * atl1c_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + atl1c_down(adapter); + + pci_disable_device(pdev); + + /* Request a slot slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * atl1c_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + atl1c_reset_mac(&adapter->hw); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * atl1c_io_resume - called when traffic can start flowing again. 
+ * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the atl1c_resume routine. + */ +static void atl1c_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (atl1c_up(adapter)) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "Cannot bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static const struct pci_error_handlers atl1c_err_handler = { + .error_detected = atl1c_io_error_detected, + .slot_reset = atl1c_io_slot_reset, + .resume = atl1c_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume); + +static struct pci_driver atl1c_driver = { + .name = atl1c_driver_name, + .id_table = atl1c_pci_tbl, + .probe = atl1c_probe, + .remove = atl1c_remove, + .shutdown = atl1c_shutdown, + .err_handler = &atl1c_err_handler, + .driver.pm = &atl1c_pm_ops, +}; + +/** + * atl1c_init_module - Driver Registration Routine + * + * atl1c_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + */ +static int __init atl1c_init_module(void) +{ + return pci_register_driver(&atl1c_driver); +} + +/** + * atl1c_exit_module - Driver Exit Cleanup Routine + * + * atl1c_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit atl1c_exit_module(void) +{ + pci_unregister_driver(&atl1c_driver); +} + +module_init(atl1c_init_module); +module_exit(atl1c_exit_module); diff --git a/addons/atl1c/src/4.4.180/Makefile b/addons/atl1c/src/4.4.180/Makefile new file mode 100644 index 00000000..d6fff141 --- /dev/null +++ b/addons/atl1c/src/4.4.180/Makefile @@ -0,0 +1,2 @@ +obj-m += atl1c.o +atl1c-objs := atl1c_main.o atl1c_hw.o atl1c_ethtool.o diff --git a/addons/atl1c/src/4.4.180/atl1c.h b/addons/atl1c/src/4.4.180/atl1c.h new file mode 100644 index 00000000..b9203d92 --- /dev/null +++ b/addons/atl1c/src/4.4.180/atl1c.h @@ -0,0 +1,605 @@ +/* + * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _ATL1C_H_ +#define _ATL1C_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atl1c_hw.h" + +/* Wake Up Filter Control */ +#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ +#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +#define AT_VLAN_TO_TAG(_vlan, _tag) \ + _tag = ((((_vlan) >> 8) & 0xFF) |\ + (((_vlan) & 0xFF) << 8)) + +#define AT_TAG_TO_VLAN(_tag, _vlan) \ + _vlan = ((((_tag) >> 8) & 0xFF) |\ + (((_tag) & 0xFF) << 8)) + +#define SPEED_0 0xffff +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define AT_RX_BUF_SIZE (ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN) +#define MAX_JUMBO_FRAME_SIZE (6*1024) + +#define AT_MAX_RECEIVE_QUEUE 4 +#define AT_DEF_RECEIVE_QUEUE 1 +#define AT_MAX_TRANSMIT_QUEUE 2 + +#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL +#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL + +#define AT_TX_WATCHDOG (5 * HZ) +#define AT_MAX_INT_WORK 5 +#define AT_TWSI_EEPROM_TIMEOUT 100 +#define AT_HW_MAX_IDLE_DELAY 10 +#define AT_SUSPEND_LINK_TIMEOUT 100 + +#define AT_ASPM_L0S_TIMER 6 +#define AT_ASPM_L1_TIMER 12 +#define AT_LCKDET_TIMER 12 + +#define ATL1C_PCIE_L0S_L1_DISABLE 0x01 +#define ATL1C_PCIE_PHY_RESET 0x02 + +#define ATL1C_ASPM_L0s_ENABLE 0x0001 +#define ATL1C_ASPM_L1_ENABLE 0x0002 + +#define AT_REGS_LEN (74 * sizeof(u32)) +#define AT_EEPROM_LEN 512 + +#define ATL1C_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i])) +#define ATL1C_RFD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_rx_free_desc) +#define ATL1C_TPD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_tpd_desc) +#define ATL1C_RRD_DESC(R, i) ATL1C_GET_DESC(R, i, struct atl1c_recv_ret_status) + +/* tpd word 1 bit 0:7 General Checksum task offload */ +#define TPD_L4HDR_OFFSET_MASK 0x00FF +#define TPD_L4HDR_OFFSET_SHIFT 0 + +/* tpd word 1 bit 0:7 Large Send task offload (IPv4/IPV6) */ +#define TPD_TCPHDR_OFFSET_MASK 0x00FF +#define TPD_TCPHDR_OFFSET_SHIFT 0 + +/* tpd word 1 bit 0:7 Custom Checksum task offload */ +#define TPD_PLOADOFFSET_MASK 0x00FF +#define TPD_PLOADOFFSET_SHIFT 0 + +/* tpd word 1 bit 8:17 */ +#define TPD_CCSUM_EN_MASK 0x0001 +#define TPD_CCSUM_EN_SHIFT 8 +#define TPD_IP_CSUM_MASK 0x0001 +#define TPD_IP_CSUM_SHIFT 9 +#define TPD_TCP_CSUM_MASK 0x0001 +#define TPD_TCP_CSUM_SHIFT 10 +#define TPD_UDP_CSUM_MASK 0x0001 +#define TPD_UDP_CSUM_SHIFT 11 +#define TPD_LSO_EN_MASK 0x0001 /* TCP Large Send Offload */ +#define TPD_LSO_EN_SHIFT 12 +#define TPD_LSO_VER_MASK 0x0001 +#define TPD_LSO_VER_SHIFT 13 /* 0 : ipv4; 1 : ipv4/ipv6 */ +#define TPD_CON_VTAG_MASK 0x0001 +#define TPD_CON_VTAG_SHIFT 14 +#define TPD_INS_VTAG_MASK 0x0001 +#define TPD_INS_VTAG_SHIFT 15 +#define TPD_IPV4_PACKET_MASK 0x0001 /* valid when LSO VER is 1 */ +#define TPD_IPV4_PACKET_SHIFT 16 +#define TPD_ETH_TYPE_MASK 0x0001 +#define TPD_ETH_TYPE_SHIFT 17 /* 0 : 802.3 frame; 1 : Ethernet */ + +/* tpd word 18:25 Custom Checksum task offload */ +#define TPD_CCSUM_OFFSET_MASK 0x00FF +#define TPD_CCSUM_OFFSET_SHIFT 18 +#define TPD_CCSUM_EPAD_MASK 0x0001 +#define TPD_CCSUM_EPAD_SHIFT 30 + +/* tpd word 18:30 Large Send task offload (IPv4/IPV6) */ +#define TPD_MSS_MASK 0x1FFF 
+#define TPD_MSS_SHIFT 18 + +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 31 + +struct atl1c_tpd_desc { + __le16 buffer_len; /* include 4-byte CRC */ + __le16 vlan_tag; + __le32 word1; + __le64 buffer_addr; +}; + +struct atl1c_tpd_ext_desc { + u32 reservd_0; + __le32 word1; + __le32 pkt_len; + u32 reservd_1; +}; +/* rrs word 0 bit 0:31 */ +#define RRS_RX_CSUM_MASK 0xFFFF +#define RRS_RX_CSUM_SHIFT 0 +#define RRS_RX_RFD_CNT_MASK 0x000F +#define RRS_RX_RFD_CNT_SHIFT 16 +#define RRS_RX_RFD_INDEX_MASK 0x0FFF +#define RRS_RX_RFD_INDEX_SHIFT 20 + +/* rrs flag bit 0:16 */ +#define RRS_HEAD_LEN_MASK 0x00FF +#define RRS_HEAD_LEN_SHIFT 0 +#define RRS_HDS_TYPE_MASK 0x0003 +#define RRS_HDS_TYPE_SHIFT 8 +#define RRS_CPU_NUM_MASK 0x0003 +#define RRS_CPU_NUM_SHIFT 10 +#define RRS_HASH_FLG_MASK 0x000F +#define RRS_HASH_FLG_SHIFT 12 + +#define RRS_HDS_TYPE_HEAD 1 +#define RRS_HDS_TYPE_DATA 2 + +#define RRS_IS_NO_HDS_TYPE(flag) \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == 0) + +#define RRS_IS_HDS_HEAD(flag) \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \ + RRS_HDS_TYPE_HEAD) + +#define RRS_IS_HDS_DATA(flag) \ + ((((flag) >> (RRS_HDS_TYPE_SHIFT)) & RRS_HDS_TYPE_MASK) == \ + RRS_HDS_TYPE_DATA) + +/* rrs word 3 bit 0:31 */ +#define RRS_PKT_SIZE_MASK 0x3FFF +#define RRS_PKT_SIZE_SHIFT 0 +#define RRS_ERR_L4_CSUM_MASK 0x0001 +#define RRS_ERR_L4_CSUM_SHIFT 14 +#define RRS_ERR_IP_CSUM_MASK 0x0001 +#define RRS_ERR_IP_CSUM_SHIFT 15 +#define RRS_VLAN_INS_MASK 0x0001 +#define RRS_VLAN_INS_SHIFT 16 +#define RRS_PROT_ID_MASK 0x0007 +#define RRS_PROT_ID_SHIFT 17 +#define RRS_RX_ERR_SUM_MASK 0x0001 +#define RRS_RX_ERR_SUM_SHIFT 20 +#define RRS_RX_ERR_CRC_MASK 0x0001 +#define RRS_RX_ERR_CRC_SHIFT 21 +#define RRS_RX_ERR_FAE_MASK 0x0001 +#define RRS_RX_ERR_FAE_SHIFT 22 +#define RRS_RX_ERR_TRUNC_MASK 0x0001 +#define RRS_RX_ERR_TRUNC_SHIFT 23 +#define RRS_RX_ERR_RUNC_MASK 0x0001 +#define RRS_RX_ERR_RUNC_SHIFT 24 +#define RRS_RX_ERR_ICMP_MASK 0x0001 +#define RRS_RX_ERR_ICMP_SHIFT 25 +#define RRS_PACKET_BCAST_MASK 0x0001 +#define RRS_PACKET_BCAST_SHIFT 26 +#define RRS_PACKET_MCAST_MASK 0x0001 +#define RRS_PACKET_MCAST_SHIFT 27 +#define RRS_PACKET_TYPE_MASK 0x0001 +#define RRS_PACKET_TYPE_SHIFT 28 +#define RRS_FIFO_FULL_MASK 0x0001 +#define RRS_FIFO_FULL_SHIFT 29 +#define RRS_802_3_LEN_ERR_MASK 0x0001 +#define RRS_802_3_LEN_ERR_SHIFT 30 +#define RRS_RXD_UPDATED_MASK 0x0001 +#define RRS_RXD_UPDATED_SHIFT 31 + +#define RRS_ERR_L4_CSUM 0x00004000 +#define RRS_ERR_IP_CSUM 0x00008000 +#define RRS_VLAN_INS 0x00010000 +#define RRS_RX_ERR_SUM 0x00100000 +#define RRS_RX_ERR_CRC 0x00200000 +#define RRS_802_3_LEN_ERR 0x40000000 +#define RRS_RXD_UPDATED 0x80000000 + +#define RRS_PACKET_TYPE_802_3 1 +#define RRS_PACKET_TYPE_ETH 0 +#define RRS_PACKET_IS_ETH(word) \ + ((((word) >> RRS_PACKET_TYPE_SHIFT) & RRS_PACKET_TYPE_MASK) == \ + RRS_PACKET_TYPE_ETH) +#define RRS_RXD_IS_VALID(word) \ + ((((word) >> RRS_RXD_UPDATED_SHIFT) & RRS_RXD_UPDATED_MASK) == 1) + +#define RRS_PACKET_PROT_IS_IPV4_ONLY(word) \ + ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 1) +#define RRS_PACKET_PROT_IS_IPV6_ONLY(word) \ + ((((word) >> RRS_PROT_ID_SHIFT) & RRS_PROT_ID_MASK) == 6) + +struct atl1c_recv_ret_status { + __le32 word0; + __le32 rss_hash; + __le16 vlan_tag; + __le16 flag; + __le32 word3; +}; + +/* RFD descriptor */ +struct atl1c_rx_free_desc { + __le64 buffer_addr; +}; + +/* DMA Order Settings */ +enum atl1c_dma_order { + atl1c_dma_ord_in = 1, + atl1c_dma_ord_enh = 2, + atl1c_dma_ord_out = 4 +}; + +enum 
atl1c_dma_rcb {
+    atl1c_rcb_64 = 0,
+    atl1c_rcb_128 = 1
+};
+
+enum atl1c_mac_speed {
+    atl1c_mac_speed_0 = 0,
+    atl1c_mac_speed_10_100 = 1,
+    atl1c_mac_speed_1000 = 2
+};
+
+enum atl1c_dma_req_block {
+    atl1c_dma_req_128 = 0,
+    atl1c_dma_req_256 = 1,
+    atl1c_dma_req_512 = 2,
+    atl1c_dma_req_1024 = 3,
+    atl1c_dma_req_2048 = 4,
+    atl1c_dma_req_4096 = 5
+};
+
+
+enum atl1c_nic_type {
+    athr_l1c = 0,
+    athr_l2c = 1,
+    athr_l2c_b,
+    athr_l2c_b2,
+    athr_l1d,
+    athr_l1d_2,
+};
+
+enum atl1c_trans_queue {
+    atl1c_trans_normal = 0,
+    atl1c_trans_high = 1
+};
+
+struct atl1c_hw_stats {
+    /* rx */
+    unsigned long rx_ok; /* The number of good packets received. */
+    unsigned long rx_bcast; /* The number of good broadcast packets received. */
+    unsigned long rx_mcast; /* The number of good multicast packets received. */
+    unsigned long rx_pause; /* The number of Pause packets received. */
+    unsigned long rx_ctrl; /* The number of control packets received other than Pause frames. */
+    unsigned long rx_fcs_err; /* The number of packets with a bad FCS. */
+    unsigned long rx_len_err; /* The number of packets whose length field does not match the actual size. */
+    unsigned long rx_byte_cnt; /* The number of bytes of good packets received. FCS is NOT included. */
+    unsigned long rx_runt; /* The number of packets received that are less than 64 bytes long and have a good FCS. */
+    unsigned long rx_frag; /* The number of packets received that are less than 64 bytes long and have a bad FCS. */
+    unsigned long rx_sz_64; /* The number of good and bad packets received that are 64 bytes long. */
+    unsigned long rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127 bytes long. */
+    unsigned long rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255 bytes long. */
+    unsigned long rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511 bytes long. */
+    unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023 bytes long. */
+    unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518 bytes long. */
+    unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519 bytes and the MTU. */
+    unsigned long rx_sz_ov; /* The number of good and bad packets received that are larger than the MTU and truncated by Selene. */
+    unsigned long rx_rxf_ov; /* The number of frames dropped due to RX FIFO overflow. */
+    unsigned long rx_rrd_ov; /* The number of frames dropped due to RRD overflow. */
+    unsigned long rx_align_err; /* Alignment Error */
+    unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packets received, excluding FCS. */
+    unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packets received, excluding FCS. */
+    unsigned long rx_err_addr; /* The number of packets dropped due to address filtering. */
+
+    /* tx */
+    unsigned long tx_ok; /* The number of good packets transmitted. */
+    unsigned long tx_bcast; /* The number of good broadcast packets transmitted. */
+    unsigned long tx_mcast; /* The number of good multicast packets transmitted. */
+    unsigned long tx_pause; /* The number of Pause packets transmitted. */
+    unsigned long tx_exc_defer; /* The number of packets transmitted with excessive deferral. */
+    unsigned long tx_ctrl; /* The number of control frames transmitted, excluding Pause frames. */
+    unsigned long tx_defer; /* The number of packets transmitted that were deferred. */
+    unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */
+    unsigned long tx_sz_64; /* The number of good and bad packets transmitted that are 64 bytes long. */
+    unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127 bytes long. */
+    unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255 bytes long. */
+    unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511 bytes long. */
+    unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023 bytes long. */
+    unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518 bytes long. */
+    unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519 bytes and the MTU. */
+    unsigned long tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */
+    unsigned long tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */
+    unsigned long tx_late_col; /* The number of packets transmitted with late collisions. */
+    unsigned long tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */
+    unsigned long tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun or TRD FIFO underrun. */
+    unsigned long tx_rd_eop; /* The number of times a read went beyond the EOP into the next frame area because the TRD was not written in time. */
+    unsigned long tx_len_err; /* The number of transmit packets whose length field does NOT match the actual frame size. */
+    unsigned long tx_trunc; /* The number of transmit packets truncated because their size exceeded the MTU. */
+    unsigned long tx_bcast_byte; /* The byte count of broadcast packets transmitted, excluding FCS. */
+    unsigned long tx_mcast_byte; /* The byte count of multicast packets transmitted, excluding FCS.
*/ +}; + +struct atl1c_hw { + u8 __iomem *hw_addr; /* inner register address */ + struct atl1c_adapter *adapter; + enum atl1c_nic_type nic_type; + enum atl1c_dma_order dma_order; + enum atl1c_dma_rcb rcb_value; + enum atl1c_dma_req_block dmar_block; + + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u16 phy_id1; + u16 phy_id2; + + u32 intr_mask; + + u8 preamble_len; + u16 max_frame_size; + u16 min_frame_size; + + enum atl1c_mac_speed mac_speed; + bool mac_duplex; + bool hibernate; + u16 media_type; +#define MEDIA_TYPE_AUTO_SENSOR 0 +#define MEDIA_TYPE_100M_FULL 1 +#define MEDIA_TYPE_100M_HALF 2 +#define MEDIA_TYPE_10M_FULL 3 +#define MEDIA_TYPE_10M_HALF 4 + + u16 autoneg_advertised; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + u16 tx_imt; /* TX Interrupt Moderator timer ( 2us resolution) */ + u16 rx_imt; /* RX Interrupt Moderator timer ( 2us resolution) */ + u16 ict; /* Interrupt Clear timer (2us resolution) */ + u16 ctrl_flags; +#define ATL1C_INTR_CLEAR_ON_READ 0x0001 +#define ATL1C_INTR_MODRT_ENABLE 0x0002 +#define ATL1C_CMB_ENABLE 0x0004 +#define ATL1C_SMB_ENABLE 0x0010 +#define ATL1C_TXQ_MODE_ENHANCE 0x0020 +#define ATL1C_RX_IPV6_CHKSUM 0x0040 +#define ATL1C_ASPM_L0S_SUPPORT 0x0080 +#define ATL1C_ASPM_L1_SUPPORT 0x0100 +#define ATL1C_ASPM_CTRL_MON 0x0200 +#define ATL1C_HIB_DISABLE 0x0400 +#define ATL1C_APS_MODE_ENABLE 0x0800 +#define ATL1C_LINK_EXT_SYNC 0x1000 +#define ATL1C_CLK_GATING_EN 0x2000 +#define ATL1C_FPGA_VERSION 0x8000 + u16 link_cap_flags; +#define ATL1C_LINK_CAP_1000M 0x0001 + u32 smb_timer; + + u16 rrd_thresh; /* Threshold of number of RRD produced to trigger + interrupt request */ + u16 tpd_thresh; + u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. */ + u8 rfd_burst; + u32 base_cpu; + u32 indirect_tab; + u8 mac_addr[ETH_ALEN]; + u8 perm_mac_addr[ETH_ALEN]; + + bool phy_configured; + bool re_autoneg; + bool emi_ca; + bool msi_lnkpatch; /* link patch for specific platforms */ +}; + +/* + * atl1c_ring_header represents a single, contiguous block of DMA space + * mapped for the three descriptor rings (tpd, rfd, rrd) described below + */ +struct atl1c_ring_header { + void *desc; /* virtual address */ + dma_addr_t dma; /* physical address*/ + unsigned int size; /* length in bytes */ +}; + +/* + * atl1c_buffer is wrapper around a pointer to a socket buffer + * so a DMA handle can be stored along with the skb + */ +struct atl1c_buffer { + struct sk_buff *skb; /* socket buffer */ + u16 length; /* rx buffer length */ + u16 flags; /* information of buffer */ +#define ATL1C_BUFFER_FREE 0x0001 +#define ATL1C_BUFFER_BUSY 0x0002 +#define ATL1C_BUFFER_STATE_MASK 0x0003 + +#define ATL1C_PCIMAP_SINGLE 0x0004 +#define ATL1C_PCIMAP_PAGE 0x0008 +#define ATL1C_PCIMAP_TYPE_MASK 0x000C + +#define ATL1C_PCIMAP_TODEVICE 0x0010 +#define ATL1C_PCIMAP_FROMDEVICE 0x0020 +#define ATL1C_PCIMAP_DIRECTION_MASK 0x0030 + dma_addr_t dma; +}; + +#define ATL1C_SET_BUFFER_STATE(buff, state) do { \ + ((buff)->flags) &= ~ATL1C_BUFFER_STATE_MASK; \ + ((buff)->flags) |= (state); \ + } while (0) + +#define ATL1C_SET_PCIMAP_TYPE(buff, type, direction) do { \ + ((buff)->flags) &= ~ATL1C_PCIMAP_TYPE_MASK; \ + ((buff)->flags) |= (type); \ + ((buff)->flags) &= ~ATL1C_PCIMAP_DIRECTION_MASK; \ + ((buff)->flags) |= (direction); \ + } while (0) + +/* transimit packet descriptor (tpd) ring */ +struct atl1c_tpd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* 
descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; /* this is protectd by adapter->tx_lock */ + atomic_t next_to_clean; + struct atl1c_buffer *buffer_info; +}; + +/* receive free descriptor (rfd) ring */ +struct atl1c_rfd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; + u16 next_to_clean; + struct atl1c_buffer *buffer_info; +}; + +/* receive return descriptor (rrd) ring */ +struct atl1c_rrd_ring { + void *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + u16 next_to_use; + u16 next_to_clean; +}; + +/* board specific private data structure */ +struct atl1c_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct napi_struct napi; + struct page *rx_page; + unsigned int rx_page_offset; + unsigned int rx_frag_size; + struct atl1c_hw hw; + struct atl1c_hw_stats hw_stats; + struct mii_if_info mii; /* MII interface info */ + u16 rx_buffer_len; + + unsigned long flags; +#define __AT_TESTING 0x0001 +#define __AT_RESETTING 0x0002 +#define __AT_DOWN 0x0003 + unsigned long work_event; +#define ATL1C_WORK_EVENT_RESET 0 +#define ATL1C_WORK_EVENT_LINK_CHANGE 1 + u32 msg_enable; + + bool have_msi; + u32 wol; + u16 link_speed; + u16 link_duplex; + + spinlock_t mdio_lock; + spinlock_t tx_lock; + atomic_t irq_sem; + + struct work_struct common_task; + struct timer_list watchdog_timer; + struct timer_list phy_config_timer; + + /* All Descriptor memory */ + struct atl1c_ring_header ring_header; + struct atl1c_tpd_ring tpd_ring[AT_MAX_TRANSMIT_QUEUE]; + struct atl1c_rfd_ring rfd_ring; + struct atl1c_rrd_ring rrd_ring; + u32 bd_number; /* board number;*/ +}; + +#define AT_WRITE_REG(a, reg, value) ( \ + writel((value), ((a)->hw_addr + reg))) + +#define AT_WRITE_FLUSH(a) (\ + readl((a)->hw_addr)) + +#define AT_READ_REG(a, reg, pdata) do { \ + if (unlikely((a)->hibernate)) { \ + readl((a)->hw_addr + reg); \ + *(u32 *)pdata = readl((a)->hw_addr + reg); \ + } else { \ + *(u32 *)pdata = readl((a)->hw_addr + reg); \ + } \ + } while (0) + +#define AT_WRITE_REGB(a, reg, value) (\ + writeb((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGB(a, reg) (\ + readb((a)->hw_addr + reg)) + +#define AT_WRITE_REGW(a, reg, value) (\ + writew((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGW(a, reg, pdata) do { \ + if (unlikely((a)->hibernate)) { \ + readw((a)->hw_addr + reg); \ + *(u16 *)pdata = readw((a)->hw_addr + reg); \ + } else { \ + *(u16 *)pdata = readw((a)->hw_addr + reg); \ + } \ + } while (0) + +#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), (((a)->hw_addr + reg) + ((offset) << 2)))) + +#define AT_READ_REG_ARRAY(a, reg, offset) ( \ + readl(((a)->hw_addr + reg) + ((offset) << 2))) + +extern char atl1c_driver_name[]; +extern char atl1c_driver_version[]; + +void atl1c_reinit_locked(struct atl1c_adapter *adapter); +s32 atl1c_reset_hw(struct atl1c_hw *hw); +void atl1c_set_ethtool_ops(struct net_device *netdev); +#endif /* _ATL1C_H_ */ diff --git a/addons/atl1c/src/4.4.180/atl1c_ethtool.c b/addons/atl1c/src/4.4.180/atl1c_ethtool.c new file mode 100644 index 00000000..872b7abb --- /dev/null +++ b/addons/atl1c/src/4.4.180/atl1c_ethtool.c @@ -0,0 +1,305 @@ +/* + * Copyright(c) 2009 - 
2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#include +#include +#include + +#include "atl1c.h" + +static int atl1c_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M) + ecmd->supported |= SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_TP; + + ecmd->advertising |= hw->autoneg_advertised; + + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (adapter->link_speed != SPEED_0) { + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + ecmd->autoneg = AUTONEG_ENABLE; + return 0; +} + +static int atl1c_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u16 autoneg_advertised; + + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + autoneg_advertised = ADVERTISED_Autoneg; + } else { + u32 speed = ethtool_cmd_speed(ecmd); + if (speed == SPEED_1000) { + if (ecmd->duplex != DUPLEX_FULL) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "1000M half is invalid\n"); + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + autoneg_advertised = ADVERTISED_1000baseT_Full; + } else if (speed == SPEED_100) { + if (ecmd->duplex == DUPLEX_FULL) + autoneg_advertised = ADVERTISED_100baseT_Full; + else + autoneg_advertised = ADVERTISED_100baseT_Half; + } else { + if (ecmd->duplex == DUPLEX_FULL) + autoneg_advertised = ADVERTISED_10baseT_Full; + else + autoneg_advertised = ADVERTISED_10baseT_Half; + } + } + + if (hw->autoneg_advertised != autoneg_advertised) { + hw->autoneg_advertised = autoneg_advertised; + if (atl1c_restart_autoneg(hw) != 0) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, + "ethtool speed/duplex setting failed\n"); + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + } + clear_bit(__AT_RESETTING, &adapter->flags); + return 0; +} + +static u32 atl1c_get_msglevel(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void atl1c_set_msglevel(struct net_device *netdev, u32 
data) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int atl1c_get_regs_len(struct net_device *netdev) +{ + return AT_REGS_LEN; +} + +static void atl1c_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, AT_REGS_LEN); + + regs->version = 1; + AT_READ_REG(hw, REG_PM_CTRL, p++); + AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL, p++); + AT_READ_REG(hw, REG_TWSI_CTRL, p++); + AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL, p++); + AT_READ_REG(hw, REG_MASTER_CTRL, p++); + AT_READ_REG(hw, REG_MANUAL_TIMER_INIT, p++); + AT_READ_REG(hw, REG_IRQ_MODRT_TIMER_INIT, p++); + AT_READ_REG(hw, REG_GPHY_CTRL, p++); + AT_READ_REG(hw, REG_LINK_CTRL, p++); + AT_READ_REG(hw, REG_IDLE_STATUS, p++); + AT_READ_REG(hw, REG_MDIO_CTRL, p++); + AT_READ_REG(hw, REG_SERDES, p++); + AT_READ_REG(hw, REG_MAC_CTRL, p++); + AT_READ_REG(hw, REG_MAC_IPG_IFG, p++); + AT_READ_REG(hw, REG_MAC_STA_ADDR, p++); + AT_READ_REG(hw, REG_MAC_STA_ADDR+4, p++); + AT_READ_REG(hw, REG_RX_HASH_TABLE, p++); + AT_READ_REG(hw, REG_RX_HASH_TABLE+4, p++); + AT_READ_REG(hw, REG_RXQ_CTRL, p++); + AT_READ_REG(hw, REG_TXQ_CTRL, p++); + AT_READ_REG(hw, REG_MTU, p++); + AT_READ_REG(hw, REG_WOL_CTRL, p++); + + atl1c_read_phy_reg(hw, MII_BMCR, &phy_data); + regs_buff[AT_REGS_LEN/sizeof(u32) - 2] = (u32) phy_data; + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + regs_buff[AT_REGS_LEN/sizeof(u32) - 1] = (u32) phy_data; +} + +static int atl1c_get_eeprom_len(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (atl1c_check_eeprom_exist(&adapter->hw)) + return AT_EEPROM_LEN; + else + return 0; +} + +static int atl1c_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u32 *eeprom_buff; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EINVAL; + + if (!atl1c_check_eeprom_exist(hw)) /* not exist */ + return -EINVAL; + + eeprom->magic = adapter->pdev->vendor | + (adapter->pdev->device << 16); + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + + eeprom_buff = kmalloc(sizeof(u32) * + (last_dword - first_dword + 1), GFP_KERNEL); + if (eeprom_buff == NULL) + return -ENOMEM; + + for (i = first_dword; i < last_dword; i++) { + if (!atl1c_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) { + kfree(eeprom_buff); + return -EIO; + } + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; + return 0; +} + +static void atl1c_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, atl1c_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void atl1c_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC | WAKE_PHY; + wol->wolopts = 0; + + if (adapter->wol & AT_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & AT_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & 
AT_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & AT_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & AT_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int atl1c_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | + WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)) + return -EOPNOTSUPP; + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= AT_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= AT_WUFC_LNKC; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int atl1c_nway_reset(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + atl1c_reinit_locked(adapter); + return 0; +} + +static const struct ethtool_ops atl1c_ethtool_ops = { + .get_settings = atl1c_get_settings, + .set_settings = atl1c_set_settings, + .get_drvinfo = atl1c_get_drvinfo, + .get_regs_len = atl1c_get_regs_len, + .get_regs = atl1c_get_regs, + .get_wol = atl1c_get_wol, + .set_wol = atl1c_set_wol, + .get_msglevel = atl1c_get_msglevel, + .set_msglevel = atl1c_set_msglevel, + .nway_reset = atl1c_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = atl1c_get_eeprom_len, + .get_eeprom = atl1c_get_eeprom, +}; + +void atl1c_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &atl1c_ethtool_ops; +} diff --git a/addons/atl1c/src/4.4.180/atl1c_hw.c b/addons/atl1c/src/4.4.180/atl1c_hw.c new file mode 100644 index 00000000..a8b80c56 --- /dev/null +++ b/addons/atl1c/src/4.4.180/atl1c_hw.c @@ -0,0 +1,863 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ +#include +#include +#include +#include + +#include "atl1c.h" + +/* + * check_eeprom_exist + * return 1 if eeprom exist + */ +int atl1c_check_eeprom_exist(struct atl1c_hw *hw) +{ + u32 data; + + AT_READ_REG(hw, REG_TWSI_DEBUG, &data); + if (data & TWSI_DEBUG_DEV_EXIST) + return 1; + + AT_READ_REG(hw, REG_MASTER_CTRL, &data); + if (data & MASTER_CTRL_OTP_SEL) + return 1; + return 0; +} + +void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr) +{ + u32 value; + /* + * 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword + */ + value = mac_addr[2] << 24 | + mac_addr[3] << 16 | + mac_addr[4] << 8 | + mac_addr[5]; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); + /* hight dword */ + value = mac_addr[0] << 8 | + mac_addr[1]; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); +} + +/* read mac address from hardware register */ +static bool atl1c_read_current_addr(struct atl1c_hw *hw, u8 *eth_addr) +{ + u32 addr[2]; + + AT_READ_REG(hw, REG_MAC_STA_ADDR, &addr[0]); + AT_READ_REG(hw, REG_MAC_STA_ADDR + 4, &addr[1]); + + *(u32 *) ð_addr[2] = htonl(addr[0]); + *(u16 *) ð_addr[0] = htons((u16)addr[1]); + + return is_valid_ether_addr(eth_addr); +} + +/* + * atl1c_get_permanent_address + * return 0 if get valid mac address, + */ +static int atl1c_get_permanent_address(struct atl1c_hw *hw) +{ + u32 i; + u32 otp_ctrl_data; + u32 twsi_ctrl_data; + u16 phy_data; + bool raise_vol = false; + + /* MAC-address from BIOS is the 1st priority */ + if (atl1c_read_current_addr(hw, hw->perm_mac_addr)) + return 0; + + /* init */ + AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); + if (atl1c_check_eeprom_exist(hw)) { + if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { + /* Enable OTP CLK */ + if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) { + otp_ctrl_data |= OTP_CTRL_CLK_EN; + AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); + AT_WRITE_FLUSH(hw); + msleep(1); + } + } + /* raise voltage temporally for l2cb */ + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { + atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data); + phy_data &= ~ANACTRL_HB_EN; + atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data); + atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data); + phy_data |= VOLT_CTRL_SWLOWEST; + atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data); + udelay(20); + raise_vol = true; + } + + AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); + twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; + AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data); + for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) { + msleep(10); + AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data); + if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0) + break; + } + if (i >= AT_TWSI_EEPROM_TIMEOUT) + return -1; + } + /* Disable OTP_CLK */ + if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) { + otp_ctrl_data &= ~OTP_CTRL_CLK_EN; + AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); + msleep(1); + } + if (raise_vol) { + atl1c_read_phy_dbg(hw, MIIDBG_ANACTRL, &phy_data); + phy_data |= ANACTRL_HB_EN; + atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, phy_data); + atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data); + phy_data &= ~VOLT_CTRL_SWLOWEST; + atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data); + udelay(20); + } + + if (atl1c_read_current_addr(hw, hw->perm_mac_addr)) + return 0; + + return -1; +} + +bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value) +{ + int i; + bool ret = false; + u32 otp_ctrl_data; + u32 control; + u32 data; + + if (offset & 3) + return ret; /* address do not align */ + + AT_READ_REG(hw, REG_OTP_CTRL, 
&otp_ctrl_data); + if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) + AT_WRITE_REG(hw, REG_OTP_CTRL, + (otp_ctrl_data | OTP_CTRL_CLK_EN)); + + AT_WRITE_REG(hw, REG_EEPROM_DATA_LO, 0); + control = (offset & EEPROM_CTRL_ADDR_MASK) << EEPROM_CTRL_ADDR_SHIFT; + AT_WRITE_REG(hw, REG_EEPROM_CTRL, control); + + for (i = 0; i < 10; i++) { + udelay(100); + AT_READ_REG(hw, REG_EEPROM_CTRL, &control); + if (control & EEPROM_CTRL_RW) + break; + } + if (control & EEPROM_CTRL_RW) { + AT_READ_REG(hw, REG_EEPROM_CTRL, &data); + AT_READ_REG(hw, REG_EEPROM_DATA_LO, p_value); + data = data & 0xFFFF; + *p_value = swab32((data << 16) | (*p_value >> 16)); + ret = true; + } + if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) + AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data); + + return ret; +} +/* + * Reads the adapter's MAC address from the EEPROM + * + * hw - Struct containing variables accessed by shared code + */ +int atl1c_read_mac_addr(struct atl1c_hw *hw) +{ + int err = 0; + + err = atl1c_get_permanent_address(hw); + if (err) + eth_random_addr(hw->perm_mac_addr); + + memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr)); + return err; +} + +/* + * atl1c_hash_mc_addr + * purpose + * set hash value for a multicast address + * hash calcu processing : + * 1. calcu 32bit CRC for multicast address + * 2. reverse crc with MSB to LSB + */ +u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr) +{ + u32 crc32; + u32 value = 0; + int i; + + crc32 = ether_crc_le(6, mc_addr); + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); + + return value; +} + +/* + * Sets the bit in the multicast table corresponding to the hash value. + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta; + + /* + * The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper bit of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. 
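+ *
+ * Worked example (editor's illustration, not in the original source):
+ * for hash_value 0x8C000000 the top bit is 1, so the second register is
+ * selected (hash_reg = 1), and bits 30:26 are 00011, so bit 3 of that
+ * register is set (hash_bit = 3).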
+ */ + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + + mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); + + mta |= (1 << hash_bit); + + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); +} + +/* + * wait mdio module be idle + * return true: idle + * false: still busy + */ +bool atl1c_wait_mdio_idle(struct atl1c_hw *hw) +{ + u32 val; + int i; + + for (i = 0; i < MDIO_MAX_AC_TO; i++) { + AT_READ_REG(hw, REG_MDIO_CTRL, &val); + if (!(val & (MDIO_CTRL_BUSY | MDIO_CTRL_START))) + break; + udelay(10); + } + + return i != MDIO_MAX_AC_TO; +} + +void atl1c_stop_phy_polling(struct atl1c_hw *hw) +{ + if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION)) + return; + + AT_WRITE_REG(hw, REG_MDIO_CTRL, 0); + atl1c_wait_mdio_idle(hw); +} + +void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel) +{ + u32 val; + + if (!(hw->ctrl_flags & ATL1C_FPGA_VERSION)) + return; + + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_REG, 1) | + MDIO_CTRL_START | + MDIO_CTRL_OP_READ; + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + atl1c_wait_mdio_idle(hw); + val |= MDIO_CTRL_AP_EN; + val &= ~MDIO_CTRL_START; + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + udelay(30); +} + + +/* + * atl1c_read_phy_core + * core function to read register in PHY via MDIO control regsiter. + * ext: extension register (see IEEE 802.3) + * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) + * reg: reg to read + */ +int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, + u16 reg, u16 *phy_data) +{ + u32 val; + u16 clk_sel = MDIO_CTRL_CLK_25_4; + + atl1c_stop_phy_polling(hw); + + *phy_data = 0; + + /* only l2c_b2 & l1d_2 could use slow clock */ + if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) && + hw->hibernate) + clk_sel = MDIO_CTRL_CLK_25_128; + if (ext) { + val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg); + AT_WRITE_REG(hw, REG_MDIO_EXTN, val); + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + MDIO_CTRL_START | + MDIO_CTRL_MODE_EXT | + MDIO_CTRL_OP_READ; + } else { + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_REG, reg) | + MDIO_CTRL_START | + MDIO_CTRL_OP_READ; + } + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + + if (!atl1c_wait_mdio_idle(hw)) + return -1; + + AT_READ_REG(hw, REG_MDIO_CTRL, &val); + *phy_data = (u16)FIELD_GETX(val, MDIO_CTRL_DATA); + + atl1c_start_phy_polling(hw, clk_sel); + + return 0; +} + +/* + * atl1c_write_phy_core + * core function to write to register in PHY via MDIO control register. 
+ * ext: extension register (see IEEE 802.3) + * dev: device address (see IEEE 802.3 DEVAD, PRTAD is fixed to 0) + * reg: reg to write + */ +int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, + u16 reg, u16 phy_data) +{ + u32 val; + u16 clk_sel = MDIO_CTRL_CLK_25_4; + + atl1c_stop_phy_polling(hw); + + + /* only l2c_b2 & l1d_2 could use slow clock */ + if ((hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) && + hw->hibernate) + clk_sel = MDIO_CTRL_CLK_25_128; + + if (ext) { + val = FIELDX(MDIO_EXTN_DEVAD, dev) | FIELDX(MDIO_EXTN_REG, reg); + AT_WRITE_REG(hw, REG_MDIO_EXTN, val); + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_DATA, phy_data) | + MDIO_CTRL_START | + MDIO_CTRL_MODE_EXT; + } else { + val = MDIO_CTRL_SPRES_PRMBL | + FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) | + FIELDX(MDIO_CTRL_DATA, phy_data) | + FIELDX(MDIO_CTRL_REG, reg) | + MDIO_CTRL_START; + } + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + + if (!atl1c_wait_mdio_idle(hw)) + return -1; + + atl1c_start_phy_polling(hw, clk_sel); + + return 0; +} + +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) +{ + return atl1c_read_phy_core(hw, false, 0, reg_addr, phy_data); +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data) +{ + return atl1c_write_phy_core(hw, false, 0, reg_addr, phy_data); +} + +/* read from PHY extension register */ +int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 *phy_data) +{ + return atl1c_read_phy_core(hw, true, dev_addr, reg_addr, phy_data); +} + +/* write to PHY extension register */ +int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 phy_data) +{ + return atl1c_write_phy_core(hw, true, dev_addr, reg_addr, phy_data); +} + +int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data) +{ + int err; + + err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr); + if (unlikely(err)) + return err; + else + err = atl1c_read_phy_reg(hw, MII_DBG_DATA, phy_data); + + return err; +} + +int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data) +{ + int err; + + err = atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg_addr); + if (unlikely(err)) + return err; + else + err = atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data); + + return err; +} + +/* + * Configures PHY autoneg and flow control advertisement settings + * + * hw - Struct containing variables accessed by shared code + */ +static int atl1c_phy_setup_adv(struct atl1c_hw *hw) +{ + u16 mii_adv_data = ADVERTISE_DEFAULT_CAP & ~ADVERTISE_ALL; + u16 mii_giga_ctrl_data = GIGA_CR_1000T_DEFAULT_CAP & + ~GIGA_CR_1000T_SPEED_MASK; + + if (hw->autoneg_advertised & ADVERTISED_10baseT_Half) + mii_adv_data |= ADVERTISE_10HALF; + if (hw->autoneg_advertised & ADVERTISED_10baseT_Full) + mii_adv_data |= ADVERTISE_10FULL; + if (hw->autoneg_advertised & ADVERTISED_100baseT_Half) + mii_adv_data |= ADVERTISE_100HALF; + if (hw->autoneg_advertised & ADVERTISED_100baseT_Full) + mii_adv_data |= ADVERTISE_100FULL; + + if (hw->autoneg_advertised & ADVERTISED_Autoneg) + mii_adv_data |= ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL; + + if 
(hw->link_cap_flags & ATL1C_LINK_CAP_1000M) { + if (hw->autoneg_advertised & ADVERTISED_1000baseT_Half) + mii_giga_ctrl_data |= ADVERTISE_1000HALF; + if (hw->autoneg_advertised & ADVERTISED_1000baseT_Full) + mii_giga_ctrl_data |= ADVERTISE_1000FULL; + if (hw->autoneg_advertised & ADVERTISED_Autoneg) + mii_giga_ctrl_data |= ADVERTISE_1000HALF | + ADVERTISE_1000FULL; + } + + if (atl1c_write_phy_reg(hw, MII_ADVERTISE, mii_adv_data) != 0 || + atl1c_write_phy_reg(hw, MII_CTRL1000, mii_giga_ctrl_data) != 0) + return -1; + return 0; +} + +void atl1c_phy_disable(struct atl1c_hw *hw) +{ + atl1c_power_saving(hw, 0); +} + + +int atl1c_phy_reset(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + u16 phy_data; + u32 phy_ctrl_data, lpi_ctrl; + int err; + + /* reset PHY core */ + AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl_data); + phy_ctrl_data &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_PHY_IDDQ | + GPHY_CTRL_GATE_25M_EN | GPHY_CTRL_PWDOWN_HW | GPHY_CTRL_CLS); + phy_ctrl_data |= GPHY_CTRL_SEL_ANA_RST; + if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE)) + phy_ctrl_data |= (GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE); + else + phy_ctrl_data &= ~(GPHY_CTRL_HIB_EN | GPHY_CTRL_HIB_PULSE); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data); + AT_WRITE_FLUSH(hw); + udelay(10); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl_data | GPHY_CTRL_EXT_RESET); + AT_WRITE_FLUSH(hw); + udelay(10 * GPHY_CTRL_EXT_RST_TO); /* delay 800us */ + + /* switch clock */ + if (hw->nic_type == athr_l2c_b) { + atl1c_read_phy_dbg(hw, MIIDBG_CFGLPSPD, &phy_data); + atl1c_write_phy_dbg(hw, MIIDBG_CFGLPSPD, + phy_data & ~CFGLPSPD_RSTCNT_CLK125SW); + } + + /* tx-half amplitude issue fix */ + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { + atl1c_read_phy_dbg(hw, MIIDBG_CABLE1TH_DET, &phy_data); + phy_data |= CABLE1TH_DET_EN; + atl1c_write_phy_dbg(hw, MIIDBG_CABLE1TH_DET, phy_data); + } + + /* clear bit3 of dbgport 3B to lower voltage */ + if (!(hw->ctrl_flags & ATL1C_HIB_DISABLE)) { + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2) { + atl1c_read_phy_dbg(hw, MIIDBG_VOLT_CTRL, &phy_data); + phy_data &= ~VOLT_CTRL_SWLOWEST; + atl1c_write_phy_dbg(hw, MIIDBG_VOLT_CTRL, phy_data); + } + /* power saving config */ + phy_data = + hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 ? 
+ L1D_LEGCYPS_DEF : L1C_LEGCYPS_DEF; + atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS, phy_data); + /* hib */ + atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, + SYSMODCTRL_IECHOADJ_DEF); + } else { + /* disable pws */ + atl1c_read_phy_dbg(hw, MIIDBG_LEGCYPS, &phy_data); + atl1c_write_phy_dbg(hw, MIIDBG_LEGCYPS, + phy_data & ~LEGCYPS_EN); + /* disable hibernate */ + atl1c_read_phy_dbg(hw, MIIDBG_HIBNEG, &phy_data); + atl1c_write_phy_dbg(hw, MIIDBG_HIBNEG, + phy_data & HIBNEG_PSHIB_EN); + } + /* disable AZ(EEE) by default */ + if (hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2 || + hw->nic_type == athr_l2c_b2) { + AT_READ_REG(hw, REG_LPI_CTRL, &lpi_ctrl); + AT_WRITE_REG(hw, REG_LPI_CTRL, lpi_ctrl & ~LPI_CTRL_EN); + atl1c_write_phy_ext(hw, MIIEXT_ANEG, MIIEXT_LOCAL_EEEADV, 0); + atl1c_write_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL3, + L2CB_CLDCTRL3); + } + + /* other debug port to set */ + atl1c_write_phy_dbg(hw, MIIDBG_ANACTRL, ANACTRL_DEF); + atl1c_write_phy_dbg(hw, MIIDBG_SRDSYSMOD, SRDSYSMOD_DEF); + atl1c_write_phy_dbg(hw, MIIDBG_TST10BTCFG, TST10BTCFG_DEF); + /* UNH-IOL test issue, set bit7 */ + atl1c_write_phy_dbg(hw, MIIDBG_TST100BTCFG, + TST100BTCFG_DEF | TST100BTCFG_LITCH_EN); + + /* set phy interrupt mask */ + phy_data = IER_LINK_UP | IER_LINK_DOWN; + err = atl1c_write_phy_reg(hw, MII_IER, phy_data); + if (err) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "Error enable PHY linkChange Interrupt\n"); + return err; + } + return 0; +} + +int atl1c_phy_init(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + int ret_val; + u16 mii_bmcr_data = BMCR_RESET; + + if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) || + (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) { + dev_err(&pdev->dev, "Error get phy ID\n"); + return -1; + } + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + ret_val = atl1c_phy_setup_adv(hw); + if (ret_val) { + if (netif_msg_link(adapter)) + dev_err(&pdev->dev, + "Error Setting up Auto-Negotiation\n"); + return ret_val; + } + mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART; + break; + case MEDIA_TYPE_100M_FULL: + mii_bmcr_data |= BMCR_SPEED100 | BMCR_FULLDPLX; + break; + case MEDIA_TYPE_100M_HALF: + mii_bmcr_data |= BMCR_SPEED100; + break; + case MEDIA_TYPE_10M_FULL: + mii_bmcr_data |= BMCR_FULLDPLX; + break; + case MEDIA_TYPE_10M_HALF: + break; + default: + if (netif_msg_link(adapter)) + dev_err(&pdev->dev, "Wrong Media type %d\n", + hw->media_type); + return -1; + } + + ret_val = atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); + if (ret_val) + return ret_val; + hw->phy_configured = true; + + return 0; +} + +/* + * Detects the current speed and duplex settings of the hardware. 
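+ * (Editor's note: the function below reads the PHY Specific Status
+ * Register, MII_GIGA_PSSR, and decodes the speed/duplex bits only once
+ * GIGA_PSSR_SPD_DPLX_RESOLVED reports that autonegotiation has
+ * resolved; until then it returns an error.)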
+ * + * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex) +{ + int err; + u16 phy_data; + + /* Read PHY Specific Status Register (17) */ + err = atl1c_read_phy_reg(hw, MII_GIGA_PSSR, &phy_data); + if (err) + return err; + + if (!(phy_data & GIGA_PSSR_SPD_DPLX_RESOLVED)) + return -1; + + switch (phy_data & GIGA_PSSR_SPEED) { + case GIGA_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case GIGA_PSSR_100MBS: + *speed = SPEED_100; + break; + case GIGA_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + return -1; + } + + if (phy_data & GIGA_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; +} + +/* select one link mode to get lower power consumption */ +int atl1c_phy_to_ps_link(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + int ret = 0; + u16 autoneg_advertised = ADVERTISED_10baseT_Half; + u16 save_autoneg_advertised; + u16 phy_data; + u16 mii_lpa_data; + u16 speed = SPEED_0; + u16 duplex = FULL_DUPLEX; + int i; + + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + if (phy_data & BMSR_LSTATUS) { + atl1c_read_phy_reg(hw, MII_LPA, &mii_lpa_data); + if (mii_lpa_data & LPA_10FULL) + autoneg_advertised = ADVERTISED_10baseT_Full; + else if (mii_lpa_data & LPA_10HALF) + autoneg_advertised = ADVERTISED_10baseT_Half; + else if (mii_lpa_data & LPA_100HALF) + autoneg_advertised = ADVERTISED_100baseT_Half; + else if (mii_lpa_data & LPA_100FULL) + autoneg_advertised = ADVERTISED_100baseT_Full; + + save_autoneg_advertised = hw->autoneg_advertised; + hw->phy_configured = false; + hw->autoneg_advertised = autoneg_advertised; + if (atl1c_restart_autoneg(hw) != 0) { + dev_dbg(&pdev->dev, "phy autoneg failed\n"); + ret = -1; + } + hw->autoneg_advertised = save_autoneg_advertised; + + if (mii_lpa_data) { + for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { + mdelay(100); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + if (phy_data & BMSR_LSTATUS) { + if (atl1c_get_speed_and_duplex(hw, &speed, + &duplex) != 0) + dev_dbg(&pdev->dev, + "get speed and duplex failed\n"); + break; + } + } + } + } else { + speed = SPEED_10; + duplex = HALF_DUPLEX; + } + adapter->link_speed = speed; + adapter->link_duplex = duplex; + + return ret; +} + +int atl1c_restart_autoneg(struct atl1c_hw *hw) +{ + int err = 0; + u16 mii_bmcr_data = BMCR_RESET; + + err = atl1c_phy_setup_adv(hw); + if (err) + return err; + mii_bmcr_data |= BMCR_ANENABLE | BMCR_ANRESTART; + + return atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data); +} + +int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + u32 master_ctrl, mac_ctrl, phy_ctrl; + u32 wol_ctrl, speed; + u16 phy_data; + + wol_ctrl = 0; + speed = adapter->link_speed == SPEED_1000 ? 
+ MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100; + + AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl); + AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl); + AT_READ_REG(hw, REG_GPHY_CTRL, &phy_ctrl); + + master_ctrl &= ~MASTER_CTRL_CLK_SEL_DIS; + mac_ctrl = FIELD_SETX(mac_ctrl, MAC_CTRL_SPEED, speed); + mac_ctrl &= ~(MAC_CTRL_DUPLX | MAC_CTRL_RX_EN | MAC_CTRL_TX_EN); + if (adapter->link_duplex == FULL_DUPLEX) + mac_ctrl |= MAC_CTRL_DUPLX; + phy_ctrl &= ~(GPHY_CTRL_EXT_RESET | GPHY_CTRL_CLS); + phy_ctrl |= GPHY_CTRL_SEL_ANA_RST | GPHY_CTRL_HIB_PULSE | + GPHY_CTRL_HIB_EN; + if (!wufc) { /* without WoL */ + master_ctrl |= MASTER_CTRL_CLK_SEL_DIS; + phy_ctrl |= GPHY_CTRL_PHY_IDDQ | GPHY_CTRL_PWDOWN_HW; + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl); + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + hw->phy_configured = false; /* re-init PHY when resume */ + return 0; + } + phy_ctrl |= GPHY_CTRL_EXT_RESET; + if (wufc & AT_WUFC_MAG) { + mac_ctrl |= MAC_CTRL_RX_EN | MAC_CTRL_BC_EN; + wol_ctrl |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; + if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V11) + wol_ctrl |= WOL_PATTERN_EN | WOL_PATTERN_PME_EN; + } + if (wufc & AT_WUFC_LNKC) { + wol_ctrl |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; + if (atl1c_write_phy_reg(hw, MII_IER, IER_LINK_UP) != 0) { + dev_dbg(&pdev->dev, "%s: write phy MII_IER failed.\n", + atl1c_driver_name); + } + } + /* clear PHY interrupt */ + atl1c_read_phy_reg(hw, MII_ISR, &phy_data); + + dev_dbg(&pdev->dev, "%s: suspend MAC=%x,MASTER=%x,PHY=0x%x,WOL=%x\n", + atl1c_driver_name, mac_ctrl, master_ctrl, phy_ctrl, wol_ctrl); + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl); + AT_WRITE_REG(hw, REG_GPHY_CTRL, phy_ctrl); + AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl); + + return 0; +} + + +/* configure phy after Link change Event */ +void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed) +{ + u16 phy_val; + bool adj_thresh = false; + + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 || + hw->nic_type == athr_l1d || hw->nic_type == athr_l1d_2) + adj_thresh = true; + + if (link_speed != SPEED_0) { /* link up */ + /* az with brcm, half-amp */ + if (hw->nic_type == athr_l1d_2) { + atl1c_read_phy_ext(hw, MIIEXT_PCS, MIIEXT_CLDCTRL6, + &phy_val); + phy_val = FIELD_GETX(phy_val, CLDCTRL6_CAB_LEN); + phy_val = phy_val > CLDCTRL6_CAB_LEN_SHORT ? + AZ_ANADECT_LONG : AZ_ANADECT_DEF; + atl1c_write_phy_dbg(hw, MIIDBG_AZ_ANADECT, phy_val); + } + /* threshold adjust */ + if (adj_thresh && link_speed == SPEED_100 && hw->msi_lnkpatch) { + atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB, L1D_MSE16DB_UP); + atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, + L1D_SYSMODCTRL_IECHOADJ_DEF); + } + } else { /* link down */ + if (adj_thresh && hw->msi_lnkpatch) { + atl1c_write_phy_dbg(hw, MIIDBG_SYSMODCTRL, + SYSMODCTRL_IECHOADJ_DEF); + atl1c_write_phy_dbg(hw, MIIDBG_MSE16DB, + L1D_MSE16DB_DOWN); + } + } +} diff --git a/addons/atl1c/src/4.4.180/atl1c_hw.h b/addons/atl1c/src/4.4.180/atl1c_hw.h new file mode 100644 index 00000000..21d8c4db --- /dev/null +++ b/addons/atl1c/src/4.4.180/atl1c_hw.h @@ -0,0 +1,1024 @@ +/* + * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _ATL1C_HW_H_ +#define _ATL1C_HW_H_ + +#include +#include + +#define FIELD_GETX(_x, _name) ((_x) >> (_name##_SHIFT) & (_name##_MASK)) +#define FIELD_SETX(_x, _name, _v) \ +(((_x) & ~((_name##_MASK) << (_name##_SHIFT))) |\ +(((_v) & (_name##_MASK)) << (_name##_SHIFT))) +#define FIELDX(_name, _v) (((_v) & (_name##_MASK)) << (_name##_SHIFT)) + +struct atl1c_adapter; +struct atl1c_hw; + +/* function prototype */ +void atl1c_phy_disable(struct atl1c_hw *hw); +void atl1c_hw_set_mac_addr(struct atl1c_hw *hw, u8 *mac_addr); +int atl1c_phy_reset(struct atl1c_hw *hw); +int atl1c_read_mac_addr(struct atl1c_hw *hw); +int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex); +u32 atl1c_hash_mc_addr(struct atl1c_hw *hw, u8 *mc_addr); +void atl1c_hash_set(struct atl1c_hw *hw, u32 hash_value); +int atl1c_read_phy_reg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data); +int atl1c_write_phy_reg(struct atl1c_hw *hw, u32 reg_addr, u16 phy_data); +bool atl1c_read_eeprom(struct atl1c_hw *hw, u32 offset, u32 *p_value); +int atl1c_phy_init(struct atl1c_hw *hw); +int atl1c_check_eeprom_exist(struct atl1c_hw *hw); +int atl1c_restart_autoneg(struct atl1c_hw *hw); +int atl1c_phy_to_ps_link(struct atl1c_hw *hw); +int atl1c_power_saving(struct atl1c_hw *hw, u32 wufc); +bool atl1c_wait_mdio_idle(struct atl1c_hw *hw); +void atl1c_stop_phy_polling(struct atl1c_hw *hw); +void atl1c_start_phy_polling(struct atl1c_hw *hw, u16 clk_sel); +int atl1c_read_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, + u16 reg, u16 *phy_data); +int atl1c_write_phy_core(struct atl1c_hw *hw, bool ext, u8 dev, + u16 reg, u16 phy_data); +int atl1c_read_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 *phy_data); +int atl1c_write_phy_ext(struct atl1c_hw *hw, u8 dev_addr, + u16 reg_addr, u16 phy_data); +int atl1c_read_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 *phy_data); +int atl1c_write_phy_dbg(struct atl1c_hw *hw, u16 reg_addr, u16 phy_data); +void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed); + +/* hw-ids */ +#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062 +#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063 +#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */ +#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */ +#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */ +#define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */ +#define L2CB_V10 0xc0 +#define L2CB_V11 0xc1 +#define L2CB_V20 0xc0 +#define L2CB_V21 0xc1 + +/* register definition */ +#define REG_DEVICE_CAP 0x5C +#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 +#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0 + +#define DEVICE_CTRL_MAXRRS_MIN 2 + +#define REG_LINK_CTRL 0x68 +#define LINK_CTRL_L0S_EN 0x01 +#define LINK_CTRL_L1_EN 0x02 +#define LINK_CTRL_EXT_SYNC 
0x80 + +#define REG_PCIE_IND_ACC_ADDR 0x80 +#define REG_PCIE_IND_ACC_DATA 0x84 + +#define REG_DEV_SERIALNUM_CTRL 0x200 +#define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */ +#define REG_DEV_MAC_SEL_SHIFT 0 +#define REG_DEV_SERIAL_NUM_EN_MASK 0x1 +#define REG_DEV_SERIAL_NUM_EN_SHIFT 1 + +#define REG_TWSI_CTRL 0x218 +#define TWSI_CTLR_FREQ_MASK 0x3UL +#define TWSI_CTRL_FREQ_SHIFT 24 +#define TWSI_CTRL_FREQ_100K 0 +#define TWSI_CTRL_FREQ_200K 1 +#define TWSI_CTRL_FREQ_300K 2 +#define TWSI_CTRL_FREQ_400K 3 +#define TWSI_CTRL_LD_EXIST BIT(23) +#define TWSI_CTRL_HW_LDSTAT BIT(12) /* 0:finish,1:in progress */ +#define TWSI_CTRL_SW_LDSTART BIT(11) +#define TWSI_CTRL_LD_OFFSET_MASK 0xFF +#define TWSI_CTRL_LD_OFFSET_SHIFT 0 + +#define REG_PCIE_DEV_MISC_CTRL 0x21C +#define PCIE_DEV_MISC_EXT_PIPE 0x2 +#define PCIE_DEV_MISC_RETRY_BUFDIS 0x1 +#define PCIE_DEV_MISC_SPIROM_EXIST 0x4 +#define PCIE_DEV_MISC_SERDES_ENDIAN 0x8 +#define PCIE_DEV_MISC_SERDES_SEL_DIN 0x10 + +#define REG_PCIE_PHYMISC 0x1000 +#define PCIE_PHYMISC_FORCE_RCV_DET BIT(2) +#define PCIE_PHYMISC_NFTS_MASK 0xFFUL +#define PCIE_PHYMISC_NFTS_SHIFT 16 + +#define REG_PCIE_PHYMISC2 0x1004 +#define PCIE_PHYMISC2_L0S_TH_MASK 0x3UL +#define PCIE_PHYMISC2_L0S_TH_SHIFT 18 +#define L2CB1_PCIE_PHYMISC2_L0S_TH 3 +#define PCIE_PHYMISC2_CDR_BW_MASK 0x3UL +#define PCIE_PHYMISC2_CDR_BW_SHIFT 16 +#define L2CB1_PCIE_PHYMISC2_CDR_BW 3 + +#define REG_TWSI_DEBUG 0x1108 +#define TWSI_DEBUG_DEV_EXIST BIT(29) + +#define REG_DMA_DBG 0x1114 +#define DMA_DBG_VENDOR_MSG BIT(0) + +#define REG_EEPROM_CTRL 0x12C0 +#define EEPROM_CTRL_DATA_HI_MASK 0xFFFF +#define EEPROM_CTRL_DATA_HI_SHIFT 0 +#define EEPROM_CTRL_ADDR_MASK 0x3FF +#define EEPROM_CTRL_ADDR_SHIFT 16 +#define EEPROM_CTRL_ACK 0x40000000 +#define EEPROM_CTRL_RW 0x80000000 + +#define REG_EEPROM_DATA_LO 0x12C4 + +#define REG_OTP_CTRL 0x12F0 +#define OTP_CTRL_CLK_EN BIT(1) + +#define REG_PM_CTRL 0x12F8 +#define PM_CTRL_HOTRST BIT(31) +#define PM_CTRL_MAC_ASPM_CHK BIT(30) /* L0s/L1 dis by MAC based on + * thrghput(setting in 15A0) */ +#define PM_CTRL_SA_DLY_EN BIT(29) +#define PM_CTRL_L0S_BUFSRX_EN BIT(28) +#define PM_CTRL_LCKDET_TIMER_MASK 0xFUL +#define PM_CTRL_LCKDET_TIMER_SHIFT 24 +#define PM_CTRL_LCKDET_TIMER_DEF 0xC +#define PM_CTRL_PM_REQ_TIMER_MASK 0xFUL +#define PM_CTRL_PM_REQ_TIMER_SHIFT 20 /* pm_request_l1 time > @ + * ->L0s not L1 */ +#define PM_CTRL_PM_REQ_TO_DEF 0xF +#define PMCTRL_TXL1_AFTER_L0S BIT(19) /* l1dv2.0+ */ +#define L1D_PMCTRL_L1_ENTRY_TM_MASK 7UL /* l1dv2.0+, 3bits */ +#define L1D_PMCTRL_L1_ENTRY_TM_SHIFT 16 +#define L1D_PMCTRL_L1_ENTRY_TM_DIS 0 +#define L1D_PMCTRL_L1_ENTRY_TM_2US 1 +#define L1D_PMCTRL_L1_ENTRY_TM_4US 2 +#define L1D_PMCTRL_L1_ENTRY_TM_8US 3 +#define L1D_PMCTRL_L1_ENTRY_TM_16US 4 +#define L1D_PMCTRL_L1_ENTRY_TM_24US 5 +#define L1D_PMCTRL_L1_ENTRY_TM_32US 6 +#define L1D_PMCTRL_L1_ENTRY_TM_63US 7 +#define PM_CTRL_L1_ENTRY_TIMER_MASK 0xFUL /* l1C 4bits */ +#define PM_CTRL_L1_ENTRY_TIMER_SHIFT 16 +#define L2CB1_PM_CTRL_L1_ENTRY_TM 7 +#define L1C_PM_CTRL_L1_ENTRY_TM 0xF +#define PM_CTRL_RCVR_WT_TIMER BIT(15) /* 1:1us, 0:2ms */ +#define PM_CTRL_CLK_PWM_VER1_1 BIT(14) /* 0:1.0a,1:1.1 */ +#define PM_CTRL_CLK_SWH_L1 BIT(13) /* en pcie clk sw in L1 */ +#define PM_CTRL_ASPM_L0S_EN BIT(12) +#define PM_CTRL_RXL1_AFTER_L0S BIT(11) /* l1dv2.0+ */ +#define L1D_PMCTRL_L0S_TIMER_MASK 7UL /* l1d2.0+, 3bits*/ +#define L1D_PMCTRL_L0S_TIMER_SHIFT 8 +#define PM_CTRL_L0S_ENTRY_TIMER_MASK 0xFUL /* l1c, 4bits */ +#define PM_CTRL_L0S_ENTRY_TIMER_SHIFT 8 +#define PM_CTRL_SERDES_BUFS_RX_L1_EN BIT(7) 
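+/*
+ * Editor's illustration (a sketch, not part of the original driver):
+ * multi-bit fields in REG_PM_CTRL are packed with the FIELDX()/
+ * FIELD_SETX() helpers defined at the top of this header.  A
+ * hypothetical snippet that programs the L1 entry timer on an
+ * l1c-class NIC could look like:
+ *
+ *	u32 pm_ctrl;
+ *
+ *	AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl);
+ *	pm_ctrl = FIELD_SETX(pm_ctrl, PM_CTRL_L1_ENTRY_TIMER,
+ *			     L1C_PM_CTRL_L1_ENTRY_TM);
+ *	AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl);
+ */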
+#define PM_CTRL_SERDES_PD_EX_L1 BIT(6) /* power down serdes rx */ +#define PM_CTRL_SERDES_PLL_L1_EN BIT(5) +#define PM_CTRL_SERDES_L1_EN BIT(4) +#define PM_CTRL_ASPM_L1_EN BIT(3) +#define PM_CTRL_CLK_REQ_EN BIT(2) +#define PM_CTRL_RBER_EN BIT(1) +#define PM_CTRL_SPRSDWER_EN BIT(0) + +#define REG_LTSSM_ID_CTRL 0x12FC +#define LTSSM_ID_EN_WRO 0x1000 + + +/* Selene Master Control Register */ +#define REG_MASTER_CTRL 0x1400 +#define MASTER_CTRL_OTP_SEL BIT(31) +#define MASTER_DEV_NUM_MASK 0x7FUL +#define MASTER_DEV_NUM_SHIFT 24 +#define MASTER_REV_NUM_MASK 0xFFUL +#define MASTER_REV_NUM_SHIFT 16 +#define MASTER_CTRL_INT_RDCLR BIT(14) +#define MASTER_CTRL_CLK_SEL_DIS BIT(12) /* 1:alwys sel pclk from + * serdes, not sw to 25M */ +#define MASTER_CTRL_RX_ITIMER_EN BIT(11) /* IRQ MODURATION FOR RX */ +#define MASTER_CTRL_TX_ITIMER_EN BIT(10) /* MODURATION FOR TX/RX */ +#define MASTER_CTRL_MANU_INT BIT(9) /* SOFT MANUAL INT */ +#define MASTER_CTRL_MANUTIMER_EN BIT(8) +#define MASTER_CTRL_SA_TIMER_EN BIT(7) /* SYS ALIVE TIMER EN */ +#define MASTER_CTRL_OOB_DIS BIT(6) /* OUT OF BOX DIS */ +#define MASTER_CTRL_WAKEN_25M BIT(5) /* WAKE WO. PCIE CLK */ +#define MASTER_CTRL_BERT_START BIT(4) +#define MASTER_PCIE_TSTMOD_MASK 3UL +#define MASTER_PCIE_TSTMOD_SHIFT 2 +#define MASTER_PCIE_RST BIT(1) +#define MASTER_CTRL_SOFT_RST BIT(0) /* RST MAC & DMA */ +#define DMA_MAC_RST_TO 50 + +/* Timer Initial Value Register */ +#define REG_MANUAL_TIMER_INIT 0x1404 + +/* IRQ ModeratorTimer Initial Value Register */ +#define REG_IRQ_MODRT_TIMER_INIT 0x1408 +#define IRQ_MODRT_TIMER_MASK 0xffff +#define IRQ_MODRT_TX_TIMER_SHIFT 0 +#define IRQ_MODRT_RX_TIMER_SHIFT 16 + +#define REG_GPHY_CTRL 0x140C +#define GPHY_CTRL_ADDR_MASK 0x1FUL +#define GPHY_CTRL_ADDR_SHIFT 19 +#define GPHY_CTRL_BP_VLTGSW BIT(18) +#define GPHY_CTRL_100AB_EN BIT(17) +#define GPHY_CTRL_10AB_EN BIT(16) +#define GPHY_CTRL_PHY_PLL_BYPASS BIT(15) +#define GPHY_CTRL_PWDOWN_HW BIT(14) /* affect MAC&PHY, to low pw */ +#define GPHY_CTRL_PHY_PLL_ON BIT(13) /* 1:pll always on, 0:can sw */ +#define GPHY_CTRL_SEL_ANA_RST BIT(12) +#define GPHY_CTRL_HIB_PULSE BIT(11) +#define GPHY_CTRL_HIB_EN BIT(10) +#define GPHY_CTRL_GIGA_DIS BIT(9) +#define GPHY_CTRL_PHY_IDDQ_DIS BIT(8) /* pw on RST */ +#define GPHY_CTRL_PHY_IDDQ BIT(7) /* bit8 affect bit7 while rb */ +#define GPHY_CTRL_LPW_EXIT BIT(6) +#define GPHY_CTRL_GATE_25M_EN BIT(5) +#define GPHY_CTRL_REV_ANEG BIT(4) +#define GPHY_CTRL_ANEG_NOW BIT(3) +#define GPHY_CTRL_LED_MODE BIT(2) +#define GPHY_CTRL_RTL_MODE BIT(1) +#define GPHY_CTRL_EXT_RESET BIT(0) /* 1:out of DSP RST status */ +#define GPHY_CTRL_EXT_RST_TO 80 /* 800us atmost */ +#define GPHY_CTRL_CLS (\ + GPHY_CTRL_LED_MODE |\ + GPHY_CTRL_100AB_EN |\ + GPHY_CTRL_PHY_PLL_ON) + +/* Block IDLE Status Register */ +#define REG_IDLE_STATUS 0x1410 +#define IDLE_STATUS_SFORCE_MASK 0xFUL +#define IDLE_STATUS_SFORCE_SHIFT 14 +#define IDLE_STATUS_CALIB_DONE BIT(13) +#define IDLE_STATUS_CALIB_RES_MASK 0x1FUL +#define IDLE_STATUS_CALIB_RES_SHIFT 8 +#define IDLE_STATUS_CALIBERR_MASK 0xFUL +#define IDLE_STATUS_CALIBERR_SHIFT 4 +#define IDLE_STATUS_TXQ_BUSY BIT(3) +#define IDLE_STATUS_RXQ_BUSY BIT(2) +#define IDLE_STATUS_TXMAC_BUSY BIT(1) +#define IDLE_STATUS_RXMAC_BUSY BIT(0) +#define IDLE_STATUS_MASK (\ + IDLE_STATUS_TXQ_BUSY |\ + IDLE_STATUS_RXQ_BUSY |\ + IDLE_STATUS_TXMAC_BUSY |\ + IDLE_STATUS_RXMAC_BUSY) + +/* MDIO Control Register */ +#define REG_MDIO_CTRL 0x1414 +#define MDIO_CTRL_MODE_EXT BIT(30) +#define MDIO_CTRL_POST_READ BIT(29) +#define MDIO_CTRL_AP_EN BIT(28) 
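+/*
+ * Editor's note: atl1c_read_phy_core() in atl1c_hw.c composes a plain
+ * (non-extension) PHY read from these bits:
+ *
+ *	val = MDIO_CTRL_SPRES_PRMBL |
+ *	      FIELDX(MDIO_CTRL_CLK_SEL, clk_sel) |
+ *	      FIELDX(MDIO_CTRL_REG, reg) |
+ *	      MDIO_CTRL_START |
+ *	      MDIO_CTRL_OP_READ;
+ *	AT_WRITE_REG(hw, REG_MDIO_CTRL, val);
+ *
+ * and then polls until both MDIO_CTRL_BUSY and MDIO_CTRL_START clear
+ * (see atl1c_wait_mdio_idle()) before extracting MDIO_CTRL_DATA.
+ */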
+#define MDIO_CTRL_BUSY BIT(27) +#define MDIO_CTRL_CLK_SEL_MASK 0x7UL +#define MDIO_CTRL_CLK_SEL_SHIFT 24 +#define MDIO_CTRL_CLK_25_4 0 /* 25MHz divide 4 */ +#define MDIO_CTRL_CLK_25_6 2 +#define MDIO_CTRL_CLK_25_8 3 +#define MDIO_CTRL_CLK_25_10 4 +#define MDIO_CTRL_CLK_25_32 5 +#define MDIO_CTRL_CLK_25_64 6 +#define MDIO_CTRL_CLK_25_128 7 +#define MDIO_CTRL_START BIT(23) +#define MDIO_CTRL_SPRES_PRMBL BIT(22) +#define MDIO_CTRL_OP_READ BIT(21) /* 1:read, 0:write */ +#define MDIO_CTRL_REG_MASK 0x1FUL +#define MDIO_CTRL_REG_SHIFT 16 +#define MDIO_CTRL_DATA_MASK 0xFFFFUL +#define MDIO_CTRL_DATA_SHIFT 0 +#define MDIO_MAX_AC_TO 120 /* 1.2ms timeout for slow clk */ + +/* for extension reg access */ +#define REG_MDIO_EXTN 0x1448 +#define MDIO_EXTN_PORTAD_MASK 0x1FUL +#define MDIO_EXTN_PORTAD_SHIFT 21 +#define MDIO_EXTN_DEVAD_MASK 0x1FUL +#define MDIO_EXTN_DEVAD_SHIFT 16 +#define MDIO_EXTN_REG_MASK 0xFFFFUL +#define MDIO_EXTN_REG_SHIFT 0 + +/* BIST Control and Status Register0 (for the Packet Memory) */ +#define REG_BIST0_CTRL 0x141c +#define BIST0_NOW 0x1 +#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is + * un-repairable because + * it has address decoder + * failure or more than 1 cell + * stuck-to-x failure */ +#define BIST0_FUSE_FLAG 0x4 + +/* BIST Control and Status Register1(for the retry buffer of PCI Express) */ +#define REG_BIST1_CTRL 0x1420 +#define BIST1_NOW 0x1 +#define BIST1_SRAM_FAIL 0x2 +#define BIST1_FUSE_FLAG 0x4 + +/* SerDes Lock Detect Control and Status Register */ +#define REG_SERDES 0x1424 +#define SERDES_PHY_CLK_SLOWDOWN BIT(18) +#define SERDES_MAC_CLK_SLOWDOWN BIT(17) +#define SERDES_SELFB_PLL_MASK 0x3UL +#define SERDES_SELFB_PLL_SHIFT 14 +#define SERDES_PHYCLK_SEL_GTX BIT(13) /* 1:gtx_clk, 0:25M */ +#define SERDES_PCIECLK_SEL_SRDS BIT(12) /* 1:serdes,0:25M */ +#define SERDES_BUFS_RX_EN BIT(11) +#define SERDES_PD_RX BIT(10) +#define SERDES_PLL_EN BIT(9) +#define SERDES_EN BIT(8) +#define SERDES_SELFB_PLL_SEL_CSR BIT(6) /* 0:state-machine,1:csr */ +#define SERDES_SELFB_PLL_CSR_MASK 0x3UL +#define SERDES_SELFB_PLL_CSR_SHIFT 4 +#define SERDES_SELFB_PLL_CSR_4 3 /* 4-12% OV-CLK */ +#define SERDES_SELFB_PLL_CSR_0 2 /* 0-4% OV-CLK */ +#define SERDES_SELFB_PLL_CSR_12 1 /* 12-18% OV-CLK */ +#define SERDES_SELFB_PLL_CSR_18 0 /* 18-25% OV-CLK */ +#define SERDES_VCO_SLOW BIT(3) +#define SERDES_VCO_FAST BIT(2) +#define SERDES_LOCK_DETECT_EN BIT(1) +#define SERDES_LOCK_DETECT BIT(0) + +#define REG_LPI_DECISN_TIMER 0x143C +#define L2CB_LPI_DESISN_TIMER 0x7D00 + +#define REG_LPI_CTRL 0x1440 +#define LPI_CTRL_CHK_DA BIT(31) +#define LPI_CTRL_ENH_TO_MASK 0x1FFFUL +#define LPI_CTRL_ENH_TO_SHIFT 12 +#define LPI_CTRL_ENH_TH_MASK 0x1FUL +#define LPI_CTRL_ENH_TH_SHIFT 6 +#define LPI_CTRL_ENH_EN BIT(5) +#define LPI_CTRL_CHK_RX BIT(4) +#define LPI_CTRL_CHK_STATE BIT(3) +#define LPI_CTRL_GMII BIT(2) +#define LPI_CTRL_TO_PHY BIT(1) +#define LPI_CTRL_EN BIT(0) + +#define REG_LPI_WAIT 0x1444 +#define LPI_WAIT_TIMER_MASK 0xFFFFUL +#define LPI_WAIT_TIMER_SHIFT 0 + +/* MAC Control Register */ +#define REG_MAC_CTRL 0x1480 +#define MAC_CTRL_SPEED_MODE_SW BIT(30) /* 0:phy,1:sw */ +#define MAC_CTRL_HASH_ALG_CRC32 BIT(29) /* 1:legacy,0:lw_5b */ +#define MAC_CTRL_SINGLE_PAUSE_EN BIT(28) +#define MAC_CTRL_DBG BIT(27) +#define MAC_CTRL_BC_EN BIT(26) +#define MAC_CTRL_MC_ALL_EN BIT(25) +#define MAC_CTRL_RX_CHKSUM_EN BIT(24) +#define MAC_CTRL_TX_HUGE BIT(23) +#define MAC_CTRL_DBG_TX_BKPRESURE BIT(22) +#define MAC_CTRL_SPEED_MASK 3UL +#define MAC_CTRL_SPEED_SHIFT 20 +#define MAC_CTRL_SPEED_10_100 1 +#define 
MAC_CTRL_SPEED_1000 2 +#define MAC_CTRL_TX_SIMURST BIT(19) +#define MAC_CTRL_SCNT BIT(17) +#define MAC_CTRL_TX_PAUSE BIT(16) +#define MAC_CTRL_PROMIS_EN BIT(15) +#define MAC_CTRL_RMV_VLAN BIT(14) +#define MAC_CTRL_PRMLEN_MASK 0xFUL +#define MAC_CTRL_PRMLEN_SHIFT 10 +#define MAC_CTRL_HUGE_EN BIT(9) +#define MAC_CTRL_LENCHK BIT(8) +#define MAC_CTRL_PAD BIT(7) +#define MAC_CTRL_ADD_CRC BIT(6) +#define MAC_CTRL_DUPLX BIT(5) +#define MAC_CTRL_LOOPBACK BIT(4) +#define MAC_CTRL_RX_FLOW BIT(3) +#define MAC_CTRL_TX_FLOW BIT(2) +#define MAC_CTRL_RX_EN BIT(1) +#define MAC_CTRL_TX_EN BIT(0) + +/* MAC IPG/IFG Control Register */ +#define REG_MAC_IPG_IFG 0x1484 +#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back + * inter-packet gap. The + * default is 96-bit time */ +#define MAC_IPG_IFG_IPGT_MASK 0x7f +#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to + * enforce in between RX frames */ +#define MAC_IPG_IFG_MIFG_MASK 0xff /* Frame gap below such IFP is dropped */ +#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */ +#define MAC_IPG_IFG_IPGR1_MASK 0x7f +#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */ +#define MAC_IPG_IFG_IPGR2_MASK 0x7f + +/* MAC STATION ADDRESS */ +#define REG_MAC_STA_ADDR 0x1488 + +/* Hash table for multicast address */ +#define REG_RX_HASH_TABLE 0x1490 + +/* MAC Half-Duplex Control Register */ +#define REG_MAC_HALF_DUPLX_CTRL 0x1498 +#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */ +#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff +#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 +#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 +#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* No back-off on backpressure, + * immediately start the + * transmission after back pressure */ +#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision based flow control in half-duplex */ +#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf /* mode. 
In unit of 8-bit time */ + +/* Maximum Frame Length Control Register */ +#define REG_MTU 0x149c + +/* Wake-On-Lan control register */ +#define REG_WOL_CTRL 0x14a0 +#define WOL_PT7_MATCH BIT(31) +#define WOL_PT6_MATCH BIT(30) +#define WOL_PT5_MATCH BIT(29) +#define WOL_PT4_MATCH BIT(28) +#define WOL_PT3_MATCH BIT(27) +#define WOL_PT2_MATCH BIT(26) +#define WOL_PT1_MATCH BIT(25) +#define WOL_PT0_MATCH BIT(24) +#define WOL_PT7_EN BIT(23) +#define WOL_PT6_EN BIT(22) +#define WOL_PT5_EN BIT(21) +#define WOL_PT4_EN BIT(20) +#define WOL_PT3_EN BIT(19) +#define WOL_PT2_EN BIT(18) +#define WOL_PT1_EN BIT(17) +#define WOL_PT0_EN BIT(16) +#define WOL_LNKCHG_ST BIT(10) +#define WOL_MAGIC_ST BIT(9) +#define WOL_PATTERN_ST BIT(8) +#define WOL_OOB_EN BIT(6) +#define WOL_LINK_CHG_PME_EN BIT(5) +#define WOL_LINK_CHG_EN BIT(4) +#define WOL_MAGIC_PME_EN BIT(3) +#define WOL_MAGIC_EN BIT(2) +#define WOL_PATTERN_PME_EN BIT(1) +#define WOL_PATTERN_EN BIT(0) + +/* WOL Length ( 2 DWORD ) */ +#define REG_WOL_PTLEN1 0x14A4 +#define WOL_PTLEN1_3_MASK 0xFFUL +#define WOL_PTLEN1_3_SHIFT 24 +#define WOL_PTLEN1_2_MASK 0xFFUL +#define WOL_PTLEN1_2_SHIFT 16 +#define WOL_PTLEN1_1_MASK 0xFFUL +#define WOL_PTLEN1_1_SHIFT 8 +#define WOL_PTLEN1_0_MASK 0xFFUL +#define WOL_PTLEN1_0_SHIFT 0 + +#define REG_WOL_PTLEN2 0x14A8 +#define WOL_PTLEN2_7_MASK 0xFFUL +#define WOL_PTLEN2_7_SHIFT 24 +#define WOL_PTLEN2_6_MASK 0xFFUL +#define WOL_PTLEN2_6_SHIFT 16 +#define WOL_PTLEN2_5_MASK 0xFFUL +#define WOL_PTLEN2_5_SHIFT 8 +#define WOL_PTLEN2_4_MASK 0xFFUL +#define WOL_PTLEN2_4_SHIFT 0 + +/* Internal SRAM Partition Register */ +#define RFDX_HEAD_ADDR_MASK 0x03FF +#define RFDX_HARD_ADDR_SHIFT 0 +#define RFDX_TAIL_ADDR_MASK 0x03FF +#define RFDX_TAIL_ADDR_SHIFT 16 + +#define REG_SRAM_RFD0_INFO 0x1500 +#define REG_SRAM_RFD1_INFO 0x1504 +#define REG_SRAM_RFD2_INFO 0x1508 +#define REG_SRAM_RFD3_INFO 0x150C + +#define REG_RFD_NIC_LEN 0x1510 /* In 8-bytes */ +#define RFD_NIC_LEN_MASK 0x03FF + +#define REG_SRAM_TRD_ADDR 0x1518 +#define TPD_HEAD_ADDR_MASK 0x03FF +#define TPD_HEAD_ADDR_SHIFT 0 +#define TPD_TAIL_ADDR_MASK 0x03FF +#define TPD_TAIL_ADDR_SHIFT 16 + +#define REG_SRAM_TRD_LEN 0x151C /* In 8-bytes */ +#define TPD_NIC_LEN_MASK 0x03FF + +#define REG_SRAM_RXF_ADDR 0x1520 +#define REG_SRAM_RXF_LEN 0x1524 +#define REG_SRAM_TXF_ADDR 0x1528 +#define REG_SRAM_TXF_LEN 0x152C +#define REG_SRAM_TCPH_ADDR 0x1530 +#define REG_SRAM_PKTH_ADDR 0x1532 + +/* + * Load Ptr Register + * Software sets this bit after the initialization of the head and tail */ +#define REG_LOAD_PTR 0x1534 + +/* + * addresses of all descriptors, as well as the following descriptor + * control register, which triggers each function block to load the head + * pointer to prepare for the operation. This bit is then self-cleared + * after one cycle. 
+ */ +#define REG_RX_BASE_ADDR_HI 0x1540 +#define REG_TX_BASE_ADDR_HI 0x1544 +#define REG_RFD0_HEAD_ADDR_LO 0x1550 +#define REG_RFD_RING_SIZE 0x1560 +#define RFD_RING_SIZE_MASK 0x0FFF +#define REG_RX_BUF_SIZE 0x1564 +#define RX_BUF_SIZE_MASK 0xFFFF +#define REG_RRD0_HEAD_ADDR_LO 0x1568 +#define REG_RRD_RING_SIZE 0x1578 +#define RRD_RING_SIZE_MASK 0x0FFF +#define REG_TPD_PRI1_ADDR_LO 0x157C +#define REG_TPD_PRI0_ADDR_LO 0x1580 +#define REG_TPD_RING_SIZE 0x1584 +#define TPD_RING_SIZE_MASK 0xFFFF + +/* TXQ Control Register */ +#define REG_TXQ_CTRL 0x1590 +#define TXQ_TXF_BURST_NUM_MASK 0xFFFFUL +#define TXQ_TXF_BURST_NUM_SHIFT 16 +#define L1C_TXQ_TXF_BURST_PREF 0x200 +#define L2CB_TXQ_TXF_BURST_PREF 0x40 +#define TXQ_CTRL_PEDING_CLR BIT(8) +#define TXQ_CTRL_LS_8023_EN BIT(7) +#define TXQ_CTRL_ENH_MODE BIT(6) +#define TXQ_CTRL_EN BIT(5) +#define TXQ_CTRL_IP_OPTION_EN BIT(4) +#define TXQ_NUM_TPD_BURST_MASK 0xFUL +#define TXQ_NUM_TPD_BURST_SHIFT 0 +#define TXQ_NUM_TPD_BURST_DEF 5 +#define TXQ_CFGV (\ + FIELDX(TXQ_NUM_TPD_BURST, TXQ_NUM_TPD_BURST_DEF) |\ + TXQ_CTRL_ENH_MODE |\ + TXQ_CTRL_LS_8023_EN |\ + TXQ_CTRL_IP_OPTION_EN) +#define L1C_TXQ_CFGV (\ + TXQ_CFGV |\ + FIELDX(TXQ_TXF_BURST_NUM, L1C_TXQ_TXF_BURST_PREF)) +#define L2CB_TXQ_CFGV (\ + TXQ_CFGV |\ + FIELDX(TXQ_TXF_BURST_NUM, L2CB_TXQ_TXF_BURST_PREF)) + + +/* Jumbo packet Threshold for task offload */ +#define REG_TX_TSO_OFFLOAD_THRESH 0x1594 /* In 8-bytes */ +#define TX_TSO_OFFLOAD_THRESH_MASK 0x07FF +#define MAX_TSO_FRAME_SIZE (7*1024) + +#define REG_TXF_WATER_MARK 0x1598 /* In 8-bytes */ +#define TXF_WATER_MARK_MASK 0x0FFF +#define TXF_LOW_WATER_MARK_SHIFT 0 +#define TXF_HIGH_WATER_MARK_SHIFT 16 +#define TXQ_CTRL_BURST_MODE_EN 0x80000000 + +#define REG_THRUPUT_MON_CTRL 0x159C +#define THRUPUT_MON_RATE_MASK 0x3 +#define THRUPUT_MON_RATE_SHIFT 0 +#define THRUPUT_MON_EN 0x80 + +/* RXQ Control Register */ +#define REG_RXQ_CTRL 0x15A0 +#define ASPM_THRUPUT_LIMIT_MASK 0x3 +#define ASPM_THRUPUT_LIMIT_SHIFT 0 +#define ASPM_THRUPUT_LIMIT_NO 0x00 +#define ASPM_THRUPUT_LIMIT_1M 0x01 +#define ASPM_THRUPUT_LIMIT_10M 0x02 +#define ASPM_THRUPUT_LIMIT_100M 0x03 +#define IPV6_CHKSUM_CTRL_EN BIT(7) +#define RXQ_RFD_BURST_NUM_MASK 0x003F +#define RXQ_RFD_BURST_NUM_SHIFT 20 +#define RXQ_NUM_RFD_PREF_DEF 8 +#define RSS_MODE_MASK 3UL +#define RSS_MODE_SHIFT 26 +#define RSS_MODE_DIS 0 +#define RSS_MODE_SQSI 1 +#define RSS_MODE_MQSI 2 +#define RSS_MODE_MQMI 3 +#define RSS_NIP_QUEUE_SEL BIT(28) /* 0:q0, 1:table */ +#define RRS_HASH_CTRL_EN BIT(29) +#define RX_CUT_THRU_EN BIT(30) +#define RXQ_CTRL_EN BIT(31) + +#define REG_RFD_FREE_THRESH 0x15A4 +#define RFD_FREE_THRESH_MASK 0x003F +#define RFD_FREE_HI_THRESH_SHIFT 0 +#define RFD_FREE_LO_THRESH_SHIFT 6 + +/* RXF flow control register */ +#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8 +#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0 +#define RXQ_RXF_PAUSE_TH_HI_MASK 0x0FFF +#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16 +#define RXQ_RXF_PAUSE_TH_LO_MASK 0x0FFF + +#define REG_RXD_DMA_CTRL 0x15AC +#define RXD_DMA_THRESH_MASK 0x0FFF /* In 8-bytes */ +#define RXD_DMA_THRESH_SHIFT 0 +#define RXD_DMA_DOWN_TIMER_MASK 0xFFFF +#define RXD_DMA_DOWN_TIMER_SHIFT 16 + +/* DMA Engine Control Register */ +#define REG_DMA_CTRL 0x15C0 +#define DMA_CTRL_SMB_NOW BIT(31) +#define DMA_CTRL_WPEND_CLR BIT(30) +#define DMA_CTRL_RPEND_CLR BIT(29) +#define DMA_CTRL_WDLY_CNT_MASK 0xFUL +#define DMA_CTRL_WDLY_CNT_SHIFT 16 +#define DMA_CTRL_WDLY_CNT_DEF 4 +#define DMA_CTRL_RDLY_CNT_MASK 0x1FUL +#define DMA_CTRL_RDLY_CNT_SHIFT 11 +#define DMA_CTRL_RDLY_CNT_DEF 15 
+#define DMA_CTRL_RREQ_PRI_DATA BIT(10) /* 0:tpd, 1:data */ +#define DMA_CTRL_WREQ_BLEN_MASK 7UL +#define DMA_CTRL_WREQ_BLEN_SHIFT 7 +#define DMA_CTRL_RREQ_BLEN_MASK 7UL +#define DMA_CTRL_RREQ_BLEN_SHIFT 4 +#define L1C_CTRL_DMA_RCB_LEN128 BIT(3) /* 0:64bytes,1:128bytes */ +#define DMA_CTRL_RORDER_MODE_MASK 7UL +#define DMA_CTRL_RORDER_MODE_SHIFT 0 +#define DMA_CTRL_RORDER_MODE_OUT 4 +#define DMA_CTRL_RORDER_MODE_ENHANCE 2 +#define DMA_CTRL_RORDER_MODE_IN 1 + +/* INT-triggle/SMB Control Register */ +#define REG_SMB_STAT_TIMER 0x15C4 /* 2us resolution */ +#define SMB_STAT_TIMER_MASK 0xFFFFFF +#define REG_TINT_TPD_THRESH 0x15C8 /* tpd th to trig intrrupt */ + +/* Mail box */ +#define MB_RFDX_PROD_IDX_MASK 0xFFFF +#define REG_MB_RFD0_PROD_IDX 0x15E0 + +#define REG_TPD_PRI1_PIDX 0x15F0 /* 16bit,hi-tpd producer idx */ +#define REG_TPD_PRI0_PIDX 0x15F2 /* 16bit,lo-tpd producer idx */ +#define REG_TPD_PRI1_CIDX 0x15F4 /* 16bit,hi-tpd consumer idx */ +#define REG_TPD_PRI0_CIDX 0x15F6 /* 16bit,lo-tpd consumer idx */ + +#define REG_MB_RFD01_CONS_IDX 0x15F8 +#define MB_RFD0_CONS_IDX_MASK 0x0000FFFF +#define MB_RFD1_CONS_IDX_MASK 0xFFFF0000 + +/* Interrupt Status Register */ +#define REG_ISR 0x1600 +#define ISR_SMB 0x00000001 +#define ISR_TIMER 0x00000002 +/* + * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set + * in Table 51 Selene Master Control Register (Offset 0x1400). + */ +#define ISR_MANUAL 0x00000004 +#define ISR_HW_RXF_OV 0x00000008 /* RXF overflow interrupt */ +#define ISR_RFD0_UR 0x00000010 /* RFD0 under run */ +#define ISR_RFD1_UR 0x00000020 +#define ISR_RFD2_UR 0x00000040 +#define ISR_RFD3_UR 0x00000080 +#define ISR_TXF_UR 0x00000100 +#define ISR_DMAR_TO_RST 0x00000200 +#define ISR_DMAW_TO_RST 0x00000400 +#define ISR_TX_CREDIT 0x00000800 +#define ISR_GPHY 0x00001000 +/* GPHY low power state interrupt */ +#define ISR_GPHY_LPW 0x00002000 +#define ISR_TXQ_TO_RST 0x00004000 +#define ISR_TX_PKT 0x00008000 +#define ISR_RX_PKT_0 0x00010000 +#define ISR_RX_PKT_1 0x00020000 +#define ISR_RX_PKT_2 0x00040000 +#define ISR_RX_PKT_3 0x00080000 +#define ISR_MAC_RX 0x00100000 +#define ISR_MAC_TX 0x00200000 +#define ISR_UR_DETECTED 0x00400000 +#define ISR_FERR_DETECTED 0x00800000 +#define ISR_NFERR_DETECTED 0x01000000 +#define ISR_CERR_DETECTED 0x02000000 +#define ISR_PHY_LINKDOWN 0x04000000 +#define ISR_DIS_INT 0x80000000 + +/* Interrupt Mask Register */ +#define REG_IMR 0x1604 + +#define IMR_NORMAL_MASK (\ + ISR_MANUAL |\ + ISR_HW_RXF_OV |\ + ISR_RFD0_UR |\ + ISR_TXF_UR |\ + ISR_DMAR_TO_RST |\ + ISR_TXQ_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_GPHY |\ + ISR_TX_PKT |\ + ISR_RX_PKT_0 |\ + ISR_GPHY_LPW |\ + ISR_PHY_LINKDOWN) + +#define ISR_RX_PKT (\ + ISR_RX_PKT_0 |\ + ISR_RX_PKT_1 |\ + ISR_RX_PKT_2 |\ + ISR_RX_PKT_3) + +#define ISR_OVER (\ + ISR_RFD0_UR |\ + ISR_RFD1_UR |\ + ISR_RFD2_UR |\ + ISR_RFD3_UR |\ + ISR_HW_RXF_OV |\ + ISR_TXF_UR) + +#define ISR_ERROR (\ + ISR_DMAR_TO_RST |\ + ISR_TXQ_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_PHY_LINKDOWN) + +#define REG_INT_RETRIG_TIMER 0x1608 +#define INT_RETRIG_TIMER_MASK 0xFFFF + +#define REG_MAC_RX_STATUS_BIN 0x1700 +#define REG_MAC_RX_STATUS_END 0x175c +#define REG_MAC_TX_STATUS_BIN 0x1760 +#define REG_MAC_TX_STATUS_END 0x17c0 + +#define REG_CLK_GATING_CTRL 0x1814 +#define CLK_GATING_DMAW_EN 0x0001 +#define CLK_GATING_DMAR_EN 0x0002 +#define CLK_GATING_TXQ_EN 0x0004 +#define CLK_GATING_RXQ_EN 0x0008 +#define CLK_GATING_TXMAC_EN 0x0010 +#define CLK_GATING_RXMAC_EN 0x0020 + +#define CLK_GATING_EN_ALL (CLK_GATING_DMAW_EN |\ + CLK_GATING_DMAR_EN |\ + 
CLK_GATING_TXQ_EN |\ + CLK_GATING_RXQ_EN |\ + CLK_GATING_TXMAC_EN|\ + CLK_GATING_RXMAC_EN) + +/* DEBUG ADDR */ +#define REG_DEBUG_DATA0 0x1900 +#define REG_DEBUG_DATA1 0x1904 + +#define L1D_MPW_PHYID1 0xD01C /* V7 */ +#define L1D_MPW_PHYID2 0xD01D /* V1-V6 */ +#define L1D_MPW_PHYID3 0xD01E /* V8 */ + + +/* Autoneg Advertisement Register */ +#define ADVERTISE_DEFAULT_CAP \ + (ADVERTISE_ALL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM) + +/* 1000BASE-T Control Register */ +#define GIGA_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port 0=DTE device */ + +#define GIGA_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master 0=Configure PHY as Slave */ +#define GIGA_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ +#define GIGA_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define GIGA_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define GIGA_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define GIGA_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define GIGA_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ +#define GIGA_CR_1000T_SPEED_MASK 0x0300 +#define GIGA_CR_1000T_DEFAULT_CAP 0x0300 + +/* PHY Specific Status Register */ +#define MII_GIGA_PSSR 0x11 +#define GIGA_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define GIGA_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define GIGA_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define GIGA_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define GIGA_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define GIGA_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +/* PHY Interrupt Enable Register */ +#define MII_IER 0x12 +#define IER_LINK_UP 0x0400 +#define IER_LINK_DOWN 0x0800 + +/* PHY Interrupt Status Register */ +#define MII_ISR 0x13 +#define ISR_LINK_UP 0x0400 +#define ISR_LINK_DOWN 0x0800 + +/* Cable-Detect-Test Control Register */ +#define MII_CDTC 0x16 +#define CDTC_EN_OFF 0 /* sc */ +#define CDTC_EN_BITS 1 +#define CDTC_PAIR_OFF 8 +#define CDTC_PAIR_BIT 2 + +/* Cable-Detect-Test Status Register */ +#define MII_CDTS 0x1C +#define CDTS_STATUS_OFF 8 +#define CDTS_STATUS_BITS 2 +#define CDTS_STATUS_NORMAL 0 +#define CDTS_STATUS_SHORT 1 +#define CDTS_STATUS_OPEN 2 +#define CDTS_STATUS_INVALID 3 + +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + +/***************************** debug port *************************************/ + +#define MIIDBG_ANACTRL 0x00 +#define ANACTRL_CLK125M_DELAY_EN 0x8000 +#define ANACTRL_VCO_FAST 0x4000 +#define ANACTRL_VCO_SLOW 0x2000 +#define ANACTRL_AFE_MODE_EN 0x1000 +#define ANACTRL_LCKDET_PHY 0x800 +#define ANACTRL_LCKDET_EN 0x400 +#define ANACTRL_OEN_125M 0x200 +#define ANACTRL_HBIAS_EN 0x100 +#define ANACTRL_HB_EN 0x80 +#define ANACTRL_SEL_HSP 0x40 +#define ANACTRL_CLASSA_EN 0x20 +#define ANACTRL_MANUSWON_SWR_MASK 3U +#define ANACTRL_MANUSWON_SWR_SHIFT 2 +#define ANACTRL_MANUSWON_SWR_2V 0 +#define ANACTRL_MANUSWON_SWR_1P9V 1 +#define ANACTRL_MANUSWON_SWR_1P8V 2 +#define ANACTRL_MANUSWON_SWR_1P7V 3 +#define ANACTRL_MANUSWON_BW3_4M 0x2 +#define ANACTRL_RESTART_CAL 0x1 +#define ANACTRL_DEF 0x02EF + +#define MIIDBG_SYSMODCTRL 0x04 +#define SYSMODCTRL_IECHOADJ_PFMH_PHY 0x8000 +#define SYSMODCTRL_IECHOADJ_BIASGEN 0x4000 +#define SYSMODCTRL_IECHOADJ_PFML_PHY 0x2000 +#define SYSMODCTRL_IECHOADJ_PS_MASK 3U +#define SYSMODCTRL_IECHOADJ_PS_SHIFT 10 +#define SYSMODCTRL_IECHOADJ_PS_40 3 +#define SYSMODCTRL_IECHOADJ_PS_20 2 +#define SYSMODCTRL_IECHOADJ_PS_0 1 +#define SYSMODCTRL_IECHOADJ_10BT_100MV 0x40 /* 1:100mv, 
0:200mv */ +#define SYSMODCTRL_IECHOADJ_HLFAP_MASK 3U +#define SYSMODCTRL_IECHOADJ_HLFAP_SHIFT 4 +#define SYSMODCTRL_IECHOADJ_VDFULBW 0x8 +#define SYSMODCTRL_IECHOADJ_VDBIASHLF 0x4 +#define SYSMODCTRL_IECHOADJ_VDAMPHLF 0x2 +#define SYSMODCTRL_IECHOADJ_VDLANSW 0x1 +#define SYSMODCTRL_IECHOADJ_DEF 0x88BB /* ???? */ + +/* for l1d & l2cb */ +#define SYSMODCTRL_IECHOADJ_CUR_ADD 0x8000 +#define SYSMODCTRL_IECHOADJ_CUR_MASK 7U +#define SYSMODCTRL_IECHOADJ_CUR_SHIFT 12 +#define SYSMODCTRL_IECHOADJ_VOL_MASK 0xFU +#define SYSMODCTRL_IECHOADJ_VOL_SHIFT 8 +#define SYSMODCTRL_IECHOADJ_VOL_17ALL 3 +#define SYSMODCTRL_IECHOADJ_VOL_100M15 1 +#define SYSMODCTRL_IECHOADJ_VOL_10M17 0 +#define SYSMODCTRL_IECHOADJ_BIAS1_MASK 0xFU +#define SYSMODCTRL_IECHOADJ_BIAS1_SHIFT 4 +#define SYSMODCTRL_IECHOADJ_BIAS2_MASK 0xFU +#define SYSMODCTRL_IECHOADJ_BIAS2_SHIFT 0 +#define L1D_SYSMODCTRL_IECHOADJ_DEF 0x4FBB + +#define MIIDBG_SRDSYSMOD 0x05 +#define SRDSYSMOD_LCKDET_EN 0x2000 +#define SRDSYSMOD_PLL_EN 0x800 +#define SRDSYSMOD_SEL_HSP 0x400 +#define SRDSYSMOD_HLFTXDR 0x200 +#define SRDSYSMOD_TXCLK_DELAY_EN 0x100 +#define SRDSYSMOD_TXELECIDLE 0x80 +#define SRDSYSMOD_DEEMP_EN 0x40 +#define SRDSYSMOD_MS_PAD 0x4 +#define SRDSYSMOD_CDR_ADC_VLTG 0x2 +#define SRDSYSMOD_CDR_DAC_1MA 0x1 +#define SRDSYSMOD_DEF 0x2C46 + +#define MIIDBG_CFGLPSPD 0x0A +#define CFGLPSPD_RSTCNT_MASK 3U +#define CFGLPSPD_RSTCNT_SHIFT 14 +#define CFGLPSPD_RSTCNT_CLK125SW 0x2000 + +#define MIIDBG_HIBNEG 0x0B +#define HIBNEG_PSHIB_EN 0x8000 +#define HIBNEG_WAKE_BOTH 0x4000 +#define HIBNEG_ONOFF_ANACHG_SUDEN 0x2000 +#define HIBNEG_HIB_PULSE 0x1000 +#define HIBNEG_GATE_25M_EN 0x800 +#define HIBNEG_RST_80U 0x400 +#define HIBNEG_RST_TIMER_MASK 3U +#define HIBNEG_RST_TIMER_SHIFT 8 +#define HIBNEG_GTX_CLK_DELAY_MASK 3U +#define HIBNEG_GTX_CLK_DELAY_SHIFT 5 +#define HIBNEG_BYPSS_BRKTIMER 0x10 +#define HIBNEG_DEF 0xBC40 + +#define MIIDBG_TST10BTCFG 0x12 +#define TST10BTCFG_INTV_TIMER_MASK 3U +#define TST10BTCFG_INTV_TIMER_SHIFT 14 +#define TST10BTCFG_TRIGER_TIMER_MASK 3U +#define TST10BTCFG_TRIGER_TIMER_SHIFT 12 +#define TST10BTCFG_DIV_MAN_MLT3_EN 0x800 +#define TST10BTCFG_OFF_DAC_IDLE 0x400 +#define TST10BTCFG_LPBK_DEEP 0x4 /* 1:deep,0:shallow */ +#define TST10BTCFG_DEF 0x4C04 + +#define MIIDBG_AZ_ANADECT 0x15 +#define AZ_ANADECT_10BTRX_TH 0x8000 +#define AZ_ANADECT_BOTH_01CHNL 0x4000 +#define AZ_ANADECT_INTV_MASK 0x3FU +#define AZ_ANADECT_INTV_SHIFT 8 +#define AZ_ANADECT_THRESH_MASK 0xFU +#define AZ_ANADECT_THRESH_SHIFT 4 +#define AZ_ANADECT_CHNL_MASK 0xFU +#define AZ_ANADECT_CHNL_SHIFT 0 +#define AZ_ANADECT_DEF 0x3220 +#define AZ_ANADECT_LONG 0xb210 + +#define MIIDBG_MSE16DB 0x18 /* l1d */ +#define L1D_MSE16DB_UP 0x05EA +#define L1D_MSE16DB_DOWN 0x02EA + +#define MIIDBG_LEGCYPS 0x29 +#define LEGCYPS_EN 0x8000 +#define LEGCYPS_DAC_AMP1000_MASK 7U +#define LEGCYPS_DAC_AMP1000_SHIFT 12 +#define LEGCYPS_DAC_AMP100_MASK 7U +#define LEGCYPS_DAC_AMP100_SHIFT 9 +#define LEGCYPS_DAC_AMP10_MASK 7U +#define LEGCYPS_DAC_AMP10_SHIFT 6 +#define LEGCYPS_UNPLUG_TIMER_MASK 7U +#define LEGCYPS_UNPLUG_TIMER_SHIFT 3 +#define LEGCYPS_UNPLUG_DECT_EN 0x4 +#define LEGCYPS_ECNC_PS_EN 0x1 +#define L1D_LEGCYPS_DEF 0x129D +#define L1C_LEGCYPS_DEF 0x36DD + +#define MIIDBG_TST100BTCFG 0x36 +#define TST100BTCFG_NORMAL_BW_EN 0x8000 +#define TST100BTCFG_BADLNK_BYPASS 0x4000 +#define TST100BTCFG_SHORTCABL_TH_MASK 0x3FU +#define TST100BTCFG_SHORTCABL_TH_SHIFT 8 +#define TST100BTCFG_LITCH_EN 0x80 +#define TST100BTCFG_VLT_SW 0x40 +#define TST100BTCFG_LONGCABL_TH_MASK 0x3FU +#define 
TST100BTCFG_LONGCABL_TH_SHIFT 0 +#define TST100BTCFG_DEF 0xE12C + +#define MIIDBG_VOLT_CTRL 0x3B /* only for l2cb 1 & 2 */ +#define VOLT_CTRL_CABLE1TH_MASK 0x1FFU +#define VOLT_CTRL_CABLE1TH_SHIFT 7 +#define VOLT_CTRL_AMPCTRL_MASK 3U +#define VOLT_CTRL_AMPCTRL_SHIFT 5 +#define VOLT_CTRL_SW_BYPASS 0x10 +#define VOLT_CTRL_SWLOWEST 0x8 +#define VOLT_CTRL_DACAMP10_MASK 7U +#define VOLT_CTRL_DACAMP10_SHIFT 0 + +#define MIIDBG_CABLE1TH_DET 0x3E +#define CABLE1TH_DET_EN 0x8000 + + +/******* dev 3 *********/ +#define MIIEXT_PCS 3 + +#define MIIEXT_CLDCTRL3 0x8003 +#define CLDCTRL3_BP_CABLE1TH_DET_GT 0x8000 +#define CLDCTRL3_AZ_DISAMP 0x1000 +#define L2CB_CLDCTRL3 0x4D19 +#define L1D_CLDCTRL3 0xDD19 + +#define MIIEXT_CLDCTRL6 0x8006 +#define CLDCTRL6_CAB_LEN_MASK 0x1FFU +#define CLDCTRL6_CAB_LEN_SHIFT 0 +#define CLDCTRL6_CAB_LEN_SHORT 0x50 + +/********* dev 7 **********/ +#define MIIEXT_ANEG 7 + +#define MIIEXT_LOCAL_EEEADV 0x3C +#define LOCAL_EEEADV_1000BT 0x4 +#define LOCAL_EEEADV_100BT 0x2 + +#define MIIEXT_REMOTE_EEEADV 0x3D +#define REMOTE_EEEADV_1000BT 0x4 +#define REMOTE_EEEADV_100BT 0x2 + +#define MIIEXT_EEE_ANEG 0x8000 +#define EEE_ANEG_1000M 0x4 +#define EEE_ANEG_100M 0x2 + +#endif /*_ATL1C_HW_H_*/ diff --git a/addons/atl1c/src/4.4.180/atl1c_main.c b/addons/atl1c/src/4.4.180/atl1c_main.c new file mode 100644 index 00000000..c08d34f6 --- /dev/null +++ b/addons/atl1c/src/4.4.180/atl1c_main.c @@ -0,0 +1,2813 @@ +/* + * Copyright(c) 2008 - 2009 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include "atl1c.h" + +#define ATL1C_DRV_VERSION "1.0.1.1-NAPI" +char atl1c_driver_name[] = "atl1c"; +char atl1c_driver_version[] = ATL1C_DRV_VERSION; + +/* + * atl1c_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id atl1c_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D_2_0)}, + /* required last entry */ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl); + +MODULE_AUTHOR("Jie Yang"); +MODULE_AUTHOR("Qualcomm Atheros Inc., "); +MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(ATL1C_DRV_VERSION); + +static int atl1c_stop_mac(struct atl1c_hw *hw); +static void atl1c_disable_l0s_l1(struct atl1c_hw *hw); +static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed); +static void atl1c_start_mac(struct atl1c_adapter *adapter); +static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, + int *work_done, int work_to_do); +static int atl1c_up(struct atl1c_adapter *adapter); +static void atl1c_down(struct atl1c_adapter *adapter); +static int atl1c_reset_mac(struct atl1c_hw *hw); +static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter); +static int atl1c_configure(struct atl1c_adapter *adapter); +static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter); + +static const u16 atl1c_pay_load_size[] = { + 128, 256, 512, 1024, 2048, 4096, +}; + + +static const u32 atl1c_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; +static void atl1c_pcie_patch(struct atl1c_hw *hw) +{ + u32 mst_data, data; + + /* pclk sel could switch to 25M */ + AT_READ_REG(hw, REG_MASTER_CTRL, &mst_data); + mst_data &= ~MASTER_CTRL_CLK_SEL_DIS; + AT_WRITE_REG(hw, REG_MASTER_CTRL, mst_data); + + /* WoL/PCIE related settings */ + if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) { + AT_READ_REG(hw, REG_PCIE_PHYMISC, &data); + data |= PCIE_PHYMISC_FORCE_RCV_DET; + AT_WRITE_REG(hw, REG_PCIE_PHYMISC, data); + } else { /* new dev set bit5 of MASTER */ + if (!(mst_data & MASTER_CTRL_WAKEN_25M)) + AT_WRITE_REG(hw, REG_MASTER_CTRL, + mst_data | MASTER_CTRL_WAKEN_25M); + } + /* aspm/PCIE setting only for l2cb 1.0 */ + if (hw->nic_type == athr_l2c_b && hw->revision_id == L2CB_V10) { + AT_READ_REG(hw, REG_PCIE_PHYMISC2, &data); + data = FIELD_SETX(data, PCIE_PHYMISC2_CDR_BW, + L2CB1_PCIE_PHYMISC2_CDR_BW); + data = FIELD_SETX(data, PCIE_PHYMISC2_L0S_TH, + L2CB1_PCIE_PHYMISC2_L0S_TH); + AT_WRITE_REG(hw, REG_PCIE_PHYMISC2, data); + /* extend L1 sync timer */ + AT_READ_REG(hw, REG_LINK_CTRL, &data); + data |= LINK_CTRL_EXT_SYNC; + AT_WRITE_REG(hw, REG_LINK_CTRL, data); + } + /* l2cb 1.x & l1d 1.x */ + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d) { + AT_READ_REG(hw, REG_PM_CTRL, &data); + data |= PM_CTRL_L0S_BUFSRX_EN; + AT_WRITE_REG(hw, REG_PM_CTRL, data); + /* clear vendor msg */ + AT_READ_REG(hw, REG_DMA_DBG, &data); + AT_WRITE_REG(hw, REG_DMA_DBG, data & ~DMA_DBG_VENDOR_MSG); + } +} + 
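The helpers used throughout this file (FIELDX(), FIELD_SETX(), FIELD_GETX()) pair each *_MASK/*_SHIFT define from the register block above via token pasting. Their definitions live in atl1c.h, which is not part of this hunk, so the following is only a minimal self-contained sketch of the assumed convention, built around a hypothetical DEMO_FIELD register field:

    /* Sketch of the _MASK/_SHIFT helper convention; the real macros are in
     * atl1c.h (not shown here), so treat these definitions as an assumption. */
    #include <stdio.h>

    #define FIELDX(_name, _v) (((_v) & (_name##_MASK)) << (_name##_SHIFT))
    #define FIELD_SETX(_x, _name, _v) \
            (((_x) & ~((_name##_MASK) << (_name##_SHIFT))) | FIELDX(_name, _v))
    #define FIELD_GETX(_x, _name) (((_x) >> (_name##_SHIFT)) & (_name##_MASK))

    /* hypothetical field pair, mirroring e.g. TXQ_NUM_TPD_BURST_{MASK,SHIFT} */
    #define DEMO_FIELD_MASK  0xFUL
    #define DEMO_FIELD_SHIFT 4

    int main(void)
    {
            unsigned long reg = 0xFFFFFFFFUL;

            reg = FIELD_SETX(reg, DEMO_FIELD, 5); /* clear bits 7:4, pack 5 */
            printf("reg=0x%lx field=%lu\n", reg, FIELD_GETX(reg, DEMO_FIELD));
            return 0;
    }

This is why composite values such as TXQ_CFGV above can be built purely from FIELDX() terms OR'ed with flag bits: each term only touches its own masked bit window.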
+/* FIXME: no longer needed? */ +/* + * atl1c_reset_pcie - reset the PCIE module + */ +static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag) +{ + u32 data; + u32 pci_cmd; + struct pci_dev *pdev = hw->adapter->pdev; + int pos; + + AT_READ_REG(hw, PCI_COMMAND, &pci_cmd); + pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; + pci_cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | + PCI_COMMAND_IO); + AT_WRITE_REG(hw, PCI_COMMAND, pci_cmd); + + /* + * Clear any PowerSaving Settings + */ + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + /* wol sts read-clear */ + AT_READ_REG(hw, REG_WOL_CTRL, &data); + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + + /* + * Mask some PCIe error bits + */ + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); + if (pos) { + pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data); + data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP); + pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data); + } + /* clear error status */ + pcie_capability_write_word(pdev, PCI_EXP_DEVSTA, + PCI_EXP_DEVSTA_NFED | + PCI_EXP_DEVSTA_FED | + PCI_EXP_DEVSTA_CED | + PCI_EXP_DEVSTA_URD); + + AT_READ_REG(hw, REG_LTSSM_ID_CTRL, &data); + data &= ~LTSSM_ID_EN_WRO; + AT_WRITE_REG(hw, REG_LTSSM_ID_CTRL, data); + + atl1c_pcie_patch(hw); + if (flag & ATL1C_PCIE_L0S_L1_DISABLE) + atl1c_disable_l0s_l1(hw); + + msleep(5); +} + +/** + * atl1c_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static inline void atl1c_irq_enable(struct atl1c_adapter *adapter) +{ + if (likely(atomic_dec_and_test(&adapter->irq_sem))) { + AT_WRITE_REG(&adapter->hw, REG_ISR, 0x7FFFFFFF); + AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); + AT_WRITE_FLUSH(&adapter->hw); + } +} + +/** + * atl1c_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static inline void atl1c_irq_disable(struct atl1c_adapter *adapter) +{ + atomic_inc(&adapter->irq_sem); + AT_WRITE_REG(&adapter->hw, REG_IMR, 0); + AT_WRITE_REG(&adapter->hw, REG_ISR, ISR_DIS_INT); + AT_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +/** + * atl1c_irq_reset - reset interrupt configuration on the NIC + * @adapter: board private structure + */ +static inline void atl1c_irq_reset(struct atl1c_adapter *adapter) +{ + atomic_set(&adapter->irq_sem, 1); + atl1c_irq_enable(adapter); +} + +/* + * atl1c_wait_until_idle - wait up to AT_HW_MAX_IDLE_DELAY reads + * of the idle status register until the device is actually idle + */ +static u32 atl1c_wait_until_idle(struct atl1c_hw *hw, u32 modu_ctrl) +{ + int timeout; + u32 data; + + for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { + AT_READ_REG(hw, REG_IDLE_STATUS, &data); + if ((data & modu_ctrl) == 0) + return 0; + msleep(1); + } + return data; +} + +/** + * atl1c_phy_config - Timer Call-back + * @data: pointer to the adapter cast into an unsigned long + */ +static void atl1c_phy_config(unsigned long data) +{ + struct atl1c_adapter *adapter = (struct atl1c_adapter *) data; + struct atl1c_hw *hw = &adapter->hw; + unsigned long flags; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + atl1c_restart_autoneg(hw); + spin_unlock_irqrestore(&adapter->mdio_lock, flags); +} + +void atl1c_reinit_locked(struct atl1c_adapter *adapter) +{ + WARN_ON(in_interrupt()); + atl1c_down(adapter); + atl1c_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); +} + +static void atl1c_check_link_status(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + struct
net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + int err; + unsigned long flags; + u16 speed, duplex, phy_data; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + /* MII_BMSR must be read twice */ + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(hw, MII_BMSR, &phy_data); + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + + if ((phy_data & BMSR_LSTATUS) == 0) { + /* link down */ + netif_carrier_off(netdev); + hw->hibernate = true; + if (atl1c_reset_mac(hw) != 0) + if (netif_msg_hw(adapter)) + dev_warn(&pdev->dev, "reset mac failed\n"); + atl1c_set_aspm(hw, SPEED_0); + atl1c_post_phy_linkchg(hw, SPEED_0); + atl1c_reset_dma_ring(adapter); + atl1c_configure(adapter); + } else { + /* Link Up */ + hw->hibernate = false; + spin_lock_irqsave(&adapter->mdio_lock, flags); + err = atl1c_get_speed_and_duplex(hw, &speed, &duplex); + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + if (unlikely(err)) + return; + /* link result is our setting */ + if (adapter->link_speed != speed || + adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl1c_set_aspm(hw, speed); + atl1c_post_phy_linkchg(hw, speed); + atl1c_start_mac(adapter); + if (netif_msg_link(adapter)) + dev_info(&pdev->dev, + "%s: %s NIC Link is Up<%d Mbps %s>\n", + atl1c_driver_name, netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex"); + } + if (!netif_carrier_ok(netdev)) + netif_carrier_on(netdev); + } +} + +static void atl1c_link_chg_event(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + u16 phy_data; + u16 link_up; + + spin_lock(&adapter->mdio_lock); + atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atl1c_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->mdio_lock); + link_up = phy_data & BMSR_LSTATUS; + /* notify upper layer link down ASAP */ + if (!link_up) { + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + netif_carrier_off(netdev); + if (netif_msg_link(adapter)) + dev_info(&pdev->dev, + "%s: %s NIC Link is Down\n", + atl1c_driver_name, netdev->name); + adapter->link_speed = SPEED_0; + } + } + + set_bit(ATL1C_WORK_EVENT_LINK_CHANGE, &adapter->work_event); + schedule_work(&adapter->common_task); +} + +static void atl1c_common_task(struct work_struct *work) +{ + struct atl1c_adapter *adapter; + struct net_device *netdev; + + adapter = container_of(work, struct atl1c_adapter, common_task); + netdev = adapter->netdev; + + if (test_bit(__AT_DOWN, &adapter->flags)) + return; + + if (test_and_clear_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event)) { + netif_device_detach(netdev); + atl1c_down(adapter); + atl1c_up(adapter); + netif_device_attach(netdev); + } + + if (test_and_clear_bit(ATL1C_WORK_EVENT_LINK_CHANGE, + &adapter->work_event)) { + atl1c_irq_disable(adapter); + atl1c_check_link_status(adapter); + atl1c_irq_enable(adapter); + } +} + + +static void atl1c_del_timer(struct atl1c_adapter *adapter) +{ + del_timer_sync(&adapter->phy_config_timer); +} + + +/** + * atl1c_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atl1c_tx_timeout(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event); + schedule_work(&adapter->common_task); +} + +/** + * atl1c_set_multi - Multicast and
Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + */ +static void atl1c_set_multi(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u32 mac_ctrl_data; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + AT_READ_REG(hw, REG_MAC_CTRL, &mac_ctrl_data); + + if (netdev->flags & IFF_PROMISC) { + mac_ctrl_data |= MAC_CTRL_PROMIS_EN; + } else if (netdev->flags & IFF_ALLMULTI) { + mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; + mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN; + } else { + mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + } + + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); + + /* clear the old settings from the multicast hash table */ + AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + /* compute mc addresses' hash value, and put it into the hash table */ + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atl1c_hash_mc_addr(hw, ha->addr); + atl1c_hash_set(hw, hash_value); + } +} + +static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + *mac_ctrl_data |= MAC_CTRL_RMV_VLAN; + } else { + /* disable VLAN tag insert/strip */ + *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN; + } +} + +static void atl1c_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + u32 mac_ctrl_data = 0; + + if (netif_msg_pktdata(adapter)) + dev_dbg(&pdev->dev, "atl1c_vlan_mode\n"); + + atl1c_irq_disable(adapter); + AT_READ_REG(&adapter->hw, REG_MAC_CTRL, &mac_ctrl_data); + __atl1c_vlan_mode(features, &mac_ctrl_data); + AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); + atl1c_irq_enable(adapter); +} + +static void atl1c_restore_vlan(struct atl1c_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + + if (netif_msg_pktdata(adapter)) + dev_dbg(&pdev->dev, "atl1c_restore_vlan\n"); + atl1c_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +/** + * atl1c_set_mac_addr - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atl1c_set_mac_addr(struct net_device *netdev, void *p) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netif_running(netdev)) + return -EBUSY; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); + + return 0; +} + +static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, + struct net_device *dev) +{ + unsigned int head_size; + int mtu = dev->mtu; + + adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
+ roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; + + head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + adapter->rx_frag_size = roundup_pow_of_two(head_size); +} + +static netdev_features_t atl1c_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + if (netdev->mtu > MAX_TSO_FRAME_SIZE) + features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + + return features; +} + +static int atl1c_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + atl1c_vlan_mode(netdev, features); + + return 0; +} + +/** + * atl1c_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1c_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + /* Fast Ethernet controller doesn't support jumbo packet */ + if (((hw->nic_type == athr_l2c || + hw->nic_type == athr_l2c_b || + hw->nic_type == athr_l2c_b2) && new_mtu > ETH_DATA_LEN) || + max_frame < ETH_ZLEN + ETH_FCS_LEN || + max_frame > MAX_JUMBO_FRAME_SIZE) { + if (netif_msg_link(adapter)) + dev_warn(&adapter->pdev->dev, "invalid MTU setting\n"); + return -EINVAL; + } + /* set MTU */ + if (old_mtu != new_mtu && netif_running(netdev)) { + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + netdev->mtu = new_mtu; + adapter->hw.max_frame_size = new_mtu; + atl1c_set_rxbufsize(adapter, netdev); + atl1c_down(adapter); + netdev_update_features(netdev); + atl1c_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); + } + return 0; +} + +/* + * caller should hold mdio_lock + */ +static int atl1c_mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1c_read_phy_reg(&adapter->hw, reg_num, &result); + return result; +} + +static void atl1c_mdio_write(struct net_device *netdev, int phy_id, + int reg_num, int val) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + atl1c_write_phy_reg(&adapter->hw, reg_num, val); +} + +static int atl1c_mii_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct mii_ioctl_data *data = if_mii(ifr); + unsigned long flags; + int retval = 0; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 0; + break; + + case SIOCGMIIREG: + if (atl1c_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) { + retval = -EIO; + goto out; + } + break; + + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) { + retval = -EFAULT; + goto out; + } + + dev_dbg(&pdev->dev, " write %x %x", + data->reg_num, data->val_in); + if (atl1c_write_phy_reg(&adapter->hw, + data->reg_num, data->val_in)) { + retval = -EIO; + 
goto out; + } + break; + + default: + retval = -EOPNOTSUPP; + break; + } +out: + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + return retval; +} + +static int atl1c_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atl1c_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * atl1c_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + */ +static int atl1c_alloc_queues(struct atl1c_adapter *adapter) +{ + return 0; +} + +static void atl1c_set_mac_type(struct atl1c_hw *hw) +{ + switch (hw->device_id) { + case PCI_DEVICE_ID_ATTANSIC_L2C: + hw->nic_type = athr_l2c; + break; + case PCI_DEVICE_ID_ATTANSIC_L1C: + hw->nic_type = athr_l1c; + break; + case PCI_DEVICE_ID_ATHEROS_L2C_B: + hw->nic_type = athr_l2c_b; + break; + case PCI_DEVICE_ID_ATHEROS_L2C_B2: + hw->nic_type = athr_l2c_b2; + break; + case PCI_DEVICE_ID_ATHEROS_L1D: + hw->nic_type = athr_l1d; + break; + case PCI_DEVICE_ID_ATHEROS_L1D_2_0: + hw->nic_type = athr_l1d_2; + break; + default: + break; + } +} + +static int atl1c_setup_mac_funcs(struct atl1c_hw *hw) +{ + u32 link_ctrl_data; + + atl1c_set_mac_type(hw); + AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data); + + hw->ctrl_flags = ATL1C_INTR_MODRT_ENABLE | + ATL1C_TXQ_MODE_ENHANCE; + hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT | + ATL1C_ASPM_L1_SUPPORT; + hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON; + + if (hw->nic_type == athr_l1c || + hw->nic_type == athr_l1d || + hw->nic_type == athr_l1d_2) + hw->link_cap_flags |= ATL1C_LINK_CAP_1000M; + return 0; +} + +struct atl1c_platform_patch { + u16 pci_did; + u8 pci_revid; + u16 subsystem_vid; + u16 subsystem_did; + u32 patch_flag; +#define ATL1C_LINK_PATCH 0x1 +}; +static const struct atl1c_platform_patch plats[] = { +{0x2060, 0xC1, 0x1019, 0x8152, 0x1}, +{0x2060, 0xC1, 0x1019, 0x2060, 0x1}, +{0x2060, 0xC1, 0x1019, 0xE000, 0x1}, +{0x2062, 0xC0, 0x1019, 0x8152, 0x1}, +{0x2062, 0xC0, 0x1019, 0x2062, 0x1}, +{0x2062, 0xC0, 0x1458, 0xE000, 0x1}, +{0x2062, 0xC1, 0x1019, 0x8152, 0x1}, +{0x2062, 0xC1, 0x1019, 0x2062, 0x1}, +{0x2062, 0xC1, 0x1458, 0xE000, 0x1}, +{0x2062, 0xC1, 0x1565, 0x2802, 0x1}, +{0x2062, 0xC1, 0x1565, 0x2801, 0x1}, +{0x1073, 0xC0, 0x1019, 0x8151, 0x1}, +{0x1073, 0xC0, 0x1019, 0x1073, 0x1}, +{0x1073, 0xC0, 0x1458, 0xE000, 0x1}, +{0x1083, 0xC0, 0x1458, 0xE000, 0x1}, +{0x1083, 0xC0, 0x1019, 0x8151, 0x1}, +{0x1083, 0xC0, 0x1019, 0x1083, 0x1}, +{0x1083, 0xC0, 0x1462, 0x7680, 0x1}, +{0x1083, 0xC0, 0x1565, 0x2803, 0x1}, +{0}, +}; + +static void atl1c_patch_assign(struct atl1c_hw *hw) +{ + struct pci_dev *pdev = hw->adapter->pdev; + u32 misc_ctrl; + int i = 0; + + hw->msi_lnkpatch = false; + + while (plats[i].pci_did != 0) { + if (plats[i].pci_did == hw->device_id && + plats[i].pci_revid == hw->revision_id && + plats[i].subsystem_vid == hw->subsystem_vendor_id && + plats[i].subsystem_did == hw->subsystem_id) { + if (plats[i].patch_flag & ATL1C_LINK_PATCH) + hw->msi_lnkpatch = true; + } + i++; + } + + if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 && + hw->revision_id == L2CB_V21) { + /* config access mode */ + pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR, + REG_PCIE_DEV_MISC_CTRL); + pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl); + misc_ctrl &= ~0x100; + pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR, + REG_PCIE_DEV_MISC_CTRL); + pci_write_config_dword(pdev, REG_PCIE_IND_ACC_DATA, misc_ctrl); + } +} +/** + * atl1c_sw_init - Initialize 
general software structures (struct atl1c_adapter) + * @adapter: board private structure to initialize + * + * atl1c_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + */ +static int atl1c_sw_init(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 revision; + + + adapter->wol = 0; + device_set_wakeup_enable(&pdev->dev, false); + adapter->link_speed = SPEED_0; + adapter->link_duplex = FULL_DUPLEX; + adapter->tpd_ring[0].count = 1024; + adapter->rfd_ring.count = 512; + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + pci_read_config_dword(pdev, PCI_CLASS_REVISION, &revision); + hw->revision_id = revision & 0xFF; + /* before link up, we assume hibernate is true */ + hw->hibernate = true; + hw->media_type = MEDIA_TYPE_AUTO_SENSOR; + if (atl1c_setup_mac_funcs(hw) != 0) { + dev_err(&pdev->dev, "set mac function pointers failed\n"); + return -1; + } + atl1c_patch_assign(hw); + + hw->intr_mask = IMR_NORMAL_MASK; + hw->phy_configured = false; + hw->preamble_len = 7; + hw->max_frame_size = adapter->netdev->mtu; + hw->autoneg_advertised = ADVERTISED_Autoneg; + hw->indirect_tab = 0xE4E4E4E4; + hw->base_cpu = 0; + + hw->ict = 50000; /* 100ms */ + hw->smb_timer = 200000; /* 400ms */ + hw->rx_imt = 200; + hw->tx_imt = 1000; + + hw->tpd_burst = 5; + hw->rfd_burst = 8; + hw->dma_order = atl1c_dma_ord_out; + hw->dmar_block = atl1c_dma_req_1024; + + if (atl1c_alloc_queues(adapter)) { + dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + /* TODO */ + atl1c_set_rxbufsize(adapter, adapter->netdev); + atomic_set(&adapter->irq_sem, 1); + spin_lock_init(&adapter->mdio_lock); + spin_lock_init(&adapter->tx_lock); + set_bit(__AT_DOWN, &adapter->flags); + + return 0; +} + +static inline void atl1c_clean_buffer(struct pci_dev *pdev, + struct atl1c_buffer *buffer_info) +{ + u16 pci_direction; + if (buffer_info->flags & ATL1C_BUFFER_FREE) + return; + if (buffer_info->dma) { + if (buffer_info->flags & ATL1C_PCIMAP_FROMDEVICE) + pci_direction = PCI_DMA_FROMDEVICE; + else + pci_direction = PCI_DMA_TODEVICE; + + if (buffer_info->flags & ATL1C_PCIMAP_SINGLE) + pci_unmap_single(pdev, buffer_info->dma, + buffer_info->length, pci_direction); + else if (buffer_info->flags & ATL1C_PCIMAP_PAGE) + pci_unmap_page(pdev, buffer_info->dma, + buffer_info->length, pci_direction); + } + if (buffer_info->skb) + dev_consume_skb_any(buffer_info->skb); + buffer_info->dma = 0; + buffer_info->skb = NULL; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); +} +/** + * atl1c_clean_tx_ring - Free Tx-skb + * @adapter: board private structure + */ +static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + struct atl1c_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + u16 index, ring_count; + + ring_count = tpd_ring->count; + for (index = 0; index < ring_count; index++) { + buffer_info = &tpd_ring->buffer_info[index]; + atl1c_clean_buffer(pdev, buffer_info); + } + + netdev_reset_queue(adapter->netdev); + + /* Zero out Tx-buffers */ + memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) * + ring_count); + atomic_set(&tpd_ring->next_to_clean, 0); + tpd_ring->next_to_use = 0; +} + +/** + * atl1c_clean_rx_ring
- Free rx-reservation skbs + * @adapter: board private structure + */ +static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter) +{ + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1c_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + int j; + + for (j = 0; j < rfd_ring->count; j++) { + buffer_info = &rfd_ring->buffer_info[j]; + atl1c_clean_buffer(pdev, buffer_info); + } + /* zero out the descriptor ring */ + memset(rfd_ring->desc, 0, rfd_ring->size); + rfd_ring->next_to_clean = 0; + rfd_ring->next_to_use = 0; + rrd_ring->next_to_use = 0; + rrd_ring->next_to_clean = 0; +} + +/* + * Read / Write Ptr Initialize: + */ +static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter) +{ + struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1c_buffer *buffer_info; + int i, j; + + for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { + tpd_ring[i].next_to_use = 0; + atomic_set(&tpd_ring[i].next_to_clean, 0); + buffer_info = tpd_ring[i].buffer_info; + for (j = 0; j < tpd_ring->count; j++) + ATL1C_SET_BUFFER_STATE(&buffer_info[j], + ATL1C_BUFFER_FREE); + } + rfd_ring->next_to_use = 0; + rfd_ring->next_to_clean = 0; + rrd_ring->next_to_use = 0; + rrd_ring->next_to_clean = 0; + for (j = 0; j < rfd_ring->count; j++) { + buffer_info = &rfd_ring->buffer_info[j]; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); + } +} + +/** + * atl1c_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void atl1c_free_ring_resources(struct atl1c_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + + pci_free_consistent(pdev, adapter->ring_header.size, + adapter->ring_header.desc, + adapter->ring_header.dma); + adapter->ring_header.desc = NULL; + + /* Note: just free tpd_ring.buffer_info, + * it contains rfd_ring.buffer_info, do not double free */ + if (adapter->tpd_ring[0].buffer_info) { + kfree(adapter->tpd_ring[0].buffer_info); + adapter->tpd_ring[0].buffer_info = NULL; + } + if (adapter->rx_page) { + put_page(adapter->rx_page); + adapter->rx_page = NULL; + } +} + +/** + * atl1c_setup_ring_resources - allocate Tx / RX descriptor resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1c_ring_header *ring_header = &adapter->ring_header; + int size; + int i; + int count = 0; + int rx_desc_count = 0; + u32 offset = 0; + + rrd_ring->count = rfd_ring->count; + for (i = 1; i < AT_MAX_TRANSMIT_QUEUE; i++) + tpd_ring[i].count = tpd_ring[0].count; + + /* 2 tpd queue, one high priority queue, + * another normal priority queue */ + size = sizeof(struct atl1c_buffer) * (tpd_ring->count * 2 + + rfd_ring->count); + tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); + if (unlikely(!tpd_ring->buffer_info)) + goto err_nomem; + + for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { + tpd_ring[i].buffer_info = + (tpd_ring->buffer_info + count); + count += tpd_ring[i].count; + } + + rfd_ring->buffer_info = + (tpd_ring->buffer_info + count); + count += rfd_ring->count; + rx_desc_count +=
rfd_ring->count; + + /* + * real ring DMA buffer + * each ring/block may need up to 8 bytes for alignment, hence the + * additional bytes tacked onto the end. + */ + ring_header->size = size = + sizeof(struct atl1c_tpd_desc) * tpd_ring->count * 2 + + sizeof(struct atl1c_rx_free_desc) * rx_desc_count + + sizeof(struct atl1c_recv_ret_status) * rx_desc_count + + 8 * 4; + + ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size, + &ring_header->dma, GFP_KERNEL); + if (unlikely(!ring_header->desc)) { + dev_err(&pdev->dev, "could not get memory for DMA buffer\n"); + goto err_nomem; + } + /* init TPD ring */ + + tpd_ring[0].dma = roundup(ring_header->dma, 8); + offset = tpd_ring[0].dma - ring_header->dma; + for (i = 0; i < AT_MAX_TRANSMIT_QUEUE; i++) { + tpd_ring[i].dma = ring_header->dma + offset; + tpd_ring[i].desc = (u8 *) ring_header->desc + offset; + tpd_ring[i].size = + sizeof(struct atl1c_tpd_desc) * tpd_ring[i].count; + offset += roundup(tpd_ring[i].size, 8); + } + /* init RFD ring */ + rfd_ring->dma = ring_header->dma + offset; + rfd_ring->desc = (u8 *) ring_header->desc + offset; + rfd_ring->size = sizeof(struct atl1c_rx_free_desc) * rfd_ring->count; + offset += roundup(rfd_ring->size, 8); + + /* init RRD ring */ + rrd_ring->dma = ring_header->dma + offset; + rrd_ring->desc = (u8 *) ring_header->desc + offset; + rrd_ring->size = sizeof(struct atl1c_recv_ret_status) * + rrd_ring->count; + offset += roundup(rrd_ring->size, 8); + + return 0; + +err_nomem: + kfree(tpd_ring->buffer_info); + return -ENOMEM; +} + +static void atl1c_configure_des_ring(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) + adapter->tpd_ring; + + /* TPD */ + AT_WRITE_REG(hw, REG_TX_BASE_ADDR_HI, + (u32)((tpd_ring[atl1c_trans_normal].dma & + AT_DMA_HI_ADDR_MASK) >> 32)); + /* just enable normal priority TX queue */ + AT_WRITE_REG(hw, REG_TPD_PRI0_ADDR_LO, + (u32)(tpd_ring[atl1c_trans_normal].dma & + AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_TPD_PRI1_ADDR_LO, + (u32)(tpd_ring[atl1c_trans_high].dma & + AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_TPD_RING_SIZE, + (u32)(tpd_ring[0].count & TPD_RING_SIZE_MASK)); + + + /* RFD */ + AT_WRITE_REG(hw, REG_RX_BASE_ADDR_HI, + (u32)((rfd_ring->dma & AT_DMA_HI_ADDR_MASK) >> 32)); + AT_WRITE_REG(hw, REG_RFD0_HEAD_ADDR_LO, + (u32)(rfd_ring->dma & AT_DMA_LO_ADDR_MASK)); + + AT_WRITE_REG(hw, REG_RFD_RING_SIZE, + rfd_ring->count & RFD_RING_SIZE_MASK); + AT_WRITE_REG(hw, REG_RX_BUF_SIZE, + adapter->rx_buffer_len & RX_BUF_SIZE_MASK); + + /* RRD */ + AT_WRITE_REG(hw, REG_RRD0_HEAD_ADDR_LO, + (u32)(rrd_ring->dma & AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_RRD_RING_SIZE, + (rrd_ring->count & RRD_RING_SIZE_MASK)); + + if (hw->nic_type == athr_l2c_b) { + AT_WRITE_REG(hw, REG_SRAM_RXF_LEN, 0x02a0L); + AT_WRITE_REG(hw, REG_SRAM_TXF_LEN, 0x0100L); + AT_WRITE_REG(hw, REG_SRAM_RXF_ADDR, 0x029f0000L); + AT_WRITE_REG(hw, REG_SRAM_RFD0_INFO, 0x02bf02a0L); + AT_WRITE_REG(hw, REG_SRAM_TXF_ADDR, 0x03bf02c0L); + AT_WRITE_REG(hw, REG_SRAM_TRD_ADDR, 0x03df03c0L); + AT_WRITE_REG(hw, REG_TXF_WATER_MARK, 0); /* TX watermark, to enter l1 state.*/ + AT_WRITE_REG(hw, REG_RXD_DMA_CTRL, 0); /* RXD threshold.*/ + } + /* Load all of base address above */ + AT_WRITE_REG(hw, REG_LOAD_PTR, 1); +} + +static void atl1c_configure_tx(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + 
int max_pay_load; + u16 tx_offload_thresh; + u32 txq_ctrl_data; + + tx_offload_thresh = MAX_TSO_FRAME_SIZE; + AT_WRITE_REG(hw, REG_TX_TSO_OFFLOAD_THRESH, + (tx_offload_thresh >> 3) & TX_TSO_OFFLOAD_THRESH_MASK); + max_pay_load = pcie_get_readrq(adapter->pdev) >> 8; + hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); + /* + * if the BIOS had changed the DMA-read-max-length to an invalid value, + * restore it to the default value + */ + if (hw->dmar_block < DEVICE_CTRL_MAXRRS_MIN) { + pcie_set_readrq(adapter->pdev, 128 << DEVICE_CTRL_MAXRRS_MIN); + hw->dmar_block = DEVICE_CTRL_MAXRRS_MIN; + } + txq_ctrl_data = + hw->nic_type == athr_l2c_b || hw->nic_type == athr_l2c_b2 ? + L2CB_TXQ_CFGV : L1C_TXQ_CFGV; + + AT_WRITE_REG(hw, REG_TXQ_CTRL, txq_ctrl_data); +} + +static void atl1c_configure_rx(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + u32 rxq_ctrl_data; + + rxq_ctrl_data = (hw->rfd_burst & RXQ_RFD_BURST_NUM_MASK) << + RXQ_RFD_BURST_NUM_SHIFT; + + if (hw->ctrl_flags & ATL1C_RX_IPV6_CHKSUM) + rxq_ctrl_data |= IPV6_CHKSUM_CTRL_EN; + + /* aspm for gigabit */ + if (hw->nic_type != athr_l1d_2 && (hw->device_id & 1) != 0) + rxq_ctrl_data = FIELD_SETX(rxq_ctrl_data, ASPM_THRUPUT_LIMIT, + ASPM_THRUPUT_LIMIT_100M); + + AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); +} + +static void atl1c_configure_dma(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + u32 dma_ctrl_data; + + dma_ctrl_data = FIELDX(DMA_CTRL_RORDER_MODE, DMA_CTRL_RORDER_MODE_OUT) | + DMA_CTRL_RREQ_PRI_DATA | + FIELDX(DMA_CTRL_RREQ_BLEN, hw->dmar_block) | + FIELDX(DMA_CTRL_WDLY_CNT, DMA_CTRL_WDLY_CNT_DEF) | + FIELDX(DMA_CTRL_RDLY_CNT, DMA_CTRL_RDLY_CNT_DEF); + + AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); +} + +/* + * Stop the mac, transmit and receive units + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +static int atl1c_stop_mac(struct atl1c_hw *hw) +{ + u32 data; + + AT_READ_REG(hw, REG_RXQ_CTRL, &data); + data &= ~RXQ_CTRL_EN; + AT_WRITE_REG(hw, REG_RXQ_CTRL, data); + + AT_READ_REG(hw, REG_TXQ_CTRL, &data); + data &= ~TXQ_CTRL_EN; + AT_WRITE_REG(hw, REG_TXQ_CTRL, data); + + atl1c_wait_until_idle(hw, IDLE_STATUS_RXQ_BUSY | IDLE_STATUS_TXQ_BUSY); + + AT_READ_REG(hw, REG_MAC_CTRL, &data); + data &= ~(MAC_CTRL_TX_EN | MAC_CTRL_RX_EN); + AT_WRITE_REG(hw, REG_MAC_CTRL, data); + + return (int)atl1c_wait_until_idle(hw, + IDLE_STATUS_TXMAC_BUSY | IDLE_STATUS_RXMAC_BUSY); +} + +static void atl1c_start_mac(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + u32 mac, txq, rxq; + + hw->mac_duplex = adapter->link_duplex == FULL_DUPLEX ? true : false; + hw->mac_speed = adapter->link_speed == SPEED_1000 ? + atl1c_mac_speed_1000 : atl1c_mac_speed_10_100; + + AT_READ_REG(hw, REG_TXQ_CTRL, &txq); + AT_READ_REG(hw, REG_RXQ_CTRL, &rxq); + AT_READ_REG(hw, REG_MAC_CTRL, &mac); + + txq |= TXQ_CTRL_EN; + rxq |= RXQ_CTRL_EN; + mac |= MAC_CTRL_TX_EN | MAC_CTRL_TX_FLOW | + MAC_CTRL_RX_EN | MAC_CTRL_RX_FLOW | + MAC_CTRL_ADD_CRC | MAC_CTRL_PAD | + MAC_CTRL_BC_EN | MAC_CTRL_SINGLE_PAUSE_EN | + MAC_CTRL_HASH_ALG_CRC32; + if (hw->mac_duplex) + mac |= MAC_CTRL_DUPLX; + else + mac &= ~MAC_CTRL_DUPLX; + mac = FIELD_SETX(mac, MAC_CTRL_SPEED, hw->mac_speed); + mac = FIELD_SETX(mac, MAC_CTRL_PRMLEN, hw->preamble_len); + + AT_WRITE_REG(hw, REG_TXQ_CTRL, txq); + AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac); +} + +/* + * Reset the transmit and receive units; mask and clear all interrupts.
+ * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +static int atl1c_reset_mac(struct atl1c_hw *hw) +{ + struct atl1c_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + u32 ctrl_data = 0; + + atl1c_stop_mac(hw); + /* + * Issue Soft Reset to the MAC. This will reset the chip's + * transmit, receive, DMA. It will not affect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + AT_READ_REG(hw, REG_MASTER_CTRL, &ctrl_data); + ctrl_data |= MASTER_CTRL_OOB_DIS; + AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data | MASTER_CTRL_SOFT_RST); + + AT_WRITE_FLUSH(hw); + msleep(10); + /* Wait at least 10ms for all modules to be idle */ + + if (atl1c_wait_until_idle(hw, IDLE_STATUS_MASK)) { + dev_err(&pdev->dev, + "MAC state machine didn't become idle within" + " 10ms of being disabled\n"); + return -1; + } + AT_WRITE_REG(hw, REG_MASTER_CTRL, ctrl_data); + + /* driver control speed/duplex */ + AT_READ_REG(hw, REG_MAC_CTRL, &ctrl_data); + AT_WRITE_REG(hw, REG_MAC_CTRL, ctrl_data | MAC_CTRL_SPEED_MODE_SW); + + /* clk switch setting */ + AT_READ_REG(hw, REG_SERDES, &ctrl_data); + switch (hw->nic_type) { + case athr_l2c_b: + ctrl_data &= ~(SERDES_PHY_CLK_SLOWDOWN | + SERDES_MAC_CLK_SLOWDOWN); + AT_WRITE_REG(hw, REG_SERDES, ctrl_data); + break; + case athr_l2c_b2: + case athr_l1d_2: + ctrl_data |= SERDES_PHY_CLK_SLOWDOWN | SERDES_MAC_CLK_SLOWDOWN; + AT_WRITE_REG(hw, REG_SERDES, ctrl_data); + break; + default: + break; + } + + return 0; +} + +static void atl1c_disable_l0s_l1(struct atl1c_hw *hw) +{ + u16 ctrl_flags = hw->ctrl_flags; + + hw->ctrl_flags &= ~(ATL1C_ASPM_L0S_SUPPORT | ATL1C_ASPM_L1_SUPPORT); + atl1c_set_aspm(hw, SPEED_0); + hw->ctrl_flags = ctrl_flags; +} + +/* + * Set ASPM state. + * Enable/disable L0s/L1 depending on link state. + */ +static void atl1c_set_aspm(struct atl1c_hw *hw, u16 link_speed) +{ + u32 pm_ctrl_data; + u32 link_l1_timer; + + AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data); + pm_ctrl_data &= ~(PM_CTRL_ASPM_L1_EN | + PM_CTRL_ASPM_L0S_EN | + PM_CTRL_MAC_ASPM_CHK); + /* L1 timer */ + if (hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { + pm_ctrl_data &= ~PMCTRL_TXL1_AFTER_L0S; + link_l1_timer = + link_speed == SPEED_1000 || link_speed == SPEED_100 ? + L1D_PMCTRL_L1_ENTRY_TM_16US : 1; + pm_ctrl_data = FIELD_SETX(pm_ctrl_data, + L1D_PMCTRL_L1_ENTRY_TM, link_l1_timer); + } else { + link_l1_timer = hw->nic_type == athr_l2c_b ?
+ L2CB1_PM_CTRL_L1_ENTRY_TM : L1C_PM_CTRL_L1_ENTRY_TM; + if (link_speed != SPEED_1000 && link_speed != SPEED_100) + link_l1_timer = 1; + pm_ctrl_data = FIELD_SETX(pm_ctrl_data, + PM_CTRL_L1_ENTRY_TIMER, link_l1_timer); + } + + /* L0S/L1 enable */ + if ((hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT) && link_speed != SPEED_0) + pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN | PM_CTRL_MAC_ASPM_CHK; + if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT) + pm_ctrl_data |= PM_CTRL_ASPM_L1_EN | PM_CTRL_MAC_ASPM_CHK; + + /* l2cb & l1d & l2cb2 & l1d2 */ + if (hw->nic_type == athr_l2c_b || hw->nic_type == athr_l1d || + hw->nic_type == athr_l2c_b2 || hw->nic_type == athr_l1d_2) { + pm_ctrl_data = FIELD_SETX(pm_ctrl_data, + PM_CTRL_PM_REQ_TIMER, PM_CTRL_PM_REQ_TO_DEF); + pm_ctrl_data |= PM_CTRL_RCVR_WT_TIMER | + PM_CTRL_SERDES_PD_EX_L1 | + PM_CTRL_CLK_SWH_L1; + pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN | + PM_CTRL_SERDES_PLL_L1_EN | + PM_CTRL_SERDES_BUFS_RX_L1_EN | + PM_CTRL_SA_DLY_EN | + PM_CTRL_HOTRST); + /* disable l0s if link down or l2cb */ + if (link_speed == SPEED_0 || hw->nic_type == athr_l2c_b) + pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN; + } else { /* l1c */ + pm_ctrl_data = + FIELD_SETX(pm_ctrl_data, PM_CTRL_L1_ENTRY_TIMER, 0); + if (link_speed != SPEED_0) { + pm_ctrl_data |= PM_CTRL_SERDES_L1_EN | + PM_CTRL_SERDES_PLL_L1_EN | + PM_CTRL_SERDES_BUFS_RX_L1_EN; + pm_ctrl_data &= ~(PM_CTRL_SERDES_PD_EX_L1 | + PM_CTRL_CLK_SWH_L1 | + PM_CTRL_ASPM_L0S_EN | + PM_CTRL_ASPM_L1_EN); + } else { /* link down */ + pm_ctrl_data |= PM_CTRL_CLK_SWH_L1; + pm_ctrl_data &= ~(PM_CTRL_SERDES_L1_EN | + PM_CTRL_SERDES_PLL_L1_EN | + PM_CTRL_SERDES_BUFS_RX_L1_EN | + PM_CTRL_ASPM_L0S_EN); + } + } + AT_WRITE_REG(hw, REG_PM_CTRL, pm_ctrl_data); + + return; +} + +/** + * atl1c_configure_mac - Configure Transmit & Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx/Rx unit of the MAC after a reset. + */ +static int atl1c_configure_mac(struct atl1c_adapter *adapter) +{ + struct atl1c_hw *hw = &adapter->hw; + u32 master_ctrl_data = 0; + u32 intr_modrt_data; + u32 data; + + AT_READ_REG(hw, REG_MASTER_CTRL, &master_ctrl_data); + master_ctrl_data &= ~(MASTER_CTRL_TX_ITIMER_EN | + MASTER_CTRL_RX_ITIMER_EN | + MASTER_CTRL_INT_RDCLR); + /* clear interrupt status */ + AT_WRITE_REG(hw, REG_ISR, 0xFFFFFFFF); + /* Clear any WOL status */ + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + /* set Interrupt Clear Timer + * HW will enable itself to assert the interrupt event to the system after + * waiting x-time for software to notify it that the interrupt was accepted.
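The FIELD_SETX()/FIELDX() helpers used throughout this file are mask-and-shift macros defined in atl1c.h; their exact spelling is not shown in this commit, so the definitions below are a plausible reconstruction with an invented DEMO field, runnable in user space:

#include <stdio.h>
#include <stdint.h>

/* Assumed shape of the atl1c bitfield helpers: each field FOO supplies
 * FOO_MASK (field width) and FOO_SHIFT (bit position). */
#define DEMO_MASK	0xFUL
#define DEMO_SHIFT	8

#define FIELDX(_name, _v)	(((_v) & _name##_MASK) << _name##_SHIFT)
#define FIELD_SETX(_x, _name, _v) \
	(((_x) & ~(_name##_MASK << _name##_SHIFT)) | FIELDX(_name, _v))

int main(void)
{
	uint32_t reg = 0xFFFFFFFFu;

	reg = FIELD_SETX(reg, DEMO, 0x5);	/* touches only bits 8..11 */
	printf("reg = 0x%08X\n", reg);		/* prints 0xFFFFF5FF */
	return 0;
}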
+ */ + + data = CLK_GATING_EN_ALL; + if (hw->ctrl_flags & ATL1C_CLK_GATING_EN) { + if (hw->nic_type == athr_l2c_b) + data &= ~CLK_GATING_RXMAC_EN; + } else + data = 0; + AT_WRITE_REG(hw, REG_CLK_GATING_CTRL, data); + + AT_WRITE_REG(hw, REG_INT_RETRIG_TIMER, + hw->ict & INT_RETRIG_TIMER_MASK); + + atl1c_configure_des_ring(adapter); + + if (hw->ctrl_flags & ATL1C_INTR_MODRT_ENABLE) { + intr_modrt_data = (hw->tx_imt & IRQ_MODRT_TIMER_MASK) << + IRQ_MODRT_TX_TIMER_SHIFT; + intr_modrt_data |= (hw->rx_imt & IRQ_MODRT_TIMER_MASK) << + IRQ_MODRT_RX_TIMER_SHIFT; + AT_WRITE_REG(hw, REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data); + master_ctrl_data |= + MASTER_CTRL_TX_ITIMER_EN | MASTER_CTRL_RX_ITIMER_EN; + } + + if (hw->ctrl_flags & ATL1C_INTR_CLEAR_ON_READ) + master_ctrl_data |= MASTER_CTRL_INT_RDCLR; + + master_ctrl_data |= MASTER_CTRL_SA_TIMER_EN; + AT_WRITE_REG(hw, REG_MASTER_CTRL, master_ctrl_data); + + AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, + hw->smb_timer & SMB_STAT_TIMER_MASK); + + /* set MTU */ + AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + + VLAN_HLEN + ETH_FCS_LEN); + + atl1c_configure_tx(adapter); + atl1c_configure_rx(adapter); + atl1c_configure_dma(adapter); + + return 0; +} + +static int atl1c_configure(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int num; + + atl1c_init_ring_ptrs(adapter); + atl1c_set_multi(netdev); + atl1c_restore_vlan(adapter); + + num = atl1c_alloc_rx_buffer(adapter); + if (unlikely(num == 0)) + return -ENOMEM; + + if (atl1c_configure_mac(adapter)) + return -EIO; + + return 0; +} + +static void atl1c_update_hw_stats(struct atl1c_adapter *adapter) +{ + u16 hw_reg_addr = 0; + unsigned long *stats_item = NULL; + u32 data; + + /* update rx status */ + hw_reg_addr = REG_MAC_RX_STATUS_BIN; + stats_item = &adapter->hw_stats.rx_ok; + while (hw_reg_addr <= REG_MAC_RX_STATUS_END) { + AT_READ_REG(&adapter->hw, hw_reg_addr, &data); + *stats_item += data; + stats_item++; + hw_reg_addr += 4; + } +/* update tx status */ + hw_reg_addr = REG_MAC_TX_STATUS_BIN; + stats_item = &adapter->hw_stats.tx_ok; + while (hw_reg_addr <= REG_MAC_TX_STATUS_END) { + AT_READ_REG(&adapter->hw, hw_reg_addr, &data); + *stats_item += data; + stats_item++; + hw_reg_addr += 4; + } +} + +/** + * atl1c_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. 
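atl1c_update_hw_stats() above works only because the fields of struct atl1c_hw_stats are declared in exactly the order of the REG_MAC_RX/TX_STATUS register windows: the item pointer steps through the struct while the register address steps by 4. A toy model of that layout contract (names invented; the driver's pointer walk assumes the struct is effectively a flat array of unsigned long):

#include <stdio.h>

/* field order must match register order - that is the whole contract */
struct demo_stats { unsigned long rx_ok, rx_bcast, rx_mcast; };

int main(void)
{
	unsigned int window[3] = { 100, 2, 3 };	/* stand-in register reads */
	struct demo_stats s = { 0 };
	unsigned long *item = &s.rx_ok;

	for (int i = 0; i < 3; i++)
		*item++ += window[i];	/* same walk as the driver's loop */

	printf("rx_ok=%lu rx_bcast=%lu rx_mcast=%lu\n",
	       s.rx_ok, s.rx_bcast, s.rx_mcast);
	return 0;
}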
+ */ +static struct net_device_stats *atl1c_get_stats(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw_stats *hw_stats = &adapter->hw_stats; + struct net_device_stats *net_stats = &netdev->stats; + + atl1c_update_hw_stats(adapter); + net_stats->rx_bytes = hw_stats->rx_byte_cnt; + net_stats->tx_bytes = hw_stats->tx_byte_cnt; + net_stats->multicast = hw_stats->rx_mcast; + net_stats->collisions = hw_stats->tx_1_col + + hw_stats->tx_2_col + + hw_stats->tx_late_col + + hw_stats->tx_abort_col; + + net_stats->rx_errors = hw_stats->rx_frag + + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + + hw_stats->rx_sz_ov + + hw_stats->rx_rrd_ov + + hw_stats->rx_align_err + + hw_stats->rx_rxf_ov; + + net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; + net_stats->rx_length_errors = hw_stats->rx_len_err; + net_stats->rx_crc_errors = hw_stats->rx_fcs_err; + net_stats->rx_frame_errors = hw_stats->rx_align_err; + net_stats->rx_dropped = hw_stats->rx_rrd_ov; + + net_stats->tx_errors = hw_stats->tx_late_col + + hw_stats->tx_abort_col + + hw_stats->tx_underrun + + hw_stats->tx_trunc; + + net_stats->tx_fifo_errors = hw_stats->tx_underrun; + net_stats->tx_aborted_errors = hw_stats->tx_abort_col; + net_stats->tx_window_errors = hw_stats->tx_late_col; + + net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; + net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; + + return net_stats; +} + +static inline void atl1c_clear_phy_int(struct atl1c_adapter *adapter) +{ + u16 phy_data; + + spin_lock(&adapter->mdio_lock); + atl1c_read_phy_reg(&adapter->hw, MII_ISR, &phy_data); + spin_unlock(&adapter->mdio_lock); +} + +static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + struct atl1c_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); + u16 hw_next_to_clean; + u16 reg; + unsigned int total_bytes = 0, total_packets = 0; + + reg = type == atl1c_trans_high ? 
REG_TPD_PRI1_CIDX : REG_TPD_PRI0_CIDX; + + AT_READ_REGW(&adapter->hw, reg, &hw_next_to_clean); + + while (next_to_clean != hw_next_to_clean) { + buffer_info = &tpd_ring->buffer_info[next_to_clean]; + if (buffer_info->skb) { + total_bytes += buffer_info->skb->len; + total_packets++; + } + atl1c_clean_buffer(pdev, buffer_info); + if (++next_to_clean == tpd_ring->count) + next_to_clean = 0; + atomic_set(&tpd_ring->next_to_clean, next_to_clean); + } + + netdev_completed_queue(adapter->netdev, total_packets, total_bytes); + + if (netif_queue_stopped(adapter->netdev) && + netif_carrier_ok(adapter->netdev)) { + netif_wake_queue(adapter->netdev); + } + + return true; +} + +/** + * atl1c_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t atl1c_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + struct atl1c_hw *hw = &adapter->hw; + int max_ints = AT_MAX_INT_WORK; + int handled = IRQ_NONE; + u32 status; + u32 reg_data; + + do { + AT_READ_REG(hw, REG_ISR, &reg_data); + status = reg_data & hw->intr_mask; + + if (status == 0 || (status & ISR_DIS_INT) != 0) { + if (max_ints != AT_MAX_INT_WORK) + handled = IRQ_HANDLED; + break; + } + /* link event */ + if (status & ISR_GPHY) + atl1c_clear_phy_int(adapter); + /* Ack ISR */ + AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); + if (status & ISR_RX_PKT) { + if (likely(napi_schedule_prep(&adapter->napi))) { + hw->intr_mask &= ~ISR_RX_PKT; + AT_WRITE_REG(hw, REG_IMR, hw->intr_mask); + __napi_schedule(&adapter->napi); + } + } + if (status & ISR_TX_PKT) + atl1c_clean_tx_irq(adapter, atl1c_trans_normal); + + handled = IRQ_HANDLED; + /* check if PCIE PHY Link down */ + if (status & ISR_ERROR) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "atl1c hardware error (status = 0x%x)\n", + status & ISR_ERROR); + /* reset MAC */ + set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event); + schedule_work(&adapter->common_task); + return IRQ_HANDLED; + } + + if (status & ISR_OVER) + if (netif_msg_intr(adapter)) + dev_warn(&pdev->dev, + "TX/RX overflow (status = 0x%x)\n", + status & ISR_OVER); + + /* link event */ + if (status & (ISR_GPHY | ISR_MANUAL)) { + netdev->stats.tx_carrier_errors++; + atl1c_link_chg_event(adapter); + break; + } + + } while (--max_ints > 0); + /* re-enable Interrupt */ + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + return handled; +} + +static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter, + struct sk_buff *skb, struct atl1c_recv_ret_status *prrs) +{ + /* + * The pid field in RRS is not correct sometimes, so we + * cannot figure out if the packet is fragmented or not, + * so we tell the KERNEL CHECKSUM_NONE + */ + skb_checksum_none_assert(skb); +} + +static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter) +{ + struct sk_buff *skb; + struct page *page; + + if (adapter->rx_frag_size > PAGE_SIZE) + return netdev_alloc_skb(adapter->netdev, + adapter->rx_buffer_len); + + page = adapter->rx_page; + if (!page) { + adapter->rx_page = page = alloc_page(GFP_ATOMIC); + if (unlikely(!page)) + return NULL; + adapter->rx_page_offset = 0; + } + + skb = build_skb(page_address(page) + adapter->rx_page_offset, + adapter->rx_frag_size); + if (likely(skb)) { + skb_reserve(skb, NET_SKB_PAD); + adapter->rx_page_offset += adapter->rx_frag_size; + if (adapter->rx_page_offset >= PAGE_SIZE) + adapter->rx_page = NULL; + else + get_page(page); + } + return skb;
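The interrupt path and the poll path above form the usual NAPI handshake: the hard IRQ masks ISR_RX_PKT and schedules the softirq, and atl1c_clean() re-arms the interrupt source only once it completes under budget. Condensed from the two functions in this file (a restatement, not new driver code):

/* hard IRQ: mask RX and hand the work to NAPI */
if ((status & ISR_RX_PKT) && napi_schedule_prep(&adapter->napi)) {
	hw->intr_mask &= ~ISR_RX_PKT;
	AT_WRITE_REG(hw, REG_IMR, hw->intr_mask);
	__napi_schedule(&adapter->napi);
}

/* NAPI poll: finishing under budget means the ring drained, so re-arm */
if (work_done < budget) {
	napi_complete(napi);
	adapter->hw.intr_mask |= ISR_RX_PKT;
	AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
}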
+} + +static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter) +{ + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct pci_dev *pdev = adapter->pdev; + struct atl1c_buffer *buffer_info, *next_info; + struct sk_buff *skb; + void *vir_addr = NULL; + u16 num_alloc = 0; + u16 rfd_next_to_use, next_next; + struct atl1c_rx_free_desc *rfd_desc; + dma_addr_t mapping; + + next_next = rfd_next_to_use = rfd_ring->next_to_use; + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + + while (next_info->flags & ATL1C_BUFFER_FREE) { + rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); + + skb = atl1c_alloc_skb(adapter); + if (unlikely(!skb)) { + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, "alloc rx buffer failed\n"); + break; + } + + /* + * Make buffer alignment 2 beyond a 16 byte boundary + * this will result in a 16 byte aligned IP header after + * the 14 byte MAC header is removed + */ + vir_addr = skb->data; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + buffer_info->skb = skb; + buffer_info->length = adapter->rx_buffer_len; + mapping = pci_map_single(pdev, vir_addr, + buffer_info->length, + PCI_DMA_FROMDEVICE); + if (unlikely(pci_dma_mapping_error(pdev, mapping))) { + dev_kfree_skb(skb); + buffer_info->skb = NULL; + buffer_info->length = 0; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE); + netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed"); + break; + } + buffer_info->dma = mapping; + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, + ATL1C_PCIMAP_FROMDEVICE); + rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + rfd_next_to_use = next_next; + if (++next_next == rfd_ring->count) + next_next = 0; + buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; + next_info = &rfd_ring->buffer_info[next_next]; + num_alloc++; + } + + if (num_alloc) { + /* TODO: update mailbox here */ + wmb(); + rfd_ring->next_to_use = rfd_next_to_use; + AT_WRITE_REG(&adapter->hw, REG_MB_RFD0_PROD_IDX, + rfd_ring->next_to_use & MB_RFDX_PROD_IDX_MASK); + } + + return num_alloc; +} + +static void atl1c_clean_rrd(struct atl1c_rrd_ring *rrd_ring, + struct atl1c_recv_ret_status *rrs, u16 num) +{ + u16 i; + /* the relationship between rrd and rfd is one map one */ + for (i = 0; i < num; i++, rrs = ATL1C_RRD_DESC(rrd_ring, + rrd_ring->next_to_clean)) { + rrs->word3 &= ~RRS_RXD_UPDATED; + if (++rrd_ring->next_to_clean == rrd_ring->count) + rrd_ring->next_to_clean = 0; + } +} + +static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring, + struct atl1c_recv_ret_status *rrs, u16 num) +{ + u16 i; + u16 rfd_index; + struct atl1c_buffer *buffer_info = rfd_ring->buffer_info; + + rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) & + RRS_RX_RFD_INDEX_MASK; + for (i = 0; i < num; i++) { + buffer_info[rfd_index].skb = NULL; + ATL1C_SET_BUFFER_STATE(&buffer_info[rfd_index], + ATL1C_BUFFER_FREE); + if (++rfd_index == rfd_ring->count) + rfd_index = 0; + } + rfd_ring->next_to_clean = rfd_index; +} + +static void atl1c_clean_rx_irq(struct atl1c_adapter *adapter, + int *work_done, int work_to_do) +{ + u16 rfd_num, rfd_index; + u16 count = 0; + u16 length; + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring; + struct sk_buff *skb; + struct atl1c_recv_ret_status *rrs; + struct atl1c_buffer *buffer_info; + + while (1) 
{ + if (*work_done >= work_to_do) + break; + rrs = ATL1C_RRD_DESC(rrd_ring, rrd_ring->next_to_clean); + if (likely(RRS_RXD_IS_VALID(rrs->word3))) { + rfd_num = (rrs->word0 >> RRS_RX_RFD_CNT_SHIFT) & + RRS_RX_RFD_CNT_MASK; + if (unlikely(rfd_num != 1)) + /* TODO: support multiple rfd */ + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, + "Multi rfd not supported yet!\n"); + goto rrs_checked; + } else { + break; + } +rrs_checked: + atl1c_clean_rrd(rrd_ring, rrs, rfd_num); + if (rrs->word3 & (RRS_RX_ERR_SUM | RRS_802_3_LEN_ERR)) { + atl1c_clean_rfd(rfd_ring, rrs, rfd_num); + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, + "wrong packet! rrs word3 is %x\n", + rrs->word3); + continue; + } + + length = le16_to_cpu((rrs->word3 >> RRS_PKT_SIZE_SHIFT) & + RRS_PKT_SIZE_MASK); + /* Good Receive */ + if (likely(rfd_num == 1)) { + rfd_index = (rrs->word0 >> RRS_RX_RFD_INDEX_SHIFT) & + RRS_RX_RFD_INDEX_MASK; + buffer_info = &rfd_ring->buffer_info[rfd_index]; + pci_unmap_single(pdev, buffer_info->dma, + buffer_info->length, PCI_DMA_FROMDEVICE); + skb = buffer_info->skb; + } else { + /* TODO */ + if (netif_msg_rx_err(adapter)) + dev_warn(&pdev->dev, + "Multi rfd not supported yet!\n"); + break; + } + atl1c_clean_rfd(rfd_ring, rrs, rfd_num); + skb_put(skb, length - ETH_FCS_LEN); + skb->protocol = eth_type_trans(skb, netdev); + atl1c_rx_checksum(adapter, skb, rrs); + if (rrs->word3 & RRS_VLAN_INS) { + u16 vlan; + + AT_TAG_TO_VLAN(rrs->vlan_tag, vlan); + vlan = le16_to_cpu(vlan); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); + } + netif_receive_skb(skb); + + (*work_done)++; + count++; + } + if (count) + atl1c_alloc_rx_buffer(adapter); +} + +/** + * atl1c_clean - NAPI Rx polling callback + */ +static int atl1c_clean(struct napi_struct *napi, int budget) +{ + struct atl1c_adapter *adapter = + container_of(napi, struct atl1c_adapter, napi); + int work_done = 0; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(adapter->netdev)) + goto quit_polling; + /* just enable one RXQ */ + atl1c_clean_rx_irq(adapter, &work_done, budget); + + if (work_done < budget) { +quit_polling: + napi_complete(napi); + adapter->hw.intr_mask |= ISR_RX_PKT; + AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask); + } + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void atl1c_netpoll(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + atl1c_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +static inline u16 atl1c_tpd_avail(struct atl1c_adapter *adapter, enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + u16 next_to_use = 0; + u16 next_to_clean = 0; + + next_to_clean = atomic_read(&tpd_ring->next_to_clean); + next_to_use = tpd_ring->next_to_use; + + return (u16)(next_to_clean > next_to_use) ?
+ (next_to_clean - next_to_use - 1) : + (tpd_ring->count + next_to_clean - next_to_use - 1); +} + +/* + * get next usable tpd + * Note: should call atl1c_tpd_avail to make sure + * there are enough tpds to use + */ +static struct atl1c_tpd_desc *atl1c_get_tpd(struct atl1c_adapter *adapter, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + struct atl1c_tpd_desc *tpd_desc; + u16 next_to_use = 0; + + next_to_use = tpd_ring->next_to_use; + if (++tpd_ring->next_to_use == tpd_ring->count) + tpd_ring->next_to_use = 0; + tpd_desc = ATL1C_TPD_DESC(tpd_ring, next_to_use); + memset(tpd_desc, 0, sizeof(struct atl1c_tpd_desc)); + return tpd_desc; +} + +static struct atl1c_buffer * +atl1c_get_tx_buffer(struct atl1c_adapter *adapter, struct atl1c_tpd_desc *tpd) +{ + struct atl1c_tpd_ring *tpd_ring = adapter->tpd_ring; + + return &tpd_ring->buffer_info[tpd - + (struct atl1c_tpd_desc *)tpd_ring->desc]; +} + +/* Calculate the number of transmit packet descriptors (TPDs) needed */ +static u16 atl1c_cal_tpd_req(const struct sk_buff *skb) +{ + u16 tpd_req; + u16 proto_hdr_len = 0; + + tpd_req = skb_shinfo(skb)->nr_frags + 1; + + if (skb_is_gso(skb)) { + proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (proto_hdr_len < skb_headlen(skb)) + tpd_req++; + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + tpd_req++; + } + return tpd_req; +} + +static int atl1c_tso_csum(struct atl1c_adapter *adapter, + struct sk_buff *skb, + struct atl1c_tpd_desc **tpd, + enum atl1c_trans_queue type) +{ + struct pci_dev *pdev = adapter->pdev; + unsigned short offload_type; + u8 hdr_len; + u32 real_len; + + if (skb_is_gso(skb)) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + offload_type = skb_shinfo(skb)->gso_type; + + if (offload_type & SKB_GSO_TCPV4) { + real_len = (((unsigned char *)ip_hdr(skb) - skb->data) + + ntohs(ip_hdr(skb)->tot_len)); + + if (real_len < skb->len) + pskb_trim(skb, real_len); + + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (unlikely(skb->len == hdr_len)) { + /* only xsum need */ + if (netif_msg_tx_queued(adapter)) + dev_warn(&pdev->dev, + "IPV4 tso with zero data??\n"); + goto check_sum; + } else { + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic( + ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + (*tpd)->word1 |= 1 << TPD_IPV4_PACKET_SHIFT; + } + } + + if (offload_type & SKB_GSO_TCPV6) { + struct atl1c_tpd_ext_desc *etpd = + *(struct atl1c_tpd_ext_desc **)(tpd); + + memset(etpd, 0, sizeof(struct atl1c_tpd_ext_desc)); + *tpd = atl1c_get_tpd(adapter, type); + ipv6_hdr(skb)->payload_len = 0; + /* check payload == 0 byte ?
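atl1c_tpd_avail() above computes free slots in the classic one-slot-reserved ring style: equal indices mean an empty ring, so at most count - 1 descriptors are ever usable. A self-contained check of that arithmetic:

#include <stdio.h>
#include <assert.h>

/* count is the ring size; one slot stays empty so that
 * next_to_use == next_to_clean unambiguously means "ring empty". */
static unsigned tpd_avail(unsigned count, unsigned use, unsigned clean)
{
	return clean > use ? clean - use - 1 : count + clean - use - 1;
}

int main(void)
{
	assert(tpd_avail(8, 0, 0) == 7);	/* empty ring: count-1 usable */
	assert(tpd_avail(8, 5, 2) == 4);	/* producer ahead of consumer */
	assert(tpd_avail(8, 2, 5) == 2);	/* producer wrapped around */
	puts("ok");
	return 0;
}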
*/ + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (unlikely(skb->len == hdr_len)) { + /* only xsum need */ + if (netif_msg_tx_queued(adapter)) + dev_warn(&pdev->dev, + "IPV6 tso with zero data??\n"); + goto check_sum; + } else + tcp_hdr(skb)->check = ~csum_ipv6_magic( + &ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + etpd->word1 |= 1 << TPD_LSO_EN_SHIFT; + etpd->word1 |= 1 << TPD_LSO_VER_SHIFT; + etpd->pkt_len = cpu_to_le32(skb->len); + (*tpd)->word1 |= 1 << TPD_LSO_VER_SHIFT; + } + + (*tpd)->word1 |= 1 << TPD_LSO_EN_SHIFT; + (*tpd)->word1 |= (skb_transport_offset(skb) & TPD_TCPHDR_OFFSET_MASK) << + TPD_TCPHDR_OFFSET_SHIFT; + (*tpd)->word1 |= (skb_shinfo(skb)->gso_size & TPD_MSS_MASK) << + TPD_MSS_SHIFT; + return 0; + } + +check_sum: + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + u8 css, cso; + cso = skb_checksum_start_offset(skb); + + if (unlikely(cso & 0x1)) { + if (netif_msg_tx_err(adapter)) + dev_err(&adapter->pdev->dev, + "payload offset should not be an odd number\n"); + return -1; + } else { + css = cso + skb->csum_offset; + + (*tpd)->word1 |= ((cso >> 1) & TPD_PLOADOFFSET_MASK) << + TPD_PLOADOFFSET_SHIFT; + (*tpd)->word1 |= ((css >> 1) & TPD_CCSUM_OFFSET_MASK) << + TPD_CCSUM_OFFSET_SHIFT; + (*tpd)->word1 |= 1 << TPD_CCSUM_EN_SHIFT; + } + } + return 0; +} + +static void atl1c_tx_rollback(struct atl1c_adapter *adpt, + struct atl1c_tpd_desc *first_tpd, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adpt->tpd_ring[type]; + struct atl1c_buffer *buffer_info; + struct atl1c_tpd_desc *tpd; + u16 first_index, index; + + first_index = first_tpd - (struct atl1c_tpd_desc *)tpd_ring->desc; + index = first_index; + while (index != tpd_ring->next_to_use) { + tpd = ATL1C_TPD_DESC(tpd_ring, index); + buffer_info = &tpd_ring->buffer_info[index]; + atl1c_clean_buffer(adpt->pdev, buffer_info); + memset(tpd, 0, sizeof(struct atl1c_tpd_desc)); + if (++index == tpd_ring->count) + index = 0; + } + tpd_ring->next_to_use = first_index; +} + +static int atl1c_tx_map(struct atl1c_adapter *adapter, + struct sk_buff *skb, struct atl1c_tpd_desc *tpd, + enum atl1c_trans_queue type) +{ + struct atl1c_tpd_desc *use_tpd = NULL; + struct atl1c_buffer *buffer_info = NULL; + u16 buf_len = skb_headlen(skb); + u16 map_len = 0; + u16 mapped_len = 0; + u16 hdr_len = 0; + u16 nr_frags; + u16 f; + int tso; + + nr_frags = skb_shinfo(skb)->nr_frags; + tso = (tpd->word1 >> TPD_LSO_EN_SHIFT) & TPD_LSO_EN_MASK; + if (tso) { + /* TSO */ + map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + use_tpd = tpd; + + buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); + buffer_info->length = map_len; + buffer_info->dma = pci_map_single(adapter->pdev, + skb->data, hdr_len, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(adapter->pdev, + buffer_info->dma))) + goto err_dma; + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, + ATL1C_PCIMAP_TODEVICE); + mapped_len += map_len; + use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + use_tpd->buffer_len = cpu_to_le16(buffer_info->length); + } + + if (mapped_len < buf_len) { + /* mapped_len == 0 means we should use the first tpd, + which is given by the caller */ + if (mapped_len == 0) + use_tpd = tpd; + else { + use_tpd = atl1c_get_tpd(adapter, type); + memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); + } + buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); + buffer_info->length = buf_len - mapped_len; + buffer_info->dma = +
pci_map_single(adapter->pdev, skb->data + mapped_len, + buffer_info->length, PCI_DMA_TODEVICE); + if (unlikely(pci_dma_mapping_error(adapter->pdev, + buffer_info->dma))) + goto err_dma; + + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE, + ATL1C_PCIMAP_TODEVICE); + use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + use_tpd->buffer_len = cpu_to_le16(buffer_info->length); + } + + for (f = 0; f < nr_frags; f++) { + struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + + use_tpd = atl1c_get_tpd(adapter, type); + memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); + + buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); + buffer_info->length = skb_frag_size(frag); + buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev, + frag, 0, + buffer_info->length, + DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) + goto err_dma; + + ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY); + ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE, + ATL1C_PCIMAP_TODEVICE); + use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); + use_tpd->buffer_len = cpu_to_le16(buffer_info->length); + } + + /* The last tpd */ + use_tpd->word1 |= 1 << TPD_EOP_SHIFT; + /* The last buffer info contains the skb address, + so it will be freed after unmapping */ + buffer_info->skb = skb; + + return 0; + +err_dma: + buffer_info->dma = 0; + buffer_info->length = 0; + return -1; +} + +static void atl1c_tx_queue(struct atl1c_adapter *adapter, struct sk_buff *skb, + struct atl1c_tpd_desc *tpd, enum atl1c_trans_queue type) +{ + struct atl1c_tpd_ring *tpd_ring = &adapter->tpd_ring[type]; + u16 reg; + + reg = type == atl1c_trans_high ? REG_TPD_PRI1_PIDX : REG_TPD_PRI0_PIDX; + AT_WRITE_REGW(&adapter->hw, reg, tpd_ring->next_to_use); +} + +static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + u16 tpd_req = 1; + struct atl1c_tpd_desc *tpd; + enum atl1c_trans_queue type = atl1c_trans_normal; + + if (test_bit(__AT_DOWN, &adapter->flags)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + tpd_req = atl1c_cal_tpd_req(skb); + if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { + if (netif_msg_pktdata(adapter)) + dev_info(&adapter->pdev->dev, "tx locked\n"); + return NETDEV_TX_LOCKED; + } + + if (atl1c_tpd_avail(adapter, type) < tpd_req) { + /* not enough descriptors, just stop the queue */ + netif_stop_queue(netdev); + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_BUSY; + } + + tpd = atl1c_get_tpd(adapter, type); + + /* do TSO and check sum */ + if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) { + spin_unlock_irqrestore(&adapter->tx_lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(skb_vlan_tag_present(skb))) { + u16 vlan = skb_vlan_tag_get(skb); + __le16 tag; + + vlan = cpu_to_le16(vlan); + AT_VLAN_TO_TAG(vlan, tag); + tpd->word1 |= 1 << TPD_INS_VTAG_SHIFT; + tpd->vlan_tag = tag; + } + + if (skb_network_offset(skb) != ETH_HLEN) + tpd->word1 |= 1 << TPD_ETH_TYPE_SHIFT; /* Ethernet frame */ + + if (atl1c_tx_map(adapter, skb, tpd, type) < 0) { + netif_info(adapter, tx_done, adapter->netdev, + "tx-skb dropped due to dma error\n"); + /* roll back tpd/buffer */ + atl1c_tx_rollback(adapter, tpd, type); + spin_unlock_irqrestore(&adapter->tx_lock, flags); + dev_kfree_skb_any(skb); + } else { + netdev_sent_queue(adapter->netdev, skb->len); +
atl1c_tx_queue(adapter, skb, tpd, type); + spin_unlock_irqrestore(&adapter->tx_lock, flags); + } + + return NETDEV_TX_OK; +} + +static void atl1c_free_irq(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); + + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); +} + +static int atl1c_request_irq(struct atl1c_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int flags = 0; + int err = 0; + + adapter->have_msi = true; + err = pci_enable_msi(adapter->pdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_err(&pdev->dev, + "Unable to allocate MSI interrupt Error: %d\n", + err); + adapter->have_msi = false; + } + + if (!adapter->have_msi) + flags |= IRQF_SHARED; + err = request_irq(adapter->pdev->irq, atl1c_intr, flags, + netdev->name, netdev); + if (err) { + if (netif_msg_ifup(adapter)) + dev_err(&pdev->dev, + "Unable to allocate interrupt Error: %d\n", + err); + if (adapter->have_msi) + pci_disable_msi(adapter->pdev); + return err; + } + if (netif_msg_ifup(adapter)) + dev_dbg(&pdev->dev, "atl1c_request_irq OK\n"); + return err; +} + + +static void atl1c_reset_dma_ring(struct atl1c_adapter *adapter) +{ + /* release tx-pending skbs and reset tx/rx ring index */ + atl1c_clean_tx_ring(adapter, atl1c_trans_normal); + atl1c_clean_tx_ring(adapter, atl1c_trans_high); + atl1c_clean_rx_ring(adapter); +} + +static int atl1c_up(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + netif_carrier_off(netdev); + + err = atl1c_configure(adapter); + if (unlikely(err)) + goto err_up; + + err = atl1c_request_irq(adapter); + if (unlikely(err)) + goto err_up; + + atl1c_check_link_status(adapter); + clear_bit(__AT_DOWN, &adapter->flags); + napi_enable(&adapter->napi); + atl1c_irq_enable(adapter); + netif_start_queue(netdev); + return err; + +err_up: + atl1c_clean_rx_ring(adapter); + return err; +} + +static void atl1c_down(struct atl1c_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + atl1c_del_timer(adapter); + adapter->work_event = 0; /* clear all event */ + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__AT_DOWN, &adapter->flags); + netif_carrier_off(netdev); + napi_disable(&adapter->napi); + atl1c_irq_disable(adapter); + atl1c_free_irq(adapter); + /* disable ASPM if device inactive */ + atl1c_disable_l0s_l1(&adapter->hw); + /* reset MAC to disable all RX/TX */ + atl1c_reset_mac(&adapter->hw); + msleep(1); + + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; + atl1c_reset_dma_ring(adapter); +} + +/** + * atl1c_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
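atl1c_request_irq() above tries MSI first and quietly degrades to a legacy shared interrupt; only the legacy path needs IRQF_SHARED because MSI vectors are never shared. A skeleton of that fallback, condensed from the function above (error reporting omitted):

int flags = 0;

adapter->have_msi = true;
if (pci_enable_msi(pdev))		/* MSI unavailable: fall back */
	adapter->have_msi = false;

if (!adapter->have_msi)
	flags |= IRQF_SHARED;		/* legacy INTx line may be shared */

err = request_irq(pdev->irq, atl1c_intr, flags, netdev->name, netdev);
if (err && adapter->have_msi)
	pci_disable_msi(pdev);		/* undo MSI enable on failure */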
+ */ +static int atl1c_open(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__AT_TESTING, &adapter->flags)) + return -EBUSY; + + /* allocate rx/tx dma buffer & descriptors */ + err = atl1c_setup_ring_resources(adapter); + if (unlikely(err)) + return err; + + err = atl1c_up(adapter); + if (unlikely(err)) + goto err_up; + + return 0; + +err_up: + atl1c_free_irq(adapter); + atl1c_free_ring_resources(adapter); + atl1c_reset_mac(&adapter->hw); + return err; +} + +/** + * atl1c_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + */ +static int atl1c_close(struct net_device *netdev) +{ + struct atl1c_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + set_bit(__AT_DOWN, &adapter->flags); + cancel_work_sync(&adapter->common_task); + atl1c_down(adapter); + atl1c_free_ring_resources(adapter); + return 0; +} + +static int atl1c_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + struct atl1c_hw *hw = &adapter->hw; + u32 wufc = adapter->wol; + + atl1c_disable_l0s_l1(hw); + if (netif_running(netdev)) { + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + atl1c_down(adapter); + } + netif_device_detach(netdev); + + if (wufc) + if (atl1c_phy_to_ps_link(hw) != 0) + dev_dbg(&pdev->dev, "phy power saving failed"); + + atl1c_power_saving(hw, wufc); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int atl1c_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); + atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE); + + atl1c_phy_reset(&adapter->hw); + atl1c_reset_mac(&adapter->hw); + atl1c_phy_init(&adapter->hw); + +#if 0 + AT_READ_REG(&adapter->hw, REG_PM_CTRLSTAT, &pm_data); + pm_data &= ~PM_CTRLSTAT_PME_EN; + AT_WRITE_REG(&adapter->hw, REG_PM_CTRLSTAT, pm_data); +#endif + + netif_device_attach(netdev); + if (netif_running(netdev)) + atl1c_up(adapter); + + return 0; +} +#endif + +static void atl1c_shutdown(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + atl1c_suspend(&pdev->dev); + pci_wake_from_d3(pdev, adapter->wol); + pci_set_power_state(pdev, PCI_D3hot); +} + +static const struct net_device_ops atl1c_netdev_ops = { + .ndo_open = atl1c_open, + .ndo_stop = atl1c_close, + .ndo_validate_addr = eth_validate_addr, + .ndo_start_xmit = atl1c_xmit_frame, + .ndo_set_mac_address = atl1c_set_mac_addr, + .ndo_set_rx_mode = atl1c_set_multi, + .ndo_change_mtu = atl1c_change_mtu, + .ndo_fix_features = atl1c_fix_features, + .ndo_set_features = atl1c_set_features, + .ndo_do_ioctl = atl1c_ioctl, + .ndo_tx_timeout = atl1c_tx_timeout, + .ndo_get_stats = atl1c_get_stats, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = atl1c_netpoll, +#endif +}; + +static int atl1c_init_netdev(struct net_device *netdev, struct pci_dev *pdev) +{ 
+ SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + netdev->netdev_ops = &atl1c_netdev_ops; + netdev->watchdog_timeo = AT_TX_WATCHDOG; + atl1c_set_ethtool_ops(netdev); + + /* TODO: add when ready */ + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_TSO | + NETIF_F_TSO6; + netdev->features = netdev->hw_features | + NETIF_F_HW_VLAN_CTAG_TX; + return 0; +} + +/** + * atl1c_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl1c_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl1c_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + */ +static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl1c_adapter *adapter; + static int cards_found; + + int err = 0; + + /* enable device (incl. PCI PM wakeup and hotplug setup) */ + err = pci_enable_device_mem(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + return err; + } + + /* + * The atl1c chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. + */ + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || + (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { + dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); + goto err_dma; + } + + err = pci_request_regions(pdev, atl1c_driver_name); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_pci_reg; + } + + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct atl1c_adapter)); + if (netdev == NULL) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + err = atl1c_init_netdev(netdev, pdev); + if (err) { + dev_err(&pdev->dev, "init netdevice failed\n"); + goto err_init_netdev; + } + adapter = netdev_priv(netdev); + adapter->bd_number = cards_found; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.adapter = adapter; + adapter->msg_enable = netif_msg_init(-1, atl1c_default_msg); + adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); + if (!adapter->hw.hw_addr) { + err = -EIO; + dev_err(&pdev->dev, "cannot map device registers\n"); + goto err_ioremap; + } + + /* init mii data */ + adapter->mii.dev = netdev; + adapter->mii.mdio_read = atl1c_mdio_read; + adapter->mii.mdio_write = atl1c_mdio_write; + adapter->mii.phy_id_mask = 0x1f; + adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK; + netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64); + setup_timer(&adapter->phy_config_timer, atl1c_phy_config, + (unsigned long)adapter); + /* setup the private structure */ + err = atl1c_sw_init(adapter); + if (err) { + dev_err(&pdev->dev, "net device private data init failed\n"); + goto err_sw_init; + } + atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE); + + /* Init GPHY as early as possible due to power saving issue */ + atl1c_phy_reset(&adapter->hw); + + err = atl1c_reset_mac(&adapter->hw); + if (err) { + err = -EIO; + goto err_reset; + } + + /* reset the controller to + * put the device in a known good starting state 
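The 32-bit DMA limitation described in the probe comment above comes from a single shared high-dword register: two buffers can be DMA targets at the same time only if their addresses agree in bits 63:32. A small user-space demonstration of that windowing idea (the addresses are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t a = 0x00000001F0000000ULL, b = 0x00000002F0000000ULL;

	/* the NIC keeps one shared high dword; buffers whose high dwords
	 * differ live in different 4 GB windows and cannot both be
	 * addressed at once */
	printf("high(a)=0x%08X high(b)=0x%08X -> %s\n",
	       (uint32_t)(a >> 32), (uint32_t)(b >> 32),
	       (a >> 32) == (b >> 32) ? "same window" : "different windows");
	return 0;
}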
*/ + err = atl1c_phy_init(&adapter->hw); + if (err) { + err = -EIO; + goto err_reset; + } + if (atl1c_read_mac_addr(&adapter->hw)) { + /* got a random MAC address, set NET_ADDR_RANDOM to netdev */ + netdev->addr_assign_type = NET_ADDR_RANDOM; + } + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + if (netif_msg_probe(adapter)) + dev_dbg(&pdev->dev, "mac address : %pM\n", + adapter->hw.mac_addr); + + atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr); + INIT_WORK(&adapter->common_task, atl1c_common_task); + adapter->work_event = 0; + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "register netdevice failed\n"); + goto err_register; + } + + if (netif_msg_probe(adapter)) + dev_info(&pdev->dev, "version %s\n", ATL1C_DRV_VERSION); + cards_found++; + return 0; + +err_reset: +err_register: +err_sw_init: + iounmap(adapter->hw.hw_addr); +err_init_netdev: +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * atl1c_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * atl1c_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + */ +static void atl1c_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + unregister_netdev(netdev); + /* restore permanent address */ + atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.perm_mac_addr); + atl1c_phy_disable(&adapter->hw); + + iounmap(adapter->hw.hw_addr); + + pci_release_regions(pdev); + pci_disable_device(pdev); + free_netdev(netdev); +} + +/** + * atl1c_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t atl1c_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + atl1c_down(adapter); + + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * atl1c_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first half of the e1000_resume routine. + */ +static pci_ers_result_t atl1c_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + atl1c_reset_mac(&adapter->hw); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * atl1c_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * its OK to resume normal operation. Implementation resembles the + * second-half of the atl1c_resume routine. + */ +static void atl1c_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1c_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (atl1c_up(adapter)) { + if (netif_msg_hw(adapter)) + dev_err(&pdev->dev, + "Cannot bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static const struct pci_error_handlers atl1c_err_handler = { + .error_detected = atl1c_io_error_detected, + .slot_reset = atl1c_io_slot_reset, + .resume = atl1c_io_resume, +}; + +static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume); + +static struct pci_driver atl1c_driver = { + .name = atl1c_driver_name, + .id_table = atl1c_pci_tbl, + .probe = atl1c_probe, + .remove = atl1c_remove, + .shutdown = atl1c_shutdown, + .err_handler = &atl1c_err_handler, + .driver.pm = &atl1c_pm_ops, +}; + +module_pci_driver(atl1c_driver); diff --git a/addons/atl1e/install.sh b/addons/atl1e/install.sh new file mode 100644 index 00000000..d13abb99 --- /dev/null +++ b/addons/atl1e/install.sh @@ -0,0 +1,4 @@ +if [ "${1}" = "rd" ]; then + echo "Installing module for Atheros L1E Gigabit Ethernet adapter" + ${INSMOD} "/modules/atl1e.ko" ${PARAMS} +fi diff --git a/addons/atl1e/manifest.yml b/addons/atl1e/manifest.yml new file mode 100644 index 00000000..77a403b3 --- /dev/null +++ b/addons/atl1e/manifest.yml @@ -0,0 +1,28 @@ +version: 1 +name: atl1e +description: "Driver for Atheros L1E Gigabit Ethernet adapters" +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true diff --git a/addons/atl1e/src/3.10.108/Makefile b/addons/atl1e/src/3.10.108/Makefile new file mode 100644 index 00000000..fe2e3d47 --- /dev/null +++ b/addons/atl1e/src/3.10.108/Makefile @@ -0,0 +1,2 @@ +obj-m += atl1e.o +atl1e-objs += atl1e_main.o atl1e_hw.o atl1e_ethtool.o atl1e_param.o diff --git a/addons/atl1e/src/3.10.108/atl1e.h b/addons/atl1e/src/3.10.108/atl1e.h new file mode 100644 index 00000000..b5fd9345 --- /dev/null +++ b/addons/atl1e/src/3.10.108/atl1e.h @@ -0,0 +1,508 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * Copyright(c) 2007 xiong huang + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _ATL1E_H_ +#define _ATL1E_H_ + +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/ioport.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/delay.h> +#include <linux/sched.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/udp.h> +#include <linux/mii.h> +#include <linux/io.h> +#include <linux/vmalloc.h> +#include <linux/pagemap.h> +#include <linux/tcp.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/workqueue.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> + +#include "atl1e_hw.h" + +#define PCI_REG_COMMAND 0x04 /* PCI Command Register */ +#define CMD_IO_SPACE 0x0001 +#define CMD_MEMORY_SPACE 0x0002 +#define CMD_BUS_MASTER 0x0004 + +#define BAR_0 0 +#define BAR_1 1 +#define BAR_5 5 + +/* Wake Up Filter Control */ +#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ +#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +#define SPEED_0 0xffff +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* Error Codes */ +#define AT_ERR_EEPROM 1 +#define AT_ERR_PHY 2 +#define AT_ERR_CONFIG 3 +#define AT_ERR_PARAM 4 +#define AT_ERR_MAC_TYPE 5 +#define AT_ERR_PHY_TYPE 6 +#define AT_ERR_PHY_SPEED 7 +#define AT_ERR_PHY_RES 8 +#define AT_ERR_TIMEOUT 9 + +#define MAX_JUMBO_FRAME_SIZE 0x2000 + +#define AT_VLAN_TAG_TO_TPD_TAG(_vlan, _tpd) \ + _tpd = (((_vlan) << (4)) | (((_vlan) >> 13) & 7) |\ + (((_vlan) >> 9) & 8)) + +#define AT_TPD_TAG_TO_VLAN_TAG(_tpd, _vlan) \ + _vlan = (((_tpd) >> 8) | (((_tpd) & 0x77) << 9) |\ + (((_tpd) & 0x88) << 5)) + +#define AT_MAX_RECEIVE_QUEUE 4 +#define AT_PAGE_NUM_PER_QUEUE 2 + +#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL +#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL + +#define AT_TX_WATCHDOG (5 * HZ) +#define AT_MAX_INT_WORK 10 +#define AT_TWSI_EEPROM_TIMEOUT 100 +#define AT_HW_MAX_IDLE_DELAY 10 +#define AT_SUSPEND_LINK_TIMEOUT 28 + +#define AT_REGS_LEN 75 +#define AT_EEPROM_LEN 512 +#define AT_ADV_MASK (ADVERTISE_10_HALF |\ + ADVERTISE_10_FULL |\ + ADVERTISE_100_HALF |\ + ADVERTISE_100_FULL |\ + ADVERTISE_1000_FULL) + +/* tpd word 2 */ +#define TPD_BUFLEN_MASK 0x3FFF +#define TPD_BUFLEN_SHIFT 0 +#define TPD_DMAINT_MASK 0x0001 +#define TPD_DMAINT_SHIFT 14 +#define TPD_PKTNT_MASK 0x0001 +#define TPD_PKTINT_SHIFT 15 +#define TPD_VLANTAG_MASK 0xFFFF +#define TPD_VLAN_SHIFT 16 + +/* tpd word 3 bits 0:4 */ +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 0 +#define TPD_IP_VERSION_MASK 0x0001 +#define TPD_IP_VERSION_SHIFT 1 /* 0 : IPV4, 1 : IPV6 */ +#define TPD_INS_VL_TAG_MASK 0x0001 +#define TPD_INS_VL_TAG_SHIFT 2 +#define TPD_CC_SEGMENT_EN_MASK 0x0001 +#define TPD_CC_SEGMENT_EN_SHIFT 3 +#define TPD_SEGMENT_EN_MASK 0x0001 +#define TPD_SEGMENT_EN_SHIFT 4 + +/* tpd word 3 bits 5:7 if ip version is 0 */ +#define TPD_IP_CSUM_MASK 0x0001 +#define TPD_IP_CSUM_SHIFT 5 +#define TPD_TCP_CSUM_MASK 0x0001 +#define TPD_TCP_CSUM_SHIFT 6 +#define TPD_UDP_CSUM_MASK 0x0001 +#define TPD_UDP_CSUM_SHIFT 7 + +/* tpd word 3 bits 5:7 if ip version is 1 */ +#define TPD_V6_IPHLLO_MASK 0x0007 +#define TPD_V6_IPHLLO_SHIFT 7 + +/* tpd word 3 bits 8:9 */ +#define TPD_VL_TAGGED_MASK 0x0001 +#define TPD_VL_TAGGED_SHIFT 8 +#define TPD_ETHTYPE_MASK 0x0001 +#define TPD_ETHTYPE_SHIFT 9 + +/* tpd word 3 bits 10:13 if ip version is 0 */ +#define TDP_V4_IPHL_MASK 0x000F +#define TPD_V4_IPHL_SHIFT 10 +
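The AT_VLAN_TAG_TO_TPD_TAG() macro above permutes the 802.1Q fields into the TPD tag layout: VID moves to bits 15:4, DEI to bit 3 and PCP to bits 2:0. A user-space copy of the macro, with an explicit 16-bit truncation that the driver gets for free by assigning to a u16, unpacking the result to show the layout:

#include <stdio.h>
#include <stdint.h>

/* same permutation as the driver macro, truncated to 16 bits here */
#define AT_VLAN_TAG_TO_TPD_TAG(_vlan, _tpd) \
	_tpd = (((((_vlan) << 4) | (((_vlan) >> 13) & 7) | \
		(((_vlan) >> 9) & 8))) & 0xFFFF)

int main(void)
{
	uint16_t vlan = (5u << 13) | (1u << 12) | 100;	/* PCP=5 DEI=1 VID=100 */
	uint32_t tpd;

	AT_VLAN_TAG_TO_TPD_TAG(vlan, tpd);
	printf("tpd=0x%04X  VID=%u DEI=%u PCP=%u\n", tpd,
	       (tpd >> 4) & 0xFFF, (tpd >> 3) & 1, tpd & 7);	/* 100, 1, 5 */
	return 0;
}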
+/* tdp word 3 bits 10:13 if ip version is 1 */ +#define TPD_V6_IPHLHI_MASK 0x000F +#define TPD_V6_IPHLHI_SHIFT 10 + +/* tpd word 3 bit 14:31 if segment enabled */ +#define TPD_TCPHDRLEN_MASK 0x000F +#define TPD_TCPHDRLEN_SHIFT 14 +#define TPD_HDRFLAG_MASK 0x0001 +#define TPD_HDRFLAG_SHIFT 18 +#define TPD_MSS_MASK 0x1FFF +#define TPD_MSS_SHIFT 19 + +/* tdp word 3 bit 16:31 if custom csum enabled */ +#define TPD_PLOADOFFSET_MASK 0x00FF +#define TPD_PLOADOFFSET_SHIFT 16 +#define TPD_CCSUMOFFSET_MASK 0x00FF +#define TPD_CCSUMOFFSET_SHIFT 24 + +struct atl1e_tpd_desc { + __le64 buffer_addr; + __le32 word2; + __le32 word3; +}; + +/* how about 0x2000 */ +#define MAX_TX_BUF_LEN 0x2000 +#define MAX_TX_BUF_SHIFT 13 +#define MAX_TSO_SEG_SIZE 0x3c00 + +/* rrs word 1 bit 0:31 */ +#define RRS_RX_CSUM_MASK 0xFFFF +#define RRS_RX_CSUM_SHIFT 0 +#define RRS_PKT_SIZE_MASK 0x3FFF +#define RRS_PKT_SIZE_SHIFT 16 +#define RRS_CPU_NUM_MASK 0x0003 +#define RRS_CPU_NUM_SHIFT 30 + +#define RRS_IS_RSS_IPV4 0x0001 +#define RRS_IS_RSS_IPV4_TCP 0x0002 +#define RRS_IS_RSS_IPV6 0x0004 +#define RRS_IS_RSS_IPV6_TCP 0x0008 +#define RRS_IS_IPV6 0x0010 +#define RRS_IS_IP_FRAG 0x0020 +#define RRS_IS_IP_DF 0x0040 +#define RRS_IS_802_3 0x0080 +#define RRS_IS_VLAN_TAG 0x0100 +#define RRS_IS_ERR_FRAME 0x0200 +#define RRS_IS_IPV4 0x0400 +#define RRS_IS_UDP 0x0800 +#define RRS_IS_TCP 0x1000 +#define RRS_IS_BCAST 0x2000 +#define RRS_IS_MCAST 0x4000 +#define RRS_IS_PAUSE 0x8000 + +#define RRS_ERR_BAD_CRC 0x0001 +#define RRS_ERR_CODE 0x0002 +#define RRS_ERR_DRIBBLE 0x0004 +#define RRS_ERR_RUNT 0x0008 +#define RRS_ERR_RX_OVERFLOW 0x0010 +#define RRS_ERR_TRUNC 0x0020 +#define RRS_ERR_IP_CSUM 0x0040 +#define RRS_ERR_L4_CSUM 0x0080 +#define RRS_ERR_LENGTH 0x0100 +#define RRS_ERR_DES_ADDR 0x0200 + +struct atl1e_recv_ret_status { + u16 seq_num; + u16 hash_lo; + __le32 word1; + u16 pkt_flag; + u16 err_flag; + u16 hash_hi; + u16 vtag; +}; + +enum atl1e_dma_req_block { + atl1e_dma_req_128 = 0, + atl1e_dma_req_256 = 1, + atl1e_dma_req_512 = 2, + atl1e_dma_req_1024 = 3, + atl1e_dma_req_2048 = 4, + atl1e_dma_req_4096 = 5 +}; + +enum atl1e_rrs_type { + atl1e_rrs_disable = 0, + atl1e_rrs_ipv4 = 1, + atl1e_rrs_ipv4_tcp = 2, + atl1e_rrs_ipv6 = 4, + atl1e_rrs_ipv6_tcp = 8 +}; + +enum atl1e_nic_type { + athr_l1e = 0, + athr_l2e_revA = 1, + athr_l2e_revB = 2 +}; + +struct atl1e_hw_stats { + /* rx */ + unsigned long rx_ok; /* The number of good packet received. */ + unsigned long rx_bcast; /* The number of good broadcast packet received. */ + unsigned long rx_mcast; /* The number of good multicast packet received. */ + unsigned long rx_pause; /* The number of Pause packet received. */ + unsigned long rx_ctrl; /* The number of Control packet received other than Pause frame. */ + unsigned long rx_fcs_err; /* The number of packets with bad FCS. */ + unsigned long rx_len_err; /* The number of packets with mismatch of length field and actual size. */ + unsigned long rx_byte_cnt; /* The number of bytes of good packet received. FCS is NOT included. */ + unsigned long rx_runt; /* The number of packets received that are less than 64 byte long and with good FCS. */ + unsigned long rx_frag; /* The number of packets received that are less than 64 byte long and with bad FCS. */ + unsigned long rx_sz_64; /* The number of good and bad packets received that are 64 byte long. */ + unsigned long rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. 
*/ + unsigned long rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */ + unsigned long rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. */ + unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */ + unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */ + unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */ + unsigned long rx_sz_ov; /* The number of good and bad packets received that are more than MTU size truncated by Selene. */ + unsigned long rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */ + unsigned long rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */ + unsigned long rx_align_err; /* Alignment Error */ + unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */ + unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */ + unsigned long rx_err_addr; /* The number of packets dropped due to address filtering. */ + + /* tx */ + unsigned long tx_ok; /* The number of good packet transmitted. */ + unsigned long tx_bcast; /* The number of good broadcast packet transmitted. */ + unsigned long tx_mcast; /* The number of good multicast packet transmitted. */ + unsigned long tx_pause; /* The number of Pause packet transmitted. */ + unsigned long tx_exc_defer; /* The number of packets transmitted with excessive deferral. */ + unsigned long tx_ctrl; /* The number of packets transmitted is a control frame, excluding Pause frame. */ + unsigned long tx_defer; /* The number of packets transmitted that is deferred. */ + unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */ + unsigned long tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */ + unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */ + unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */ + unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */ + unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */ + unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */ + unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */ + unsigned long tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */ + unsigned long tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */ + unsigned long tx_late_col; /* The number of packets transmitted with late collisions. */ + unsigned long tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. 
*/ + unsigned long tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */ + unsigned long tx_rd_eop; /* The number of times that read beyond the EOP into the next frame area when TRD was not written timely */ + unsigned long tx_len_err; /* The number of transmit packets with length field does NOT match the actual frame size. */ + unsigned long tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. */ + unsigned long tx_bcast_byte; /* The byte count of broadcast packet transmitted, excluding FCS. */ + unsigned long tx_mcast_byte; /* The byte count of multicast packet transmitted, excluding FCS. */ +}; + +struct atl1e_hw { + u8 __iomem *hw_addr; /* inner register address */ + resource_size_t mem_rang; + struct atl1e_adapter *adapter; + enum atl1e_nic_type nic_type; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u16 pci_cmd_word; + u8 mac_addr[ETH_ALEN]; + u8 perm_mac_addr[ETH_ALEN]; + u8 preamble_len; + u16 max_frame_size; + u16 rx_jumbo_th; + u16 tx_jumbo_th; + + u16 media_type; +#define MEDIA_TYPE_AUTO_SENSOR 0 +#define MEDIA_TYPE_100M_FULL 1 +#define MEDIA_TYPE_100M_HALF 2 +#define MEDIA_TYPE_10M_FULL 3 +#define MEDIA_TYPE_10M_HALF 4 + + u16 autoneg_advertised; +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + u16 imt; /* Interrupt Moderator timer ( 2us resolution) */ + u16 ict; /* Interrupt Clear timer (2us resolution) */ + u32 smb_timer; + u16 rrd_thresh; /* Threshold of number of RRD produced to trigger + interrupt request */ + u16 tpd_thresh; + u16 rx_count_down; /* 2us resolution */ + u16 tx_count_down; + + u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. 
*/ + enum atl1e_rrs_type rrs_type; + u32 base_cpu; + u32 indirect_tab; + + enum atl1e_dma_req_block dmar_block; + enum atl1e_dma_req_block dmaw_block; + u8 dmaw_dly_cnt; + u8 dmar_dly_cnt; + + bool phy_configured; + bool re_autoneg; + bool emi_ca; +}; + +/* + * wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct atl1e_tx_buffer { + struct sk_buff *skb; + u16 flags; +#define ATL1E_TX_PCIMAP_SINGLE 0x0001 +#define ATL1E_TX_PCIMAP_PAGE 0x0002 +#define ATL1E_TX_PCIMAP_TYPE_MASK 0x0003 + u16 length; + dma_addr_t dma; +}; + +#define ATL1E_SET_PCIMAP_TYPE(tx_buff, type) do { \ + ((tx_buff)->flags) &= ~ATL1E_TX_PCIMAP_TYPE_MASK; \ + ((tx_buff)->flags) |= (type); \ + } while (0) + +struct atl1e_rx_page { + dma_addr_t dma; /* receive page DMA address */ + u8 *addr; /* receive page virtual address */ + dma_addr_t write_offset_dma; /* the DMA address which contains the + receive data offset in the page */ + u32 *write_offset_addr; /* the virtual address which contains + the receive data offset in the page */ + u32 read_offset; /* the offset where we have read */ +}; + +struct atl1e_rx_page_desc { + struct atl1e_rx_page rx_page[AT_PAGE_NUM_PER_QUEUE]; + u8 rx_using; + u16 rx_nxseq; +}; + +/* transmit packet descriptor (tpd) ring */ +struct atl1e_tx_ring { + struct atl1e_tpd_desc *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 count; /* number of descriptors in the ring */ + rwlock_t tx_lock; + u16 next_to_use; + atomic_t next_to_clean; + struct atl1e_tx_buffer *tx_buffer; + dma_addr_t cmb_dma; + u32 *cmb; +}; + +/* receive packet descriptor ring */ +struct atl1e_rx_ring { + void *desc; + dma_addr_t dma; + int size; + u32 page_size; /* bytes length of rxf page */ + u32 real_page_size; /* real_page_size = page_size + jumbo + align */ + struct atl1e_rx_page_desc rx_page_desc[AT_MAX_RECEIVE_QUEUE]; +}; + +/* board specific private data structure */ +struct atl1e_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct napi_struct napi; + struct mii_if_info mii; /* MII interface info */ + struct atl1e_hw hw; + struct atl1e_hw_stats hw_stats; + + u32 wol; + u16 link_speed; + u16 link_duplex; + + spinlock_t mdio_lock; + spinlock_t tx_lock; + atomic_t irq_sem; + + struct work_struct reset_task; + struct work_struct link_chg_task; + struct timer_list watchdog_timer; + struct timer_list phy_config_timer; + + /* All Descriptor memory */ + dma_addr_t ring_dma; + void *ring_vir_addr; + u32 ring_size; + + struct atl1e_tx_ring tx_ring; + struct atl1e_rx_ring rx_ring; + int num_rx_queues; + unsigned long flags; +#define __AT_TESTING 0x0001 +#define __AT_RESETTING 0x0002 +#define __AT_DOWN 0x0003 + + u32 bd_number; /* board number */ + u32 pci_state[16]; + u32 *config_space; +}; + +#define AT_WRITE_REG(a, reg, value) ( \ + writel((value), ((a)->hw_addr + reg))) + +#define AT_WRITE_FLUSH(a) (\ + readl((a)->hw_addr)) + +#define AT_READ_REG(a, reg) ( \ + readl((a)->hw_addr + reg)) + +#define AT_WRITE_REGB(a, reg, value) (\ + writeb((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGB(a, reg) (\ + readb((a)->hw_addr + reg)) + +#define AT_WRITE_REGW(a, reg, value) (\ + writew((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGW(a, reg) (\ + readw((a)->hw_addr + reg)) + +#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), (((a)->hw_addr + reg) + ((offset) << 2)))) + +#define AT_READ_REG_ARRAY(a, reg, offset) ( \ + readl(((a)->hw_addr + reg) + ((offset) << 2))) + +extern
char atl1e_driver_name[]; +extern char atl1e_driver_version[]; + +extern void atl1e_check_options(struct atl1e_adapter *adapter); +extern int atl1e_up(struct atl1e_adapter *adapter); +extern void atl1e_down(struct atl1e_adapter *adapter); +extern void atl1e_reinit_locked(struct atl1e_adapter *adapter); +extern s32 atl1e_reset_hw(struct atl1e_hw *hw); +extern void atl1e_set_ethtool_ops(struct net_device *netdev); +#endif /* _ATL1_E_H_ */ diff --git a/addons/atl1e/src/3.10.108/atl1e_ethtool.c b/addons/atl1e/src/3.10.108/atl1e_ethtool.c new file mode 100644 index 00000000..82b23861 --- /dev/null +++ b/addons/atl1e/src/3.10.108/atl1e_ethtool.c @@ -0,0 +1,392 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#include +#include +#include + +#include "atl1e.h" + +static int atl1e_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->nic_type == athr_l1e) + ecmd->supported |= SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_TP; + + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->advertising |= hw->autoneg_advertised; + + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (adapter->link_speed != SPEED_0) { + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, -1); + ecmd->duplex = -1; + } + + ecmd->autoneg = AUTONEG_ENABLE; + return 0; +} + +static int atl1e_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + u16 adv4, adv9; + + if ((ecmd->advertising&ADVERTISE_1000_FULL)) { + if (hw->nic_type == athr_l1e) { + hw->autoneg_advertised = + ecmd->advertising & AT_ADV_MASK; + } else { + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + } else if (ecmd->advertising&ADVERTISE_1000_HALF) { + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } else { + hw->autoneg_advertised = + ecmd->advertising & AT_ADV_MASK; + } + ecmd->advertising = hw->autoneg_advertised | + ADVERTISED_TP | ADVERTISED_Autoneg; + + adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL; + adv9 = hw->mii_1000t_ctrl_reg & 
~MII_AT001_CR_1000T_SPEED_MASK; + if (hw->autoneg_advertised & ADVERTISE_10_HALF) + adv4 |= ADVERTISE_10HALF; + if (hw->autoneg_advertised & ADVERTISE_10_FULL) + adv4 |= ADVERTISE_10FULL; + if (hw->autoneg_advertised & ADVERTISE_100_HALF) + adv4 |= ADVERTISE_100HALF; + if (hw->autoneg_advertised & ADVERTISE_100_FULL) + adv4 |= ADVERTISE_100FULL; + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) + adv9 |= ADVERTISE_1000FULL; + + if (adv4 != hw->mii_autoneg_adv_reg || + adv9 != hw->mii_1000t_ctrl_reg) { + hw->mii_autoneg_adv_reg = adv4; + hw->mii_1000t_ctrl_reg = adv9; + hw->re_autoneg = true; + } + + } else { + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + atl1e_down(adapter); + atl1e_up(adapter); + } else + atl1e_reset_hw(&adapter->hw); + + clear_bit(__AT_RESETTING, &adapter->flags); + return 0; +} + +static u32 atl1e_get_msglevel(struct net_device *netdev) +{ +#ifdef DBG + return 1; +#else + return 0; +#endif +} + +static int atl1e_get_regs_len(struct net_device *netdev) +{ + return AT_REGS_LEN * sizeof(u32); +} + +static void atl1e_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, AT_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = AT_READ_REG(hw, REG_VPD_CAP); + regs_buff[1] = AT_READ_REG(hw, REG_SPI_FLASH_CTRL); + regs_buff[2] = AT_READ_REG(hw, REG_SPI_FLASH_CONFIG); + regs_buff[3] = AT_READ_REG(hw, REG_TWSI_CTRL); + regs_buff[4] = AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL); + regs_buff[5] = AT_READ_REG(hw, REG_MASTER_CTRL); + regs_buff[6] = AT_READ_REG(hw, REG_MANUAL_TIMER_INIT); + regs_buff[7] = AT_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT); + regs_buff[8] = AT_READ_REG(hw, REG_GPHY_CTRL); + regs_buff[9] = AT_READ_REG(hw, REG_CMBDISDMA_TIMER); + regs_buff[10] = AT_READ_REG(hw, REG_IDLE_STATUS); + regs_buff[11] = AT_READ_REG(hw, REG_MDIO_CTRL); + regs_buff[12] = AT_READ_REG(hw, REG_SERDES_LOCK); + regs_buff[13] = AT_READ_REG(hw, REG_MAC_CTRL); + regs_buff[14] = AT_READ_REG(hw, REG_MAC_IPG_IFG); + regs_buff[15] = AT_READ_REG(hw, REG_MAC_STA_ADDR); + regs_buff[16] = AT_READ_REG(hw, REG_MAC_STA_ADDR+4); + regs_buff[17] = AT_READ_REG(hw, REG_RX_HASH_TABLE); + regs_buff[18] = AT_READ_REG(hw, REG_RX_HASH_TABLE+4); + regs_buff[19] = AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL); + regs_buff[20] = AT_READ_REG(hw, REG_MTU); + regs_buff[21] = AT_READ_REG(hw, REG_WOL_CTRL); + regs_buff[22] = AT_READ_REG(hw, REG_SRAM_TRD_ADDR); + regs_buff[23] = AT_READ_REG(hw, REG_SRAM_TRD_LEN); + regs_buff[24] = AT_READ_REG(hw, REG_SRAM_RXF_ADDR); + regs_buff[25] = AT_READ_REG(hw, REG_SRAM_RXF_LEN); + regs_buff[26] = AT_READ_REG(hw, REG_SRAM_TXF_ADDR); + regs_buff[27] = AT_READ_REG(hw, REG_SRAM_TXF_LEN); + regs_buff[28] = AT_READ_REG(hw, REG_SRAM_TCPH_ADDR); + regs_buff[29] = AT_READ_REG(hw, REG_SRAM_PKTH_ADDR); + + atl1e_read_phy_reg(hw, MII_BMCR, &phy_data); + regs_buff[73] = (u32)phy_data; + atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); + regs_buff[74] = (u32)phy_data; +} + +static int atl1e_get_eeprom_len(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (!atl1e_check_eeprom_exist(&adapter->hw)) + return AT_EEPROM_LEN; + else + return 0; +} + +static int atl1e_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct 
atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 *eeprom_buff; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EINVAL; + + if (atl1e_check_eeprom_exist(hw)) /* not exist */ + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + + eeprom_buff = kmalloc(sizeof(u32) * + (last_dword - first_dword + 1), GFP_KERNEL); + if (eeprom_buff == NULL) + return -ENOMEM; + + for (i = first_dword; i < last_dword; i++) { + if (!atl1e_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) { + kfree(eeprom_buff); + return -EIO; + } + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int atl1e_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 *eeprom_buff; + u32 *ptr; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EINVAL; + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + eeprom_buff = kmalloc(AT_EEPROM_LEN, GFP_KERNEL); + if (eeprom_buff == NULL) + return -ENOMEM; + + ptr = eeprom_buff; + + if (eeprom->offset & 3) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + if (!atl1e_read_eeprom(hw, first_dword * 4, &(eeprom_buff[0]))) { + ret_val = -EIO; + goto out; + } + ptr++; + } + if (((eeprom->offset + eeprom->len) & 3)) { + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + + if (!atl1e_read_eeprom(hw, last_dword * 4, + &(eeprom_buff[last_dword - first_dword]))) { + ret_val = -EIO; + goto out; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_dword - first_dword + 1; i++) { + if (!atl1e_write_eeprom(hw, ((first_dword + i) * 4), + eeprom_buff[i])) { + ret_val = -EIO; + goto out; + } + } +out: + kfree(eeprom_buff); + return ret_val; +} + +static void atl1e_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, atl1e_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->n_stats = 0; + drvinfo->testinfo_len = 0; + drvinfo->regdump_len = atl1e_get_regs_len(netdev); + drvinfo->eedump_len = atl1e_get_eeprom_len(netdev); +} + +static void atl1e_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC | WAKE_PHY; + wol->wolopts = 0; + + if (adapter->wol & AT_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & AT_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & AT_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & AT_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & AT_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int atl1e_set_wol(struct 
net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | + WAKE_UCAST | WAKE_MCAST | WAKE_BCAST)) + return -EOPNOTSUPP; + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= AT_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= AT_WUFC_LNKC; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int atl1e_nway_reset(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + atl1e_reinit_locked(adapter); + return 0; +} + +static const struct ethtool_ops atl1e_ethtool_ops = { + .get_settings = atl1e_get_settings, + .set_settings = atl1e_set_settings, + .get_drvinfo = atl1e_get_drvinfo, + .get_regs_len = atl1e_get_regs_len, + .get_regs = atl1e_get_regs, + .get_wol = atl1e_get_wol, + .set_wol = atl1e_set_wol, + .get_msglevel = atl1e_get_msglevel, + .nway_reset = atl1e_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = atl1e_get_eeprom_len, + .get_eeprom = atl1e_get_eeprom, + .set_eeprom = atl1e_set_eeprom, +}; + +void atl1e_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops); +} diff --git a/addons/atl1e/src/3.10.108/atl1e_hw.c b/addons/atl1e/src/3.10.108/atl1e_hw.c new file mode 100644 index 00000000..923063d2 --- /dev/null +++ b/addons/atl1e/src/3.10.108/atl1e_hw.c @@ -0,0 +1,651 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include +#include +#include +#include + +#include "atl1e.h" + +/* + * check_eeprom_exist + * return 0 if eeprom exist + */ +int atl1e_check_eeprom_exist(struct atl1e_hw *hw) +{ + u32 value; + + value = AT_READ_REG(hw, REG_SPI_FLASH_CTRL); + if (value & SPI_FLASH_CTRL_EN_VPD) { + value &= ~SPI_FLASH_CTRL_EN_VPD; + AT_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); + } + value = AT_READ_REGW(hw, REG_PCIE_CAP_LIST); + return ((value & 0xFF00) == 0x6C00) ? 
0 : 1; +} + +void atl1e_hw_set_mac_addr(struct atl1e_hw *hw) +{ + u32 value; + /* + * 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword + */ + value = (((u32)hw->mac_addr[2]) << 24) | + (((u32)hw->mac_addr[3]) << 16) | + (((u32)hw->mac_addr[4]) << 8) | + (((u32)hw->mac_addr[5])) ; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); + /* hight dword */ + value = (((u32)hw->mac_addr[0]) << 8) | + (((u32)hw->mac_addr[1])) ; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); +} + +/* + * atl1e_get_permanent_address + * return 0 if get valid mac address, + */ +static int atl1e_get_permanent_address(struct atl1e_hw *hw) +{ + u32 addr[2]; + u32 i; + u32 twsi_ctrl_data; + u8 eth_addr[ETH_ALEN]; + + if (is_valid_ether_addr(hw->perm_mac_addr)) + return 0; + + /* init */ + addr[0] = addr[1] = 0; + + if (!atl1e_check_eeprom_exist(hw)) { + /* eeprom exist */ + twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL); + twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; + AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data); + for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) { + msleep(10); + twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL); + if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0) + break; + } + if (i >= AT_TWSI_EEPROM_TIMEOUT) + return AT_ERR_TIMEOUT; + } + + /* maybe MAC-address is from BIOS */ + addr[0] = AT_READ_REG(hw, REG_MAC_STA_ADDR); + addr[1] = AT_READ_REG(hw, REG_MAC_STA_ADDR + 4); + *(u32 *) ð_addr[2] = swab32(addr[0]); + *(u16 *) ð_addr[0] = swab16(*(u16 *)&addr[1]); + + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + + return AT_ERR_EEPROM; +} + +bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value) +{ + return true; +} + +bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value) +{ + int i; + u32 control; + + if (offset & 3) + return false; /* address do not align */ + + AT_WRITE_REG(hw, REG_VPD_DATA, 0); + control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; + AT_WRITE_REG(hw, REG_VPD_CAP, control); + + for (i = 0; i < 10; i++) { + msleep(2); + control = AT_READ_REG(hw, REG_VPD_CAP); + if (control & VPD_CAP_VPD_FLAG) + break; + } + if (control & VPD_CAP_VPD_FLAG) { + *p_value = AT_READ_REG(hw, REG_VPD_DATA); + return true; + } + return false; /* timeout */ +} + +void atl1e_force_ps(struct atl1e_hw *hw) +{ + AT_WRITE_REGW(hw, REG_GPHY_CTRL, + GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET); +} + +/* + * Reads the adapter's MAC address from the EEPROM + * + * hw - Struct containing variables accessed by shared code + */ +int atl1e_read_mac_addr(struct atl1e_hw *hw) +{ + int err = 0; + + err = atl1e_get_permanent_address(hw); + if (err) + return AT_ERR_EEPROM; + memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr)); + return 0; +} + +/* + * atl1e_hash_mc_addr + * purpose + * set hash value for a multicast address + */ +u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr) +{ + u32 crc32; + u32 value = 0; + int i; + + crc32 = ether_crc_le(6, mc_addr); + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); + + return value; +} + +/* + * Sets the bit in the multicast table corresponding to the hash value. + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta; + + /* + * The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. 
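+ * (As implemented just below, only the top six bits of the CRC are consumed: bit 31 selects which of the two 32-bit hash registers to touch, and bits 30:26 select the bit inside it. Worked example: hash_value 0xF8000000 gives hash_reg = 1 and hash_bit = 30.)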
So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by the + * upper 7 bits of the hash value and the bit within that + * register are determined by the lower 5 bits of the value. + */ + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + + mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); + + mta |= (1 << hash_bit); + + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); +} +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +int atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data) +{ + u32 val; + int i; + + val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | + MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + + wmb(); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = AT_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + wmb(); + } + if (!(val & (MDIO_START | MDIO_BUSY))) { + *phy_data = (u16)val; + return 0; + } + + return AT_ERR_PHY; +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +int atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data) +{ + int i; + u32 val; + + val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | + (reg_addr&MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | + MDIO_SUP_PREAMBLE | + MDIO_START | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + wmb(); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = AT_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + wmb(); + } + + if (!(val & (MDIO_START | MDIO_BUSY))) + return 0; + + return AT_ERR_PHY; +} + +/* + * atl1e_init_pcie - init PCIE module + */ +static void atl1e_init_pcie(struct atl1e_hw *hw) +{ + u32 value; + /* comment 2lines below to save more power when sususpend + value = LTSSM_TEST_MODE_DEF; + AT_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value); + */ + + /* pcie flow control mode change */ + value = AT_READ_REG(hw, 0x1008); + value |= 0x8000; + AT_WRITE_REG(hw, 0x1008, value); +} +/* + * Configures PHY autoneg and flow control advertisement settings + * + * hw - Struct containing variables accessed by shared code + */ +static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + if (0 != hw->mii_autoneg_adv_reg) + return 0; + /* Read the MII Auto-Neg Advertisement Register (Address 4/9). */ + mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; + mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK; + + /* + * Need to parse autoneg_advertised and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~ADVERTISE_ALL; + mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK; + + /* + * Need to parse MediaType and setup the + * appropriate PHY registers. 
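+ * (The switch below maps each forced media type to a single advertisement bit, while MEDIA_TYPE_AUTO_SENSOR advertises every speed the NIC supports; the pause bits are then OR-ed in unconditionally. Note that the 100M_HALF, 10M_FULL and 10M_HALF cases OR in the driver's own ADVERTISE_100_HALF/ADVERTISE_10_FULL/ADVERTISE_10_HALF values (0x0004/0x0002/0x0001) rather than the mii.h ADVERTISE_* register bits, which looks suspect for a value later written to MII_ADVERTISE.)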
+ */ + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + mii_autoneg_adv_reg |= ADVERTISE_ALL; + hw->autoneg_advertised = ADVERTISE_ALL; + if (hw->nic_type == athr_l1e) { + mii_1000t_ctrl_reg |= ADVERTISE_1000FULL; + hw->autoneg_advertised |= ADVERTISE_1000_FULL; + } + break; + + case MEDIA_TYPE_100M_FULL: + mii_autoneg_adv_reg |= ADVERTISE_100FULL; + hw->autoneg_advertised = ADVERTISE_100_FULL; + break; + + case MEDIA_TYPE_100M_HALF: + mii_autoneg_adv_reg |= ADVERTISE_100_HALF; + hw->autoneg_advertised = ADVERTISE_100_HALF; + break; + + case MEDIA_TYPE_10M_FULL: + mii_autoneg_adv_reg |= ADVERTISE_10_FULL; + hw->autoneg_advertised = ADVERTISE_10_FULL; + break; + + default: + mii_autoneg_adv_reg |= ADVERTISE_10_HALF; + hw->autoneg_advertised = ADVERTISE_10_HALF; + break; + } + + /* flow control fixed to enable all */ + mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + + hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; + hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; + + ret_val = atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { + ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return 0; +} + + +/* + * Resets the PHY and makes all configuration take effect + * + * hw - Struct containing variables accessed by shared code + * + * Sets bits 15 and 12 of the MII control register (for F001 bug) + */ +int atl1e_phy_commit(struct atl1e_hw *hw) +{ + struct atl1e_adapter *adapter = hw->adapter; + int ret_val; + u16 phy_data; + + phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART; + + ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data); + if (ret_val) { + u32 val; + int i; + /************************************** + * pcie serdes link may be down! + **************************************/ + for (i = 0; i < 25; i++) { + msleep(1); + val = AT_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if (0 != (val & (MDIO_START | MDIO_BUSY))) { + netdev_err(adapter->netdev, + "pcie linkdown at least for 25ms\n"); + return ret_val; + } + + netdev_err(adapter->netdev, "pcie linkup after %d ms\n", i); + } + return 0; +} + +int atl1e_phy_init(struct atl1e_hw *hw) +{ + struct atl1e_adapter *adapter = hw->adapter; + s32 ret_val; + u16 phy_val; + + if (hw->phy_configured) { + if (hw->re_autoneg) { + hw->re_autoneg = false; + return atl1e_restart_autoneg(hw); + } + return 0; + } + + /* RESET GPHY Core */ + AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT); + msleep(2); + AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT | + GPHY_CTRL_EXT_RESET); + msleep(2); + + /* patches */ + /* p1. enable hibernation mode */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0xB); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0xBC00); + if (ret_val) + return ret_val; + /* p2. set Class A/B for all modes */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0); + if (ret_val) + return ret_val; + phy_val = 0x02ef; + /* remove Class AB */ + /* phy_val = hw->emi_ca ? 0x02ef : 0x02df; */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, phy_val); + if (ret_val) + return ret_val; + /* p3. 10B ??? */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x12); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x4C04); + if (ret_val) + return ret_val; + /* p4.
1000T power */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x4); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x8BBB); + if (ret_val) + return ret_val; + + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x5); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x2C46); + if (ret_val) + return ret_val; + + msleep(1); + + /*Enable PHY LinkChange Interrupt */ + ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00); + if (ret_val) { + netdev_err(adapter->netdev, + "Error enable PHY linkChange Interrupt\n"); + return ret_val; + } + /* setup AutoNeg parameters */ + ret_val = atl1e_phy_setup_autoneg_adv(hw); + if (ret_val) { + netdev_err(adapter->netdev, + "Error Setting up Auto-Negotiation\n"); + return ret_val; + } + /* SW.Reset & En-Auto-Neg to restart Auto-Neg*/ + netdev_dbg(adapter->netdev, "Restarting Auto-Negotiation\n"); + ret_val = atl1e_phy_commit(hw); + if (ret_val) { + netdev_err(adapter->netdev, "Error resetting the phy\n"); + return ret_val; + } + + hw->phy_configured = true; + + return 0; +} + +/* + * Reset the transmit and receive units; mask and clear all interrupts. + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +int atl1e_reset_hw(struct atl1e_hw *hw) +{ + struct atl1e_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + + u32 idle_status_data = 0; + u16 pci_cfg_cmd_word = 0; + int timeout = 0; + + /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */ + pci_read_config_word(pdev, PCI_REG_COMMAND, &pci_cfg_cmd_word); + if ((pci_cfg_cmd_word & (CMD_IO_SPACE | + CMD_MEMORY_SPACE | CMD_BUS_MASTER)) + != (CMD_IO_SPACE | CMD_MEMORY_SPACE | CMD_BUS_MASTER)) { + pci_cfg_cmd_word |= (CMD_IO_SPACE | + CMD_MEMORY_SPACE | CMD_BUS_MASTER); + pci_write_config_word(pdev, PCI_REG_COMMAND, pci_cfg_cmd_word); + } + + /* + * Issue Soft Reset to the MAC. This will reset the chip's + * transmit, receive, DMA. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + AT_WRITE_REG(hw, REG_MASTER_CTRL, + MASTER_CTRL_LED_MODE | MASTER_CTRL_SOFT_RST); + wmb(); + msleep(1); + + /* Wait at least 10ms for All module to be Idle */ + for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { + idle_status_data = AT_READ_REG(hw, REG_IDLE_STATUS); + if (idle_status_data == 0) + break; + msleep(1); + cpu_relax(); + } + + if (timeout >= AT_HW_MAX_IDLE_DELAY) { + netdev_err(adapter->netdev, + "MAC state machine can't be idle since disabled for 10ms second\n"); + return AT_ERR_TIMEOUT; + } + + return 0; +} + + +/* + * Performs basic configuration of the adapter. + * + * hw - Struct containing variables accessed by shared code + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes multicast table, + * and Calls routines to setup link + * Leaves the transmit and receive units disabled and uninitialized. + */ +int atl1e_init_hw(struct atl1e_hw *hw) +{ + s32 ret_val = 0; + + atl1e_init_pcie(hw); + + /* Zero out the Multicast HASH table */ + /* clear the old settings from the multicast hash table */ + AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + ret_val = atl1e_phy_init(hw); + + return ret_val; +} + +/* + * Detects the current speed and duplex settings of the hardware. 
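+ * (It decodes the vendor-specific PHY status register, MII_AT001_PSSR at offset 0x11. For example, a phy_data of 0xA800 has the SPD_DPLX_RESOLVED and DPLX bits set and a SPEED field of 0x8000, so it reads back as 1000 Mb/s full duplex.)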
+ * + * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex) +{ + int err; + u16 phy_data; + + /* Read PHY Specific Status Register (17) */ + err = atl1e_read_phy_reg(hw, MII_AT001_PSSR, &phy_data); + if (err) + return err; + + if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED)) + return AT_ERR_PHY_RES; + + switch (phy_data & MII_AT001_PSSR_SPEED) { + case MII_AT001_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case MII_AT001_PSSR_100MBS: + *speed = SPEED_100; + break; + case MII_AT001_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + return AT_ERR_PHY_SPEED; + break; + } + + if (phy_data & MII_AT001_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; +} + +int atl1e_restart_autoneg(struct atl1e_hw *hw) +{ + int err = 0; + + err = atl1e_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); + if (err) + return err; + + if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { + err = atl1e_write_phy_reg(hw, MII_CTRL1000, + hw->mii_1000t_ctrl_reg); + if (err) + return err; + } + + err = atl1e_write_phy_reg(hw, MII_BMCR, + BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART); + return err; +} + diff --git a/addons/atl1e/src/3.10.108/atl1e_hw.h b/addons/atl1e/src/3.10.108/atl1e_hw.h new file mode 100644 index 00000000..74df16ae --- /dev/null +++ b/addons/atl1e/src/3.10.108/atl1e_hw.h @@ -0,0 +1,690 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _ATHL1E_HW_H_ +#define _ATHL1E_HW_H_ + +#include +#include + +struct atl1e_adapter; +struct atl1e_hw; + +/* function prototype */ +s32 atl1e_reset_hw(struct atl1e_hw *hw); +s32 atl1e_read_mac_addr(struct atl1e_hw *hw); +s32 atl1e_init_hw(struct atl1e_hw *hw); +s32 atl1e_phy_commit(struct atl1e_hw *hw); +s32 atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex); +u32 atl1e_auto_get_fc(struct atl1e_adapter *adapter, u16 duplex); +u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr); +void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value); +s32 atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data); +s32 atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data); +s32 atl1e_validate_mdi_setting(struct atl1e_hw *hw); +void atl1e_hw_set_mac_addr(struct atl1e_hw *hw); +bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value); +bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value); +s32 atl1e_phy_enter_power_saving(struct atl1e_hw *hw); +s32 atl1e_phy_leave_power_saving(struct atl1e_hw *hw); +s32 atl1e_phy_init(struct atl1e_hw *hw); +int atl1e_check_eeprom_exist(struct atl1e_hw *hw); +void atl1e_force_ps(struct atl1e_hw *hw); +s32 atl1e_restart_autoneg(struct atl1e_hw *hw); + +/* register definition */ +#define REG_PM_CTRLSTAT 0x44 + +#define REG_PCIE_CAP_LIST 0x58 + +#define REG_DEVICE_CAP 0x5C +#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 +#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0 + +#define REG_DEVICE_CTRL 0x60 +#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7 +#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5 +#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7 +#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12 + +#define REG_VPD_CAP 0x6C +#define VPD_CAP_ID_MASK 0xff +#define VPD_CAP_ID_SHIFT 0 +#define VPD_CAP_NEXT_PTR_MASK 0xFF +#define VPD_CAP_NEXT_PTR_SHIFT 8 +#define VPD_CAP_VPD_ADDR_MASK 0x7FFF +#define VPD_CAP_VPD_ADDR_SHIFT 16 +#define VPD_CAP_VPD_FLAG 0x80000000 + +#define REG_VPD_DATA 0x70 + +#define REG_SPI_FLASH_CTRL 0x200 +#define SPI_FLASH_CTRL_STS_NON_RDY 0x1 +#define SPI_FLASH_CTRL_STS_WEN 0x2 +#define SPI_FLASH_CTRL_STS_WPEN 0x80 +#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF +#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0 +#define SPI_FLASH_CTRL_INS_MASK 0x7 +#define SPI_FLASH_CTRL_INS_SHIFT 8 +#define SPI_FLASH_CTRL_START 0x800 +#define SPI_FLASH_CTRL_EN_VPD 0x2000 +#define SPI_FLASH_CTRL_LDSTART 0x8000 +#define SPI_FLASH_CTRL_CS_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HI_SHIFT 16 +#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18 +#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20 +#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22 +#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3 +#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24 +#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3 +#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26 +#define SPI_FLASH_CTRL_WAIT_READY 0x10000000 + +#define REG_SPI_ADDR 0x204 + +#define REG_SPI_DATA 0x208 + +#define REG_SPI_FLASH_CONFIG 0x20C +#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF +#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0 +#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3 +#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24 +#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000 + + +#define REG_SPI_FLASH_OP_PROGRAM 0x210 +#define REG_SPI_FLASH_OP_SC_ERASE 0x211 +#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212 +#define REG_SPI_FLASH_OP_RDID 0x213 +#define REG_SPI_FLASH_OP_WREN 0x214 +#define REG_SPI_FLASH_OP_RDSR 0x215 +#define REG_SPI_FLASH_OP_WRSR 0x216 +#define 
REG_SPI_FLASH_OP_READ 0x217 + +#define REG_TWSI_CTRL 0x218 +#define TWSI_CTRL_LD_OFFSET_MASK 0xFF +#define TWSI_CTRL_LD_OFFSET_SHIFT 0 +#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7 +#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 +#define TWSI_CTRL_SW_LDSTART 0x800 +#define TWSI_CTRL_HW_LDSTART 0x1000 +#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F +#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 +#define TWSI_CTRL_LD_EXIST 0x400000 +#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23 +#define TWSI_CTRL_FREQ_SEL_100K 0 +#define TWSI_CTRL_FREQ_SEL_200K 1 +#define TWSI_CTRL_FREQ_SEL_300K 2 +#define TWSI_CTRL_FREQ_SEL_400K 3 +#define TWSI_CTRL_SMB_SLV_ADDR +#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24 + + +#define REG_PCIE_DEV_MISC_CTRL 0x21C +#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2 +#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1 +#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4 +#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8 +#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10 + +#define REG_PCIE_PHYMISC 0x1000 +#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 + +#define REG_LTSSM_TEST_MODE 0x12FC +#define LTSSM_TEST_MODE_DEF 0xE000 + +/* Selene Master Control Register */ +#define REG_MASTER_CTRL 0x1400 +#define MASTER_CTRL_SOFT_RST 0x1 +#define MASTER_CTRL_MTIMER_EN 0x2 +#define MASTER_CTRL_ITIMER_EN 0x4 +#define MASTER_CTRL_MANUAL_INT 0x8 +#define MASTER_CTRL_ITIMER2_EN 0x20 +#define MASTER_CTRL_INT_RDCLR 0x40 +#define MASTER_CTRL_LED_MODE 0x200 +#define MASTER_CTRL_REV_NUM_SHIFT 16 +#define MASTER_CTRL_REV_NUM_MASK 0xff +#define MASTER_CTRL_DEV_ID_SHIFT 24 +#define MASTER_CTRL_DEV_ID_MASK 0xff + +/* Timer Initial Value Register */ +#define REG_MANUAL_TIMER_INIT 0x1404 + + +/* IRQ Moderator Timer Initial Value Register */ +#define REG_IRQ_MODU_TIMER_INIT 0x1408 /* w */ +#define REG_IRQ_MODU_TIMER2_INIT 0x140A /* w */ + + +#define REG_GPHY_CTRL 0x140C +#define GPHY_CTRL_EXT_RESET 1 +#define GPHY_CTRL_PIPE_MOD 2 +#define GPHY_CTRL_TEST_MODE_MASK 3 +#define GPHY_CTRL_TEST_MODE_SHIFT 2 +#define GPHY_CTRL_BERT_START 0x10 +#define GPHY_CTRL_GATE_25M_EN 0x20 +#define GPHY_CTRL_LPW_EXIT 0x40 +#define GPHY_CTRL_PHY_IDDQ 0x80 +#define GPHY_CTRL_PHY_IDDQ_DIS 0x100 +#define GPHY_CTRL_PCLK_SEL_DIS 0x200 +#define GPHY_CTRL_HIB_EN 0x400 +#define GPHY_CTRL_HIB_PULSE 0x800 +#define GPHY_CTRL_SEL_ANA_RST 0x1000 +#define GPHY_CTRL_PHY_PLL_ON 0x2000 +#define GPHY_CTRL_PWDOWN_HW 0x4000 +#define GPHY_CTRL_DEFAULT (\ + GPHY_CTRL_PHY_PLL_ON |\ + GPHY_CTRL_SEL_ANA_RST |\ + GPHY_CTRL_HIB_PULSE |\ + GPHY_CTRL_HIB_EN) + +#define GPHY_CTRL_PW_WOL_DIS (\ + GPHY_CTRL_PHY_PLL_ON |\ + GPHY_CTRL_SEL_ANA_RST |\ + GPHY_CTRL_HIB_PULSE |\ + GPHY_CTRL_HIB_EN |\ + GPHY_CTRL_PWDOWN_HW |\ + GPHY_CTRL_PCLK_SEL_DIS |\ + GPHY_CTRL_PHY_IDDQ) + +/* IRQ Anti-Lost Timer Initial Value Register */ +#define REG_CMBDISDMA_TIMER 0x140E + + +/* Block IDLE Status Register */ +#define REG_IDLE_STATUS 0x1410 +#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC state machine is in non-IDLE state. 0: RXMAC is idling */ +#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC state machine is in non-IDLE state. 0: TXMAC is idling */ +#define IDLE_STATUS_RXQ 4 /* 1: RXQ state machine is in non-IDLE state. 0: RXQ is idling */ +#define IDLE_STATUS_TXQ 8 /* 1: TXQ state machine is in non-IDLE state. 0: TXQ is idling */ +#define IDLE_STATUS_DMAR 0x10 /* 1: DMAR state machine is in non-IDLE state. 0: DMAR is idling */ +#define IDLE_STATUS_DMAW 0x20 /* 1: DMAW state machine is in non-IDLE state.
0: DMAW is idling */ +#define IDLE_STATUS_SMB 0x40 /* 1: SMB state machine is in non-IDLE state. 0: SMB is idling */ +#define IDLE_STATUS_CMB 0x80 /* 1: CMB state machine is in non-IDLE state. 0: CMB is idling */ + +/* MDIO Control Register */ +#define REG_MDIO_CTRL 0x1414 +#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit control data to write to PHY MII management register */ +#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit status data that was read from the PHY MII management register*/ +#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */ +#define MDIO_REG_ADDR_SHIFT 16 +#define MDIO_RW 0x200000 /* 1: read, 0: write */ +#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */ +#define MDIO_START 0x800000 /* Write 1 to initiate the MDIO master. And this bit is self cleared after one cycle*/ +#define MDIO_CLK_SEL_SHIFT 24 +#define MDIO_CLK_25_4 0 +#define MDIO_CLK_25_6 2 +#define MDIO_CLK_25_8 3 +#define MDIO_CLK_25_10 4 +#define MDIO_CLK_25_14 5 +#define MDIO_CLK_25_20 6 +#define MDIO_CLK_25_28 7 +#define MDIO_BUSY 0x8000000 +#define MDIO_AP_EN 0x10000000 +#define MDIO_WAIT_TIMES 10 + +/* MII PHY Status Register */ +#define REG_PHY_STATUS 0x1418 +#define PHY_STATUS_100M 0x20000 +#define PHY_STATUS_EMI_CA 0x40000 + +/* BIST Control and Status Register0 (for the Packet Memory) */ +#define REG_BIST0_CTRL 0x141c +#define BIST0_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */ +/* BIST process and reset to zero when BIST is done */ +#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */ +/* decoder failure or more than 1 cell stuck-to-x failure */ +#define BIST0_FUSE_FLAG 0x4 /* 1: Indicating one cell has been fixed */ + +/* BIST Control and Status Register1(for the retry buffer of PCI Express) */ +#define REG_BIST1_CTRL 0x1420 +#define BIST1_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */ +/* BIST process and reset to zero when BIST is done */ +#define BIST1_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */ +/* decoder failure or more than 1 cell stuck-to-x failure.*/ +#define BIST1_FUSE_FLAG 0x4 + +/* SerDes Lock Detect Control and Status Register */ +#define REG_SERDES_LOCK 0x1424 +#define SERDES_LOCK_DETECT 1 /* 1: SerDes lock detected . This signal comes from Analog SerDes */ +#define SERDES_LOCK_DETECT_EN 2 /* 1: Enable SerDes Lock detect function */ + +/* MAC Control Register */ +#define REG_MAC_CTRL 0x1480 +#define MAC_CTRL_TX_EN 1 /* 1: Transmit Enable */ +#define MAC_CTRL_RX_EN 2 /* 1: Receive Enable */ +#define MAC_CTRL_TX_FLOW 4 /* 1: Transmit Flow Control Enable */ +#define MAC_CTRL_RX_FLOW 8 /* 1: Receive Flow Control Enable */ +#define MAC_CTRL_LOOPBACK 0x10 /* 1: Loop back at G/MII Interface */ +#define MAC_CTRL_DUPLX 0x20 /* 1: Full-duplex mode 0: Half-duplex mode */ +#define MAC_CTRL_ADD_CRC 0x40 /* 1: Instruct MAC to attach CRC on all egress Ethernet frames */ +#define MAC_CTRL_PAD 0x80 /* 1: Instruct MAC to pad short frames to 60-bytes, and then attach CRC. 
This bit has higher priority over CRC_EN */ +#define MAC_CTRL_LENCHK 0x100 /* 1: Instruct MAC to check if length field matches the real packet length */ +#define MAC_CTRL_HUGE_EN 0x200 /* 1: receive Jumbo frame enable */ +#define MAC_CTRL_PRMLEN_SHIFT 10 /* Preamble length */ +#define MAC_CTRL_PRMLEN_MASK 0xf +#define MAC_CTRL_RMV_VLAN 0x4000 /* 1: to remove VLAN Tag automatically from all receive packets */ +#define MAC_CTRL_PROMIS_EN 0x8000 /* 1: Promiscuous Mode Enable */ +#define MAC_CTRL_TX_PAUSE 0x10000 /* 1: transmit test pause */ +#define MAC_CTRL_SCNT 0x20000 /* 1: shortcut slot time counter */ +#define MAC_CTRL_SRST_TX 0x40000 /* 1: synchronized reset Transmit MAC module */ +#define MAC_CTRL_TX_SIMURST 0x80000 /* 1: transmit simulation reset */ +#define MAC_CTRL_SPEED_SHIFT 20 /* 10: gigabit 01:10M/100M */ +#define MAC_CTRL_SPEED_MASK 0x300000 +#define MAC_CTRL_SPEED_1000 2 +#define MAC_CTRL_SPEED_10_100 1 +#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 /* 1: transmit maximum backoff (half-duplex test bit) */ +#define MAC_CTRL_TX_HUGE 0x800000 /* 1: transmit huge enable */ +#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 /* 1: RX checksum enable */ +#define MAC_CTRL_MC_ALL_EN 0x2000000 /* 1: upload all multicast frame without error to system */ +#define MAC_CTRL_BC_EN 0x4000000 /* 1: upload all broadcast frame without error to system */ +#define MAC_CTRL_DBG 0x8000000 /* 1: upload all received frame to system (Debug Mode) */ + +/* MAC IPG/IFG Control Register */ +#define REG_MAC_IPG_IFG 0x1484 +#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back inter-packet gap. The default is 96-bit time */ +#define MAC_IPG_IFG_IPGT_MASK 0x7f +#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to enforce in between RX frames */ +#define MAC_IPG_IFG_MIFG_MASK 0xff /* Frame gap below such IFP is dropped */ +#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */ +#define MAC_IPG_IFG_IPGR1_MASK 0x7f +#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */ +#define MAC_IPG_IFG_IPGR2_MASK 0x7f + +/* MAC STATION ADDRESS */ +#define REG_MAC_STA_ADDR 0x1488 + +/* Hash table for multicast address */ +#define REG_RX_HASH_TABLE 0x1490 + + +/* MAC Half-Duplex Control Register */ +#define REG_MAC_HALF_DUPLX_CTRL 0x1498 +#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */ +#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff +#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 /* Retransmission maximum, afterwards the packet will be discarded */ +#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 /* 1: Allow the transmission of a packet which has been excessively deferred */ +#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 /* 1: No back-off on collision, immediately start the retransmission */ +#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* 1: No back-off on backpressure, immediately start the transmission after back pressure */ +#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision based flow control in half-duplex */ +#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf /* mode. 
In unit of 8-bit time */ + +/* Maximum Frame Length Control Register */ +#define REG_MTU 0x149c + +/* Wake-On-Lan control register */ +#define REG_WOL_CTRL 0x14a0 +#define WOL_PATTERN_EN 0x00000001 +#define WOL_PATTERN_PME_EN 0x00000002 +#define WOL_MAGIC_EN 0x00000004 +#define WOL_MAGIC_PME_EN 0x00000008 +#define WOL_LINK_CHG_EN 0x00000010 +#define WOL_LINK_CHG_PME_EN 0x00000020 +#define WOL_PATTERN_ST 0x00000100 +#define WOL_MAGIC_ST 0x00000200 +#define WOL_LINKCHG_ST 0x00000400 +#define WOL_CLK_SWITCH_EN 0x00008000 +#define WOL_PT0_EN 0x00010000 +#define WOL_PT1_EN 0x00020000 +#define WOL_PT2_EN 0x00040000 +#define WOL_PT3_EN 0x00080000 +#define WOL_PT4_EN 0x00100000 +#define WOL_PT5_EN 0x00200000 +#define WOL_PT6_EN 0x00400000 +/* WOL Length ( 2 DWORD ) */ +#define REG_WOL_PATTERN_LEN 0x14a4 +#define WOL_PT_LEN_MASK 0x7f +#define WOL_PT0_LEN_SHIFT 0 +#define WOL_PT1_LEN_SHIFT 8 +#define WOL_PT2_LEN_SHIFT 16 +#define WOL_PT3_LEN_SHIFT 24 +#define WOL_PT4_LEN_SHIFT 0 +#define WOL_PT5_LEN_SHIFT 8 +#define WOL_PT6_LEN_SHIFT 16 + +/* Internal SRAM Partition Register */ +#define REG_SRAM_TRD_ADDR 0x1518 +#define REG_SRAM_TRD_LEN 0x151C +#define REG_SRAM_RXF_ADDR 0x1520 +#define REG_SRAM_RXF_LEN 0x1524 +#define REG_SRAM_TXF_ADDR 0x1528 +#define REG_SRAM_TXF_LEN 0x152C +#define REG_SRAM_TCPH_ADDR 0x1530 +#define REG_SRAM_PKTH_ADDR 0x1532 + +/* Load Ptr Register */ +#define REG_LOAD_PTR 0x1534 /* Software sets this bit after the initialization of the head and tail */ + +/* + * addresses of all descriptors, as well as the following descriptor + * control register, which triggers each function block to load the head + * pointer to prepare for the operation. This bit is then self-cleared + * after one cycle. + */ + +/* Descriptor Control register */ +#define REG_RXF3_BASE_ADDR_HI 0x153C +#define REG_DESC_BASE_ADDR_HI 0x1540 +#define REG_RXF0_BASE_ADDR_HI 0x1540 /* share with DESC BASE ADDR HI */ +#define REG_HOST_RXF0_PAGE0_LO 0x1544 +#define REG_HOST_RXF0_PAGE1_LO 0x1548 +#define REG_TPD_BASE_ADDR_LO 0x154C +#define REG_RXF1_BASE_ADDR_HI 0x1550 +#define REG_RXF2_BASE_ADDR_HI 0x1554 +#define REG_HOST_RXFPAGE_SIZE 0x1558 +#define REG_TPD_RING_SIZE 0x155C +/* RSS about */ +#define REG_RSS_KEY0 0x14B0 +#define REG_RSS_KEY1 0x14B4 +#define REG_RSS_KEY2 0x14B8 +#define REG_RSS_KEY3 0x14BC +#define REG_RSS_KEY4 0x14C0 +#define REG_RSS_KEY5 0x14C4 +#define REG_RSS_KEY6 0x14C8 +#define REG_RSS_KEY7 0x14CC +#define REG_RSS_KEY8 0x14D0 +#define REG_RSS_KEY9 0x14D4 +#define REG_IDT_TABLE4 0x14E0 +#define REG_IDT_TABLE5 0x14E4 +#define REG_IDT_TABLE6 0x14E8 +#define REG_IDT_TABLE7 0x14EC +#define REG_IDT_TABLE0 0x1560 +#define REG_IDT_TABLE1 0x1564 +#define REG_IDT_TABLE2 0x1568 +#define REG_IDT_TABLE3 0x156C +#define REG_IDT_TABLE REG_IDT_TABLE0 +#define REG_RSS_HASH_VALUE 0x1570 +#define REG_RSS_HASH_FLAG 0x1574 +#define REG_BASE_CPU_NUMBER 0x157C + + +/* TXQ Control Register */ +#define REG_TXQ_CTRL 0x1580 +#define TXQ_CTRL_NUM_TPD_BURST_MASK 0xF +#define TXQ_CTRL_NUM_TPD_BURST_SHIFT 0 +#define TXQ_CTRL_EN 0x20 /* 1: Enable TXQ */ +#define TXQ_CTRL_ENH_MODE 0x40 /* Performance enhancement mode, in which up to two back-to-back DMA read commands might be dispatched. */ +#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 /* Number of data byte to read in a cache-aligned burst. Each SRAM entry is 8-byte in length. */ +#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff + +/* Jumbo packet Threshold for task offload */ +#define REG_TX_EARLY_TH 0x1584 /* Jumbo frame threshold in QWORD unit. 
Packet greater than */ +/* JUMBO_TASK_OFFLOAD_THRESHOLD will not be task offloaded. */ +#define TX_TX_EARLY_TH_MASK 0x7ff +#define TX_TX_EARLY_TH_SHIFT 0 + + +/* RXQ Control Register */ +#define REG_RXQ_CTRL 0x15A0 +#define RXQ_CTRL_PBA_ALIGN_32 0 /* rx-packet alignment */ +#define RXQ_CTRL_PBA_ALIGN_64 1 +#define RXQ_CTRL_PBA_ALIGN_128 2 +#define RXQ_CTRL_PBA_ALIGN_256 3 +#define RXQ_CTRL_Q1_EN 0x10 +#define RXQ_CTRL_Q2_EN 0x20 +#define RXQ_CTRL_Q3_EN 0x40 +#define RXQ_CTRL_IPV6_XSUM_VERIFY_EN 0x80 +#define RXQ_CTRL_HASH_TLEN_SHIFT 8 +#define RXQ_CTRL_HASH_TLEN_MASK 0xFF +#define RXQ_CTRL_HASH_TYPE_IPV4 0x10000 +#define RXQ_CTRL_HASH_TYPE_IPV4_TCP 0x20000 +#define RXQ_CTRL_HASH_TYPE_IPV6 0x40000 +#define RXQ_CTRL_HASH_TYPE_IPV6_TCP 0x80000 +#define RXQ_CTRL_RSS_MODE_DISABLE 0 +#define RXQ_CTRL_RSS_MODE_SQSINT 0x4000000 +#define RXQ_CTRL_RSS_MODE_MQUESINT 0x8000000 +#define RXQ_CTRL_RSS_MODE_MQUEMINT 0xC000000 +#define RXQ_CTRL_NIP_QUEUE_SEL_TBL 0x10000000 +#define RXQ_CTRL_HASH_ENABLE 0x20000000 +#define RXQ_CTRL_CUT_THRU_EN 0x40000000 +#define RXQ_CTRL_EN 0x80000000 + +/* Rx jumbo packet threshold and rrd retirement timer */ +#define REG_RXQ_JMBOSZ_RRDTIM 0x15A4 +/* + * Jumbo packet threshold for non-VLAN packet, in QWORD (64-bit) unit. + * When the packet length greater than or equal to this value, RXQ + * shall start cut-through forwarding of the received packet. + */ +#define RXQ_JMBOSZ_TH_MASK 0x7ff +#define RXQ_JMBOSZ_TH_SHIFT 0 /* RRD retirement timer. Decrement by 1 after every 512ns passes*/ +#define RXQ_JMBO_LKAH_MASK 0xf +#define RXQ_JMBO_LKAH_SHIFT 11 + +/* RXF flow control register */ +#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8 +#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0 +#define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff +#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16 +#define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff + + +/* DMA Engine Control Register */ +#define REG_DMA_CTRL 0x15C0 +#define DMA_CTRL_DMAR_IN_ORDER 0x1 +#define DMA_CTRL_DMAR_ENH_ORDER 0x2 +#define DMA_CTRL_DMAR_OUT_ORDER 0x4 +#define DMA_CTRL_RCB_VALUE 0x8 +#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 +#define DMA_CTRL_DMAR_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 +#define DMA_CTRL_DMAW_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAR_REQ_PRI 0x400 +#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x1F +#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11 +#define DMA_CTRL_DMAW_DLY_CNT_MASK 0xF +#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16 +#define DMA_CTRL_TXCMB_EN 0x100000 +#define DMA_CTRL_RXCMB_EN 0x200000 + + +/* CMB/SMB Control Register */ +#define REG_SMB_STAT_TIMER 0x15C4 +#define REG_TRIG_RRD_THRESH 0x15CA +#define REG_TRIG_TPD_THRESH 0x15C8 +#define REG_TRIG_TXTIMER 0x15CC +#define REG_TRIG_RXTIMER 0x15CE + +/* HOST RXF Page 1,2,3 address */ +#define REG_HOST_RXF1_PAGE0_LO 0x15D0 +#define REG_HOST_RXF1_PAGE1_LO 0x15D4 +#define REG_HOST_RXF2_PAGE0_LO 0x15D8 +#define REG_HOST_RXF2_PAGE1_LO 0x15DC +#define REG_HOST_RXF3_PAGE0_LO 0x15E0 +#define REG_HOST_RXF3_PAGE1_LO 0x15E4 + +/* Mail box */ +#define REG_MB_RXF1_RADDR 0x15B4 +#define REG_MB_RXF2_RADDR 0x15B8 +#define REG_MB_RXF3_RADDR 0x15BC +#define REG_MB_TPD_PROD_IDX 0x15F0 + +/* RXF-Page 0-3 PageNo & Valid bit */ +#define REG_HOST_RXF0_PAGE0_VLD 0x15F4 +#define HOST_RXF_VALID 1 +#define HOST_RXF_PAGENO_SHIFT 1 +#define HOST_RXF_PAGENO_MASK 0x7F +#define REG_HOST_RXF0_PAGE1_VLD 0x15F5 +#define REG_HOST_RXF1_PAGE0_VLD 0x15F6 +#define REG_HOST_RXF1_PAGE1_VLD 0x15F7 +#define REG_HOST_RXF2_PAGE0_VLD 0x15F8 +#define REG_HOST_RXF2_PAGE1_VLD 0x15F9 +#define REG_HOST_RXF3_PAGE0_VLD 0x15FA +#define REG_HOST_RXF3_PAGE1_VLD 0x15FB 
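How these per-page valid bytes are driven is easiest to see in a small sketch (an illustration only, assuming the AT_WRITE_REGB helper from atl1e.h; the helper name below is ours, and the real driver performs the equivalent write inline from atl1e_main.c when it hands a drained RX page back to the NIC):

/* Sketch: mark RXF page page_no of a queue valid again so the NIC may
 * resume DMA-ing received frames into it.  vld_reg is one of the
 * REG_HOST_RXFn_PAGEm_VLD byte registers defined above. */
static inline void atl1e_return_rx_page(struct atl1e_hw *hw,
					u16 vld_reg, u8 page_no)
{
	AT_WRITE_REGB(hw, vld_reg,
		      HOST_RXF_VALID | (page_no << HOST_RXF_PAGENO_SHIFT));
}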
+ +/* Interrupt Status Register */ +#define REG_ISR 0x1600 +#define ISR_SMB 1 +#define ISR_TIMER 2 /* Interrupt when Timer is counted down to zero */ +/* + * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set + * in Table 51 Selene Master Control Register (Offset 0x1400). + */ +#define ISR_MANUAL 4 +#define ISR_HW_RXF_OV 8 /* RXF overflow interrupt */ +#define ISR_HOST_RXF0_OV 0x10 +#define ISR_HOST_RXF1_OV 0x20 +#define ISR_HOST_RXF2_OV 0x40 +#define ISR_HOST_RXF3_OV 0x80 +#define ISR_TXF_UN 0x100 +#define ISR_RX0_PAGE_FULL 0x200 +#define ISR_DMAR_TO_RST 0x400 +#define ISR_DMAW_TO_RST 0x800 +#define ISR_GPHY 0x1000 +#define ISR_TX_CREDIT 0x2000 +#define ISR_GPHY_LPW 0x4000 /* GPHY low power state interrupt */ +#define ISR_RX_PKT 0x10000 /* One packet received, triggered by RFD */ +#define ISR_TX_PKT 0x20000 /* One packet transmitted, triggered by TPD */ +#define ISR_TX_DMA 0x40000 +#define ISR_RX_PKT_1 0x80000 +#define ISR_RX_PKT_2 0x100000 +#define ISR_RX_PKT_3 0x200000 +#define ISR_MAC_RX 0x400000 +#define ISR_MAC_TX 0x800000 +#define ISR_UR_DETECTED 0x1000000 +#define ISR_FERR_DETECTED 0x2000000 +#define ISR_NFERR_DETECTED 0x4000000 +#define ISR_CERR_DETECTED 0x8000000 +#define ISR_PHY_LINKDOWN 0x10000000 +#define ISR_DIS_INT 0x80000000 + + +/* Interrupt Mask Register */ +#define REG_IMR 0x1604 + + +#define IMR_NORMAL_MASK (\ + ISR_SMB |\ + ISR_TXF_UN |\ + ISR_HW_RXF_OV |\ + ISR_HOST_RXF0_OV|\ + ISR_MANUAL |\ + ISR_GPHY |\ + ISR_GPHY_LPW |\ + ISR_DMAR_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_PHY_LINKDOWN|\ + ISR_RX_PKT |\ + ISR_TX_PKT) + +#define ISR_TX_EVENT (ISR_TXF_UN | ISR_TX_PKT) +#define ISR_RX_EVENT (ISR_HOST_RXF0_OV | ISR_HW_RXF_OV | ISR_RX_PKT) + +#define REG_MAC_RX_STATUS_BIN 0x1700 +#define REG_MAC_RX_STATUS_END 0x175c +#define REG_MAC_TX_STATUS_BIN 0x1760 +#define REG_MAC_TX_STATUS_END 0x17c0 + +/* Hardware Offset Register */ +#define REG_HOST_RXF0_PAGEOFF 0x1800 +#define REG_TPD_CONS_IDX 0x1804 +#define REG_HOST_RXF1_PAGEOFF 0x1808 +#define REG_HOST_RXF2_PAGEOFF 0x180C +#define REG_HOST_RXF3_PAGEOFF 0x1810 + +/* RXF-Page 0-3 Offset DMA Address */ +#define REG_HOST_RXF0_MB0_LO 0x1820 +#define REG_HOST_RXF0_MB1_LO 0x1824 +#define REG_HOST_RXF1_MB0_LO 0x1828 +#define REG_HOST_RXF1_MB1_LO 0x182C +#define REG_HOST_RXF2_MB0_LO 0x1830 +#define REG_HOST_RXF2_MB1_LO 0x1834 +#define REG_HOST_RXF3_MB0_LO 0x1838 +#define REG_HOST_RXF3_MB1_LO 0x183C + +/* Tpd CMB DMA Address */ +#define REG_HOST_TX_CMB_LO 0x1840 +#define REG_HOST_SMB_ADDR_LO 0x1844 + +/* DEBUG ADDR */ +#define REG_DEBUG_DATA0 0x1900 +#define REG_DEBUG_DATA1 0x1904 + +/***************************** MII definition ***************************************/ +/* PHY Common Register */ +#define MII_AT001_PSCR 0x10 +#define MII_AT001_PSSR 0x11 +#define MII_INT_CTRL 0x12 +#define MII_INT_STATUS 0x13 +#define MII_SMARTSPEED 0x14 +#define MII_LBRERROR 0x18 +#define MII_RESV2 0x1a + +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + +/* Autoneg Advertisement Register */ +#define MII_AR_DEFAULT_CAP_MASK 0 + +/* 1000BASE-T Control Register */ +#define MII_AT001_CR_1000T_SPEED_MASK \ + (ADVERTISE_1000FULL | ADVERTISE_1000HALF) +#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK MII_AT001_CR_1000T_SPEED_MASK + +/* AT001 PHY Specific Control Register */ +#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define MII_AT001_PSCR_MAC_POWERDOWN 0x0008 
+#define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ +/* Manual MDI configuration */ +#define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. + */ +#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080 +/* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100 +/* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ +#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5 +#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 +/* AT001 PHY Specific Status Register */ +#define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#endif /*_ATHL1E_HW_H_*/ diff --git a/addons/atl1e/src/3.10.108/atl1e_main.c b/addons/atl1e/src/3.10.108/atl1e_main.c new file mode 100644 index 00000000..c23bb02e --- /dev/null +++ b/addons/atl1e/src/3.10.108/atl1e_main.c @@ -0,0 +1,2559 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include "atl1e.h" + +#define DRV_VERSION "1.0.0.7-NAPI" + +char atl1e_driver_name[] = "ATL1E"; +char atl1e_driver_version[] = DRV_VERSION; +#define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026 +/* + * atl1e_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = { + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)}, + /* required last entry */ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl); + +MODULE_AUTHOR("Atheros Corporation, , Jie Yang "); +MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter); + +static const u16 +atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = +{ + {REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD}, + {REG_HOST_RXF1_PAGE0_VLD, REG_HOST_RXF1_PAGE1_VLD}, + {REG_HOST_RXF2_PAGE0_VLD, REG_HOST_RXF2_PAGE1_VLD}, + {REG_HOST_RXF3_PAGE0_VLD, REG_HOST_RXF3_PAGE1_VLD} +}; + +static const u16 atl1e_rx_page_hi_addr_regs[AT_MAX_RECEIVE_QUEUE] = +{ + REG_RXF0_BASE_ADDR_HI, + REG_RXF1_BASE_ADDR_HI, + REG_RXF2_BASE_ADDR_HI, + REG_RXF3_BASE_ADDR_HI +}; + +static const u16 +atl1e_rx_page_lo_addr_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = +{ + {REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO}, + {REG_HOST_RXF1_PAGE0_LO, REG_HOST_RXF1_PAGE1_LO}, + {REG_HOST_RXF2_PAGE0_LO, REG_HOST_RXF2_PAGE1_LO}, + {REG_HOST_RXF3_PAGE0_LO, REG_HOST_RXF3_PAGE1_LO} +}; + +static const u16 +atl1e_rx_page_write_offset_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = +{ + {REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO}, + {REG_HOST_RXF1_MB0_LO, REG_HOST_RXF1_MB1_LO}, + {REG_HOST_RXF2_MB0_LO, REG_HOST_RXF2_MB1_LO}, + {REG_HOST_RXF3_MB0_LO, REG_HOST_RXF3_MB1_LO} +}; + +static const u16 atl1e_pay_load_size[] = { + 128, 256, 512, 1024, 2048, 4096, +}; + +/** + * atl1e_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static inline void atl1e_irq_enable(struct atl1e_adapter *adapter) +{ + if (likely(atomic_dec_and_test(&adapter->irq_sem))) { + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK); + AT_WRITE_FLUSH(&adapter->hw); + } +} + +/** + * atl1e_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static inline void atl1e_irq_disable(struct atl1e_adapter *adapter) +{ + atomic_inc(&adapter->irq_sem); + AT_WRITE_REG(&adapter->hw, REG_IMR, 0); + AT_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +/** + * atl1e_irq_reset - reset interrupt confiure on the NIC + * @adapter: board private structure + */ +static inline void atl1e_irq_reset(struct atl1e_adapter *adapter) +{ + atomic_set(&adapter->irq_sem, 0); + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + AT_WRITE_REG(&adapter->hw, REG_IMR, 0); + AT_WRITE_FLUSH(&adapter->hw); +} + +/** + * atl1e_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1e_phy_config(unsigned long data) +{ + struct atl1e_adapter *adapter = (struct atl1e_adapter *) data; + struct atl1e_hw *hw = &adapter->hw; + unsigned long flags; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + atl1e_restart_autoneg(hw); + 
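	/* atl1e_link_chg_event() below takes mdio_lock with a plain spin_lock, i.e. it expects to run with interrupts already disabled, so every other holder, including this timer callback, must use the irqsave/irqrestore variants. */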
spin_unlock_irqrestore(&adapter->mdio_lock, flags); +} + +void atl1e_reinit_locked(struct atl1e_adapter *adapter) +{ + + WARN_ON(in_interrupt()); + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + atl1e_down(adapter); + atl1e_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); +} + +static void atl1e_reset_task(struct work_struct *work) +{ + struct atl1e_adapter *adapter; + adapter = container_of(work, struct atl1e_adapter, reset_task); + + atl1e_reinit_locked(adapter); +} + +static int atl1e_check_link(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err = 0; + u16 speed, duplex, phy_data; + + /* MII_BMSR must be read twice: the link status bit is latched */ + atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); + if ((phy_data & BMSR_LSTATUS) == 0) { + /* link down */ + if (netif_carrier_ok(netdev)) { /* old link state: Up */ + u32 value; + /* disable rx */ + value = AT_READ_REG(hw, REG_MAC_CTRL); + value &= ~MAC_CTRL_RX_EN; + AT_WRITE_REG(hw, REG_MAC_CTRL, value); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } else { + /* Link Up */ + err = atl1e_get_speed_and_duplex(hw, &speed, &duplex); + if (unlikely(err)) + return err; + + /* link result is our setting */ + if (adapter->link_speed != speed || + adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl1e_setup_mac_ctrl(adapter); + netdev_info(netdev, + "NIC Link is Up <%d Mbps %s Duplex>\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half"); + } + + if (!netif_carrier_ok(netdev)) { + /* Link down -> Up */ + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + } + return 0; +} + +/** + * atl1e_link_chg_task - deal with link change event out of interrupt context + * @work: work queue item embedded in the adapter's private structure + */ +static void atl1e_link_chg_task(struct work_struct *work) +{ + struct atl1e_adapter *adapter; + unsigned long flags; + + adapter = container_of(work, struct atl1e_adapter, link_chg_task); + spin_lock_irqsave(&adapter->mdio_lock, flags); + atl1e_check_link(adapter); + spin_unlock_irqrestore(&adapter->mdio_lock, flags); +} + +static void atl1e_link_chg_event(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 phy_data = 0; + u16 link_up = 0; + + spin_lock(&adapter->mdio_lock); + atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->mdio_lock); + link_up = phy_data & BMSR_LSTATUS; + /* notify upper layer link down ASAP */ + if (!link_up) { + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + netdev_info(netdev, "NIC Link is Down\n"); + adapter->link_speed = SPEED_0; + netif_stop_queue(netdev); + } + } + schedule_work(&adapter->link_chg_task); +} + +static void atl1e_del_timer(struct atl1e_adapter *adapter) +{ + del_timer_sync(&adapter->phy_config_timer); +} + +static void atl1e_cancel_work(struct atl1e_adapter *adapter) +{ + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->link_chg_task); +} + +/** + * atl1e_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atl1e_tx_timeout(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); +} + +/** + * atl1e_set_multi - Multicast and 
Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + */ +static void atl1e_set_multi(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u32 mac_ctrl_data = 0; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + mac_ctrl_data = AT_READ_REG(hw, REG_MAC_CTRL); + + if (netdev->flags & IFF_PROMISC) { + mac_ctrl_data |= MAC_CTRL_PROMIS_EN; + } else if (netdev->flags & IFF_ALLMULTI) { + mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; + mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN; + } else { + mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + } + + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); + + /* clear the old settings from the multicast hash table */ + AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + /* compute each mc address's hash value, and put it into the hash table */ + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atl1e_hash_mc_addr(hw, ha->addr); + atl1e_hash_set(hw, hash_value); + } +} + +static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + *mac_ctrl_data |= MAC_CTRL_RMV_VLAN; + } else { + /* disable VLAN tag insert/strip */ + *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN; + } +} + +static void atl1e_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + u32 mac_ctrl_data = 0; + + netdev_dbg(adapter->netdev, "%s\n", __func__); + + atl1e_irq_disable(adapter); + mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL); + __atl1e_vlan_mode(features, &mac_ctrl_data); + AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); + atl1e_irq_enable(adapter); +} + +static void atl1e_restore_vlan(struct atl1e_adapter *adapter) +{ + netdev_dbg(adapter->netdev, "%s\n", __func__); + atl1e_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +/** + * atl1e_set_mac_addr - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atl1e_set_mac_addr(struct net_device *netdev, void *p) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netif_running(netdev)) + return -EBUSY; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atl1e_hw_set_mac_addr(&adapter->hw); + + return 0; +} + +static netdev_features_t atl1e_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable, make sure the tx flag is always in the same state as the rx flag. 
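+ * E.g. a request that turns on CTAG_RX alone comes back with CTAG_TX forced on as well, and clearing CTAG_RX clears CTAG_TX too.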
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int atl1e_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + atl1e_vlan_mode(netdev, features); + + return 0; +} + +/** + * atl1e_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1e_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + netdev_warn(adapter->netdev, "invalid MTU setting\n"); + return -EINVAL; + } + /* set MTU */ + if (old_mtu != new_mtu && netif_running(netdev)) { + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + netdev->mtu = new_mtu; + adapter->hw.max_frame_size = new_mtu; + adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3; + atl1e_down(adapter); + atl1e_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); + } + return 0; +} + +/* + * caller should hold mdio_lock + */ +static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result); + return result; +} + +static void atl1e_mdio_write(struct net_device *netdev, int phy_id, + int reg_num, int val) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); +} + +static int atl1e_mii_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + unsigned long flags; + int retval = 0; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 0; + break; + + case SIOCGMIIREG: + if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) { + retval = -EIO; + goto out; + } + break; + + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) { + retval = -EFAULT; + goto out; + } + + netdev_dbg(adapter->netdev, " write %x %x\n", + data->reg_num, data->val_in); + if (atl1e_write_phy_reg(&adapter->hw, + data->reg_num, data->val_in)) { + retval = -EIO; + goto out; + } + break; + + default: + retval = -EOPNOTSUPP; + break; + } +out: + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + return retval; + +} + +static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atl1e_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +static void atl1e_setup_pcicmd(struct pci_dev *pdev) +{ + u16 cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO); + cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + + /* + * some motherboard BIOS (PXE/EFI) drivers may set PME + * while they transfer control to the OS (Windows/Linux), + * so we should clear this bit before the NIC works normally 
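+ * (REG_PM_CTRLSTAT presumably maps the standard PCI power-management control/status word, so writing 0 below drops PME_EN and leaves no stale wake request pending across the handoff)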
+ */ + pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0); + msleep(1); +} + +/** + * atl1e_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + */ +static int atl1e_alloc_queues(struct atl1e_adapter *adapter) +{ + return 0; +} + +/** + * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter) + * @adapter: board private structure to initialize + * + * atl1e_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + */ +static int atl1e_sw_init(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 phy_status_data = 0; + + adapter->wol = 0; + adapter->link_speed = SPEED_0; /* hardware init */ + adapter->link_duplex = FULL_DUPLEX; + adapter->num_rx_queues = 1; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS); + /* nic type */ + if (hw->revision_id >= 0xF0) { + hw->nic_type = athr_l2e_revB; + } else { + if (phy_status_data & PHY_STATUS_100M) + hw->nic_type = athr_l1e; + else + hw->nic_type = athr_l2e_revA; + } + + phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS); + + if (phy_status_data & PHY_STATUS_EMI_CA) + hw->emi_ca = true; + else + hw->emi_ca = false; + + hw->phy_configured = false; + hw->preamble_len = 7; + hw->max_frame_size = adapter->netdev->mtu; + hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN + + VLAN_HLEN + ETH_FCS_LEN + 7) >> 3; + + hw->rrs_type = atl1e_rrs_disable; + hw->indirect_tab = 0; + hw->base_cpu = 0; + + /* need confirm */ + + hw->ict = 50000; /* 100ms */ + hw->smb_timer = 200000; /* 200ms */ + hw->tpd_burst = 5; + hw->rrd_thresh = 1; + hw->tpd_thresh = adapter->tx_ring.count / 2; + hw->rx_count_down = 4; /* 2us resolution */ + hw->tx_count_down = hw->imt * 4 / 3; + hw->dmar_block = atl1e_dma_req_1024; + hw->dmaw_block = atl1e_dma_req_1024; + hw->dmar_dly_cnt = 15; + hw->dmaw_dly_cnt = 4; + + if (atl1e_alloc_queues(adapter)) { + netdev_err(adapter->netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + atomic_set(&adapter->irq_sem, 1); + spin_lock_init(&adapter->mdio_lock); + spin_lock_init(&adapter->tx_lock); + + set_bit(__AT_DOWN, &adapter->flags); + + return 0; +} + +/** + * atl1e_clean_tx_ring - Free Tx-skb + * @adapter: board private structure + */ +static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + struct atl1e_tx_buffer *tx_buffer = NULL; + struct pci_dev *pdev = adapter->pdev; + u16 index, ring_count; + + if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL) + return; + + ring_count = tx_ring->count; + /* first unmmap dma */ + for (index = 0; index < ring_count; index++) { + tx_buffer = &tx_ring->tx_buffer[index]; + if (tx_buffer->dma) { + if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE) + pci_unmap_single(pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE) + pci_unmap_page(pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + tx_buffer->dma = 0; + } + } + /* second free skb */ + for (index = 0; index < ring_count; index++) { + tx_buffer = &tx_ring->tx_buffer[index]; + if 
(tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + } + } + /* Zero out Tx-buffers */ + memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) * + ring_count); + memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) * + ring_count); +} + +/** + * atl1e_clean_rx_ring - Free rx-reservation skbs + * @adapter: board private structure + */ +static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter) +{ + struct atl1e_rx_ring *rx_ring = + &adapter->rx_ring; + struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc; + u16 i, j; + + + if (adapter->ring_vir_addr == NULL) + return; + /* Zero out the descriptor ring */ + for (i = 0; i < adapter->num_rx_queues; i++) { + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + if (rx_page_desc[i].rx_page[j].addr != NULL) { + memset(rx_page_desc[i].rx_page[j].addr, 0, + rx_ring->real_page_size); + } + } + } +} + +static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size) +{ + *ring_size = ((u32)(adapter->tx_ring.count * + sizeof(struct atl1e_tpd_desc) + 7 + /* tx ring, qword align */ + + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE * + adapter->num_rx_queues + 31 + /* rx ring, 32 bytes align */ + + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) * + sizeof(u32) + 3)); + /* tx, rx cmd, dword align */ +} + +static void atl1e_init_ring_resources(struct atl1e_adapter *adapter) +{ + struct atl1e_rx_ring *rx_ring = NULL; + + rx_ring = &adapter->rx_ring; + + rx_ring->real_page_size = adapter->rx_ring.page_size + + adapter->hw.max_frame_size + + ETH_HLEN + VLAN_HLEN + + ETH_FCS_LEN; + rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32); + atl1e_cal_ring_size(adapter, &adapter->ring_size); + + adapter->ring_vir_addr = NULL; + adapter->rx_ring.desc = NULL; + rwlock_init(&adapter->tx_ring.tx_lock); +} + +/* + * Read / Write Ptr Initialize: + */ +static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = NULL; + struct atl1e_rx_ring *rx_ring = NULL; + struct atl1e_rx_page_desc *rx_page_desc = NULL; + int i, j; + + tx_ring = &adapter->tx_ring; + rx_ring = &adapter->rx_ring; + rx_page_desc = rx_ring->rx_page_desc; + + tx_ring->next_to_use = 0; + atomic_set(&tx_ring->next_to_clean, 0); + + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_page_desc[i].rx_using = 0; + rx_page_desc[i].rx_nxseq = 0; + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + *rx_page_desc[i].rx_page[j].write_offset_addr = 0; + rx_page_desc[i].rx_page[j].read_offset = 0; + } + } +} + +/** + * atl1e_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit and receive software resources + */ +static void atl1e_free_ring_resources(struct atl1e_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + + atl1e_clean_tx_ring(adapter); + atl1e_clean_rx_ring(adapter); + + if (adapter->ring_vir_addr) { + pci_free_consistent(pdev, adapter->ring_size, + adapter->ring_vir_addr, adapter->ring_dma); + adapter->ring_vir_addr = NULL; + } + + if (adapter->tx_ring.tx_buffer) { + kfree(adapter->tx_ring.tx_buffer); + adapter->tx_ring.tx_buffer = NULL; + } +} + +/** + * atl1e_setup_ring_resources - allocate Tx / RX descriptor resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct atl1e_tx_ring *tx_ring; + struct atl1e_rx_ring *rx_ring; + struct atl1e_rx_page_desc 
*rx_page_desc; + int size, i, j; + u32 offset = 0; + int err = 0; + + if (adapter->ring_vir_addr != NULL) + return 0; /* already allocated */ + + tx_ring = &adapter->tx_ring; + rx_ring = &adapter->rx_ring; + + /* real ring DMA buffer */ + + size = adapter->ring_size; + adapter->ring_vir_addr = pci_alloc_consistent(pdev, + adapter->ring_size, &adapter->ring_dma); + + if (adapter->ring_vir_addr == NULL) { + netdev_err(adapter->netdev, + "pci_alloc_consistent failed, size = %d\n", size); + return -ENOMEM; + } + + memset(adapter->ring_vir_addr, 0, adapter->ring_size); + + rx_page_desc = rx_ring->rx_page_desc; + + /* Init TPD Ring */ + tx_ring->dma = roundup(adapter->ring_dma, 8); + offset = tx_ring->dma - adapter->ring_dma; + tx_ring->desc = adapter->ring_vir_addr + offset; + size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count); + tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL); + if (tx_ring->tx_buffer == NULL) { + err = -ENOMEM; + goto failed; + } + + /* Init RXF-Pages */ + offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count); + offset = roundup(offset, 32); + + for (i = 0; i < adapter->num_rx_queues; i++) { + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + rx_page_desc[i].rx_page[j].dma = + adapter->ring_dma + offset; + rx_page_desc[i].rx_page[j].addr = + adapter->ring_vir_addr + offset; + offset += rx_ring->real_page_size; + } + } + + /* Init CMB dma address */ + tx_ring->cmb_dma = adapter->ring_dma + offset; + tx_ring->cmb = adapter->ring_vir_addr + offset; + offset += sizeof(u32); + + for (i = 0; i < adapter->num_rx_queues; i++) { + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + rx_page_desc[i].rx_page[j].write_offset_dma = + adapter->ring_dma + offset; + rx_page_desc[i].rx_page[j].write_offset_addr = + adapter->ring_vir_addr + offset; + offset += sizeof(u32); + } + } + + if (unlikely(offset > adapter->ring_size)) { + netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n", + offset, adapter->ring_size); + err = -1; + goto failed; + } + + return 0; +failed: + if (adapter->ring_vir_addr != NULL) { + pci_free_consistent(pdev, adapter->ring_size, + adapter->ring_vir_addr, adapter->ring_dma); + adapter->ring_vir_addr = NULL; + } + return err; +} + +static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter) +{ + + struct atl1e_hw *hw = &adapter->hw; + struct atl1e_rx_ring *rx_ring = &adapter->rx_ring; + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + struct atl1e_rx_page_desc *rx_page_desc = NULL; + int i, j; + + AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI, + (u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32)); + AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO, + (u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count)); + AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO, + (u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK)); + + rx_page_desc = rx_ring->rx_page_desc; + /* RXF Page Physical address / Page Length */ + for (i = 0; i < AT_MAX_RECEIVE_QUEUE; i++) { + AT_WRITE_REG(hw, atl1e_rx_page_hi_addr_regs[i], + (u32)((adapter->ring_dma & + AT_DMA_HI_ADDR_MASK) >> 32)); + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + u32 page_phy_addr; + u32 offset_phy_addr; + + page_phy_addr = rx_page_desc[i].rx_page[j].dma; + offset_phy_addr = + rx_page_desc[i].rx_page[j].write_offset_dma; + + AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[i][j], + page_phy_addr & AT_DMA_LO_ADDR_MASK); + AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[i][j], + offset_phy_addr & AT_DMA_LO_ADDR_MASK); + AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[i][j], 1); + } + } + /* 
Page Length */ + AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size); + /* Load all of base address above */ + AT_WRITE_REG(hw, REG_LOAD_PTR, 1); +} + +static inline void atl1e_configure_tx(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + u32 dev_ctrl_data = 0; + u32 max_pay_load = 0; + u32 jumbo_thresh = 0; + u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */ + + /* configure TXQ param */ + if (hw->nic_type != athr_l2e_revB) { + extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; + if (hw->max_frame_size <= 1500) { + jumbo_thresh = hw->max_frame_size + extra_size; + } else if (hw->max_frame_size < 6*1024) { + jumbo_thresh = + (hw->max_frame_size + extra_size) * 2 / 3; + } else { + jumbo_thresh = (hw->max_frame_size + extra_size) / 2; + } + AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3); + } + + dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL); + + max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) & + DEVICE_CTRL_MAX_PAYLOAD_MASK; + + hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block); + + max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) & + DEVICE_CTRL_MAX_RREQ_SZ_MASK; + hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); + + if (hw->nic_type != athr_l2e_revB) + AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2, + atl1e_pay_load_size[hw->dmar_block]); + /* enable TXQ */ + AT_WRITE_REGW(hw, REG_TXQ_CTRL, + (((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK) + << TXQ_CTRL_NUM_TPD_BURST_SHIFT) + | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN); +} + +static inline void atl1e_configure_rx(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + u32 rxf_len = 0; + u32 rxf_low = 0; + u32 rxf_high = 0; + u32 rxf_thresh_data = 0; + u32 rxq_ctrl_data = 0; + + if (hw->nic_type != athr_l2e_revB) { + AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM, + (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) << + RXQ_JMBOSZ_TH_SHIFT | + (1 & RXQ_JMBO_LKAH_MASK) << + RXQ_JMBO_LKAH_SHIFT)); + + rxf_len = AT_READ_REG(hw, REG_SRAM_RXF_LEN); + rxf_high = rxf_len * 4 / 5; + rxf_low = rxf_len / 5; + rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK) + << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK) + << RXQ_RXF_PAUSE_TH_LO_SHIFT); + + AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data); + } + + /* RRS */ + AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab); + AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu); + + if (hw->rrs_type & atl1e_rrs_ipv4) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4; + + if (hw->rrs_type & atl1e_rrs_ipv4_tcp) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4_TCP; + + if (hw->rrs_type & atl1e_rrs_ipv6) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6; + + if (hw->rrs_type & atl1e_rrs_ipv6_tcp) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6_TCP; + + if (hw->rrs_type != atl1e_rrs_disable) + rxq_ctrl_data |= + (RXQ_CTRL_HASH_ENABLE | RXQ_CTRL_RSS_MODE_MQUESINT); + + rxq_ctrl_data |= RXQ_CTRL_IPV6_XSUM_VERIFY_EN | RXQ_CTRL_PBA_ALIGN_32 | + RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN; + + AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); +} + +static inline void atl1e_configure_dma(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + u32 dma_ctrl_data = 0; + + dma_ctrl_data = DMA_CTRL_RXCMB_EN; + dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) + << DMA_CTRL_DMAR_BURST_LEN_SHIFT; + dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) + << DMA_CTRL_DMAW_BURST_LEN_SHIFT; + dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER; + dma_ctrl_data 
|= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK) + << DMA_CTRL_DMAR_DLY_CNT_SHIFT; + dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK) + << DMA_CTRL_DMAW_DLY_CNT_SHIFT; + + AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data); +} + +static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter) +{ + u32 value; + struct atl1e_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + + /* Config MAC CTRL Register */ + value = MAC_CTRL_TX_EN | + MAC_CTRL_RX_EN; + + if (FULL_DUPLEX == adapter->link_duplex) + value |= MAC_CTRL_DUPLX; + + value |= ((u32)((SPEED_1000 == adapter->link_speed) ? + MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << + MAC_CTRL_SPEED_SHIFT); + value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); + + value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); + value |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); + + __atl1e_vlan_mode(netdev->features, &value); + + value |= MAC_CTRL_BC_EN; + if (netdev->flags & IFF_PROMISC) + value |= MAC_CTRL_PROMIS_EN; + if (netdev->flags & IFF_ALLMULTI) + value |= MAC_CTRL_MC_ALL_EN; + + AT_WRITE_REG(hw, REG_MAC_CTRL, value); +} + +/** + * atl1e_configure - Configure Transmit & Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Tx/Rx unit of the MAC after a reset. + */ +static int atl1e_configure(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + + u32 intr_status_data = 0; + + /* clear interrupt status */ + AT_WRITE_REG(hw, REG_ISR, ~0); + + /* 1. set MAC Address */ + atl1e_hw_set_mac_addr(hw); + + /* 2. Init the Multicast HASH table done by set_multi */ + + /* 3. Clear any WOL status */ + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + + /* 4. Descriptor Ring BaseMem/Length/Read ptr/Write ptr + * TPD Ring/SMB/RXF0 Page CMBs, they use the same + * High 32bits memory */ + atl1e_configure_des_ring(adapter); + + /* 5. set Interrupt Moderator Timer */ + AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt); + AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt); + AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE | + MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN); + + /* 6. rx/tx threshold to trig interrupt */ + AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh); + AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh); + AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down); + AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down); + + /* 7. set Interrupt Clear Timer */ + AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict); + + /* 8. set MTU */ + AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN + + VLAN_HLEN + ETH_FCS_LEN); + + /* 9. config TXQ early tx threshold */ + atl1e_configure_tx(adapter); + + /* 10. config RXQ */ + atl1e_configure_rx(adapter); + + /* 11. config DMA Engine */ + atl1e_configure_dma(adapter); + + /* 12. smb timer to trig interrupt */ + AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer); + + intr_status_data = AT_READ_REG(hw, REG_ISR); + if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) { + netdev_err(adapter->netdev, + "atl1e_configure failed, PCIE phy link down\n"); + return -1; + } + + AT_WRITE_REG(hw, REG_ISR, 0x7fffffff); + return 0; +} + +/** + * atl1e_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the interrupt handler when + * the hardware raises an SMB (statistics) event. 
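+ * Note the weighting in the collision sum below: frames in the tx_2_col bin count twice, while late and aborted collisions add one each.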
+ */ +static struct net_device_stats *atl1e_get_stats(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw_stats *hw_stats = &adapter->hw_stats; + struct net_device_stats *net_stats = &netdev->stats; + + net_stats->rx_packets = hw_stats->rx_ok; + net_stats->tx_packets = hw_stats->tx_ok; + net_stats->rx_bytes = hw_stats->rx_byte_cnt; + net_stats->tx_bytes = hw_stats->tx_byte_cnt; + net_stats->multicast = hw_stats->rx_mcast; + net_stats->collisions = hw_stats->tx_1_col + + hw_stats->tx_2_col * 2 + + hw_stats->tx_late_col + hw_stats->tx_abort_col; + + net_stats->rx_errors = hw_stats->rx_frag + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + hw_stats->rx_sz_ov + + hw_stats->rx_rrd_ov + hw_stats->rx_align_err; + net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; + net_stats->rx_length_errors = hw_stats->rx_len_err; + net_stats->rx_crc_errors = hw_stats->rx_fcs_err; + net_stats->rx_frame_errors = hw_stats->rx_align_err; + net_stats->rx_over_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; + + net_stats->rx_missed_errors = hw_stats->rx_rrd_ov + hw_stats->rx_rxf_ov; + + net_stats->tx_errors = hw_stats->tx_late_col + hw_stats->tx_abort_col + + hw_stats->tx_underrun + hw_stats->tx_trunc; + net_stats->tx_fifo_errors = hw_stats->tx_underrun; + net_stats->tx_aborted_errors = hw_stats->tx_abort_col; + net_stats->tx_window_errors = hw_stats->tx_late_col; + + return net_stats; +} + +static void atl1e_update_hw_stats(struct atl1e_adapter *adapter) +{ + u16 hw_reg_addr = 0; + unsigned long *stats_item = NULL; + + /* update rx status */ + hw_reg_addr = REG_MAC_RX_STATUS_BIN; + stats_item = &adapter->hw_stats.rx_ok; + while (hw_reg_addr <= REG_MAC_RX_STATUS_END) { + *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); + stats_item++; + hw_reg_addr += 4; + } + /* update tx status */ + hw_reg_addr = REG_MAC_TX_STATUS_BIN; + stats_item = &adapter->hw_stats.tx_ok; + while (hw_reg_addr <= REG_MAC_TX_STATUS_END) { + *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); + stats_item++; + hw_reg_addr += 4; + } +} + +static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter) +{ + u16 phy_data; + + spin_lock(&adapter->mdio_lock); + atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data); + spin_unlock(&adapter->mdio_lock); +} + +static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + struct atl1e_tx_buffer *tx_buffer = NULL; + u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX); + u16 next_to_clean = atomic_read(&tx_ring->next_to_clean); + + while (next_to_clean != hw_next_to_clean) { + tx_buffer = &tx_ring->tx_buffer[next_to_clean]; + if (tx_buffer->dma) { + if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE) + pci_unmap_single(adapter->pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE) + pci_unmap_page(adapter->pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + tx_buffer->dma = 0; + } + + if (tx_buffer->skb) { + dev_kfree_skb_irq(tx_buffer->skb); + tx_buffer->skb = NULL; + } + + if (++next_to_clean == tx_ring->count) + next_to_clean = 0; + } + + atomic_set(&tx_ring->next_to_clean, next_to_clean); + + if (netif_queue_stopped(adapter->netdev) && + netif_carrier_ok(adapter->netdev)) { + netif_wake_queue(adapter->netdev); + } + + return true; +} + +/** + * atl1e_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t 
atl1e_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + int max_ints = AT_MAX_INT_WORK; + int handled = IRQ_NONE; + u32 status; + + do { + status = AT_READ_REG(hw, REG_ISR); + if ((status & IMR_NORMAL_MASK) == 0 || + (status & ISR_DIS_INT) != 0) { + if (max_ints != AT_MAX_INT_WORK) + handled = IRQ_HANDLED; + break; + } + /* link event */ + if (status & ISR_GPHY) + atl1e_clear_phy_int(adapter); + /* Ack ISR */ + AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); + + handled = IRQ_HANDLED; + /* check if PCIE PHY Link down */ + if (status & ISR_PHY_LINKDOWN) { + netdev_err(adapter->netdev, + "pcie phy linkdown %x\n", status); + if (netif_running(adapter->netdev)) { + /* reset MAC */ + atl1e_irq_reset(adapter); + schedule_work(&adapter->reset_task); + break; + } + } + + /* check if DMA read/write error */ + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { + netdev_err(adapter->netdev, + "PCIE DMA RW error (status = 0x%x)\n", + status); + atl1e_irq_reset(adapter); + schedule_work(&adapter->reset_task); + break; + } + + if (status & ISR_SMB) + atl1e_update_hw_stats(adapter); + + /* link event */ + if (status & (ISR_GPHY | ISR_MANUAL)) { + netdev->stats.tx_carrier_errors++; + atl1e_link_chg_event(adapter); + break; + } + + /* transmit event */ + if (status & ISR_TX_EVENT) + atl1e_clean_tx_irq(adapter); + + if (status & ISR_RX_EVENT) { + /* + * disable rx interrupts, without + * the synchronize_irq bit + */ + AT_WRITE_REG(hw, REG_IMR, + IMR_NORMAL_MASK & ~ISR_RX_EVENT); + AT_WRITE_FLUSH(hw); + if (likely(napi_schedule_prep( + &adapter->napi))) + __napi_schedule(&adapter->napi); + } + } while (--max_ints > 0); + /* re-enable Interrupt*/ + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + + return handled; +} + +static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter, + struct sk_buff *skb, struct atl1e_recv_ret_status *prrs) +{ + u8 *packet = (u8 *)(prrs + 1); + struct iphdr *iph; + u16 head_len = ETH_HLEN; + u16 pkt_flags; + u16 err_flags; + + skb_checksum_none_assert(skb); + pkt_flags = prrs->pkt_flag; + err_flags = prrs->err_flag; + if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) && + ((pkt_flags & RRS_IS_TCP) || (pkt_flags & RRS_IS_UDP))) { + if (pkt_flags & RRS_IS_IPV4) { + if (pkt_flags & RRS_IS_802_3) + head_len += 8; + iph = (struct iphdr *) (packet + head_len); + if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF)) + goto hw_xsum; + } + if (!(err_flags & (RRS_ERR_IP_CSUM | RRS_ERR_L4_CSUM))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + } + } + +hw_xsum : + return; +} + +static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter, + u8 que) +{ + struct atl1e_rx_page_desc *rx_page_desc = + (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc; + u8 rx_using = rx_page_desc[que].rx_using; + + return &(rx_page_desc[que].rx_page[rx_using]); +} + +static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct atl1e_rx_ring *rx_ring = &adapter->rx_ring; + struct atl1e_rx_page_desc *rx_page_desc = + (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc; + struct sk_buff *skb = NULL; + struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter, que); + u32 packet_size, write_offset; + struct atl1e_recv_ret_status *prrs; + + write_offset = *(rx_page->write_offset_addr); + if (likely(rx_page->read_offset < write_offset)) { + do { + if (*work_done 
>= work_to_do) + break; + (*work_done)++; + /* get new packet's rrs */ + prrs = (struct atl1e_recv_ret_status *) (rx_page->addr + + rx_page->read_offset); + /* check sequence number */ + if (prrs->seq_num != rx_page_desc[que].rx_nxseq) { + netdev_err(netdev, + "rx sequence number error (rx=%d) (expect=%d)\n", + prrs->seq_num, + rx_page_desc[que].rx_nxseq); + rx_page_desc[que].rx_nxseq++; + /* just for debug use */ + AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0, + (((u32)prrs->seq_num) << 16) | + rx_page_desc[que].rx_nxseq); + goto fatal_err; + } + rx_page_desc[que].rx_nxseq++; + + /* error packet */ + if (prrs->pkt_flag & RRS_IS_ERR_FRAME) { + if (prrs->err_flag & (RRS_ERR_BAD_CRC | + RRS_ERR_DRIBBLE | RRS_ERR_CODE | + RRS_ERR_TRUNC)) { + /* hardware error, discard this packet*/ + netdev_err(netdev, + "rx packet desc error %x\n", + *((u32 *)prrs + 1)); + goto skip_pkt; + } + } + + packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & + RRS_PKT_SIZE_MASK) - 4; /* CRC */ + skb = netdev_alloc_skb_ip_align(netdev, packet_size); + if (skb == NULL) + goto skip_pkt; + + memcpy(skb->data, (u8 *)(prrs + 1), packet_size); + skb_put(skb, packet_size); + skb->protocol = eth_type_trans(skb, netdev); + atl1e_rx_checksum(adapter, skb, prrs); + + if (prrs->pkt_flag & RRS_IS_VLAN_TAG) { + u16 vlan_tag = (prrs->vtag >> 4) | + ((prrs->vtag & 7) << 13) | + ((prrs->vtag & 8) << 9); + netdev_dbg(netdev, + "RXD VLAN TAG=0x%04x\n", + prrs->vtag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + } + netif_receive_skb(skb); + +skip_pkt: + /* skip current packet whether it's ok or not. */ + rx_page->read_offset += + (((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & + RRS_PKT_SIZE_MASK) + + sizeof(struct atl1e_recv_ret_status) + 31) & + 0xFFFFFFE0); + + if (rx_page->read_offset >= rx_ring->page_size) { + /* mark this page clean */ + u16 reg_addr; + u8 rx_using; + + rx_page->read_offset = + *(rx_page->write_offset_addr) = 0; + rx_using = rx_page_desc[que].rx_using; + reg_addr = + atl1e_rx_page_vld_regs[que][rx_using]; + AT_WRITE_REGB(&adapter->hw, reg_addr, 1); + rx_page_desc[que].rx_using ^= 1; + rx_page = atl1e_get_rx_page(adapter, que); + } + write_offset = *(rx_page->write_offset_addr); + } while (rx_page->read_offset < write_offset); + } + + return; + +fatal_err: + if (!test_bit(__AT_DOWN, &adapter->flags)) + schedule_work(&adapter->reset_task); +} + +/** + * atl1e_clean - NAPI Rx polling callback + */ +static int atl1e_clean(struct napi_struct *napi, int budget) +{ + struct atl1e_adapter *adapter = + container_of(napi, struct atl1e_adapter, napi); + u32 imr_data; + int work_done = 0; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(adapter->netdev)) + goto quit_polling; + + atl1e_clean_rx_irq(adapter, 0, &work_done, budget); + + /* If no Tx and not enough Rx work done, exit the polling mode */ + if (work_done < budget) { +quit_polling: + napi_complete(napi); + imr_data = AT_READ_REG(&adapter->hw, REG_IMR); + AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); + /* test debug */ + if (test_bit(__AT_DOWN, &adapter->flags)) { + atomic_dec(&adapter->irq_sem); + netdev_err(adapter->netdev, + "atl1e_clean is called when AT_DOWN\n"); + } + /* reenable RX intr */ + /*atl1e_irq_enable(adapter); */ + + } + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
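+ * The helper below simply masks the device IRQ and invokes atl1e_intr() synchronously.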
+ */ +static void atl1e_netpoll(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + atl1e_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + u16 next_to_use = 0; + u16 next_to_clean = 0; + + next_to_clean = atomic_read(&tx_ring->next_to_clean); + next_to_use = tx_ring->next_to_use; + + return (u16)(next_to_clean > next_to_use) ? + (next_to_clean - next_to_use - 1) : + (tx_ring->count + next_to_clean - next_to_use - 1); +} + +/* + * get next usable tpd + * Note: should call atl1e_tpd_avail to make sure + * there are enough tpds to use + */ +static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + u16 next_to_use = 0; + + next_to_use = tx_ring->next_to_use; + if (++tx_ring->next_to_use == tx_ring->count) + tx_ring->next_to_use = 0; + + memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc)); + return &tx_ring->desc[next_to_use]; +} + +static struct atl1e_tx_buffer * +atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + + return &tx_ring->tx_buffer[tpd - tx_ring->desc]; +} + +/* Calculate the number of transmit packet descriptors needed */ +static u16 atl1e_cal_tdp_req(const struct sk_buff *skb) +{ + int i = 0; + u16 tpd_req = 1; + u16 fg_size = 0; + u16 proto_hdr_len = 0; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); + tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT); + } + + if (skb_is_gso(skb)) { + if (skb->protocol == htons(ETH_P_IP) || + (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) { + proto_hdr_len = skb_transport_offset(skb) + + tcp_hdrlen(skb); + if (proto_hdr_len < skb_headlen(skb)) { + tpd_req += ((skb_headlen(skb) - proto_hdr_len + + MAX_TX_BUF_LEN - 1) >> + MAX_TX_BUF_SHIFT); + } + } + + } + return tpd_req; +} + +static int atl1e_tso_csum(struct atl1e_adapter *adapter, + struct sk_buff *skb, struct atl1e_tpd_desc *tpd) +{ + u8 hdr_len; + u32 real_len; + unsigned short offload_type; + int err; + + if (skb_is_gso(skb)) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (unlikely(err)) + return -1; + } + offload_type = skb_shinfo(skb)->gso_type; + + if (offload_type & SKB_GSO_TCPV4) { + real_len = (((unsigned char *)ip_hdr(skb) - skb->data) + + ntohs(ip_hdr(skb)->tot_len)); + + if (real_len < skb->len) + pskb_trim(skb, real_len); + + hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); + if (unlikely(skb->len == hdr_len)) { + /* only xsum needed */ + netdev_warn(adapter->netdev, + "IPV4 tso with zero data??\n"); + goto check_sum; + } else { + ip_hdr(skb)->check = 0; + ip_hdr(skb)->tot_len = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic( + ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + tpd->word3 |= (ip_hdr(skb)->ihl & + TDP_V4_IPHL_MASK) << + TPD_V4_IPHL_SHIFT; + tpd->word3 |= ((tcp_hdrlen(skb) >> 2) & + TPD_TCPHDRLEN_MASK) << + TPD_TCPHDRLEN_SHIFT; + tpd->word3 |= ((skb_shinfo(skb)->gso_size) & + TPD_MSS_MASK) << TPD_MSS_SHIFT; + tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT; + } + return 0; + } + } + +check_sum: + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + u8 css, cso; + + cso = skb_checksum_start_offset(skb); + if (unlikely(cso & 0x1)) { + netdev_err(adapter->netdev, 
+ "payload offset should not ant event number\n"); + return -1; + } else { + css = cso + skb->csum_offset; + tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) << + TPD_PLOADOFFSET_SHIFT; + tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) << + TPD_CCSUMOFFSET_SHIFT; + tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT; + } + } + + return 0; +} + +static int atl1e_tx_map(struct atl1e_adapter *adapter, + struct sk_buff *skb, struct atl1e_tpd_desc *tpd) +{ + struct atl1e_tpd_desc *use_tpd = NULL; + struct atl1e_tx_buffer *tx_buffer = NULL; + u16 buf_len = skb_headlen(skb); + u16 map_len = 0; + u16 mapped_len = 0; + u16 hdr_len = 0; + u16 nr_frags; + u16 f; + int segment; + int ring_start = adapter->tx_ring.next_to_use; + int ring_end; + + nr_frags = skb_shinfo(skb)->nr_frags; + segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; + if (segment) { + /* TSO */ + map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + use_tpd = tpd; + + tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); + tx_buffer->length = map_len; + tx_buffer->dma = pci_map_single(adapter->pdev, + skb->data, hdr_len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) + return -ENOSPC; + + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); + mapped_len += map_len; + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); + use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | + ((cpu_to_le32(tx_buffer->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); + } + + while (mapped_len < buf_len) { + /* mapped_len == 0, means we should use the first tpd, + which is given by caller */ + if (mapped_len == 0) { + use_tpd = tpd; + } else { + use_tpd = atl1e_get_tpd(adapter); + memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); + } + tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); + tx_buffer->skb = NULL; + + tx_buffer->length = map_len = + ((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ? + MAX_TX_BUF_LEN : (buf_len - mapped_len); + tx_buffer->dma = + pci_map_single(adapter->pdev, skb->data + mapped_len, + map_len, PCI_DMA_TODEVICE); + + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { + /* We need to unwind the mappings we've done */ + ring_end = adapter->tx_ring.next_to_use; + adapter->tx_ring.next_to_use = ring_start; + while (adapter->tx_ring.next_to_use != ring_end) { + tpd = atl1e_get_tpd(adapter); + tx_buffer = atl1e_get_tx_buffer(adapter, tpd); + pci_unmap_single(adapter->pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + } + /* Reset the tx rings next pointer */ + adapter->tx_ring.next_to_use = ring_start; + return -ENOSPC; + } + + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); + mapped_len += map_len; + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); + use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | + ((cpu_to_le32(tx_buffer->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); + } + + for (f = 0; f < nr_frags; f++) { + const struct skb_frag_struct *frag; + u16 i; + u16 seg_num; + + frag = &skb_shinfo(skb)->frags[f]; + buf_len = skb_frag_size(frag); + + seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; + for (i = 0; i < seg_num; i++) { + use_tpd = atl1e_get_tpd(adapter); + memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); + + tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); + BUG_ON(tx_buffer->skb); + + tx_buffer->skb = NULL; + tx_buffer->length = + (buf_len > MAX_TX_BUF_LEN) ? 
+ MAX_TX_BUF_LEN : buf_len; + buf_len -= tx_buffer->length; + + tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev, + frag, + (i * MAX_TX_BUF_LEN), + tx_buffer->length, + DMA_TO_DEVICE); + + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { + /* We need to unwind the mappings we've done */ + ring_end = adapter->tx_ring.next_to_use; + adapter->tx_ring.next_to_use = ring_start; + while (adapter->tx_ring.next_to_use != ring_end) { + tpd = atl1e_get_tpd(adapter); + tx_buffer = atl1e_get_tx_buffer(adapter, tpd); + dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma, + tx_buffer->length, DMA_TO_DEVICE); + } + + /* Reset the ring's next-to-use pointer */ + adapter->tx_ring.next_to_use = ring_start; + return -ENOSPC; + } + + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE); + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); + use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | + ((cpu_to_le32(tx_buffer->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); + } + } + + if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK) + /* note this one is a tcp header */ + tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT; + /* The last tpd */ + + use_tpd->word3 |= 1 << TPD_EOP_SHIFT; + /* The last buffer info contains the skb address, + so it will be freed after unmap */ + tx_buffer->skb = skb; + return 0; +} + +static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count, + struct atl1e_tpd_desc *tpd) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). */ + wmb(); + AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use); +} + +static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + unsigned long flags; + u16 tpd_req = 1; + struct atl1e_tpd_desc *tpd; + + if (test_bit(__AT_DOWN, &adapter->flags)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (unlikely(skb->len <= 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + tpd_req = atl1e_cal_tdp_req(skb); + if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) + return NETDEV_TX_LOCKED; + + if (atl1e_tpd_avail(adapter) < tpd_req) { + /* not enough descriptors, just stop the queue */ + netif_stop_queue(netdev); + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_BUSY; + } + + tpd = atl1e_get_tpd(adapter); + + if (vlan_tx_tag_present(skb)) { + u16 vlan_tag = vlan_tx_tag_get(skb); + u16 atl1e_vlan_tag; + + tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT; + AT_VLAN_TAG_TO_TPD_TAG(vlan_tag, atl1e_vlan_tag); + tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) << + TPD_VLAN_SHIFT; + } + + if (skb->protocol == htons(ETH_P_8021Q)) + tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT; + + if (skb_network_offset(skb) != ETH_HLEN) + tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */ + + /* do TSO and checksum */ + if (atl1e_tso_csum(adapter, skb, tpd) != 0) { + spin_unlock_irqrestore(&adapter->tx_lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (atl1e_tx_map(adapter, skb, tpd)) { + dev_kfree_skb_any(skb); + goto out; + } + + atl1e_tx_queue(adapter, tpd_req, tpd); + + netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ +out: + spin_unlock_irqrestore(&adapter->tx_lock, flags); + return NETDEV_TX_OK; +} + +static void atl1e_free_irq(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + 
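+ /* netdev is the dev_id cookie that atl1e_request_irq() passes to request_irq(), so the same pointer must be used to free the shared IRQ. */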
free_irq(adapter->pdev->irq, netdev); +} + +static int atl1e_request_irq(struct atl1e_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int err = 0; + + err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name, + netdev); + if (err) { + netdev_dbg(adapter->netdev, + "Unable to allocate interrupt Error: %d\n", err); + return err; + } + netdev_dbg(netdev, "atl1e_request_irq OK\n"); + return err; +} + +int atl1e_up(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + u32 val; + + /* hardware has been reset, we need to reload some things */ + err = atl1e_init_hw(&adapter->hw); + if (err) { + err = -EIO; + return err; + } + atl1e_init_ring_ptrs(adapter); + atl1e_set_multi(netdev); + atl1e_restore_vlan(adapter); + + if (atl1e_configure(adapter)) { + err = -EIO; + goto err_up; + } + + clear_bit(__AT_DOWN, &adapter->flags); + napi_enable(&adapter->napi); + atl1e_irq_enable(adapter); + val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL); + AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, + val | MASTER_CTRL_MANUAL_INT); + +err_up: + return err; +} + +void atl1e_down(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__AT_DOWN, &adapter->flags); + + netif_stop_queue(netdev); + + /* reset MAC to disable all RX/TX */ + atl1e_reset_hw(&adapter->hw); + msleep(1); + + napi_disable(&adapter->napi); + atl1e_del_timer(adapter); + atl1e_irq_disable(adapter); + + netif_carrier_off(netdev); + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; + atl1e_clean_tx_ring(adapter); + atl1e_clean_rx_ring(adapter); +} + +/** + * atl1e_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + */ +static int atl1e_open(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__AT_TESTING, &adapter->flags)) + return -EBUSY; + + /* allocate rx/tx dma buffer & descriptors */ + atl1e_init_ring_resources(adapter); + err = atl1e_setup_ring_resources(adapter); + if (unlikely(err)) + return err; + + err = atl1e_request_irq(adapter); + if (unlikely(err)) + goto err_req_irq; + + err = atl1e_up(adapter); + if (unlikely(err)) + goto err_up; + + return 0; + +err_up: + atl1e_free_irq(adapter); +err_req_irq: + atl1e_free_ring_resources(adapter); + atl1e_reset_hw(&adapter->hw); + + return err; +} + +/** + * atl1e_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
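+ * Teardown mirrors atl1e_open() in reverse: stop the MAC (atl1e_down), release the IRQ, then free the descriptor rings.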
+ */ +static int atl1e_close(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + atl1e_down(adapter); + atl1e_free_irq(adapter); + atl1e_free_ring_resources(adapter); + + return 0; +} + +static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 ctrl = 0; + u32 mac_ctrl_data = 0; + u32 wol_ctrl_data = 0; + u16 mii_advertise_data = 0; + u16 mii_bmsr_data = 0; + u16 mii_intr_status_data = 0; + u32 wufc = adapter->wol; + u32 i; +#ifdef CONFIG_PM + int retval = 0; +#endif + + if (netif_running(netdev)) { + WARN_ON(test_bit(__AT_RESETTING, &adapter->flags)); + atl1e_down(adapter); + } + netif_device_detach(netdev); + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + if (wufc) { + /* get link status */ + atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data); + atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data); + + mii_advertise_data = ADVERTISE_10HALF; + + if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) || + (atl1e_write_phy_reg(hw, + MII_ADVERTISE, mii_advertise_data) != 0) || + (atl1e_phy_commit(hw)) != 0) { + netdev_dbg(adapter->netdev, "set phy register failed\n"); + goto wol_dis; + } + + hw->phy_configured = false; /* re-init PHY on resume */ + + /* turn on magic packet wol */ + if (wufc & AT_WUFC_MAG) + wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN; + + if (wufc & AT_WUFC_LNKC) { + /* if the original link status is up, just wait to retrieve the link */ + if (mii_bmsr_data & BMSR_LSTATUS) { + for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) { + msleep(100); + atl1e_read_phy_reg(hw, MII_BMSR, + &mii_bmsr_data); + if (mii_bmsr_data & BMSR_LSTATUS) + break; + } + + if ((mii_bmsr_data & BMSR_LSTATUS) == 0) + netdev_dbg(adapter->netdev, + "Link may change during suspend\n"); + } + wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN; + /* only a link-up event can wake up */ + if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) { + netdev_dbg(adapter->netdev, + "write phy register failed\n"); + goto wol_dis; + } + } + /* clear phy interrupt */ + atl1e_read_phy_reg(hw, MII_INT_STATUS, &mii_intr_status_data); + /* Config MAC Ctrl register */ + mac_ctrl_data = MAC_CTRL_RX_EN; + /* set to 10/100M half duplex */ + mac_ctrl_data |= MAC_CTRL_SPEED_10_100 << MAC_CTRL_SPEED_SHIFT; + mac_ctrl_data |= (((u32)adapter->hw.preamble_len & + MAC_CTRL_PRMLEN_MASK) << + MAC_CTRL_PRMLEN_SHIFT); + + __atl1e_vlan_mode(netdev->features, &mac_ctrl_data); + + /* the magic packet may be a broadcast, multicast or unicast frame */ + if (wufc & AT_WUFC_MAG) + mac_ctrl_data |= MAC_CTRL_BC_EN; + + netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n", + mac_ctrl_data); + + AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); + /* pcie patch */ + ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); + goto suspend_exit; + } +wol_dis: + + /* WOL disabled */ + AT_WRITE_REG(hw, REG_WOL_CTRL, 0); + + /* pcie patch */ + ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC); + ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; + AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl); + + atl1e_force_ps(hw); + hw->phy_configured = false; /* re-init PHY on resume */ + + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); + 
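+ /* Both the WOL-armed and WOL-disabled paths converge here to release the IRQ and power the device down. */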
+suspend_exit: + + if (netif_running(netdev)) + atl1e_free_irq(adapter); + + pci_disable_device(pdev); + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +#ifdef CONFIG_PM +static int atl1e_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + err = pci_enable_device(pdev); + if (err) { + netdev_err(adapter->netdev, + "Cannot enable PCI device from suspend\n"); + return err; + } + + pci_set_master(pdev); + + AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */ + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); + + if (netif_running(netdev)) { + err = atl1e_request_irq(adapter); + if (err) + return err; + } + + atl1e_reset_hw(&adapter->hw); + + if (netif_running(netdev)) + atl1e_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void atl1e_shutdown(struct pci_dev *pdev) +{ + atl1e_suspend(pdev, PMSG_SUSPEND); +} + +static const struct net_device_ops atl1e_netdev_ops = { + .ndo_open = atl1e_open, + .ndo_stop = atl1e_close, + .ndo_start_xmit = atl1e_xmit_frame, + .ndo_get_stats = atl1e_get_stats, + .ndo_set_rx_mode = atl1e_set_multi, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = atl1e_set_mac_addr, + .ndo_fix_features = atl1e_fix_features, + .ndo_set_features = atl1e_set_features, + .ndo_change_mtu = atl1e_change_mtu, + .ndo_do_ioctl = atl1e_ioctl, + .ndo_tx_timeout = atl1e_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = atl1e_netpoll, +#endif + +}; + +static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) +{ + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + netdev->netdev_ops = &atl1e_netdev_ops; + + netdev->watchdog_timeo = AT_TX_WATCHDOG; + atl1e_set_ethtool_ops(netdev); + + netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = netdev->hw_features | NETIF_F_LLTX | + NETIF_F_HW_VLAN_CTAG_TX; + + return 0; +} + +/** + * atl1e_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl1e_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl1e_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + */ +static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl1e_adapter *adapter = NULL; + static int cards_found; + + int err = 0; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + return err; + } + + /* + * The atl1e chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. 
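+ * Concretely, atl1e_configure_des_ring() programs the shared high dword once through REG_DESC_BASE_ADDR_HI and every per-ring register carries only the low 32 bits, so with a 32-bit mask that shared high dword is always zero.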
+ */ + if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) || + (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) { + dev_err(&pdev->dev, "No usable DMA configuration,aborting\n"); + goto err_dma; + } + + err = pci_request_regions(pdev, atl1e_driver_name); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_pci_reg; + } + + pci_set_master(pdev); + + netdev = alloc_etherdev(sizeof(struct atl1e_adapter)); + if (netdev == NULL) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + + err = atl1e_init_netdev(netdev, pdev); + if (err) { + netdev_err(netdev, "init netdevice failed\n"); + goto err_init_netdev; + } + adapter = netdev_priv(netdev); + adapter->bd_number = cards_found; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->hw.adapter = adapter; + adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0); + if (!adapter->hw.hw_addr) { + err = -EIO; + netdev_err(netdev, "cannot map device registers\n"); + goto err_ioremap; + } + + /* init mii data */ + adapter->mii.dev = netdev; + adapter->mii.mdio_read = atl1e_mdio_read; + adapter->mii.mdio_write = atl1e_mdio_write; + adapter->mii.phy_id_mask = 0x1f; + adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK; + + netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64); + + init_timer(&adapter->phy_config_timer); + adapter->phy_config_timer.function = atl1e_phy_config; + adapter->phy_config_timer.data = (unsigned long) adapter; + + /* get user settings */ + atl1e_check_options(adapter); + /* + * Mark all PCI regions associated with PCI device + * pdev as being reserved by owner atl1e_driver_name + * Enables bus-mastering on the device and calls + * pcibios_set_master to do the needed arch specific settings + */ + atl1e_setup_pcicmd(pdev); + /* setup the private structure */ + err = atl1e_sw_init(adapter); + if (err) { + netdev_err(netdev, "net device private data init failed\n"); + goto err_sw_init; + } + + /* Init GPHY as early as possible due to power saving issue */ + atl1e_phy_init(&adapter->hw); + /* reset the controller to + * put the device in a known good starting state */ + err = atl1e_reset_hw(&adapter->hw); + if (err) { + err = -EIO; + goto err_reset; + } + + if (atl1e_read_mac_addr(&adapter->hw) != 0) { + err = -EIO; + netdev_err(netdev, "get mac address failed\n"); + goto err_eeprom; + } + + memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len); + netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr); + + INIT_WORK(&adapter->reset_task, atl1e_reset_task); + INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); + netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE); + err = register_netdev(netdev); + if (err) { + netdev_err(netdev, "register netdevice failed\n"); + goto err_register; + } + + /* assume we have no link for now */ + netif_stop_queue(netdev); + netif_carrier_off(netdev); + + cards_found++; + + return 0; + +err_reset: +err_register: +err_sw_init: +err_eeprom: + iounmap(adapter->hw.hw_addr); +err_init_netdev: +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_regions(pdev); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * atl1e_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * atl1e_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
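+ * + * Illustrative teardown path (a sketch, not part of this file): + * + *   pci_unregister_driver(&atl1e_driver);  // module unload + *       -> atl1e_remove(pdev)              // once per bound device + *           -> unregister_netdev(), iounmap(), pci_disable_device()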
+ */ +static void atl1e_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + + /* + * flush_scheduled_work() may reschedule our watchdog task, so + * explicitly disable watchdog tasks from being rescheduled + */ + set_bit(__AT_DOWN, &adapter->flags); + + atl1e_del_timer(adapter); + atl1e_cancel_work(adapter); + + unregister_netdev(netdev); + atl1e_free_ring_resources(adapter); + atl1e_force_ps(&adapter->hw); + iounmap(adapter->hw.hw_addr); + pci_release_regions(pdev); + free_netdev(netdev); + pci_disable_device(pdev); +} + +/** + * atl1e_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t +atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + atl1e_down(adapter); + + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * atl1e_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first half of the atl1e_resume routine. + */ +static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (pci_enable_device(pdev)) { + netdev_err(adapter->netdev, + "Cannot re-enable PCI device after reset\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + atl1e_reset_hw(&adapter->hw); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * atl1e_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * it's OK to resume normal operation. Implementation resembles the + * second half of the atl1e_resume routine. + */ +static void atl1e_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (atl1e_up(adapter)) { + netdev_err(adapter->netdev, + "can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static const struct pci_error_handlers atl1e_err_handler = { + .error_detected = atl1e_io_error_detected, + .slot_reset = atl1e_io_slot_reset, + .resume = atl1e_io_resume, +}; + +static struct pci_driver atl1e_driver = { + .name = atl1e_driver_name, + .id_table = atl1e_pci_tbl, + .probe = atl1e_probe, + .remove = atl1e_remove, + /* Power Management Hooks */ +#ifdef CONFIG_PM + .suspend = atl1e_suspend, + .resume = atl1e_resume, +#endif + .shutdown = atl1e_shutdown, + .err_handler = &atl1e_err_handler +}; + +/** + * atl1e_init_module - Driver Registration Routine + * + * atl1e_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. 
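+ * + * Usage sketch (parameter values are illustrative; the parameters + * themselves are defined in atl1e_param.c): + * + *   # modprobe atl1e tx_desc_cnt=256 int_mod_timer=100 + * + * pci_register_driver() then calls atl1e_probe() for each device that + * matches an entry in atl1e_pci_tbl.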
+ */ +static int __init atl1e_init_module(void) +{ + return pci_register_driver(&atl1e_driver); +} + +/** + * atl1e_exit_module - Driver Exit Cleanup Routine + * + * atl1e_exit_module is called just before the driver is removed + * from memory. + */ +static void __exit atl1e_exit_module(void) +{ + pci_unregister_driver(&atl1e_driver); +} + +module_init(atl1e_init_module); +module_exit(atl1e_exit_module); diff --git a/addons/atl1e/src/3.10.108/atl1e_param.c b/addons/atl1e/src/3.10.108/atl1e_param.c new file mode 100644 index 00000000..fa314282 --- /dev/null +++ b/addons/atl1e/src/3.10.108/atl1e_param.c @@ -0,0 +1,269 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include + +#include "atl1e.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define ATL1E_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ +#define ATL1E_PARAM_INIT { [0 ... 
ATL1E_MAX_NIC] = OPTION_UNSET } + +#define ATL1E_PARAM(x, desc) \ + static int x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \ + static unsigned int num_##x; \ + module_param_array_named(x, x, int, &num_##x, 0); \ + MODULE_PARM_DESC(x, desc); + +/* Transmit descriptor count + * + * Valid Range: 32-1020 + * + * Default Value: 128 + */ +#define ATL1E_MIN_TX_DESC_CNT 32 +#define ATL1E_MAX_TX_DESC_CNT 1020 +#define ATL1E_DEFAULT_TX_DESC_CNT 128 +ATL1E_PARAM(tx_desc_cnt, "Transmit descriptor count"); + +/* Receive Memory Block Count + * + * Valid Range: 8-1024 (KB) + * + * Default Value: 256 + */ +#define ATL1E_MIN_RX_MEM_SIZE 8 /* 8KB */ +#define ATL1E_MAX_RX_MEM_SIZE 1024 /* 1MB */ +#define ATL1E_DEFAULT_RX_MEM_SIZE 256 /* 256KB */ +ATL1E_PARAM(rx_mem_size, "memory size of rx buffer(KB)"); + +/* User Specified MediaType Override + * + * Valid Range: 0-4 + * - 0 - auto-negotiate at all supported speeds + * - 1 - only link at 100Mbps Full Duplex + * - 2 - only link at 100Mbps Half Duplex + * - 3 - only link at 10Mbps Full Duplex + * - 4 - only link at 10Mbps Half Duplex + * Default Value: 0 + */ + +ATL1E_PARAM(media_type, "MediaType Select"); + +/* Interrupt Moderate Timer in units of 2 us + * + * Valid Range: 50-65000 + * + * Default Value: 100 (200us) + */ +#define INT_MOD_DEFAULT_CNT 100 /* 200us */ +#define INT_MOD_MAX_CNT 65000 +#define INT_MOD_MIN_CNT 50 +ATL1E_PARAM(int_mod_timer, "Interrupt Moderator Timer"); + +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +#define FLASH_VENDOR_DEFAULT 0 +#define FLASH_VENDOR_MIN 0 +#define FLASH_VENDOR_MAX 2 + +struct atl1e_option { + enum { enable_option, range_option, list_option } type; + char *name; + char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + struct atl1e_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int atl1e_validate_option(int *value, struct atl1e_option *opt, + struct atl1e_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + netdev_info(adapter->netdev, + "%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + netdev_info(adapter->netdev, + "%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + netdev_info(adapter->netdev, "%s set to %i\n", + opt->name, *value); + return 0; + } + break; + case list_option:{ + int i; + struct atl1e_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + netdev_info(adapter->netdev, + "%s\n", ent->str); + return 0; + } + } + break; + } + default: + BUG(); + } + + netdev_info(adapter->netdev, "Invalid %s specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/** + * atl1e_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
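+ * + * Example (illustrative values): with two boards and + * "modprobe atl1e tx_desc_cnt=256,512", board 0 gets a 256-entry TX ring + * and board 1 a 512-entry ring; a third board would fall back to the + * 128-entry default, because num_tx_desc_cnt would not exceed its + * bd_number.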
+ */ +void atl1e_check_options(struct atl1e_adapter *adapter) +{ + int bd = adapter->bd_number; + + if (bd >= ATL1E_MAX_NIC) { + netdev_notice(adapter->netdev, + "no configuration for board #%i\n", bd); + netdev_notice(adapter->netdev, + "Using defaults for all values\n"); + } + + { /* Transmit Ring Size */ + struct atl1e_option opt = { + .type = range_option, + .name = "Transmit Descriptor Count", + .err = "using default of " + __MODULE_STRING(ATL1E_DEFAULT_TX_DESC_CNT), + .def = ATL1E_DEFAULT_TX_DESC_CNT, + .arg = { .r = { .min = ATL1E_MIN_TX_DESC_CNT, + .max = ATL1E_MAX_TX_DESC_CNT} } + }; + int val; + if (num_tx_desc_cnt > bd) { + val = tx_desc_cnt[bd]; + atl1e_validate_option(&val, &opt, adapter); + adapter->tx_ring.count = (u16) val & 0xFFFC; + } else + adapter->tx_ring.count = (u16)opt.def; + } + + { /* Receive Memory Block Count */ + struct atl1e_option opt = { + .type = range_option, + .name = "Memory size of rx buffer(KB)", + .err = "using default of " + __MODULE_STRING(ATL1E_DEFAULT_RX_MEM_SIZE), + .def = ATL1E_DEFAULT_RX_MEM_SIZE, + .arg = { .r = { .min = ATL1E_MIN_RX_MEM_SIZE, + .max = ATL1E_MAX_RX_MEM_SIZE} } + }; + int val; + if (num_rx_mem_size > bd) { + val = rx_mem_size[bd]; + atl1e_validate_option(&val, &opt, adapter); + adapter->rx_ring.page_size = (u32)val * 1024; + } else { + adapter->rx_ring.page_size = (u32)opt.def * 1024; + } + } + + { /* Interrupt Moderate Timer */ + struct atl1e_option opt = { + .type = range_option, + .name = "Interrupt Moderate Timer", + .err = "using default of " + __MODULE_STRING(INT_MOD_DEFAULT_CNT), + .def = INT_MOD_DEFAULT_CNT, + .arg = { .r = { .min = INT_MOD_MIN_CNT, + .max = INT_MOD_MAX_CNT} } + }; + int val; + if (num_int_mod_timer > bd) { + val = int_mod_timer[bd]; + atl1e_validate_option(&val, &opt, adapter); + adapter->hw.imt = (u16) val; + } else + adapter->hw.imt = (u16)(opt.def); + } + + { /* MediaType */ + struct atl1e_option opt = { + .type = range_option, + .name = "Speed/Duplex Selection", + .err = "using default of " + __MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR), + .def = MEDIA_TYPE_AUTO_SENSOR, + .arg = { .r = { .min = MEDIA_TYPE_AUTO_SENSOR, + .max = MEDIA_TYPE_10M_HALF} } + }; + int val; + if (num_media_type > bd) { + val = media_type[bd]; + atl1e_validate_option(&val, &opt, adapter); + adapter->hw.media_type = (u16) val; + } else + adapter->hw.media_type = (u16)(opt.def); + + } +} diff --git a/addons/atl1e/src/4.4.180/Makefile b/addons/atl1e/src/4.4.180/Makefile new file mode 100644 index 00000000..fe2e3d47 --- /dev/null +++ b/addons/atl1e/src/4.4.180/Makefile @@ -0,0 +1,2 @@ +obj-m += atl1e.o +atl1e-objs += atl1e_main.o atl1e_hw.o atl1e_ethtool.o atl1e_param.o diff --git a/addons/atl1e/src/4.4.180/atl1e.h b/addons/atl1e/src/4.4.180/atl1e.h new file mode 100644 index 00000000..0212dac7 --- /dev/null +++ b/addons/atl1e/src/4.4.180/atl1e.h @@ -0,0 +1,507 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * Copyright(c) 2007 xiong huang + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#ifndef _ATL1E_H_ +#define _ATL1E_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "atl1e_hw.h" + +#define PCI_REG_COMMAND 0x04 /* PCI Command Register */ +#define CMD_IO_SPACE 0x0001 +#define CMD_MEMORY_SPACE 0x0002 +#define CMD_BUS_MASTER 0x0004 + +#define BAR_0 0 +#define BAR_1 1 +#define BAR_5 5 + +/* Wake Up Filter Control */ +#define AT_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define AT_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define AT_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define AT_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */ +#define AT_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ + +#define SPEED_0 0xffff +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* Error Codes */ +#define AT_ERR_EEPROM 1 +#define AT_ERR_PHY 2 +#define AT_ERR_CONFIG 3 +#define AT_ERR_PARAM 4 +#define AT_ERR_MAC_TYPE 5 +#define AT_ERR_PHY_TYPE 6 +#define AT_ERR_PHY_SPEED 7 +#define AT_ERR_PHY_RES 8 +#define AT_ERR_TIMEOUT 9 + +#define MAX_JUMBO_FRAME_SIZE 0x2000 + +#define AT_VLAN_TAG_TO_TPD_TAG(_vlan, _tpd) \ + _tpd = (((_vlan) << (4)) | (((_vlan) >> 13) & 7) |\ + (((_vlan) >> 9) & 8)) + +#define AT_TPD_TAG_TO_VLAN_TAG(_tpd, _vlan) \ + _vlan = (((_tpd) >> 8) | (((_tpd) & 0x77) << 9) |\ + (((_tdp) & 0x88) << 5)) + +#define AT_MAX_RECEIVE_QUEUE 4 +#define AT_PAGE_NUM_PER_QUEUE 2 + +#define AT_DMA_HI_ADDR_MASK 0xffffffff00000000ULL +#define AT_DMA_LO_ADDR_MASK 0x00000000ffffffffULL + +#define AT_TX_WATCHDOG (5 * HZ) +#define AT_MAX_INT_WORK 10 +#define AT_TWSI_EEPROM_TIMEOUT 100 +#define AT_HW_MAX_IDLE_DELAY 10 +#define AT_SUSPEND_LINK_TIMEOUT 28 + +#define AT_REGS_LEN 75 +#define AT_EEPROM_LEN 512 +#define AT_ADV_MASK (ADVERTISE_10_HALF |\ + ADVERTISE_10_FULL |\ + ADVERTISE_100_HALF |\ + ADVERTISE_100_FULL |\ + ADVERTISE_1000_FULL) + +/* tpd word 2 */ +#define TPD_BUFLEN_MASK 0x3FFF +#define TPD_BUFLEN_SHIFT 0 +#define TPD_DMAINT_MASK 0x0001 +#define TPD_DMAINT_SHIFT 14 +#define TPD_PKTNT_MASK 0x0001 +#define TPD_PKTINT_SHIFT 15 +#define TPD_VLANTAG_MASK 0xFFFF +#define TPD_VLAN_SHIFT 16 + +/* tpd word 3 bits 0:4 */ +#define TPD_EOP_MASK 0x0001 +#define TPD_EOP_SHIFT 0 +#define TPD_IP_VERSION_MASK 0x0001 +#define TPD_IP_VERSION_SHIFT 1 /* 0 : IPV4, 1 : IPV6 */ +#define TPD_INS_VL_TAG_MASK 0x0001 +#define TPD_INS_VL_TAG_SHIFT 2 +#define TPD_CC_SEGMENT_EN_MASK 0x0001 +#define TPD_CC_SEGMENT_EN_SHIFT 3 +#define TPD_SEGMENT_EN_MASK 0x0001 +#define TPD_SEGMENT_EN_SHIFT 4 + +/* tdp word 3 bits 5:7 if ip version is 0 */ +#define TPD_IP_CSUM_MASK 0x0001 +#define TPD_IP_CSUM_SHIFT 5 +#define TPD_TCP_CSUM_MASK 0x0001 +#define TPD_TCP_CSUM_SHIFT 6 +#define TPD_UDP_CSUM_MASK 0x0001 +#define TPD_UDP_CSUM_SHIFT 7 + +/* tdp word 3 bits 5:7 if ip version is 1 */ +#define TPD_V6_IPHLLO_MASK 0x0007 +#define TPD_V6_IPHLLO_SHIFT 7 + +/* tpd word 3 bits 8:9 bit */ +#define TPD_VL_TAGGED_MASK 0x0001 +#define TPD_VL_TAGGED_SHIFT 8 +#define TPD_ETHTYPE_MASK 0x0001 +#define TPD_ETHTYPE_SHIFT 9 + +/* tdp word 3 bits 10:13 if ip version is 0 */ +#define 
TDP_V4_IPHL_MASK 0x000F +#define TPD_V4_IPHL_SHIFT 10 + +/* tdp word 3 bits 10:13 if ip version is 1 */ +#define TPD_V6_IPHLHI_MASK 0x000F +#define TPD_V6_IPHLHI_SHIFT 10 + +/* tpd word 3 bit 14:31 if segment enabled */ +#define TPD_TCPHDRLEN_MASK 0x000F +#define TPD_TCPHDRLEN_SHIFT 14 +#define TPD_HDRFLAG_MASK 0x0001 +#define TPD_HDRFLAG_SHIFT 18 +#define TPD_MSS_MASK 0x1FFF +#define TPD_MSS_SHIFT 19 + +/* tdp word 3 bit 16:31 if custom csum enabled */ +#define TPD_PLOADOFFSET_MASK 0x00FF +#define TPD_PLOADOFFSET_SHIFT 16 +#define TPD_CCSUMOFFSET_MASK 0x00FF +#define TPD_CCSUMOFFSET_SHIFT 24 + +struct atl1e_tpd_desc { + __le64 buffer_addr; + __le32 word2; + __le32 word3; +}; + +/* how about 0x2000 */ +#define MAX_TX_BUF_LEN 0x2000 +#define MAX_TX_BUF_SHIFT 13 +#define MAX_TSO_SEG_SIZE 0x3c00 + +/* rrs word 1 bit 0:31 */ +#define RRS_RX_CSUM_MASK 0xFFFF +#define RRS_RX_CSUM_SHIFT 0 +#define RRS_PKT_SIZE_MASK 0x3FFF +#define RRS_PKT_SIZE_SHIFT 16 +#define RRS_CPU_NUM_MASK 0x0003 +#define RRS_CPU_NUM_SHIFT 30 + +#define RRS_IS_RSS_IPV4 0x0001 +#define RRS_IS_RSS_IPV4_TCP 0x0002 +#define RRS_IS_RSS_IPV6 0x0004 +#define RRS_IS_RSS_IPV6_TCP 0x0008 +#define RRS_IS_IPV6 0x0010 +#define RRS_IS_IP_FRAG 0x0020 +#define RRS_IS_IP_DF 0x0040 +#define RRS_IS_802_3 0x0080 +#define RRS_IS_VLAN_TAG 0x0100 +#define RRS_IS_ERR_FRAME 0x0200 +#define RRS_IS_IPV4 0x0400 +#define RRS_IS_UDP 0x0800 +#define RRS_IS_TCP 0x1000 +#define RRS_IS_BCAST 0x2000 +#define RRS_IS_MCAST 0x4000 +#define RRS_IS_PAUSE 0x8000 + +#define RRS_ERR_BAD_CRC 0x0001 +#define RRS_ERR_CODE 0x0002 +#define RRS_ERR_DRIBBLE 0x0004 +#define RRS_ERR_RUNT 0x0008 +#define RRS_ERR_RX_OVERFLOW 0x0010 +#define RRS_ERR_TRUNC 0x0020 +#define RRS_ERR_IP_CSUM 0x0040 +#define RRS_ERR_L4_CSUM 0x0080 +#define RRS_ERR_LENGTH 0x0100 +#define RRS_ERR_DES_ADDR 0x0200 + +struct atl1e_recv_ret_status { + u16 seq_num; + u16 hash_lo; + __le32 word1; + u16 pkt_flag; + u16 err_flag; + u16 hash_hi; + u16 vtag; +}; + +enum atl1e_dma_req_block { + atl1e_dma_req_128 = 0, + atl1e_dma_req_256 = 1, + atl1e_dma_req_512 = 2, + atl1e_dma_req_1024 = 3, + atl1e_dma_req_2048 = 4, + atl1e_dma_req_4096 = 5 +}; + +enum atl1e_rrs_type { + atl1e_rrs_disable = 0, + atl1e_rrs_ipv4 = 1, + atl1e_rrs_ipv4_tcp = 2, + atl1e_rrs_ipv6 = 4, + atl1e_rrs_ipv6_tcp = 8 +}; + +enum atl1e_nic_type { + athr_l1e = 0, + athr_l2e_revA = 1, + athr_l2e_revB = 2 +}; + +struct atl1e_hw_stats { + /* rx */ + unsigned long rx_ok; /* The number of good packet received. */ + unsigned long rx_bcast; /* The number of good broadcast packet received. */ + unsigned long rx_mcast; /* The number of good multicast packet received. */ + unsigned long rx_pause; /* The number of Pause packet received. */ + unsigned long rx_ctrl; /* The number of Control packet received other than Pause frame. */ + unsigned long rx_fcs_err; /* The number of packets with bad FCS. */ + unsigned long rx_len_err; /* The number of packets with mismatch of length field and actual size. */ + unsigned long rx_byte_cnt; /* The number of bytes of good packet received. FCS is NOT included. */ + unsigned long rx_runt; /* The number of packets received that are less than 64 byte long and with good FCS. */ + unsigned long rx_frag; /* The number of packets received that are less than 64 byte long and with bad FCS. */ + unsigned long rx_sz_64; /* The number of good and bad packets received that are 64 byte long. */ + unsigned long rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. 
*/ + unsigned long rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */ + unsigned long rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. */ + unsigned long rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */ + unsigned long rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */ + unsigned long rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */ + unsigned long rx_sz_ov; /* The number of good and bad packets received that are more than MTU size truncated by Selene. */ + unsigned long rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */ + unsigned long rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */ + unsigned long rx_align_err; /* Alignment Error */ + unsigned long rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */ + unsigned long rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */ + unsigned long rx_err_addr; /* The number of packets dropped due to address filtering. */ + + /* tx */ + unsigned long tx_ok; /* The number of good packet transmitted. */ + unsigned long tx_bcast; /* The number of good broadcast packet transmitted. */ + unsigned long tx_mcast; /* The number of good multicast packet transmitted. */ + unsigned long tx_pause; /* The number of Pause packet transmitted. */ + unsigned long tx_exc_defer; /* The number of packets transmitted with excessive deferral. */ + unsigned long tx_ctrl; /* The number of packets transmitted is a control frame, excluding Pause frame. */ + unsigned long tx_defer; /* The number of packets transmitted that is deferred. */ + unsigned long tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */ + unsigned long tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */ + unsigned long tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */ + unsigned long tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */ + unsigned long tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */ + unsigned long tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */ + unsigned long tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */ + unsigned long tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */ + unsigned long tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */ + unsigned long tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */ + unsigned long tx_late_col; /* The number of packets transmitted with late collisions. */ + unsigned long tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. 
*/ + unsigned long tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */ + unsigned long tx_rd_eop; /* The number of times that read beyond the EOP into the next frame area when TRD was not written timely */ + unsigned long tx_len_err; /* The number of transmit packets with length field does NOT match the actual frame size. */ + unsigned long tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. */ + unsigned long tx_bcast_byte; /* The byte count of broadcast packet transmitted, excluding FCS. */ + unsigned long tx_mcast_byte; /* The byte count of multicast packet transmitted, excluding FCS. */ +}; + +struct atl1e_hw { + u8 __iomem *hw_addr; /* inner register address */ + resource_size_t mem_rang; + struct atl1e_adapter *adapter; + enum atl1e_nic_type nic_type; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u16 pci_cmd_word; + u8 mac_addr[ETH_ALEN]; + u8 perm_mac_addr[ETH_ALEN]; + u8 preamble_len; + u16 max_frame_size; + u16 rx_jumbo_th; + u16 tx_jumbo_th; + + u16 media_type; +#define MEDIA_TYPE_AUTO_SENSOR 0 +#define MEDIA_TYPE_100M_FULL 1 +#define MEDIA_TYPE_100M_HALF 2 +#define MEDIA_TYPE_10M_FULL 3 +#define MEDIA_TYPE_10M_HALF 4 + + u16 autoneg_advertised; +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + u16 imt; /* Interrupt Moderator timer ( 2us resolution) */ + u16 ict; /* Interrupt Clear timer (2us resolution) */ + u32 smb_timer; + u16 rrd_thresh; /* Threshold of number of RRD produced to trigger + interrupt request */ + u16 tpd_thresh; + u16 rx_count_down; /* 2us resolution */ + u16 tx_count_down; + + u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned burst. 
*/ + enum atl1e_rrs_type rrs_type; + u32 base_cpu; + u32 indirect_tab; + + enum atl1e_dma_req_block dmar_block; + enum atl1e_dma_req_block dmaw_block; + u8 dmaw_dly_cnt; + u8 dmar_dly_cnt; + + bool phy_configured; + bool re_autoneg; + bool emi_ca; +}; + +/* + * wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct atl1e_tx_buffer { + struct sk_buff *skb; + u16 flags; +#define ATL1E_TX_PCIMAP_SINGLE 0x0001 +#define ATL1E_TX_PCIMAP_PAGE 0x0002 +#define ATL1E_TX_PCIMAP_TYPE_MASK 0x0003 + u16 length; + dma_addr_t dma; +}; + +#define ATL1E_SET_PCIMAP_TYPE(tx_buff, type) do { \ + ((tx_buff)->flags) &= ~ATL1E_TX_PCIMAP_TYPE_MASK; \ + ((tx_buff)->flags) |= (type); \ + } while (0) + +struct atl1e_rx_page { + dma_addr_t dma; /* receive rage DMA address */ + u8 *addr; /* receive rage virtual address */ + dma_addr_t write_offset_dma; /* the DMA address which contain the + receive data offset in the page */ + u32 *write_offset_addr; /* the virtaul address which contain + the receive data offset in the page */ + u32 read_offset; /* the offset where we have read */ +}; + +struct atl1e_rx_page_desc { + struct atl1e_rx_page rx_page[AT_PAGE_NUM_PER_QUEUE]; + u8 rx_using; + u16 rx_nxseq; +}; + +/* transmit packet descriptor (tpd) ring */ +struct atl1e_tx_ring { + struct atl1e_tpd_desc *desc; /* descriptor ring virtual address */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 count; /* the count of transmit rings */ + rwlock_t tx_lock; + u16 next_to_use; + atomic_t next_to_clean; + struct atl1e_tx_buffer *tx_buffer; + dma_addr_t cmb_dma; + u32 *cmb; +}; + +/* receive packet descriptor ring */ +struct atl1e_rx_ring { + void *desc; + dma_addr_t dma; + int size; + u32 page_size; /* bytes length of rxf page */ + u32 real_page_size; /* real_page_size = page_size + jumbo + aliagn */ + struct atl1e_rx_page_desc rx_page_desc[AT_MAX_RECEIVE_QUEUE]; +}; + +/* board specific private data structure */ +struct atl1e_adapter { + struct net_device *netdev; + struct pci_dev *pdev; + struct napi_struct napi; + struct mii_if_info mii; /* MII interface info */ + struct atl1e_hw hw; + struct atl1e_hw_stats hw_stats; + + u32 wol; + u16 link_speed; + u16 link_duplex; + + spinlock_t mdio_lock; + spinlock_t tx_lock; + atomic_t irq_sem; + + struct work_struct reset_task; + struct work_struct link_chg_task; + struct timer_list watchdog_timer; + struct timer_list phy_config_timer; + + /* All Descriptor memory */ + dma_addr_t ring_dma; + void *ring_vir_addr; + u32 ring_size; + + struct atl1e_tx_ring tx_ring; + struct atl1e_rx_ring rx_ring; + int num_rx_queues; + unsigned long flags; +#define __AT_TESTING 0x0001 +#define __AT_RESETTING 0x0002 +#define __AT_DOWN 0x0003 + + u32 bd_number; /* board number;*/ + u32 pci_state[16]; + u32 *config_space; +}; + +#define AT_WRITE_REG(a, reg, value) ( \ + writel((value), ((a)->hw_addr + reg))) + +#define AT_WRITE_FLUSH(a) (\ + readl((a)->hw_addr)) + +#define AT_READ_REG(a, reg) ( \ + readl((a)->hw_addr + reg)) + +#define AT_WRITE_REGB(a, reg, value) (\ + writeb((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGB(a, reg) (\ + readb((a)->hw_addr + reg)) + +#define AT_WRITE_REGW(a, reg, value) (\ + writew((value), ((a)->hw_addr + reg))) + +#define AT_READ_REGW(a, reg) (\ + readw((a)->hw_addr + reg)) + +#define AT_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), (((a)->hw_addr + reg) + ((offset) << 2)))) + +#define AT_READ_REG_ARRAY(a, reg, offset) ( \ + readl(((a)->hw_addr + reg) + ((offset) << 2))) + +extern 
char atl1e_driver_name[]; +extern char atl1e_driver_version[]; + +void atl1e_check_options(struct atl1e_adapter *adapter); +int atl1e_up(struct atl1e_adapter *adapter); +void atl1e_down(struct atl1e_adapter *adapter); +void atl1e_reinit_locked(struct atl1e_adapter *adapter); +s32 atl1e_reset_hw(struct atl1e_hw *hw); +void atl1e_set_ethtool_ops(struct net_device *netdev); +#endif /* _ATL1_E_H_ */ diff --git a/addons/atl1e/src/4.4.180/atl1e_ethtool.c b/addons/atl1e/src/4.4.180/atl1e_ethtool.c new file mode 100644 index 00000000..8e3dbd4d --- /dev/null +++ b/addons/atl1e/src/4.4.180/atl1e_ethtool.c @@ -0,0 +1,388 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + * + */ + +#include +#include +#include + +#include "atl1e.h" + +static int atl1e_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->nic_type == athr_l1e) + ecmd->supported |= SUPPORTED_1000baseT_Full; + + ecmd->advertising = ADVERTISED_TP; + + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->advertising |= hw->autoneg_advertised; + + ecmd->port = PORT_TP; + ecmd->phy_address = 0; + ecmd->transceiver = XCVR_INTERNAL; + + if (adapter->link_speed != SPEED_0) { + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + ecmd->autoneg = AUTONEG_ENABLE; + return 0; +} + +static int atl1e_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + u16 adv4, adv9; + + if ((ecmd->advertising&ADVERTISE_1000_FULL)) { + if (hw->nic_type == athr_l1e) { + hw->autoneg_advertised = + ecmd->advertising & AT_ADV_MASK; + } else { + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + } else if (ecmd->advertising&ADVERTISE_1000_HALF) { + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } else { + hw->autoneg_advertised = + ecmd->advertising & AT_ADV_MASK; + } + ecmd->advertising = hw->autoneg_advertised | + ADVERTISED_TP | ADVERTISED_Autoneg; + + adv4 = hw->mii_autoneg_adv_reg & ~ADVERTISE_ALL; + adv9 = hw->mii_1000t_ctrl_reg & ~MII_AT001_CR_1000T_SPEED_MASK; + if 
(hw->autoneg_advertised & ADVERTISE_10_HALF) + adv4 |= ADVERTISE_10HALF; + if (hw->autoneg_advertised & ADVERTISE_10_FULL) + adv4 |= ADVERTISE_10FULL; + if (hw->autoneg_advertised & ADVERTISE_100_HALF) + adv4 |= ADVERTISE_100HALF; + if (hw->autoneg_advertised & ADVERTISE_100_FULL) + adv4 |= ADVERTISE_100FULL; + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) + adv9 |= ADVERTISE_1000FULL; + + if (adv4 != hw->mii_autoneg_adv_reg || + adv9 != hw->mii_1000t_ctrl_reg) { + hw->mii_autoneg_adv_reg = adv4; + hw->mii_1000t_ctrl_reg = adv9; + hw->re_autoneg = true; + } + + } else { + clear_bit(__AT_RESETTING, &adapter->flags); + return -EINVAL; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + atl1e_down(adapter); + atl1e_up(adapter); + } else + atl1e_reset_hw(&adapter->hw); + + clear_bit(__AT_RESETTING, &adapter->flags); + return 0; +} + +static u32 atl1e_get_msglevel(struct net_device *netdev) +{ +#ifdef DBG + return 1; +#else + return 0; +#endif +} + +static int atl1e_get_regs_len(struct net_device *netdev) +{ + return AT_REGS_LEN * sizeof(u32); +} + +static void atl1e_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, AT_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = AT_READ_REG(hw, REG_VPD_CAP); + regs_buff[1] = AT_READ_REG(hw, REG_SPI_FLASH_CTRL); + regs_buff[2] = AT_READ_REG(hw, REG_SPI_FLASH_CONFIG); + regs_buff[3] = AT_READ_REG(hw, REG_TWSI_CTRL); + regs_buff[4] = AT_READ_REG(hw, REG_PCIE_DEV_MISC_CTRL); + regs_buff[5] = AT_READ_REG(hw, REG_MASTER_CTRL); + regs_buff[6] = AT_READ_REG(hw, REG_MANUAL_TIMER_INIT); + regs_buff[7] = AT_READ_REG(hw, REG_IRQ_MODU_TIMER_INIT); + regs_buff[8] = AT_READ_REG(hw, REG_GPHY_CTRL); + regs_buff[9] = AT_READ_REG(hw, REG_CMBDISDMA_TIMER); + regs_buff[10] = AT_READ_REG(hw, REG_IDLE_STATUS); + regs_buff[11] = AT_READ_REG(hw, REG_MDIO_CTRL); + regs_buff[12] = AT_READ_REG(hw, REG_SERDES_LOCK); + regs_buff[13] = AT_READ_REG(hw, REG_MAC_CTRL); + regs_buff[14] = AT_READ_REG(hw, REG_MAC_IPG_IFG); + regs_buff[15] = AT_READ_REG(hw, REG_MAC_STA_ADDR); + regs_buff[16] = AT_READ_REG(hw, REG_MAC_STA_ADDR+4); + regs_buff[17] = AT_READ_REG(hw, REG_RX_HASH_TABLE); + regs_buff[18] = AT_READ_REG(hw, REG_RX_HASH_TABLE+4); + regs_buff[19] = AT_READ_REG(hw, REG_MAC_HALF_DUPLX_CTRL); + regs_buff[20] = AT_READ_REG(hw, REG_MTU); + regs_buff[21] = AT_READ_REG(hw, REG_WOL_CTRL); + regs_buff[22] = AT_READ_REG(hw, REG_SRAM_TRD_ADDR); + regs_buff[23] = AT_READ_REG(hw, REG_SRAM_TRD_LEN); + regs_buff[24] = AT_READ_REG(hw, REG_SRAM_RXF_ADDR); + regs_buff[25] = AT_READ_REG(hw, REG_SRAM_RXF_LEN); + regs_buff[26] = AT_READ_REG(hw, REG_SRAM_TXF_ADDR); + regs_buff[27] = AT_READ_REG(hw, REG_SRAM_TXF_LEN); + regs_buff[28] = AT_READ_REG(hw, REG_SRAM_TCPH_ADDR); + regs_buff[29] = AT_READ_REG(hw, REG_SRAM_PKTH_ADDR); + + atl1e_read_phy_reg(hw, MII_BMCR, &phy_data); + regs_buff[73] = (u32)phy_data; + atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); + regs_buff[74] = (u32)phy_data; +} + +static int atl1e_get_eeprom_len(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (!atl1e_check_eeprom_exist(&adapter->hw)) + return AT_EEPROM_LEN; + else + return 0; +} + +static int atl1e_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl1e_adapter *adapter = 
netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 *eeprom_buff; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EINVAL; + + if (atl1e_check_eeprom_exist(hw)) /* not exist */ + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + + eeprom_buff = kmalloc(sizeof(u32) * + (last_dword - first_dword + 1), GFP_KERNEL); + if (eeprom_buff == NULL) + return -ENOMEM; + + for (i = first_dword; i < last_dword; i++) { + if (!atl1e_read_eeprom(hw, i * 4, &(eeprom_buff[i-first_dword]))) { + kfree(eeprom_buff); + return -EIO; + } + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int atl1e_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + u32 *eeprom_buff; + u32 *ptr; + int first_dword, last_dword; + int ret_val = 0; + int i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EINVAL; + + first_dword = eeprom->offset >> 2; + last_dword = (eeprom->offset + eeprom->len - 1) >> 2; + eeprom_buff = kmalloc(AT_EEPROM_LEN, GFP_KERNEL); + if (eeprom_buff == NULL) + return -ENOMEM; + + ptr = eeprom_buff; + + if (eeprom->offset & 3) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + if (!atl1e_read_eeprom(hw, first_dword * 4, &(eeprom_buff[0]))) { + ret_val = -EIO; + goto out; + } + ptr++; + } + if (((eeprom->offset + eeprom->len) & 3)) { + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + + if (!atl1e_read_eeprom(hw, last_dword * 4, + &(eeprom_buff[last_dword - first_dword]))) { + ret_val = -EIO; + goto out; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_dword - first_dword + 1; i++) { + if (!atl1e_write_eeprom(hw, ((first_dword + i) * 4), + eeprom_buff[i])) { + ret_val = -EIO; + goto out; + } + } +out: + kfree(eeprom_buff); + return ret_val; +} + +static void atl1e_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, atl1e_driver_version, + sizeof(drvinfo->version)); + strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version)); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void atl1e_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + wol->supported = WAKE_MAGIC | WAKE_PHY; + wol->wolopts = 0; + + if (adapter->wol & AT_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & AT_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & AT_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & AT_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & AT_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int atl1e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | + WAKE_UCAST | 
WAKE_MCAST | WAKE_BCAST)) + return -EOPNOTSUPP; + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= AT_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= AT_WUFC_LNKC; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int atl1e_nway_reset(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + atl1e_reinit_locked(adapter); + return 0; +} + +static const struct ethtool_ops atl1e_ethtool_ops = { + .get_settings = atl1e_get_settings, + .set_settings = atl1e_set_settings, + .get_drvinfo = atl1e_get_drvinfo, + .get_regs_len = atl1e_get_regs_len, + .get_regs = atl1e_get_regs, + .get_wol = atl1e_get_wol, + .set_wol = atl1e_set_wol, + .get_msglevel = atl1e_get_msglevel, + .nway_reset = atl1e_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = atl1e_get_eeprom_len, + .get_eeprom = atl1e_get_eeprom, + .set_eeprom = atl1e_set_eeprom, +}; + +void atl1e_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &atl1e_ethtool_ops; +} diff --git a/addons/atl1e/src/4.4.180/atl1e_hw.c b/addons/atl1e/src/4.4.180/atl1e_hw.c new file mode 100644 index 00000000..113565da --- /dev/null +++ b/addons/atl1e/src/4.4.180/atl1e_hw.c @@ -0,0 +1,650 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ +#include +#include +#include +#include + +#include "atl1e.h" + +/* + * check_eeprom_exist + * return 0 if eeprom exist + */ +int atl1e_check_eeprom_exist(struct atl1e_hw *hw) +{ + u32 value; + + value = AT_READ_REG(hw, REG_SPI_FLASH_CTRL); + if (value & SPI_FLASH_CTRL_EN_VPD) { + value &= ~SPI_FLASH_CTRL_EN_VPD; + AT_WRITE_REG(hw, REG_SPI_FLASH_CTRL, value); + } + value = AT_READ_REGW(hw, REG_PCIE_CAP_LIST); + return ((value & 0xFF00) == 0x6C00) ? 
0 : 1; +} + +void atl1e_hw_set_mac_addr(struct atl1e_hw *hw) +{ + u32 value; + /* + * 00-0B-6A-F6-00-DC + * 0: 6AF600DC 1: 000B + * low dword + */ + value = (((u32)hw->mac_addr[2]) << 24) | + (((u32)hw->mac_addr[3]) << 16) | + (((u32)hw->mac_addr[4]) << 8) | + (((u32)hw->mac_addr[5])) ; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 0, value); + /* high dword */ + value = (((u32)hw->mac_addr[0]) << 8) | + (((u32)hw->mac_addr[1])) ; + AT_WRITE_REG_ARRAY(hw, REG_MAC_STA_ADDR, 1, value); +} + +/* + * atl1e_get_permanent_address + * return 0 if a valid MAC address is obtained, + */ +static int atl1e_get_permanent_address(struct atl1e_hw *hw) +{ + u32 addr[2]; + u32 i; + u32 twsi_ctrl_data; + u8 eth_addr[ETH_ALEN]; + + if (is_valid_ether_addr(hw->perm_mac_addr)) + return 0; + + /* init */ + addr[0] = addr[1] = 0; + + if (!atl1e_check_eeprom_exist(hw)) { + /* eeprom exist */ + twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL); + twsi_ctrl_data |= TWSI_CTRL_SW_LDSTART; + AT_WRITE_REG(hw, REG_TWSI_CTRL, twsi_ctrl_data); + for (i = 0; i < AT_TWSI_EEPROM_TIMEOUT; i++) { + msleep(10); + twsi_ctrl_data = AT_READ_REG(hw, REG_TWSI_CTRL); + if ((twsi_ctrl_data & TWSI_CTRL_SW_LDSTART) == 0) + break; + } + if (i >= AT_TWSI_EEPROM_TIMEOUT) + return AT_ERR_TIMEOUT; + } + + /* the MAC address may have been programmed by the BIOS */ + addr[0] = AT_READ_REG(hw, REG_MAC_STA_ADDR); + addr[1] = AT_READ_REG(hw, REG_MAC_STA_ADDR + 4); + *(u32 *) &eth_addr[2] = swab32(addr[0]); + *(u16 *) &eth_addr[0] = swab16(*(u16 *)&addr[1]); + + if (is_valid_ether_addr(eth_addr)) { + memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); + return 0; + } + + return AT_ERR_EEPROM; +} + +bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value) +{ + return true; +} + +bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value) +{ + int i; + u32 control; + + if (offset & 3) + return false; /* address is not aligned */ + + AT_WRITE_REG(hw, REG_VPD_DATA, 0); + control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; + AT_WRITE_REG(hw, REG_VPD_CAP, control); + + for (i = 0; i < 10; i++) { + msleep(2); + control = AT_READ_REG(hw, REG_VPD_CAP); + if (control & VPD_CAP_VPD_FLAG) + break; + } + if (control & VPD_CAP_VPD_FLAG) { + *p_value = AT_READ_REG(hw, REG_VPD_DATA); + return true; + } + return false; /* timeout */ +} + +void atl1e_force_ps(struct atl1e_hw *hw) +{ + AT_WRITE_REGW(hw, REG_GPHY_CTRL, + GPHY_CTRL_PW_WOL_DIS | GPHY_CTRL_EXT_RESET); +} + +/* + * Reads the adapter's MAC address from the EEPROM + * + * hw - Struct containing variables accessed by shared code + */ +int atl1e_read_mac_addr(struct atl1e_hw *hw) +{ + int err = 0; + + err = atl1e_get_permanent_address(hw); + if (err) + return AT_ERR_EEPROM; + memcpy(hw->mac_addr, hw->perm_mac_addr, sizeof(hw->perm_mac_addr)); + return 0; +} + +/* + * atl1e_hash_mc_addr + * purpose + * set hash value for a multicast address + */ +u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr) +{ + u32 crc32; + u32 value = 0; + int i; + + crc32 = ether_crc_le(6, mc_addr); + for (i = 0; i < 32; i++) + value |= (((crc32 >> i) & 1) << (31 - i)); + + return value; +} + +/* + * Sets the bit in the multicast table corresponding to the hash value. + * hw - Struct containing variables accessed by shared code + * hash_value - Multicast address hash value + */ +void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value) +{ + u32 hash_bit, hash_reg; + u32 mta; + + /* + * The HASH Table is a register array of 2 32-bit registers. + * It is treated like an array of 64 bits. We want to set + * bit BitArray[hash_value]. 
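+ * (For instance, an assumed hash_value of 0x8C000000 selects + * register 1, bit 3.)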
So we figure out what register + * the bit is in, read it, OR in the new bit, then write + * back the new value. The register is determined by bit 31 + * of the hash value and the bit within that register is + * determined by bits 30:26 of the value. + */ + hash_reg = (hash_value >> 31) & 0x1; + hash_bit = (hash_value >> 26) & 0x1F; + + mta = AT_READ_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg); + + mta |= (1 << hash_bit); + + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, hash_reg, mta); +} +/* + * Reads the value from a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to read + */ +int atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data) +{ + u32 val; + int i; + + val = ((u32)(reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | + MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + + wmb(); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = AT_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + wmb(); + } + if (!(val & (MDIO_START | MDIO_BUSY))) { + *phy_data = (u16)val; + return 0; + } + + return AT_ERR_PHY; +} + +/* + * Writes a value to a PHY register + * hw - Struct containing variables accessed by shared code + * reg_addr - address of the PHY register to write + * data - data to write to the PHY + */ +int atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data) +{ + int i; + u32 val; + + val = ((u32)(phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | + (reg_addr&MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | + MDIO_SUP_PREAMBLE | + MDIO_START | + MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; + + AT_WRITE_REG(hw, REG_MDIO_CTRL, val); + wmb(); + + for (i = 0; i < MDIO_WAIT_TIMES; i++) { + udelay(2); + val = AT_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + wmb(); + } + + if (!(val & (MDIO_START | MDIO_BUSY))) + return 0; + + return AT_ERR_PHY; +} + +/* + * atl1e_init_pcie - init PCIE module + */ +static void atl1e_init_pcie(struct atl1e_hw *hw) +{ + u32 value; + /* comment out the 2 lines below to save more power when suspended + value = LTSSM_TEST_MODE_DEF; + AT_WRITE_REG(hw, REG_LTSSM_TEST_MODE, value); + */ + + /* pcie flow control mode change */ + value = AT_READ_REG(hw, 0x1008); + value |= 0x8000; + AT_WRITE_REG(hw, 0x1008, value); +} +/* + * Configures PHY autoneg and flow control advertisement settings + * + * hw - Struct containing variables accessed by shared code + */ +static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + if (0 != hw->mii_autoneg_adv_reg) + return 0; + /* Read the MII Auto-Neg Advertisement Register (Address 4/9). */ + mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; + mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK; + + /* + * Need to parse autoneg_advertised and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* + * First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~ADVERTISE_ALL; + mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK; + + /* + * Need to parse MediaType and setup the + * appropriate PHY registers. 
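+ * For example (matching the switch below): MEDIA_TYPE_100M_FULL + * advertises only ADVERTISE_100FULL in MII_ADVERTISE, while + * MEDIA_TYPE_AUTO_SENSOR advertises every supported rate, plus + * 1000BASE-T full duplex on athr_l1e parts.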
+ */ + switch (hw->media_type) { + case MEDIA_TYPE_AUTO_SENSOR: + mii_autoneg_adv_reg |= ADVERTISE_ALL; + hw->autoneg_advertised = ADVERTISE_ALL; + if (hw->nic_type == athr_l1e) { + mii_1000t_ctrl_reg |= ADVERTISE_1000FULL; + hw->autoneg_advertised |= ADVERTISE_1000_FULL; + } + break; + + case MEDIA_TYPE_100M_FULL: + mii_autoneg_adv_reg |= ADVERTISE_100FULL; + hw->autoneg_advertised = ADVERTISE_100_FULL; + break; + + case MEDIA_TYPE_100M_HALF: + mii_autoneg_adv_reg |= ADVERTISE_100_HALF; + hw->autoneg_advertised = ADVERTISE_100_HALF; + break; + + case MEDIA_TYPE_10M_FULL: + mii_autoneg_adv_reg |= ADVERTISE_10_FULL; + hw->autoneg_advertised = ADVERTISE_10_FULL; + break; + + default: + mii_autoneg_adv_reg |= ADVERTISE_10_HALF; + hw->autoneg_advertised = ADVERTISE_10_HALF; + break; + } + + /* flow control fixed to enable all */ + mii_autoneg_adv_reg |= (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + + hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; + hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; + + ret_val = atl1e_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { + ret_val = atl1e_write_phy_reg(hw, MII_CTRL1000, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return 0; +} + + +/* + * Resets the PHY and make all config validate + * + * hw - Struct containing variables accessed by shared code + * + * Sets bit 15 and 12 of the MII control regiser (for F001 bug) + */ +int atl1e_phy_commit(struct atl1e_hw *hw) +{ + struct atl1e_adapter *adapter = hw->adapter; + int ret_val; + u16 phy_data; + + phy_data = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART; + + ret_val = atl1e_write_phy_reg(hw, MII_BMCR, phy_data); + if (ret_val) { + u32 val; + int i; + /************************************** + * pcie serdes link may be down ! + **************************************/ + for (i = 0; i < 25; i++) { + msleep(1); + val = AT_READ_REG(hw, REG_MDIO_CTRL); + if (!(val & (MDIO_START | MDIO_BUSY))) + break; + } + + if (0 != (val & (MDIO_START | MDIO_BUSY))) { + netdev_err(adapter->netdev, + "pcie linkdown at least for 25ms\n"); + return ret_val; + } + + netdev_err(adapter->netdev, "pcie linkup after %d ms\n", i); + } + return 0; +} + +int atl1e_phy_init(struct atl1e_hw *hw) +{ + struct atl1e_adapter *adapter = hw->adapter; + s32 ret_val; + u16 phy_val; + + if (hw->phy_configured) { + if (hw->re_autoneg) { + hw->re_autoneg = false; + return atl1e_restart_autoneg(hw); + } + return 0; + } + + /* RESET GPHY Core */ + AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT); + msleep(2); + AT_WRITE_REGW(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT | + GPHY_CTRL_EXT_RESET); + msleep(2); + + /* patches */ + /* p1. eable hibernation mode */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0xB); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0xBC00); + if (ret_val) + return ret_val; + /* p2. set Class A/B for all modes */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0); + if (ret_val) + return ret_val; + phy_val = 0x02ef; + /* remove Class AB */ + /* phy_val = hw->emi_ca ? 0x02ef : 0x02df; */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, phy_val); + if (ret_val) + return ret_val; + /* p3. 10B ??? */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x12); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x4C04); + if (ret_val) + return ret_val; + /* p4. 
1000T power */ + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x4); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x8BBB); + if (ret_val) + return ret_val; + + ret_val = atl1e_write_phy_reg(hw, MII_DBG_ADDR, 0x5); + if (ret_val) + return ret_val; + ret_val = atl1e_write_phy_reg(hw, MII_DBG_DATA, 0x2C46); + if (ret_val) + return ret_val; + + msleep(1); + + /*Enable PHY LinkChange Interrupt */ + ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00); + if (ret_val) { + netdev_err(adapter->netdev, + "Error enable PHY linkChange Interrupt\n"); + return ret_val; + } + /* setup AutoNeg parameters */ + ret_val = atl1e_phy_setup_autoneg_adv(hw); + if (ret_val) { + netdev_err(adapter->netdev, + "Error Setting up Auto-Negotiation\n"); + return ret_val; + } + /* SW.Reset & En-Auto-Neg to restart Auto-Neg*/ + netdev_dbg(adapter->netdev, "Restarting Auto-Negotiation\n"); + ret_val = atl1e_phy_commit(hw); + if (ret_val) { + netdev_err(adapter->netdev, "Error resetting the phy\n"); + return ret_val; + } + + hw->phy_configured = true; + + return 0; +} + +/* + * Reset the transmit and receive units; mask and clear all interrupts. + * hw - Struct containing variables accessed by shared code + * return : 0 or idle status (if error) + */ +int atl1e_reset_hw(struct atl1e_hw *hw) +{ + struct atl1e_adapter *adapter = hw->adapter; + struct pci_dev *pdev = adapter->pdev; + + u32 idle_status_data = 0; + u16 pci_cfg_cmd_word = 0; + int timeout = 0; + + /* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */ + pci_read_config_word(pdev, PCI_REG_COMMAND, &pci_cfg_cmd_word); + if ((pci_cfg_cmd_word & (CMD_IO_SPACE | + CMD_MEMORY_SPACE | CMD_BUS_MASTER)) + != (CMD_IO_SPACE | CMD_MEMORY_SPACE | CMD_BUS_MASTER)) { + pci_cfg_cmd_word |= (CMD_IO_SPACE | + CMD_MEMORY_SPACE | CMD_BUS_MASTER); + pci_write_config_word(pdev, PCI_REG_COMMAND, pci_cfg_cmd_word); + } + + /* + * Issue Soft Reset to the MAC. This will reset the chip's + * transmit, receive, DMA. It will not effect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + AT_WRITE_REG(hw, REG_MASTER_CTRL, + MASTER_CTRL_LED_MODE | MASTER_CTRL_SOFT_RST); + wmb(); + msleep(1); + + /* Wait at least 10ms for All module to be Idle */ + for (timeout = 0; timeout < AT_HW_MAX_IDLE_DELAY; timeout++) { + idle_status_data = AT_READ_REG(hw, REG_IDLE_STATUS); + if (idle_status_data == 0) + break; + msleep(1); + cpu_relax(); + } + + if (timeout >= AT_HW_MAX_IDLE_DELAY) { + netdev_err(adapter->netdev, + "MAC state machine can't be idle since disabled for 10ms second\n"); + return AT_ERR_TIMEOUT; + } + + return 0; +} + + +/* + * Performs basic configuration of the adapter. + * + * hw - Struct containing variables accessed by shared code + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes multicast table, + * and Calls routines to setup link + * Leaves the transmit and receive units disabled and uninitialized. + */ +int atl1e_init_hw(struct atl1e_hw *hw) +{ + s32 ret_val = 0; + + atl1e_init_pcie(hw); + + /* Zero out the Multicast HASH table */ + /* clear the old settings from the multicast hash table */ + AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + ret_val = atl1e_phy_init(hw); + + return ret_val; +} + +/* + * Detects the current speed and duplex settings of the hardware. 
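+ * + * A minimal caller sketch (illustrative, not from this file): + * + *   u16 speed, duplex; + *   if (atl1e_get_speed_and_duplex(hw, &speed, &duplex) == 0) + *           netdev_info(netdev, "link %d Mbps, %s duplex\n", speed, + *                       duplex == FULL_DUPLEX ? "full" : "half");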
+ * + * hw - Struct containing variables accessed by shared code + * speed - Speed of the connection + * duplex - Duplex setting of the connection + */ +int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex) +{ + int err; + u16 phy_data; + + /* Read PHY Specific Status Register (17) */ + err = atl1e_read_phy_reg(hw, MII_AT001_PSSR, &phy_data); + if (err) + return err; + + if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED)) + return AT_ERR_PHY_RES; + + switch (phy_data & MII_AT001_PSSR_SPEED) { + case MII_AT001_PSSR_1000MBS: + *speed = SPEED_1000; + break; + case MII_AT001_PSSR_100MBS: + *speed = SPEED_100; + break; + case MII_AT001_PSSR_10MBS: + *speed = SPEED_10; + break; + default: + return AT_ERR_PHY_SPEED; + } + + if (phy_data & MII_AT001_PSSR_DPLX) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + return 0; +} + +int atl1e_restart_autoneg(struct atl1e_hw *hw) +{ + int err = 0; + + err = atl1e_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg); + if (err) + return err; + + if (hw->nic_type == athr_l1e || hw->nic_type == athr_l2e_revA) { + err = atl1e_write_phy_reg(hw, MII_CTRL1000, + hw->mii_1000t_ctrl_reg); + if (err) + return err; + } + + err = atl1e_write_phy_reg(hw, MII_BMCR, + BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART); + return err; +} + diff --git a/addons/atl1e/src/4.4.180/atl1e_hw.h b/addons/atl1e/src/4.4.180/atl1e_hw.h new file mode 100644 index 00000000..88a6271d --- /dev/null +++ b/addons/atl1e/src/4.4.180/atl1e_hw.h @@ -0,0 +1,690 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef _ATHL1E_HW_H_ +#define _ATHL1E_HW_H_ + +#include +#include + +struct atl1e_adapter; +struct atl1e_hw; + +/* function prototype */ +s32 atl1e_reset_hw(struct atl1e_hw *hw); +s32 atl1e_read_mac_addr(struct atl1e_hw *hw); +s32 atl1e_init_hw(struct atl1e_hw *hw); +s32 atl1e_phy_commit(struct atl1e_hw *hw); +s32 atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex); +u32 atl1e_auto_get_fc(struct atl1e_adapter *adapter, u16 duplex); +u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr); +void atl1e_hash_set(struct atl1e_hw *hw, u32 hash_value); +s32 atl1e_read_phy_reg(struct atl1e_hw *hw, u16 reg_addr, u16 *phy_data); +s32 atl1e_write_phy_reg(struct atl1e_hw *hw, u32 reg_addr, u16 phy_data); +s32 atl1e_validate_mdi_setting(struct atl1e_hw *hw); +void atl1e_hw_set_mac_addr(struct atl1e_hw *hw); +bool atl1e_read_eeprom(struct atl1e_hw *hw, u32 offset, u32 *p_value); +bool atl1e_write_eeprom(struct atl1e_hw *hw, u32 offset, u32 value); +s32 atl1e_phy_enter_power_saving(struct atl1e_hw *hw); +s32 atl1e_phy_leave_power_saving(struct atl1e_hw *hw); +s32 atl1e_phy_init(struct atl1e_hw *hw); +int atl1e_check_eeprom_exist(struct atl1e_hw *hw); +void atl1e_force_ps(struct atl1e_hw *hw); +s32 atl1e_restart_autoneg(struct atl1e_hw *hw); + +/* register definition */ +#define REG_PM_CTRLSTAT 0x44 + +#define REG_PCIE_CAP_LIST 0x58 + +#define REG_DEVICE_CAP 0x5C +#define DEVICE_CAP_MAX_PAYLOAD_MASK 0x7 +#define DEVICE_CAP_MAX_PAYLOAD_SHIFT 0 + +#define REG_DEVICE_CTRL 0x60 +#define DEVICE_CTRL_MAX_PAYLOAD_MASK 0x7 +#define DEVICE_CTRL_MAX_PAYLOAD_SHIFT 5 +#define DEVICE_CTRL_MAX_RREQ_SZ_MASK 0x7 +#define DEVICE_CTRL_MAX_RREQ_SZ_SHIFT 12 + +#define REG_VPD_CAP 0x6C +#define VPD_CAP_ID_MASK 0xff +#define VPD_CAP_ID_SHIFT 0 +#define VPD_CAP_NEXT_PTR_MASK 0xFF +#define VPD_CAP_NEXT_PTR_SHIFT 8 +#define VPD_CAP_VPD_ADDR_MASK 0x7FFF +#define VPD_CAP_VPD_ADDR_SHIFT 16 +#define VPD_CAP_VPD_FLAG 0x80000000 + +#define REG_VPD_DATA 0x70 + +#define REG_SPI_FLASH_CTRL 0x200 +#define SPI_FLASH_CTRL_STS_NON_RDY 0x1 +#define SPI_FLASH_CTRL_STS_WEN 0x2 +#define SPI_FLASH_CTRL_STS_WPEN 0x80 +#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF +#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0 +#define SPI_FLASH_CTRL_INS_MASK 0x7 +#define SPI_FLASH_CTRL_INS_SHIFT 8 +#define SPI_FLASH_CTRL_START 0x800 +#define SPI_FLASH_CTRL_EN_VPD 0x2000 +#define SPI_FLASH_CTRL_LDSTART 0x8000 +#define SPI_FLASH_CTRL_CS_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HI_SHIFT 16 +#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3 +#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18 +#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20 +#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3 +#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22 +#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3 +#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24 +#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3 +#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26 +#define SPI_FLASH_CTRL_WAIT_READY 0x10000000 + +#define REG_SPI_ADDR 0x204 + +#define REG_SPI_DATA 0x208 + +#define REG_SPI_FLASH_CONFIG 0x20C +#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF +#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0 +#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3 +#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24 +#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000 + + +#define REG_SPI_FLASH_OP_PROGRAM 0x210 +#define REG_SPI_FLASH_OP_SC_ERASE 0x211 +#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212 +#define REG_SPI_FLASH_OP_RDID 0x213 +#define REG_SPI_FLASH_OP_WREN 0x214 +#define REG_SPI_FLASH_OP_RDSR 0x215 +#define REG_SPI_FLASH_OP_WRSR 0x216 +#define 
REG_SPI_FLASH_OP_READ 0x217 + +#define REG_TWSI_CTRL 0x218 +#define TWSI_CTRL_LD_OFFSET_MASK 0xFF +#define TWSI_CTRL_LD_OFFSET_SHIFT 0 +#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7 +#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8 +#define TWSI_CTRL_SW_LDSTART 0x800 +#define TWSI_CTRL_HW_LDSTART 0x1000 +#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F +#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15 +#define TWSI_CTRL_LD_EXIST 0x400000 +#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23 +#define TWSI_CTRL_FREQ_SEL_100K 0 +#define TWSI_CTRL_FREQ_SEL_200K 1 +#define TWSI_CTRL_FREQ_SEL_300K 2 +#define TWSI_CTRL_FREQ_SEL_400K 3 +#define TWSI_CTRL_SMB_SLV_ADDR +#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3 +#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24 + + +#define REG_PCIE_DEV_MISC_CTRL 0x21C +#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2 +#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1 +#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4 +#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8 +#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10 + +#define REG_PCIE_PHYMISC 0x1000 +#define PCIE_PHYMISC_FORCE_RCV_DET 0x4 + +#define REG_LTSSM_TEST_MODE 0x12FC +#define LTSSM_TEST_MODE_DEF 0xE000 + +/* Selene Master Control Register */ +#define REG_MASTER_CTRL 0x1400 +#define MASTER_CTRL_SOFT_RST 0x1 +#define MASTER_CTRL_MTIMER_EN 0x2 +#define MASTER_CTRL_ITIMER_EN 0x4 +#define MASTER_CTRL_MANUAL_INT 0x8 +#define MASTER_CTRL_ITIMER2_EN 0x20 +#define MASTER_CTRL_INT_RDCLR 0x40 +#define MASTER_CTRL_LED_MODE 0x200 +#define MASTER_CTRL_REV_NUM_SHIFT 16 +#define MASTER_CTRL_REV_NUM_MASK 0xff +#define MASTER_CTRL_DEV_ID_SHIFT 24 +#define MASTER_CTRL_DEV_ID_MASK 0xff + +/* Timer Initial Value Register */ +#define REG_MANUAL_TIMER_INIT 0x1404 + + +/* IRQ ModeratorTimer Initial Value Register */ +#define REG_IRQ_MODU_TIMER_INIT 0x1408 /* w */ +#define REG_IRQ_MODU_TIMER2_INIT 0x140A /* w */ + + +#define REG_GPHY_CTRL 0x140C +#define GPHY_CTRL_EXT_RESET 1 +#define GPHY_CTRL_PIPE_MOD 2 +#define GPHY_CTRL_TEST_MODE_MASK 3 +#define GPHY_CTRL_TEST_MODE_SHIFT 2 +#define GPHY_CTRL_BERT_START 0x10 +#define GPHY_CTRL_GATE_25M_EN 0x20 +#define GPHY_CTRL_LPW_EXIT 0x40 +#define GPHY_CTRL_PHY_IDDQ 0x80 +#define GPHY_CTRL_PHY_IDDQ_DIS 0x100 +#define GPHY_CTRL_PCLK_SEL_DIS 0x200 +#define GPHY_CTRL_HIB_EN 0x400 +#define GPHY_CTRL_HIB_PULSE 0x800 +#define GPHY_CTRL_SEL_ANA_RST 0x1000 +#define GPHY_CTRL_PHY_PLL_ON 0x2000 +#define GPHY_CTRL_PWDOWN_HW 0x4000 +#define GPHY_CTRL_DEFAULT (\ + GPHY_CTRL_PHY_PLL_ON |\ + GPHY_CTRL_SEL_ANA_RST |\ + GPHY_CTRL_HIB_PULSE |\ + GPHY_CTRL_HIB_EN) + +#define GPHY_CTRL_PW_WOL_DIS (\ + GPHY_CTRL_PHY_PLL_ON |\ + GPHY_CTRL_SEL_ANA_RST |\ + GPHY_CTRL_HIB_PULSE |\ + GPHY_CTRL_HIB_EN |\ + GPHY_CTRL_PWDOWN_HW |\ + GPHY_CTRL_PCLK_SEL_DIS |\ + GPHY_CTRL_PHY_IDDQ) + +/* IRQ Anti-Lost Timer Initial Value Register */ +#define REG_CMBDISDMA_TIMER 0x140E + + +/* Block IDLE Status Register */ +#define REG_IDLE_STATUS 0x1410 +#define IDLE_STATUS_RXMAC 1 /* 1: RXMAC state machine is in non-IDLE state. 0: RXMAC is idling */ +#define IDLE_STATUS_TXMAC 2 /* 1: TXMAC state machine is in non-IDLE state. 0: TXMAC is idling */ +#define IDLE_STATUS_RXQ 4 /* 1: RXQ state machine is in non-IDLE state. 0: RXQ is idling */ +#define IDLE_STATUS_TXQ 8 /* 1: TXQ state machine is in non-IDLE state. 0: TXQ is idling */ +#define IDLE_STATUS_DMAR 0x10 /* 1: DMAR state machine is in non-IDLE state. 0: DMAR is idling */ +#define IDLE_STATUS_DMAW 0x20 /* 1: DMAW state machine is in non-IDLE state. 
0: DMAW is idling */ +#define IDLE_STATUS_SMB 0x40 /* 1: SMB state machine is in non-IDLE state. 0: SMB is idling */ +#define IDLE_STATUS_CMB 0x80 /* 1: CMB state machine is in non-IDLE state. 0: CMB is idling */ + +/* MDIO Control Register */ +#define REG_MDIO_CTRL 0x1414 +#define MDIO_DATA_MASK 0xffff /* On MDIO write, the 16-bit control data to write to PHY MII management register */ +#define MDIO_DATA_SHIFT 0 /* On MDIO read, the 16-bit status data that was read from the PHY MII management register*/ +#define MDIO_REG_ADDR_MASK 0x1f /* MDIO register address */ +#define MDIO_REG_ADDR_SHIFT 16 +#define MDIO_RW 0x200000 /* 1: read, 0: write */ +#define MDIO_SUP_PREAMBLE 0x400000 /* Suppress preamble */ +#define MDIO_START 0x800000 /* Write 1 to initiate the MDIO master. And this bit is self cleared after one cycle*/ +#define MDIO_CLK_SEL_SHIFT 24 +#define MDIO_CLK_25_4 0 +#define MDIO_CLK_25_6 2 +#define MDIO_CLK_25_8 3 +#define MDIO_CLK_25_10 4 +#define MDIO_CLK_25_14 5 +#define MDIO_CLK_25_20 6 +#define MDIO_CLK_25_28 7 +#define MDIO_BUSY 0x8000000 +#define MDIO_AP_EN 0x10000000 +#define MDIO_WAIT_TIMES 10 + +/* MII PHY Status Register */ +#define REG_PHY_STATUS 0x1418 +#define PHY_STATUS_100M 0x20000 +#define PHY_STATUS_EMI_CA 0x40000 + +/* BIST Control and Status Register0 (for the Packet Memory) */ +#define REG_BIST0_CTRL 0x141c +#define BIST0_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */ +/* BIST process and reset to zero when BIST is done */ +#define BIST0_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */ +/* decoder failure or more than 1 cell stuck-to-x failure */ +#define BIST0_FUSE_FLAG 0x4 /* 1: Indicating one cell has been fixed */ + +/* BIST Control and Status Register1(for the retry buffer of PCI Express) */ +#define REG_BIST1_CTRL 0x1420 +#define BIST1_NOW 0x1 /* 1: To trigger BIST0 logic. This bit stays high during the */ +/* BIST process and reset to zero when BIST is done */ +#define BIST1_SRAM_FAIL 0x2 /* 1: The SRAM failure is un-repairable because it has address */ +/* decoder failure or more than 1 cell stuck-to-x failure.*/ +#define BIST1_FUSE_FLAG 0x4 + +/* SerDes Lock Detect Control and Status Register */ +#define REG_SERDES_LOCK 0x1424 +#define SERDES_LOCK_DETECT 1 /* 1: SerDes lock detected . This signal comes from Analog SerDes */ +#define SERDES_LOCK_DETECT_EN 2 /* 1: Enable SerDes Lock detect function */ + +/* MAC Control Register */ +#define REG_MAC_CTRL 0x1480 +#define MAC_CTRL_TX_EN 1 /* 1: Transmit Enable */ +#define MAC_CTRL_RX_EN 2 /* 1: Receive Enable */ +#define MAC_CTRL_TX_FLOW 4 /* 1: Transmit Flow Control Enable */ +#define MAC_CTRL_RX_FLOW 8 /* 1: Receive Flow Control Enable */ +#define MAC_CTRL_LOOPBACK 0x10 /* 1: Loop back at G/MII Interface */ +#define MAC_CTRL_DUPLX 0x20 /* 1: Full-duplex mode 0: Half-duplex mode */ +#define MAC_CTRL_ADD_CRC 0x40 /* 1: Instruct MAC to attach CRC on all egress Ethernet frames */ +#define MAC_CTRL_PAD 0x80 /* 1: Instruct MAC to pad short frames to 60-bytes, and then attach CRC. 
This bit has higher priority over CRC_EN */ +#define MAC_CTRL_LENCHK 0x100 /* 1: Instruct MAC to check if length field matches the real packet length */ +#define MAC_CTRL_HUGE_EN 0x200 /* 1: receive Jumbo frame enable */ +#define MAC_CTRL_PRMLEN_SHIFT 10 /* Preamble length */ +#define MAC_CTRL_PRMLEN_MASK 0xf +#define MAC_CTRL_RMV_VLAN 0x4000 /* 1: to remove VLAN Tag automatically from all receive packets */ +#define MAC_CTRL_PROMIS_EN 0x8000 /* 1: Promiscuous Mode Enable */ +#define MAC_CTRL_TX_PAUSE 0x10000 /* 1: transmit test pause */ +#define MAC_CTRL_SCNT 0x20000 /* 1: shortcut slot time counter */ +#define MAC_CTRL_SRST_TX 0x40000 /* 1: synchronized reset Transmit MAC module */ +#define MAC_CTRL_TX_SIMURST 0x80000 /* 1: transmit simulation reset */ +#define MAC_CTRL_SPEED_SHIFT 20 /* 10: gigabit 01:10M/100M */ +#define MAC_CTRL_SPEED_MASK 0x300000 +#define MAC_CTRL_SPEED_1000 2 +#define MAC_CTRL_SPEED_10_100 1 +#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000 /* 1: transmit maximum backoff (half-duplex test bit) */ +#define MAC_CTRL_TX_HUGE 0x800000 /* 1: transmit huge enable */ +#define MAC_CTRL_RX_CHKSUM_EN 0x1000000 /* 1: RX checksum enable */ +#define MAC_CTRL_MC_ALL_EN 0x2000000 /* 1: upload all multicast frame without error to system */ +#define MAC_CTRL_BC_EN 0x4000000 /* 1: upload all broadcast frame without error to system */ +#define MAC_CTRL_DBG 0x8000000 /* 1: upload all received frame to system (Debug Mode) */ + +/* MAC IPG/IFG Control Register */ +#define REG_MAC_IPG_IFG 0x1484 +#define MAC_IPG_IFG_IPGT_SHIFT 0 /* Desired back to back inter-packet gap. The default is 96-bit time */ +#define MAC_IPG_IFG_IPGT_MASK 0x7f +#define MAC_IPG_IFG_MIFG_SHIFT 8 /* Minimum number of IFG to enforce in between RX frames */ +#define MAC_IPG_IFG_MIFG_MASK 0xff /* Frame gap below such IFP is dropped */ +#define MAC_IPG_IFG_IPGR1_SHIFT 16 /* 64bit Carrier-Sense window */ +#define MAC_IPG_IFG_IPGR1_MASK 0x7f +#define MAC_IPG_IFG_IPGR2_SHIFT 24 /* 96-bit IPG window */ +#define MAC_IPG_IFG_IPGR2_MASK 0x7f + +/* MAC STATION ADDRESS */ +#define REG_MAC_STA_ADDR 0x1488 + +/* Hash table for multicast address */ +#define REG_RX_HASH_TABLE 0x1490 + + +/* MAC Half-Duplex Control Register */ +#define REG_MAC_HALF_DUPLX_CTRL 0x1498 +#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0 /* Collision Window */ +#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff +#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12 /* Retransmission maximum, afterwards the packet will be discarded */ +#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000 /* 1: Allow the transmission of a packet which has been excessively deferred */ +#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000 /* 1: No back-off on collision, immediately start the retransmission */ +#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000 /* 1: No back-off on backpressure, immediately start the transmission after back pressure */ +#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000 /* 1: Alternative Binary Exponential Back-off Enabled */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20 /* Maximum binary exponential number */ +#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf +#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24 /* IPG to start JAM for collision based flow control in half-duplex */ +#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf /* mode. 
In unit of 8-bit time */ + +/* Maximum Frame Length Control Register */ +#define REG_MTU 0x149c + +/* Wake-On-Lan control register */ +#define REG_WOL_CTRL 0x14a0 +#define WOL_PATTERN_EN 0x00000001 +#define WOL_PATTERN_PME_EN 0x00000002 +#define WOL_MAGIC_EN 0x00000004 +#define WOL_MAGIC_PME_EN 0x00000008 +#define WOL_LINK_CHG_EN 0x00000010 +#define WOL_LINK_CHG_PME_EN 0x00000020 +#define WOL_PATTERN_ST 0x00000100 +#define WOL_MAGIC_ST 0x00000200 +#define WOL_LINKCHG_ST 0x00000400 +#define WOL_CLK_SWITCH_EN 0x00008000 +#define WOL_PT0_EN 0x00010000 +#define WOL_PT1_EN 0x00020000 +#define WOL_PT2_EN 0x00040000 +#define WOL_PT3_EN 0x00080000 +#define WOL_PT4_EN 0x00100000 +#define WOL_PT5_EN 0x00200000 +#define WOL_PT6_EN 0x00400000 +/* WOL Length ( 2 DWORD ) */ +#define REG_WOL_PATTERN_LEN 0x14a4 +#define WOL_PT_LEN_MASK 0x7f +#define WOL_PT0_LEN_SHIFT 0 +#define WOL_PT1_LEN_SHIFT 8 +#define WOL_PT2_LEN_SHIFT 16 +#define WOL_PT3_LEN_SHIFT 24 +#define WOL_PT4_LEN_SHIFT 0 +#define WOL_PT5_LEN_SHIFT 8 +#define WOL_PT6_LEN_SHIFT 16 + +/* Internal SRAM Partition Register */ +#define REG_SRAM_TRD_ADDR 0x1518 +#define REG_SRAM_TRD_LEN 0x151C +#define REG_SRAM_RXF_ADDR 0x1520 +#define REG_SRAM_RXF_LEN 0x1524 +#define REG_SRAM_TXF_ADDR 0x1528 +#define REG_SRAM_TXF_LEN 0x152C +#define REG_SRAM_TCPH_ADDR 0x1530 +#define REG_SRAM_PKTH_ADDR 0x1532 + +/* Load Ptr Register */ +#define REG_LOAD_PTR 0x1534 /* Software sets this bit after the initialization of the head and tail */ + +/* + * addresses of all descriptors, as well as the following descriptor + * control register, which triggers each function block to load the head + * pointer to prepare for the operation. This bit is then self-cleared + * after one cycle. + */ + +/* Descriptor Control register */ +#define REG_RXF3_BASE_ADDR_HI 0x153C +#define REG_DESC_BASE_ADDR_HI 0x1540 +#define REG_RXF0_BASE_ADDR_HI 0x1540 /* share with DESC BASE ADDR HI */ +#define REG_HOST_RXF0_PAGE0_LO 0x1544 +#define REG_HOST_RXF0_PAGE1_LO 0x1548 +#define REG_TPD_BASE_ADDR_LO 0x154C +#define REG_RXF1_BASE_ADDR_HI 0x1550 +#define REG_RXF2_BASE_ADDR_HI 0x1554 +#define REG_HOST_RXFPAGE_SIZE 0x1558 +#define REG_TPD_RING_SIZE 0x155C +/* RSS about */ +#define REG_RSS_KEY0 0x14B0 +#define REG_RSS_KEY1 0x14B4 +#define REG_RSS_KEY2 0x14B8 +#define REG_RSS_KEY3 0x14BC +#define REG_RSS_KEY4 0x14C0 +#define REG_RSS_KEY5 0x14C4 +#define REG_RSS_KEY6 0x14C8 +#define REG_RSS_KEY7 0x14CC +#define REG_RSS_KEY8 0x14D0 +#define REG_RSS_KEY9 0x14D4 +#define REG_IDT_TABLE4 0x14E0 +#define REG_IDT_TABLE5 0x14E4 +#define REG_IDT_TABLE6 0x14E8 +#define REG_IDT_TABLE7 0x14EC +#define REG_IDT_TABLE0 0x1560 +#define REG_IDT_TABLE1 0x1564 +#define REG_IDT_TABLE2 0x1568 +#define REG_IDT_TABLE3 0x156C +#define REG_IDT_TABLE REG_IDT_TABLE0 +#define REG_RSS_HASH_VALUE 0x1570 +#define REG_RSS_HASH_FLAG 0x1574 +#define REG_BASE_CPU_NUMBER 0x157C + + +/* TXQ Control Register */ +#define REG_TXQ_CTRL 0x1580 +#define TXQ_CTRL_NUM_TPD_BURST_MASK 0xF +#define TXQ_CTRL_NUM_TPD_BURST_SHIFT 0 +#define TXQ_CTRL_EN 0x20 /* 1: Enable TXQ */ +#define TXQ_CTRL_ENH_MODE 0x40 /* Performance enhancement mode, in which up to two back-to-back DMA read commands might be dispatched. */ +#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16 /* Number of data byte to read in a cache-aligned burst. Each SRAM entry is 8-byte in length. */ +#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff + +/* Jumbo packet Threshold for task offload */ +#define REG_TX_EARLY_TH 0x1584 /* Jumbo frame threshold in QWORD unit. 
Packet greater than */ +/* JUMBO_TASK_OFFLOAD_THRESHOLD will not be task offloaded. */ +#define TX_TX_EARLY_TH_MASK 0x7ff +#define TX_TX_EARLY_TH_SHIFT 0 + + +/* RXQ Control Register */ +#define REG_RXQ_CTRL 0x15A0 +#define RXQ_CTRL_PBA_ALIGN_32 0 /* rx-packet alignment */ +#define RXQ_CTRL_PBA_ALIGN_64 1 +#define RXQ_CTRL_PBA_ALIGN_128 2 +#define RXQ_CTRL_PBA_ALIGN_256 3 +#define RXQ_CTRL_Q1_EN 0x10 +#define RXQ_CTRL_Q2_EN 0x20 +#define RXQ_CTRL_Q3_EN 0x40 +#define RXQ_CTRL_IPV6_XSUM_VERIFY_EN 0x80 +#define RXQ_CTRL_HASH_TLEN_SHIFT 8 +#define RXQ_CTRL_HASH_TLEN_MASK 0xFF +#define RXQ_CTRL_HASH_TYPE_IPV4 0x10000 +#define RXQ_CTRL_HASH_TYPE_IPV4_TCP 0x20000 +#define RXQ_CTRL_HASH_TYPE_IPV6 0x40000 +#define RXQ_CTRL_HASH_TYPE_IPV6_TCP 0x80000 +#define RXQ_CTRL_RSS_MODE_DISABLE 0 +#define RXQ_CTRL_RSS_MODE_SQSINT 0x4000000 +#define RXQ_CTRL_RSS_MODE_MQUESINT 0x8000000 +#define RXQ_CTRL_RSS_MODE_MQUEMINT 0xC000000 +#define RXQ_CTRL_NIP_QUEUE_SEL_TBL 0x10000000 +#define RXQ_CTRL_HASH_ENABLE 0x20000000 +#define RXQ_CTRL_CUT_THRU_EN 0x40000000 +#define RXQ_CTRL_EN 0x80000000 + +/* Rx jumbo packet threshold and rrd retirement timer */ +#define REG_RXQ_JMBOSZ_RRDTIM 0x15A4 +/* + * Jumbo packet threshold for non-VLAN packet, in QWORD (64-bit) unit. + * When the packet length greater than or equal to this value, RXQ + * shall start cut-through forwarding of the received packet. + */ +#define RXQ_JMBOSZ_TH_MASK 0x7ff +#define RXQ_JMBOSZ_TH_SHIFT 0 /* RRD retirement timer. Decrement by 1 after every 512ns passes*/ +#define RXQ_JMBO_LKAH_MASK 0xf +#define RXQ_JMBO_LKAH_SHIFT 11 + +/* RXF flow control register */ +#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8 +#define RXQ_RXF_PAUSE_TH_HI_SHIFT 0 +#define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff +#define RXQ_RXF_PAUSE_TH_LO_SHIFT 16 +#define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff + + +/* DMA Engine Control Register */ +#define REG_DMA_CTRL 0x15C0 +#define DMA_CTRL_DMAR_IN_ORDER 0x1 +#define DMA_CTRL_DMAR_ENH_ORDER 0x2 +#define DMA_CTRL_DMAR_OUT_ORDER 0x4 +#define DMA_CTRL_RCB_VALUE 0x8 +#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4 +#define DMA_CTRL_DMAR_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7 +#define DMA_CTRL_DMAW_BURST_LEN_MASK 7 +#define DMA_CTRL_DMAR_REQ_PRI 0x400 +#define DMA_CTRL_DMAR_DLY_CNT_MASK 0x1F +#define DMA_CTRL_DMAR_DLY_CNT_SHIFT 11 +#define DMA_CTRL_DMAW_DLY_CNT_MASK 0xF +#define DMA_CTRL_DMAW_DLY_CNT_SHIFT 16 +#define DMA_CTRL_TXCMB_EN 0x100000 +#define DMA_CTRL_RXCMB_EN 0x200000 + + +/* CMB/SMB Control Register */ +#define REG_SMB_STAT_TIMER 0x15C4 +#define REG_TRIG_RRD_THRESH 0x15CA +#define REG_TRIG_TPD_THRESH 0x15C8 +#define REG_TRIG_TXTIMER 0x15CC +#define REG_TRIG_RXTIMER 0x15CE + +/* HOST RXF Page 1,2,3 address */ +#define REG_HOST_RXF1_PAGE0_LO 0x15D0 +#define REG_HOST_RXF1_PAGE1_LO 0x15D4 +#define REG_HOST_RXF2_PAGE0_LO 0x15D8 +#define REG_HOST_RXF2_PAGE1_LO 0x15DC +#define REG_HOST_RXF3_PAGE0_LO 0x15E0 +#define REG_HOST_RXF3_PAGE1_LO 0x15E4 + +/* Mail box */ +#define REG_MB_RXF1_RADDR 0x15B4 +#define REG_MB_RXF2_RADDR 0x15B8 +#define REG_MB_RXF3_RADDR 0x15BC +#define REG_MB_TPD_PROD_IDX 0x15F0 + +/* RXF-Page 0-3 PageNo & Valid bit */ +#define REG_HOST_RXF0_PAGE0_VLD 0x15F4 +#define HOST_RXF_VALID 1 +#define HOST_RXF_PAGENO_SHIFT 1 +#define HOST_RXF_PAGENO_MASK 0x7F +#define REG_HOST_RXF0_PAGE1_VLD 0x15F5 +#define REG_HOST_RXF1_PAGE0_VLD 0x15F6 +#define REG_HOST_RXF1_PAGE1_VLD 0x15F7 +#define REG_HOST_RXF2_PAGE0_VLD 0x15F8 +#define REG_HOST_RXF2_PAGE1_VLD 0x15F9 +#define REG_HOST_RXF3_PAGE0_VLD 0x15FA +#define REG_HOST_RXF3_PAGE1_VLD 0x15FB 
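
The page-valid registers above are byte-wide and pack two fields: bit 0 is the valid flag (HOST_RXF_VALID) and bits 7:1 carry the page number (HOST_RXF_PAGENO_MASK shifted left by HOST_RXF_PAGENO_SHIFT). As a minimal sketch of that layout only (the helper name rxf_page_vld_value is hypothetical; the driver itself writes these registers directly via AT_WRITE_REGB in atl1e_configure_des_ring), composing a value would look like:

/* Hypothetical illustration of the REG_HOST_RXF*_PAGE*_VLD field layout;
 * not part of the driver source. */
static inline u8 rxf_page_vld_value(u8 page_no, bool valid)
{
	return (u8)(((page_no & HOST_RXF_PAGENO_MASK) << HOST_RXF_PAGENO_SHIFT) |
		    (valid ? HOST_RXF_VALID : 0));
}
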
+ +/* Interrupt Status Register */ +#define REG_ISR 0x1600 +#define ISR_SMB 1 +#define ISR_TIMER 2 /* Interrupt when Timer is counted down to zero */ +/* + * Software manual interrupt, for debug. Set when SW_MAN_INT_EN is set + * in Table 51 Selene Master Control Register (Offset 0x1400). + */ +#define ISR_MANUAL 4 +#define ISR_HW_RXF_OV 8 /* RXF overflow interrupt */ +#define ISR_HOST_RXF0_OV 0x10 +#define ISR_HOST_RXF1_OV 0x20 +#define ISR_HOST_RXF2_OV 0x40 +#define ISR_HOST_RXF3_OV 0x80 +#define ISR_TXF_UN 0x100 +#define ISR_RX0_PAGE_FULL 0x200 +#define ISR_DMAR_TO_RST 0x400 +#define ISR_DMAW_TO_RST 0x800 +#define ISR_GPHY 0x1000 +#define ISR_TX_CREDIT 0x2000 +#define ISR_GPHY_LPW 0x4000 /* GPHY low power state interrupt */ +#define ISR_RX_PKT 0x10000 /* One packet received, triggered by RFD */ +#define ISR_TX_PKT 0x20000 /* One packet transmitted, triggered by TPD */ +#define ISR_TX_DMA 0x40000 +#define ISR_RX_PKT_1 0x80000 +#define ISR_RX_PKT_2 0x100000 +#define ISR_RX_PKT_3 0x200000 +#define ISR_MAC_RX 0x400000 +#define ISR_MAC_TX 0x800000 +#define ISR_UR_DETECTED 0x1000000 +#define ISR_FERR_DETECTED 0x2000000 +#define ISR_NFERR_DETECTED 0x4000000 +#define ISR_CERR_DETECTED 0x8000000 +#define ISR_PHY_LINKDOWN 0x10000000 +#define ISR_DIS_INT 0x80000000 + + +/* Interrupt Mask Register */ +#define REG_IMR 0x1604 + + +#define IMR_NORMAL_MASK (\ + ISR_SMB |\ + ISR_TXF_UN |\ + ISR_HW_RXF_OV |\ + ISR_HOST_RXF0_OV|\ + ISR_MANUAL |\ + ISR_GPHY |\ + ISR_GPHY_LPW |\ + ISR_DMAR_TO_RST |\ + ISR_DMAW_TO_RST |\ + ISR_PHY_LINKDOWN|\ + ISR_RX_PKT |\ + ISR_TX_PKT) + +#define ISR_TX_EVENT (ISR_TXF_UN | ISR_TX_PKT) +#define ISR_RX_EVENT (ISR_HOST_RXF0_OV | ISR_HW_RXF_OV | ISR_RX_PKT) + +#define REG_MAC_RX_STATUS_BIN 0x1700 +#define REG_MAC_RX_STATUS_END 0x175c +#define REG_MAC_TX_STATUS_BIN 0x1760 +#define REG_MAC_TX_STATUS_END 0x17c0 + +/* Hardware Offset Register */ +#define REG_HOST_RXF0_PAGEOFF 0x1800 +#define REG_TPD_CONS_IDX 0x1804 +#define REG_HOST_RXF1_PAGEOFF 0x1808 +#define REG_HOST_RXF2_PAGEOFF 0x180C +#define REG_HOST_RXF3_PAGEOFF 0x1810 + +/* RXF-Page 0-3 Offset DMA Address */ +#define REG_HOST_RXF0_MB0_LO 0x1820 +#define REG_HOST_RXF0_MB1_LO 0x1824 +#define REG_HOST_RXF1_MB0_LO 0x1828 +#define REG_HOST_RXF1_MB1_LO 0x182C +#define REG_HOST_RXF2_MB0_LO 0x1830 +#define REG_HOST_RXF2_MB1_LO 0x1834 +#define REG_HOST_RXF3_MB0_LO 0x1838 +#define REG_HOST_RXF3_MB1_LO 0x183C + +/* Tpd CMB DMA Address */ +#define REG_HOST_TX_CMB_LO 0x1840 +#define REG_HOST_SMB_ADDR_LO 0x1844 + +/* DEBUG ADDR */ +#define REG_DEBUG_DATA0 0x1900 +#define REG_DEBUG_DATA1 0x1904 + +/***************************** MII definition ***************************************/ +/* PHY Common Register */ +#define MII_AT001_PSCR 0x10 +#define MII_AT001_PSSR 0x11 +#define MII_INT_CTRL 0x12 +#define MII_INT_STATUS 0x13 +#define MII_SMARTSPEED 0x14 +#define MII_LBRERROR 0x18 +#define MII_RESV2 0x1a + +#define MII_DBG_ADDR 0x1D +#define MII_DBG_DATA 0x1E + +/* Autoneg Advertisement Register */ +#define MII_AR_DEFAULT_CAP_MASK 0 + +/* 1000BASE-T Control Register */ +#define MII_AT001_CR_1000T_SPEED_MASK \ + (ADVERTISE_1000FULL | ADVERTISE_1000HALF) +#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK MII_AT001_CR_1000T_SPEED_MASK + +/* AT001 PHY Specific Control Register */ +#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define MII_AT001_PSCR_MAC_POWERDOWN 0x0008 
+#define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ +/* Manual MDI configuration */ +#define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. + */ +#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080 +/* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100 +/* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ +#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5 +#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 +/* AT001 PHY Specific Status Register */ +#define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#endif /*_ATHL1E_HW_H_*/ diff --git a/addons/atl1e/src/4.4.180/atl1e_main.c b/addons/atl1e/src/4.4.180/atl1e_main.c new file mode 100644 index 00000000..59a03a19 --- /dev/null +++ b/addons/atl1e/src/4.4.180/atl1e_main.c @@ -0,0 +1,2578 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#include "atl1e.h" + +#define DRV_VERSION "1.0.0.7-NAPI" + +char atl1e_driver_name[] = "ATL1E"; +char atl1e_driver_version[] = DRV_VERSION; +#define PCI_DEVICE_ID_ATTANSIC_L1E 0x1026 +/* + * atl1e_pci_tbl - PCI Device ID Table + * + * Wildcard entries (PCI_ANY_ID) should come last + * Last entry must be all 0s + * + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +static const struct pci_device_id atl1e_pci_tbl[] = { + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)}, + {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)}, + /* required last entry */ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, atl1e_pci_tbl); + +MODULE_AUTHOR("Atheros Corporation, , Jie Yang "); +MODULE_DESCRIPTION("Atheros 1000M Ethernet Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter); + +static const u16 +atl1e_rx_page_vld_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = +{ + {REG_HOST_RXF0_PAGE0_VLD, REG_HOST_RXF0_PAGE1_VLD}, + {REG_HOST_RXF1_PAGE0_VLD, REG_HOST_RXF1_PAGE1_VLD}, + {REG_HOST_RXF2_PAGE0_VLD, REG_HOST_RXF2_PAGE1_VLD}, + {REG_HOST_RXF3_PAGE0_VLD, REG_HOST_RXF3_PAGE1_VLD} +}; + +static const u16 atl1e_rx_page_hi_addr_regs[AT_MAX_RECEIVE_QUEUE] = +{ + REG_RXF0_BASE_ADDR_HI, + REG_RXF1_BASE_ADDR_HI, + REG_RXF2_BASE_ADDR_HI, + REG_RXF3_BASE_ADDR_HI +}; + +static const u16 +atl1e_rx_page_lo_addr_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = +{ + {REG_HOST_RXF0_PAGE0_LO, REG_HOST_RXF0_PAGE1_LO}, + {REG_HOST_RXF1_PAGE0_LO, REG_HOST_RXF1_PAGE1_LO}, + {REG_HOST_RXF2_PAGE0_LO, REG_HOST_RXF2_PAGE1_LO}, + {REG_HOST_RXF3_PAGE0_LO, REG_HOST_RXF3_PAGE1_LO} +}; + +static const u16 +atl1e_rx_page_write_offset_regs[AT_MAX_RECEIVE_QUEUE][AT_PAGE_NUM_PER_QUEUE] = +{ + {REG_HOST_RXF0_MB0_LO, REG_HOST_RXF0_MB1_LO}, + {REG_HOST_RXF1_MB0_LO, REG_HOST_RXF1_MB1_LO}, + {REG_HOST_RXF2_MB0_LO, REG_HOST_RXF2_MB1_LO}, + {REG_HOST_RXF3_MB0_LO, REG_HOST_RXF3_MB1_LO} +}; + +static const u16 atl1e_pay_load_size[] = { + 128, 256, 512, 1024, 2048, 4096, +}; + +/** + * atl1e_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + */ +static inline void atl1e_irq_enable(struct atl1e_adapter *adapter) +{ + if (likely(atomic_dec_and_test(&adapter->irq_sem))) { + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + AT_WRITE_REG(&adapter->hw, REG_IMR, IMR_NORMAL_MASK); + AT_WRITE_FLUSH(&adapter->hw); + } +} + +/** + * atl1e_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + */ +static inline void atl1e_irq_disable(struct atl1e_adapter *adapter) +{ + atomic_inc(&adapter->irq_sem); + AT_WRITE_REG(&adapter->hw, REG_IMR, 0); + AT_WRITE_FLUSH(&adapter->hw); + synchronize_irq(adapter->pdev->irq); +} + +/** + * atl1e_irq_reset - reset interrupt confiure on the NIC + * @adapter: board private structure + */ +static inline void atl1e_irq_reset(struct atl1e_adapter *adapter) +{ + atomic_set(&adapter->irq_sem, 0); + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + AT_WRITE_REG(&adapter->hw, REG_IMR, 0); + AT_WRITE_FLUSH(&adapter->hw); +} + +/** + * atl1e_phy_config - Timer Call-back + * @data: pointer to netdev cast into an unsigned long + */ +static void atl1e_phy_config(unsigned long data) +{ + struct atl1e_adapter *adapter = (struct atl1e_adapter *) data; + struct atl1e_hw *hw = &adapter->hw; + unsigned long flags; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + atl1e_restart_autoneg(hw); + 
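+	/* atl1e_restart_autoneg() rewrites the MII advertisement
+	 * registers, so it must run with mdio_lock held; the lock is
+	 * released below. */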
spin_unlock_irqrestore(&adapter->mdio_lock, flags); +} + +void atl1e_reinit_locked(struct atl1e_adapter *adapter) +{ + + WARN_ON(in_interrupt()); + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + atl1e_down(adapter); + atl1e_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); +} + +static void atl1e_reset_task(struct work_struct *work) +{ + struct atl1e_adapter *adapter; + adapter = container_of(work, struct atl1e_adapter, reset_task); + + atl1e_reinit_locked(adapter); +} + +static int atl1e_check_link(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + int err = 0; + u16 speed, duplex, phy_data; + + /* MII_BMSR must read twice */ + atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); + atl1e_read_phy_reg(hw, MII_BMSR, &phy_data); + if ((phy_data & BMSR_LSTATUS) == 0) { + /* link down */ + if (netif_carrier_ok(netdev)) { /* old link state: Up */ + u32 value; + /* disable rx */ + value = AT_READ_REG(hw, REG_MAC_CTRL); + value &= ~MAC_CTRL_RX_EN; + AT_WRITE_REG(hw, REG_MAC_CTRL, value); + adapter->link_speed = SPEED_0; + netif_carrier_off(netdev); + netif_stop_queue(netdev); + } + } else { + /* Link Up */ + err = atl1e_get_speed_and_duplex(hw, &speed, &duplex); + if (unlikely(err)) + return err; + + /* link result is our setting */ + if (adapter->link_speed != speed || + adapter->link_duplex != duplex) { + adapter->link_speed = speed; + adapter->link_duplex = duplex; + atl1e_setup_mac_ctrl(adapter); + netdev_info(netdev, + "NIC Link is Up <%d Mbps %s Duplex>\n", + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full" : "Half"); + } + + if (!netif_carrier_ok(netdev)) { + /* Link down -> Up */ + netif_carrier_on(netdev); + netif_wake_queue(netdev); + } + } + return 0; +} + +/** + * atl1e_link_chg_task - deal with link change event Out of interrupt context + * @netdev: network interface device structure + */ +static void atl1e_link_chg_task(struct work_struct *work) +{ + struct atl1e_adapter *adapter; + unsigned long flags; + + adapter = container_of(work, struct atl1e_adapter, link_chg_task); + spin_lock_irqsave(&adapter->mdio_lock, flags); + atl1e_check_link(adapter); + spin_unlock_irqrestore(&adapter->mdio_lock, flags); +} + +static void atl1e_link_chg_event(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 phy_data = 0; + u16 link_up = 0; + + spin_lock(&adapter->mdio_lock); + atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + atl1e_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data); + spin_unlock(&adapter->mdio_lock); + link_up = phy_data & BMSR_LSTATUS; + /* notify upper layer link down ASAP */ + if (!link_up) { + if (netif_carrier_ok(netdev)) { + /* old link state: Up */ + netdev_info(netdev, "NIC Link is Down\n"); + adapter->link_speed = SPEED_0; + netif_stop_queue(netdev); + } + } + schedule_work(&adapter->link_chg_task); +} + +static void atl1e_del_timer(struct atl1e_adapter *adapter) +{ + del_timer_sync(&adapter->phy_config_timer); +} + +static void atl1e_cancel_work(struct atl1e_adapter *adapter) +{ + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->link_chg_task); +} + +/** + * atl1e_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + */ +static void atl1e_tx_timeout(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); +} + +/** + * atl1e_set_multi - Multicast and 
Promiscuous mode set + * @netdev: network interface device structure + * + * The set_multi entry point is called whenever the multicast address + * list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper multicast, + * promiscuous mode, and all-multi behavior. + */ +static void atl1e_set_multi(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u32 mac_ctrl_data = 0; + u32 hash_value; + + /* Check for Promiscuous and All Multicast modes */ + mac_ctrl_data = AT_READ_REG(hw, REG_MAC_CTRL); + + if (netdev->flags & IFF_PROMISC) { + mac_ctrl_data |= MAC_CTRL_PROMIS_EN; + } else if (netdev->flags & IFF_ALLMULTI) { + mac_ctrl_data |= MAC_CTRL_MC_ALL_EN; + mac_ctrl_data &= ~MAC_CTRL_PROMIS_EN; + } else { + mac_ctrl_data &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN); + } + + AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); + + /* clear the old settings from the multicast hash table */ + AT_WRITE_REG(hw, REG_RX_HASH_TABLE, 0); + AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0); + + /* comoute mc addresses' hash value ,and put it into hash table */ + netdev_for_each_mc_addr(ha, netdev) { + hash_value = atl1e_hash_mc_addr(hw, ha->addr); + atl1e_hash_set(hw, hash_value); + } +} + +static void __atl1e_rx_mode(netdev_features_t features, u32 *mac_ctrl_data) +{ + + if (features & NETIF_F_RXALL) { + /* enable RX of ALL frames */ + *mac_ctrl_data |= MAC_CTRL_DBG; + } else { + /* disable RX of ALL frames */ + *mac_ctrl_data &= ~MAC_CTRL_DBG; + } +} + +static void atl1e_rx_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + u32 mac_ctrl_data = 0; + + netdev_dbg(adapter->netdev, "%s\n", __func__); + + atl1e_irq_disable(adapter); + mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL); + __atl1e_rx_mode(features, &mac_ctrl_data); + AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); + atl1e_irq_enable(adapter); +} + + +static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data) +{ + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + *mac_ctrl_data |= MAC_CTRL_RMV_VLAN; + } else { + /* disable VLAN tag insert/strip */ + *mac_ctrl_data &= ~MAC_CTRL_RMV_VLAN; + } +} + +static void atl1e_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + u32 mac_ctrl_data = 0; + + netdev_dbg(adapter->netdev, "%s\n", __func__); + + atl1e_irq_disable(adapter); + mac_ctrl_data = AT_READ_REG(&adapter->hw, REG_MAC_CTRL); + __atl1e_vlan_mode(features, &mac_ctrl_data); + AT_WRITE_REG(&adapter->hw, REG_MAC_CTRL, mac_ctrl_data); + atl1e_irq_enable(adapter); +} + +static void atl1e_restore_vlan(struct atl1e_adapter *adapter) +{ + netdev_dbg(adapter->netdev, "%s\n", __func__); + atl1e_vlan_mode(adapter->netdev, adapter->netdev->features); +} + +/** + * atl1e_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + */ +static int atl1e_set_mac_addr(struct net_device *netdev, void *p) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + if (netif_running(netdev)) + return -EBUSY; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + 
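+	/* keep the hw-level copy of the station address in sync with
+	 * netdev->dev_addr before programming the MAC address registers */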
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len); + + atl1e_hw_set_mac_addr(&adapter->hw); + + return 0; +} + +static netdev_features_t atl1e_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* + * Since there is no support for separate rx/tx vlan accel + * enable/disable make sure tx flag is always in same state as rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int atl1e_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = netdev->features ^ features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + atl1e_vlan_mode(netdev, features); + + if (changed & NETIF_F_RXALL) + atl1e_rx_mode(netdev, features); + + + return 0; +} + +/** + * atl1e_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + */ +static int atl1e_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + int old_mtu = netdev->mtu; + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; + + if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + netdev_warn(adapter->netdev, "invalid MTU setting\n"); + return -EINVAL; + } + /* set MTU */ + if (old_mtu != new_mtu && netif_running(netdev)) { + while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) + msleep(1); + netdev->mtu = new_mtu; + adapter->hw.max_frame_size = new_mtu; + adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3; + atl1e_down(adapter); + atl1e_up(adapter); + clear_bit(__AT_RESETTING, &adapter->flags); + } + return 0; +} + +/* + * caller should hold mdio_lock + */ +static int atl1e_mdio_read(struct net_device *netdev, int phy_id, int reg_num) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + u16 result; + + atl1e_read_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, &result); + return result; +} + +static void atl1e_mdio_write(struct net_device *netdev, int phy_id, + int reg_num, int val) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + atl1e_write_phy_reg(&adapter->hw, reg_num & MDIO_REG_ADDR_MASK, val); +} + +static int atl1e_mii_ioctl(struct net_device *netdev, + struct ifreq *ifr, int cmd) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + unsigned long flags; + int retval = 0; + + if (!netif_running(netdev)) + return -EINVAL; + + spin_lock_irqsave(&adapter->mdio_lock, flags); + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 0; + break; + + case SIOCGMIIREG: + if (atl1e_read_phy_reg(&adapter->hw, data->reg_num & 0x1F, + &data->val_out)) { + retval = -EIO; + goto out; + } + break; + + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) { + retval = -EFAULT; + goto out; + } + + netdev_dbg(adapter->netdev, " write %x %x\n", + data->reg_num, data->val_in); + if (atl1e_write_phy_reg(&adapter->hw, + data->reg_num, data->val_in)) { + retval = -EIO; + goto out; + } + break; + + default: + retval = -EOPNOTSUPP; + break; + } +out: + spin_unlock_irqrestore(&adapter->mdio_lock, flags); + return retval; + +} + +static int atl1e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return atl1e_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +static void 
atl1e_setup_pcicmd(struct pci_dev *pdev) +{ + u16 cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + cmd &= ~(PCI_COMMAND_INTX_DISABLE | PCI_COMMAND_IO); + cmd |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_write_config_word(pdev, PCI_COMMAND, cmd); + + /* + * some motherboards BIOS(PXE/EFI) driver may set PME + * while they transfer control to OS (Windows/Linux) + * so we should clear this bit before NIC work normally + */ + pci_write_config_dword(pdev, REG_PM_CTRLSTAT, 0); + msleep(1); +} + +/** + * atl1e_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + */ +static int atl1e_alloc_queues(struct atl1e_adapter *adapter) +{ + return 0; +} + +/** + * atl1e_sw_init - Initialize general software structures (struct atl1e_adapter) + * @adapter: board private structure to initialize + * + * atl1e_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + */ +static int atl1e_sw_init(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + u32 phy_status_data = 0; + + adapter->wol = 0; + adapter->link_speed = SPEED_0; /* hardware init */ + adapter->link_duplex = FULL_DUPLEX; + adapter->num_rx_queues = 1; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS); + /* nic type */ + if (hw->revision_id >= 0xF0) { + hw->nic_type = athr_l2e_revB; + } else { + if (phy_status_data & PHY_STATUS_100M) + hw->nic_type = athr_l1e; + else + hw->nic_type = athr_l2e_revA; + } + + phy_status_data = AT_READ_REG(hw, REG_PHY_STATUS); + + if (phy_status_data & PHY_STATUS_EMI_CA) + hw->emi_ca = true; + else + hw->emi_ca = false; + + hw->phy_configured = false; + hw->preamble_len = 7; + hw->max_frame_size = adapter->netdev->mtu; + hw->rx_jumbo_th = (hw->max_frame_size + ETH_HLEN + + VLAN_HLEN + ETH_FCS_LEN + 7) >> 3; + + hw->rrs_type = atl1e_rrs_disable; + hw->indirect_tab = 0; + hw->base_cpu = 0; + + /* need confirm */ + + hw->ict = 50000; /* 100ms */ + hw->smb_timer = 200000; /* 200ms */ + hw->tpd_burst = 5; + hw->rrd_thresh = 1; + hw->tpd_thresh = adapter->tx_ring.count / 2; + hw->rx_count_down = 4; /* 2us resolution */ + hw->tx_count_down = hw->imt * 4 / 3; + hw->dmar_block = atl1e_dma_req_1024; + hw->dmaw_block = atl1e_dma_req_1024; + hw->dmar_dly_cnt = 15; + hw->dmaw_dly_cnt = 4; + + if (atl1e_alloc_queues(adapter)) { + netdev_err(adapter->netdev, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + atomic_set(&adapter->irq_sem, 1); + spin_lock_init(&adapter->mdio_lock); + spin_lock_init(&adapter->tx_lock); + + set_bit(__AT_DOWN, &adapter->flags); + + return 0; +} + +/** + * atl1e_clean_tx_ring - Free Tx-skb + * @adapter: board private structure + */ +static void atl1e_clean_tx_ring(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + struct atl1e_tx_buffer *tx_buffer = NULL; + struct pci_dev *pdev = adapter->pdev; + u16 index, ring_count; + + if (tx_ring->desc == NULL || tx_ring->tx_buffer == NULL) + return; + + ring_count = tx_ring->count; + /* first unmmap dma */ + for (index = 0; index < ring_count; index++) { + tx_buffer = &tx_ring->tx_buffer[index]; 
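+		/* undo the DMA mapping first; tx_buffer->flags records
+		 * whether the buffer was mapped as a single region or as
+		 * a page, so the matching unmap routine is used */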
+ if (tx_buffer->dma) { + if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE) + pci_unmap_single(pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE) + pci_unmap_page(pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + tx_buffer->dma = 0; + } + } + /* second free skb */ + for (index = 0; index < ring_count; index++) { + tx_buffer = &tx_ring->tx_buffer[index]; + if (tx_buffer->skb) { + dev_kfree_skb_any(tx_buffer->skb); + tx_buffer->skb = NULL; + } + } + /* Zero out Tx-buffers */ + memset(tx_ring->desc, 0, sizeof(struct atl1e_tpd_desc) * + ring_count); + memset(tx_ring->tx_buffer, 0, sizeof(struct atl1e_tx_buffer) * + ring_count); +} + +/** + * atl1e_clean_rx_ring - Free rx-reservation skbs + * @adapter: board private structure + */ +static void atl1e_clean_rx_ring(struct atl1e_adapter *adapter) +{ + struct atl1e_rx_ring *rx_ring = + &adapter->rx_ring; + struct atl1e_rx_page_desc *rx_page_desc = rx_ring->rx_page_desc; + u16 i, j; + + + if (adapter->ring_vir_addr == NULL) + return; + /* Zero out the descriptor ring */ + for (i = 0; i < adapter->num_rx_queues; i++) { + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + if (rx_page_desc[i].rx_page[j].addr != NULL) { + memset(rx_page_desc[i].rx_page[j].addr, 0, + rx_ring->real_page_size); + } + } + } +} + +static void atl1e_cal_ring_size(struct atl1e_adapter *adapter, u32 *ring_size) +{ + *ring_size = ((u32)(adapter->tx_ring.count * + sizeof(struct atl1e_tpd_desc) + 7 + /* tx ring, qword align */ + + adapter->rx_ring.real_page_size * AT_PAGE_NUM_PER_QUEUE * + adapter->num_rx_queues + 31 + /* rx ring, 32 bytes align */ + + (1 + AT_PAGE_NUM_PER_QUEUE * adapter->num_rx_queues) * + sizeof(u32) + 3)); + /* tx, rx cmd, dword align */ +} + +static void atl1e_init_ring_resources(struct atl1e_adapter *adapter) +{ + struct atl1e_rx_ring *rx_ring = NULL; + + rx_ring = &adapter->rx_ring; + + rx_ring->real_page_size = adapter->rx_ring.page_size + + adapter->hw.max_frame_size + + ETH_HLEN + VLAN_HLEN + + ETH_FCS_LEN; + rx_ring->real_page_size = roundup(rx_ring->real_page_size, 32); + atl1e_cal_ring_size(adapter, &adapter->ring_size); + + adapter->ring_vir_addr = NULL; + adapter->rx_ring.desc = NULL; + rwlock_init(&adapter->tx_ring.tx_lock); +} + +/* + * Read / Write Ptr Initialize: + */ +static void atl1e_init_ring_ptrs(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = NULL; + struct atl1e_rx_ring *rx_ring = NULL; + struct atl1e_rx_page_desc *rx_page_desc = NULL; + int i, j; + + tx_ring = &adapter->tx_ring; + rx_ring = &adapter->rx_ring; + rx_page_desc = rx_ring->rx_page_desc; + + tx_ring->next_to_use = 0; + atomic_set(&tx_ring->next_to_clean, 0); + + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_page_desc[i].rx_using = 0; + rx_page_desc[i].rx_nxseq = 0; + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + *rx_page_desc[i].rx_page[j].write_offset_addr = 0; + rx_page_desc[i].rx_page[j].read_offset = 0; + } + } +} + +/** + * atl1e_free_ring_resources - Free Tx / RX descriptor Resources + * @adapter: board private structure + * + * Free all transmit software resources + */ +static void atl1e_free_ring_resources(struct atl1e_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + + atl1e_clean_tx_ring(adapter); + atl1e_clean_rx_ring(adapter); + + if (adapter->ring_vir_addr) { + pci_free_consistent(pdev, adapter->ring_size, + adapter->ring_vir_addr, adapter->ring_dma); + adapter->ring_vir_addr = NULL; + } + + if (adapter->tx_ring.tx_buffer) { + 
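+		/* the tx_buffer bookkeeping array was kzalloc'ed separately
+		 * from the coherent DMA block, so it is freed with kfree() */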
kfree(adapter->tx_ring.tx_buffer); + adapter->tx_ring.tx_buffer = NULL; + } +} + +/** + * atl1e_setup_mem_resources - allocate Tx / RX descriptor resources + * @adapter: board private structure + * + * Return 0 on success, negative on failure + */ +static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct atl1e_tx_ring *tx_ring; + struct atl1e_rx_ring *rx_ring; + struct atl1e_rx_page_desc *rx_page_desc; + int size, i, j; + u32 offset = 0; + int err = 0; + + if (adapter->ring_vir_addr != NULL) + return 0; /* alloced already */ + + tx_ring = &adapter->tx_ring; + rx_ring = &adapter->rx_ring; + + /* real ring DMA buffer */ + + size = adapter->ring_size; + adapter->ring_vir_addr = pci_zalloc_consistent(pdev, adapter->ring_size, + &adapter->ring_dma); + if (adapter->ring_vir_addr == NULL) { + netdev_err(adapter->netdev, + "pci_alloc_consistent failed, size = D%d\n", size); + return -ENOMEM; + } + + rx_page_desc = rx_ring->rx_page_desc; + + /* Init TPD Ring */ + tx_ring->dma = roundup(adapter->ring_dma, 8); + offset = tx_ring->dma - adapter->ring_dma; + tx_ring->desc = adapter->ring_vir_addr + offset; + size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count); + tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL); + if (tx_ring->tx_buffer == NULL) { + err = -ENOMEM; + goto failed; + } + + /* Init RXF-Pages */ + offset += (sizeof(struct atl1e_tpd_desc) * tx_ring->count); + offset = roundup(offset, 32); + + for (i = 0; i < adapter->num_rx_queues; i++) { + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + rx_page_desc[i].rx_page[j].dma = + adapter->ring_dma + offset; + rx_page_desc[i].rx_page[j].addr = + adapter->ring_vir_addr + offset; + offset += rx_ring->real_page_size; + } + } + + /* Init CMB dma address */ + tx_ring->cmb_dma = adapter->ring_dma + offset; + tx_ring->cmb = adapter->ring_vir_addr + offset; + offset += sizeof(u32); + + for (i = 0; i < adapter->num_rx_queues; i++) { + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + rx_page_desc[i].rx_page[j].write_offset_dma = + adapter->ring_dma + offset; + rx_page_desc[i].rx_page[j].write_offset_addr = + adapter->ring_vir_addr + offset; + offset += sizeof(u32); + } + } + + if (unlikely(offset > adapter->ring_size)) { + netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n", + offset, adapter->ring_size); + err = -1; + goto failed; + } + + return 0; +failed: + if (adapter->ring_vir_addr != NULL) { + pci_free_consistent(pdev, adapter->ring_size, + adapter->ring_vir_addr, adapter->ring_dma); + adapter->ring_vir_addr = NULL; + } + return err; +} + +static inline void atl1e_configure_des_ring(struct atl1e_adapter *adapter) +{ + + struct atl1e_hw *hw = &adapter->hw; + struct atl1e_rx_ring *rx_ring = &adapter->rx_ring; + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + struct atl1e_rx_page_desc *rx_page_desc = NULL; + int i, j; + + AT_WRITE_REG(hw, REG_DESC_BASE_ADDR_HI, + (u32)((adapter->ring_dma & AT_DMA_HI_ADDR_MASK) >> 32)); + AT_WRITE_REG(hw, REG_TPD_BASE_ADDR_LO, + (u32)((tx_ring->dma) & AT_DMA_LO_ADDR_MASK)); + AT_WRITE_REG(hw, REG_TPD_RING_SIZE, (u16)(tx_ring->count)); + AT_WRITE_REG(hw, REG_HOST_TX_CMB_LO, + (u32)((tx_ring->cmb_dma) & AT_DMA_LO_ADDR_MASK)); + + rx_page_desc = rx_ring->rx_page_desc; + /* RXF Page Physical address / Page Length */ + for (i = 0; i < AT_MAX_RECEIVE_QUEUE; i++) { + AT_WRITE_REG(hw, atl1e_rx_page_hi_addr_regs[i], + (u32)((adapter->ring_dma & + AT_DMA_HI_ADDR_MASK) >> 32)); + for (j = 0; j < AT_PAGE_NUM_PER_QUEUE; j++) { + u32 page_phy_addr; + u32 
offset_phy_addr; + + page_phy_addr = rx_page_desc[i].rx_page[j].dma; + offset_phy_addr = + rx_page_desc[i].rx_page[j].write_offset_dma; + + AT_WRITE_REG(hw, atl1e_rx_page_lo_addr_regs[i][j], + page_phy_addr & AT_DMA_LO_ADDR_MASK); + AT_WRITE_REG(hw, atl1e_rx_page_write_offset_regs[i][j], + offset_phy_addr & AT_DMA_LO_ADDR_MASK); + AT_WRITE_REGB(hw, atl1e_rx_page_vld_regs[i][j], 1); + } + } + /* Page Length */ + AT_WRITE_REG(hw, REG_HOST_RXFPAGE_SIZE, rx_ring->page_size); + /* Load all of base address above */ + AT_WRITE_REG(hw, REG_LOAD_PTR, 1); +} + +static inline void atl1e_configure_tx(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + u32 dev_ctrl_data = 0; + u32 max_pay_load = 0; + u32 jumbo_thresh = 0; + u32 extra_size = 0; /* Jumbo frame threshold in QWORD unit */ + + /* configure TXQ param */ + if (hw->nic_type != athr_l2e_revB) { + extra_size = ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; + if (hw->max_frame_size <= 1500) { + jumbo_thresh = hw->max_frame_size + extra_size; + } else if (hw->max_frame_size < 6*1024) { + jumbo_thresh = + (hw->max_frame_size + extra_size) * 2 / 3; + } else { + jumbo_thresh = (hw->max_frame_size + extra_size) / 2; + } + AT_WRITE_REG(hw, REG_TX_EARLY_TH, (jumbo_thresh + 7) >> 3); + } + + dev_ctrl_data = AT_READ_REG(hw, REG_DEVICE_CTRL); + + max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_PAYLOAD_SHIFT)) & + DEVICE_CTRL_MAX_PAYLOAD_MASK; + + hw->dmaw_block = min_t(u32, max_pay_load, hw->dmaw_block); + + max_pay_load = ((dev_ctrl_data >> DEVICE_CTRL_MAX_RREQ_SZ_SHIFT)) & + DEVICE_CTRL_MAX_RREQ_SZ_MASK; + hw->dmar_block = min_t(u32, max_pay_load, hw->dmar_block); + + if (hw->nic_type != athr_l2e_revB) + AT_WRITE_REGW(hw, REG_TXQ_CTRL + 2, + atl1e_pay_load_size[hw->dmar_block]); + /* enable TXQ */ + AT_WRITE_REGW(hw, REG_TXQ_CTRL, + (((u16)hw->tpd_burst & TXQ_CTRL_NUM_TPD_BURST_MASK) + << TXQ_CTRL_NUM_TPD_BURST_SHIFT) + | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN); +} + +static inline void atl1e_configure_rx(struct atl1e_adapter *adapter) +{ + struct atl1e_hw *hw = &adapter->hw; + u32 rxf_len = 0; + u32 rxf_low = 0; + u32 rxf_high = 0; + u32 rxf_thresh_data = 0; + u32 rxq_ctrl_data = 0; + + if (hw->nic_type != athr_l2e_revB) { + AT_WRITE_REGW(hw, REG_RXQ_JMBOSZ_RRDTIM, + (u16)((hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) << + RXQ_JMBOSZ_TH_SHIFT | + (1 & RXQ_JMBO_LKAH_MASK) << + RXQ_JMBO_LKAH_SHIFT)); + + rxf_len = AT_READ_REG(hw, REG_SRAM_RXF_LEN); + rxf_high = rxf_len * 4 / 5; + rxf_low = rxf_len / 5; + rxf_thresh_data = ((rxf_high & RXQ_RXF_PAUSE_TH_HI_MASK) + << RXQ_RXF_PAUSE_TH_HI_SHIFT) | + ((rxf_low & RXQ_RXF_PAUSE_TH_LO_MASK) + << RXQ_RXF_PAUSE_TH_LO_SHIFT); + + AT_WRITE_REG(hw, REG_RXQ_RXF_PAUSE_THRESH, rxf_thresh_data); + } + + /* RRS */ + AT_WRITE_REG(hw, REG_IDT_TABLE, hw->indirect_tab); + AT_WRITE_REG(hw, REG_BASE_CPU_NUMBER, hw->base_cpu); + + if (hw->rrs_type & atl1e_rrs_ipv4) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4; + + if (hw->rrs_type & atl1e_rrs_ipv4_tcp) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV4_TCP; + + if (hw->rrs_type & atl1e_rrs_ipv6) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6; + + if (hw->rrs_type & atl1e_rrs_ipv6_tcp) + rxq_ctrl_data |= RXQ_CTRL_HASH_TYPE_IPV6_TCP; + + if (hw->rrs_type != atl1e_rrs_disable) + rxq_ctrl_data |= + (RXQ_CTRL_HASH_ENABLE | RXQ_CTRL_RSS_MODE_MQUESINT); + + rxq_ctrl_data |= RXQ_CTRL_IPV6_XSUM_VERIFY_EN | RXQ_CTRL_PBA_ALIGN_32 | + RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN; + + AT_WRITE_REG(hw, REG_RXQ_CTRL, rxq_ctrl_data); +} + +static inline void atl1e_configure_dma(struct atl1e_adapter *adapter) +{ + struct 
atl1e_hw *hw = &adapter->hw;
+	u32 dma_ctrl_data = 0;
+
+	dma_ctrl_data = DMA_CTRL_RXCMB_EN;
+	dma_ctrl_data |= (((u32)hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK)
+		<< DMA_CTRL_DMAR_BURST_LEN_SHIFT;
+	dma_ctrl_data |= (((u32)hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK)
+		<< DMA_CTRL_DMAW_BURST_LEN_SHIFT;
+	dma_ctrl_data |= DMA_CTRL_DMAR_REQ_PRI | DMA_CTRL_DMAR_OUT_ORDER;
+	dma_ctrl_data |= (((u32)hw->dmar_dly_cnt) & DMA_CTRL_DMAR_DLY_CNT_MASK)
+		<< DMA_CTRL_DMAR_DLY_CNT_SHIFT;
+	dma_ctrl_data |= (((u32)hw->dmaw_dly_cnt) & DMA_CTRL_DMAW_DLY_CNT_MASK)
+		<< DMA_CTRL_DMAW_DLY_CNT_SHIFT;
+
+	AT_WRITE_REG(hw, REG_DMA_CTRL, dma_ctrl_data);
+}
+
+static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
+{
+	u32 value;
+	struct atl1e_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+
+	/* Config MAC CTRL Register */
+	value = MAC_CTRL_TX_EN |
+		MAC_CTRL_RX_EN;
+
+	if (FULL_DUPLEX == adapter->link_duplex)
+		value |= MAC_CTRL_DUPLX;
+
+	value |= ((u32)((SPEED_1000 == adapter->link_speed) ?
+			MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) <<
+			MAC_CTRL_SPEED_SHIFT);
+	value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW);
+
+	value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD);
+	value |= (((u32)adapter->hw.preamble_len &
+		   MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT);
+
+	__atl1e_vlan_mode(netdev->features, &value);
+
+	value |= MAC_CTRL_BC_EN;
+	if (netdev->flags & IFF_PROMISC)
+		value |= MAC_CTRL_PROMIS_EN;
+	if (netdev->flags & IFF_ALLMULTI)
+		value |= MAC_CTRL_MC_ALL_EN;
+	if (netdev->features & NETIF_F_RXALL)
+		value |= MAC_CTRL_DBG;
+	AT_WRITE_REG(hw, REG_MAC_CTRL, value);
+}
+
+/**
+ * atl1e_configure - Configure Transmit & Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx/Rx unit of the MAC after a reset.
+ */
+static int atl1e_configure(struct atl1e_adapter *adapter)
+{
+	struct atl1e_hw *hw = &adapter->hw;
+
+	u32 intr_status_data = 0;
+
+	/* clear interrupt status */
+	AT_WRITE_REG(hw, REG_ISR, ~0);
+
+	/* 1. set MAC Address */
+	atl1e_hw_set_mac_addr(hw);
+
+	/* 2. Init the Multicast HASH table (done by atl1e_set_multi) */
+
+	/* 3. Clear any WOL status */
+	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+
+	/* 4. Descriptor Ring BaseMem/Length/Read ptr/Write ptr
+	 *    TPD Ring/SMB/RXF0 Page CMBs, they use the same
+	 *    High 32bits memory */
+	atl1e_configure_des_ring(adapter);
+
+	/* 5. set Interrupt Moderator Timer */
+	AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER_INIT, hw->imt);
+	AT_WRITE_REGW(hw, REG_IRQ_MODU_TIMER2_INIT, hw->imt);
+	AT_WRITE_REG(hw, REG_MASTER_CTRL, MASTER_CTRL_LED_MODE |
+		MASTER_CTRL_ITIMER_EN | MASTER_CTRL_ITIMER2_EN);
+
+	/* 6. rx/tx threshold to trigger interrupt */
+	AT_WRITE_REGW(hw, REG_TRIG_RRD_THRESH, hw->rrd_thresh);
+	AT_WRITE_REGW(hw, REG_TRIG_TPD_THRESH, hw->tpd_thresh);
+	AT_WRITE_REGW(hw, REG_TRIG_RXTIMER, hw->rx_count_down);
+	AT_WRITE_REGW(hw, REG_TRIG_TXTIMER, hw->tx_count_down);
+
+	/* 7. set Interrupt Clear Timer */
+	AT_WRITE_REGW(hw, REG_CMBDISDMA_TIMER, hw->ict);
+
+	/* 8. set MTU */
+	AT_WRITE_REG(hw, REG_MTU, hw->max_frame_size + ETH_HLEN +
+			VLAN_HLEN + ETH_FCS_LEN);
+
+	/* 9. config TXQ early tx threshold */
+	atl1e_configure_tx(adapter);
+
+	/* 10. config RXQ */
+	atl1e_configure_rx(adapter);
+
+	/* 11. config DMA Engine */
+	atl1e_configure_dma(adapter);
+
+	/* 12.
smb timer to trig interrupt */ + AT_WRITE_REG(hw, REG_SMB_STAT_TIMER, hw->smb_timer); + + intr_status_data = AT_READ_REG(hw, REG_ISR); + if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) { + netdev_err(adapter->netdev, + "atl1e_configure failed, PCIE phy link down\n"); + return -1; + } + + AT_WRITE_REG(hw, REG_ISR, 0x7fffffff); + return 0; +} + +/** + * atl1e_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the timer callback. + */ +static struct net_device_stats *atl1e_get_stats(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw_stats *hw_stats = &adapter->hw_stats; + struct net_device_stats *net_stats = &netdev->stats; + + net_stats->rx_bytes = hw_stats->rx_byte_cnt; + net_stats->tx_bytes = hw_stats->tx_byte_cnt; + net_stats->multicast = hw_stats->rx_mcast; + net_stats->collisions = hw_stats->tx_1_col + + hw_stats->tx_2_col + + hw_stats->tx_late_col + + hw_stats->tx_abort_col; + + net_stats->rx_errors = hw_stats->rx_frag + + hw_stats->rx_fcs_err + + hw_stats->rx_len_err + + hw_stats->rx_sz_ov + + hw_stats->rx_rrd_ov + + hw_stats->rx_align_err + + hw_stats->rx_rxf_ov; + + net_stats->rx_fifo_errors = hw_stats->rx_rxf_ov; + net_stats->rx_length_errors = hw_stats->rx_len_err; + net_stats->rx_crc_errors = hw_stats->rx_fcs_err; + net_stats->rx_frame_errors = hw_stats->rx_align_err; + net_stats->rx_dropped = hw_stats->rx_rrd_ov; + + net_stats->tx_errors = hw_stats->tx_late_col + + hw_stats->tx_abort_col + + hw_stats->tx_underrun + + hw_stats->tx_trunc; + + net_stats->tx_fifo_errors = hw_stats->tx_underrun; + net_stats->tx_aborted_errors = hw_stats->tx_abort_col; + net_stats->tx_window_errors = hw_stats->tx_late_col; + + net_stats->rx_packets = hw_stats->rx_ok + net_stats->rx_errors; + net_stats->tx_packets = hw_stats->tx_ok + net_stats->tx_errors; + + return net_stats; +} + +static void atl1e_update_hw_stats(struct atl1e_adapter *adapter) +{ + u16 hw_reg_addr = 0; + unsigned long *stats_item = NULL; + + /* update rx status */ + hw_reg_addr = REG_MAC_RX_STATUS_BIN; + stats_item = &adapter->hw_stats.rx_ok; + while (hw_reg_addr <= REG_MAC_RX_STATUS_END) { + *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); + stats_item++; + hw_reg_addr += 4; + } + /* update tx status */ + hw_reg_addr = REG_MAC_TX_STATUS_BIN; + stats_item = &adapter->hw_stats.tx_ok; + while (hw_reg_addr <= REG_MAC_TX_STATUS_END) { + *stats_item += AT_READ_REG(&adapter->hw, hw_reg_addr); + stats_item++; + hw_reg_addr += 4; + } +} + +static inline void atl1e_clear_phy_int(struct atl1e_adapter *adapter) +{ + u16 phy_data; + + spin_lock(&adapter->mdio_lock); + atl1e_read_phy_reg(&adapter->hw, MII_INT_STATUS, &phy_data); + spin_unlock(&adapter->mdio_lock); +} + +static bool atl1e_clean_tx_irq(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + struct atl1e_tx_buffer *tx_buffer = NULL; + u16 hw_next_to_clean = AT_READ_REGW(&adapter->hw, REG_TPD_CONS_IDX); + u16 next_to_clean = atomic_read(&tx_ring->next_to_clean); + + while (next_to_clean != hw_next_to_clean) { + tx_buffer = &tx_ring->tx_buffer[next_to_clean]; + if (tx_buffer->dma) { + if (tx_buffer->flags & ATL1E_TX_PCIMAP_SINGLE) + pci_unmap_single(adapter->pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + else if (tx_buffer->flags & ATL1E_TX_PCIMAP_PAGE) + pci_unmap_page(adapter->pdev, tx_buffer->dma, + tx_buffer->length, 
PCI_DMA_TODEVICE); + tx_buffer->dma = 0; + } + + if (tx_buffer->skb) { + dev_kfree_skb_irq(tx_buffer->skb); + tx_buffer->skb = NULL; + } + + if (++next_to_clean == tx_ring->count) + next_to_clean = 0; + } + + atomic_set(&tx_ring->next_to_clean, next_to_clean); + + if (netif_queue_stopped(adapter->netdev) && + netif_carrier_ok(adapter->netdev)) { + netif_wake_queue(adapter->netdev); + } + + return true; +} + +/** + * atl1e_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + */ +static irqreturn_t atl1e_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct atl1e_adapter *adapter = netdev_priv(netdev); + struct atl1e_hw *hw = &adapter->hw; + int max_ints = AT_MAX_INT_WORK; + int handled = IRQ_NONE; + u32 status; + + do { + status = AT_READ_REG(hw, REG_ISR); + if ((status & IMR_NORMAL_MASK) == 0 || + (status & ISR_DIS_INT) != 0) { + if (max_ints != AT_MAX_INT_WORK) + handled = IRQ_HANDLED; + break; + } + /* link event */ + if (status & ISR_GPHY) + atl1e_clear_phy_int(adapter); + /* Ack ISR */ + AT_WRITE_REG(hw, REG_ISR, status | ISR_DIS_INT); + + handled = IRQ_HANDLED; + /* check if PCIE PHY Link down */ + if (status & ISR_PHY_LINKDOWN) { + netdev_err(adapter->netdev, + "pcie phy linkdown %x\n", status); + if (netif_running(adapter->netdev)) { + /* reset MAC */ + atl1e_irq_reset(adapter); + schedule_work(&adapter->reset_task); + break; + } + } + + /* check if DMA read/write error */ + if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) { + netdev_err(adapter->netdev, + "PCIE DMA RW error (status = 0x%x)\n", + status); + atl1e_irq_reset(adapter); + schedule_work(&adapter->reset_task); + break; + } + + if (status & ISR_SMB) + atl1e_update_hw_stats(adapter); + + /* link event */ + if (status & (ISR_GPHY | ISR_MANUAL)) { + netdev->stats.tx_carrier_errors++; + atl1e_link_chg_event(adapter); + break; + } + + /* transmit event */ + if (status & ISR_TX_EVENT) + atl1e_clean_tx_irq(adapter); + + if (status & ISR_RX_EVENT) { + /* + * disable rx interrupts, without + * the synchronize_irq bit + */ + AT_WRITE_REG(hw, REG_IMR, + IMR_NORMAL_MASK & ~ISR_RX_EVENT); + AT_WRITE_FLUSH(hw); + if (likely(napi_schedule_prep( + &adapter->napi))) + __napi_schedule(&adapter->napi); + } + } while (--max_ints > 0); + /* re-enable Interrupt*/ + AT_WRITE_REG(&adapter->hw, REG_ISR, 0); + + return handled; +} + +static inline void atl1e_rx_checksum(struct atl1e_adapter *adapter, + struct sk_buff *skb, struct atl1e_recv_ret_status *prrs) +{ + u8 *packet = (u8 *)(prrs + 1); + struct iphdr *iph; + u16 head_len = ETH_HLEN; + u16 pkt_flags; + u16 err_flags; + + skb_checksum_none_assert(skb); + pkt_flags = prrs->pkt_flag; + err_flags = prrs->err_flag; + if (((pkt_flags & RRS_IS_IPV4) || (pkt_flags & RRS_IS_IPV6)) && + ((pkt_flags & RRS_IS_TCP) || (pkt_flags & RRS_IS_UDP))) { + if (pkt_flags & RRS_IS_IPV4) { + if (pkt_flags & RRS_IS_802_3) + head_len += 8; + iph = (struct iphdr *) (packet + head_len); + if (iph->frag_off != 0 && !(pkt_flags & RRS_IS_IP_DF)) + goto hw_xsum; + } + if (!(err_flags & (RRS_ERR_IP_CSUM | RRS_ERR_L4_CSUM))) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + } + } + +hw_xsum : + return; +} + +static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter, + u8 que) +{ + struct atl1e_rx_page_desc *rx_page_desc = + (struct atl1e_rx_page_desc *) adapter->rx_ring.rx_page_desc; + u8 rx_using = rx_page_desc[que].rx_using; + + return &(rx_page_desc[que].rx_page[rx_using]); +} + +static void atl1e_clean_rx_irq(struct 
atl1e_adapter *adapter, u8 que, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct atl1e_rx_ring *rx_ring = &adapter->rx_ring; + struct atl1e_rx_page_desc *rx_page_desc = + (struct atl1e_rx_page_desc *) rx_ring->rx_page_desc; + struct sk_buff *skb = NULL; + struct atl1e_rx_page *rx_page = atl1e_get_rx_page(adapter, que); + u32 packet_size, write_offset; + struct atl1e_recv_ret_status *prrs; + + write_offset = *(rx_page->write_offset_addr); + if (likely(rx_page->read_offset < write_offset)) { + do { + if (*work_done >= work_to_do) + break; + (*work_done)++; + /* get new packet's rrs */ + prrs = (struct atl1e_recv_ret_status *) (rx_page->addr + + rx_page->read_offset); + /* check sequence number */ + if (prrs->seq_num != rx_page_desc[que].rx_nxseq) { + netdev_err(netdev, + "rx sequence number error (rx=%d) (expect=%d)\n", + prrs->seq_num, + rx_page_desc[que].rx_nxseq); + rx_page_desc[que].rx_nxseq++; + /* just for debug use */ + AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0, + (((u32)prrs->seq_num) << 16) | + rx_page_desc[que].rx_nxseq); + goto fatal_err; + } + rx_page_desc[que].rx_nxseq++; + + /* error packet */ + if ((prrs->pkt_flag & RRS_IS_ERR_FRAME) && + !(netdev->features & NETIF_F_RXALL)) { + if (prrs->err_flag & (RRS_ERR_BAD_CRC | + RRS_ERR_DRIBBLE | RRS_ERR_CODE | + RRS_ERR_TRUNC)) { + /* hardware error, discard this packet*/ + netdev_err(netdev, + "rx packet desc error %x\n", + *((u32 *)prrs + 1)); + goto skip_pkt; + } + } + + packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & + RRS_PKT_SIZE_MASK); + if (likely(!(netdev->features & NETIF_F_RXFCS))) + packet_size -= 4; /* CRC */ + + skb = netdev_alloc_skb_ip_align(netdev, packet_size); + if (skb == NULL) + goto skip_pkt; + + memcpy(skb->data, (u8 *)(prrs + 1), packet_size); + skb_put(skb, packet_size); + skb->protocol = eth_type_trans(skb, netdev); + atl1e_rx_checksum(adapter, skb, prrs); + + if (prrs->pkt_flag & RRS_IS_VLAN_TAG) { + u16 vlan_tag = (prrs->vtag >> 4) | + ((prrs->vtag & 7) << 13) | + ((prrs->vtag & 8) << 9); + netdev_dbg(netdev, + "RXD VLAN TAG=0x%04x\n", + prrs->vtag); + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); + } + netif_receive_skb(skb); + +skip_pkt: + /* skip current packet whether it's ok or not. 
*/ + rx_page->read_offset += + (((u32)((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & + RRS_PKT_SIZE_MASK) + + sizeof(struct atl1e_recv_ret_status) + 31) & + 0xFFFFFFE0); + + if (rx_page->read_offset >= rx_ring->page_size) { + /* mark this page clean */ + u16 reg_addr; + u8 rx_using; + + rx_page->read_offset = + *(rx_page->write_offset_addr) = 0; + rx_using = rx_page_desc[que].rx_using; + reg_addr = + atl1e_rx_page_vld_regs[que][rx_using]; + AT_WRITE_REGB(&adapter->hw, reg_addr, 1); + rx_page_desc[que].rx_using ^= 1; + rx_page = atl1e_get_rx_page(adapter, que); + } + write_offset = *(rx_page->write_offset_addr); + } while (rx_page->read_offset < write_offset); + } + + return; + +fatal_err: + if (!test_bit(__AT_DOWN, &adapter->flags)) + schedule_work(&adapter->reset_task); +} + +/** + * atl1e_clean - NAPI Rx polling callback + */ +static int atl1e_clean(struct napi_struct *napi, int budget) +{ + struct atl1e_adapter *adapter = + container_of(napi, struct atl1e_adapter, napi); + u32 imr_data; + int work_done = 0; + + /* Keep link state information with original netdev */ + if (!netif_carrier_ok(adapter->netdev)) + goto quit_polling; + + atl1e_clean_rx_irq(adapter, 0, &work_done, budget); + + /* If no Tx and not enough Rx work done, exit the polling mode */ + if (work_done < budget) { +quit_polling: + napi_complete(napi); + imr_data = AT_READ_REG(&adapter->hw, REG_IMR); + AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT); + /* test debug */ + if (test_bit(__AT_DOWN, &adapter->flags)) { + atomic_dec(&adapter->irq_sem); + netdev_err(adapter->netdev, + "atl1e_clean is called when AT_DOWN\n"); + } + /* reenable RX intr */ + /*atl1e_irq_enable(adapter); */ + + } + return work_done; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void atl1e_netpoll(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + atl1e_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +static inline u16 atl1e_tpd_avail(struct atl1e_adapter *adapter) +{ + struct atl1e_tx_ring *tx_ring = &adapter->tx_ring; + u16 next_to_use = 0; + u16 next_to_clean = 0; + + next_to_clean = atomic_read(&tx_ring->next_to_clean); + next_to_use = tx_ring->next_to_use; + + return (u16)(next_to_clean > next_to_use) ? 
+		(next_to_clean - next_to_use - 1) :
+		(tx_ring->count + next_to_clean - next_to_use - 1);
+}
+
+/*
+ * get next usable tpd
+ * Note: should call atl1e_tpd_avail to make sure
+ * there is enough tpd to use
+ */
+static struct atl1e_tpd_desc *atl1e_get_tpd(struct atl1e_adapter *adapter)
+{
+	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
+	u16 next_to_use = 0;
+
+	next_to_use = tx_ring->next_to_use;
+	if (++tx_ring->next_to_use == tx_ring->count)
+		tx_ring->next_to_use = 0;
+
+	memset(&tx_ring->desc[next_to_use], 0, sizeof(struct atl1e_tpd_desc));
+	return &tx_ring->desc[next_to_use];
+}
+
+static struct atl1e_tx_buffer *
+atl1e_get_tx_buffer(struct atl1e_adapter *adapter, struct atl1e_tpd_desc *tpd)
+{
+	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
+
+	return &tx_ring->tx_buffer[tpd - tx_ring->desc];
+}
+
+/* Calculate the number of transmit descriptors needed */
+static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
+{
+	int i = 0;
+	u16 tpd_req = 1;
+	u16 fg_size = 0;
+	u16 proto_hdr_len = 0;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
+		tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT);
+	}
+
+	if (skb_is_gso(skb)) {
+		if (skb->protocol == htons(ETH_P_IP) ||
+		   (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6)) {
+			proto_hdr_len = skb_transport_offset(skb) +
+					tcp_hdrlen(skb);
+			if (proto_hdr_len < skb_headlen(skb)) {
+				tpd_req += ((skb_headlen(skb) - proto_hdr_len +
+					   MAX_TX_BUF_LEN - 1) >>
+					   MAX_TX_BUF_SHIFT);
+			}
+		}
+
+	}
+	return tpd_req;
+}
+
+static int atl1e_tso_csum(struct atl1e_adapter *adapter,
+		       struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
+{
+	unsigned short offload_type;
+	u8 hdr_len;
+	u32 real_len;
+
+	if (skb_is_gso(skb)) {
+		int err;
+
+		err = skb_cow_head(skb, 0);
+		if (err < 0)
+			return err;
+
+		offload_type = skb_shinfo(skb)->gso_type;
+
+		if (offload_type & SKB_GSO_TCPV4) {
+			real_len = (((unsigned char *)ip_hdr(skb) - skb->data)
+					+ ntohs(ip_hdr(skb)->tot_len));
+
+			if (real_len < skb->len)
+				pskb_trim(skb, real_len);
+
+			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
+			if (unlikely(skb->len == hdr_len)) {
+				/* only checksum needed */
+				netdev_warn(adapter->netdev,
+					    "IPV4 tso with zero data??\n");
+				goto check_sum;
+			} else {
+				ip_hdr(skb)->check = 0;
+				ip_hdr(skb)->tot_len = 0;
+				tcp_hdr(skb)->check = ~csum_tcpudp_magic(
+							ip_hdr(skb)->saddr,
+							ip_hdr(skb)->daddr,
+							0, IPPROTO_TCP, 0);
+				tpd->word3 |= (ip_hdr(skb)->ihl &
+					TDP_V4_IPHL_MASK) <<
+					TPD_V4_IPHL_SHIFT;
+				tpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
+					TPD_TCPHDRLEN_MASK) <<
+					TPD_TCPHDRLEN_SHIFT;
+				tpd->word3 |= ((skb_shinfo(skb)->gso_size) &
+					TPD_MSS_MASK) << TPD_MSS_SHIFT;
+				tpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
+			}
+			return 0;
+		}
+	}
+
+check_sum:
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+		u8 css, cso;
+
+		cso = skb_checksum_start_offset(skb);
+		if (unlikely(cso & 0x1)) {
+			netdev_err(adapter->netdev,
+				   "payload offset should not be an odd number\n");
+			return -1;
+		} else {
+			css = cso + skb->csum_offset;
+			tpd->word3 |= (cso & TPD_PLOADOFFSET_MASK) <<
+					TPD_PLOADOFFSET_SHIFT;
+			tpd->word3 |= (css & TPD_CCSUMOFFSET_MASK) <<
+					TPD_CCSUMOFFSET_SHIFT;
+			tpd->word3 |= 1 << TPD_CC_SEGMENT_EN_SHIFT;
+		}
+	}
+
+	return 0;
+}
+
+static int atl1e_tx_map(struct atl1e_adapter *adapter,
+			struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
+{
+	struct atl1e_tpd_desc *use_tpd = NULL;
+	struct atl1e_tx_buffer *tx_buffer = NULL;
+	u16 buf_len = skb_headlen(skb);
+	u16 map_len = 0;
+	u16 mapped_len = 0;
+	u16 hdr_len =
0; + u16 nr_frags; + u16 f; + int segment; + int ring_start = adapter->tx_ring.next_to_use; + int ring_end; + + nr_frags = skb_shinfo(skb)->nr_frags; + segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK; + if (segment) { + /* TSO */ + map_len = hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + use_tpd = tpd; + + tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); + tx_buffer->length = map_len; + tx_buffer->dma = pci_map_single(adapter->pdev, + skb->data, hdr_len, PCI_DMA_TODEVICE); + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) + return -ENOSPC; + + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); + mapped_len += map_len; + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); + use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | + ((cpu_to_le32(tx_buffer->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); + } + + while (mapped_len < buf_len) { + /* mapped_len == 0, means we should use the first tpd, + which is given by caller */ + if (mapped_len == 0) { + use_tpd = tpd; + } else { + use_tpd = atl1e_get_tpd(adapter); + memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); + } + tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); + tx_buffer->skb = NULL; + + tx_buffer->length = map_len = + ((buf_len - mapped_len) >= MAX_TX_BUF_LEN) ? + MAX_TX_BUF_LEN : (buf_len - mapped_len); + tx_buffer->dma = + pci_map_single(adapter->pdev, skb->data + mapped_len, + map_len, PCI_DMA_TODEVICE); + + if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) { + /* We need to unwind the mappings we've done */ + ring_end = adapter->tx_ring.next_to_use; + adapter->tx_ring.next_to_use = ring_start; + while (adapter->tx_ring.next_to_use != ring_end) { + tpd = atl1e_get_tpd(adapter); + tx_buffer = atl1e_get_tx_buffer(adapter, tpd); + pci_unmap_single(adapter->pdev, tx_buffer->dma, + tx_buffer->length, PCI_DMA_TODEVICE); + } + /* Reset the tx rings next pointer */ + adapter->tx_ring.next_to_use = ring_start; + return -ENOSPC; + } + + ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_SINGLE); + mapped_len += map_len; + use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma); + use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) | + ((cpu_to_le32(tx_buffer->length) & + TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT); + } + + for (f = 0; f < nr_frags; f++) { + const struct skb_frag_struct *frag; + u16 i; + u16 seg_num; + + frag = &skb_shinfo(skb)->frags[f]; + buf_len = skb_frag_size(frag); + + seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; + for (i = 0; i < seg_num; i++) { + use_tpd = atl1e_get_tpd(adapter); + memcpy(use_tpd, tpd, sizeof(struct atl1e_tpd_desc)); + + tx_buffer = atl1e_get_tx_buffer(adapter, use_tpd); + BUG_ON(tx_buffer->skb); + + tx_buffer->skb = NULL; + tx_buffer->length = + (buf_len > MAX_TX_BUF_LEN) ? 
+				MAX_TX_BUF_LEN : buf_len;
+			buf_len -= tx_buffer->length;
+
+			tx_buffer->dma = skb_frag_dma_map(&adapter->pdev->dev,
+							  frag,
+							  (i * MAX_TX_BUF_LEN),
+							  tx_buffer->length,
+							  DMA_TO_DEVICE);
+
+			if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
+				/* We need to unwind the mappings we've done */
+				ring_end = adapter->tx_ring.next_to_use;
+				adapter->tx_ring.next_to_use = ring_start;
+				while (adapter->tx_ring.next_to_use != ring_end) {
+					tpd = atl1e_get_tpd(adapter);
+					tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
+					dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma,
+						       tx_buffer->length, DMA_TO_DEVICE);
+				}
+
+				/* Reset the ring next to use pointer */
+				adapter->tx_ring.next_to_use = ring_start;
+				return -ENOSPC;
+			}
+
+			ATL1E_SET_PCIMAP_TYPE(tx_buffer, ATL1E_TX_PCIMAP_PAGE);
+			use_tpd->buffer_addr = cpu_to_le64(tx_buffer->dma);
+			use_tpd->word2 = (use_tpd->word2 & (~TPD_BUFLEN_MASK)) |
+					((cpu_to_le32(tx_buffer->length) &
+					TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT);
+		}
+	}
+
+	if ((tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK)
+		/* note this one is a tcp header */
+		tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
+	/* The last tpd */
+
+	use_tpd->word3 |= 1 << TPD_EOP_SHIFT;
+	/* The last buffer info contains the skb address,
+	   so it will be freed after unmap */
+	tx_buffer->skb = skb;
+	return 0;
+}
+
+static void atl1e_tx_queue(struct atl1e_adapter *adapter, u16 count,
+			   struct atl1e_tpd_desc *tpd)
+{
+	struct atl1e_tx_ring *tx_ring = &adapter->tx_ring;
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64). */
+	wmb();
+	AT_WRITE_REG(&adapter->hw, REG_MB_TPD_PROD_IDX, tx_ring->next_to_use);
+}
+
+static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
+					  struct net_device *netdev)
+{
+	struct atl1e_adapter *adapter = netdev_priv(netdev);
+	unsigned long flags;
+	u16 tpd_req = 1;
+	struct atl1e_tpd_desc *tpd;
+
+	if (test_bit(__AT_DOWN, &adapter->flags)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (unlikely(skb->len <= 0)) {
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+	tpd_req = atl1e_cal_tdp_req(skb);
+	if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
+		return NETDEV_TX_LOCKED;
+
+	if (atl1e_tpd_avail(adapter) < tpd_req) {
+		/* not enough descriptors, just stop the queue */
+		netif_stop_queue(netdev);
+		spin_unlock_irqrestore(&adapter->tx_lock, flags);
+		return NETDEV_TX_BUSY;
+	}
+
+	tpd = atl1e_get_tpd(adapter);
+
+	if (skb_vlan_tag_present(skb)) {
+		u16 vlan_tag = skb_vlan_tag_get(skb);
+		u16 atl1e_vlan_tag;
+
+		tpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
+		AT_VLAN_TAG_TO_TPD_TAG(vlan_tag, atl1e_vlan_tag);
+		tpd->word2 |= (atl1e_vlan_tag & TPD_VLANTAG_MASK) <<
+				TPD_VLAN_SHIFT;
+	}
+
+	if (skb->protocol == htons(ETH_P_8021Q))
+		tpd->word3 |= 1 << TPD_VL_TAGGED_SHIFT;
+
+	if (skb_network_offset(skb) != ETH_HLEN)
+		tpd->word3 |= 1 << TPD_ETHTYPE_SHIFT; /* 802.3 frame */
+
+	/* do TSO and checksum */
+	if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
+		spin_unlock_irqrestore(&adapter->tx_lock, flags);
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (atl1e_tx_map(adapter, skb, tpd)) {
+		dev_kfree_skb_any(skb);
+		goto out;
+	}
+
+	atl1e_tx_queue(adapter, tpd_req, tpd);
+
+	netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+out:
+	spin_unlock_irqrestore(&adapter->tx_lock, flags);
+	return NETDEV_TX_OK;
+}
+
+static void atl1e_free_irq(struct atl1e_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+
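+	/* Editor's note: the IRQ below was requested with 'netdev' as the
+	 * dev_id cookie and IRQF_SHARED (see atl1e_request_irq further
+	 * down); free_irq() must be handed the same cookie so the right
+	 * handler is removed from the shared line.
+	 */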
free_irq(adapter->pdev->irq, netdev); +} + +static int atl1e_request_irq(struct atl1e_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct net_device *netdev = adapter->netdev; + int err = 0; + + err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name, + netdev); + if (err) { + netdev_dbg(adapter->netdev, + "Unable to allocate interrupt Error: %d\n", err); + return err; + } + netdev_dbg(netdev, "atl1e_request_irq OK\n"); + return err; +} + +int atl1e_up(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0; + u32 val; + + /* hardware has been reset, we need to reload some things */ + err = atl1e_init_hw(&adapter->hw); + if (err) { + err = -EIO; + return err; + } + atl1e_init_ring_ptrs(adapter); + atl1e_set_multi(netdev); + atl1e_restore_vlan(adapter); + + if (atl1e_configure(adapter)) { + err = -EIO; + goto err_up; + } + + clear_bit(__AT_DOWN, &adapter->flags); + napi_enable(&adapter->napi); + atl1e_irq_enable(adapter); + val = AT_READ_REG(&adapter->hw, REG_MASTER_CTRL); + AT_WRITE_REG(&adapter->hw, REG_MASTER_CTRL, + val | MASTER_CTRL_MANUAL_INT); + +err_up: + return err; +} + +void atl1e_down(struct atl1e_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer */ + set_bit(__AT_DOWN, &adapter->flags); + + netif_stop_queue(netdev); + + /* reset MAC to disable all RX/TX */ + atl1e_reset_hw(&adapter->hw); + msleep(1); + + napi_disable(&adapter->napi); + atl1e_del_timer(adapter); + atl1e_irq_disable(adapter); + + netif_carrier_off(netdev); + adapter->link_speed = SPEED_0; + adapter->link_duplex = -1; + atl1e_clean_tx_ring(adapter); + atl1e_clean_rx_ring(adapter); +} + +/** + * atl1e_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + */ +static int atl1e_open(struct net_device *netdev) +{ + struct atl1e_adapter *adapter = netdev_priv(netdev); + int err; + + /* disallow open during test */ + if (test_bit(__AT_TESTING, &adapter->flags)) + return -EBUSY; + + /* allocate rx/tx dma buffer & descriptors */ + atl1e_init_ring_resources(adapter); + err = atl1e_setup_ring_resources(adapter); + if (unlikely(err)) + return err; + + err = atl1e_request_irq(adapter); + if (unlikely(err)) + goto err_req_irq; + + err = atl1e_up(adapter); + if (unlikely(err)) + goto err_up; + + return 0; + +err_up: + atl1e_free_irq(adapter); +err_req_irq: + atl1e_free_ring_resources(adapter); + atl1e_reset_hw(&adapter->hw); + + return err; +} + +/** + * atl1e_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
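+ *
+ * In outline, the teardown below is (a sketch, error handling elided):
+ *
+ *   atl1e_down(adapter);                  stop queue, NAPI, timers; reset MAC
+ *   atl1e_free_irq(adapter);              release the interrupt line
+ *   atl1e_free_ring_resources(adapter);   free descriptor/buffer DMA memory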
+ */
+static int atl1e_close(struct net_device *netdev)
+{
+	struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+	WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
+	atl1e_down(adapter);
+	atl1e_free_irq(adapter);
+	atl1e_free_ring_resources(adapter);
+
+	return 0;
+}
+
+static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl1e_adapter *adapter = netdev_priv(netdev);
+	struct atl1e_hw *hw = &adapter->hw;
+	u32 ctrl = 0;
+	u32 mac_ctrl_data = 0;
+	u32 wol_ctrl_data = 0;
+	u16 mii_advertise_data = 0;
+	u16 mii_bmsr_data = 0;
+	u16 mii_intr_status_data = 0;
+	u32 wufc = adapter->wol;
+	u32 i;
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	if (netif_running(netdev)) {
+		WARN_ON(test_bit(__AT_RESETTING, &adapter->flags));
+		atl1e_down(adapter);
+	}
+	netif_device_detach(netdev);
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+#endif
+
+	if (wufc) {
+		/* get link status */
+		atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
+		atl1e_read_phy_reg(hw, MII_BMSR, &mii_bmsr_data);
+
+		mii_advertise_data = ADVERTISE_10HALF;
+
+		if ((atl1e_write_phy_reg(hw, MII_CTRL1000, 0) != 0) ||
+		    (atl1e_write_phy_reg(hw,
+			   MII_ADVERTISE, mii_advertise_data) != 0) ||
+		    (atl1e_phy_commit(hw)) != 0) {
+			netdev_dbg(adapter->netdev, "set phy register failed\n");
+			goto wol_dis;
+		}
+
+		hw->phy_configured = false; /* re-init PHY on resume */
+
+		/* turn on magic packet wol */
+		if (wufc & AT_WUFC_MAG)
+			wol_ctrl_data |= WOL_MAGIC_EN | WOL_MAGIC_PME_EN;
+
+		if (wufc & AT_WUFC_LNKC) {
+		/* if original link status is up, just wait to retrieve the link */
+			if (mii_bmsr_data & BMSR_LSTATUS) {
+				for (i = 0; i < AT_SUSPEND_LINK_TIMEOUT; i++) {
+					msleep(100);
+					atl1e_read_phy_reg(hw, MII_BMSR,
+							&mii_bmsr_data);
+					if (mii_bmsr_data & BMSR_LSTATUS)
+						break;
+				}
+
+				if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
+					netdev_dbg(adapter->netdev,
+						   "Link may change during suspend\n");
+			}
+			wol_ctrl_data |=  WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
+			/* only link up can wake up */
+			if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) {
+				netdev_dbg(adapter->netdev,
+					   "write phy register failed\n");
+				goto wol_dis;
+			}
+		}
+		/* clear phy interrupt */
+		atl1e_read_phy_reg(hw, MII_INT_STATUS, &mii_intr_status_data);
+		/* Config MAC Ctrl register */
+		mac_ctrl_data = MAC_CTRL_RX_EN;
+		/* set to 10/100M half duplex */
+		mac_ctrl_data |= MAC_CTRL_SPEED_10_100 << MAC_CTRL_SPEED_SHIFT;
+		mac_ctrl_data |= (((u32)adapter->hw.preamble_len &
+				 MAC_CTRL_PRMLEN_MASK) <<
+				 MAC_CTRL_PRMLEN_SHIFT);
+
+		__atl1e_vlan_mode(netdev->features, &mac_ctrl_data);
+
+		/* magic packet may be a Broadcast, Multicast or Unicast frame */
+		if (wufc & AT_WUFC_MAG)
+			mac_ctrl_data |= MAC_CTRL_BC_EN;
+
+		netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n",
+			   mac_ctrl_data);
+
+		AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
+		AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
+		/* pcie patch */
+		ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
+		ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+		AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+		pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+		goto suspend_exit;
+	}
+wol_dis:
+
+	/* WOL disabled */
+	AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
+
+	/* pcie patch */
+	ctrl = AT_READ_REG(hw, REG_PCIE_PHYMISC);
+	ctrl |= PCIE_PHYMISC_FORCE_RCV_DET;
+	AT_WRITE_REG(hw, REG_PCIE_PHYMISC, ctrl);
+
+	atl1e_force_ps(hw);
+	hw->phy_configured = false; /* re-init PHY on resume */
+
+	pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+
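+	/* Both the WOL-armed path above and the wol_dis path converge below:
+	 * release the IRQ if the interface was running, then disable the
+	 * device and drop it into the PCI power state chosen by the core.
+	 */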
+suspend_exit: + + if (netif_running(netdev)) + atl1e_free_irq(adapter); + + pci_disable_device(pdev); + + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +#ifdef CONFIG_PM +static int atl1e_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + + err = pci_enable_device(pdev); + if (err) { + netdev_err(adapter->netdev, + "Cannot enable PCI device from suspend\n"); + return err; + } + + pci_set_master(pdev); + + AT_READ_REG(&adapter->hw, REG_WOL_CTRL); /* clear WOL status */ + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); + + if (netif_running(netdev)) { + err = atl1e_request_irq(adapter); + if (err) + return err; + } + + atl1e_reset_hw(&adapter->hw); + + if (netif_running(netdev)) + atl1e_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void atl1e_shutdown(struct pci_dev *pdev) +{ + atl1e_suspend(pdev, PMSG_SUSPEND); +} + +static const struct net_device_ops atl1e_netdev_ops = { + .ndo_open = atl1e_open, + .ndo_stop = atl1e_close, + .ndo_start_xmit = atl1e_xmit_frame, + .ndo_get_stats = atl1e_get_stats, + .ndo_set_rx_mode = atl1e_set_multi, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = atl1e_set_mac_addr, + .ndo_fix_features = atl1e_fix_features, + .ndo_set_features = atl1e_set_features, + .ndo_change_mtu = atl1e_change_mtu, + .ndo_do_ioctl = atl1e_ioctl, + .ndo_tx_timeout = atl1e_tx_timeout, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = atl1e_netpoll, +#endif + +}; + +static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev) +{ + SET_NETDEV_DEV(netdev, &pdev->dev); + pci_set_drvdata(pdev, netdev); + + netdev->netdev_ops = &atl1e_netdev_ops; + + netdev->watchdog_timeo = AT_TX_WATCHDOG; + atl1e_set_ethtool_ops(netdev); + + netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = netdev->hw_features | NETIF_F_LLTX | + NETIF_F_HW_VLAN_CTAG_TX; + /* not enabled by default */ + netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS; + return 0; +} + +/** + * atl1e_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in atl1e_pci_tbl + * + * Returns 0 on success, negative on failure + * + * atl1e_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + */ +static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct atl1e_adapter *adapter = NULL; + static int cards_found; + + int err = 0; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + return err; + } + + /* + * The atl1e chip can DMA to 64-bit addresses, but it uses a single + * shared register for the high 32 bits, so only a single, aligned, + * 4 GB physical address range can be used at a time. + * + * Supporting 64-bit DMA on this hardware is more trouble than it's + * worth. It is far easier to limit to 32-bit DMA than update + * various kernel subsystems to support the mechanics required by a + * fixed-high-32-bit system. 
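+ *
+ * Hence the probe below simply fails if even a 32-bit mask cannot be
+ * set, instead of attempting any 64-bit DMA configuration.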
+ */
+	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
+	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
+		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
+		goto err_dma;
+	}
+
+	err = pci_request_regions(pdev, atl1e_driver_name);
+	if (err) {
+		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
+		goto err_pci_reg;
+	}
+
+	pci_set_master(pdev);
+
+	netdev = alloc_etherdev(sizeof(struct atl1e_adapter));
+	if (netdev == NULL) {
+		err = -ENOMEM;
+		goto err_alloc_etherdev;
+	}
+
+	err = atl1e_init_netdev(netdev, pdev);
+	if (err) {
+		netdev_err(netdev, "init netdevice failed\n");
+		goto err_init_netdev;
+	}
+	adapter = netdev_priv(netdev);
+	adapter->bd_number = cards_found;
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	adapter->hw.adapter = adapter;
+	adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
+	if (!adapter->hw.hw_addr) {
+		err = -EIO;
+		netdev_err(netdev, "cannot map device registers\n");
+		goto err_ioremap;
+	}
+
+	/* init mii data */
+	adapter->mii.dev = netdev;
+	adapter->mii.mdio_read  = atl1e_mdio_read;
+	adapter->mii.mdio_write = atl1e_mdio_write;
+	adapter->mii.phy_id_mask = 0x1f;
+	adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
+
+	netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
+
+	setup_timer(&adapter->phy_config_timer, atl1e_phy_config,
+		    (unsigned long)adapter);
+
+	/* get user settings */
+	atl1e_check_options(adapter);
+	/*
+	 * Mark all PCI regions associated with PCI device
+	 * pdev as being reserved by owner atl1e_driver_name
+	 * Enables bus-mastering on the device and calls
+	 * pcibios_set_master to do the needed arch specific settings
+	 */
+	atl1e_setup_pcicmd(pdev);
+	/* setup the private structure */
+	err = atl1e_sw_init(adapter);
+	if (err) {
+		netdev_err(netdev, "net device private data init failed\n");
+		goto err_sw_init;
+	}
+
+	/* Init GPHY as early as possible due to power saving issue  */
+	atl1e_phy_init(&adapter->hw);
+	/* reset the controller to
+	 * put the device in a known good starting state */
+	err = atl1e_reset_hw(&adapter->hw);
+	if (err) {
+		err = -EIO;
+		goto err_reset;
+	}
+
+	if (atl1e_read_mac_addr(&adapter->hw) != 0) {
+		err = -EIO;
+		netdev_err(netdev, "get mac address failed\n");
+		goto err_eeprom;
+	}
+
+	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+	netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
+
+	INIT_WORK(&adapter->reset_task, atl1e_reset_task);
+	INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
+	netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
+	err = register_netdev(netdev);
+	if (err) {
+		netdev_err(netdev, "register netdevice failed\n");
+		goto err_register;
+	}
+
+	/* assume we have no link for now */
+	netif_stop_queue(netdev);
+	netif_carrier_off(netdev);
+
+	cards_found++;
+
+	return 0;
+
+err_reset:
+err_register:
+err_sw_init:
+err_eeprom:
+	pci_iounmap(pdev, adapter->hw.hw_addr);
+err_init_netdev:
+err_ioremap:
+	free_netdev(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * atl1e_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * atl1e_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
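+ *
+ * Teardown order in the body below (a sketch): stop the watchdog timer
+ * and reset/link-change work first, then unregister_netdev(), then free
+ * the DMA rings and unmap the registers, and finally release the PCI
+ * regions and disable the device.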
+ */
+static void atl1e_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+	/*
+	 * flush_scheduled_work() may reschedule our watchdog task, so
+	 * explicitly disable watchdog tasks from being rescheduled
+	 */
+	set_bit(__AT_DOWN, &adapter->flags);
+
+	atl1e_del_timer(adapter);
+	atl1e_cancel_work(adapter);
+
+	unregister_netdev(netdev);
+	atl1e_free_ring_resources(adapter);
+	atl1e_force_ps(&adapter->hw);
+	pci_iounmap(pdev, adapter->hw.hw_addr);
+	pci_release_regions(pdev);
+	free_netdev(netdev);
+	pci_disable_device(pdev);
+}
+
+/**
+ * atl1e_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t
+atl1e_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (netif_running(netdev))
+		atl1e_down(adapter);
+
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * atl1e_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first half of the atl1e_resume routine.
+ */
+static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct atl1e_adapter *adapter = netdev_priv(netdev);
+
+	if (pci_enable_device(pdev)) {
+		netdev_err(adapter->netdev,
+			   "Cannot re-enable PCI device after reset\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	atl1e_reset_hw(&adapter->hw);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * atl1e_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second half of the atl1e_resume routine.
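+ *
+ * In outline (a sketch of the body below, error handling elided):
+ *
+ *   if (netif_running(netdev))
+ *           atl1e_up(adapter);
+ *   netif_device_attach(netdev);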
+ */ +static void atl1e_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct atl1e_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) { + if (atl1e_up(adapter)) { + netdev_err(adapter->netdev, + "can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +static const struct pci_error_handlers atl1e_err_handler = { + .error_detected = atl1e_io_error_detected, + .slot_reset = atl1e_io_slot_reset, + .resume = atl1e_io_resume, +}; + +static struct pci_driver atl1e_driver = { + .name = atl1e_driver_name, + .id_table = atl1e_pci_tbl, + .probe = atl1e_probe, + .remove = atl1e_remove, + /* Power Management Hooks */ +#ifdef CONFIG_PM + .suspend = atl1e_suspend, + .resume = atl1e_resume, +#endif + .shutdown = atl1e_shutdown, + .err_handler = &atl1e_err_handler +}; + +module_pci_driver(atl1e_driver); diff --git a/addons/atl1e/src/4.4.180/atl1e_param.c b/addons/atl1e/src/4.4.180/atl1e_param.c new file mode 100644 index 00000000..fa314282 --- /dev/null +++ b/addons/atl1e/src/4.4.180/atl1e_param.c @@ -0,0 +1,269 @@ +/* + * Copyright(c) 2007 Atheros Corporation. All rights reserved. + * + * Derived from Intel e1000 driver + * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. + */ + +#include + +#include "atl1e.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define ATL1E_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ +#define ATL1E_PARAM_INIT { [0 ... 
ATL1E_MAX_NIC] = OPTION_UNSET }
+
+#define ATL1E_PARAM(x, desc) \
+	static int x[ATL1E_MAX_NIC + 1] = ATL1E_PARAM_INIT; \
+	static unsigned int num_##x; \
+	module_param_array_named(x, x, int, &num_##x, 0); \
+	MODULE_PARM_DESC(x, desc);
+
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 32-1020
+ *
+ * Default Value: 128
+ */
+#define ATL1E_MIN_TX_DESC_CNT		32
+#define ATL1E_MAX_TX_DESC_CNT		1020
+#define ATL1E_DEFAULT_TX_DESC_CNT	128
+ATL1E_PARAM(tx_desc_cnt, "Transmit descriptor count");
+
+/* Receive Memory Block Count
+ *
+ * Valid Range: 8-1024
+ *
+ * Default Value: 256
+ */
+#define ATL1E_MIN_RX_MEM_SIZE		8    /* 8KB   */
+#define ATL1E_MAX_RX_MEM_SIZE		1024 /* 1MB   */
+#define ATL1E_DEFAULT_RX_MEM_SIZE	256  /* 256KB */
+ATL1E_PARAM(rx_mem_size, "memory size of rx buffer(KB)");
+
+/* User Specified MediaType Override
+ *
+ * Valid Range: 0-4
+ *  - 0 - auto-negotiate at all supported speeds
+ *  - 1 - only link at 100Mbps Full Duplex
+ *  - 2 - only link at 100Mbps Half Duplex
+ *  - 3 - only link at 10Mbps Full Duplex
+ *  - 4 - only link at 10Mbps Half Duplex
+ * Default Value: 0
+ */
+
+ATL1E_PARAM(media_type, "MediaType Select");
+
+/* Interrupt Moderate Timer in units of 2 us
+ *
+ * Valid Range: 50-65000
+ *
+ * Default Value: 100 (200us)
+ */
+#define INT_MOD_DEFAULT_CNT	100 /* 200us */
+#define INT_MOD_MAX_CNT		65000
+#define INT_MOD_MIN_CNT		50
+ATL1E_PARAM(int_mod_timer, "Interrupt Moderator Timer");
+
+#define AUTONEG_ADV_DEFAULT	0x2F
+#define AUTONEG_ADV_MASK	0x2F
+#define FLOW_CONTROL_DEFAULT	FLOW_CONTROL_FULL
+
+#define FLASH_VENDOR_DEFAULT	0
+#define FLASH_VENDOR_MIN	0
+#define FLASH_VENDOR_MAX	2
+
+struct atl1e_option {
+	enum { enable_option, range_option, list_option } type;
+	char *name;
+	char *err;
+	int  def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			struct atl1e_opt_list { int i; char *str; } *p;
+		} l;
+	} arg;
+};
+
+static int atl1e_validate_option(int *value, struct atl1e_option *opt,
+				 struct atl1e_adapter *adapter)
+{
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			netdev_info(adapter->netdev,
+				    "%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			netdev_info(adapter->netdev,
+				    "%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			netdev_info(adapter->netdev, "%s set to %i\n",
+				    opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+		struct atl1e_opt_list *ent;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					netdev_info(adapter->netdev,
+						    "%s\n", ent->str);
+				return 0;
+			}
+		}
+		break;
+	}
+	default:
+		BUG();
+	}
+
+	netdev_info(adapter->netdev, "Invalid %s specified (%i) %s\n",
+		    opt->name, *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+/**
+ * atl1e_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
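+ *
+ * Example (illustrative values only; each parameter is a per-board
+ * array, so the Nth value applies to the Nth adapter probed):
+ *
+ *   insmod atl1e.ko tx_desc_cnt=256 rx_mem_size=512 int_mod_timer=100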
+ */
+void atl1e_check_options(struct atl1e_adapter *adapter)
+{
+	int bd = adapter->bd_number;
+
+	if (bd >= ATL1E_MAX_NIC) {
+		netdev_notice(adapter->netdev,
+			      "no configuration for board #%i\n", bd);
+		netdev_notice(adapter->netdev,
+			      "Using defaults for all values\n");
+	}
+
+	{ 		/* Transmit Ring Size */
+		struct atl1e_option opt = {
+			.type = range_option,
+			.name = "Transmit Descriptor Count",
+			.err  = "using default of "
+				__MODULE_STRING(ATL1E_DEFAULT_TX_DESC_CNT),
+			.def  = ATL1E_DEFAULT_TX_DESC_CNT,
+			.arg  = { .r = { .min = ATL1E_MIN_TX_DESC_CNT,
+					 .max = ATL1E_MAX_TX_DESC_CNT} }
+		};
+		int val;
+		if (num_tx_desc_cnt > bd) {
+			val = tx_desc_cnt[bd];
+			atl1e_validate_option(&val, &opt, adapter);
+			adapter->tx_ring.count = (u16) val & 0xFFFC;
+		} else
+			adapter->tx_ring.count = (u16)opt.def;
+	}
+
+	{ 		/* Receive Memory Block Count */
+		struct atl1e_option opt = {
+			.type = range_option,
+			.name = "Memory size of rx buffer(KB)",
+			.err  = "using default of "
+				__MODULE_STRING(ATL1E_DEFAULT_RX_MEM_SIZE),
+			.def  = ATL1E_DEFAULT_RX_MEM_SIZE,
+			.arg  = { .r = { .min = ATL1E_MIN_RX_MEM_SIZE,
+					 .max = ATL1E_MAX_RX_MEM_SIZE} }
+		};
+		int val;
+		if (num_rx_mem_size > bd) {
+			val = rx_mem_size[bd];
+			atl1e_validate_option(&val, &opt, adapter);
+			adapter->rx_ring.page_size = (u32)val * 1024;
+		} else {
+			adapter->rx_ring.page_size = (u32)opt.def * 1024;
+		}
+	}
+
+	{ 		/* Interrupt Moderate Timer */
+		struct atl1e_option opt = {
+			.type = range_option,
+			.name = "Interrupt Moderate Timer",
+			.err  = "using default of "
+				__MODULE_STRING(INT_MOD_DEFAULT_CNT),
+			.def  = INT_MOD_DEFAULT_CNT,
+			.arg  = { .r = { .min = INT_MOD_MIN_CNT,
+					 .max = INT_MOD_MAX_CNT} }
+		};
+		int val;
+		if (num_int_mod_timer > bd) {
+			val = int_mod_timer[bd];
+			atl1e_validate_option(&val, &opt, adapter);
+			adapter->hw.imt = (u16) val;
+		} else
+			adapter->hw.imt = (u16)(opt.def);
+	}
+
+	{ 		/* MediaType */
+		struct atl1e_option opt = {
+			.type = range_option,
+			.name = "Speed/Duplex Selection",
+			.err  = "using default of "
+				__MODULE_STRING(MEDIA_TYPE_AUTO_SENSOR),
+			.def  = MEDIA_TYPE_AUTO_SENSOR,
+			.arg  = { .r = { .min = MEDIA_TYPE_AUTO_SENSOR,
+					 .max = MEDIA_TYPE_10M_HALF} }
+		};
+		int val;
+		if (num_media_type > bd) {
+			val = media_type[bd];
+			atl1e_validate_option(&val, &opt, adapter);
+			adapter->hw.media_type = (u16) val;
+		} else
+			adapter->hw.media_type = (u16)(opt.def);
+
+	}
+} diff --git a/addons/compile-addons.sh b/addons/compile-addons.sh new file mode 100755 index 00000000..aff1a862 --- /dev/null +++ b/addons/compile-addons.sh @@ -0,0 +1,186 @@
+#!/usr/bin/env bash
+
+set -e
+
+TMP_PATH="/tmp"
+DEST_PATH="../files/board/arpl/p3/addons"
+
+###############################################################################
+function trap_cancel() {
+  echo "Press Control+C once more to terminate the process (or wait 2s for it to restart)"
+  sleep 2 || exit 1
+}
+trap trap_cancel SIGINT SIGTERM
+
+###############################################################################
+function die() {
+  echo -e "\033[1;31m$@\033[0m"
+  exit 1
+}
+
+###############################################################################
+# Check if key exists in yaml config file
+# 1 - Path of parent key
+# 2 - Name of key to check
+# 3 - Path of yaml config file
+function hasConfigKey() {
+  [ "`yq eval '.'${1}' | has("'${2}'")' "${3}"`" == "true" ] && return 0 || return 1
+}
+
+###############################################################################
+# Read key value from yaml config file
+# 1 - Path of key
+# 2 - Path of yaml config file
+# Returns the value of the key, or empty if null
+function readConfigKey() {
+  RESULT=`yq eval '.'${1}' | explode(.)' "${2}"`
+  [ "${RESULT}" == "null" ] && echo "" || echo ${RESULT}
+}
+
+###############################################################################
+# Read Entries as map(key=value) from yaml config file
+# 1 - Path of key
+# 2 - Path of yaml config file
+# Returns map of values
+function readConfigMap() {
+  yq eval '.'${1}' | explode(.) | to_entries | map([.key, .value] | join("=")) | .[]' "${2}"
+}
+
+###############################################################################
+# Read an array from yaml config file
+# 1 - Path of key
+# 2 - Path of yaml config file
+# Returns array/map of values
+function readConfigArray() {
+  yq eval '.'${1} "${2}"
+}
+
+###############################################################################
+# Read Entries as array from yaml config file
+# 1 - Path of key
+# 2 - Path of yaml config file
+# Returns array of values
+function readConfigEntriesArray() {
+  yq eval '.'${1}' | explode(.) | to_entries | map([.key])[] | .[]' "${2}"
+}
+
+
+###############################################################################
+function compile-addon() {
+  MANIFEST="${1}/manifest.yml"
+  [ ! -f "${MANIFEST}" ] && die "${MANIFEST} not found"
+  echo -e "\033[7mProcessing manifest ${MANIFEST}\033[0m"
+  OUT_PATH="${TMP_PATH}/${1}"
+  rm -rf "${OUT_PATH}"
+  mkdir -p "${OUT_PATH}"
+  VER=`readConfigKey "version" "${MANIFEST}"`
+  [ ${VER} -ne 1 ] && die "Error, version ${VER} of manifest not supported"
+  cp "${MANIFEST}" "${OUT_PATH}"
+  # Check if files exist for all platforms
+  if hasConfigKey "" "all" "${MANIFEST}"; then
+    echo -e "\033[1;32m  Processing 'all' section\033[0m"
+    mkdir -p "${OUT_PATH}/all/root"
+    HAS_FILES=0
+    # Get name of script to install, if defined. This script has low priority
+    INSTALL_SCRIPT="`readConfigKey "all.install-script" "${MANIFEST}"`"
+    if [ -n "${INSTALL_SCRIPT}" ]; then
+      if [ -f "${1}/${INSTALL_SCRIPT}" ]; then
+        echo -e "\033[1;35m    Copying install script ${INSTALL_SCRIPT}\033[0m"
+        cp "${1}/${INSTALL_SCRIPT}" "${OUT_PATH}/all/install.sh"
+        HAS_FILES=1
+      else
+        echo -e "\033[1;33m    WARNING: install script '${INSTALL_SCRIPT}' not found\033[0m"
+      fi
+    fi
+    # Get folder name for copy
+    COPY_PATH="`readConfigKey "all.copy" "${MANIFEST}"`"
+    # If folder exists, copy
+    if [ -n "${COPY_PATH}" ]; then
+      if [ -d "${1}/${COPY_PATH}" ]; then
+        echo -e "\033[1;35m    Copying folder '${COPY_PATH}'\033[0m"
+        cp -R "${1}/${COPY_PATH}/"* "${OUT_PATH}/all/root"
+        HAS_FILES=1
+      else
+        echo -e "\033[1;33m    WARNING: folder '${COPY_PATH}' not found\033[0m"
+      fi
+    fi
+    if [ ${HAS_FILES} -eq 1 ]; then
+      # Create gzipped tar
+      tar caf "${OUT_PATH}/all.tgz" -C "${OUT_PATH}/all" .
+      echo -e "\033[1;36m  Created file '${OUT_PATH}/all.tgz' \033[0m"
+    fi
+    # Clean
+    rm -rf "${OUT_PATH}/all"
+  fi
+  unset AVAL_FOR
+  declare -a AVAL_FOR
+  for P in `readConfigEntriesArray "available-for" "${MANIFEST}"`; do
+    AVAL_FOR+=(${P})
+  done
+  [ ${#AVAL_FOR} -eq 0 ] && return
+
+  # Loop in each available platform-kver
+  for P in ${AVAL_FOR[@]}; do
+    echo -e "\033[1;32m  Processing '${P}' platform-kver section\033[0m"
+    mkdir -p "${OUT_PATH}/${P}/root"
+    HAS_FILES=0
+    # Get name of script to install, if defined. This script has high priority
+    INSTALL_SCRIPT="`readConfigKey 'available-for."'${P}'".install-script' "${MANIFEST}"`"
+    if [ -n "${INSTALL_SCRIPT}" ]; then
+      if [ -f "${1}/${INSTALL_SCRIPT}" ]; then
+        echo -e "\033[1;35m    Copying install script ${INSTALL_SCRIPT}\033[0m"
+        cp "${1}/${INSTALL_SCRIPT}" "${OUT_PATH}/${P}/install.sh"
+        HAS_FILES=1
+      else
+        echo -e "\033[1;33m    WARNING: install script '${INSTALL_SCRIPT}' not found\033[0m"
+      fi
+    fi
+    # Get folder name for copy
+    COPY_PATH="`readConfigKey 'available-for."'${P}'".copy' "${MANIFEST}"`"
+    # If folder exists, copy
+    if [ -n "${COPY_PATH}" ]; then
+      if [ -d "${1}/${COPY_PATH}" ]; then
+        echo -e "\033[1;35m    Copying folder '${COPY_PATH}'\033[0m"
+        cp -R "${1}/${COPY_PATH}/"* "${OUT_PATH}/${P}/root"
+        HAS_FILES=1
+      else
+        echo -e "\033[1;33m    WARNING: folder '${COPY_PATH}' not found\033[0m"
+      fi
+    fi
+    HAS_MODULES="`readConfigKey 'available-for."'${P}'".modules' "${MANIFEST}"`"
+    # Check if there are modules to compile
+    if [ "${HAS_MODULES}" = "true" ]; then
+      echo "Compiling modules"
+      PLATFORM="`echo ${P} | cut -d'-' -f1`"
+      KVER="`echo ${P} | cut -d'-' -f2`"
+      # Compile using docker
+      docker run --rm -t --user `id -u` -v "${TMP_PATH}":/output \
+        -v "${PWD}/${1}/src/${KVER}":/input syno-compiler compile-module ${PLATFORM}
+      mkdir -p "${OUT_PATH}/${P}/root/modules"
+      mv "${TMP_PATH}/"*.ko "${OUT_PATH}/${P}/root/modules/"
+      HAS_FILES=1
+    fi
+    if [ ${HAS_FILES} -eq 1 ]; then
+      # Create gzipped tar
+      tar caf "${OUT_PATH}/${P}.tgz" -C "${OUT_PATH}/${P}" .
+      echo -e "\033[1;36m  Created file '${P}.tgz' \033[0m"
+    fi
+    # Clean
+    rm -rf "${OUT_PATH}/${P}"
+  done
+  # Update files for image
+  rm -rf "${DEST_PATH}/${1}"
+  mkdir -p "${DEST_PATH}/${1}"
+  cp "${OUT_PATH}/"* "${DEST_PATH}/${1}/"
+}
+
+# Main
+if [ $# -ge 1 ]; then
+  for A in $@; do
+    compile-addon ${A}
+  done
+else
+  while read D; do
+    DRIVER=`basename ${D}`
+    [ "${DRIVER}" = "."
] && continue + compile-addon ${DRIVER} + done < <(find -maxdepth 1 -type d) +fi diff --git a/addons/e1000/install.sh b/addons/e1000/install.sh new file mode 100644 index 00000000..83efefa8 --- /dev/null +++ b/addons/e1000/install.sh @@ -0,0 +1,4 @@ +if [ "${1}" = "rd" ]; then + echo "Installing module for Intel(R) PRO/1000 Gigabit Ethernet adapter" + ${INSMOD} "/modules/e1000.ko" ${PARAMS} +fi diff --git a/addons/e1000/manifest.yml b/addons/e1000/manifest.yml new file mode 100644 index 00000000..017e1f50 --- /dev/null +++ b/addons/e1000/manifest.yml @@ -0,0 +1,29 @@ +version: 1 +name: e1000 +description: "Driver for Intel(R) PRO/1000 Gigabit Ethernet adapters" +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true + diff --git a/addons/e1000/src/3.10.108/Makefile b/addons/e1000/src/3.10.108/Makefile new file mode 100644 index 00000000..59afd9fd --- /dev/null +++ b/addons/e1000/src/3.10.108/Makefile @@ -0,0 +1,2 @@ +obj-m += e1000.o +e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o diff --git a/addons/e1000/src/3.10.108/e1000.h b/addons/e1000/src/3.10.108/e1000.h new file mode 100644 index 00000000..26d9cd59 --- /dev/null +++ b/addons/e1000/src/3.10.108/e1000.h @@ -0,0 +1,365 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/dma-mapping.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/capability.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#include <net/checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#define BAR_0 0
+#define BAR_1 1
+#define BAR_5 5
+
+#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
+	PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+struct e1000_adapter;
+
+#include "e1000_hw.h"
+
+#define E1000_MAX_INTR 10
+
+/* TX/RX descriptor defines */
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 256
+#define E1000_MIN_TXD 48
+#define E1000_MAX_82544_TXD 4096
+
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 256
+#define E1000_MIN_RXD 48
+#define E1000_MAX_82544_RXD 4096
+
+#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
+#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_128 128 /* Used for packet split */
+#define E1000_RXBUFFER_256 256 /* Used for packet split */
+#define E1000_RXBUFFER_512 512
+#define E1000_RXBUFFER_1024 1024
+#define E1000_RXBUFFER_2048 2048
+#define E1000_RXBUFFER_4096 4096
+#define E1000_RXBUFFER_8192 8192
+#define E1000_RXBUFFER_16384 16384
+
+/* SmartSpeed delimiters */
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX 15
+
+/* Packet Buffer allocations */
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
+
+/* Flow Control Watermarks */
+#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */
+
+#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+#define E1000_TX_QUEUE_WAKE 16
+/* How many Rx Buffers do we bundle into one write to the hardware ?
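
Aside: the ITR bounds defined above encode a simple reciprocal relationship between the throttle interval in microseconds and the resulting interrupt rate, which is also how e1000_set_coalesce converts the value further down in e1000_ethtool.c. A minimal standalone sketch of that arithmetic (hypothetical helper, not driver code):

#include <stdio.h>

/* 1 second = 1000000 us, so an interval of N us allows 1000000 / N
 * interrupts per second: 10 us -> 100000 irq/sec and 10000 us ->
 * 100 irq/sec, matching the E1000_MIN/MAX_ITR_USECS comments above. */
static unsigned int itr_usecs_to_irq_rate(unsigned int usecs)
{
    return 1000000u / usecs;
}

int main(void)
{
    printf("%u %u\n", itr_usecs_to_irq_rate(10), itr_usecs_to_irq_rate(10000));
    return 0;
}
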
*/ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define E1000_MNG_VLAN_NONE (-1) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_buffer { + struct sk_buff *skb; + dma_addr_t dma; + struct page *page; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + unsigned int segs; + unsigned int bytecount; + u16 mapped_as_page; +}; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + + u16 tdh; + u16 tdt; + bool last_tx_tso; +}; + +struct e1000_rx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + struct sk_buff *rx_skb_top; + + /* cpu for rx queue */ + int cpu; + + u16 rdh; + u16 rdt; +}; + +#define E1000_DESC_UNUSED(R) \ + ((((R)->next_to_clean > (R)->next_to_use) \ + ? 
0 : (R)->count) + (R)->next_to_clean - (R)->next_to_use - 1) + +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +/* board specific private data structure */ + +struct e1000_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 mng_vlan_id; + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + spinlock_t stats_lock; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + u8 fc_autoneg; + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ + unsigned int restart_queue; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotcl; + u64 gotcl_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + bool dump_buffers; + + /* RX */ + bool (*clean_rx)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); + void (*alloc_rx_buf)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + struct e1000_rx_ring *rx_ring; /* One per active queue */ + struct napi_struct napi; + + int num_tx_queues; + int num_rx_queues; + + u64 hw_csum_err; + u64 hw_csum_good; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + u32 gorcl; + u64 gorcl_old; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; + + int msg_enable; + + /* to not mess up cache alignment, always add to the bottom */ + bool tso_force; + bool smart_power_down; /* phy smart power down */ + bool quad_port_a; + unsigned long flags; + u32 eeprom_wol; + + /* for ioport free */ + int bars; + int need_ioport; + + bool discarding; + + struct work_struct reset_task; + struct delayed_work watchdog_task; + struct delayed_work fifo_stall_task; + struct delayed_work phy_info_task; + + struct mutex mutex; +}; + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN +}; + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +extern struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); +#define e_dbg(format, arg...) \ + netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_notice(msglvl, format, arg...) \ + netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_dev_info(format, arg...) 
\ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) \ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) + +extern char e1000_driver_name[]; +extern const char e1000_driver_version[]; + +extern int e1000_up(struct e1000_adapter *adapter); +extern void e1000_down(struct e1000_adapter *adapter); +extern void e1000_reinit_locked(struct e1000_adapter *adapter); +extern void e1000_reset(struct e1000_adapter *adapter); +extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); +extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +extern void e1000_update_stats(struct e1000_adapter *adapter); +extern bool e1000_has_link(struct e1000_adapter *adapter); +extern void e1000_power_up_phy(struct e1000_adapter *); +extern void e1000_set_ethtool_ops(struct net_device *netdev); +extern void e1000_check_options(struct e1000_adapter *adapter); +extern char *e1000_get_hw_dev_name(struct e1000_hw *hw); + +#endif /* _E1000_H_ */ diff --git a/addons/e1000/src/3.10.108/e1000_ethtool.c b/addons/e1000/src/3.10.108/e1000_ethtool.c new file mode 100644 index 00000000..82a967c9 --- /dev/null +++ b/addons/e1000/src/3.10.108/e1000_ethtool.c @@ -0,0 +1,1909 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for e1000 */
+
+#include "e1000.h"
+#include <asm/uaccess.h>
+
+enum {NETDEV_STATS, E1000_STATS};
+
+struct e1000_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int type;
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define E1000_STAT(m) E1000_STATS, \
+	sizeof(((struct e1000_adapter *)0)->m), \
+	offsetof(struct e1000_adapter, m)
+#define E1000_NETDEV_STAT(m) NETDEV_STATS, \
+	sizeof(((struct net_device *)0)->m), \
+	offsetof(struct net_device, m)
+
+static const struct e1000_stats e1000_gstrings_stats[] = {
+	{ "rx_packets", E1000_STAT(stats.gprc) },
+	{ "tx_packets", E1000_STAT(stats.gptc) },
+	{ "rx_bytes", E1000_STAT(stats.gorcl) },
+	{ "tx_bytes", E1000_STAT(stats.gotcl) },
+	{ "rx_broadcast", E1000_STAT(stats.bprc) },
+	{ "tx_broadcast", E1000_STAT(stats.bptc) },
+	{ "rx_multicast", E1000_STAT(stats.mprc) },
+	{ "tx_multicast", E1000_STAT(stats.mptc) },
+	{ "rx_errors", E1000_STAT(stats.rxerrc) },
+	{ "tx_errors", E1000_STAT(stats.txerrc) },
+	{ "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
+	{ "multicast", E1000_STAT(stats.mprc) },
+	{ "collisions", E1000_STAT(stats.colc) },
+	{ "rx_length_errors", E1000_STAT(stats.rlerrc) },
+	{ "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
+	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
+	{ "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
+	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
+	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
+	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
+	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
+	{ "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
+	{ "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
+	{ "tx_window_errors", E1000_STAT(stats.latecol) },
+	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
+	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
+	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+	{ "tx_restart_queue", E1000_STAT(restart_queue) },
+	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
+	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
+	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
+	{ "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+	{ "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+	{ "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+	{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+	{ "rx_long_byte_count", E1000_STAT(stats.gorcl) },
+	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+	{ "tx_smbus", E1000_STAT(stats.mgptc) },
+	{ "rx_smbus", E1000_STAT(stats.mgprc) },
+	{ "dropped_smbus", E1000_STAT(stats.mgpdc) },
+};
+
+#define E1000_QUEUE_STATS_LEN 0
+#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test (offline)", "Eeprom test (offline)",
+	"Interrupt test (offline)", "Loopback test (offline)",
+	"Link test (on/offline)"
+};
+#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
+
+static int e1000_get_settings(struct net_device *netdev,
+			      struct ethtool_cmd
*ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->media_type == e1000_media_type_copper) { + + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + + if (hw->autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy_addr; + + if (hw->mac_type == e1000_82543) + ecmd->transceiver = XCVR_EXTERNAL; + else + ecmd->transceiver = XCVR_INTERNAL; + + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + + if (hw->mac_type >= e1000_82545) + ecmd->transceiver = XCVR_INTERNAL; + else + ecmd->transceiver = XCVR_EXTERNAL; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + + e1000_get_speed_and_duplex(hw, &adapter->link_speed, + &adapter->link_duplex); + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF + */ + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, -1); + ecmd->duplex = -1; + } + + ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 1; MDI => 0 */ + if ((hw->media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? + ETH_TP_MDI_X : ETH_TP_MDI); + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->mdix; + return 0; +} + +static int e1000_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
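
Aside: the comment opening e1000_set_settings here states the rule the function enforces just below: a forced MDI/MDI-X setting is honored only on copper media, and only while autonegotiation is enabled (or when "auto" MDI-X is requested). A standalone sketch of that validation logic (hypothetical types and helper, not driver code):

#include <stdbool.h>

enum media_type { MEDIA_COPPER, MEDIA_FIBER };

/* Mirrors the driver's checks: non-copper -> -EOPNOTSUPP,
 * forced MDI without autoneg (unless "auto") -> -EINVAL. */
static bool mdix_request_ok(enum media_type media, bool autoneg,
                            bool mdix_is_auto, bool mdix_requested)
{
    if (!mdix_requested)
        return true;              /* nothing to validate */
    if (media != MEDIA_COPPER)
        return false;             /* MDI control is a copper-PHY feature */
    if (!mdix_is_auto && !autoneg)
        return false;             /* forcing MDI needs autoneg enabled */
    return true;
}
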
+ */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + ecmd->advertising = hw->autoneg_advertised; + } else { + u32 speed = ethtool_cmd_speed(ecmd); + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { + clear_bit(__E1000_RESETTING, &adapter->flags); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->mdix = AUTO_ALL_MODES; + else + hw->mdix = ecmd->eth_tp_mdix_ctrl; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +} + +static u32 e1000_get_link(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + adapter->hw.get_link_status = 1; + + return e1000_has_link(adapter); +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc == E1000_FC_RX_PAUSE) + pause->rx_pause = 1; + else if (hw->fc == E1000_FC_TX_PAUSE) + pause->tx_pause = 1; + else if (hw->fc == E1000_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_NONE; + + hw->original_fc = hw->fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else + e1000_reset(adapter); + } else + retval = ((hw->media_type == e1000_media_type_fiber) ? 
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw)); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy_type == e1000_phy_igp) { + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_A); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_B); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[14] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_C); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[15] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_D); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[16] = (u32)phy_data; /* cable length */ + regs_buff[17] = 0; /* extended 10bt distance (not needed) */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[18] = (u32)phy_data; /* cable polarity */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_PCS_INIT_REG); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[19] = (u32)phy_data; /* cable polarity */ + regs_buff[20] = 0; /* polarity correction enabled (always) */ + regs_buff[22] = 0; /* phy receive errors (unavailable) */ + regs_buff[23] = regs_buff[18]; /* mdix mode */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + } else { + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg 
dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if (hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + regs_buff[26] = er32(MANC); + } +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + return hw->eeprom.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->eeprom.type == e1000_eeprom_spi) + ret_val = e1000_read_eeprom(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_eeprom(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = e1000_write_eeprom(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part of 
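
Aside: e1000_set_eeprom above has to bridge ethtool's byte-oriented interface and the word (16-bit) addressable EEPROM, which is why the first and last partially covered words get a read-modify-write. A standalone sketch of the index arithmetic (hypothetical demo, not driver code):

#include <stdio.h>

int main(void)
{
    unsigned int offset = 5, len = 4;                 /* example byte range */
    unsigned int first_word = offset >> 1;            /* word 2 */
    unsigned int last_word = (offset + len - 1) >> 1; /* word 4 */
    int rmw_first = offset & 1;        /* range starts mid-word */
    int rmw_last = (offset + len) & 1; /* range ends mid-word */

    printf("words %u..%u, rmw first=%d last=%d\n",
           first_word, last_word, rmw_first, rmw_last);
    return 0;
}
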
the EEPROM if needed */ + if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) + e1000_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, e1000_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, e1000_driver_version, + sizeof(drvinfo->version)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->regdump_len = e1000_get_regs_len(netdev); + drvinfo->eedump_len = e1000_get_eeprom_len(netdev); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_rx_ring *rxdr = adapter->rx_ring; + + ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD : + E1000_MAX_82544_TXD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr, *tx_old; + struct e1000_rx_ring *rxdr, *rx_old; + int i, err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), + GFP_KERNEL); + if (!txdr) + goto err_alloc_tx; + + rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), + GFP_KERNEL); + if (!rxdr) + goto err_alloc_rx; + + adapter->tx_ring = txdr; + adapter->rx_ring = rxdr; + + rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ? + E1000_MAX_RXD : E1000_MAX_82544_RXD)); + rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); + + txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD); + txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ? 
+ E1000_MAX_TXD : E1000_MAX_82544_TXD)); + txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + for (i = 0; i < adapter->num_tx_queues; i++) + txdr[i].count = txdr->count; + for (i = 0; i < adapter->num_rx_queues; i++) + rxdr[i].count = rxdr->count; + + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again + */ + + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000_free_all_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + kfree(tx_old); + kfree(rx_old); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; + err = e1000_up(adapter); + if (err) + goto err_setup; + } + + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +err_setup_tx: + e1000_free_all_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rxdr); +err_alloc_rx: + kfree(txdr); +err_alloc_tx: + e1000_up(adapter); +err_setup: + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + static const u32 test[] = + {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + int i; + + for (i = 0; i < ARRAY_SIZE(test); i++) { + writel(write & test[i], address); + read = readl(address); + if (read != (write & test[i] & mask)) { + e_err(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, read, (write & test[i] & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + + writel(write & mask, address); + read = readl(address); + if ((read & mask) != (write & mask)) { + e_err(drv, "set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (read & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + u32 value, before, after; + u32 i, toggle; + struct e1000_hw *hw = &adapter->hw; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
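
Aside: reg_pattern_test above implements a classic register self-test: write a set of known bit patterns, read each back, and compare under a mask of the bits the hardware actually implements. A standalone sketch over a plain variable instead of a memory-mapped register (hypothetical demo, not driver code; the readback is masked here because ordinary memory, unlike the NIC, does not zero unimplemented bits):

#include <stdint.h>
#include <stdio.h>

static int pattern_test(volatile uint32_t *reg, uint32_t mask, uint32_t write)
{
    static const uint32_t test[] = {
        0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
    };
    unsigned int i;

    for (i = 0; i < sizeof(test) / sizeof(test[0]); i++) {
        *reg = write & test[i];
        /* only the masked (implemented) bits must read back as written */
        if ((*reg & mask) != (write & test[i] & mask))
            return 1;
    }
    return 0;
}

int main(void)
{
    volatile uint32_t fake_reg = 0;
    printf("%s\n", pattern_test(&fake_reg, 0x0000FFFF, 0xFFFFFFFF) ?
           "failed" : "passed");
    return 0;
}
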
+ */ + + /* there are several bits on newer hardware that are r/w */ + toggle = 0xFFFFF833; + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); + + REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); + + before = 0x06DFB3FE; + REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); + + if (hw->mac_type >= e1000_82543) { + REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); + value = E1000_RAR_ENTRIES; + for (i = 0; i < value; i++) { + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), 0x8003FFFF, + 0xFFFFFFFF); + } + } else { + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); + } + + value = E1000_MC_TBL_SIZE; + for (i = 0; i < value; i++) + REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)EEPROM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0; + bool shared_int = true; + u32 irq = adapter->pdev->irq; + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet + * Hook up test interrupt handler just for this test + */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) + shared_int = false; + else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", (shared_int ? 
+ "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (txdr->desc && txdr->buffer_info) { + for (i = 0; i < txdr->count; i++) { + if (txdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + txdr->buffer_info[i].dma, + txdr->buffer_info[i].length, + DMA_TO_DEVICE); + if (txdr->buffer_info[i].skb) + dev_kfree_skb(txdr->buffer_info[i].skb); + } + } + + if (rxdr->desc && rxdr->buffer_info) { + for (i = 0; i < rxdr->count; i++) { + if (rxdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rxdr->buffer_info[i].dma, + rxdr->buffer_info[i].length, + DMA_FROM_DEVICE); + if (rxdr->buffer_info[i].skb) + dev_kfree_skb(rxdr->buffer_info[i].skb); + } + } + + if (txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + txdr->desc = NULL; + } + if (rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + rxdr->desc = NULL; + } + + kfree(txdr->buffer_info); + txdr->buffer_info = NULL; + kfree(rxdr->buffer_info); + rxdr->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!txdr->count) + txdr->count = E1000_DEFAULT_TXD; + + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_buffer), + GFP_KERNEL); + if (!txdr->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL | __GFP_ZERO); + if 
(!txdr->desc) { + ret_val = 2; + goto err_nomem; + } + txdr->next_to_use = txdr->next_to_clean = 0; + + ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64)txdr->dma >> 32)); + ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < txdr->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); + struct sk_buff *skb; + unsigned int size = 1024; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + txdr->buffer_info[i].skb = skb; + txdr->buffer_info[i].length = skb->len; + txdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rxdr->count) + rxdr->count = E1000_DEFAULT_RXD; + + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer), + GFP_KERNEL); + if (!rxdr->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL | __GFP_ZERO); + if (!rxdr->desc) { + ret_val = 6; + goto err_nomem; + } + rxdr->next_to_use = rxdr->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64)rxdr->dma >> 32)); + ew32(RDLEN, rxdr->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rxdr->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); + struct sk_buff *skb; + + skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL); + if (!skb) { + ret_val = 7; + goto err_nomem; + } + skb_reserve(skb, NET_IP_ALIGN); + rxdr->buffer_info[i].skb = skb; + rxdr->buffer_info[i].length = E1000_RXBUFFER_2048; + rxdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); + memset(skb->data, 0x00, skb->len); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_write_phy_reg(hw, 29, 0x001F); + e1000_write_phy_reg(hw, 30, 0x8FFC); + e1000_write_phy_reg(hw, 29, 0x001A); + e1000_write_phy_reg(hw, 30, 0x8FF0); +} + +static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg; + + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. 
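
Aside: the ring allocation in e1000_setup_desc_rings above sizes the descriptor memory as count * sizeof(descriptor), rounded up to a 4096-byte boundary; that rounding is what the kernel's ALIGN() macro performs. A standalone sketch (hypothetical demo, not driver code):

#include <stdio.h>

/* Round x up to the next multiple of a power-of-two a. */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
    unsigned long count = 256;    /* E1000_DEFAULT_TXD */
    unsigned long desc_size = 16; /* a legacy Tx descriptor is 16 bytes */
    unsigned long size = ALIGN_UP(count * desc_size, 4096UL);

    printf("%lu\n", size); /* 4096 */
    return 0;
}
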
This + * value defaults back to a 2.5MHz clock when the PHY is reset. + */ + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_EPSCR_TX_CLK_25; + e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + + /* In addition, because of the s/w reset above, we need to enable + * CRS on TX. This must be set for both full and half duplex + * operation. + */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, phy_reg); +} + +static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg; + u16 phy_reg; + + /* Setup the Device Control Register for PHY loopback test. */ + + ctrl_reg = er32(CTRL); + ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ + E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + + /* Read the PHY Specific Control Register (0x10) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + + /* Clear Auto-Crossover bits in PHY Specific Control Register + * (bits 6:5). + */ + phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); + + /* Perform software reset on the PHY */ + e1000_phy_reset(hw); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); + + /* Wait for reset to complete. */ + udelay(500); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_phy_disable_receiver(adapter); + + /* Set the loopback bit in the PHY control register. */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + + /* Setup TX_CLK and TX_CRS one more time. */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Check Phy Configuration */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg != 0x4100) + return 9; + + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + if (phy_reg != 0x0070) + return 10; + + e1000_read_phy_reg(hw, 29, &phy_reg); + if (phy_reg != 0x001A) + return 11; + + return 0; +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + hw->autoneg = false; + + if (hw->phy_type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); + } + + ctrl_reg = er32(CTRL); + + /* force 1000, set loopback */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (hw->media_type == e1000_media_type_copper && + hw->phy_type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + else { + /* Set the ILOS bit on the fiber Nic is half + * duplex link is detected. + */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy_type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg = 0; + u16 count = 0; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->media_type == e1000_media_type_copper) { + /* Attempt to setup Loopback mode on Non-integrated PHY. + * Some PHY registers get corrupted at random, so + * attempt this 10 times. + */ + while (e1000_nonintegrated_phy_loopback(adapter) && + count++ < 10); + if (count < 11) + return 0; + } + break; + + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + return e1000_integrated_phy_loopback(adapter); + break; + default: + /* Default PHY loopback work is to read the MII + * control register and assert bit 14 (loopback mode). 
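
Aside: the default case of e1000_set_phy_loopback finishes just below by asserting bit 14 of the MII control register. The same read-modify-write, expressed against the standard BMCR layout (bit 14 is 0x4000, named BMCR_LOOPBACK in linux/mii.h), as a hypothetical standalone sketch rather than driver code:

#include <stdint.h>

#define BMCR_LOOPBACK 0x4000 /* bit 14 of the MII control (BMCR) register */

/* The pointer stands in for the e1000_read_phy_reg/e1000_write_phy_reg
 * pair the driver uses to reach the PHY register. */
static void phy_set_loopback(uint16_t *phy_ctrl)
{
    uint16_t v = *phy_ctrl; /* read the control register */
    v |= BMCR_LOOPBACK;     /* assert the loopback bit */
    *phy_ctrl = v;          /* write it back */
}
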
+ */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + return 0; + break; + } + + return 8; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + return e1000_set_phy_loopback(adapter); + break; + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) + return e1000_set_phy_loopback(adapter); + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + default: + hw->autoneg = true; + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + e1000_phy_reset(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int e1000_check_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(skb->data + 3) == 0xFF) { + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val=0; + unsigned long time; + + ew32(RDT, rxdr->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rxdr->count <= txdr->count) + lc = ((txdr->count / 64) * 2) + 1; + else + lc = ((rxdr->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(txdr->buffer_info[i].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + txdr->buffer_info[k].dma, + txdr->buffer_info[k].length, + DMA_TO_DEVICE); + if (unlikely(++k == txdr->count)) k = 0; + } + ew32(TDT, k); + E1000_WRITE_FLUSH(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rxdr->buffer_info[l].dma, + rxdr->buffer_info[l].length, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rxdr->buffer_info[l].skb, + 1024); + if (!ret_val) + good_cnt++; + if (unlikely(++l == rxdr->count)) l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + 
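
Aside: e1000_create_lbtest_frame/e1000_check_lbtest_frame above define the payload the loopback run sends and verifies: an 0xFF fill, 0xAA over the back half, and 0xBE/0xAF markers at fixed offsets past the midpoint. A standalone sketch of the pair (hypothetical demo, not driver code):

#include <stdio.h>
#include <string.h>

static void create_frame(unsigned char *buf, unsigned int size)
{
    memset(buf, 0xFF, size);
    size &= ~1u;
    memset(&buf[size / 2], 0xAA, size / 2 - 1);
    buf[size / 2 + 10] = 0xBE;
    buf[size / 2 + 12] = 0xAF;
}

static int check_frame(const unsigned char *buf, unsigned int size)
{
    size &= ~1u;
    if (buf[3] == 0xFF && buf[size / 2 + 10] == 0xBE &&
        buf[size / 2 + 12] == 0xAF)
        return 0;  /* frame survived the loop intact */
    return 13;     /* the driver's mis-compare error code */
}

int main(void)
{
    unsigned char frame[1024];

    create_frame(frame, sizeof(frame));
    printf("%d\n", check_frame(frame, sizeof(frame))); /* 0 */
    return 0;
}
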
*/ + } while (good_cnt < 64 && jiffies < (time + 20)); + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (jiffies >= (time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->media_type == e1000_media_type_internal_serdes) { + int i = 0; + hw->serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + e1000_check_for_link(hw); + if (hw->serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + e1000_check_for_link(hw); + if (hw->autoneg) /* if auto_neg is set wait for it */ + msleep(4000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) { + *data = 1; + } + } + return *data; +} + +static int e1000_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + u16 autoneg_advertised = hw->autoneg_advertised; + u8 forced_speed_duplex = hw->forced_speed_duplex; + u8 autoneg = hw->autoneg; + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + e1000_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + /* make sure the phy is powered up */ + e1000_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + hw->autoneg_advertised = autoneg_advertised; + hw->forced_speed_duplex = forced_speed_duplex; + hw->autoneg = autoneg; + + e1000_reset(adapter); + clear_bit(__E1000_TESTING, &adapter->flags); + if (if_running) + dev_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, &adapter->flags); + } + msleep_interruptible(4 * 
1000); +} + +static int e1000_wol_exclusion(struct e1000_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_PCIE: + /* these don't support WoL at all */ + wol->supported = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events not supported on port B */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* quad port adapters only support WoL on port A */ + if (!adapter->quad_port_a) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1 && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware + */ + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* apply any specific unsupported masks here */ + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* KSP3 does not support UCAST wake-ups */ + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + break; + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + if (wol->wolopts & WAKE_UCAST) { + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + e1000_setup_led(hw); + return 2; + + case ETHTOOL_ID_ON: + e1000_led_on(hw); + break; + + case ETHTOOL_ID_OFF: + e1000_led_off(hw); + break; + + case ETHTOOL_ID_INACTIVE: + e1000_cleanup_led(hw); + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac_type < e1000_82545) + return -EOPNOTSUPP; + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac_type < e1000_82545) + return -EOPNOTSUPP; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + char *p = NULL; + + e1000_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + switch (e1000_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *) netdev + + e1000_gstrings_stats[i].stat_offset; + break; + case E1000_STATS: + p = (char *) adapter + + e1000_gstrings_stats[i].stat_offset; + break; + } + + data[i] = (e1000_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
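+/* Worked example for the coalescing math in e1000_set_coalesce() above:
+ * rx_coalesce_usecs = 125 gives itr = 1000000 / 125 = 8000 interrupts/s,
+ * and the ITR register (which counts 256 ns units) is written with
+ * 1000000000 / (8000 * 256) ~= 488. As a sketch (helper name is
+ * illustrative only):
+ */
+#if 0	/* illustrative sketch, not compiled */
+static u32 example_itr_register_value(u32 rx_usecs)
+{
+	u32 itr = 1000000 / rx_usecs;		/* interrupts per second */
+
+	return 1000000000 / (itr * 256);	/* ITR in 256 ns increments */
+}
+#endif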
*(u64 *)p : *(u32 *)p; + } +/* BUG_ON(i != E1000_STATS_LEN); */ +} + +static void e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, *e1000_gstrings_test, + sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .get_settings = e1000_get_settings, + .set_settings = e1000_set_settings, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = e1000_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, +}; + +void e1000_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); +} diff --git a/addons/e1000/src/3.10.108/e1000_hw.c b/addons/e1000/src/3.10.108/e1000_hw.c new file mode 100644 index 00000000..2879b963 --- /dev/null +++ b/addons/e1000/src/3.10.108/e1000_hw.c @@ -0,0 +1,5893 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + + */ + +/* e1000_hw.c + * Shared functions for accessing and configuring the MAC + */ + +#include "e1000.h" + +static s32 e1000_check_downshift(struct e1000_hw *hw); +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity); +static void e1000_clear_hw_cntrs(struct e1000_hw *hw); +static void e1000_clear_vfta(struct e1000_hw *hw); +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, + bool link_up); +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); +static s32 e1000_detect_gig_phy(struct e1000_hw *hw); +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length); +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_id_led_init(struct e1000_hw *hw); +static void e1000_init_rx_addrs(struct e1000_hw *hw); +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); +static s32 e1000_set_phy_type(struct e1000_hw *hw); +static void e1000_phy_init_script(struct e1000_hw *hw); +static s32 e1000_setup_copper_link(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw); +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count); +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data); +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data); +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); +static s32 e1000_acquire_eeprom(struct e1000_hw *hw); +static void e1000_release_eeprom(struct e1000_hw *hw); +static void e1000_standby_eeprom(struct e1000_hw *hw); +static s32 e1000_set_vco_speed(struct e1000_hw *hw); +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); +static s32 e1000_set_phy_mode(struct e1000_hw *hw); +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); + +/* IGP cable length table */ +static const +u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 
40, 40, 40, 40, 40, 40, + 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120 +}; + +static DEFINE_SPINLOCK(e1000_eeprom_lock); +static DEFINE_SPINLOCK(e1000_phy_lock); + +/** + * e1000_set_phy_type - Set the phy type member in the hw struct. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_set_phy_type(struct e1000_hw *hw) +{ + e_dbg("e1000_set_phy_type"); + + if (hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + + switch (hw->phy_id) { + case M88E1000_E_PHY_ID: + case M88E1000_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: + hw->phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_type = e1000_phy_igp; + break; + case RTL8211B_PHY_ID: + hw->phy_type = e1000_phy_8211; + break; + case RTL8201N_PHY_ID: + hw->phy_type = e1000_phy_8201; + break; + default: + /* Should never have loaded on this device */ + hw->phy_type = e1000_phy_undefined; + return -E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_phy_init_script(struct e1000_hw *hw) +{ + u32 ret_val; + u16 phy_saved_data; + + e_dbg("e1000_phy_init_script"); + + if (hw->phy_init_script) { + msleep(20); + + /* Save off the current value of register 0x2F5B to be restored + * at the end of this routine. 
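+ *
+ * The save/quiesce/restore bracket used around the scripted writes in
+ * this routine, as a stand-alone sketch (the helper name is illustrative
+ * only and not part of the driver):
+ */
+#if 0	/* illustrative sketch, not compiled */
+static void example_scripted_phy_write(struct e1000_hw *hw, u32 reg, u16 val)
+{
+	u16 saved;
+
+	e1000_read_phy_reg(hw, 0x2F5B, &saved);	 /* save transmitter ctrl */
+	e1000_write_phy_reg(hw, 0x2F5B, 0x0003); /* disable the transmitter */
+	e1000_write_phy_reg(hw, reg, val);	 /* scripted write */
+	e1000_write_phy_reg(hw, 0x2F5B, saved);	 /* restore the transmitter */
+}
+#endif
+/*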
+ */ + ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disabled the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + msleep(20); + + e1000_write_phy_reg(hw, 0x0000, 0x0140); + msleep(5); + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + msleep(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac_type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = + fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= + IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = + (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & + IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + } +} + +/** + * e1000_set_mac_type - Set the mac type member in the hw struct. 
+ * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + e_dbg("e1000_set_mac_type"); + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + switch (hw->revision_id) { + case E1000_82542_2_0_REV_ID: + hw->mac_type = e1000_82542_rev2_0; + break; + case E1000_82542_2_1_REV_ID: + hw->mac_type = e1000_82542_rev2_1; + break; + default: + /* Invalid 82542 revision ID */ + return -E1000_ERR_MAC_TYPE; + } + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + hw->mac_type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + hw->mac_type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + hw->mac_type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + hw->mac_type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + hw->mac_type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + hw->mac_type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + hw->mac_type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + hw->mac_type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + hw->mac_type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + hw->mac_type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + hw->mac_type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_INTEL_CE4100_GBE: + hw->mac_type = e1000_ce4100; + break; + default: + /* Should never have loaded on this device */ + return -E1000_ERR_MAC_TYPE; + } + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->asf_firmware_present = true; + break; + default: + break; + } + + /* The 82543 chip does not count tx_carrier_errors properly in + * FD mode + */ + if (hw->mac_type == e1000_82543) + hw->bad_tx_carr_stats_fd = true; + + if (hw->mac_type > e1000_82544) + hw->has_smbus = true; + + return E1000_SUCCESS; +} + +/** + * e1000_set_media_type - Set media type and TBI compatibility. 
+ * @hw: Struct containing variables accessed by shared code + */ +void e1000_set_media_type(struct e1000_hw *hw) +{ + u32 status; + + e_dbg("e1000_set_media_type"); + + if (hw->mac_type != e1000_82543) { + /* tbi_compatibility is only valid on 82543 */ + hw->tbi_compatibility_en = false; + } + + switch (hw->device_id) { + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_ce4100: + hw->media_type = e1000_media_type_copper; + break; + default: + status = er32(STATUS); + if (status & E1000_STATUS_TBIMODE) { + hw->media_type = e1000_media_type_fiber; + /* tbi_compatibility not valid on fiber */ + hw->tbi_compatibility_en = false; + } else { + hw->media_type = e1000_media_type_copper; + } + break; + } + } +} + +/** + * e1000_reset_hw - reset the hardware completely + * @hw: Struct containing variables accessed by shared code + * + * Reset the transmit and receive units; mask and clear all interrupts. + */ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 ctrl_ext; + u32 icr; + u32 manc; + u32 led_ctrl; + s32 ret_val; + + e_dbg("e1000_reset_hw"); + + /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(); + + /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */ + hw->tbi_compatibility_on = false; + + /* Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msleep(10); + + ctrl = er32(CTRL); + + /* Must reset the PHY before resetting the MAC */ + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not affect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + e_dbg("Issuing a global reset to MAC\n"); + + switch (hw->mac_type) { + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82546: + case e1000_82541: + case e1000_82541_rev_2: + /* These controllers can't ack the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround to issue the reset + */ + E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + case e1000_82545_rev_3: + case e1000_82546_rev_3: + /* Reset is performed on a shadow of the control register */ + ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); + break; + case e1000_ce4100: + default: + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + break; + } + + /* After MAC reset, force reload of EEPROM to restore power-on settings + * to device. Later controllers reload the EEPROM automatically, so + * just wait for reload to complete. 
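+ *
+ * The quiesce-then-reset sequence above, reduced to its essentials as a
+ * sketch (error handling and the per-MAC special cases are omitted; the
+ * helper name is illustrative only):
+ */
+#if 0	/* illustrative sketch, not compiled */
+static void example_quiesce_and_reset(struct e1000_hw *hw)
+{
+	ew32(IMC, 0xffffffff);		/* mask all interrupts */
+	ew32(RCTL, 0);			/* stop the receiver */
+	ew32(TCTL, E1000_TCTL_PSP);	/* stop transmit, pad short packets */
+	E1000_WRITE_FLUSH();
+	msleep(10);			/* let PCI transactions drain */
+	ew32(CTRL, er32(CTRL) | E1000_CTRL_RST);	/* self-clearing bit */
+}
+#endif
+/*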
+ */ + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* Wait for reset to complete */ + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + /* Wait for EEPROM reload */ + msleep(2); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + /* Wait for EEPROM reload */ + msleep(20); + break; + default: + /* Auto read done will delay 5ms or poll based on mac type */ + ret_val = e1000_get_auto_rd_done(hw); + if (ret_val) + return ret_val; + break; + } + + /* Disable HW ARPs on ASF enabled adapters */ + if (hw->mac_type >= e1000_82540) { + manc = er32(MANC); + manc &= ~(E1000_MANC_ARP_EN); + ew32(MANC, manc); + } + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + e1000_phy_init_script(hw); + + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + icr = er32(ICR); + + /* If MWI was previously enabled, reenable it. */ + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw - Performs basic configuration of the adapter. + * @hw: Struct containing variables accessed by shared code + * + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the receive address registers, + * multicast table, and VLAN filter table. Calls routines to setup link + * configuration and flow control settings. Clears all on-chip counters. Leaves + * the transmit and receive units disabled and uninitialized. + */ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + s32 ret_val; + u32 mta_size; + u32 ctrl_ext; + + e_dbg("e1000_init_hw"); + + /* Initialize Identification LED */ + ret_val = e1000_id_led_init(hw); + if (ret_val) { + e_dbg("Error Initializing Identification LED\n"); + return ret_val; + } + + /* Set the media type and TBI compatibility */ + e1000_set_media_type(hw); + + /* Disabling VLAN filtering. */ + e_dbg("Initializing the IEEE VLAN\n"); + if (hw->mac_type < e1000_82545_rev_3) + ew32(VET, 0); + e1000_clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + ew32(RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Setup the receive address. This involves initializing all of the + * Receive Address Registers (RARs 0 - 15). 
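+ *
+ * On 82542 rev 2.0 the receive address registers may only be programmed
+ * with the receiver held in reset and MWI disabled; the bracket used
+ * below, as a sketch (helper name illustrative only):
+ */
+#if 0	/* illustrative sketch, not compiled */
+static void example_82542_rar_bracket(struct e1000_hw *hw)
+{
+	e1000_pci_clear_mwi(hw);
+	ew32(RCTL, E1000_RCTL_RST);	/* hold the receiver in reset */
+	E1000_WRITE_FLUSH();
+	msleep(5);
+
+	e1000_init_rx_addrs(hw);	/* program RARs 0 - 15 */
+
+	ew32(RCTL, 0);			/* release the receiver */
+	E1000_WRITE_FLUSH();
+	msleep(1);
+	if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
+		e1000_pci_set_mwi(hw);
+}
+#endif
+/*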
+ */ + e1000_init_rx_addrs(hw); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->mac_type == e1000_82542_rev2_0) { + ew32(RCTL, 0); + E1000_WRITE_FLUSH(); + msleep(1); + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + mta_size = E1000_MC_TBL_SIZE; + for (i = 0; i < mta_size; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + /* use write flush to prevent Memory Write Block (MWB) from + * occurring when accessing our register space + */ + E1000_WRITE_FLUSH(); + } + + /* Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. + */ + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PRIOR); + } + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + /* Workaround for PCI-X problem when BIOS sets MMRBC + * incorrectly. + */ + if (hw->bus_type == e1000_bus_type_pcix + && e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; + } + + /* Call a subroutine to configure the link and setup flow control. */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + if (hw->mac_type > e1000_82544) { + ctrl = er32(TXDCTL); + ctrl = + (ctrl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + ew32(TXDCTL, ctrl); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs(hw); + + if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || + hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { + ctrl_ext = er32(CTRL_EXT); + /* Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. + */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting. + * @hw: Struct containing variables accessed by shared code. + */ +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) +{ + u16 eeprom_data; + s32 ret_val; + + e_dbg("e1000_adjust_serdes_amplitude"); + + if (hw->media_type != e1000_media_type_internal_serdes) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, + &eeprom_data); + if (ret_val) { + return ret_val; + } + + if (eeprom_data != EEPROM_RESERVED_WORD) { + /* Adjust SERDES output amplitude only. */ + eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link - Configures flow control and link settings. + * @hw: Struct containing variables accessed by shared code + * + * Determines which flow control settings to use. Calls the appropriate media- + * specific link configuration function. Configures the flow control settings. + * Assuming the adapter has a valid link partner, a valid link should be + * established. 
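+ *
+ * The default flow-control decode performed below on EEPROM word 0x0F,
+ * as a stand-alone sketch (helper name illustrative only):
+ */
+#if 0	/* illustrative sketch, not compiled */
+static void example_fc_from_eeprom(struct e1000_hw *hw, u16 word0f)
+{
+	if ((word0f & EEPROM_WORD0F_PAUSE_MASK) == 0)
+		hw->fc = E1000_FC_NONE;
+	else if ((word0f & EEPROM_WORD0F_PAUSE_MASK) ==
+		 EEPROM_WORD0F_ASM_DIR)
+		hw->fc = E1000_FC_TX_PAUSE;
+	else
+		hw->fc = E1000_FC_FULL;
+}
+#endif
+/*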
Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. + */ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + u16 eeprom_data; + + e_dbg("e1000_setup_link"); + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->fc == E1000_FC_DEFAULT) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) + hw->fc = E1000_FC_NONE; + else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == + EEPROM_WORD0F_ASM_DIR) + hw->fc = E1000_FC_TX_PAUSE; + else + hw->fc = E1000_FC_FULL; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + if (hw->mac_type == e1000_82542_rev2_0) + hw->fc &= (~E1000_FC_TX_PAUSE); + + if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) + hw->fc &= (~E1000_FC_RX_PAUSE); + + hw->original_fc = hw->fc; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc); + + /* Take the 4 bits from EEPROM word 0x0F that determine the initial + * polarity value for the SW controlled pins, and setup the + * Extended Device Control reg with that info. + * This is needed because one of the SW controlled pins is used for + * signal detection. So this should be done before e1000_setup_pcs_link() + * or e1000_phy_setup() is called. + */ + if (hw->mac_type == e1000_82543) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << + SWDPIO__EXT_SHIFT); + ew32(CTRL_EXT, ctrl_ext); + } + + /* Call the necessary subroutine to configure the link. */ + ret_val = (hw->media_type == e1000_media_type_copper) ? + e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw); + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc_pause_time); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (!(hw->fc & E1000_FC_TX_PAUSE)) { + ew32(FCRTL, 0); + ew32(FCRTH, 0); + } else { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. 
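+ *
+ * How the two threshold registers relate (roughly): an XOFF pause frame
+ * is sent when the receive FIFO fills past FCRTH, and, with the XONE bit
+ * set, an XON is sent once it drains back below FCRTL. A sketch of the
+ * programming below (helper name illustrative only):
+ */
+#if 0	/* illustrative sketch, not compiled */
+static void example_program_fc_thresholds(struct e1000_hw *hw)
+{
+	if (!(hw->fc & E1000_FC_TX_PAUSE)) {
+		ew32(FCRTL, 0);
+		ew32(FCRTH, 0);
+	} else if (hw->fc_send_xon) {
+		ew32(FCRTL, hw->fc_low_water | E1000_FCRTL_XONE);
+		ew32(FCRTH, hw->fc_high_water);
+	} else {
+		ew32(FCRTL, hw->fc_low_water);
+		ew32(FCRTH, hw->fc_high_water);
+	}
+}
+#endif
+/*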
+ */ + if (hw->fc_send_xon) { + ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); + ew32(FCRTH, hw->fc_high_water); + } else { + ew32(FCRTL, hw->fc_low_water); + ew32(FCRTH, hw->fc_high_water); + } + } + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link - prepare fiber or serdes link + * @hw: Struct containing variables accessed by shared code + * + * Manipulates Physical Coding Sublayer functions in order to configure + * link. Assumes the hardware has been previously reset and the transmitter + * and receiver are not enabled. + */ +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + u32 status; + u32 txcw = 0; + u32 i; + u32 signal = 0; + s32 ret_val; + + e_dbg("e1000_setup_fiber_serdes_link"); + + /* On adapters with a MAC newer than 82544, SWDP 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + * If we're on serdes media, adjust the output amplitude to value + * set in the EEPROM. + */ + ctrl = er32(CTRL); + if (hw->media_type == e1000_media_type_fiber) + signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + + ret_val = e1000_adjust_serdes_amplitude(hw); + if (ret_val) + return ret_val; + + /* Take the link out of reset */ + ctrl &= ~(E1000_CTRL_LRST); + + /* Adjust VCO speed to improve BER performance */ + ret_val = e1000_set_vco_speed(hw); + if (ret_val) + return ret_val; + + e1000_config_collision_dist(hw); + + /* Check for a software override of the flow control settings, and setup + * the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start + * auto-negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, but + * not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do + * not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + */ + switch (hw->fc) { + case E1000_FC_NONE: + /* Flow ctrl is completely disabled by a software over-ride */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case E1000_FC_RX_PAUSE: + /* Rx Flow control is enabled and Tx Flow control is disabled by + * a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case E1000_FC_TX_PAUSE: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case E1000_FC_FULL: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + break; + } + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). 
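+ *
+ * The TXCW pause-bit selection above, distilled (helper name
+ * illustrative only; the real code rejects other fc values with
+ * -E1000_ERR_CONFIG):
+ */
+#if 0	/* illustrative sketch, not compiled */
+static u32 example_txcw_for_fc(unsigned int fc)
+{
+	u32 txcw = E1000_TXCW_ANE | E1000_TXCW_FD;	/* autoneg, FD */
+
+	switch (fc) {
+	case E1000_FC_NONE:
+		break;				/* no PAUSE bits advertised */
+	case E1000_FC_TX_PAUSE:
+		txcw |= E1000_TXCW_ASM_DIR;	/* asymmetric (Tx only) */
+		break;
+	case E1000_FC_RX_PAUSE:		/* Rx-only cannot be advertised, */
+	case E1000_FC_FULL:		/* so both advertise sym + asym */
+		txcw |= E1000_TXCW_PAUSE_MASK;
+		break;
+	}
+	return txcw;
+}
+#endif
+/*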
This + * will restart auto-negotiation. If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(TXCW, txcw); + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + hw->txcw = txcw; + msleep(1); + + /* If we have a signal (the cable is plugged in) then poll for a + * "Link-Up" indication in the Device Status Register. Time-out if a + * link isn't seen in 500 milliseconds (Auto-negotiation should + * complete in less than 500 milliseconds even if the other end is doing + * it in SW). For internal serdes, we just assume a signal is present, + * then poll. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { + e_dbg("Looking for Link\n"); + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == (LINK_UP_TIMEOUT / 10)) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + hw->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * e1000_check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + hw->autoneg_failed = 0; + } else { + hw->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + } else { + e_dbg("No Signal Detected\n"); + } + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. + * @hw: Struct containing variables accessed by shared code + * + * Commits changes to PHY configuration by calling e1000_phy_reset(). + */ +static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) +{ + s32 ret_val; + + /* SW reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ctrl_aux; + + switch (hw->phy_type) { + case e1000_phy_8211: + ret_val = e1000_copper_link_rtl_setup(hw); + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + case e1000_phy_8201: + /* Set RMII mode */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= E1000_CTL_AUX_RMII; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + + /* Disable the J/K bits required for receive */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= 0x4; + ctrl_aux &= ~0x2; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + ret_val = e1000_copper_link_rtl_setup(hw); + + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + default: + e_dbg("Error Resetting the PHY\n"); + return E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_preconfig - early configuration for copper + * @hw: Struct containing variables accessed by shared code + * + * Make sure we have a valid PHY and change PHY mode before link setup. + */ +static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_copper_link_preconfig"); + + ctrl = er32(CTRL); + /* With 82543, we need to force speed and duplex on the MAC equal to + * what the PHY speed and duplex configuration is. 
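+ *
+ * The link poll used in the fiber/serdes path above, as a reusable
+ * sketch (helper name illustrative only):
+ */
+#if 0	/* illustrative sketch, not compiled */
+static bool example_wait_for_link(struct e1000_hw *hw, unsigned int ms)
+{
+	unsigned int i;
+
+	for (i = 0; i < ms / 10; i++) {
+		if (er32(STATUS) & E1000_STATUS_LU)
+			return true;
+		msleep(10);
+	}
+	return false;	/* caller may fall back to e1000_check_for_link() */
+}
+#endif
+/*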
In addition, we need + * to perform a hardware reset on the PHY to take it out of reset. + */ + if (hw->mac_type > e1000_82543) { + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + } else { + ctrl |= + (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); + ew32(CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + } + + /* Make sure we have a valid PHY */ + ret_val = e1000_detect_gig_phy(hw); + if (ret_val) { + e_dbg("Error, did not detect valid phy.\n"); + return ret_val; + } + e_dbg("Phy ID = %x\n", hw->phy_id); + + /* Set PHY to class A mode (if necessary) */ + ret_val = e1000_set_phy_mode(hw); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + phy_data |= 0x00000008; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + } + + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 + || hw->mac_type == e1000_82547_rev_2) + hw->phy_reset_disable = false; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + u32 led_ctrl; + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_copper_link_igp_setup"); + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from eeprom settings */ + msleep(15); + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + + /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ + if (hw->phy_type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= + ~(IGP01E1000_PSCR_AUTO_MDIX | + IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; + + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; + + if (hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; + + if (hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; + + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable 
SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. + */ + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + /* Set auto Master/Slave resolution process */ + ret_val = + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_copper_link_mgp_setup"); + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
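+ *
+ * The 1000BASE-T master/slave handling in e1000_copper_link_igp_setup()
+ * above, distilled (helper name illustrative only; note the original
+ * leaves the bits untouched for unknown settings):
+ */
+#if 0	/* illustrative sketch, not compiled */
+static u16 example_ms_ctrl_bits(u16 phy_data, e1000_ms_type setting)
+{
+	switch (setting) {
+	case e1000_ms_force_master:
+		phy_data |= CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE;
+		break;
+	case e1000_ms_force_slave:
+		phy_data |= CR_1000T_MS_ENABLE;
+		phy_data &= ~CR_1000T_MS_VALUE;
+		break;
+	case e1000_ms_auto:
+		phy_data &= ~CR_1000T_MS_ENABLE;
+		break;
+	default:
+		break;
+	}
+	return phy_data;
+}
+#endif
+/*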
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((hw->phy_revision == E1000_REVISION_2) && + (hw->phy_id == M88E1111_I_PHY_ID)) { + /* Vidalia Phy, set the downshift counter to 5x */ + phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_autoneg - setup auto-neg + * @hw: Struct containing variables accessed by shared code + * + * Setup auto-negotiation and flow control advertisements, + * and then perform auto-negotiation. + */ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_copper_link_autoneg"); + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* IFE/RTL8201N PHY only supports 10/100 */ + if (hw->phy_type == e1000_phy_8201) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg + ("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = true; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_postconfig - post link setup + * @hw: Struct containing variables accessed by shared code + * + * Config the MAC and the PHY after link is up. + * 1) Set up the MAC to the current PHY speed/duplex + * if we are on 82543. If we + * are on newer silicon, we only need to configure + * collision distance in the Transmit Control Register. + * 2) Set up flow control on the MAC to that established with + * the link partner. + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 
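+ *
+ * The autoneg kick performed in e1000_copper_link_autoneg() above, as a
+ * stand-alone sketch (helper name illustrative only):
+ */
+#if 0	/* illustrative sketch, not compiled */
+static s32 example_restart_autoneg(struct e1000_hw *hw)
+{
+	u16 phy_ctrl;
+	s32 ret_val;
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	phy_ctrl |= MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG;
+	return e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
+}
+#endif
+/*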
+ */ +static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + s32 ret_val; + e_dbg("e1000_copper_link_postconfig"); + + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, true); + if (ret_val) { + e_dbg("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link - phy/speed/duplex setting + * @hw: Struct containing variables accessed by shared code + * + * Detects which PHY is present and sets up the speed and duplex + */ +static s32 e1000_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + e_dbg("e1000_setup_copper_link"); + + /* Check if it is a valid PHY and set PHY mode if necessary. */ + ret_val = e1000_copper_link_preconfig(hw); + if (ret_val) + return ret_val; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_copper_link_igp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if (ret_val) + return ret_val; + } else { + ret_val = gbe_dhg_phy_setup(hw); + if (ret_val) { + e_dbg("gbe_dhg_phy_setup failed!\n"); + return ret_val; + } + } + + if (hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H,or 100F + * depending on value from forced_speed_duplex. + */ + e_dbg("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + for (i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if (ret_val) + return ret_val; + + e_dbg("Valid link established!!!\n"); + return E1000_SUCCESS; + } + udelay(10); + } + + e_dbg("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/** + * e1000_phy_setup_autoneg - phy settings + * @hw: Struct containing variables accessed by shared code + * + * Configures PHY autoneg and flow control advertisement settings + */ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + e_dbg("e1000_phy_setup_autoneg"); + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + /* Read the MII 1000Base-T Control Register (Address 9). 
*/ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + else if (hw->phy_type == e1000_phy_8201) + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { + e_dbg + ("Advertise 1000mb Half duplex requested, request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start + * auto-negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc) { + case E1000_FC_NONE: /* 0 */ + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_RX_PAUSE: /* 1 */ + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. 
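+ *
+ * The PAUSE advertisement encoding handled by this switch, as a truth
+ * table (read off the cases below):
+ *
+ *	fc mode			ASM_DIR	PAUSE
+ *	E1000_FC_NONE		0	0
+ *	E1000_FC_RX_PAUSE	1	1	(sym + asym, see above)
+ *	E1000_FC_TX_PAUSE	1	0
+ *	E1000_FC_FULL		1	1
+ *
+ * And as a sketch (helper name illustrative only):
+ */
+#if 0	/* illustrative sketch, not compiled */
+static u16 example_pause_adv_bits(unsigned int fc)
+{
+	switch (fc) {
+	case E1000_FC_NONE:
+		return 0;
+	case E1000_FC_TX_PAUSE:
+		return NWAY_AR_ASM_DIR;
+	case E1000_FC_RX_PAUSE:
+	case E1000_FC_FULL:
+		return NWAY_AR_ASM_DIR | NWAY_AR_PAUSE;
+	default:
+		return 0;	/* the real code returns -E1000_ERR_CONFIG */
+	}
+}
+#endif
+/*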
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_TX_PAUSE: /* 2 */ + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case E1000_FC_FULL: /* 3 */ + /* Flow control (both RX and TX) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (hw->phy_type == e1000_phy_8201) { + mii_1000t_ctrl_reg = 0; + } else { + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex - force link settings + * @hw: Struct containing variables accessed by shared code + * + * Force PHY speed and duplex settings to hw->forced_speed_duplex + */ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 mii_ctrl_reg; + u16 mii_status_reg; + u16 phy_data; + u16 i; + + e_dbg("e1000_phy_force_speed_duplex"); + + /* Turn off Flow control if we are forcing speed and duplex. */ + hw->fc = E1000_FC_NONE; + + e_dbg("hw->fc = %d\n", hw->fc); + + /* Read the Device Control Register. */ + ctrl = er32(CTRL); + + /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(DEVICE_SPEED_MASK); + + /* Clear the Auto Speed Detect Enable bit. */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Read the MII Control Register. */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); + if (ret_val) + return ret_val; + + /* We need to disable autoneg in order to force link and duplex. */ + + mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; + + /* Are we forcing Full or Half Duplex? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_10_full) { + /* We want to force full duplex so we SET the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl |= E1000_CTRL_FD; + mii_ctrl_reg |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + /* We want to force half duplex so we CLEAR the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl &= ~E1000_CTRL_FD; + mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } + + /* Are we forcing 100Mbps??? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_100_half) { + /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ + ctrl |= E1000_CTRL_SPD_100; + mii_ctrl_reg |= MII_CR_SPEED_100; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb "); + } else { + /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + mii_ctrl_reg |= MII_CR_SPEED_10; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb "); + } + + e1000_config_collision_dist(hw); + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + + if (hw->phy_type == e1000_phy_m88) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. 
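+ *
+ * The device-control forcing above, distilled (helper name illustrative
+ * only):
+ */
+#if 0	/* illustrative sketch, not compiled */
+static u32 example_forced_ctrl_bits(u32 ctrl, unsigned int forced)
+{
+	ctrl |= E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX;
+	ctrl &= ~DEVICE_SPEED_MASK;
+	ctrl &= ~E1000_CTRL_ASDE;	/* no auto speed detect when forcing */
+
+	if (forced == e1000_100_full || forced == e1000_10_full)
+		ctrl |= E1000_CTRL_FD;
+	else
+		ctrl &= ~E1000_CTRL_FD;
+
+	if (forced == e1000_100_full || forced == e1000_100_half)
+		ctrl |= E1000_CTRL_SPD_100;	/* 10 Mb: leave bits clear */
+
+	return ctrl;
+}
+#endif
+/*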
M88E1000 requires + * MDI forced whenever speed or duplex are forced. + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %x\n", phy_data); + + /* Need to reset the PHY or these changes will be ignored */ + mii_ctrl_reg |= MII_CR_RESET; + + /* Disable MDI-X support for 10/100 */ + } else { + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed or duplex are forced. + */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Write back the modified PHY MII control register. */ + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); + if (ret_val) + return ret_val; + + udelay(1); + + /* The wait_autoneg_complete flag may be a little misleading here. + * Since we are forcing speed and duplex, Auto-Neg is not enabled. + * But we do want to delay for a period while forcing only so we + * don't generate false No Link messages. So we will wait here + * only if the user has set wait_autoneg_complete to 1, which is + * the default. + */ + if (hw->wait_autoneg_complete) { + /* We will wait for autoneg to complete. */ + e_dbg("Waiting for forced speed/duplex link.\n"); + mii_status_reg = 0; + + /* Wait for autoneg to complete or 4.5 seconds to expire */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { + /* We didn't get link. Reset the DSP and wait again + * for link. + */ + ret_val = e1000_phy_reset_dsp(hw); + if (ret_val) { + e_dbg("Error Resetting PHY DSP\n"); + return ret_val; + } + } + /* This loop will early-out if the link condition has been + * met + */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + } + } + + if (hw->phy_type == e1000_phy_m88) { + /* Because we reset the PHY above, we need to re-force TX_CLK in + * the Extended PHY Specific Control Register to 25MHz clock. + * This value defaults back to a 2.5MHz clock when the PHY is + * reset. + */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + /* In addition, because of the s/w reset above, we need to + * enable CRS on Tx. This must be set for both full and half + * duplex operation.
*/ + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) + && (!hw->autoneg) + && (hw->forced_speed_duplex == e1000_10_full + || hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if (ret_val) + return ret_val; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_config_collision_dist - set collision distance register + * @hw: Struct containing variables accessed by shared code + * + * Sets the collision distance in the Transmit Control register. + * Link should have been established previously. Reads the speed and duplex + * information from the Device Status register. + */ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl, coll_dist; + + e_dbg("e1000_config_collision_dist"); + + if (hw->mac_type < e1000_82543) + coll_dist = E1000_COLLISION_DISTANCE_82542; + else + coll_dist = E1000_COLLISION_DISTANCE; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= coll_dist << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_config_mac_to_phy - sync phy and mac settings + * @hw: Struct containing variables accessed by shared code + * + * Sets MAC speed and duplex settings to reflect those in the PHY. + * The needed information is read from the PHY's status registers by + * this function. + */ +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_config_mac_to_phy"); + + /* 82544 or newer MAC, Auto Speed Detection takes care of + * MAC speed/duplex configuration. + */ + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) + return E1000_SUCCESS; + + /* Read the Device Control Register and set the bits to Force Speed + * and Duplex. + */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + switch (hw->phy_type) { + case e1000_phy_8201: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & RTL_PHY_CTRL_FD) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + if (phy_data & RTL_PHY_CTRL_SPD_100) + ctrl |= E1000_CTRL_SPD_100; + else + ctrl |= E1000_CTRL_SPD_10; + + e1000_config_collision_dist(hw); + break; + default: + /* Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + e1000_config_collision_dist(hw); + + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == + M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + } + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc - force flow control settings + * @hw: Struct containing variables accessed by shared code + * + * Forces the MAC's flow control settings.
+ * Sets the TFCE and RFCE bits in the device control register to reflect + * the adapter settings. TFCE and RFCE need to be explicitly set by + * software when a Copper PHY is used because autonegotiation is managed + * by the PHY rather than the MAC. Software must also configure these + * bits when link is forced on a fiber connection. + */ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + + e_dbg("e1000_force_mac_fc"); + + /* Get the current configuration of the Device Control Register */ + ctrl = er32(CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit and + * receive flow control. + * + * The "Case" statement below enables/disables flow control + * according to the "hw->fc" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause + * frames but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No other values should be possible at this point. + */ + + switch (hw->fc) { + case E1000_FC_NONE: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case E1000_FC_RX_PAUSE: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case E1000_FC_TX_PAUSE: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case E1000_FC_FULL: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Disable TX Flow Control for 82542 (rev 2.0) */ + if (hw->mac_type == e1000_82542_rev2_0) + ctrl &= (~E1000_CTRL_TFCE); + + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_config_fc_after_link_up - configure flow control after autoneg + * @hw: Struct containing variables accessed by shared code + * + * Configures flow control settings after link is established + * Should be called immediately after a valid link has been established. + * Forces MAC flow control settings if link was forced. When in MII/GMII mode + * and autonegotiation is enabled, the MAC flow control settings will be set + * based on the flow control negotiated by the PHY. In TBI mode, the TFCE + * and RFCE bits will be automatically set to the negotiated flow control mode. + */ +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 mii_nway_adv_reg; + u16 mii_nway_lp_ability_reg; + u16 speed; + u16 duplex; + + e_dbg("e1000_config_fc_after_link_up"); + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) + || ((hw->media_type == e1000_media_type_internal_serdes) + && (hw->autoneg_failed)) + || ((hw->media_type == e1000_media_type_copper) + && (!hw->autoneg))) { + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured.
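As with the advertisement bits, the switch in e1000_force_mac_fc above is a pure mapping from fc mode to the two MAC enable bits. A sketch of just that mapping, with bit positions assumed for illustration (standing in for E1000_CTRL_RFCE/E1000_CTRL_TFCE):

#include <stdint.h>
#include <stdio.h>

#define CTRL_RFCE (1u << 27)    /* receive flow control enable (assumed) */
#define CTRL_TFCE (1u << 28)    /* transmit flow control enable (assumed) */

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Clear both enables, then set the ones the resolved mode calls for. */
static uint32_t force_mac_fc_bits(uint32_t ctrl, enum fc_mode fc)
{
    ctrl &= ~(CTRL_RFCE | CTRL_TFCE);
    switch (fc) {
    case FC_RX_PAUSE: ctrl |= CTRL_RFCE; break;
    case FC_TX_PAUSE: ctrl |= CTRL_TFCE; break;
    case FC_FULL:     ctrl |= CTRL_RFCE | CTRL_TFCE; break;
    case FC_NONE:
    default:          break;
    }
    return ctrl;
}

int main(void)
{
    printf("FC_FULL -> CTRL = 0x%08x\n",
           (unsigned)force_mac_fc_bits(0, FC_FULL));
    return 0;
}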
+ */ + if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement Register + * (Address 4) and the Auto_Negotiation Base Page + * Ability Register (Address 5) to determine how flow + * control was negotiated. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement + * Register (Address 4) and two bits in the Auto + * Negotiation Base Page Ability Register (Address 5) + * determine flow control for both the PHY and the link + * partner. The following table, taken out of the IEEE + * 802.3ab/D6.0 dated March 25, 1999, describes these + * PAUSE resolution bits and how flow control is + * determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|------------------ + * 0 | 0 | DC | DC | E1000_FC_NONE + * 0 | 1 | 0 | DC | E1000_FC_NONE + * 0 | 1 | 1 | 0 | E1000_FC_NONE + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * 1 | 0 | 0 | DC | E1000_FC_NONE + * 1 | DC | 1 | DC | E1000_FC_FULL + * 1 | 1 | 0 | 0 | E1000_FC_NONE + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | DC | 1 | DC | E1000_FC_FULL + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx + * ONLY of pause frames. In this case, we had + * to advertise FULL flow control because we + * could not advertise Rx ONLY. Hence, we must + * now check to see if we need to turn OFF the + * TRANSMISSION of PAUSE frames. + */ + if (hw->original_fc == E1000_FC_FULL) { + hw->fc = E1000_FC_FULL; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) + { + hw->fc = E1000_FC_TX_PAUSE; + e_dbg + ("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) + { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should + * be disabled. However, we want to consider that we + * could be connected to a legacy switch that doesn't + * advertise desired flow control, but can be forced on + * the link partner. So if we advertised no flow + * control, that is what we will resolve to. If we + * advertised some kind of receive capability (Rx Pause + * Only or Full Flow Control) and the link partner + * advertised none, we will configure ourselves to + * enable Rx Flow Control only. We can do this safely + * for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, + * no harm done since we won't be receiving any PAUSE + * frames anyway. If the intent on the link partner was + * to have flow control enabled, then by us enabling Rx + * only, we can at least receive pause frames and + * process them. This is a good idea because in most + * cases, since we are predominantly a server NIC, more + * times than not we will be asked to delay transmission + * of packets than asking our link partner to pause + * transmission of frames. + */ + else if ((hw->original_fc == E1000_FC_NONE || + hw->original_fc == E1000_FC_TX_PAUSE) || + hw->fc_strict_ieee) { + hw->fc = E1000_FC_NONE; + e_dbg("Flow Control = NONE.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc = E1000_FC_NONE; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg + ("Error forcing flow control settings\n"); + return ret_val; + } + } else { + e_dbg + ("Copper PHY and Auto Neg has not completed.\n"); + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + */ +static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + e_dbg("e1000_check_for_serdes_link_generic"); + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
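The resolution table quoted above can be checked mechanically. A standalone sketch of the four-input decision, mirroring the if/else chain in e1000_config_fc_after_link_up (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

/* Pause resolution per IEEE 802.3 Annex 28B, as tabulated in the
 * comment above: local PAUSE/ASM_DIR versus link-partner PAUSE/ASM_DIR. */
static enum fc_mode resolve_fc(bool loc_pause, bool loc_asm,
                               bool lp_pause, bool lp_asm)
{
    if (loc_pause && lp_pause)
        return FC_FULL;                 /* 1 | DC | 1 | DC */
    if (!loc_pause && loc_asm && lp_pause && lp_asm)
        return FC_TX_PAUSE;             /* 0 | 1  | 1 | 1  */
    if (loc_pause && loc_asm && !lp_pause && lp_asm)
        return FC_RX_PAUSE;             /* 1 | 1  | 0 | 1  */
    return FC_NONE;
}

int main(void)
{
    printf("0,1,1,1 resolves to %d (expect FC_TX_PAUSE=%d)\n",
           resolve_fc(false, true, true, true), FC_TX_PAUSE);
    return 0;
}

On top of this table, the driver downgrades FC_FULL to FC_RX_PAUSE when the user originally asked for Rx-only pause, falls back to Rx-only when a legacy partner advertises nothing, and drops to FC_NONE when the link negotiated half duplex.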
*/ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (hw->autoneg_failed == 0) { + hw->autoneg_failed = 1; + goto out; + } + e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, hw->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + hw->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid " + "codewords detected in autoneg.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + out: + return ret_val; +} + +/** + * e1000_check_for_link + * @hw: Struct containing variables accessed by shared code + * + * Checks to see if the link status of the hardware has changed. + * Called by any function that needs to check the link status of the adapter. + */ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + u32 rxcw = 0; + u32 ctrl; + u32 status; + u32 rctl; + u32 icr; + u32 signal = 0; + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_check_for_link"); + + ctrl = er32(CTRL); + status = er32(STATUS); + + /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + */ + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) { + rxcw = er32(RXCW); + + if (hw->media_type == e1000_media_type_fiber) { + signal = + (hw->mac_type > + e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + if (status & E1000_STATUS_LU) + hw->get_link_status = false; + } + } + + /* If we have a copper PHY then we only want to go out to the PHY + * registers to see if Auto-Neg has completed and/or if our link + * status has changed.
The get_link_status flag will be set if we + * receive a Link Status Change interrupt or we have Rx Sequence + * Errors. + */ + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + * Read the register twice since the link bit is sticky. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + hw->get_link_status = false; + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift(hw); + + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the + * polarity reversal workaround. We disable interrupts + * first, and upon returning, place the devices + * interrupt state to its previous value except for the + * link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if ((hw->mac_type == e1000_82544 + || hw->mac_type == e1000_82543) && (!hw->autoneg) + && (hw->forced_speed_duplex == e1000_10_full + || hw->forced_speed_duplex == e1000_10_half)) { + ew32(IMC, 0xffffffff); + ret_val = + e1000_polarity_reversal_workaround(hw); + icr = er32(ICR); + ew32(ICS, (icr & ~E1000_ICS_LSC)); + ew32(IMS, IMS_ENABLE_MASK); + } + + } else { + /* No link detected */ + e1000_config_dsp_after_link_change(hw, false); + return 0; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!hw->autoneg) + return -E1000_ERR_CONFIG; + + /* optimize the dsp settings for the igp phy */ + e1000_config_dsp_after_link_change(hw, true); + + /* We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_ce4100)) + e1000_config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg + ("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control settings + * because we may have had to re-autoneg with a different link + * partner. + */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + /* At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the + * link partner capability register. We use the link speed to + * determine if TBI compatibility needs to be turned on or off. + * If the link is not at gigabit speed, then TBI compatibility + * is not needed. If we are at gigabit speed, we turn on TBI + * compatibility. 
*/ + if (hw->tbi_compatibility_en) { + u16 speed, duplex; + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + if (speed != SPEED_1000) { + /* If link speed is not set to gigabit speed, we + * do not need to enable TBI compatibility. + */ + if (hw->tbi_compatibility_on) { + /* If we previously were in the mode, + * turn it off. + */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_SBP; + ew32(RCTL, rctl); + hw->tbi_compatibility_on = false; + } + } else { + /* If TBI compatibility was previously off, + * turn it on. For compatibility with a TBI link + * partner, we will store bad packets. Some + * frames have an additional byte on the end and + * will look like CRC errors to the hardware. + */ + if (!hw->tbi_compatibility_on) { + hw->tbi_compatibility_on = true; + rctl = er32(RCTL); + rctl |= E1000_RCTL_SBP; + ew32(RCTL, rctl); + } + } + } + } + + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) + e1000_check_for_serdes_link_generic(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex + * @hw: Struct containing variables accessed by shared code + * @speed: Speed of the connection + * @duplex: Duplex setting of the connection + * + * Detects the current speed and duplex settings of the hardware. + */ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_get_speed_and_duplex"); + + if (hw->mac_type >= e1000_82543) { + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + e_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + e_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + e_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + e_dbg(" Half Duplex\n"); + } + } else { + e_dbg("1000 Mbs, Full Duplex\n"); + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + } + + /* IGP01 PHY may advertise full duplex operation after speed downgrade + * even if it is operating at half duplex. Here we set the duplex + * settings to match the duplex in the link partner's capabilities. + */ + if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) + *duplex = HALF_DUPLEX; + else { + ret_val = + e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); + if (ret_val) + return ret_val; + if ((*speed == SPEED_100 + && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) + || (*speed == SPEED_10 + && !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + *duplex = HALF_DUPLEX; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_wait_autoneg + * @hw: Struct containing variables accessed by shared code + * + * Blocks until autoneg completes or times out (~4.5 seconds) + */ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + e_dbg("e1000_wait_autoneg"); + e_dbg("Waiting for Auto-Neg to complete.\n"); + + /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set.
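The double read in the polling loops here is deliberate: the MII status register latches link-down events, so only the second read reflects the current state. A sketch of the pattern with a stubbed MDIO read (bit value and helper names are assumptions for the sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SR_LINK_UP 0x0004u  /* assumed: MII status register link bit */

static int calls;

/* Stand-in for an MDIO status read; simulates link coming up on the
 * fifth access. */
static uint16_t mdio_read_status(void)
{
    return (++calls >= 5) ? SR_LINK_UP : 0;
}

/* The link bit is "sticky": it latches low on any link drop, so the
 * first read clears the latch and the second read is the live state.
 * The driver sleeps 100 ms per iteration; omitted here. */
static bool wait_for_link(int tries)
{
    while (tries-- > 0) {
        uint16_t status;
        (void)mdio_read_status();       /* first read clears the latch */
        status = mdio_read_status();    /* second read is current state */
        if (status & SR_LINK_UP)
            return true;
    }
    return false;
}

int main(void)
{
    printf("link %s\n", wait_for_link(45) ? "up" : "down");
    return 0;
}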
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + if (phy_data & MII_SR_AUTONEG_COMPLETE) { + return E1000_SUCCESS; + } + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_raise_mdi_clk - Raises the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Raise the clock input to the Management Data Clock (by setting the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_lower_mdi_clk - Lowers the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Lower the clock input to the Management Data Clock (by clearing the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY + * @hw: Struct containing variables accessed by shared code + * @data: Data to send out to the PHY + * @count: Number of bits to shift out + * + * Bits are shifted out in MSB to LSB order. + */ +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) +{ + u32 ctrl; + u32 mask; + + /* We need to shift "count" number of bits out to the PHY. So, the value + * in the "data" parameter will be shifted out to the PHY one bit at a + * time. In order to do this, "data" must be broken down into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = er32(CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* A "1" is shifted out to the PHY by setting the MDIO bit to + * "1" and then raising and lowering the Management Data Clock. + * A "0" is shifted out to the PHY by setting the MDIO bit to + * "0" and then raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + udelay(10); + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + mask = mask >> 1; + } +} + +/** + * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY + * @hw: Struct containing variables accessed by shared code + * + * Bits are shifted in in MSB to LSB order. + */ +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* In order to read a register from the PHY, we need to shift in a total + * of 18 bits from the PHY. The first two bit (turnaround) times are + * used to avoid contention on the MDIO pin when a read operation is + * performed. These two bits are ignored by us and thrown away. Bits are + * "shifted in" by raising the input to the Management Data Clock + * (setting the MDC bit), and then reading the value of the MDIO bit. + */ + ctrl = er32(CTRL); + + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + /* Raise and Lower the clock before reading in the data. This accounts + * for the turnaround bits. 
The first clock occurred when we clocked out + * the last bit of the Register Address. + */ + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data = data << 1; + e1000_raise_mdi_clk(hw, &ctrl); + ctrl = er32(CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk(hw, &ctrl); + } + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + return data; +} + + +/** + * e1000_read_phy_reg - read a phy register + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to read + * + * Reads the value from a PHY register, if the value is on a specific non zero + * page, sets the page first. + */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) +{ + u32 ret_val; + unsigned long flags; + + e_dbg("e1000_read_phy_reg"); + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) { + spin_unlock_irqrestore(&e1000_phy_lock, flags); + return ret_val; + } + } + + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + e_dbg("e1000_read_phy_reg_ex"); + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, and register address in the MDI + * Control register. The MAC will take care of interfacing with + * the PHY to retrieve the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_READ) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + + mdic = readl(E1000_MDIO_STS); + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { + e_dbg("MDI Read Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16) mdic; + } else { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16) mdic; + } + } else { + /* We must first send a preamble through the MDIO pin to signal + * the beginning of an MII instruction. This is done by sending + * 32 consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the next few fields that are required for a read + * operation. 
We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. The + * format of a MII read instruction consists of a shift out of + * 14 bits and is defined as follows: + * <Preamble><SOF><Op Code><Phy Addr><Reg Addr> + * followed by a shift in of 18 bits. The first two bits + * shifted in are TurnAround bits used to avoid contention on + * the MDIO pin when a READ operation is performed. These two + * bits are thrown away followed by a shift in of 16 bits which + * contains the desired data. + */ + mdic = ((reg_addr) | (phy_addr << 5) | + (PHY_OP_READ << 10) | (PHY_SOF << 12)); + + e1000_shift_out_mdi_bits(hw, mdic, 14); + + /* Now that we've shifted out the read command to the MII, we + * need to "shift in" the 16-bit value (18 total bits) of the + * requested PHY register. + */ + *phy_data = e1000_shift_in_mdi_bits(hw); + } + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - write a phy register + * + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to write + * @data: data to write to the PHY + * + * Writes a value to a PHY register + */ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) +{ + u32 ret_val; + unsigned long flags; + + e_dbg("e1000_write_phy_reg"); + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) { + spin_unlock_irqrestore(&e1000_phy_lock, flags); + return ret_val; + } + } + + ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + e_dbg("e1000_write_phy_reg_ex"); + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, register address, and data + * intended for the PHY register in the MDI Control register. + * The MAC will take care of interfacing with the PHY to send + * the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = (((u32) phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_WRITE) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI write + * completed + */ + for (i = 0; i < 640; i++) { + udelay(5); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } else { + mdic = (((u32) phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI write + * completed + */ + for (i = 0; i < 641; i++) { + udelay(5); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } + } else { + /* We'll need to use the SW defined pins to shift the write + * command out to the PHY. We first send a preamble to the PHY + * to signal the beginning of the MII instruction.
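Both the read command above and the write command just below pack the clause-22 management frame fields into one integer before bit-banging. A sketch of the two layouts, with field values assumed from IEEE 802.3 clause 22 (standing in for PHY_SOF, PHY_OP_READ/WRITE, PHY_TURNAROUND):

#include <stdint.h>
#include <stdio.h>

#define MII_SOF        0x01u  /* start-of-frame, assumed */
#define MII_OP_READ    0x02u
#define MII_OP_WRITE   0x01u
#define MII_TURNAROUND 0x02u

/* 14 bits shifted out for a read: <SOF><Op><PhyAddr><RegAddr>; the two
 * turnaround bits plus 16 data bits are then shifted in. */
static uint16_t mii_read_frame(uint8_t phy, uint8_t reg)
{
    return (uint16_t)((reg & 0x1f) | ((phy & 0x1f) << 5) |
                      (MII_OP_READ << 10) | (MII_SOF << 12));
}

/* 32 bits shifted out for a write:
 * <SOF><Op><PhyAddr><RegAddr><Turnaround><Data>. */
static uint32_t mii_write_frame(uint8_t phy, uint8_t reg, uint16_t data)
{
    uint32_t hi = MII_TURNAROUND | ((uint32_t)(reg & 0x1f) << 2) |
                  ((uint32_t)(phy & 0x1f) << 7) |
                  (MII_OP_WRITE << 12) | (MII_SOF << 14);
    return (hi << 16) | data;
}

int main(void)
{
    printf("read  frame: 0x%04x\n", mii_read_frame(1, 0));
    printf("write frame: 0x%08x\n",
           (unsigned)mii_write_frame(1, 0, 0x1140));
    return 0;
}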
This is done + * by sending 32 consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the remaining required fields that will indicate + * a write operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine for each field in the + * command. The format of a MII write instruction is as follows: + * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>. + */ + mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | + (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); + mdic <<= 16; + mdic |= (u32) phy_data; + + e1000_shift_out_mdi_bits(hw, mdic, 32); + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - reset the phy, hardware style + * @hw: Struct containing variables accessed by shared code + * + * Returns the PHY to the power-on reset state + */ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext; + u32 led_ctrl; + + e_dbg("e1000_phy_hw_reset"); + + e_dbg("Resetting Phy...\n"); + + if (hw->mac_type > e1000_82543) { + /* Read the device control register and assert the + * E1000_CTRL_PHY_RST bit. Then, take it out of reset. + * For e1000 hardware, we delay for 10ms between the assert + * and de-assert. + */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(); + + msleep(10); + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + } else { + /* Read the Extended Device Control Register, assert the + * PHY_RESET_DIR bit to put the PHY into reset. Then, take it + * out of reset. + */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + msleep(10); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + } + udelay(150); + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Wait for FW to finish PHY configuration. */ + return e1000_get_phy_cfg_done(hw); +} + +/** + * e1000_phy_reset - reset the phy to commit settings + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY + * Sets bit 15 of the MII Control register + */ +s32 e1000_phy_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_phy_reset"); + + switch (hw->phy_type) { + case e1000_phy_igp: + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + break; + default: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= MII_CR_RESET; + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + break; + } + + if (hw->phy_type == e1000_phy_igp) + e1000_phy_init_script(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_detect_gig_phy - check the phy type + * @hw: Struct containing variables accessed by shared code + * + * Probes the expected PHY address for known PHY IDs + */ +static s32 e1000_detect_gig_phy(struct e1000_hw *hw) +{ + s32 phy_init_status, ret_val; + u16 phy_id_high, phy_id_low; + bool match = false; + + e_dbg("e1000_detect_gig_phy"); + + if (hw->phy_id != 0) + return E1000_SUCCESS; + + /* Read the PHY ID Registers to identify which PHY is onboard.
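Once both ID registers are read, the code just below assembles the PHY identity: ID1 supplies the high word and ID2 the low word with its revision bits stripped. A standalone sketch of that assembly (the 4-bit revision mask is an assumption standing in for PHY_REVISION_MASK):

#include <stdint.h>
#include <stdio.h>

#define REV_MASK 0x000fu  /* assumed: low 4 bits of ID2 carry the revision */

struct phy_identity {
    uint32_t id;        /* OUI bits plus model, revision stripped */
    uint32_t revision;
};

/* Mirrors the driver: ID1 << 16 gives the high word; ID2 contributes
 * the low word minus its revision field. */
static struct phy_identity make_phy_id(uint16_t id1, uint16_t id2)
{
    struct phy_identity p;
    p.id = ((uint32_t)id1 << 16) | (uint32_t)(id2 & ~REV_MASK);
    p.revision = id2 & REV_MASK;
    return p;
}

int main(void)
{
    /* Register values are illustrative, not taken from real silicon. */
    struct phy_identity p = make_phy_id(0x0141, 0x0c56);
    printf("phy_id=0x%08x rev=%u\n", (unsigned)p.id, (unsigned)p.revision);
    return 0;
}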
*/ + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); + if (ret_val) + return ret_val; + + hw->phy_id = (u32) (phy_id_high << 16); + udelay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); + if (ret_val) + return ret_val; + + hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->phy_id == M88E1000_E_PHY_ID) + match = true; + break; + case e1000_82544: + if (hw->phy_id == M88E1000_I_PHY_ID) + match = true; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (hw->phy_id == M88E1011_I_PHY_ID) + match = true; + break; + case e1000_ce4100: + if ((hw->phy_id == RTL8211B_PHY_ID) || + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) + match = true; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (hw->phy_id == IGP01E1000_I_PHY_ID) + match = true; + break; + default: + e_dbg("Invalid MAC type %d\n", hw->mac_type); + return -E1000_ERR_CONFIG; + } + phy_init_status = e1000_set_phy_type(hw); + + if ((match) && (phy_init_status == E1000_SUCCESS)) { + e_dbg("PHY ID 0x%X detected\n", hw->phy_id); + return E1000_SUCCESS; + } + e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id); + return -E1000_ERR_PHY; +} + +/** + * e1000_phy_reset_dsp - reset DSP + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY's DSP + */ +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + e_dbg("e1000_phy_reset_dsp"); + + do { + ret_val = e1000_write_phy_reg(hw, 29, 0x001d); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x0000); + if (ret_val) + break; + ret_val = E1000_SUCCESS; + } while (0); + + return ret_val; +} + +/** + * e1000_phy_igp_get_info - get igp specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for igp PHY only. + */ +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data, min_length, max_length, average; + e1000_rev_polarity polarity; + + e_dbg("e1000_phy_igp_get_info"); + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + /* IGP01E1000 does not need to support it. 
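Further down in this function, a (min, max) cable estimate is averaged and mapped onto the driver's legacy length buckets. The bucketing idea in isolation, with threshold values assumed as stand-ins for the e1000_igp_cable_length_* constants:

#include <stddef.h>
#include <stdio.h>

struct bucket { int max_m; const char *range; };

static const char *cable_bucket(int min_m, int max_m)
{
    static const struct bucket map[] = {
        { 50, "0-50 m" }, { 80, "50-80 m" },
        { 110, "80-110 m" }, { 140, "110-140 m" },
    };
    int average = (min_m + max_m) / 2;  /* same averaging as the driver */
    for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
        if (average <= map[i].max_m)
            return map[i].range;
    return "140+ m";
}

int main(void)
{
    printf("%s\n", cable_bucket(60, 100)); /* average 80 -> "50-80 m" */
    return 0;
}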
*/ + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; + + /* IGP01E1000 always correct polarity reversal */ + phy_info->polarity_correction = e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> + IGP01E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Local/Remote Receiver Information are only valid @ 1000 + * Mbps + */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + /* Get cable length */ + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + /* Translate to old method */ + average = (max_length + min_length) / 2; + + if (average <= e1000_igp_cable_length_50) + phy_info->cable_length = e1000_cable_length_50; + else if (average <= e1000_igp_cable_length_80) + phy_info->cable_length = e1000_cable_length_50_80; + else if (average <= e1000_igp_cable_length_110) + phy_info->cable_length = e1000_cable_length_80_110; + else if (average <= e1000_igp_cable_length_140) + phy_info->cable_length = e1000_cable_length_110_140; + else + phy_info->cable_length = e1000_cable_length_140; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_m88_get_info - get m88 specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for m88 PHY only. + */ +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + e1000_rev_polarity polarity; + + e_dbg("e1000_phy_m88_get_info"); + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_info->extended_10bt_distance = + ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> + M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? + e1000_10bt_ext_dist_enable_lower : + e1000_10bt_ext_dist_enable_normal; + + phy_info->polarity_correction = + ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> + M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? 
+ e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> + M88E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. + */ + phy_info->cable_length = + (e1000_cable_length) ((phy_data & + M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_get_info - request phy info + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers + */ +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_phy_get_info"); + + phy_info->cable_length = e1000_cable_length_undefined; + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; + phy_info->cable_polarity = e1000_rev_polarity_undefined; + phy_info->downshift = e1000_downshift_undefined; + phy_info->polarity_correction = e1000_polarity_reversal_undefined; + phy_info->mdix_mode = e1000_auto_x_mode_undefined; + phy_info->local_rx = e1000_1000t_rx_status_undefined; + phy_info->remote_rx = e1000_1000t_rx_status_undefined; + + if (hw->media_type != e1000_media_type_copper) { + e_dbg("PHY info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + e_dbg("PHY info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + if (hw->phy_type == e1000_phy_igp) + return e1000_phy_igp_get_info(hw, phy_info); + else if ((hw->phy_type == e1000_phy_8211) || + (hw->phy_type == e1000_phy_8201)) + return E1000_SUCCESS; + else + return e1000_phy_m88_get_info(hw, phy_info); +} + +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + e_dbg("e1000_validate_mdi_settings"); + + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + e_dbg("Invalid MDI setting detected\n"); + hw->mdix = 1; + return -E1000_ERR_CONFIG; + } + return E1000_SUCCESS; +} + +/** + * e1000_init_eeprom_params - initialize sw eeprom vars + * @hw: Struct containing variables accessed by shared code + * + * Sets up eeprom variables in the hw struct. Must be called after mac_type + * is configured. 
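In the SPI branch of the routine below, word_size is decoded from a small size field as a power of two, with nonzero fields bumped by one so the unsupported 256-byte size never results. A sketch of that decode (the shift value is an assumption matching EEPROM_WORD_SIZE_SHIFT):

#include <stdint.h>
#include <stdio.h>

#define WORD_SIZE_SHIFT 6  /* assumed; field 0 then decodes to 64 words */

static uint32_t spi_word_size(uint32_t size_field)
{
    /* Skip over the 256 B encoding, as the driver does. */
    if (size_field)
        size_field++;
    return 1u << (size_field + WORD_SIZE_SHIFT);
}

int main(void)
{
    printf("field 0 -> %u words\n", (unsigned)spi_word_size(0)); /* 64 */
    printf("field 4 -> %u words\n", (unsigned)spi_word_size(4)); /* 2048 */
    return 0;
}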
+ */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd = er32(EECD); + s32 ret_val = E1000_SUCCESS; + u16 eeprom_size; + + e_dbg("e1000_init_eeprom_params"); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + eeprom->type = e1000_eeprom_microwire; + eeprom->word_size = 64; + eeprom->opcode_bits = 3; + eeprom->address_bits = 6; + eeprom->delay_usec = 50; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_SIZE) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (eecd & E1000_EECD_TYPE) { + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + } else { + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + } + break; + default: + break; + } + + if (eeprom->type == e1000_eeprom_spi) { + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes + * 128B to 32KB (incremented by powers of 2). + */ + /* Set to default value for initial eeprom read. */ + eeprom->word_size = 64; + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); + if (ret_val) + return ret_val; + eeprom_size = + (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + /* 256B eeprom size was not supported in earlier hardware, so we + * bump eeprom_size up one to ensure that "1" (which maps to + * 256B) is never the result used in the shifting logic below. + */ + if (eeprom_size) + eeprom_size++; + + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); + } + return ret_val; +} + +/** + * e1000_raise_ee_clk - Raises the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait microseconds. + */ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_lower_ee_clk - Lowers the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and + * then wait 50 microseconds. + */ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM. 
+ * @hw: Struct containing variables accessed by shared code + * @data: data to send to the EEPROM + * @count: number of bits to shift out + */ +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + */ + mask = 0x01 << (count - 1); + eecd = er32(EECD); + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~E1000_EECD_DO; + } else if (eeprom->type == e1000_eeprom_spi) { + eecd |= E1000_EECD_DO; + } + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK bit + * controls the clock input to the EEPROM). A "0" is shifted + * out to the EEPROM by setting "DI" to "0" and then raising and + * then lowering the clock. + */ + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(eeprom->delay_usec); + + e1000_raise_ee_clk(hw, &eecd); + e1000_lower_ee_clk(hw, &eecd); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM + * @hw: Struct containing variables accessed by shared code + * @count: number of bits to shift in + */ +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 'count' + * bits in from the EEPROM. Bits are "shifted in" by raising the clock + * input to the EEPROM (setting the SK bit), and then reading the value + * of the "DO" bit. During this "shifting in" process the "DI" bit + * should always be clear. + */ + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data = data << 1; + e1000_raise_ee_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DI); + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_ee_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_acquire_eeprom - Prepares EEPROM for access + * @hw: Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
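e1000_shift_out_ee_bits above is the classic MSB-first bit-bang: present one bit on the data-in pin, then pulse the serial clock to latch it. The same skeleton with the pin operations stubbed out so the order is visible (helper names are illustrative; in the driver these are EECD writes with a flush and delay):

#include <stdint.h>
#include <stdio.h>

static void set_di(int bit) { printf("DI=%d ", bit); }
static void pulse_sk(void)  { printf("SK^v\n"); }

/* Walk the mask from the top bit down, exactly as the driver does. */
static void shift_out_bits(uint16_t data, int count)
{
    for (uint16_t mask = (uint16_t)(1u << (count - 1)); mask; mask >>= 1) {
        set_di((data & mask) != 0);
        pulse_sk();
    }
}

int main(void)
{
    shift_out_bits(0x6, 3); /* e.g. a 3-bit microwire read opcode */
    return 0;
}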
*/ +static s32 e1000_acquire_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd, i = 0; + + e_dbg("e1000_acquire_eeprom"); + + eecd = er32(EECD); + + /* Request EEPROM Access */ + if (hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; + ew32(EECD, eecd); + eecd = er32(EECD); + while ((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + udelay(5); + eecd = er32(EECD); + } + if (!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire EEPROM grant\n"); + return -E1000_ERR_EEPROM; + } + } + + /* Setup EEPROM for Read/Write */ + + if (eeprom->type == e1000_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + ew32(EECD, eecd); + + /* Set CS */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_standby_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + + eecd = er32(EECD); + + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock high */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock low */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } +} + +/** + * e1000_release_eeprom - drop chip select + * @hw: Struct containing variables accessed by shared code + * + * Terminates a command by inverting the EEPROM's chip select pin + */ +static void e1000_release_eeprom(struct e1000_hw *hw) +{ + u32 eecd; + + e_dbg("e1000_release_eeprom"); + + eecd = er32(EECD); + + if (hw->eeprom.type == e1000_eeprom_spi) { + eecd |= E1000_EECD_CS; /* Pull CS high */ + eecd &= ~E1000_EECD_SK; /* Lower SCK */ + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(hw->eeprom.delay_usec); + } else if (hw->eeprom.type == e1000_eeprom_microwire) { + /* cleanup eeprom */ + + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + + ew32(EECD, eecd); + + /* Rising edge of clock */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + + /* Falling edge of clock */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + } + + /* Stop requesting EEPROM access */ + if (hw->mac_type > e1000_82544) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + } +} + +/** + * e1000_spi_eeprom_ready - Waits for the SPI EEPROM to finish the last command.
+ * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u8 spi_stat_reg; + + e_dbg("e1000_spi_eeprom_ready"); + + /* Read "Status Register" repeatedly until the LSB is cleared. The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + retry_count = 0; + do { + e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, + hw->eeprom.opcode_bits); + spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8); + if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + retry_count += 5; + + e1000_standby_eeprom(hw); + } while (retry_count < EEPROM_MAX_RETRY_SPI); + + /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and + * only 0-5mSec on 5V devices) + */ + if (retry_count >= EEPROM_MAX_RETRY_SPI) { + e_dbg("SPI EEPROM Status error\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_eeprom - Reads 16 bit words from the EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: words read from the EEPROM + */ +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + spin_lock(&e1000_eeprom_lock); + ret = e1000_do_read_eeprom(hw, offset, words, data); + spin_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 i = 0; + + e_dbg("e1000_read_eeprom"); + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* If eeprom is not yet detected, do so now */ + if (eeprom->word_size == 0) + e1000_init_eeprom_params(hw); + + /* A check for invalid values: offset too large, too many words, and + * not enough words. + */ + if ((offset >= eeprom->word_size) + || (words > eeprom->word_size - offset) || (words == 0)) { + e_dbg("\"words\" parameter out of bounds. offset = %d, " + "words = %d, size = %d\n", offset, words, + eeprom->word_size); + return -E1000_ERR_EEPROM; + } + + /* EEPROMs that don't use EERD to read require us to bit-bang the SPI + * directly. In this case, we need to acquire the EEPROM so that + * FW or other port software does not interrupt. + */ + /* Prepare the EEPROM for bit-bang reading */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have + * acquired the EEPROM at this point, so any returns should release it + */ + if (eeprom->type == e1000_eeprom_spi) { + u16 word_in; + u8 read_opcode = EEPROM_READ_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) { + e1000_release_eeprom(hw); + return -E1000_ERR_EEPROM; + } + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + read_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16) (offset * 2), + eeprom->address_bits); + + /* Read the data. The address of the eeprom internally + * increments with each byte (spi) being read, saving on the + * overhead of eeprom setup and tear-down.
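/*
 * In the read loop below, each 16-bit word arrives from the SPI part high
 * byte first, so the driver swaps bytes with (word_in >> 8) | (word_in << 8),
 * the same expression the E1000_BYTE_SWAP_WORD macro in e1000_hw.h captures.
 * A standalone check of that identity; swap16 is a hypothetical helper, not
 * a driver symbol.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t swap16(uint16_t w)
{
    /* Same expression the driver applies to each word read over SPI. */
    return (uint16_t)((w >> 8) | (w << 8));
}

int main(void)
{
    assert(swap16(0x12AB) == 0xAB12);         /* bytes exchanged */
    assert(swap16(swap16(0x12AB)) == 0x12AB); /* swapping twice is identity */
    return 0;
}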
The address counter + * will roll over if reading beyond the size of the eeprom, thus + * allowing the entire memory to be read starting from any + * offset. + */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_ee_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + } else if (eeprom->type == e1000_eeprom_microwire) { + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, + EEPROM_READ_OPCODE_MICROWIRE, + eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16) (offset + i), + eeprom->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of eeprom setup and tear-down. + */ + data[i] = e1000_shift_in_ee_bits(hw, 16); + e1000_standby_eeprom(hw); + } + } + + /* End this read operation */ + e1000_release_eeprom(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum + * @hw: Struct containing variables accessed by shared code + * + * Reads the first 64 16 bit words of the EEPROM and sums the values read. + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * valid. + */ +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + e_dbg("e1000_validate_eeprom_checksum"); + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + +#ifdef CONFIG_PARISC + /* This is a signature and not a checksum on HP c8000 */ + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) + return E1000_SUCCESS; + +#endif + if (checksum == (u16) EEPROM_SUM) + return E1000_SUCCESS; + else { + e_dbg("EEPROM Checksum Invalid\n"); + return -E1000_ERR_EEPROM; + } +} + +/** + * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum + * @hw: Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + */ +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + e_dbg("e1000_update_eeprom_checksum"); + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + checksum = (u16) EEPROM_SUM - checksum; + if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { + e_dbg("EEPROM Write Error\n"); + return -E1000_ERR_EEPROM; + } + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom - write words to the different EEPROM types. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit words to be written to the EEPROM + * + * If e1000_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum.
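/*
 * The two checksum routines above implement one identity: the first
 * EEPROM_CHECKSUM_REG + 1 (64) words must sum to EEPROM_SUM (0xBABA) modulo
 * 2^16, so the checksum word is written as 0xBABA minus the sum of the 63
 * words before it. A self-contained model over an in-memory image follows;
 * the img_* names are hypothetical, and u16 arithmetic wraps exactly as in
 * the driver.
 */
#include <stdbool.h>
#include <stdint.h>

#define IMG_CHECKSUM_REG 63      /* word offset holding the checksum */
#define IMG_SUM          0xBABA  /* expected 16-bit sum of words 0..63 */

/* Recompute word 63 so that words 0..63 sum to 0xBABA (mod 65536). */
static void img_update_checksum(uint16_t *img)
{
    uint16_t sum = 0;
    for (int i = 0; i < IMG_CHECKSUM_REG; i++)
        sum = (uint16_t)(sum + img[i]);
    img[IMG_CHECKSUM_REG] = (uint16_t)(IMG_SUM - sum);
}

/* Mirror of e1000_validate_eeprom_checksum's arithmetic. */
static bool img_checksum_ok(const uint16_t *img)
{
    uint16_t sum = 0;
    for (int i = 0; i <= IMG_CHECKSUM_REG; i++)
        sum = (uint16_t)(sum + img[i]);
    return sum == IMG_SUM;
}
/* After img_update_checksum() runs, img_checksum_ok() holds for any contents
 * of words 0..62, because the subtraction and the final addition cancel
 * modulo 2^16. */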
+ */ +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + spin_lock(&e1000_eeprom_lock); + ret = e1000_do_write_eeprom(hw, offset, words, data); + spin_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + s32 status = 0; + + e_dbg("e1000_write_eeprom"); + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* If eeprom is not yet detected, do so now */ + if (eeprom->word_size == 0) + e1000_init_eeprom_params(hw); + + /* A check for invalid values: offset too large, too many words, and + * not enough words. + */ + if ((offset >= eeprom->word_size) + || (words > eeprom->word_size - offset) || (words == 0)) { + e_dbg("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* Prepare the EEPROM for writing */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + if (eeprom->type == e1000_eeprom_microwire) { + status = e1000_write_eeprom_microwire(hw, offset, words, data); + } else { + status = e1000_write_eeprom_spi(hw, offset, words, data); + msleep(10); + } + + /* Done with writing */ + e1000_release_eeprom(hw); + + return status; +} + +/** + * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 16 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u16 widx = 0; + + e_dbg("e1000_write_eeprom_spi"); + + while (widx < words) { + u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) + return -E1000_ERR_EEPROM; + + e1000_standby_eeprom(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, + eeprom->opcode_bits); + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + write_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2), + eeprom->address_bits); + + /* Send the data */ + + /* Loop to allow for up to whole page write (32 bytes) of + * eeprom + */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_ee_bits(hw, word_out, 16); + widx++; + + /* Some larger eeprom sizes are capable of a 32-byte + * PAGE WRITE operation, while the smaller eeproms are + * capable of an 8-byte PAGE WRITE operation. Break the + * inner loop to pass new address + */ + if ((((offset + widx) * 2) % eeprom->page_size) == 0) { + e1000_standby_eeprom(hw); + break; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM.
+ * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 16 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u16 words_written = 0; + u16 i = 0; + + e_dbg("e1000_write_eeprom_microwire"); + + /* Send the write enable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 11). It's less work to include + * the 11 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This puts the + * EEPROM into write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, + (u16) (eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); + + /* Prepare the EEPROM */ + e1000_standby_eeprom(hw); + + while (words_written < words) { + /* Send the Write command (3-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, + eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16) (offset + words_written), + eeprom->address_bits); + + /* Send the data */ + e1000_shift_out_ee_bits(hw, data[words_written], 16); + + /* Toggle the CS line. This in effect tells the EEPROM to + * execute the previous command. + */ + e1000_standby_eeprom(hw); + + /* Read DO repeatedly until it is high (equal to '1'). The + * EEPROM will signal that the command has been completed by + * raising the DO signal. If DO does not go high in 10 + * milliseconds, then error out. + */ + for (i = 0; i < 200; i++) { + eecd = er32(EECD); + if (eecd & E1000_EECD_DO) + break; + udelay(50); + } + if (i == 200) { + e_dbg("EEPROM Write did not complete\n"); + return -E1000_ERR_EEPROM; + } + + /* Recover from write */ + e1000_standby_eeprom(hw); + + words_written++; + } + + /* Send the write disable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 10). It's less work to include + * the 10 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This takes the + * EEPROM out of write/erase mode.
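/*
 * The completion wait above is a bounded poll: sample DO, and if it is still
 * low, delay 50 microseconds and retry, giving up after 200 samples (about
 * 10 ms). The same pattern factored out, with the driver's constants named;
 * poll_done, ready, and delay_us are illustrative, not driver functions.
 */
#include <stdbool.h>

#define POLL_INTERVAL_US 50  /* delay between samples, as in the loop above */
#define POLL_MAX_TRIES   200 /* 200 * 50us is roughly a 10 ms budget */

/* Returns true if ready() reported completion within the budget; the driver
 * maps the false case to -E1000_ERR_EEPROM. */
static bool poll_done(bool (*ready)(void *ctx), void (*delay_us)(unsigned),
                      void *ctx)
{
    for (int i = 0; i < POLL_MAX_TRIES; i++) {
        if (ready(ctx))
            return true;
        delay_us(POLL_INTERVAL_US);
    }
    return false;
}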
+ */ + e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, + (u16) (eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - read the adapter's MAC from eeprom + * @hw: Struct containing variables accessed by shared code + * + * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the + * second function of dual function devices + */ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + u16 offset; + u16 eeprom_data, i; + + e_dbg("e1000_read_mac_addr"); + + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + offset = i >> 1; + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8); + } + + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) + hw->perm_mac_addr[5] ^= 0x01; + break; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return E1000_SUCCESS; +} + +/** + * e1000_init_rx_addrs - Initializes receive address filters. + * @hw: Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + */ +static void e1000_init_rx_addrs(struct e1000_hw *hw) +{ + u32 i; + u32 rar_num; + + e_dbg("e1000_init_rx_addrs"); + + /* Setup the receive address. */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set(hw, hw->mac_addr, 0); + + rar_num = E1000_RAR_ENTRIES; + + /* Zero out the other 15 receive addresses. */ + e_dbg("Clearing RAR[1-15]\n"); + for (i = 1; i < rar_num; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table + * @hw: Struct containing variables accessed by shared code + * @mc_addr: the multicast address to hash + */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value = 0; + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + */ + case 0: + /* [47:36] i.e. 0x563 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); + break; + case 1: + /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); + break; + case 2: + /* [45:34] i.e. 0x58D for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); + break; + case 3: + /* [43:32] i.e. 0x634 for above example address */ + hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); + break; + } + + hash_value &= 0xFFF; + return hash_value; +} + +/** + * e1000_rar_set - Puts an ethernet address into a receive address register.
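/*
 * Each filter type in e1000_hash_mc_addr above slices a different 12-bit
 * window out of the top 16 bits of the address (bytes 4 and 5, the MSB
 * side). A standalone recomputation of the worked examples from the
 * comments; mta_hash is a hypothetical helper, not driver code. Note that
 * filter type 2 yields 0x58D for the example address, i.e. (0x34 >> 2) |
 * (0x56 << 6), masked to 12 bits.
 */
#include <assert.h>
#include <stdint.h>

/* Slice a 12-bit hash window from the two high-order address bytes,
 * exactly as the switch above does for filter types 0..3. */
static uint16_t mta_hash(const uint8_t *mc, int filter_type)
{
    static const int shift[4] = { 4, 3, 2, 0 }; /* right-shift of byte 4 */
    uint16_t h = (uint16_t)((mc[4] >> shift[filter_type]) |
                            ((uint16_t)mc[5] << (8 - shift[filter_type])));
    return h & 0xFFF;
}

int main(void)
{
    const uint8_t mc[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
    assert(mta_hash(mc, 0) == 0x563);
    assert(mta_hash(mc, 1) == 0xAC6);
    assert(mta_hash(mc, 2) == 0x58D);
    assert(mta_hash(mc, 3) == 0x634);
    return 0;
}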
+ * @hw: Struct containing variables accessed by shared code + * @addr: Address to put into receive address register + * @index: Receive address register to write + */ +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx + * unit hang. + * + * Description: + * If there are any Rx frames queued up or otherwise present in the HW + * before RSS is enabled, and then we enable RSS, the HW Rx unit will + * hang. To work around this issue, we have to disable receives and + * flush out all Rx frames before we enable RSS. To do so, we + * redirect all Rx traffic to manageability and then reset the HW. + * This flushes away Rx frames, and (since the redirections to + * manageability persist across resets) keeps new ones from coming in + * while we work. Then, we clear the Address Valid (AV) bit for all MAC + * addresses and undo the re-direction to manageability. + * Now, frames are coming in again, but the MAC won't accept them, so + * far so good. We now proceed to initialize RSS (if necessary) and + * configure the Rx unit. Last, we re-enable the AV bits and continue + * on our merry way. + */ + switch (hw->mac_type) { + default: + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + break; + } + + E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table. + * @hw: Struct containing variables accessed by shared code + * @offset: Offset in VLAN filter table to write + * @value: Value to write into VLAN filter table + */ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + u32 temp; + + if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { + temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); + E1000_WRITE_FLUSH(); + } else { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_clear_vfta - Clears the VLAN filter table + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + u32 vfta_value = 0; + u32 vfta_offset = 0; + u32 vfta_bit_in_reg = 0; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* If the offset we want to clear is the same offset of the + * manageability VLAN ID, then clear all bits except that of the + * manageability unit + */ + vfta_value = (offset == vfta_offset) ?
vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); + E1000_WRITE_FLUSH(); + } +} + +static s32 e1000_id_led_init(struct e1000_hw *hw) +{ + u32 ledctl; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 eeprom_data, i, temp; + const u16 led_mask = 0x0F; + + e_dbg("e1000_id_led_init"); + + if (hw->mac_type < e1000_82540) { + /* Nothing to do */ + return E1000_SUCCESS; + } + + ledctl = er32(LEDCTL); + hw->ledctl_default = ledctl; + hw->ledctl_mode1 = hw->ledctl_default; + hw->ledctl_mode2 = hw->ledctl_default; + + if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + + if ((eeprom_data == ID_LED_RESERVED_0000) || + (eeprom_data == ID_LED_RESERVED_FFFF)) { + eeprom_data = ID_LED_DEFAULT; + } + + for (i = 0; i < 4; i++) { + temp = (eeprom_data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_setup_led + * @hw: Struct containing variables accessed by shared code + * + * Prepares SW controllable LED for use and saves the current state of the LED. + */ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + e_dbg("e1000_setup_led"); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No setup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn off PHY Smart Power Down (if enabled) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, + &hw->phy_spd_default); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + (u16) (hw->phy_spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + return ret_val; + /* Fall Through */ + default: + if (hw->media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + /* Save current LEDCTL settings */ + hw->ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->media_type == e1000_media_type_copper) + ew32(LEDCTL, hw->ledctl_mode1); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores the saved state of the SW controllable LED.
+ * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + e_dbg("e1000_cleanup_led"); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No cleanup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn on PHY Smart Power Down (if previously enabled) */ + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + hw->phy_spd_default); + if (ret_val) + return ret_val; + /* Fall Through */ + default: + /* Restore LEDCTL settings */ + ew32(LEDCTL, hw->ledctl_default); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turns on the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_on(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + e_dbg("e1000_led_on"); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Set SW Definable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Definable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Clear SW Definable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Definable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode2); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turns off the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_off(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + e_dbg("e1000_led_off"); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Clear SW Definable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Definable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Set SW Definable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Definable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode1); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs - Clears all hardware statistics counters.
+ * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_hw_cntrs(struct e1000_hw *hw) +{ + volatile u32 temp; + + temp = er32(CRCERRS); + temp = er32(SYMERRS); + temp = er32(MPC); + temp = er32(SCC); + temp = er32(ECOL); + temp = er32(MCC); + temp = er32(LATECOL); + temp = er32(COLC); + temp = er32(DC); + temp = er32(SEC); + temp = er32(RLEC); + temp = er32(XONRXC); + temp = er32(XONTXC); + temp = er32(XOFFRXC); + temp = er32(XOFFTXC); + temp = er32(FCRUC); + + temp = er32(PRC64); + temp = er32(PRC127); + temp = er32(PRC255); + temp = er32(PRC511); + temp = er32(PRC1023); + temp = er32(PRC1522); + + temp = er32(GPRC); + temp = er32(BPRC); + temp = er32(MPRC); + temp = er32(GPTC); + temp = er32(GORCL); + temp = er32(GORCH); + temp = er32(GOTCL); + temp = er32(GOTCH); + temp = er32(RNBC); + temp = er32(RUC); + temp = er32(RFC); + temp = er32(ROC); + temp = er32(RJC); + temp = er32(TORL); + temp = er32(TORH); + temp = er32(TOTL); + temp = er32(TOTH); + temp = er32(TPR); + temp = er32(TPT); + + temp = er32(PTC64); + temp = er32(PTC127); + temp = er32(PTC255); + temp = er32(PTC511); + temp = er32(PTC1023); + temp = er32(PTC1522); + + temp = er32(MPTC); + temp = er32(BPTC); + + if (hw->mac_type < e1000_82543) + return; + + temp = er32(ALGNERRC); + temp = er32(RXERRC); + temp = er32(TNCRS); + temp = er32(CEXTERR); + temp = er32(TSCTC); + temp = er32(TSCTFC); + + if (hw->mac_type <= e1000_82544) + return; + + temp = er32(MGTPRC); + temp = er32(MGTPDC); + temp = er32(MGTPTC); +} + +/** + * e1000_reset_adaptive - Resets Adaptive IFS to its default state. + * @hw: Struct containing variables accessed by shared code + * + * Call this after e1000_init_hw. You may override the IFS defaults by setting + * hw->ifs_params_forced to true. However, you must initialize hw-> + * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio + * before calling this function. + */ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + e_dbg("e1000_reset_adaptive"); + + if (hw->adaptive_ifs) { + if (!hw->ifs_params_forced) { + hw->current_ifs_val = 0; + hw->ifs_min_val = IFS_MIN; + hw->ifs_max_val = IFS_MAX; + hw->ifs_step_size = IFS_STEP; + hw->ifs_ratio = IFS_RATIO; + } + hw->in_ifs_mode = false; + ew32(AIT, 0); + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_update_adaptive - update adaptive IFS + * @hw: Struct containing variables accessed by shared code + * + * Called during the callback/watchdog routine to update IFS value based on + * the ratio of transmits to collisions.
+ */ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + e_dbg("e1000_update_adaptive"); + + if (hw->adaptive_ifs) { + if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) { + if (hw->tx_packet_delta > MIN_NUM_XMITS) { + hw->in_ifs_mode = true; + if (hw->current_ifs_val < hw->ifs_max_val) { + if (hw->current_ifs_val == 0) + hw->current_ifs_val = + hw->ifs_min_val; + else + hw->current_ifs_val += + hw->ifs_step_size; + ew32(AIT, hw->current_ifs_val); + } + } + } else { + if (hw->in_ifs_mode + && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + hw->current_ifs_val = 0; + hw->in_ifs_mode = false; + ew32(AIT, 0); + } + } + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_tbi_adjust_stats + * @hw: Struct containing variables accessed by shared code + * @stats: Struct containing statistics register values + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + */ +void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, + u32 frame_len, u8 *mac_addr) +{ + u64 carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff)) + /* Broadcast packet */ + stats->bprc++; + else if (*mac_addr & 0x01) + /* Multicast packet */ + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above.
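/*
 * Stripped of the register write, the adaptive-IFS update above is a small
 * state machine: enter IFS mode when collisions outpace transmits by the
 * configured ratio, back the IFS value off in steps up to the maximum, and
 * snap back to zero once traffic looks healthy again. A compact model of
 * the same decision; the struct, function name, and MIN_XMITS value are
 * illustrative stand-ins for the driver's fields and MIN_NUM_XMITS.
 */
#include <stdbool.h>
#include <stdint.h>

struct ifs_state {
    bool in_ifs_mode;
    uint16_t cur, min, max, step, ratio;
};

#define MIN_XMITS 1000 /* stand-in for MIN_NUM_XMITS */

/* Returns the AIT value the driver would program after one watchdog tick. */
static uint16_t ifs_update(struct ifs_state *s, uint32_t collisions,
                           uint32_t tx_packets)
{
    if (collisions * s->ratio > tx_packets) {
        if (tx_packets > MIN_XMITS) {
            s->in_ifs_mode = true;
            if (s->cur < s->max)
                s->cur = s->cur ? (uint16_t)(s->cur + s->step) : s->min;
        }
    } else if (s->in_ifs_mode && tx_packets <= MIN_XMITS) {
        s->cur = 0; /* healthy again: drop back to no extra IFS */
        s->in_ifs_mode = false;
    }
    return s->cur;
}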
+ */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +/** + * e1000_get_bus_info + * @hw: Struct containing variables accessed by shared code + * + * Gets the current PCI bus type, speed, and width of the hardware + */ +void e1000_get_bus_info(struct e1000_hw *hw) +{ + u32 status; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->bus_type = e1000_bus_type_pci; + hw->bus_speed = e1000_bus_speed_unknown; + hw->bus_width = e1000_bus_width_unknown; + break; + default: + status = er32(STATUS); + hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? + e1000_bus_type_pcix : e1000_bus_type_pci; + + if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? + e1000_bus_speed_66 : e1000_bus_speed_120; + } else if (hw->bus_type == e1000_bus_type_pci) { + hw->bus_speed = (status & E1000_STATUS_PCI66) ? + e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + hw->bus_speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + hw->bus_speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + hw->bus_speed = e1000_bus_speed_133; + break; + default: + hw->bus_speed = e1000_bus_speed_reserved; + break; + } + } + hw->bus_width = (status & E1000_STATUS_BUS64) ? + e1000_bus_width_64 : e1000_bus_width_32; + break; + } +} + +/** + * e1000_write_reg_io + * @hw: Struct containing variables accessed by shared code + * @offset: offset to write to + * @value: value to write + * + * Writes a value to one of the devices registers using port I/O (as opposed to + * memory mapped I/O). Only 82544 and newer devices support port I/O. + */ +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) +{ + unsigned long io_addr = hw->io_base; + unsigned long io_data = hw->io_base + 4; + + e1000_io_write(hw, io_addr, offset); + e1000_io_write(hw, io_data, value); +} + +/** + * e1000_get_cable_length - Estimates the cable length. + * @hw: Struct containing variables accessed by shared code + * @min_length: The estimated minimum length + * @max_length: The estimated maximum length + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * This function always returns a ranged length (minimum & maximum). + * So for M88 phy's, this function interprets the one value returned from the + * register to the minimum and maximum range. + * For IGP phy's, the function calculates the range by the AGC registers. 
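/*
 * The GORCL/GORCH handling in e1000_tbi_adjust_stats above is 64-bit
 * addition done by hand: the two registers form a split 64-bit counter, and
 * a carry out of bit 31 is detected by the high bit flipping from set to
 * clear across the add (valid here because frame lengths are far below
 * 2^31). A minimal reproduction of that test in plain C; add_split64 is
 * illustrative, not a driver function.
 */
#include <assert.h>
#include <stdint.h>

/* Add len to a counter split into low/high halves, detecting carry the same
 * way the driver does: bit 31 was set before the add and clear after. */
static void add_split64(uint32_t *lo, uint32_t *hi, uint32_t len)
{
    uint32_t had_msb = *lo & 0x80000000u;
    *lo += len;
    if (had_msb && !(*lo & 0x80000000u))
        (*hi)++;
}

int main(void)
{
    uint32_t lo = 0xFFFFFFF0u, hi = 0;
    add_split64(&lo, &hi, 0x20);
    assert(lo == 0x10 && hi == 1); /* carry propagated into the high half */
    return 0;
}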
+ */ +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length) +{ + s32 ret_val; + u16 agc_value = 0; + u16 i, phy_data; + u16 cable_length; + + e_dbg("e1000_get_cable_length"); + + *min_length = *max_length = 0; + + /* Use old method for Phy older than IGP */ + if (hw->phy_type == e1000_phy_m88) { + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + /* Convert the enum value to ranged values */ + switch (cable_length) { + case e1000_cable_length_50: + *min_length = 0; + *max_length = e1000_igp_cable_length_50; + break; + case e1000_cable_length_50_80: + *min_length = e1000_igp_cable_length_50; + *max_length = e1000_igp_cable_length_80; + break; + case e1000_cable_length_80_110: + *min_length = e1000_igp_cable_length_80; + *max_length = e1000_igp_cable_length_110; + break; + case e1000_cable_length_110_140: + *min_length = e1000_igp_cable_length_110; + *max_length = e1000_igp_cable_length_140; + break; + case e1000_cable_length_140: + *min_length = e1000_igp_cable_length_140; + *max_length = e1000_igp_cable_length_170; + break; + default: + return -E1000_ERR_PHY; + break; + } + } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ + u16 cur_agc_value; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D + }; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + + ret_val = + e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Value bound check. */ + if ((cur_agc_value >= + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) + || (cur_agc_value == 0)) + return -E1000_ERR_PHY; + + agc_value += cur_agc_value; + + /* Update minimal AGC value. */ + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < + IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { + agc_value -= min_agc_value; + + /* Get the average length of the remaining 3 channels */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Get the average length of all the 4 channels. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + /* Set the range of the calculated length. */ + *min_length = ((e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) > 0) ? + (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) : 0; + *max_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_polarity - Check the cable polarity + * @hw: Struct containing variables accessed by shared code + * @polarity: output parameter : 0 - Polarity is not reversed + * 1 - Polarity is reversed. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function simply reads the polarity bit in the + * Phy Status register. For IGP phy's, this bit is valid only if link speed is + * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will + * return 0. If the link speed is 1000 Mbps the polarity status is in the + * IGP01E1000_PHY_PCS_INIT_REG. 
+ */ +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity) +{ + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_check_polarity"); + + if (hw->phy_type == e1000_phy_m88) { + /* return the Polarity bit in the Status register. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> + M88E1000_PSSR_REV_POLARITY_SHIFT) ? + e1000_rev_polarity_reversed : e1000_rev_polarity_normal; + + } else if (hw->phy_type == e1000_phy_igp) { + /* Read the Status register to check the speed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + /* If speed is 1000 Mbps, must read the + * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status + */ + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + + /* Read the GIG initialization PCS register (0x00B4) */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, + &phy_data); + if (ret_val) + return ret_val; + + /* Check the polarity bits */ + *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } else { + /* For 10 Mbps, read the polarity bit in the status + * register. (for 100 Mbps this bit is always 0) + */ + *polarity = + (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_downshift - Check if Downshift occurred + * @hw: Struct containing variables accessed by shared code + * @downshift: output parameter : 0 - No Downshift occurred. + * 1 - Downshift occurred. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function reads the Downshift bit in the Phy + * Specific Status register. For IGP phy's, it reads the Downgrade bit in the + * Link Health register. In IGP this bit is latched high, so the driver must + * read it immediately after link is established. + */ +static s32 e1000_check_downshift(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + e_dbg("e1000_check_downshift"); + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = + (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 
1 : 0; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> + M88E1000_PSSR_DOWNSHIFT_SHIFT; + } + + return E1000_SUCCESS; +} + +static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D +}; + +static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) +{ + u16 min_length, max_length; + u16 phy_data, i; + s32 ret_val; + + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + if (hw->dsp_config_state != e1000_dsp_config_enabled) + return 0; + + if (min_length >= e1000_igp_cable_length_50) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = e1000_dsp_config_activated; + } else { + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u32 idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + udelay(1000); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + hw->ffe_config_state = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + return ret_val; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + + return 0; +} + +/** + * e1000_config_dsp_after_link_change + * @hw: Struct containing variables accessed by shared code + * @link_up: was link up at the time this was called + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + */ + +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) +{ + s32 ret_val; + u16 phy_data, phy_saved_data, speed, duplex, i; + + e_dbg("e1000_config_dsp_after_link_change"); + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (speed == SPEED_1000) { + ret_val = e1000_1000Mb_check_cable_length(hw); + if (ret_val) + return ret_val; + } + } else { + if (hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. 
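/*
 * The idle-error scan in e1000_1000Mb_check_cable_length above uses an
 * escalating budget: start with a short window, and once any idle errors
 * appear, widen the window while accumulating the count; crossing the limit
 * is what switches the FFE configuration. The control flow in isolation:
 * the constants and names below are stand-ins for
 * FFE_IDLE_ERR_COUNT_TIMEOUT_20/100 and
 * SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT, whose real values live in the
 * driver headers.
 */
#include <stdbool.h>
#include <stdint.h>

#define TIMEOUT_SHORT 20  /* stand-in for FFE_IDLE_ERR_COUNT_TIMEOUT_20 */
#define TIMEOUT_LONG  100 /* stand-in for FFE_IDLE_ERR_COUNT_TIMEOUT_100 */
#define ERR_LIMIT     24  /* stand-in for the excessive-idle-error limit */

/* Accumulate per-millisecond idle-error samples; returns true when the
 * limit is crossed, i.e. when the driver would activate the FFE config. */
static bool idle_errs_excessive(uint16_t (*sample)(void *ctx), void *ctx)
{
    uint32_t idle_errs = 0;
    int window = TIMEOUT_SHORT;

    for (int i = 0; i < window; i++) {
        idle_errs += sample(ctx); /* low bits of PHY_1000T_STATUS */
        if (idle_errs > ERR_LIMIT)
            return true;
        if (idle_errs)
            window = TIMEOUT_LONG; /* errors seen: widen the window */
    }
    return false;
}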
+ */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = + e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->dsp_config_state = e1000_dsp_config_enabled; + } + + if (hw->ffe_config_state == e1000_ffe_config_active) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. + */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->ffe_config_state = e1000_ffe_config_enabled; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_set_phy_mode - Set PHY to class A mode + * @hw: Struct containing variables accessed by shared code + * + * Assumes the following operations will follow to enable the new class mode. + * 1. Do a PHY soft reset + * 2. Restart auto-negotiation or force link. + */ +static s32 e1000_set_phy_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_data; + + e_dbg("e1000_set_phy_mode"); + + if ((hw->mac_type == e1000_82545_rev_3) && + (hw->media_type == e1000_media_type_copper)) { + ret_val = + e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, + &eeprom_data); + if (ret_val) { + return ret_val; + } + + if ((eeprom_data != EEPROM_RESERVED_WORD) && + (eeprom_data & EEPROM_PHY_CLASS_A)) { + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) + return ret_val; + + hw->phy_reset_disable = false; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - set d3 link power state + * @hw: Struct containing variables accessed by shared code + * @active: true to enable lplu, false to disable lplu. + * + * This function sets the lplu state according to the active flag. When + * activating lplu this function also disables smart speed and vice versa. + * lplu will not be activated unless the device autonegotiation advertisement + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes.
+ * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + */ +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 phy_data; + e_dbg("e1000_set_d3_lplu_state"); + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + /* During driver activity LPLU should not be used or it will attain link + * from the lowest speeds starting from 10Mbps. The capability is used + * for Dx transitions and states + */ + if (hw->mac_type == e1000_82541_rev_2 + || hw->mac_type == e1000_82547_rev_2) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) + || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) + || (hw->autoneg_advertised == + AUTONEG_ADVERTISE_10_100_ALL)) { + + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + + } + return E1000_SUCCESS; +} + +/** + * e1000_set_vco_speed + * @hw: Struct containing variables accessed by shared code + * + * Change VCO speed register to improve Bit Error Rate performance of SERDES. 
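/*
 * The D3 LPLU routine above encodes a simple mutual exclusion: when LPLU is
 * activated, SmartSpeed must be cleared, and when LPLU is off, SmartSpeed
 * follows its configured setting. One caveat for the sketch below: the
 * driver leaves the bit untouched for the "default" configuration, which is
 * modeled here as clear. Names and types are illustrative only.
 */
#include <stdbool.h>

enum smart_speed_cfg { SPEED_DEFAULT, SPEED_ON, SPEED_OFF };

/* Decide the SmartSpeed bit from the LPLU state, as the D3 routine does:
 * LPLU active forces SmartSpeed off; otherwise honor the configuration. */
static bool smart_speed_bit(bool lplu_active, enum smart_speed_cfg cfg)
{
    if (lplu_active)
        return false;       /* mutually exclusive with LPLU */
    return cfg == SPEED_ON; /* SPEED_OFF, and SPEED_DEFAULT as modeled here,
                             * leave the bit clear */
}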
+ */ +static s32 e1000_set_vco_speed(struct e1000_hw *hw) +{ + s32 ret_val; + u16 default_page = 0; + u16 phy_data; + + e_dbg("e1000_set_vco_speed"); + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + + +/** + * e1000_enable_mng_pass_thru - check for bmc pass through + * @hw: Struct containing variables accessed by shared code + * + * Verifies the hardware needs to allow ARPs to be processed by the host + * returns: - true/false + */ +u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + + if (hw->asf_firmware_present) { + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return false; + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + return true; + } + return false; +} + +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 i; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the NO link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be clear. 
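/*
 * e1000_enable_mng_pass_thru above reduces to three bit tests on MANC: TCO
 * receive and MAC address filtering must both be enabled, and the management
 * controller must be reachable over SMBus without ASF. The same decision in
 * isolation; the mask values below are illustrative stand-ins, since the
 * real E1000_MANC_* definitions live in e1000_hw.h.
 */
#include <stdbool.h>
#include <stdint.h>

#define MANC_SMBUS_EN           0x00000001u /* stand-ins for E1000_MANC_* */
#define MANC_ASF_EN             0x00000002u
#define MANC_RCV_TCO_EN         0x00020000u
#define MANC_EN_MAC_ADDR_FILTER 0x00100000u

/* Mirror of the driver's pass-through decision on the MANC register. */
static bool mng_pass_thru(bool asf_firmware_present, uint32_t manc)
{
    if (!asf_firmware_present)
        return false;
    if (!(manc & MANC_RCV_TCO_EN) || !(manc & MANC_EN_MAC_ADDR_FILTER))
        return false;
    return (manc & MANC_SMBUS_EN) && !(manc & MANC_ASF_EN);
}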
+ */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) + break; + msleep(100); + } + + /* Recommended delay time after link has been lost */ + msleep(1000); + + /* Now we will re-enable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be set. + */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_get_auto_rd_done + * @hw: Struct containing variables accessed by shared code + * + * Check for EEPROM Auto Read bit done. + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) +{ + e_dbg("e1000_get_auto_rd_done"); + msleep(5); + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_cfg_done + * @hw: Struct containing variables accessed by shared code + * + * Checks if the PHY configuration is done + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + e_dbg("e1000_get_phy_cfg_done"); + msleep(10); + return E1000_SUCCESS; +} diff --git a/addons/e1000/src/3.10.108/e1000_hw.h b/addons/e1000/src/3.10.108/e1000_hw.h new file mode 100644 index 00000000..11578c89 --- /dev/null +++ b/addons/e1000/src/3.10.108/e1000_hw.h @@ -0,0 +1,3112 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" + + +/* Forward declarations of structures used by the shared code */ +struct e1000_hw; +struct e1000_hw_stats; + +/* Enumerated types specific to the e1000 hardware */ +/* Media Access Controllers */ +typedef enum { + e1000_undefined = 0, + e1000_82542_rev2_0, + e1000_82542_rev2_1, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_ce4100, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_num_macs +} e1000_mac_type; + +typedef enum { + e1000_eeprom_uninitialized = 0, + e1000_eeprom_spi, + e1000_eeprom_microwire, + e1000_eeprom_flash, + e1000_eeprom_none, /* No NVM support */ + e1000_num_eeprom_types +} e1000_eeprom_type; + +/* Media Types */ +typedef enum { + e1000_media_type_copper = 0, + e1000_media_type_fiber = 1, + e1000_media_type_internal_serdes = 2, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_10_half = 0, + e1000_10_full = 1, + e1000_100_half = 2, + e1000_100_full = 3 +} e1000_speed_duplex_type; + +/* Flow Control Settings */ +typedef enum { + E1000_FC_NONE = 0, + E1000_FC_RX_PAUSE = 1, + E1000_FC_TX_PAUSE = 2, + E1000_FC_FULL = 3, + E1000_FC_DEFAULT = 0xFF +} e1000_fc_type; + +struct e1000_shadow_ram { + u16 eeprom_word; + bool modified; +}; + +/* PCI bus types */ +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_reserved +} e1000_bus_type; + +/* PCI bus speeds */ +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_reserved +} e1000_bus_speed; + +/* PCI bus widths */ +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +/* PHY status info structure and supporting enums */ +typedef enum { + e1000_cable_length_50 = 0, + e1000_cable_length_50_80, + e1000_cable_length_80_110, + e1000_cable_length_110_140, + e1000_cable_length_140, + e1000_cable_length_undefined = 0xFF +} e1000_cable_length; + +typedef enum { + e1000_gg_cable_length_60 = 0, + e1000_gg_cable_length_60_115 = 1, + e1000_gg_cable_length_115_150 = 2, + e1000_gg_cable_length_150 = 4 +} e1000_gg_cable_length; + +typedef enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180 +} e1000_igp_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower, + e1000_10bt_ext_dist_enable_undefined = 0xFF +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + 
e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + e1000_downshift_activated, + e1000_downshift_undefined = 0xFF +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled, + e1000_polarity_reversal_undefined = 0xFF +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix, + e1000_auto_x_mode_auto1, + e1000_auto_x_mode_auto2, + e1000_auto_x_mode_undefined = 0xFF +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_8211, + e1000_phy_8201, + e1000_phy_undefined = 0xFF +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +struct e1000_phy_info { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + u16 word_size; + u16 opcode_bits; + u16 address_bits; + u16 delay_usec; + u16 page_size; +}; + +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_EEPROM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_TYPE 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 + +#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ + (((_value) & 0xff00) >> 8)) + +/* Function prototypes */ +/* Initialization */ +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_set_mac_type(struct e1000_hw *hw); +void e1000_set_media_type(struct e1000_hw *hw); + +/* Link Configuration */ +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex); +s32 e1000_force_mac_fc(struct e1000_hw *hw); + +/* PHY */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_reset(struct e1000_hw *hw); +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); + +/* 
EEPROM Functions */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw); + +/* MNG HOST IF functions */ +u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw); + +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */ + +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */ +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */ +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */ +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_ICH_IAMT_MODE 0x2 +#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */ + +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */ +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* DHCP parsing enabled */ +#define E1000_VFTA_ENTRY_SHIFT 0x5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can be 0..0x658 bytes in length */ +}; +#ifdef __BIG_ENDIAN +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u16 vlan_id; + u8 reserved0; + u8 status; + u32 reserved1; + u8 checksum; + u8 reserved3; + u16 reserved2; +}; +#else +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; +#endif + +bool e1000_check_mng_mode(struct e1000_hw *hw); +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw); +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data); +s32 e1000_read_mac_addr(struct e1000_hw *hw); + +/* Filters (multicast, vlan, receive) */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr); +void e1000_mta_set(struct e1000_hw *hw, u32 hash_value); +void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index); +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value); + +/* LED functions */ +s32 e1000_setup_led(struct e1000_hw *hw); +s32 e1000_cleanup_led(struct e1000_hw *hw); +s32 e1000_led_on(struct e1000_hw *hw); +s32 e1000_led_off(struct e1000_hw *hw); +s32 e1000_blink_led_start(struct e1000_hw *hw); + +/* Adaptive IFS Functions */ + +/* Everything else */ +void e1000_reset_adaptive(struct e1000_hw *hw); +void e1000_update_adaptive(struct e1000_hw *hw); +void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, + u32 frame_len, u8 * mac_addr); +void e1000_get_bus_info(struct e1000_hw *hw); +void e1000_pci_set_mwi(struct e1000_hw *hw); +void e1000_pci_clear_mwi(struct e1000_hw *hw); +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc); +int e1000_pcix_get_mmrbc(struct e1000_hw *hw); +/* Port I/O is only supported on 82544 and newer */ +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value); + +#define E1000_READ_REG_IO(a, reg) \ + e1000_read_reg_io((a), E1000_##reg) +#define E1000_WRITE_REG_IO(a, reg, val) \ + e1000_write_reg_io((a), E1000_##reg, val) +
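+/* Editor's note: an illustrative, non-authoritative usage sketch, not part
+ * of the original driver. The EEPROM/NVM helpers declared above are
+ * typically exercised in this order during bring-up, given a populated
+ * struct e1000_hw *hw:
+ *
+ *	if (e1000_init_eeprom_params(hw) != E1000_SUCCESS ||
+ *	    e1000_validate_eeprom_checksum(hw) != E1000_SUCCESS ||
+ *	    e1000_read_mac_addr(hw) != E1000_SUCCESS)
+ *		bail out - the NVM is unusable and hw->mac_addr is not valid
+ */
+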
/* PCI Device IDs */ +#define E1000_DEV_ID_82542 0x1000 +#define E1000_DEV_ID_82543GC_FIBER 0x1001 +#define E1000_DEV_ID_82543GC_COPPER 0x1004 +#define E1000_DEV_ID_82544EI_COPPER 0x1008 +#define E1000_DEV_ID_82544EI_FIBER 0x1009 +#define E1000_DEV_ID_82544GC_COPPER 0x100C +#define E1000_DEV_ID_82544GC_LOM 0x100D +#define E1000_DEV_ID_82540EM 0x100E +#define E1000_DEV_ID_82540EM_LOM 0x1015 +#define E1000_DEV_ID_82540EP_LOM 0x1016 +#define E1000_DEV_ID_82540EP 0x1017 +#define E1000_DEV_ID_82540EP_LP 0x101E +#define E1000_DEV_ID_82545EM_COPPER 0x100F +#define E1000_DEV_ID_82545EM_FIBER 0x1011 +#define E1000_DEV_ID_82545GM_COPPER 0x1026 +#define E1000_DEV_ID_82545GM_FIBER 0x1027 +#define E1000_DEV_ID_82545GM_SERDES 0x1028 +#define E1000_DEV_ID_82546EB_COPPER 0x1010 +#define E1000_DEV_ID_82546EB_FIBER 0x1012 +#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D +#define E1000_DEV_ID_82541EI 0x1013 +#define E1000_DEV_ID_82541EI_MOBILE 0x1018 +#define E1000_DEV_ID_82541ER_LOM 0x1014 +#define E1000_DEV_ID_82541ER 0x1078 +#define E1000_DEV_ID_82547GI 0x1075 +#define E1000_DEV_ID_82541GI 0x1076 +#define E1000_DEV_ID_82541GI_MOBILE 0x1077 +#define E1000_DEV_ID_82541GI_LF 0x107C +#define E1000_DEV_ID_82546GB_COPPER 0x1079 +#define E1000_DEV_ID_82546GB_FIBER 0x107A +#define E1000_DEV_ID_82546GB_SERDES 0x107B +#define E1000_DEV_ID_82546GB_PCIE 0x108A +#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099 +#define E1000_DEV_ID_82547EI 0x1019 +#define E1000_DEV_ID_82547EI_MOBILE 0x101A +#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 +#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E + +#define NODE_ADDRESS_SIZE 6 + +/* MAC decode size is 128K - This is the size of BAR0 */ +#define MAC_DECODE_SIZE (128 * 1024) + +#define E1000_82542_2_0_REV_ID 2 +#define E1000_82542_2_1_REV_ID 3 +#define E1000_REVISION_0 0 +#define E1000_REVISION_1 1 +#define E1000_REVISION_2 2 +#define E1000_REVISION_3 3 + +#define SPEED_10 10 +#define SPEED_100 100 +#define SPEED_1000 1000 +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +/* The sizes (in bytes) of an Ethernet packet */ +#define ENET_HEADER_SIZE 14 +#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */ +#define ETHERNET_FCS_SIZE 4 +#define MINIMUM_ETHERNET_PACKET_SIZE \ + (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE) +#define CRC_LENGTH ETHERNET_FCS_SIZE +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* 802.1q VLAN Packet Sizes */ +#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */ + +/* Ethertype field values */ +#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */ +#define ETHERNET_IP_TYPE 0x0800 /* IP packets */ +#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */ + +/* Packet Header defines */ +#define IP_PROTOCOL_TCP 6 +#define IP_PROTOCOL_UDP 0x11 + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + */ +#define POLL_IMS_ENABLE_MASK ( \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ) + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. 
We + * reserve one of these spots for our directed address, allowing us room for + * E1000_RAR_ENTRIES - 1 multicast addresses. + */ +#define E1000_RAR_ENTRIES 15 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* mask to 
determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; /* */ + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; /* */ + u8 cmd; /* */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; /* */ + } fields; + } upper; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address Register */ +struct e1000_rar { + volatile 
__le32 low; /* receive address low */ + volatile __le32 high; /* receive address high */ +}; + +/* Number of entries in the Multicast Table Array (MTA). */ +#define E1000_NUM_MTA_REGISTERS 128 + +/* IPv4 Address Table Entry */ +struct e1000_ipv4_at_entry { + volatile u32 ipv4_addr; /* IP Address (RW) */ + volatile u32 reserved; +}; + +/* Four wakeup IP addresses are supported */ +#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 +#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX +#define E1000_IP6AT_SIZE 1 + +/* IPv6 Address Table Entry */ +struct e1000_ipv6_at_entry { + volatile u8 ipv6_addr[16]; +}; + +/* Flexible Filter Length Table Entry */ +struct e1000_fflt_entry { + volatile u32 length; /* Flexible Filter Length (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Mask Table Entry */ +struct e1000_ffmt_entry { + volatile u32 mask; /* Flexible Filter Mask (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Value Table Entry */ +struct e1000_ffvt_entry { + volatile u32 value; /* Flexible Filter Value (RW) */ + volatile u32 reserved; +}; + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. + * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ + +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt) +#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) +#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) +#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) +#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) + +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ + 
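+/* Editor's note: an illustrative, non-authoritative sketch, not original
+ * driver code. Per the conventions above, registers are memory-mapped and
+ * accessed as 32-bit values through hw->hw_addr, e.g. with the kernel's
+ * readl()/writel() accessors:
+ *
+ *	u32 ctrl = readl(hw->hw_addr + E1000_CTRL);
+ *	writel(ctrl | E1000_CTRL_SLU, hw->hw_addr + E1000_CTRL);
+ *
+ * R/clr counters such as E1000_CRCERRS are cleared by the read itself, so
+ * one read both samples and resets the count.
+ */
+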
+/* Auxiliary Control Register. This register is CE4100 specific, + * RMII/RGMII function is switched by this register - RW + * The following are the bit definitions of the Auxiliary Control Register + */ +#define E1000_CTL_AUX 0x000E0 +#define E1000_CTL_AUX_END_SEL_SHIFT 10 +#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 + +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use CTL_AUX.ENDIANESS, packet use default */ +#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use default, packet use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) +/* all use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) + +#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) +#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) + +/* LW little endian, Byte big endian */ +#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) +
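+/* Editor's note: an illustrative, non-authoritative sketch, not original
+ * driver code. On CE4100 the interface and endianness selections above are
+ * OR-ed together and written to E1000_CTL_AUX, e.g. RMII mode with
+ * CTL_AUX-controlled byte order for both descriptors and packet data:
+ *
+ *	writel(E1000_CTL_AUX_RMII | E1000_CTL_AUX_ALL | E1000_CTL_AUX_LWLE_BBE,
+ *	       hw->hw_addr + E1000_CTL_AUX);
+ */
+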
+#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap - RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEPROM Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* RX Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* RX Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* RX Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* RX Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* RX Data FIFO Packet Count - RW */ +#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */ +#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */ +#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */ +#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */ +#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */ +#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */ +#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */ +#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */ +#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */ +#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */ +#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */ +#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */ +#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */ +#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */ +#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */ +#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */ +#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */ +#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */ +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */ +#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */ +#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */ +#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */ +#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */ +#define E1000_TDT 0x03818 /* TX Descriptor Tail - RW */ +#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */ +#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */ +#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */ +#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */ +#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */ +#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */ +#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */ +#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */ +#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */ +#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */ +#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */ +#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count - R/clr */ +#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets RX (256-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count 
*/ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* Register Set (82542) + * + * Some of the 82542 registers are located at different offsets than they are + * in more current versions of the 8254x. Despite the difference in location, + * the registers function in the same manner. 
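+ *
+ * Editor's note, a non-authoritative illustration: shared code that must
+ * run on both layouts typically picks the offset by MAC type, e.g.:
+ *
+ *	u32 rdtr = (hw->mac_type == e1000_82542_rev2_0 ||
+ *		    hw->mac_type == e1000_82542_rev2_1) ?
+ *		   E1000_82542_RDTR : E1000_RDTR;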
+ */ +#define E1000_82542_CTL_AUX E1000_CTL_AUX +#define E1000_82542_CTRL E1000_CTRL +#define E1000_82542_CTRL_DUP E1000_CTRL_DUP +#define E1000_82542_STATUS E1000_STATUS +#define E1000_82542_EECD E1000_EECD +#define E1000_82542_EERD E1000_EERD +#define E1000_82542_CTRL_EXT E1000_CTRL_EXT +#define E1000_82542_FLA E1000_FLA +#define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL +#define E1000_82542_FEXTNVM E1000_FEXTNVM +#define E1000_82542_FCAL E1000_FCAL +#define E1000_82542_FCAH E1000_FCAH +#define E1000_82542_FCT E1000_FCT +#define E1000_82542_VET E1000_VET +#define E1000_82542_RA 0x00040 +#define E1000_82542_ICR E1000_ICR +#define E1000_82542_ITR E1000_ITR +#define E1000_82542_ICS E1000_ICS +#define E1000_82542_IMS E1000_IMS +#define E1000_82542_IMC E1000_IMC +#define E1000_82542_RCTL E1000_RCTL +#define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDFH E1000_RDFH +#define E1000_82542_RDFT E1000_RDFT +#define E1000_82542_RDFHS E1000_RDFHS +#define E1000_82542_RDFTS E1000_RDFTS +#define E1000_82542_RDFPC E1000_RDFPC +#define E1000_82542_RDBAL 0x00110 +#define E1000_82542_RDBAH 0x00114 +#define E1000_82542_RDLEN 0x00118 +#define E1000_82542_RDH 0x00120 +#define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication + * RX Control - RW */ +#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) +#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ +#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ +#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ +#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ +#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ +#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ +#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ +#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ +#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ +#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 +#define E1000_82542_FCRTH 0x00160 +#define E1000_82542_FCRTL 0x00168 +#define E1000_82542_FCTTV E1000_FCTTV +#define E1000_82542_TXCW E1000_TXCW +#define E1000_82542_RXCW E1000_RXCW +#define E1000_82542_MTA 0x00200 +#define E1000_82542_TCTL E1000_TCTL +#define E1000_82542_TCTL_EXT E1000_TCTL_EXT +#define E1000_82542_TIPG E1000_TIPG +#define E1000_82542_TDBAL 0x00420 +#define E1000_82542_TDBAH 0x00424 +#define E1000_82542_TDLEN 0x00428 +#define E1000_82542_TDH 0x00430 +#define E1000_82542_TDT 0x00438 +#define E1000_82542_TIDV 0x00440 +#define E1000_82542_TBT E1000_TBT +#define E1000_82542_AIT E1000_AIT +#define E1000_82542_VFTA 0x00600 +#define E1000_82542_LEDCTL E1000_LEDCTL +#define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA 
E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE +#define E1000_82542_PHY_CTRL E1000_PHY_CTRL +#define E1000_82542_ERT E1000_ERT +#define E1000_82542_RXDCTL E1000_RXDCTL +#define E1000_82542_RXDCTL1 E1000_RXDCTL1 +#define E1000_82542_RADV E1000_RADV +#define E1000_82542_RSRPD E1000_RSRPD +#define E1000_82542_TXDMAC E1000_TXDMAC +#define E1000_82542_KABGTXD E1000_KABGTXD +#define E1000_82542_TDFHS E1000_TDFHS +#define E1000_82542_TDFTS E1000_TDFTS +#define E1000_82542_TDFPC E1000_TDFPC +#define E1000_82542_TXDCTL E1000_TXDCTL +#define E1000_82542_TADV E1000_TADV +#define E1000_82542_TSPMT E1000_TSPMT +#define E1000_82542_CRCERRS E1000_CRCERRS +#define E1000_82542_ALGNERRC E1000_ALGNERRC +#define E1000_82542_SYMERRS E1000_SYMERRS +#define E1000_82542_RXERRC E1000_RXERRC +#define E1000_82542_MPC E1000_MPC +#define E1000_82542_SCC E1000_SCC +#define E1000_82542_ECOL E1000_ECOL +#define E1000_82542_MCC E1000_MCC +#define E1000_82542_LATECOL E1000_LATECOL +#define E1000_82542_COLC E1000_COLC +#define E1000_82542_DC E1000_DC +#define E1000_82542_TNCRS E1000_TNCRS +#define E1000_82542_SEC E1000_SEC +#define E1000_82542_CEXTERR E1000_CEXTERR +#define E1000_82542_RLEC E1000_RLEC +#define E1000_82542_XONRXC E1000_XONRXC +#define E1000_82542_XONTXC E1000_XONTXC +#define E1000_82542_XOFFRXC E1000_XOFFRXC +#define E1000_82542_XOFFTXC E1000_XOFFTXC +#define E1000_82542_FCRUC E1000_FCRUC +#define E1000_82542_PRC64 E1000_PRC64 +#define E1000_82542_PRC127 E1000_PRC127 +#define E1000_82542_PRC255 E1000_PRC255 +#define E1000_82542_PRC511 E1000_PRC511 +#define E1000_82542_PRC1023 E1000_PRC1023 +#define E1000_82542_PRC1522 E1000_PRC1522 +#define E1000_82542_GPRC E1000_GPRC +#define E1000_82542_BPRC E1000_BPRC +#define E1000_82542_MPRC E1000_MPRC +#define E1000_82542_GPTC E1000_GPTC +#define E1000_82542_GORCL E1000_GORCL +#define E1000_82542_GORCH E1000_GORCH +#define E1000_82542_GOTCL E1000_GOTCL +#define E1000_82542_GOTCH E1000_GOTCH +#define E1000_82542_RNBC E1000_RNBC +#define E1000_82542_RUC E1000_RUC +#define E1000_82542_RFC E1000_RFC +#define E1000_82542_ROC E1000_ROC +#define E1000_82542_RJC E1000_RJC +#define E1000_82542_MGTPRC E1000_MGTPRC +#define E1000_82542_MGTPDC E1000_MGTPDC +#define E1000_82542_MGTPTC E1000_MGTPTC +#define E1000_82542_TORL E1000_TORL +#define E1000_82542_TORH E1000_TORH +#define E1000_82542_TOTL E1000_TOTL +#define E1000_82542_TOTH E1000_TOTH +#define E1000_82542_TPR E1000_TPR +#define E1000_82542_TPT E1000_TPT +#define E1000_82542_PTC64 E1000_PTC64 +#define E1000_82542_PTC127 E1000_PTC127 +#define E1000_82542_PTC255 E1000_PTC255 +#define E1000_82542_PTC511 E1000_PTC511 +#define E1000_82542_PTC1023 E1000_PTC1023 +#define E1000_82542_PTC1522 E1000_PTC1522 +#define E1000_82542_MPTC E1000_MPTC +#define E1000_82542_BPTC E1000_BPTC +#define E1000_82542_TSCTC E1000_TSCTC +#define E1000_82542_TSCTFC E1000_TSCTFC +#define E1000_82542_RXCSUM E1000_RXCSUM +#define E1000_82542_WUC E1000_WUC +#define E1000_82542_WUFC E1000_WUFC +#define E1000_82542_WUS E1000_WUS +#define E1000_82542_MANC E1000_MANC +#define E1000_82542_IPAV E1000_IPAV +#define E1000_82542_IP4AT E1000_IP4AT +#define E1000_82542_IP6AT E1000_IP6AT +#define E1000_82542_WUPL E1000_WUPL +#define E1000_82542_WUPM E1000_WUPM +#define E1000_82542_FFLT E1000_FFLT +#define E1000_82542_TDFH 0x08010 +#define E1000_82542_TDFT 0x08018 +#define E1000_82542_FFMT E1000_FFMT +#define 
E1000_82542_FFVT E1000_FFVT +#define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG +#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR +#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA +#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 txerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rlerrc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +/* Structure containing variables used by the shared code (e1000_hw.c) */ +struct e1000_hw { + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + void __iomem *ce4100_gbe_mdio_base_virt; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + u32 phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram *eeprom_shadow_ram; + u32 flash_bank_size; + u32 flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + u32 asf_firmware_present; + u32 eeprom_semaphore_present; + unsigned long io_base; + u32 phy_id; + u32 phy_revision; + u32 phy_addr; + u32 original_fc; + u32 txcw; + u32 autoneg_failed; + u32 max_frame_size; + u32 
min_frame_size; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + bool tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + u16 phy_spd_default; + u16 autoneg_advertised; + u16 pci_cmd_word; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + u16 current_ifs_val; + u16 ifs_min_val; + u16 ifs_max_val; + u16 ifs_step_size; + u16 ifs_ratio; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 autoneg; + u8 mdix; + u8 forced_speed_duplex; + u8 wait_autoneg_complete; + u8 dma_fairness; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; + bool disable_polarity_correction; + bool speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + bool get_link_status; + bool serdes_has_link; + bool tbi_compatibility_en; + bool tbi_compatibility_on; + bool laa_is_present; + bool phy_reset_disable; + bool initialize_hw_bits_disable; + bool fc_send_xon; + bool fc_strict_ieee; + bool report_tx_early; + bool adaptive_ifs; + bool ifs_params_forced; + bool in_ifs_mode; + bool mng_reg_access_disabled; + bool leave_av_bit_off; + bool bad_tx_carr_stats_fd; + bool has_smbus; +}; + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* EEPROM Read */ +#define E1000_EERD_START 0x00000001 /* Start Read */ +#define E1000_EERD_DONE 0x00000010 /* Read Done */ +#define E1000_EERD_ADDR_SHIFT 8 +#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ +#define E1000_EERD_DATA_SHIFT 16 +#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ + +/* SPI EEPROM Status Register */ +#define EEPROM_STATUS_RDY_SPI 0x01 +#define EEPROM_STATUS_WEN_SPI 
0x02 +#define EEPROM_STATUS_BP0_SPI 0x04 +#define EEPROM_STATUS_BP1_SPI 0x08 +#define EEPROM_STATUS_WPEN_SPI 0x80 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 +#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 +#define INTEL_CE_GBE_MDIC_GO 0x80000000 +#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 + +#define E1000_KUMCTRLSTA_MASK 0x0000FFFF +#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 +#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KUMCTRLSTA_REN 0x00200000 + +#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 +#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 +#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 +#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 +#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 
0x00000004 +#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 +#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 +#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E +#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F + +/* FIFO Control */ +#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 +#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 + +/* In-Band Control */ +#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 +#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 + +/* Half-Duplex Control */ +#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 +#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 + +#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E + +#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 +#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 + +#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 +#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 +#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 +#define E1000_PHY_CTRL_B2B_EN 0x00000080 + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SW_FW_SYNC definitions */ +#define E1000_SWFW_EEP_SM 0x0001 +#define E1000_SWFW_PHY0_SM 0x0002 +#define E1000_SWFW_PHY1_SM 0x0004 +#define E1000_SWFW_MAC_CSR_SM 0x0008 + +/* Receive Descriptor */ +#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ +#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ +#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ +#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ +#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Receive Descriptor Control */ +#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ +#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ +#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 
0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +/* Extended Transmit Control */ +#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 
/* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ +#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ +#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ +#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ +#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ +#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ +#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ +#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ +#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ +#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define 
E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* FW Semaphore Register */ +#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ + +#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ +#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ +#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ +#define E1000_FWSM_SKUEL_SHIFT 29 +#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ +#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ +#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ +#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Copr GbE SKU */ + +/* FFLT Debug Register */ +#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_interface_only +} e1000_mng_mode; + +/* Host Interface Control Register */ +#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ +#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_SV 0x00000004 /* Status Validity */ +#define E1000_HICR_FWR 0x00000080 /* FW reset. 
Set by the Host */ + +/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ +#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ + +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; /* I/F bits for command, status for return */ + u8 checksum; +}; +struct e1000_host_command_info { + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ +}; + +/* Host SMB register #0 */ +#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ +#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ +#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ +#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ + +/* Host SMB register #1 */ +#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN +#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN +#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT +#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT + +/* FW Status Register */ +#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +#define E1000_MDALIGN 4096 + +/* PCI-Ex registers*/ + +/* PCI-Ex Control Register */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +/* Function Active and Power State to MNG */ +#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 +#define E1000_FACTPS_LAN0_VALID 0x00000004 +#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 +#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 +#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 +#define E1000_FACTPS_LAN1_VALID 0x00000100 +#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 +#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 +#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 +#define E1000_FACTPS_IDE_ENABLE 0x00004000 +#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 +#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 +#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 +#define E1000_FACTPS_SP_ENABLE 0x00100000 +#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 +#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 +#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 +#define E1000_FACTPS_IPMI_ENABLE 0x04000000 +#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 +#define E1000_FACTPS_MNGCG 0x20000000 +#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 +#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 + +/* PCI-Ex Config Space */ +#define PCI_EX_LINK_STATUS 0x12 +#define PCI_EX_LINK_WIDTH_MASK 0x3F0 +#define PCI_EX_LINK_WIDTH_SHIFT 4 + +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode 
*/ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Commands - SPI */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Size definitions */ +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 +#define EEPROM_SIZE_MASK 0x1C00 + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ +#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F + +/* Mask bit for PHY class in Word 7 of the EEPROM */ +#define EEPROM_PHY_CLASS_A 0x8000 + +/* Mask bits for fields in Word 0x0a of the EEPROM */ +#define EEPROM_WORD0A_ILOS 0x0010 +#define EEPROM_WORD0A_SWDPIO 0x01E0 +#define EEPROM_WORD0A_LRST 0x0200 +#define EEPROM_WORD0A_FD 0x0400 +#define EEPROM_WORD0A_66MHZ 0x0800 + +/* Mask bits for fields in Word 0x0f of the EEPROM */ +#define EEPROM_WORD0F_PAUSE_MASK 0x3000 +#define EEPROM_WORD0F_PAUSE 0x1000 +#define EEPROM_WORD0F_ASM_DIR 0x2000 +#define EEPROM_WORD0F_ANE 0x0800 +#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 +#define EEPROM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ +#define EEPROM_WORD1020_GIGA_DISABLE 0x0010 +#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 + +/* Mask bits for fields in Word 0x1a of the EEPROM */ +#define EEPROM_WORD1A_ASPM_MASK 0x000C + 
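As the comment on EEPROM_A8_OPCODE_SPI above notes ("opcode bit-3 = address bit-8"), SPI EEPROM parts that take only an 8-bit address receive the ninth address bit inside the opcode byte itself. A minimal sketch of that opcode selection, assuming the kernel's u8/u16 types; the helper name is hypothetical and not part of this header:

/* Hypothetical helper: picks the SPI read opcode for a given word offset.
 * Word offsets at or above 128 correspond to byte addresses of 256 and up,
 * so address bit 8 is folded into bit 3 of the opcode
 * (EEPROM_A8_OPCODE_SPI = 0x08). */
static u8 example_spi_read_opcode(u16 word_offset, u16 address_bits)
{
	u8 opcode = EEPROM_READ_OPCODE_SPI;	/* 0x03 */

	if (address_bits == 8 && word_offset >= 128)
		opcode |= EEPROM_A8_OPCODE_SPI;	/* carry address bit 8 */
	return opcode;
}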
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map defines (WORD OFFSETS)*/ +#define EEPROM_NODE_ADDRESS_BYTE_0 0 +#define EEPROM_PBA_BYTE_1 8 + +#define EEPROM_RESERVED_WORD 0xFFFF + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +/* Collision distance is a 0-based value that applies to + * half-duplex-capable hardware only. */ +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_COLD_SHIFT 12 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define E1000_TXDMAC_DPP 0x00000001 + +/* Adaptive IFS defines */ +#define TX_THRESHOLD_START 8 +#define TX_THRESHOLD_INCREMENT 10 +#define TX_THRESHOLD_DECREMENT 1 +#define TX_THRESHOLD_STOP 190 +#define TX_THRESHOLD_DISABLE 0 +#define TX_THRESHOLD_TIMER_MS 10000 +#define MIN_NUM_XMITS 1000 +#define IFS_MAX 80 +#define IFS_STEP 10 +#define IFS_MIN 40 +#define IFS_RATIO 4 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* The historical defaults for the flow control values are given below. 
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* PCIX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 + +/* Number of bits required to shift right the "pause" bits from the + * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. + */ +#define PAUSE_SHIFT 5 + +/* Number of bits required to shift left the "SWDPIO" bits from the + * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. + */ +#define SWDPIO_SHIFT 17 + +/* Number of bits required to shift left the "SWDPIO_EXT" bits from the + * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. + */ +#define SWDPIO__EXT_SHIFT 4 + +/* Number of bits required to shift left the "ILOS" bit from the EEPROM + * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. + */ +#define ILOS_SHIFT 3 + +#define RECEIVE_BUFFER_ALIGN_SIZE (256) + +/* Number of milliseconds we wait for auto-negotiation to complete */ +#define LINK_UP_TIMEOUT 500 + +/* Number of milliseconds we wait for Eeprom auto read bit done after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 + +#define E1000_TX_BUFFER_SIZE ((u32)1514) + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +/* TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the RX descriptor with EOP set + * error = the 8 bit error field of the RX descriptor with EOP set + * length = the sum of all the length fields of the RX descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * max_frame_length = the maximum frame length we want to accept. + * min_frame_length = the minimum frame length we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ + ((adapter)->tbi_compatibility_on && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= ((adapter)->max_frame_size + 1))) : \ + (((length) > (adapter)->min_frame_size) && \ + ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) + +/* Structures, enums, and macros for the PHY */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register. 
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +#define IGP01E1000_IEEE_REGS_PAGE 0x0000 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 +#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ + +/* IGP01E1000 AGC Registers - stores the cable length values*/ +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +/* IGP01E1000 DSP Reset Register */ +#define IGP01E1000_PHY_DSP_RESET 0x1F33 +#define IGP01E1000_PHY_DSP_SET 0x1F71 +#define IGP01E1000_PHY_DSP_FFE 0x1F35 + +#define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890 +#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000 +#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 + +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A +/* IGP01E1000 PCS Initialization register - stores the polarity status when + * speed = 1000 Mbps. */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5 + +#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* LP is 10T Half Duplex Capable */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* LP is 10T Full Duplex Capable */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is 100TX Half Duplex Capable */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* LP is 100TX Full Duplex Capable */ + +/* Next Page TX Register */ +#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define NPTX_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* Link Partner Next Page Register */ +#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ +#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = addition NP will follow + * 0 = sending last NP + */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100 
/* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +/* Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ +#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ + +#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ + /* (0=enable, 1=disable) */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 +#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 +#define M88E1000_PSSR_MDIX_SHIFT 6 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* IGP01E1000 Specific Port Config Register - R/W */ +#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 +#define IGP01E1000_PSCFR_PRE_EN 0x0020 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 +#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 +#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 + +/* IGP01E1000 Specific Port Status Register - R/O */ +#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C +#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 +#define IGP01E1000_PSSR_LINK_UP 0x0400 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ +#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 +#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ +#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ + +/* IGP01E1000 Specific Port Control Register - R/W */ +#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 +#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 +#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 +#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ + +/* IGP01E1000 Specific Port Link Health Register */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 +#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 +#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 +#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ +#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 +#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 +#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 +#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 +#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 +#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 +#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 + +/* IGP01E1000 Channel Quality Register */ +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + +/* IGP01E1000 DSP reset macros */ +#define DSP_RESET_ENABLE 0x0 +#define DSP_RESET_DISABLE 0x2 +#define E1000_MAX_DSP_RESETS 10 + +/* IGP01E1000 & IGP02E1000 AGC Registers */ + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F + +/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 + +/* The precision error of the cable length is +/- 10 meters */ +#define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 + +/* IGP01E1000 PCS Initialization register */ +/* bits 3:6 in the PCS registers stores the channels polarity */ +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +/* IGP01E1000 GMII FIFO Register */ +#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed + * on Link-Up */ +#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ + +/* IGP01E1000 Analog Register */ +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID +#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 +#define L1LXT971A_PHY_ID 0x001378E0 + +#define RTL8211B_PHY_ID 0x001CC910 +#define RTL8201N_PHY_ID 0x8200 +#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ +#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ + +/* Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) \ + (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) + +#define IGP3_PHY_PORT_CTRL \ + PHY_REG(769, 17) /* Port General Configuration */ +#define IGP3_PHY_RATE_ADAPT_CTRL \ + PHY_REG(769, 25) /* Rate Adapter Control Register */ + +#define IGP3_KMRN_FIFO_CTRL_STATS \ + PHY_REG(770, 16) /* KMRN FIFO's control/status register */ +#define IGP3_KMRN_POWER_MNG_CTRL \ + PHY_REG(770, 17) /* KMRN Power Management Control Register */ +#define IGP3_KMRN_INBAND_CTRL \ + PHY_REG(770, 18) /* KMRN Inband Control Register */ +#define IGP3_KMRN_DIAG \ + PHY_REG(770, 19) /* KMRN Diagnostic register */ +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ +#define IGP3_KMRN_ACK_TIMEOUT \ + PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ + +#define IGP3_VR_CTRL \ + PHY_REG(776, 18) /* Voltage regulator control register */ +#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ +#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ + +#define IGP3_CAPABILITY \ + PHY_REG(776, 19) /* IGP3 Capability Register */ + +/* Capabilities for SKU Control */ +#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ +#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ +#define IGP3_CAP_ASF 0x0004 /* Support ASF */ +#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ +#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ +#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ +#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ +#define IGP3_CAP_RSS 0x0080 /* Support RSS */ +#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ +#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ + +#define IGP3_PPC_JORDAN_EN 0x0001 +#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 + +#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 +#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E +#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 +#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 + +#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. 
Ctrl register */ +#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ + +#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) +#define IGP3_KMRN_EC_DIS_INBAND 0x0080 + +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ +#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ +#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ +#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ +#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ +#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ +#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ +#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ +#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ +#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ + +#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ +#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ +#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ +#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ +#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ +#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ +#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 + +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ +#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ +#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ +#define IFE_PSC_FORCE_POLARITY_SHIFT 5 +#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 + +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ +#define IFE_PMC_MDIX_MODE_SHIFT 6 +#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ + +#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ +#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ +#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ +#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ +#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ +#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ +#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ +#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ +#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ +#define IFE_PSCL_PROBE_LEDS_OFF 
0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ +#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define ICH_CYCLE_READ 0x0 +#define ICH_CYCLE_RESERVED 0x1 +#define ICH_CYCLE_WRITE 0x2 +#define ICH_CYCLE_ERASE 0x3 + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_FRACC 0x0050 +#define ICH_FLASH_FREG0 0x0054 +#define ICH_FLASH_FREG1 0x0058 +#define ICH_FLASH_FREG2 0x005C +#define ICH_FLASH_FREG3 0x0060 +#define ICH_FLASH_FPR0 0x0074 +#define ICH_FLASH_FPR1 0x0078 +#define ICH_FLASH_SSFSTS 0x0090 +#define ICH_FLASH_SSFCTL 0x0092 +#define ICH_FLASH_PREOP 0x0094 +#define ICH_FLASH_OPTYPE 0x0096 +#define ICH_FLASH_OPMENU 0x0098 + +#define ICH_FLASH_REG_MAPSIZE 0x00A0 +#define ICH_FLASH_SECTOR_SIZE 4096 +#define ICH_GFPREG_BASE_MASK 0x1FFF +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF + +/* Miscellaneous PHY bit definitions. */ +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_SOF 0x01 +#define PHY_OP_READ 0x02 +#define PHY_OP_WRITE 0x01 +#define PHY_TURNAROUND 0x02 +#define PHY_PREAMBLE_SIZE 32 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 +#define E1000_PHY_ADDRESS 0x01 +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ +#define REG4_SPEED_MASK 0x01E0 +#define REG9_SPEED_MASK 0x0300 +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#endif /* _E1000_HW_H_ */ diff --git a/addons/e1000/src/3.10.108/e1000_main.c b/addons/e1000/src/3.10.108/e1000_main.c new file mode 100644 index 00000000..a978fc82 --- /dev/null +++ b/addons/e1000/src/3.10.108/e1000_main.c @@ -0,0 +1,5226 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" +#include <net/ip6_checksum.h> +#include <linux/io.h> +#include <linux/prefetch.h> +#include <linux/bitops.h> +#include <linux/if_vlan.h> + +char e1000_driver_name[] = "e1000"; +static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; +#define DRV_VERSION "7.3.21-k8-NAPI" +const char e1000_driver_version[] = DRV_VERSION; +static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; + +/* e1000_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * Macro expands to... + * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + */ +static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { + INTEL_E1000_ETHERNET_DEVICE(0x1000), + INTEL_E1000_ETHERNET_DEVICE(0x1001), + INTEL_E1000_ETHERNET_DEVICE(0x1004), + INTEL_E1000_ETHERNET_DEVICE(0x1008), + INTEL_E1000_ETHERNET_DEVICE(0x1009), + INTEL_E1000_ETHERNET_DEVICE(0x100C), + INTEL_E1000_ETHERNET_DEVICE(0x100D), + INTEL_E1000_ETHERNET_DEVICE(0x100E), + INTEL_E1000_ETHERNET_DEVICE(0x100F), + INTEL_E1000_ETHERNET_DEVICE(0x1010), + INTEL_E1000_ETHERNET_DEVICE(0x1011), + INTEL_E1000_ETHERNET_DEVICE(0x1012), + INTEL_E1000_ETHERNET_DEVICE(0x1013), + INTEL_E1000_ETHERNET_DEVICE(0x1014), + INTEL_E1000_ETHERNET_DEVICE(0x1015), + INTEL_E1000_ETHERNET_DEVICE(0x1016), + INTEL_E1000_ETHERNET_DEVICE(0x1017), + INTEL_E1000_ETHERNET_DEVICE(0x1018), + INTEL_E1000_ETHERNET_DEVICE(0x1019), + INTEL_E1000_ETHERNET_DEVICE(0x101A), + INTEL_E1000_ETHERNET_DEVICE(0x101D), + INTEL_E1000_ETHERNET_DEVICE(0x101E), + INTEL_E1000_ETHERNET_DEVICE(0x1026), + INTEL_E1000_ETHERNET_DEVICE(0x1027), + INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x1075), + INTEL_E1000_ETHERNET_DEVICE(0x1076), + INTEL_E1000_ETHERNET_DEVICE(0x1077), + INTEL_E1000_ETHERNET_DEVICE(0x1078), + INTEL_E1000_ETHERNET_DEVICE(0x1079), + INTEL_E1000_ETHERNET_DEVICE(0x107A), + INTEL_E1000_ETHERNET_DEVICE(0x107B), + INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x108A), + INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +void e1000_update_stats(struct e1000_adapter *adapter); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +static int e1000_sw_init(struct e1000_adapter *adapter); +static int e1000_open(struct net_device *netdev); +static int
e1000_close(struct net_device *netdev); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void e1000_set_rx_mode(struct net_device *netdev); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog(struct work_struct *work); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static struct net_device_stats * e1000_get_stats(struct net_device *netdev); +static int e1000_change_mtu(struct net_device *netdev, int new_mtu); +static int e1000_set_mac(struct net_device *netdev, void *p); +static irqreturn_t e1000_intr(int irq, void *data); +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static int e1000_clean(struct napi_struct *napi, int budget); +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_tx_timeout(struct net_device *dev); +static void e1000_reset_task(struct work_struct *work); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb); + +static bool e1000_vlan_used(struct e1000_adapter *adapter); +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on); +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static void e1000_restore_vlan(struct e1000_adapter *adapter); + +#ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); +static int e1000_resume(struct pci_dev *pdev); +#endif +static void e1000_shutdown(struct pci_dev *pdev); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void e1000_netpoll (struct net_device *netdev); +#endif + +#define COPYBREAK_DEFAULT 256 +static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that 
is copied to a new buffer on receive"); + +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); +static void e1000_io_resume(struct pci_dev *pdev); + +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static struct pci_driver e1000_driver = { + .name = e1000_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = e1000_remove, +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = e1000_suspend, + .resume = e1000_resume, +#endif + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * e1000_get_hw_dev - return device + * used by hardware layer to print debugging information + * + **/ +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version); + + pr_info("%s\n", e1000_copyright); + + ret = pci_register_driver(&e1000_driver); + if (copybreak != COPYBREAK_DEFAULT) { + if (copybreak == 0) + pr_info("copybreak disabled\n"); + else + pr_info("copybreak enabled for " + "packets <= %u bytes\n", copybreak); + } + return ret; +} + +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory.
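+ * Unregistering the PCI driver also invokes e1000_remove() for every + * bound device, so all per-device teardown happens there rather than here.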
+ **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} + +module_exit(e1000_exit_module); + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irq_handler_t handler = e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + e_err(probe, "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u16 vid = hw->mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!e1000_vlan_used(adapter)) + return; + + if (!test_bit(vid, adapter->active_vlans)) { + if (hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + +static void e1000_init_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + ew32(MANC, manc); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + + ew32(MANC, manc); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter = private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + e1000_set_rx_mode(netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + /* call E1000_DESC_UNUSED which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct e1000_rx_ring *ring = &adapter->rx_ring[i]; + adapter->alloc_rx_buf(adapter, ring, + E1000_DESC_UNUSED(ring)); + } +} + +int e1000_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->flags); + + 
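/* enable NAPI before unmasking interrupts so that the ISR can + * schedule the poll routine as soon as an event fires + */ +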
napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_wake_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + return 0; +} + +/** + * e1000_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000_reset *** + **/ +void e1000_power_up_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (hw->media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle + */ + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is true * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active + */ + if (!adapter->wol && hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + u16 mii_reg = 0; + + switch (hw->mac_type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (er32(MANC) & E1000_MANC_SMBUS_EN) + goto out; + break; + default: + goto out; + } + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + msleep(1); + } +out: + return; +} + +static void e1000_down_and_stop(struct e1000_adapter *adapter) +{ + set_bit(__E1000_DOWN, &adapter->flags); + + /* Only kill reset task if adapter is not resetting */ + if (!test_bit(__E1000_RESETTING, &adapter->flags)) + cancel_work_sync(&adapter->reset_task); + + cancel_delayed_work_sync(&adapter->watchdog_task); + cancel_delayed_work_sync(&adapter->phy_info_task); + cancel_delayed_work_sync(&adapter->fifo_stall_task); +} + +void e1000_down(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl, tctl; + + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_tx_disable(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + E1000_WRITE_FLUSH(); + msleep(10); + + napi_disable(&adapter->napi); + + e1000_irq_disable(adapter); + + /* Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * tasks from rescheduling. 
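+ * (the delayed-work handlers test __E1000_DOWN and avoid rearming + * themselves once the bit is set)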
+ */ + e1000_down_and_stop(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + netif_carrier_off(netdev); + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_reinit_safe(struct e1000_adapter *adapter) +{ + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + mutex_lock(&adapter->mutex); + e1000_down(adapter); + e1000_up(adapter); + mutex_unlock(&adapter->mutex); + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reinit_locked(struct e1000_adapter *adapter) +{ + /* if rtnl_lock is not held the call path is bogus */ + ASSERT_RTNL(); + WARN_ON(in_interrupt()); + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + e1000_down(adapter); + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reset(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + bool legacy_pba_adjust = false; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + case e1000_82540: + case e1000_82541: + case e1000_82541_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_48K; + break; + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + pba = E1000_PBA_48K; + break; + case e1000_82547: + case e1000_82547_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_30K; + break; + case e1000_undefined: + case e1000_num_macs: + break; + } + + if (legacy_pba_adjust) { + if (hw->max_frame_size > E1000_RXBUFFER_8192) + pba -= 8; /* allocate more FIFO for Tx */ + + if (hw->mac_type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* adjust PBA for jumbo frames */ + ew32(PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. 
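+ * As a rough illustration, with a 9000-byte MTU the frame size is + * 9018 bytes: min Tx space is (9018 + 16 - 4) * 2 = 18060 bytes, aligned + * up to 18432 and shifted down to 18 KB, while min Rx space is 9018 + * aligned up to 9216, i.e. 9 KB (assuming the 16-byte legacy Tx + * descriptor).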
+ */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (hw->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = hw->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* PCI/PCIx hardware has PBA alignment constraints */ + switch (hw->mac_type) { + case e1000_82545 ... e1000_82546_rev_3: + pba &= ~(E1000_PBA_8K - 1); + break; + default: + break; + } + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + } + + ew32(PBA, pba); + + /* flow control settings: + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. + * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - hw->max_frame_size)); + + hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + hw->fc_low_water = hw->fc_high_water - 8; + hw->fc_pause_time = E1000_FC_PAUSE_TIME; + hw->fc_send_xon = 1; + hw->fc = hw->original_fc; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(hw); + if (hw->mac_type >= e1000_82544) + ew32(WUC, 0); + + if (e1000_init_hw(hw)) + e_dev_err("Hardware Error\n"); + e1000_update_mng_vlan(adapter); + + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ + if (hw->mac_type >= e1000_82544 && + hw->autoneg == 1 && + hw->autoneg_advertised == ADVERTISE_1000_FULL) { + u32 ctrl = er32(CTRL); + /* clear phy power management bit if we are in gig only mode, + * which if enabled will attempt negotiation to 100Mb, which + * can cause a loss of link at power off or driver unload + */ + ctrl &= ~E1000_CTRL_SWDPIN3; + ew32(CTRL, ctrl); + } + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETHERNET_IEEE_VLAN_TYPE); + + e1000_reset_adaptive(hw); + e1000_phy_get_info(hw, &adapter->phy_info); + + e1000_release_manageability(adapter); +} + +/* Dump the eeprom for users having checksum issues */ +static void e1000_dump_eeprom(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_eeprom eeprom; + const struct ethtool_ops *ops = netdev->ethtool_ops; + u8 *data; + int i; + u16 csum_old, csum_new = 0; + + eeprom.len = ops->get_eeprom_len(netdev); + eeprom.offset = 0; + + data = kmalloc(eeprom.len, GFP_KERNEL); + if (!data) + return; + + ops->get_eeprom(netdev, &eeprom, data); + + csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + + (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); + for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) + csum_new += data[i] + 
(data[i + 1] << 8); + csum_new = EEPROM_SUM - csum_new; + + pr_err("/*********************/\n"); + pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); + pr_err("Calculated : 0x%04x\n", csum_new); + + pr_err("Offset Values\n"); + pr_err("======== ======\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); + + pr_err("Include this output when contacting your support provider.\n"); + pr_err("This is not a software error! Something bad happened to\n"); + pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); + pr_err("result in further problems, possibly loss of data,\n"); + pr_err("corruption or system hangs!\n"); + pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); + pr_err("which is invalid and requires you to set the proper MAC\n"); + pr_err("address manually before continuing to enable this network\n"); + pr_err("device. Please inspect the EEPROM dump and report the\n"); + pr_err("issue to your hardware vendor or Intel Customer Support.\n"); + pr_err("/*********************/\n"); + + kfree(data); +} + +/** + * e1000_is_need_ioport - determine if an adapter needs ioport resources or not + * @pdev: PCI device information struct + * + * Return true if an adapter needs ioport resources + **/ +static int e1000_is_need_ioport(struct pci_dev *pdev) +{ + switch (pdev->device) { + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541ER_LOM: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + return true; + default: + return false; + } +} + +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. 
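+ * (so toggling rxvlan from ethtool implicitly toggles txvlan as well)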
+ */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + e1000_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + adapter->rx_csum = !!(features & NETIF_F_RXCSUM); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + + return 0; +} + +static const struct net_device_ops e1000_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_get_stats = e1000_get_stats, + .ndo_set_rx_mode = e1000_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_change_mtu = e1000_change_mtu, + .ndo_do_ioctl = e1000_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_fix_features = e1000_fix_features, + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_init_hw_struct - initialize members of hw struct + * @adapter: board private struct + * @hw: structure used by e1000_hw.c + * + * Factors out initialization of the e1000_hw struct to its own function + * that can be called very early at init (just after struct allocation). + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + * Returns negative error codes if MAC type setup fails. + */ +static int e1000_init_hw_struct(struct e1000_adapter *adapter, + struct e1000_hw *hw) +{ + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + hw->max_frame_size = adapter->netdev->mtu + + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + + /* identify the MAC */ + if (e1000_set_mac_type(hw)) { + e_err(probe, "Unknown MAC Type\n"); + return -EIO; + } + + switch (hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy_init_script = 1; + break; + } + + e1000_set_media_type(hw); + e1000_get_bus_info(hw); + + hw->wait_autoneg_complete = false; + hw->tbi_compatibility_en = true; + hw->adaptive_ifs = true; + + /* Copper options */ + + if (hw->media_type == e1000_media_type_copper) { + hw->mdix = AUTO_ALL_MODES; + hw->disable_polarity_correction = false; + hw->master_slave = E1000_MASTER_SLAVE; + } + + return 0; +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
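+ * This runs once for each device that matches an entry in + * e1000_pci_tbl during PCI enumeration.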
+ **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter; + struct e1000_hw *hw; + + static int cards_found = 0; + static int global_quad_port_a = 0; /* global ksp3 port a indication */ + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 tmp = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + int bars, need_ioport; + + /* do not allocate ioport bars when not needed */ + need_ioport = e1000_is_need_ioport(pdev); + if (need_ioport) { + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); + err = pci_enable_device(pdev); + } else { + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_enable_device_mem(pdev); + } + if (err) + return err; + + err = pci_request_selected_regions(pdev, bars, e1000_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->bars = bars; + adapter->need_ioport = need_ioport; + + hw = &adapter->hw; + hw->back = adapter; + + err = -EIO; + hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!hw->hw_addr) + goto err_ioremap; + + if (adapter->need_ioport) { + for (i = BAR_1; i <= BAR_5; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + hw->io_base = pci_resource_start(pdev, i); + break; + } + } + } + + /* make ready for any if (hw->...) below */ + err = e1000_init_hw_struct(adapter, hw); + if (err) + goto err_sw_init; + + /* there is a workaround being applied below that limits + * 64-bit DMA addresses to 64-bit hardware. 
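+ * (in practice only PCI-X parts are given DMA_BIT_MASK(64) below; + * everything else falls back to the 32-bit mask.)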
There are some + * 32-bit adapters that Tx hang when given 64-bit DMA addresses + */ + pci_using_dac = 0; + if ((hw->bus_type == e1000_bus_type_pcix) && + !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { + /* according to DMA-API-HOWTO, coherent calls will always + * succeed if the set call did + */ + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + pci_using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA config, aborting\n"); + goto err_dma; + } + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + } + + netdev->netdev_ops = &e1000_netdev_ops; + e1000_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + if (hw->mac_type == e1000_ce4100) { + hw->ce4100_gbe_mdio_base_virt = + ioremap(pci_resource_start(pdev, BAR_1), + pci_resource_len(pdev, BAR_1)); + + if (!hw->ce4100_gbe_mdio_base_virt) + goto err_mdio_ioremap; + } + + if (hw->mac_type >= e1000_82543) { + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_82547)) + netdev->hw_features |= NETIF_F_TSO; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->features |= netdev->hw_features; + netdev->hw_features |= (NETIF_F_RXCSUM | + NETIF_F_RXALL | + NETIF_F_RXFCS); + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= (NETIF_F_TSO | + NETIF_F_HW_CSUM | + NETIF_F_SG); + + netdev->priv_flags |= IFF_UNICAST_FLT; + + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); + + /* initialize eeprom parameters */ + if (e1000_init_eeprom_params(hw)) { + e_err(probe, "EEPROM initialization failed\n"); + goto err_eeprom; + } + + /* before reading the EEPROM, reset the controller to + * put the device in a known good starting state + */ + + e1000_reset_hw(hw); + + /* make sure the EEPROM is good */ + if (e1000_validate_eeprom_checksum(hw) < 0) { + e_err(probe, "The EEPROM Checksum Is Not Valid\n"); + e1000_dump_eeprom(adapter); + /* set MAC address to all zeroes to invalidate and temporary + * disable this device for the user. 
This blocks regular + * traffic while still permitting ethtool ioctls from reaching + * the hardware as well as allowing the user to run the + * interface after manually setting a hw addr using + * `ip set address` + */ + memset(hw->mac_addr, 0, netdev->addr_len); + } else { + /* copy the MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw)) + e_err(probe, "EEPROM Read Error\n"); + } + /* don't block initialization here due to bad MAC address */ + memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) + e_err(probe, "Invalid MAC Address\n"); + + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); + INIT_DELAYED_WORK(&adapter->fifo_stall_task, + e1000_82547_tx_fifo_stall_task); + INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + INIT_WORK(&adapter->reset_task, e1000_reset_task); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + break; + case e1000_82544: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + /* Fall Through */ + default: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82546GB_PCIE: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->quad_port_a = true; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* Auto detect PHY address */ + if (hw->mac_type == e1000_ce4100) { + for (i = 0; i < 32; i++) { + hw->phy_addr = i; + e1000_read_phy_reg(hw, PHY_ID2, &tmp); + if (tmp == 0 || tmp == 0xFF) { + if (i == 31) + goto err_eeprom; + continue; + } else + break; + } + } + + /* reset the hardware with the new settings */ + e1000_reset(adapter); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + e1000_vlan_filter_on_off(adapter, false); + + /* print bus type/speed/width info */ + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", + ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), + ((hw->bus_speed == e1000_bus_speed_133) ? 133 : + (hw->bus_speed == e1000_bus_speed_120) ? 120 : + (hw->bus_speed == e1000_bus_speed_100) ? 100 : + (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), + ((hw->bus_width == e1000_bus_width_64) ?
64 : 32), + netdev->dev_addr); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); + + cards_found++; + return 0; + +err_register: +err_eeprom: + e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_dma: +err_sw_init: +err_mdio_ioremap: + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, bars); +err_pci_reg: + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + e1000_down_and_stop(adapter); + e1000_release_manageability(adapter); + + unregister_netdev(netdev); + + e1000_phy_hw_reset(hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + if (hw->mac_type == e1000_ce4100) + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, adapter->bars); + + free_netdev(netdev); + + pci_disable_device(pdev); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * e1000_init_hw_struct MUST be called before this function + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + if (e1000_alloc_queues(adapter)) { + e_err(probe, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + mutex_init(&adapter->mutex); + + set_bit(__E1000_DOWN, &adapter->flags); + + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. + **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog task is started, + * and the stack is notified that the interface is ready. + **/ +static int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->flags)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_update_mng_vlan(adapter); + } + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000_up() */ + clear_bit(__E1000_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return E1000_SUCCESS; + +err_req_irq: + e1000_power_down_phy(adapter); + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
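+ * The PHY is then powered down as well, unless WoL, AMT or an active + * SoL/IDER session still needs it (see e1000_power_down_phy).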
+ **/ +static int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + } + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, + unsigned long len) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long begin = (unsigned long)start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 + */ + if (hw->mac_type == e1000_82545 || + hw->mac_type == e1000_ce4100 || + hw->mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; + } + + return true; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) + return -ENOMEM; + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { +setup_tx_desc_die: + vfree(txdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + e_err(tx_err, "txdr align check failed: %u bytes at %p\n", + txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + } + } + memset(txdr->desc, 0, txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate 
Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_tx_resources(adapter, + &adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + u64 tdba; + struct e1000_hw *hw = &adapter->hw; + u32 tdlen, tctl, tipg; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + switch (adapter->num_tx_queues) { + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + ew32(TDLEN, tdlen); + ew32(TDBAH, (tdba >> 32)); + ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); + ew32(TDT, 0); + ew32(TDH, 0); + adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? + E1000_TDH : E1000_82542_TDH); + adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? + E1000_TDT : E1000_82542_TDT); + break; + } + + /* Set the default values for the Tx Inter Packet Gap timer */ + if ((hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes)) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + ew32(TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + if (hw->mac_type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
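+ * (the transmit path checks adapter->pcix_82544 when laying out + * buffers so it can sidestep the 82544 PCI-X hardware erratum)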
+ */ + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) + adapter->pcix_82544 = true; + + ew32(TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) + return -ENOMEM; + + desc_len = sizeof(struct e1000_rx_desc); + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { +setup_rx_desc_die: + vfree(rxdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", + rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory for " + "the Rx descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + } + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->rx_skb_top = NULL; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_rx_resources(adapter, + &adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = er32(RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + + if (hw->tbi_compatibility_on == 1) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_2048: + default: + 
rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + u64 rdba; + struct e1000_hw *hw = &adapter->hw; + u32 rdlen, rctl, rxcsum; + + if (adapter->netdev->mtu > ETH_DATA_LEN) { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + if (hw->mac_type >= e1000_82540) { + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + switch (adapter->num_rx_queues) { + case 1: + default: + rdba = adapter->rx_ring[0].dma; + ew32(RDLEN, rdlen); + ew32(RDBAH, (rdba >> 32)); + ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); + ew32(RDT, 0); + ew32(RDH, 0); + adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? + E1000_RDH : E1000_82542_RDH); + adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
+ E1000_RDT : E1000_82542_RDT); + break; + } + + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + if (hw->mac_type >= e1000_82543) { + rxcsum = er32(RXCSUM); + if (adapter->rx_csum) + rxcsum |= E1000_RXCSUM_TUOFL; + else + /* don't need to clear IPPCSE as it defaults to 0 */ + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + } + + /* Enable Receives */ + ew32(RCTL, rctl | E1000_RCTL_EN); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = false; + + writel(0, hw->hw_addr + tx_ring->tdh); + writel(0, hw->hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_rx_ring(adapter, rx_ring); + + 
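/* e1000_clean_rx_ring() above has already unmapped the DMA buffers + * and freed the skbs/pages; only the buffer_info array and the + * coherent descriptor memory remain to be released below. + */ +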
vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +void e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma && + adapter->clean_rx == e1000_clean_rx_irq) { + dma_unmap_single(&pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_FROM_DEVICE); + } else if (buffer_info->dma && + adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + dma_unmap_page(&pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_FROM_DEVICE); + } + + buffer_info->dma = 0; + if (buffer_info->page) { + put_page(buffer_info->page); + buffer_info->page = NULL; + } + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + } + + /* there also may be some cached data from a chained receive */ + if (rx_ring->rx_skb_top) { + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + } + + size = sizeof(struct e1000_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, hw->hw_addr + rx_ring->rdh); + writel(0, hw->hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain operations + */ +static void e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + e1000_pci_clear_mwi(hw); + + rctl = er32(RCTL); + rctl |= E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (netif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + + if (netif_running(netdev)) { + /* No need to loop, because 82542 supports only 1 queue */ + struct e1000_rx_ring *ring = &adapter->rx_ring[0]; + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + } +} + +/** + * e1000_set_mac - Change the Ethernet 
Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + + e1000_rar_set(hw, hw->mac_addr, 0); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + return 0; +} + +/** + * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + bool use_uc = false; + u32 rctl; + u32 hash_value; + int i, rar_entries = E1000_RAR_ENTRIES; + int mta_reg_count = E1000_NUM_MTA_REGISTERS; + u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); + + if (!mcarray) + return; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) + rctl |= E1000_RCTL_MPE; + else + rctl &= ~E1000_RCTL_MPE; + /* Enable VLAN filter if there is a VLAN */ + if (e1000_vlan_used(adapter)) + rctl |= E1000_RCTL_VFE; + } + + if (netdev_uc_count(netdev) > rar_entries - 1) { + rctl |= E1000_RCTL_UPE; + } else if (!(netdev->flags & IFF_PROMISC)) { + rctl &= ~E1000_RCTL_UPE; + use_uc = true; + } + + ew32(RCTL, rctl); + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + /* load the first 14 addresses into the exact filters 1-14. Unicast + * addresses take precedence to avoid disabling unicast filtering + * when possible. 
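+ * (With RAR 0 reserved for the station address, rar_entries - 1 slots + * remain for unicast filtering, which is why netdev_uc_count() is + * compared against rar_entries - 1 above.)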
+ * + * RAR 0 is used for the station MAC address + * if there are not 14 addresses, go ahead and clear the filters + */ + i = 1; + if (use_uc) + netdev_for_each_uc_addr(ha, netdev) { + if (i == rar_entries) + break; + e1000_rar_set(hw, ha->addr, i++); + } + + netdev_for_each_mc_addr(ha, netdev) { + if (i == rar_entries) { + /* load any remaining addresses into the hash table */ + u32 hash_reg, hash_bit, mta; + hash_value = e1000_hash_mc_addr(hw, ha->addr); + hash_reg = (hash_value >> 5) & 0x7F; + hash_bit = hash_value & 0x1F; + mta = (1 << hash_bit); + mcarray[hash_reg] |= mta; + } else { + e1000_rar_set(hw, ha->addr, i++); + } + } + + for (; i < rar_entries; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(); + } + + /* write the hash table completely, write from bottom to avoid + * both stupid write combining chipsets, and flushing each write + */ + for (i = mta_reg_count - 1; i >= 0 ; i--) { + /* If we are on an 82544 has an errata where writing odd + * offsets overwrites the previous even offset, but writing + * backwards over the range solves the issue by always + * writing the odd offset first + */ + E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); + } + E1000_WRITE_FLUSH(); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + kfree(mcarray); +} + +/** + * e1000_update_phy_info_task - get phy info + * @work: work struct contained inside adapter struct + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + phy_info_task.work); + if (test_bit(__E1000_DOWN, &adapter->flags)) + return; + mutex_lock(&adapter->mutex); + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); + mutex_unlock(&adapter->mutex); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 tctl; + + if (test_bit(__E1000_DOWN, &adapter->flags)) + return; + mutex_lock(&adapter->mutex); + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((er32(TDT) == er32(TDH)) && + (er32(TDFT) == er32(TDFH)) && + (er32(TDFTS) == er32(TDFHS))) { + tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + ew32(TDFT, adapter->tx_head_addr); + ew32(TDFH, adapter->tx_head_addr); + ew32(TDFTS, adapter->tx_head_addr); + ew32(TDFHS, adapter->tx_head_addr); + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + netif_wake_queue(netdev); + } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + } + mutex_unlock(&adapter->mutex); +} + +bool e1000_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or rx + * sequence error interrupt (except on intel ce4100). 
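+ * (On ce4100, get_link_status is forced back to 1 on every call below, + * so the link is re-checked each time.)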
+ * get_link_status will stay false until the + * e1000_check_for_link establishes link for copper adapters + * ONLY + */ + switch (hw->media_type) { + case e1000_media_type_copper: + if (hw->mac_type == e1000_ce4100) + hw->get_link_status = 1; + if (hw->get_link_status) { + e1000_check_for_link(hw); + link_active = !hw->get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_active = hw->serdes_has_link; + break; + default: + break; + } + + return link_active; +} + +/** + * e1000_watchdog - work function + * @work: work struct contained inside adapter struct + **/ +static void e1000_watchdog(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_ring *txdr = adapter->tx_ring; + u32 link, tctl; + + if (test_bit(__E1000_DOWN, &adapter->flags)) + return; + + mutex_lock(&adapter->mutex); + link = e1000_has_link(adapter); + if ((netif_carrier_ok(netdev)) && link) + goto link_up; + + if (link) { + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + bool txb2b = true; + /* update snapshot of PHY registers on LSC */ + e1000_get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = er32(CTRL); + pr_info("%s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None"))); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = false; + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + txb2b = false; + /* maybe add some timeout factor ? */ + break; + } + + /* enable transmits in the hardware */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + netif_carrier_on(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + adapter->smartspeed = 0; + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + pr_info("%s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + + e1000_smartspeed(adapter); + } + +link_up: + e1000_update_stats(adapter); + + hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + hw->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000_update_adaptive(hw); + + if (!netif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
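+ * The reset is deferred to e1000_reset_task() via schedule_work(); it + * reinitializes the adapter in process context.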
+ */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* exit immediately since reset is imminent */ + goto unlock; + } + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; + u32 dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* Reschedule the task */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); + +unlock: + mutex_unlock(&adapter->mutex); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
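+ * A rough worked example (illustrative numbers): starting from + * low_latency, an interval of 40 packets totalling 12000 bytes + * (300 bytes/packet) moves the state to lowest_latency, while 6 jumbo + * packets totalling 54000 bytes give bytes/packets = 9000 > 8000 and + * the state falls to bulk_latency.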
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + struct e1000_hw *hw = &adapter->hw; + + if (unlikely(hw->mac_type < e1000_82540)) + goto update_itr_done; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* jumbo frames get bulk treatment*/ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* jumbo frames need bulk latency setting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) + retval = bulk_latency; + else if (packets <= 2 && bytes < 512) + retval = lowest_latency; + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + if (unlikely(hw->mac_type < e1000_82540)) + return; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (unlikely(adapter->link_speed != SPEED_1000)) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb) +{ + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + int err; + + if (skb_is_gso(skb)) { + if (skb_header_cloned(skb)) { + err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + if (err) + return err; + } + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb->protocol == htons(ETH_P_IPV6)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (++i == tx_ring->count) i = 0; + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb) +{ + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (skb->protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn(drv, "checksum_partial proto=%x!\n", + skb->protocol); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 
0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(cmd_len); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (unlikely(++i == tx_ring->count)) i = 0; + tx_ring->next_to_use = i; + + return true; +} + +#define E1000_MAX_TXD_PWR 12 +#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) + +static int e1000_tx_map(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, + struct sk_buff *skb, unsigned int first, + unsigned int max_per_txd, unsigned int nr_frags, + unsigned int mss) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info; + unsigned int len = skb_headlen(skb); + unsigned int offset = 0, size, count = 0, i; + unsigned int f, bytecount, segs; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + /* Workaround for Controller erratum -- + * descriptor for non-tso packet in a linear SKB that follows a + * tso gets written back prematurely before the data is fully + * DMA'd to the controller + */ + if (!skb->data_len && tx_ring->last_tx_tso && + !skb_is_gso(skb)) { + tx_ring->last_tx_tso = false; + size -= 4; + } + + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc + */ + if (unlikely(mss && !nr_frags && size == len && size > 8)) + size -= 4; + /* work-around for errata 10 and it applies + * to all controllers in PCI-X mode + * The fix is to make sure that the first descriptor of a + * packet is smaller than 2048 - 16 - 16 (or 2016) bytes + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (size > 2015) && count == 0)) + size = 2015; + + /* Workaround for potential 82544 hang in PCI-X. Avoid + * terminating buffers within evenly-aligned dwords. + */ + if (unlikely(adapter->pcix_82544 && + !((unsigned long)(skb->data + offset + size - 1) & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + /* set time_stamp *before* dma to help avoid a possible race */ + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = false; + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + if (len) { + i++; + if (unlikely(i == tx_ring->count)) + i = 0; + } + } + + for (f = 0; f < nr_frags; f++) { + const struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = skb_frag_size(frag); + offset = 0; + + while (len) { + unsigned long bufend; + i++; + if (unlikely(i == tx_ring->count)) + i = 0; + + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc + */ + if (unlikely(mss && f == (nr_frags-1) && + size == len && size > 8)) + size -= 4; + /* Workaround for potential 82544 hang in PCI-X. + * Avoid terminating buffers within evenly-aligned + * dwords.
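+ * (If the last byte of the buffer would land at a physical address + * with bit 2 clear, size is trimmed by 4 below so the buffer ends + * within an odd dword instead.)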
+ */ + bufend = (unsigned long) + page_to_phys(skb_frag_page(frag)); + bufend += offset + size - 1; + if (unlikely(adapter->pcix_82544 && + !(bufend & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i==0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, int tx_flags, + int count) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + if (unlikely(++i == tx_ring->count)) i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + tx_ring->next_to_use = i; + writel(i, hw->hw_addr + tx_ring->tdt); + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); +} + +/* 82547 workaround to avoid controller hang in half-duplex environment. + * The workaround is to avoid queuing a large packet that would span + * the internal Tx FIFO ring boundary by notifying the stack to resend + * the packet at a later time. This gives the Tx FIFO an opportunity to + * flush all packets. When that occurs, we reset the Tx FIFO pointers + * to the beginning of the Tx FIFO. 
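+ * Worked example (illustrative): a 1514-byte frame plus the 16-byte + * FIFO header rounds up to skb_fifo_len = 1536, so the send is + * deferred once 1536 >= E1000_82547_PAD_LEN + fifo_space, i.e. when + * 544 bytes or less of FIFO headroom remain before the wrap.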
+ */ + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; + + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(E1000_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *tx_ring; + unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + + /* This goes back to the question of how to logically map a Tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple Tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. + */ + tx_ring = adapter->tx_ring; + + if (unlikely(skb->len <= 0)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, + * packets may get corrupted during padding by HW. + * To WA this issue, pad all small packets manually. + */ + if (skb->len < ETH_ZLEN) { + if (skb_pad(skb, ETH_ZLEN - skb->len)) + return NETDEV_TX_OK; + skb->len = ETH_ZLEN; + skb_set_tail_pointer(skb, ETH_ZLEN); + } + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
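+ * (Illustrative: with mss = 1460 the clamp below leaves + * min(1460 << 2, 4096) = 4096 unchanged, while mss = 500 drops it to + * min(500 << 2, 4096) = 2000 bytes per descriptor.)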
+ */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { + unsigned int pull_size; + case e1000_82544: + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements + * NOTE: this is a TSO only workaround + * if end byte alignment not correct move us + * into the next dword + */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) + & 4) + break; + /* fall through */ + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err(drv, "__pskb_pull_tail " + "failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + break; + default: + /* do nothing */ + break; + } + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + /* Controller Erratum workaround */ + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), + max_txd_pwr); + if (adapter->pcix_82544) + count += nr_frags; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) + return NETDEV_TX_BUSY; + + if (unlikely((hw->mac_type == e1000_82547) && + (e1000_82547_fifo_workaround(adapter, skb)))) { + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->fifo_stall_task, 1); + return NETDEV_TX_BUSY; + } + + if (vlan_tx_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, tx_ring, skb); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (likely(tso)) { + if (likely(hw->mac_type != e1000_82544)) + tx_ring->last_tx_tso = true; + tx_flags |= E1000_TX_FLAGS_TSO; + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + if (likely(skb->protocol == htons(ETH_P_IP))) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, + nr_frags, mss); + + if (count) { + netdev_sent_queue(netdev, skb->len); + skb_tx_timestamp(skb); + + e1000_tx_queue(adapter, tx_ring, tx_flags, count); + /* Make sure there is space in the ring for the next send. 
*/ + e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); + + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +#define NUM_REGS 38 /* 1 based count */ +static void e1000_regdump(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 regs[NUM_REGS]; + u32 *regs_buff = regs; + int i = 0; + + static const char * const reg_name[] = { + "CTRL", "STATUS", + "RCTL", "RDLEN", "RDH", "RDT", "RDTR", + "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", + "TIDV", "TXDCTL", "TADV", "TARC0", + "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", + "TXDCTL1", "TARC1", + "CTRL_EXT", "ERT", "RDBAL", "RDBAH", + "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", + "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" + }; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDBAL); + regs_buff[9] = er32(TDBAH); + regs_buff[10] = er32(TDLEN); + regs_buff[11] = er32(TDH); + regs_buff[12] = er32(TDT); + regs_buff[13] = er32(TIDV); + regs_buff[14] = er32(TXDCTL); + regs_buff[15] = er32(TADV); + regs_buff[16] = er32(TARC0); + + regs_buff[17] = er32(TDBAL1); + regs_buff[18] = er32(TDBAH1); + regs_buff[19] = er32(TDLEN1); + regs_buff[20] = er32(TDH1); + regs_buff[21] = er32(TDT1); + regs_buff[22] = er32(TXDCTL1); + regs_buff[23] = er32(TARC1); + regs_buff[24] = er32(CTRL_EXT); + regs_buff[25] = er32(ERT); + regs_buff[26] = er32(RDBAL0); + regs_buff[27] = er32(RDBAH0); + regs_buff[28] = er32(TDFH); + regs_buff[29] = er32(TDFT); + regs_buff[30] = er32(TDFHS); + regs_buff[31] = er32(TDFTS); + regs_buff[32] = er32(TDFPC); + regs_buff[33] = er32(RDFH); + regs_buff[34] = er32(RDFT); + regs_buff[35] = er32(RDFHS); + regs_buff[36] = er32(RDFTS); + regs_buff[37] = er32(RDFPC); + + pr_info("Register dump\n"); + for (i = 0; i < NUM_REGS; i++) + pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); +} + +/* + * e1000_dump: Print registers, tx ring and rx ring + */ +static void e1000_dump(struct e1000_adapter *adapter) +{ + /* this code doesn't handle multiple rings */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + e1000_regdump(adapter); + + /* transmit dump */ + pr_info("TX Desc ring0 dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * 
+----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); + + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)tx_desc; + const char *type; + + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + type = "NTC/U"; + else if (i == tx_ring->next_to_use) + type = "NTU"; + else if (i == tx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", + ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i, + le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, buffer_info->skb, type); + } + +rx_ring_summary: + /* receive dump */ + pr_info("\nRX Desc ring dump\n"); + + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); + + if (!netif_msg_rx_status(adapter)) + goto exit; + + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)rx_desc; + const char *type; + + if (i == rx_ring->next_to_use) + type = "NTU"; + else if (i == rx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", + i, le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->skb, type); + } /* for */ + + /* dump the descriptor caches */ + /* rx */ + pr_info("Rx descriptor cache in 64bit format\n"); + for (i = 0x6000; i <= 0x63FF ; i += 0x10) { + pr_info("R%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } + /* tx */ + pr_info("Tx descriptor cache in 64bit format\n"); + for (i = 0x7000; i <= 0x73FF ; i += 0x10) { + pr_info("T%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } +exit: + return; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void e1000_tx_timeout(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + 
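/* e1000_reset_task() will run e1000_reinit_safe() in process context. */ +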
schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = + container_of(work, struct e1000_adapter, reset_task); + + if (test_bit(__E1000_DOWN, &adapter->flags)) + return; + e_err(drv, "Reset adapter\n"); + e1000_reinit_safe(adapter); +} + +/** + * e1000_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the watchdog. + **/ +static struct net_device_stats *e1000_get_stats(struct net_device *netdev) +{ + /* only return the current stats */ + return &netdev->stats; +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + + if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + e_err(probe, "Invalid MTU setting\n"); + return -EINVAL; + } + + /* Adapter-specific max frame size limits. */ + switch (hw->mac_type) { + case e1000_undefined ... e1000_82542_rev2_1: + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + e_err(probe, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + default: + /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ + break; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + hw->max_frame_size = max_frame; + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; + e1000_down(adapter); + } + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= E1000_RXBUFFER_2048) + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + else +#if (PAGE_SIZE >= E1000_RXBUFFER_16384) + adapter->rx_buffer_len = E1000_RXBUFFER_16384; +#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) + adapter->rx_buffer_len = PAGE_SIZE; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (!hw->tbi_compatibility_on && + ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + pr_info("%s changing MTU from %d to %d\n", + netdev->name, netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000_up(adapter); + else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + + return 0; +} + +/** + * e1000_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long flags; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
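+ * (adapter->link_speed is zeroed while the link or interface is down, + * and pci_channel_offline() covers a surprise-removed or error-frozen + * device.)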
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + spin_lock_irqsave(&adapter->stats_lock, flags); + + /* these counters are modified from e1000_tbi_adjust_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.sec += er32(SEC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->tx_packet_delta; + hw->collision_delta = er32(COLC); + adapter->stats.colc += hw->collision_delta; + + if (hw->mac_type >= e1000_82543) { + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + } + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; + netdev->stats.rx_length_errors = adapter->stats.rlerrc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.txerrc; + netdev->stats.tx_aborted_errors = 
adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + if (hw->bad_tx_carr_stats_fd && + adapter->link_duplex == FULL_DUPLEX) { + netdev->stats.tx_carrier_errors = 0; + adapter->stats.tncrs = 0; + } + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + + if ((hw->mac_type <= e1000_82546) && + (hw->phy_type == e1000_phy_m88) && + !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) + adapter->phy_stats.receive_errors += phy_tmp; + } + + /* Management Stats */ + if (hw->has_smbus) { + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + } + + spin_unlock_irqrestore(&adapter->stats_lock, flags); +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (unlikely((!icr))) + return IRQ_NONE; /* Not our interrupt */ + + /* we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 1); + } + + /* disable interrupts, without the synchronize_irq bit */ + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + + if (likely(napi_schedule_prep(&adapter->napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } else { + /* this really should not happen! 
if it does it is basically a + * bug, but not a hard error, so enable ints and continue + */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return IRQ_HANDLED; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @adapter: board private structure + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + int tx_clean_complete = 0, work_done = 0; + + tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); + + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); + + if (!tx_clean_complete) + work_done = budget; + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + napi_complete(napi); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return work_done; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes=0, total_tx_packets=0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + + } + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + tx_desc->upper.data = 0; + + if (unlikely(++i == tx_ring->count)) i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + +#define TX_WAKE_THRESHOLD 32 + if (unlikely(count && netif_carrier_ok(netdev) && + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
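+ * The smp_mb() below publishes the next_to_clean store before the + * netif_queue_stopped() check that follows.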
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->flags))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[eop].time_stamp && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + (unsigned long)((tx_ring - adapter->tx_ring) / + sizeof(struct e1000_tx_ring)), + readl(hw->hw_addr + tx_ring->tdh), + readl(hw->hw_addr + tx_ring->tdt), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + e1000_dump(adapter); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + netdev->stats.tx_bytes += total_tx_bytes; + netdev->stats.tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @sk_buff: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* 82543 or newer only */ + if (unlikely(hw->mac_type < e1000_82543)) return; + /* Ignore Checksum bit is set */ + if (unlikely(status & E1000_RXD_STAT_IXSM)) return; + /* TCP/UDP checksum error bit is set */ + if (unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if (!(status & E1000_RXD_STAT_TCPCS)) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + adapter->hw_csum_good++; +} + +/** + * e1000_consume_page - helper function + **/ +static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + */ +static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, + __le16 vlan, struct sk_buff *skb) +{ + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (status & E1000_RXD_STAT_VP) { + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + napi_gro_receive(&adapter->napi, skb); +} + +/** + * 
e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + */ +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + unsigned long irq_flags; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + if (++i == rx_ring->count) i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, + buffer_info->length, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + u8 *mapped; + u8 last_byte; + + mapped = page_address(buffer_info->page); + last_byte = *(mapped + length - 1); + if (TBI_ACCEPT(hw, status, rx_desc->errors, length, + last_byte)) { + spin_lock_irqsave(&adapter->stats_lock, + irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, + length, mapped); + spin_unlock_irqrestore(&adapter->stats_lock, + irq_flags); + length--; + } else { + if (netdev->features & NETIF_F_RXALL) + goto process_skb; + /* recycle both page and skb */ + buffer_info->skb = skb; + /* an error means any chain goes out the window + * too + */ + if (rx_ring->rx_skb_top) + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + } + +#define rxtop rx_ring->rx_skb_top +process_skb: + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = skb; + skb_fill_page_desc(rxtop, 0, buffer_info->page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the skb, only consumed the page */ + buffer_info->skb = skb; + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->page, 0, length); + /* re-use the current skb, we only consumed the + * page + */ + buffer_info->skb = skb; + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + if (length <= copybreak && + skb_tailroom(skb) >= 
length) { + u8 *vaddr; + vaddr = kmap_atomic(buffer_info->page); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->page + */ + skb_put(skb, length); + } else { + skb_fill_page_desc(skb, 0, + buffer_info->page, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += (skb->len - 4); /* don't count FCS */ + if (likely(!(netdev->features & NETIF_F_RXFCS))) + pskb_trim(skb, skb->len - 4); + total_rx_packets++; + + /* eth type trans needs skb->data to point to something */ + if (!pskb_may_pull(skb, ETH_HLEN)) { + e_err(drv, "pskb_may_pull failed.\n"); + dev_kfree_skb(skb); + goto next_desc; + } + + e1000_receive_skb(adapter, status, rx_desc->special, skb); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/* this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static void e1000_check_copybreak(struct net_device *netdev, + struct e1000_buffer *buffer_info, + u32 length, struct sk_buff **skb) +{ + struct sk_buff *new_skb; + + if (length > copybreak) + return; + + new_skb = netdev_alloc_skb_ip_align(netdev, length); + if (!new_skb) + return; + + skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN, + (*skb)->data - NET_IP_ALIGN, + length + NET_IP_ALIGN); + /* save the skb in buffer_info as good */ + buffer_info->skb = *skb; + *skb = new_skb; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + */ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + unsigned long flags; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + skb = buffer_info->skb; + buffer_info->skb = NULL; + + prefetch(skb->data - NET_IP_ALIGN); + + if (++i == rx_ring->count) i = 0; + 
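/* i now indexes the descriptor after the one in flight; grab and + * prefetch it so the next iteration starts on warm cache lines. + */ + 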
next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_single(&pdev->dev, buffer_info->dma, + buffer_info->length, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + /* !EOP means multiple descriptors were used to store a single + * packet, if that's the case we need to toss it. In fact, we need + * to toss every packet with the EOP bit clear and the next + * frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(status & E1000_RXD_STAT_EOP))) + adapter->discarding = true; + + if (adapter->discarding) { + /* All receives must fit into a single buffer */ + e_dbg("Receive packet consumed multiple buffers\n"); + /* recycle */ + buffer_info->skb = skb; + if (status & E1000_RXD_STAT_EOP) + adapter->discarding = false; + goto next_desc; + } + + if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + u8 last_byte = *(skb->data + length - 1); + if (TBI_ACCEPT(hw, status, rx_desc->errors, length, + last_byte)) { + spin_lock_irqsave(&adapter->stats_lock, flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, + length, skb->data); + spin_unlock_irqrestore(&adapter->stats_lock, + flags); + length--; + } else { + if (netdev->features & NETIF_F_RXALL) + goto process_skb; + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + } + +process_skb: + total_rx_bytes += (length - 4); /* don't count FCS */ + total_rx_packets++; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + /* adjust length to remove Ethernet CRC, this must be + * done after the TBI_ACCEPT workaround above + */ + length -= 4; + + e1000_check_copybreak(netdev, buffer_info, length, &skb); + + skb_put(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + e1000_receive_skb(adapter, status, rx_desc->special, skb); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void +e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, int cleaned_count) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = 256 - 16 /*for skb_reserve */ ; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto check_page; + } + + skb = 
netdev_alloc_skb_ip_align(netdev, bufsz); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; + buffer_info->length = adapter->rx_buffer_len; +check_page: + /* allocate a new page if necessary */ + if (!buffer_info->page) { + buffer_info->page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->page, 0, + buffer_info->length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + put_page(buffer_info->page); + dev_kfree_skb(skb); + buffer_info->page = NULL; + buffer_info->skb = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; /* while !buffer_info->skb */ + } + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto map_skb; + } + + skb = netdev_alloc_skb_ip_align(netdev, bufsz); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + struct sk_buff *oldskb = skb; + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, skb->data); + /* Try again, without freeing the previous */ + skb = netdev_alloc_skb_ip_align(netdev, bufsz); + /* Failed allocation, critical failure */ + if (!skb) { + dev_kfree_skb(oldskb); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) { + /* give up */ + dev_kfree_skb(skb); + dev_kfree_skb(oldskb); + adapter->alloc_rx_buff_failed++; + break; /* while !buffer_info->skb */ + } + + /* Use new allocation */ + dev_kfree_skb(oldskb); + } + buffer_info->skb = skb; + buffer_info->length = adapter->rx_buffer_len; +map_skb: + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data, + buffer_info->length, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + dev_kfree_skb(skb); + buffer_info->skb = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; /* while !buffer_info->skb */ + } + + /* XXX if it was allocated cleanly it will never map to a + * boundary crossing + */ + + /* Fix for errata 23, can't cross 64kB 
boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + e_err(rx_err, "dma align check failed: %u bytes at " + "%p\n", adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + dev_kfree_skb(skb); + buffer_info->skb = NULL; + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + adapter->alloc_rx_buff_failed++; + break; /* while !buffer_info->skb */ + } + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, hw->hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. + * @adapter: + **/ +static void e1000_smartspeed(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_status; + u16 phy_ctrl; + + if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || + !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back + */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + +/** + * e1000_ioctl - + * @netdev: + * @ifreq: + * @cmd: + **/ +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * e1000_mii_ioctl - + * @netdev: + * @ifreq: + * @cmd: + **/ +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct mii_ioctl_data *data = if_mii(ifr); + int retval; + u16 mii_reg; + unsigned long flags; + + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + 
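/* Reads below mask reg_num down to the 5-bit MII address space; + * writes reject out-of-range registers outright. + */ + 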
switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + break; + case SIOCGMIIREG: + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) + return -EFAULT; + mii_reg = data->val_in; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_write_phy_reg(hw, data->reg_num, + mii_reg)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + if (hw->media_type == e1000_media_type_copper) { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (mii_reg & MII_CR_AUTO_NEG_EN) { + hw->autoneg = 1; + hw->autoneg_advertised = 0x2F; + } else { + u32 speed; + if (mii_reg & 0x40) + speed = SPEED_1000; + else if (mii_reg & 0x2000) + speed = SPEED_100; + else + speed = SPEED_10; + retval = e1000_set_spd_dplx( + adapter, speed, + ((mii_reg & 0x100) + ? DUPLEX_FULL : + DUPLEX_HALF)); + if (retval) + return retval; + } + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + case M88E1000_PHY_SPEC_CTRL: + case M88E1000_EXT_PHY_SPEC_CTRL: + if (e1000_phy_reset(hw)) + return -EIO; + break; + } + } else { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + } + } + break; + default: + return -EOPNOTSUPP; + } + return E1000_SUCCESS; +} + +void e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + e_err(probe, "Error in setting MWI\n"); +} + +void e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + + pci_clear_mwi(adapter->pdev); +} + +int e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) +{ + outl(value, port); +} + +static bool e1000_vlan_used(struct e1000_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + return true; + return false; +} + +static void __e1000_vlan_mode(struct e1000_adapter *adapter, + netdev_features_t features) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = er32(CTRL); + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + ctrl |= E1000_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~E1000_CTRL_VME; + } + ew32(CTRL, ctrl); +} +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, adapter->netdev->features); + if (filter_on) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_CFIEN; + if (!(adapter->netdev->flags & IFF_PROMISC)) + rctl |= E1000_RCTL_VFE; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } else { + /* disable VLAN receive 
filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + } + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, features); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return 0; + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, true); + + /* add VID to filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + clear_bit(vid, adapter->active_vlans); + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, false); + + return 0; +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + if (!e1000_vlan_used(adapter)) + return; + + e1000_vlan_filter_on_off(adapter, true); + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_hw *hw = &adapter->hw; + + hw->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 Mbps Full duplex */ + if ((hw->media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_10_half; + break; + case SPEED_10 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_10_full; + break; + case SPEED_100 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_100_half; + break; + case SPEED_100 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_100_full; + break; + case SPEED_1000 + DUPLEX_FULL: + hw->autoneg = 1; + hw->autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err(probe, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = 
netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + } + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_rx_mode(netdev); + + rctl = er32(RCTL); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) + rctl |= E1000_RCTL_MPE; + + /* enable receives in the hardware */ + ew32(RCTL, rctl | E1000_RCTL_EN); + + if (hw->mac_type >= e1000_82540) { + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + } + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + e1000_release_manageability(adapter); + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->en_mng_pt) + *enable_wake = true; + + if (netif_running(netdev)) + e1000_free_irq(adapter); + + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} + +static int e1000_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot enable PCI device from suspend\n"); + return err; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000_power_up_phy(adapter); + e1000_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
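+ * The device IRQ is disabled around the call into e1000_intr(), so the + * poll cannot race the hard interrupt handler.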
+ */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. + */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000_down(adapter); + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000_reset(adapter); + ew32(WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * it's OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) { + if (e1000_up(adapter)) { + pr_info("can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +/* e1000_main.c */ diff --git a/addons/e1000/src/3.10.108/e1000_osdep.h b/addons/e1000/src/3.10.108/e1000_osdep.h new file mode 100644 index 00000000..33e7c45a --- /dev/null +++ b/addons/e1000/src/3.10.108/e1000_osdep.h @@ -0,0 +1,109 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include <asm/io.h> + +#define CONFIG_RAM_BASE 0x60000 +#define GBE_CONFIG_OFFSET 0x0 + +#define GBE_CONFIG_RAM_BASE \ + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) + +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) + +#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ + (iowrite16_rep(base + offset, data, count)) + +#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ + (ioread16_rep(base + (offset << 1), data, count)) + +#define er32(reg) \ + (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg))) + +#define ew32(reg, value) \ + (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg)))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset))) + +#define E1000_WRITE_FLUSH() er32(STATUS) + +#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG(a, reg) ( \ + readl((a)->flash_address + reg)) + +#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ + readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/addons/e1000/src/3.10.108/e1000_param.c b/addons/e1000/src/3.10.108/e1000_param.c new file mode 100644 index 00000000..c9cde352 --- /dev/null +++ b/addons/e1000/src/3.10.108/e1000_param.c @@ -0,0 +1,754 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. 
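+ * For example, AutoNeg=0x2C on the first adapter advertises 1000/FD, + * 100/FD and 100/HD.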
+ * The supported speed and duplex modes are listed below + * + * Bit 7 6 5 4 3 2 1 0 + * Speed (Mbps) N/A N/A 1000 N/A 100 100 10 10 + * Duplex Full Full Half Full Half + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ +E1000_PARAM(FlowControl, "Flow Control setting"); +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer -based NICs + * + * Default Value: 1 + */ +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + e_dev_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + e_dev_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + e_dev_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case 
list_option: { + int i; + const struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + e_dev_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + e_dev_info("Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void e1000_check_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_dev_warn("Warning: no configuration for board #%i " + "using defaults for all values\n", bd); + } + + { /* Transmit Descriptor Count */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { + .min = E1000_MIN_TXD, + .max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD + }} + }; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = ALIGN(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + tx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { + .min = E1000_MIN_RXD, + .max = mac_type < e1000_82544 ? 
E1000_MAX_RXD : + E1000_MAX_82544_RXD + }} + }; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = ALIGN(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); + } else { + rx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct e1000_opt_list fc_list[] = { + { E1000_FC_NONE, "Flow Control Disabled" }, + { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, + { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { E1000_FC_FULL, "Flow Control Enabled" }, + { E1000_FC_DEFAULT, "Flow Control Hardware Default" } + }; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = E1000_FC_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; + } else { + adapter->hw.fc = adapter->hw.original_fc = opt.def; + } + } + { /* Transmit Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY }} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt 
Throttling Rate */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_dev_info("%s turned off\n", opt.name); + break; + case 1: + e_dev_info("%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_dev_info("%s set to dynamic conservative " + "mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_dev_info("%s set to simplified " + "(2000-8000) ints mode\n", opt.name); + adapter->itr_setting = adapter->itr; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* save the setting, because the dynamic bits + * change itr. + * clear the lower two bits because they are + * used as control + */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->smart_power_down = spd; + } else { + adapter->smart_power_down = opt.def; + } + } + + switch (adapter->hw.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ +static void e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; + if (num_Speed > bd) { + e_dev_info("Speed not valid for fiber adapters, parameter " + "ignored\n"); + } + + if (num_Duplex > bd) { + e_dev_info("Duplex not valid for fiber adapters, parameter " + "ignored\n"); + } + + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { + e_dev_info("AutoNeg other than 1000/Full is not valid for fiber " + "adapters, parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ +static void e1000_check_copper_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + unsigned int speed, dplx, an; + int bd = adapter->bd_number; + + { /* Speed */ + static const struct e1000_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + + if (num_Speed > bd) { + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); + } else { + speed = opt.def; + } + } + { /* Duplex */ + static const struct e1000_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + opt = (struct e1000_option) { + .type = 
list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (num_Duplex > bd) { + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); + } else { + dplx = opt.def; + } + } + + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { + e_dev_info("AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + static const struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + + if (num_AutoNeg > bd) { + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); + } else { + an = opt.def; + } + adapter->hw.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + adapter->hw.autoneg = adapter->fc_autoneg = 1; + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) + e_dev_info("Speed and duplex autonegotiation " + "enabled\n"); + break; + case HALF_DUPLEX: + e_dev_info("Half Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Half Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + e_dev_info("Full Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Full Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + e_dev_info("10 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 10 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + e_dev_info("Forcing to 10 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + e_dev_info("Forcing to 10 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + 
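/* with a forced Speed and Duplex, both link autonegotiation and flow-control autonegotiation are off, so the MAC must be programmed with the exact fixed 10 Mbps/Full setting below */ + 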
adapter->hw.forced_speed_duplex = e1000_10_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100: + e_dev_info("100 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 100 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + e_dev_info("Forcing to 100 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + e_dev_info("Forcing to 100 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_1000: + e_dev_info("1000 Mbps Speed specified without Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); + /* fall through */ + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " + "only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. " + "Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/addons/e1000/src/4.4.180/Makefile b/addons/e1000/src/4.4.180/Makefile new file mode 100644 index 00000000..59afd9fd --- /dev/null +++ b/addons/e1000/src/4.4.180/Makefile @@ -0,0 +1,2 @@ +obj-m += e1000.o +e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o diff --git a/addons/e1000/src/4.4.180/e1000.h b/addons/e1000/src/4.4.180/e1000.h new file mode 100644 index 00000000..481e9944 --- /dev/null +++ b/addons/e1000/src/4.4.180/e1000.h @@ -0,0 +1,378 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include <linux/stddef.h> +#include <linux/module.h> +#include <linux/types.h> +#include <asm/byteorder.h> +#include <linux/mm.h> +#include <linux/errno.h> +#include <linux/ioport.h> +#include <linux/pci.h> +#include <linux/kernel.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/skbuff.h> +#include <linux/delay.h> +#include <linux/timer.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> +#include <linux/interrupt.h> +#include <linux/string.h> +#include <linux/pagemap.h> +#include <linux/dma-mapping.h> +#include <linux/bitops.h> +#include <asm/io.h> +#include <asm/irq.h> +#include <linux/capability.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/udp.h> +#include <net/pkt_sched.h> +#include <linux/list.h> +#include <linux/reboot.h> +#include <net/checksum.h> +#include <linux/mii.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> + +#define BAR_0 0 +#define BAR_1 1 +#define BAR_5 5 + +#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + +struct e1000_adapter; + +#include "e1000_hw.h" + +#define E1000_MAX_INTR 10 + +/* + * Count for polling __E1000_RESET condition every 10-20msec. + */ +#define E1000_CHECK_RESET_COUNT 50 + +/* TX/RX descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 256 +#define E1000_MIN_TXD 48 +#define E1000_MAX_82544_TXD 4096 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 256 +#define E1000_MIN_RXD 48 +#define E1000_MAX_82544_RXD 4096 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +/* this is the size past which hardware will drop packets when setting LPE=0 */ +#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 + +/* Supported Rx Buffer Sizes */ +#define E1000_RXBUFFER_128 128 /* Used for packet split */ +#define E1000_RXBUFFER_256 256 /* Used for packet split */ +#define E1000_RXBUFFER_512 512 +#define E1000_RXBUFFER_1024 1024 +#define E1000_RXBUFFER_2048 2048 +#define E1000_RXBUFFER_4096 4096 +#define E1000_RXBUFFER_8192 8192 +#define E1000_RXBUFFER_16384 16384 + +/* SmartSpeed delimiters */ +#define E1000_SMARTSPEED_DOWNSHIFT 3 +#define E1000_SMARTSPEED_MAX 15 + +/* Packet Buffer allocations */ +#define E1000_PBA_BYTES_SHIFT 0xA +#define E1000_TX_HEAD_ADDR_SHIFT 7 +#define E1000_PBA_TX_MASK 0xFFFF0000 + +/* Flow Control Watermarks */ +#define E1000_FC_HIGH_DIFF 0x1638 /* High: 5688 bytes below Rx FIFO size */ +#define E1000_FC_LOW_DIFF 0x1640 /* Low: 5696 bytes below Rx FIFO size */ + +#define E1000_FC_PAUSE_TIME 0xFFFF /* pause for the max or until send xon */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +#define E1000_TX_QUEUE_WAKE 16 +/* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_82544_APM 0x0004 +#define E1000_EEPROM_APME 0x0400 + +#ifndef E1000_MASTER_SLAVE +/* Switch to override PHY master/slave setting */ +#define E1000_MASTER_SLAVE e1000_ms_hw_default +#endif + +#define E1000_MNG_VLAN_NONE (-1) + +/* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_tx_buffer { + struct sk_buff *skb; + dma_addr_t dma; + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + bool mapped_as_page; + unsigned short segs; + unsigned int bytecount; +}; + +struct e1000_rx_buffer { + union { + struct page *page; /* jumbo: alloc_page */ + u8 *data; /* else, netdev_alloc_frag */ + } rxbuf; + dma_addr_t dma; +}; + +struct e1000_tx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_tx_buffer *buffer_info; + + u16 tdh; + u16 tdt; + bool last_tx_tso; +}; + +struct e1000_rx_ring { + /* pointer to the descriptor ring memory */ + void *desc; + /* physical address of the descriptor ring */ + dma_addr_t dma; + /* length of descriptor ring in bytes */ + unsigned int size; + /* number of descriptors in the ring */ + unsigned int count; + /* next descriptor to associate a buffer with */ + unsigned int next_to_use; + /* next descriptor to check for DD status bit */ + unsigned int next_to_clean; + /* array of buffer information structs */ + struct e1000_rx_buffer *buffer_info; + struct sk_buff *rx_skb_top; + + /* cpu for rx queue */ + int cpu; + + u16 rdh; + u16 rdt; +}; + +#define E1000_DESC_UNUSED(R) \ +({ \ + unsigned int clean = smp_load_acquire(&(R)->next_to_clean); \ + unsigned int use = READ_ONCE((R)->next_to_use); \ + (clean > use ? 
0 : (R)->count) + clean - use - 1; \ +}) + +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +/* board specific private data structure */ + +struct e1000_adapter { + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u16 mng_vlan_id; + u32 bd_number; + u32 rx_buffer_len; + u32 wol; + u32 smartspeed; + u32 en_mng_pt; + u16 link_speed; + u16 link_duplex; + spinlock_t stats_lock; + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + u8 fc_autoneg; + + /* TX */ + struct e1000_tx_ring *tx_ring; /* One per active queue */ + unsigned int restart_queue; + u32 txd_cmd; + u32 tx_int_delay; + u32 tx_abs_int_delay; + u32 gotcl; + u64 gotcl_old; + u64 tpt_old; + u64 colc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u8 tx_timeout_factor; + atomic_t tx_fifo_stall; + bool pcix_82544; + bool detect_tx_hung; + bool dump_buffers; + + /* RX */ + bool (*clean_rx)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); + void (*alloc_rx_buf)(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); + struct e1000_rx_ring *rx_ring; /* One per active queue */ + struct napi_struct napi; + + int num_tx_queues; + int num_rx_queues; + + u64 hw_csum_err; + u64 hw_csum_good; + u32 alloc_rx_buff_failed; + u32 rx_int_delay; + u32 rx_abs_int_delay; + bool rx_csum; + u32 gorcl; + u64 gorcl_old; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + u32 test_icr; + struct e1000_tx_ring test_tx_ring; + struct e1000_rx_ring test_rx_ring; + + int msg_enable; + + /* to not mess up cache alignment, always add to the bottom */ + bool tso_force; + bool smart_power_down; /* phy smart power down */ + bool quad_port_a; + unsigned long flags; + u32 eeprom_wol; + + /* for ioport free */ + int bars; + int need_ioport; + + bool discarding; + + struct work_struct reset_task; + struct delayed_work watchdog_task; + struct delayed_work fifo_stall_task; + struct delayed_work phy_info_task; +}; + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_DOWN, + __E1000_DISABLED +}; + +#undef pr_fmt +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw); +#define e_dbg(format, arg...) \ + netdev_dbg(e1000_get_hw_dev(hw), format, ## arg) +#define e_err(msglvl, format, arg...) \ + netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_info(msglvl, format, arg...) \ + netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_warn(msglvl, format, arg...) \ + netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_notice(msglvl, format, arg...) \ + netif_notice(adapter, msglvl, adapter->netdev, format, ## arg) +#define e_dev_info(format, arg...) \ + dev_info(&adapter->pdev->dev, format, ## arg) +#define e_dev_warn(format, arg...) 
\ + dev_warn(&adapter->pdev->dev, format, ## arg) +#define e_dev_err(format, arg...) \ + dev_err(&adapter->pdev->dev, format, ## arg) + +extern char e1000_driver_name[]; +extern const char e1000_driver_version[]; + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_update_stats(struct e1000_adapter *adapter); +bool e1000_has_link(struct e1000_adapter *adapter); +void e1000_power_up_phy(struct e1000_adapter *); +void e1000_set_ethtool_ops(struct net_device *netdev); +void e1000_check_options(struct e1000_adapter *adapter); +char *e1000_get_hw_dev_name(struct e1000_hw *hw); + +#endif /* _E1000_H_ */ diff --git a/addons/e1000/src/4.4.180/e1000_ethtool.c b/addons/e1000/src/4.4.180/e1000_ethtool.c new file mode 100644 index 00000000..d70b2e5d --- /dev/null +++ b/addons/e1000/src/4.4.180/e1000_ethtool.c @@ -0,0 +1,1909 @@ +/******************************************************************************* + * Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2006 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + * + ******************************************************************************/ + +/* ethtool support for e1000 */ + +#include "e1000.h" +#include <linux/jiffies.h> +#include <linux/uaccess.h> + +enum {NETDEV_STATS, E1000_STATS}; + +struct e1000_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define E1000_STAT(m) E1000_STATS, \ + sizeof(((struct e1000_adapter *)0)->m), \ + offsetof(struct e1000_adapter, m) +#define E1000_NETDEV_STAT(m) NETDEV_STATS, \ + sizeof(((struct net_device *)0)->m), \ + offsetof(struct net_device, m) + +static const struct e1000_stats e1000_gstrings_stats[] = { + { "rx_packets", E1000_STAT(stats.gprc) }, + { "tx_packets", E1000_STAT(stats.gptc) }, + { "rx_bytes", E1000_STAT(stats.gorcl) }, + { "tx_bytes", E1000_STAT(stats.gotcl) }, + { "rx_broadcast", E1000_STAT(stats.bprc) }, + { "tx_broadcast", E1000_STAT(stats.bptc) }, + { "rx_multicast", E1000_STAT(stats.mprc) }, + { "tx_multicast", E1000_STAT(stats.mptc) }, + { "rx_errors", E1000_STAT(stats.rxerrc) }, + { "tx_errors", E1000_STAT(stats.txerrc) }, + { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) }, + { "multicast", E1000_STAT(stats.mprc) }, + { "collisions", E1000_STAT(stats.colc) }, + { "rx_length_errors", E1000_STAT(stats.rlerrc) }, + { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) }, + { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, + { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) }, + { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, + { "rx_missed_errors", E1000_STAT(stats.mpc) }, + { "tx_aborted_errors", E1000_STAT(stats.ecol) }, + { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, + { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) }, + { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) }, + { "tx_window_errors", E1000_STAT(stats.latecol) }, + { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, + { "tx_deferred_ok", E1000_STAT(stats.dc) }, + { "tx_single_coll_ok", E1000_STAT(stats.scc) }, + { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, + { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, + { "tx_restart_queue", E1000_STAT(restart_queue) }, + { "rx_long_length_errors", E1000_STAT(stats.roc) }, + { "rx_short_length_errors", E1000_STAT(stats.ruc) }, + { "rx_align_errors", E1000_STAT(stats.algnerrc) }, + { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) }, + { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) }, + { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) }, + { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, + { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, + { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, + { "rx_long_byte_count", E1000_STAT(stats.gorcl) }, + { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, + { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, + { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, + { "tx_smbus", E1000_STAT(stats.mgptc) }, + { "rx_smbus", E1000_STAT(stats.mgprc) }, + { "dropped_smbus", E1000_STAT(stats.mgpdc) }, +}; + +#define E1000_QUEUE_STATS_LEN 0 +#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN) +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) + +static int e1000_get_settings(struct net_device *netdev, + 
struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->media_type == e1000_media_type_copper) { + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full| + SUPPORTED_Autoneg | + SUPPORTED_TP); + ecmd->advertising = ADVERTISED_TP; + + if (hw->autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy_addr; + + if (hw->mac_type == e1000_82543) + ecmd->transceiver = XCVR_EXTERNAL; + else + ecmd->transceiver = XCVR_INTERNAL; + + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + + if (hw->mac_type >= e1000_82545) + ecmd->transceiver = XCVR_INTERNAL; + else + ecmd->transceiver = XCVR_EXTERNAL; + } + + if (er32(STATUS) & E1000_STATUS_LU) { + e1000_get_speed_and_duplex(hw, &adapter->link_speed, + &adapter->link_duplex); + ethtool_cmd_speed_set(ecmd, adapter->link_speed); + + /* unfortunately FULL_DUPLEX != DUPLEX_FULL + * and HALF_DUPLEX != DUPLEX_HALF + */ + if (adapter->link_duplex == FULL_DUPLEX) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + } + + ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) || + hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 1; MDI => 0 */ + if ((hw->media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + ecmd->eth_tp_mdix = (!!adapter->phy_info.mdix_mode ? + ETH_TP_MDI_X : ETH_TP_MDI); + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->mdix; + return 0; +} + +static int e1000_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->autoneg = 1; + if (hw->media_type == e1000_media_type_fiber) + hw->autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg; + else + hw->autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | + ADVERTISED_Autoneg; + ecmd->advertising = hw->autoneg_advertised; + } else { + u32 speed = ethtool_cmd_speed(ecmd); + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { + clear_bit(__E1000_RESETTING, &adapter->flags); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->mdix = AUTO_ALL_MODES; + else + hw->mdix = ecmd->eth_tp_mdix_ctrl; + } + + /* reset the link */ + + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +} + +static u32 e1000_get_link(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* If the link is not reported up to netdev, interrupts are disabled, + * and so the physical link state may have changed since we last + * looked. Set get_link_status to make sure that the true link + * state is interrogated, rather than pulling a cached and possibly + * stale link state from the driver. + */ + if (!netif_carrier_ok(netdev)) + adapter->hw.get_link_status = 1; + + return e1000_has_link(adapter); +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc == E1000_FC_RX_PAUSE) { + pause->rx_pause = 1; + } else if (hw->fc == E1000_FC_TX_PAUSE) { + pause->tx_pause = 1; + } else if (hw->fc == E1000_FC_FULL) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_FULL; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_RX_PAUSE; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc = E1000_FC_TX_PAUSE; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc = E1000_FC_NONE; + + hw->original_fc = hw->fc; + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + if (netif_running(adapter->netdev)) { + e1000_down(adapter); + e1000_up(adapter); + } else { + e1000_reset(adapter); + } + } else + retval = ((hw->media_type == e1000_media_type_fiber) ? 
+ e1000_setup_link(hw) : e1000_force_mac_fc(hw)); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device *netdev) +{ +#define E1000_REGS_LEN 32 + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs, + void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN); + regs_buff[9] = er32(TDH); + regs_buff[10] = er32(TDT); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = hw->phy_type; /* PHY type (IGP=1, M88=0) */ + if (hw->phy_type == e1000_phy_igp) { + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_A); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_B); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[14] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_C); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[15] = (u32)phy_data; /* cable length */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_AGC_D); + e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[16] = (u32)phy_data; /* cable length */ + regs_buff[17] = 0; /* extended 10bt distance (not needed) */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[18] = (u32)phy_data; /* cable polarity */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, + IGP01E1000_PHY_PCS_INIT_REG); + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG & + IGP01E1000_PHY_PAGE_SELECT, &phy_data); + regs_buff[19] = (u32)phy_data; /* cable polarity */ + regs_buff[20] = 0; /* polarity correction enabled (always) */ + regs_buff[22] = 0; /* phy receive errors (unavailable) */ + regs_buff[23] = regs_buff[18]; /* mdix mode */ + e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0); + } else { + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy 
reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + if (hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + regs_buff[26] = er32(MANC); + } +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + return hw->eeprom.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word, last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = hw->vendor_id | (hw->device_id << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * + (last_word - first_word + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->eeprom.type == e1000_eeprom_spi) + ret_val = e1000_read_eeprom(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_eeprom(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), + eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len, first_word, last_word, ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16))) + return -EFAULT; + + max_len = hw->eeprom.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word + * only the second byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, first_word, 1, + &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* need read/modify/write of last changed EEPROM word + * only the first byte of the word is being modified + */ + ret_val = e1000_read_eeprom(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + } + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]); + + ret_val = e1000_write_eeprom(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + /* Update the checksum over the first part 
of the EEPROM if needed */ + if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG)) + e1000_update_eeprom_checksum(hw); + + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, e1000_driver_name, + sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, e1000_driver_version, + sizeof(drvinfo->version)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr = adapter->tx_ring; + struct e1000_rx_ring *rxdr = adapter->rx_ring; + + ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD : + E1000_MAX_82544_RXD; + ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD : + E1000_MAX_82544_TXD; + ring->rx_pending = rxdr->count; + ring->tx_pending = txdr->count; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + e1000_mac_type mac_type = hw->mac_type; + struct e1000_tx_ring *txdr, *tx_old; + struct e1000_rx_ring *rxdr, *rx_old; + int i, err; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + + if (netif_running(adapter->netdev)) + e1000_down(adapter); + + tx_old = adapter->tx_ring; + rx_old = adapter->rx_ring; + + err = -ENOMEM; + txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring), + GFP_KERNEL); + if (!txdr) + goto err_alloc_tx; + + rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring), + GFP_KERNEL); + if (!rxdr) + goto err_alloc_rx; + + adapter->tx_ring = txdr; + adapter->rx_ring = rxdr; + + rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD); + rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ? + E1000_MAX_RXD : E1000_MAX_82544_RXD)); + rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE); + txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD); + txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ? 
+ E1000_MAX_TXD : E1000_MAX_82544_TXD)); + txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE); + + for (i = 0; i < adapter->num_tx_queues; i++) + txdr[i].count = txdr->count; + for (i = 0; i < adapter->num_rx_queues; i++) + rxdr[i].count = rxdr->count; + + if (netif_running(adapter->netdev)) { + /* Try to get new resources before deleting old */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* save the new, restore the old in order to free it, + * then restore the new back again + */ + + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + e1000_free_all_rx_resources(adapter); + e1000_free_all_tx_resources(adapter); + adapter->rx_ring = rxdr; + adapter->tx_ring = txdr; + err = e1000_up(adapter); + if (err) + goto err_setup; + } + kfree(tx_old); + kfree(rx_old); + + clear_bit(__E1000_RESETTING, &adapter->flags); + return 0; +err_setup_tx: + e1000_free_all_rx_resources(adapter); +err_setup_rx: + adapter->rx_ring = rx_old; + adapter->tx_ring = tx_old; + kfree(rxdr); +err_alloc_rx: + kfree(txdr); +err_alloc_tx: + if (netif_running(adapter->netdev)) + e1000_up(adapter); +err_setup: + clear_bit(__E1000_RESETTING, &adapter->flags); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + int i; + + for (i = 0; i < ARRAY_SIZE(test); i++) { + writel(write & test[i], address); + read = readl(address); + if (read != (write & test[i] & mask)) { + e_err(drv, "pattern test reg %04X failed: " + "got 0x%08X expected 0x%08X\n", + reg, read, (write & test[i] & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) +{ + struct e1000_hw *hw = &adapter->hw; + u8 __iomem *address = hw->hw_addr + reg; + u32 read; + + writel(write & mask, address); + read = readl(address); + if ((read & mask) != (write & mask)) { + e_err(drv, "set/check reg %04X test failed: " + "got 0x%08X expected 0x%08X\n", + reg, (read & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST(reg, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, \ + (hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg, \ + mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + u32 value, before, after; + u32 i, toggle; + struct e1000_hw *hw = &adapter->hw; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. 
+ */ + + /* there are several bits on newer hardware that are r/w */ + toggle = 0xFFFFF833; + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err(drv, "failed STATUS register test got: " + "0x%08X expected: 0x%08X\n", after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF); + + REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000); + + before = 0x06DFB3FE; + REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000); + + if (hw->mac_type >= e1000_82543) { + REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF); + value = E1000_RAR_ENTRIES; + for (i = 0; i < value; i++) { + REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2), + 0x8003FFFF, 0xFFFFFFFF); + } + } else { + REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF); + REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF); + REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF); + } + + value = E1000_MC_TBL_SIZE; + for (i = 0; i < value; i++) + REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) { + *data = 1; + break; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)EEPROM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + u32 mask, i = 0; + bool shared_int = true; + u32 irq = adapter->pdev->irq; + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + + /* NOTE: we don't test MSI interrupts here, yet + * Hook up test interrupt handler just for this test + */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) + shared_int = false; + else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } + e_info(hw, "testing %s interrupt\n", (shared_int ? 
+ "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Test each interrupt */ + for (; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + E1000_WRITE_FLUSH(); + msleep(10); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + E1000_WRITE_FLUSH(); + msleep(10); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + E1000_WRITE_FLUSH(); + msleep(10); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + + return *data; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i; + + if (txdr->desc && txdr->buffer_info) { + for (i = 0; i < txdr->count; i++) { + if (txdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + txdr->buffer_info[i].dma, + txdr->buffer_info[i].length, + DMA_TO_DEVICE); + if (txdr->buffer_info[i].skb) + dev_kfree_skb(txdr->buffer_info[i].skb); + } + } + + if (rxdr->desc && rxdr->buffer_info) { + for (i = 0; i < rxdr->count; i++) { + if (rxdr->buffer_info[i].dma) + dma_unmap_single(&pdev->dev, + rxdr->buffer_info[i].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + kfree(rxdr->buffer_info[i].rxbuf.data); + } + } + + if (txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + txdr->desc = NULL; + } + if (rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + rxdr->desc = NULL; + } + + kfree(txdr->buffer_info); + txdr->buffer_info = NULL; + kfree(rxdr->buffer_info); + rxdr->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + u32 rctl; + int i, ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!txdr->count) + txdr->count = E1000_DEFAULT_TXD; + + txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer), + GFP_KERNEL); + if (!txdr->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + txdr->desc = dma_zalloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { + ret_val = 2; + goto err_nomem; + } + 
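/* freshly allocated ring: the software indices start out equal (ring empty), and the hardware head/tail registers are zeroed below to match */ + 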
txdr->next_to_use = txdr->next_to_clean = 0; + + ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH, ((u64)txdr->dma >> 32)); + ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc)); + ew32(TDH, 0); + ew32(TDT, 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < txdr->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i); + struct sk_buff *skb; + unsigned int size = 1024; + + skb = alloc_skb(size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, size); + txdr->buffer_info[i].skb = skb; + txdr->buffer_info[i].length = skb->len; + txdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RPS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rxdr->count) + rxdr->count = E1000_DEFAULT_RXD; + + rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer), + GFP_KERNEL); + if (!rxdr->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc); + rxdr->desc = dma_zalloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { + ret_val = 6; + goto err_nomem; + } + rxdr->next_to_use = rxdr->next_to_clean = 0; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF)); + ew32(RDBAH, ((u64)rxdr->dma >> 32)); + ew32(RDLEN, rxdr->size); + ew32(RDH, 0); + ew32(RDT, 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rxdr->count; i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i); + u8 *buf; + + buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN, + GFP_KERNEL); + if (!buf) { + ret_val = 7; + goto err_nomem; + } + rxdr->buffer_info[i].rxbuf.data = buf; + + rxdr->buffer_info[i].dma = + dma_map_single(&pdev->dev, + buf + NET_SKB_PAD + NET_IP_ALIGN, + E1000_RXBUFFER_2048, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_write_phy_reg(hw, 29, 0x001F); + e1000_write_phy_reg(hw, 30, 0x8FFC); + e1000_write_phy_reg(hw, 29, 0x001A); + e1000_write_phy_reg(hw, 30, 0x8FF0); +} + +static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg; + + /* Because we reset the PHY above, we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock. This + * value defaults back to a 2.5MHz clock when the PHY is reset. 
+ */ + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_EPSCR_TX_CLK_25; + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg); + + /* In addition, because of the s/w reset above, we need to enable + * CRS on TX. This must be set for both full and half duplex + * operation. + */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); +} + +static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg; + u16 phy_reg; + + /* Setup the Device Control Register for PHY loopback test. */ + + ctrl_reg = er32(CTRL); + ctrl_reg |= (E1000_CTRL_ILOS | /* Invert Loss-Of-Signal */ + E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 | /* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + + /* Read the PHY Specific Control Register (0x10) */ + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg); + + /* Clear Auto-Crossover bits in PHY Specific Control Register + * (bits 6:5). + */ + phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE; + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg); + + /* Perform software reset on the PHY */ + e1000_phy_reset(hw); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + e1000_write_phy_reg(hw, PHY_CTRL, 0x8100); + + /* Wait for reset to complete. */ + udelay(500); + + /* Have to setup TX_CLK and TX_CRS after software reset */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1000_phy_disable_receiver(adapter); + + /* Set the loopback bit in the PHY control register. */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + + /* Setup TX_CLK and TX_CRS one more time. */ + e1000_phy_reset_clk_and_crs(adapter); + + /* Check Phy Configuration */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg != 0x4100) + return 9; + + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg); + if (phy_reg != 0x0070) + return 10; + + e1000_read_phy_reg(hw, 29, &phy_reg); + if (phy_reg != 0x001A) + return 11; + + return 0; +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u32 stat_reg = 0; + + hw->autoneg = false; + + if (hw->phy_type == e1000_phy_m88) { + /* Auto-MDI/MDIX Off */ + e1000_write_phy_reg(hw, + M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x9140); + /* autoneg off */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x8140); + } + + ctrl_reg = er32(CTRL); + + /* force 1000, set loopback */ + e1000_write_phy_reg(hw, PHY_CTRL, 0x4140); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (hw->media_type == e1000_media_type_copper && + hw->phy_type == e1000_phy_m88) + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + else { + /* Set the ILOS bit on the fiber Nic if half + * duplex link is detected. + */ + stat_reg = er32(STATUS); + if ((stat_reg & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. + */ + if (hw->phy_type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + udelay(500); + + return 0; +} + +static int e1000_set_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_reg = 0; + u16 count = 0; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->media_type == e1000_media_type_copper) { + /* Attempt to setup Loopback mode on Non-integrated PHY. + * Some PHY registers get corrupted at random, so + * attempt this 10 times. + */ + while (e1000_nonintegrated_phy_loopback(adapter) && + count++ < 10); + if (count < 11) + return 0; + } + break; + + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + return e1000_integrated_phy_loopback(adapter); + default: + /* Default PHY loopback work is to read the MII + * control register and assert bit 14 (loopback mode). 
+ */ + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + phy_reg |= MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + return 0; + } + + return 8; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + return e1000_set_phy_loopback(adapter); + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->media_type == e1000_media_type_copper) { + return e1000_set_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac_type) { + case e1000_82545: + case e1000_82546: + case e1000_82545_rev_3: + case e1000_82546_rev_3: + default: + hw->autoneg = true; + e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg); + if (phy_reg & MII_CR_LOOPBACK) { + phy_reg &= ~MII_CR_LOOPBACK; + e1000_write_phy_reg(hw, PHY_CTRL, phy_reg); + e1000_phy_reset(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int e1000_check_lbtest_frame(const unsigned char *data, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(data + 3) == 0xFF) { + if ((*(data + frame_size / 2 + 10) == 0xBE) && + (*(data + frame_size / 2 + 12) == 0xAF)) { + return 0; + } + } + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *txdr = &adapter->test_tx_ring; + struct e1000_rx_ring *rxdr = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + int i, j, k, l, lc, good_cnt, ret_val = 0; + unsigned long time; + + ew32(RDT, rxdr->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rxdr->count <= txdr->count) + lc = ((txdr->count / 64) * 2) + 1; + else + lc = ((rxdr->count / 64) * 2) + 1; + + k = l = 0; + for (j = 0; j <= lc; j++) { /* loop count loop */ + for (i = 0; i < 64; i++) { /* send the packets */ + e1000_create_lbtest_frame(txdr->buffer_info[i].skb, + 1024); + dma_sync_single_for_device(&pdev->dev, + txdr->buffer_info[k].dma, + txdr->buffer_info[k].length, + DMA_TO_DEVICE); + if (unlikely(++k == txdr->count)) + k = 0; + } + ew32(TDT, k); + E1000_WRITE_FLUSH(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + do { /* receive the sent packets */ + dma_sync_single_for_cpu(&pdev->dev, + rxdr->buffer_info[l].dma, + E1000_RXBUFFER_2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame( + rxdr->buffer_info[l].rxbuf.data + + NET_SKB_PAD + NET_IP_ALIGN, + 1024); + if (!ret_val) + good_cnt++; + if (unlikely(++l == rxdr->count)) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break 
and error off + */ + } while (good_cnt < 64 && time_after(time + 20, jiffies)); + + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (time_after_eq(jiffies, time + 2)) { + ret_val = 14; /* error code for time out error */ + break; + } + } /* end loop count loop */ + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + *data = 0; + if (hw->media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + e1000_check_for_link(hw); + if (hw->serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + e1000_check_for_link(hw); + if (hw->autoneg) /* if auto_neg is set wait for it */ + msleep(4000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->flags); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + u16 autoneg_advertised = hw->autoneg_advertised; + u8 forced_speed_duplex = hw->forced_speed_duplex; + u8 autoneg = hw->autoneg; + + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't + * interfere with test result + */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + else + e1000_reset(adapter); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000_reset(adapter); + /* make sure the phy is powered up */ + e1000_power_up_phy(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + hw->autoneg_advertised = autoneg_advertised; + hw->forced_speed_duplex = forced_speed_duplex; + hw->autoneg = autoneg; + + e1000_reset(adapter); + clear_bit(__E1000_TESTING, &adapter->flags); + if (if_running) + dev_open(netdev); + } else { + e_info(hw, "online testing starting\n"); + /* Online tests */ + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* Online tests aren't run; pass by default */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + clear_bit(__E1000_TESTING, 
&adapter->flags); + } + msleep_interruptible(4 * 1000); +} + +static int e1000_wol_exclusion(struct e1000_adapter *adapter, + struct ethtool_wolinfo *wol) +{ + struct e1000_hw *hw = &adapter->hw; + int retval = 1; /* fail by default */ + + switch (hw->device_id) { + case E1000_DEV_ID_82542: + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_PCIE: + /* these don't support WoL at all */ + wol->supported = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events not supported on port B */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* quad port adapters only support WoL on port A */ + if (!adapter->quad_port_a) { + wol->supported = 0; + break; + } + /* return success for non excluded adapter ports */ + retval = 0; + break; + default: + /* dual port cards only support WoL on port A from now on + * unless it was enabled in the eeprom for port B + * so exclude FUNC_1 ports from having WoL enabled + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1 && + !adapter->eeprom_wol) { + wol->supported = 0; + break; + } + + retval = 0; + } + + return retval; +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC; + wol->wolopts = 0; + + /* this function will set ->supported = 0 and return 1 if wol is not + * supported by this hardware + */ + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + /* apply any specific unsupported masks here */ + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* KSP3 does not support UCAST wake-ups */ + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + break; + default: + break; + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; + + if (e1000_wol_exclusion(adapter, wol) || + !device_can_wakeup(&adapter->pdev->dev)) + return wol->wolopts ? 
-EOPNOTSUPP : 0; + + switch (hw->device_id) { + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + if (wol->wolopts & WAKE_UCAST) { + e_err(drv, "Interface does not support directed " + "(unicast) frame wake-up packets\n"); + return -EOPNOTSUPP; + } + break; + default: + break; + } + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + e1000_setup_led(hw); + return 2; + + case ETHTOOL_ID_ON: + e1000_led_on(hw); + break; + + case ETHTOOL_ID_OFF: + e1000_led_off(hw); + break; + + case ETHTOOL_ID_INACTIVE: + e1000_cleanup_led(hw); + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->hw.mac_type < e1000_82545) + return -EOPNOTSUPP; + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + if (hw->mac_type < e1000_82545) + return -EOPNOTSUPP; + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr = adapter->itr_setting = 4; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + else + ew32(ITR, 0); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int i; + const struct e1000_stats *stat = e1000_gstrings_stats; + + e1000_update_stats(adapter); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) { + char *p; + + switch (stat->type) { + case NETDEV_STATS: + p = (char *)netdev + stat->stat_offset; + break; + case E1000_STATS: + p = (char *)adapter + stat->stat_offset; + break; + default: + WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n", + stat->type, i); + continue; + } + + if (stat->sizeof_stat == sizeof(u64)) + data[i] = *(u64 *)p; + else + data[i] = *(u32 *)p; + } +/* BUG_ON(i != E1000_STATS_LEN); */ +} + +static void e1000_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e1000_gstrings_test, 
sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + /* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */ + break; + } +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .get_settings = e1000_get_settings, + .set_settings = e1000_set_settings, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = e1000_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_ts_info = ethtool_op_get_ts_info, +}; + +void e1000_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &e1000_ethtool_ops; +} diff --git a/addons/e1000/src/4.4.180/e1000_hw.c b/addons/e1000/src/4.4.180/e1000_hw.c new file mode 100644 index 00000000..b1af0d61 --- /dev/null +++ b/addons/e1000/src/4.4.180/e1000_hw.c @@ -0,0 +1,5681 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + + */ + +/* e1000_hw.c + * Shared functions for accessing and configuring the MAC + */ + +#include "e1000.h" + +static s32 e1000_check_downshift(struct e1000_hw *hw); +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity); +static void e1000_clear_hw_cntrs(struct e1000_hw *hw); +static void e1000_clear_vfta(struct e1000_hw *hw); +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, + bool link_up); +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw); +static s32 e1000_detect_gig_phy(struct e1000_hw *hw); +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw); +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length); +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw); +static s32 e1000_id_led_init(struct e1000_hw *hw); +static void e1000_init_rx_addrs(struct e1000_hw *hw); +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info); +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active); +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value); +static s32 e1000_set_phy_type(struct e1000_hw *hw); +static void e1000_phy_init_script(struct e1000_hw *hw); +static s32 e1000_setup_copper_link(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw); +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw); +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw); +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw); +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl); +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count); +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw); +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw); +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw); +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd); +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count); +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data); +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data); +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count); +static s32 e1000_acquire_eeprom(struct e1000_hw *hw); +static void e1000_release_eeprom(struct e1000_hw *hw); +static void e1000_standby_eeprom(struct e1000_hw *hw); +static s32 e1000_set_vco_speed(struct e1000_hw *hw); +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw); +static s32 e1000_set_phy_mode(struct e1000_hw *hw); +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data); + +/* IGP cable length table */ +static const +u16 e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] = { + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25, + 25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 
40, 40, 40, 40, 40, 40, + 40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60, + 60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90, + 90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, + 100, + 100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, + 110, 110, + 110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, + 120, 120 +}; + +static DEFINE_SPINLOCK(e1000_eeprom_lock); +static DEFINE_SPINLOCK(e1000_phy_lock); + +/** + * e1000_set_phy_type - Set the phy type member in the hw struct. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_set_phy_type(struct e1000_hw *hw) +{ + if (hw->mac_type == e1000_undefined) + return -E1000_ERR_PHY_TYPE; + + switch (hw->phy_id) { + case M88E1000_E_PHY_ID: + case M88E1000_I_PHY_ID: + case M88E1011_I_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1118_E_PHY_ID: + hw->phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: + if (hw->mac_type == e1000_82541 || + hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82547_rev_2) + hw->phy_type = e1000_phy_igp; + break; + case RTL8211B_PHY_ID: + hw->phy_type = e1000_phy_8211; + break; + case RTL8201N_PHY_ID: + hw->phy_type = e1000_phy_8201; + break; + default: + /* Should never have loaded on this device */ + hw->phy_type = e1000_phy_undefined; + return -E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_init_script - IGP phy init script - initializes the GbE PHY + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_phy_init_script(struct e1000_hw *hw) +{ + u32 ret_val; + u16 phy_saved_data; + + if (hw->phy_init_script) { + msleep(20); + + /* Save off the current value of register 0x2F5B to be restored + * at the end of this routine. 
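+ * As an illustrative sketch (using nothing beyond the helpers already in + * this file), the save/restore pair brackets the whole script: + * + * e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + * ... write the script registers ... + * e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);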
+ */ + ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + /* Disable the PHY transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + msleep(20); + + e1000_write_phy_reg(hw, 0x0000, 0x0140); + msleep(5); + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + e1000_write_phy_reg(hw, 0x1F95, 0x0001); + e1000_write_phy_reg(hw, 0x1F71, 0xBD21); + e1000_write_phy_reg(hw, 0x1F79, 0x0018); + e1000_write_phy_reg(hw, 0x1F30, 0x1600); + e1000_write_phy_reg(hw, 0x1F31, 0x0014); + e1000_write_phy_reg(hw, 0x1F32, 0x161C); + e1000_write_phy_reg(hw, 0x1F94, 0x0003); + e1000_write_phy_reg(hw, 0x1F96, 0x003F); + e1000_write_phy_reg(hw, 0x2010, 0x0008); + break; + + case e1000_82541_rev_2: + case e1000_82547_rev_2: + e1000_write_phy_reg(hw, 0x1F73, 0x0099); + break; + default: + break; + } + + e1000_write_phy_reg(hw, 0x0000, 0x3300); + msleep(20); + + /* Now enable the transmitter */ + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (hw->mac_type == e1000_82547) { + u16 fused, fine, coarse; + + /* Move to analog registers page */ + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_SPARE_FUSE_STATUS, + &fused); + + if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) { + e1000_read_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_STATUS, + &fused); + + fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK; + coarse = + fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK; + + if (coarse > + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) { + coarse -= + IGP01E1000_ANALOG_FUSE_COARSE_10; + fine -= IGP01E1000_ANALOG_FUSE_FINE_1; + } else if (coarse == + IGP01E1000_ANALOG_FUSE_COARSE_THRESH) + fine -= IGP01E1000_ANALOG_FUSE_FINE_10; + + fused = + (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) | + (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) | + (coarse & + IGP01E1000_ANALOG_FUSE_COARSE_MASK); + + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_CONTROL, + fused); + e1000_write_phy_reg(hw, + IGP01E1000_ANALOG_FUSE_BYPASS, + IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL); + } + } + } +} + +/** + * e1000_set_mac_type - Set the mac type member in the hw struct.
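+ * For example, E1000_DEV_ID_82545EM_COPPER resolves to e1000_82545 in the + * switch below, and an unrecognized device id fails with -E1000_ERR_MAC_TYPE.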
+ * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_set_mac_type(struct e1000_hw *hw) +{ + switch (hw->device_id) { + case E1000_DEV_ID_82542: + switch (hw->revision_id) { + case E1000_82542_2_0_REV_ID: + hw->mac_type = e1000_82542_rev2_0; + break; + case E1000_82542_2_1_REV_ID: + hw->mac_type = e1000_82542_rev2_1; + break; + default: + /* Invalid 82542 revision ID */ + return -E1000_ERR_MAC_TYPE; + } + break; + case E1000_DEV_ID_82543GC_FIBER: + case E1000_DEV_ID_82543GC_COPPER: + hw->mac_type = e1000_82543; + break; + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + hw->mac_type = e1000_82544; + break; + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + hw->mac_type = e1000_82540; + break; + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + hw->mac_type = e1000_82545; + break; + case E1000_DEV_ID_82545GM_COPPER: + case E1000_DEV_ID_82545GM_FIBER: + case E1000_DEV_ID_82545GM_SERDES: + hw->mac_type = e1000_82545_rev_3; + break; + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + hw->mac_type = e1000_82546; + break; + case E1000_DEV_ID_82546GB_COPPER: + case E1000_DEV_ID_82546GB_FIBER: + case E1000_DEV_ID_82546GB_SERDES: + case E1000_DEV_ID_82546GB_PCIE: + case E1000_DEV_ID_82546GB_QUAD_COPPER: + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + hw->mac_type = e1000_82546_rev_3; + break; + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER_LOM: + hw->mac_type = e1000_82541; + break; + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + hw->mac_type = e1000_82541_rev_2; + break; + case E1000_DEV_ID_82547EI: + case E1000_DEV_ID_82547EI_MOBILE: + hw->mac_type = e1000_82547; + break; + case E1000_DEV_ID_82547GI: + hw->mac_type = e1000_82547_rev_2; + break; + case E1000_DEV_ID_INTEL_CE4100_GBE: + hw->mac_type = e1000_ce4100; + break; + default: + /* Should never have loaded on this device */ + return -E1000_ERR_MAC_TYPE; + } + + switch (hw->mac_type) { + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->asf_firmware_present = true; + break; + default: + break; + } + + /* The 82543 chip does not count tx_carrier_errors properly in + * FD mode + */ + if (hw->mac_type == e1000_82543) + hw->bad_tx_carr_stats_fd = true; + + if (hw->mac_type > e1000_82544) + hw->has_smbus = true; + + return E1000_SUCCESS; +} + +/** + * e1000_set_media_type - Set media type and TBI compatibility. 
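+ * TBI compatibility is meaningful only on the 82543, so every other mac_type + * clears tbi_compatibility_en before the device id and STATUS.TBIMODE are + * examined.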
+ * @hw: Struct containing variables accessed by shared code + */ +void e1000_set_media_type(struct e1000_hw *hw) +{ + u32 status; + + if (hw->mac_type != e1000_82543) { + /* tbi_compatibility is only valid on 82543 */ + hw->tbi_compatibility_en = false; + } + + switch (hw->device_id) { + case E1000_DEV_ID_82545GM_SERDES: + case E1000_DEV_ID_82546GB_SERDES: + hw->media_type = e1000_media_type_internal_serdes; + break; + default: + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->media_type = e1000_media_type_fiber; + break; + case e1000_ce4100: + hw->media_type = e1000_media_type_copper; + break; + default: + status = er32(STATUS); + if (status & E1000_STATUS_TBIMODE) { + hw->media_type = e1000_media_type_fiber; + /* tbi_compatibility not valid on fiber */ + hw->tbi_compatibility_en = false; + } else { + hw->media_type = e1000_media_type_copper; + } + break; + } + } +} + +/** + * e1000_reset_hw - reset the hardware completely + * @hw: Struct containing variables accessed by shared code + * + * Reset the transmit and receive units; mask and clear all interrupts. + */ +s32 e1000_reset_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 ctrl_ext; + u32 icr; + u32 manc; + u32 led_ctrl; + s32 ret_val; + + /* For 82542 (rev 2.0), disable MWI before issuing a device reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC with + * the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + E1000_WRITE_FLUSH(); + + /* The tbi_compatibility_on flag must be cleared when Rctl is cleared. */ + hw->tbi_compatibility_on = false; + + /* Delay to allow any outstanding PCI transactions to complete before + * resetting the device + */ + msleep(10); + + ctrl = er32(CTRL); + + /* Must reset the PHY before resetting the MAC */ + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + ew32(CTRL, (ctrl | E1000_CTRL_PHY_RST)); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Issue a global reset to the MAC. This will reset the chip's + * transmit, receive, DMA, and link units. It will not affect + * the current PCI configuration. The global reset bit is self- + * clearing, and should clear within a microsecond. + */ + e_dbg("Issuing a global reset to MAC\n"); + + switch (hw->mac_type) { + case e1000_82544: + case e1000_82540: + case e1000_82545: + case e1000_82546: + case e1000_82541: + case e1000_82541_rev_2: + /* These controllers can't ack the 64-bit write when issuing the + * reset, so use IO-mapping as a workaround to issue the reset + */ + E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST)); + break; + case e1000_82545_rev_3: + case e1000_82546_rev_3: + /* Reset is performed on a shadow of the control register */ + ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); + break; + case e1000_ce4100: + default: + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + break; + } + + /* After MAC reset, force reload of EEPROM to restore power-on settings + * to device. Later controllers reload the EEPROM automatically, so + * just wait for reload to complete.
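+ * (On the 82542/82543/82544 parts this is done by hand, as in the first arm + * of the switch below: + * + * ctrl_ext = er32(CTRL_EXT); + * ctrl_ext |= E1000_CTRL_EXT_EE_RST; + * ew32(CTRL_EXT, ctrl_ext); + * + * followed by a short sleep while the reload finishes.)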
+ */ + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* Wait for reset to complete */ + udelay(10); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + /* Wait for EEPROM reload */ + msleep(2); + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + /* Wait for EEPROM reload */ + msleep(20); + break; + default: + /* Auto read done will delay 5ms or poll based on mac type */ + ret_val = e1000_get_auto_rd_done(hw); + if (ret_val) + return ret_val; + break; + } + + /* Disable HW ARPs on ASF enabled adapters */ + if (hw->mac_type >= e1000_82540) { + manc = er32(MANC); + manc &= ~(E1000_MANC_ARP_EN); + ew32(MANC, manc); + } + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + e1000_phy_init_script(hw); + + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Clear interrupt mask to stop board from generating interrupts */ + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Clear any pending interrupt events. */ + icr = er32(ICR); + + /* If MWI was previously enabled, reenable it. */ + if (hw->mac_type == e1000_82542_rev2_0) { + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + return E1000_SUCCESS; +} + +/** + * e1000_init_hw - Performs basic configuration of the adapter. + * @hw: Struct containing variables accessed by shared code + * + * Assumes that the controller has previously been reset and is in a + * post-reset uninitialized state. Initializes the receive address registers, + * multicast table, and VLAN filter table. Calls routines to setup link + * configuration and flow control settings. Clears all on-chip counters. Leaves + * the transmit and receive units disabled and uninitialized. + */ +s32 e1000_init_hw(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + s32 ret_val; + u32 mta_size; + u32 ctrl_ext; + + /* Initialize Identification LED */ + ret_val = e1000_id_led_init(hw); + if (ret_val) { + e_dbg("Error Initializing Identification LED\n"); + return ret_val; + } + + /* Set the media type and TBI compatibility */ + e1000_set_media_type(hw); + + /* Disabling VLAN filtering. */ + e_dbg("Initializing the IEEE VLAN\n"); + if (hw->mac_type < e1000_82545_rev_3) + ew32(VET, 0); + e1000_clear_vfta(hw); + + /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */ + if (hw->mac_type == e1000_82542_rev2_0) { + e_dbg("Disabling MWI on 82542 rev 2.0\n"); + e1000_pci_clear_mwi(hw); + ew32(RCTL, E1000_RCTL_RST); + E1000_WRITE_FLUSH(); + msleep(5); + } + + /* Setup the receive address. This involves initializing all of the + * Receive Address Registers (RARs 0 - 15). 
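+ * RAR 0 receives the station MAC address and the remaining entries are + * cleared so that stale addresses cannot match incoming frames.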
+ */ + e1000_init_rx_addrs(hw); + + /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */ + if (hw->mac_type == e1000_82542_rev2_0) { + ew32(RCTL, 0); + E1000_WRITE_FLUSH(); + msleep(1); + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + } + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + mta_size = E1000_MC_TBL_SIZE; + for (i = 0; i < mta_size; i++) { + E1000_WRITE_REG_ARRAY(hw, MTA, i, 0); + /* use write flush to prevent Memory Write Block (MWB) from + * occurring when accessing our register space + */ + E1000_WRITE_FLUSH(); + } + + /* Set the PCI priority bit correctly in the CTRL register. This + * determines if the adapter gives priority to receives, or if it + * gives equal priority to transmits and receives. Valid only on + * 82542 and 82543 silicon. + */ + if (hw->dma_fairness && hw->mac_type <= e1000_82543) { + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PRIOR); + } + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + /* Workaround for PCI-X problem when BIOS sets MMRBC + * incorrectly. + */ + if (hw->bus_type == e1000_bus_type_pcix + && e1000_pcix_get_mmrbc(hw) > 2048) + e1000_pcix_set_mmrbc(hw, 2048); + break; + } + + /* Call a subroutine to configure the link and setup flow control. */ + ret_val = e1000_setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + if (hw->mac_type > e1000_82544) { + ctrl = er32(TXDCTL); + ctrl = + (ctrl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB; + ew32(TXDCTL, ctrl); + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs(hw); + + if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER || + hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) { + ctrl_ext = er32(CTRL_EXT); + /* Relaxed ordering must be disabled to avoid a parity + * error crash in a PCI slot. + */ + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + } + + return ret_val; +} + +/** + * e1000_adjust_serdes_amplitude - Adjust SERDES output amplitude based on EEPROM setting. + * @hw: Struct containing variables accessed by shared code. + */ +static s32 e1000_adjust_serdes_amplitude(struct e1000_hw *hw) +{ + u16 eeprom_data; + s32 ret_val; + + if (hw->media_type != e1000_media_type_internal_serdes) + return E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, + &eeprom_data); + if (ret_val) { + return ret_val; + } + + if (eeprom_data != EEPROM_RESERVED_WORD) { + /* Adjust SERDES output amplitude only. */ + eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_link - Configures flow control and link settings. + * @hw: Struct containing variables accessed by shared code + * + * Determines which flow control settings to use. Calls the appropriate media- + * specific link configuration function. Configures the flow control settings. + * Assuming the adapter has a valid link partner, a valid link should be + * established. Assumes the hardware has previously been reset and the + * transmitter and receiver are not enabled. 
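+ * + * Illustrative call order (a sketch, not a new API): e1000_reset_hw(hw) runs + * first, then e1000_init_hw(hw), which invokes e1000_setup_link(hw) once the + * controller is in its post-reset state.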
+ */ +s32 e1000_setup_link(struct e1000_hw *hw) +{ + u32 ctrl_ext; + s32 ret_val; + u16 eeprom_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + if (hw->fc == E1000_FC_DEFAULT) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0) + hw->fc = E1000_FC_NONE; + else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == + EEPROM_WORD0F_ASM_DIR) + hw->fc = E1000_FC_TX_PAUSE; + else + hw->fc = E1000_FC_FULL; + } + + /* We want to save off the original Flow Control configuration just + * in case we get disconnected and then reconnected into a different + * hub or switch with different Flow Control capabilities. + */ + if (hw->mac_type == e1000_82542_rev2_0) + hw->fc &= (~E1000_FC_TX_PAUSE); + + if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1)) + hw->fc &= (~E1000_FC_RX_PAUSE); + + hw->original_fc = hw->fc; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc); + + /* Take the 4 bits from EEPROM word 0x0F that determine the initial + * polarity value for the SW controlled pins, and setup the + * Extended Device Control reg with that info. + * This is needed because one of the SW controlled pins is used for + * signal detection. So this should be done before e1000_setup_pcs_link() + * or e1000_phy_setup() is called. + */ + if (hw->mac_type == e1000_82543) { + ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG, + 1, &eeprom_data); + if (ret_val) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) << + SWDPIO__EXT_SHIFT); + ew32(CTRL_EXT, ctrl_ext); + } + + /* Call the necessary subroutine to configure the link. */ + ret_val = (hw->media_type == e1000_media_type_copper) ? + e1000_setup_copper_link(hw) : e1000_setup_fiber_serdes_link(hw); + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc_pause_time); + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (!(hw->fc & E1000_FC_TX_PAUSE)) { + ew32(FCRTL, 0); + ew32(FCRTH, 0); + } else { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames.
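+ * When XON transmission is requested, the low-water write also sets the + * XON-enable bit, i.e. ew32(FCRTL, hw->fc_low_water | E1000_FCRTL_XONE), as + * in the branch below.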
+ */ + if (hw->fc_send_xon) { + ew32(FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE)); + ew32(FCRTH, hw->fc_high_water); + } else { + ew32(FCRTL, hw->fc_low_water); + ew32(FCRTH, hw->fc_high_water); + } + } + return ret_val; +} + +/** + * e1000_setup_fiber_serdes_link - prepare fiber or serdes link + * @hw: Struct containing variables accessed by shared code + * + * Manipulates Physical Coding Sublayer functions in order to configure + * link. Assumes the hardware has been previously reset and the transmitter + * and receiver are not enabled. + */ +static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + u32 status; + u32 txcw = 0; + u32 i; + u32 signal = 0; + s32 ret_val; + + /* On adapters with a MAC newer than 82544, SWDP 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + * If we're on serdes media, adjust the output amplitude to value + * set in the EEPROM. + */ + ctrl = er32(CTRL); + if (hw->media_type == e1000_media_type_fiber) + signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + + ret_val = e1000_adjust_serdes_amplitude(hw); + if (ret_val) + return ret_val; + + /* Take the link out of reset */ + ctrl &= ~(E1000_CTRL_LRST); + + /* Adjust VCO speed to improve BER performance */ + ret_val = e1000_set_vco_speed(hw); + if (ret_val) + return ret_val; + + e1000_config_collision_dist(hw); + + /* Check for a software override of the flow control settings, and setup + * the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start + * auto-negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, but + * not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but we do + * not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + */ + switch (hw->fc) { + case E1000_FC_NONE: + /* Flow ctrl is completely disabled by a software over-ride */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case E1000_FC_RX_PAUSE: + /* Rx Flow control is enabled and Tx Flow control is disabled by + * a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case E1000_FC_TX_PAUSE: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case E1000_FC_FULL: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation.
If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(TXCW, txcw); + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + hw->txcw = txcw; + msleep(1); + + /* If we have a signal (the cable is plugged in) then poll for a + * "Link-Up" indication in the Device Status Register. Time-out if a + * link isn't seen in 500 milliseconds (Auto-negotiation should + * complete in less than 500 milliseconds even if the other end is doing + * it in SW). For internal serdes, we just assume a signal is present, + * then poll. + */ + if (hw->media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1) == signal) { + e_dbg("Looking for Link\n"); + for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) { + msleep(10); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == (LINK_UP_TIMEOUT / 10)) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + hw->autoneg_failed = 1; + /* AutoNeg failed to achieve a link, so we'll call + * e1000_check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = e1000_check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + hw->autoneg_failed = 0; + } else { + hw->autoneg_failed = 0; + e_dbg("Valid Link Found\n"); + } + } else { + e_dbg("No Signal Detected\n"); + } + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series. + * @hw: Struct containing variables accessed by shared code + * + * Commits changes to PHY configuration by calling e1000_phy_reset(). + */ +static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw) +{ + s32 ret_val; + + /* SW reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +static s32 gbe_dhg_phy_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u32 ctrl_aux; + + switch (hw->phy_type) { + case e1000_phy_8211: + ret_val = e1000_copper_link_rtl_setup(hw); + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + case e1000_phy_8201: + /* Set RMII mode */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= E1000_CTL_AUX_RMII; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + + /* Disable the J/K bits required for receive */ + ctrl_aux = er32(CTL_AUX); + ctrl_aux |= 0x4; + ctrl_aux &= ~0x2; + ew32(CTL_AUX, ctrl_aux); + E1000_WRITE_FLUSH(); + ret_val = e1000_copper_link_rtl_setup(hw); + + if (ret_val) { + e_dbg("e1000_copper_link_rtl_setup failed!\n"); + return ret_val; + } + break; + default: + e_dbg("Error Resetting the PHY\n"); + return E1000_ERR_PHY_TYPE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_preconfig - early configuration for copper + * @hw: Struct containing variables accessed by shared code + * + * Make sure we have a valid PHY and change PHY mode before link setup. + */ +static s32 e1000_copper_link_preconfig(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + ctrl = er32(CTRL); + /* With 82543, we need to force speed and duplex on the MAC to match + * the PHY's speed and duplex configuration. In addition, we need + * to perform a hardware reset on the PHY to take it out of reset.
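+ * (The else arm below handles the 82543-and-older case: it sets + * E1000_CTRL_FRCSPD, E1000_CTRL_FRCDPX and E1000_CTRL_SLU in CTRL and then + * calls e1000_phy_hw_reset().)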
+ */ + if (hw->mac_type > e1000_82543) { + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + } else { + ctrl |= + (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU); + ew32(CTRL, ctrl); + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + } + + /* Make sure we have a valid PHY */ + ret_val = e1000_detect_gig_phy(hw); + if (ret_val) { + e_dbg("Error, did not detect valid phy.\n"); + return ret_val; + } + e_dbg("Phy ID = %x\n", hw->phy_id); + + /* Set PHY to class A mode (if necessary) */ + ret_val = e1000_set_phy_mode(hw); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82545_rev_3) || + (hw->mac_type == e1000_82546_rev_3)) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + phy_data |= 0x00000008; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + } + + if (hw->mac_type <= e1000_82543 || + hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 || + hw->mac_type == e1000_82541_rev_2 + || hw->mac_type == e1000_82547_rev_2) + hw->phy_reset_disable = false; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_igp_setup - Copper link setup for e1000_phy_igp series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_igp_setup(struct e1000_hw *hw) +{ + u32 led_ctrl; + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Wait 15ms for MAC to configure PHY from eeprom settings */ + msleep(15); + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + + /* The NVM settings will configure LPLU in D3 for IGP2 and IGP3 PHYs */ + if (hw->phy_type == e1000_phy_igp) { + /* disable lplu d3 during driver init */ + ret_val = e1000_set_d3_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D3\n"); + return ret_val; + } + } + + /* Configure mdi-mdix settings */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + hw->dsp_config_state = e1000_dsp_config_disabled; + /* Force MDI for earlier revs of the IGP PHY */ + phy_data &= + ~(IGP01E1000_PSCR_AUTO_MDIX | + IGP01E1000_PSCR_FORCE_MDI_MDIX); + hw->mdix = 1; + + } else { + hw->dsp_config_state = e1000_dsp_config_enabled; + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (hw->mdix) { + case 1: + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + phy_data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + } + ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->autoneg) { + e1000_ms_type phy_ms_setting = hw->master_slave; + + if (hw->ffe_config_state == e1000_ffe_config_active) + hw->ffe_config_state = e1000_ffe_config_enabled; + + if (hw->dsp_config_state == e1000_dsp_config_activated) + hw->dsp_config_state = e1000_dsp_config_enabled; + + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
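+ * SmartSpeed exists to downshift to a lower speed after repeated 1000Base-T + * negotiation failures, which would defeat the purpose of a 1000-only + * advertisement; the two writes below therefore clear + * IGP01E1000_PSCFR_SMART_SPEED and CR_1000T_MS_ENABLE.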
+ */ + if (hw->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + /* Set auto Master/Slave resolution process */ + ret_val = + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~CR_1000T_MS_ENABLE; + ret_val = + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ? + ((phy_data & CR_1000T_MS_VALUE) ? + e1000_ms_force_master : + e1000_ms_force_slave) : e1000_ms_auto; + + switch (phy_ms_setting) { + case e1000_ms_force_master: + phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE); + break; + case e1000_ms_force_slave: + phy_data |= CR_1000T_MS_ENABLE; + phy_data &= ~(CR_1000T_MS_VALUE); + break; + case e1000_ms_auto: + phy_data &= ~CR_1000T_MS_ENABLE; + default: + break; + } + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_mgp_setup - Copper link setup for e1000_phy_m88 series. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_copper_link_mgp_setup(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_reset_disable) + return E1000_SUCCESS; + + /* Enable CRS on TX. This must be set for half-duplex operation. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (hw->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (hw->disable_polarity_correction == 1) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if (hw->phy_revision < M88E1011_I_REV_4) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. 
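+ * (TX_CLK falls back to a 2.5MHz default whenever the PHY is reset, so + * M88E1000_EPSCR_TX_CLK_25 must be set here and again after any later reset; + * e1000_phy_force_speed_duplex() repeats the same fix-up.)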
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((hw->phy_revision == E1000_REVISION_2) && + (hw->phy_id == M88E1111_I_PHY_ID)) { + /* Vidalia Phy, set the downshift counter to 5x */ + phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK); + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + ret_val = e1000_write_phy_reg(hw, + M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + } + } + + /* SW Reset the PHY so all changes take effect */ + ret_val = e1000_phy_reset(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_autoneg - setup auto-neg + * @hw: Struct containing variables accessed by shared code + * + * Setup auto-negotiation and flow control advertisements, + * and then perform auto-negotiation. + */ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Perform some bounds checking on the hw->autoneg_advertised + * parameter. If this variable is zero, then set it to the default. + */ + hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. + */ + if (hw->autoneg_advertised == 0) + hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + /* IFE/RTL8201N PHY only supports 10/100 */ + if (hw->phy_type == e1000_phy_8201) + hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG); + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (hw->wait_autoneg_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg + ("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->get_link_status = true; + + return E1000_SUCCESS; +} + +/** + * e1000_copper_link_postconfig - post link setup + * @hw: Struct containing variables accessed by shared code + * + * Config the MAC and the PHY after link is up. + * 1) Set up the MAC to the current PHY speed/duplex + * if we are on 82543. If we + * are on newer silicon, we only need to configure + * collision distance in the Transmit Control Register. + * 2) Set up flow control on the MAC to that established with + * the link partner. + * 3) Config DSP to improve Gigabit link quality for some PHY revisions. 
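+ * + * Called from e1000_setup_copper_link() once the PHY reports + * MII_SR_LINK_STATUS; see the polling loop there.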
+ */ +static s32 e1000_copper_link_postconfig(struct e1000_hw *hw) +{ + s32 ret_val; + + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) { + e1000_config_collision_dist(hw); + } else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error Configuring Flow Control\n"); + return ret_val; + } + + /* Config DSP to improve Giga link quality */ + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_config_dsp_after_link_change(hw, true); + if (ret_val) { + e_dbg("Error Configuring DSP after link up\n"); + return ret_val; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_setup_copper_link - phy/speed/duplex setting + * @hw: Struct containing variables accessed by shared code + * + * Detects which PHY is present and sets up the speed and duplex + */ +static s32 e1000_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + /* Check if it is a valid PHY and set PHY mode if necessary. */ + ret_val = e1000_copper_link_preconfig(hw); + if (ret_val) + return ret_val; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_copper_link_igp_setup(hw); + if (ret_val) + return ret_val; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_copper_link_mgp_setup(hw); + if (ret_val) + return ret_val; + } else { + ret_val = gbe_dhg_phy_setup(hw); + if (ret_val) { + e_dbg("gbe_dhg_phy_setup failed!\n"); + return ret_val; + } + } + + if (hw->autoneg) { + /* Setup autoneg and flow control advertisement + * and perform autonegotiation + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H, or 100F + * depending on value from forced_speed_duplex. + */ + e_dbg("Forcing speed and duplex\n"); + ret_val = e1000_phy_force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + for (i = 0; i < 10; i++) { + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + /* Config the MAC and PHY after link is up */ + ret_val = e1000_copper_link_postconfig(hw); + if (ret_val) + return ret_val; + + e_dbg("Valid link established!!!\n"); + return E1000_SUCCESS; + } + udelay(10); + } + + e_dbg("Unable to establish link!!!\n"); + return E1000_SUCCESS; +} + +/** + * e1000_phy_setup_autoneg - phy settings + * @hw: Struct containing variables accessed by shared code + * + * Configures PHY autoneg and flow control advertisement settings + */ +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + else if (hw->phy_type == e1000_phy_8201) + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers.
First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~REG4_SPEED_MASK; + mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK; + + e_dbg("autoneg_advertised %x\n", hw->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (hw->autoneg_advertised & ADVERTISE_1000_HALF) { + e_dbg + ("Advertise 1000mb Half duplex requested, request denied!\n"); + } + + /* Do we want to advertise 1000 Mb Full Duplex? */ + if (hw->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (PHY_AUTONEG_ADV) and re-start + * auto-negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and TX flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc) { + case E1000_FC_NONE: /* 0 */ + /* Flow control (RX & TX) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_RX_PAUSE: /* 1 */ + /* RX Flow control is enabled, and TX Flow control is + * disabled, by a software over-ride. + */ + /* Since there really isn't a way to advertise that we are + * capable of RX Pause ONLY, we will advertise that we + * support both symmetric and asymmetric RX PAUSE. Later + * (in e1000_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + case E1000_FC_TX_PAUSE: /* 2 */ + /* TX Flow control is enabled, and RX Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR; + mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE; + break; + case E1000_FC_FULL: /* 3 */ + /* Flow control (both RX and TX) is enabled by a software + * over-ride. 
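+ * + * Summary of the advertisement bits chosen across the four fc cases: + * + * E1000_FC_NONE: neither NWAY_AR_ASM_DIR nor NWAY_AR_PAUSE + * E1000_FC_RX_PAUSE: NWAY_AR_ASM_DIR | NWAY_AR_PAUSE + * E1000_FC_TX_PAUSE: NWAY_AR_ASM_DIR only + * E1000_FC_FULL: NWAY_AR_ASM_DIR | NWAY_AR_PAUSE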
+ */ + mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (hw->phy_type == e1000_phy_8201) { + mii_1000t_ctrl_reg = 0; + } else { + ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, + mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_force_speed_duplex - force link settings + * @hw: Struct containing variables accessed by shared code + * + * Force PHY speed and duplex settings to hw->forced_speed_duplex + */ +static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 mii_ctrl_reg; + u16 mii_status_reg; + u16 phy_data; + u16 i; + + /* Turn off Flow control if we are forcing speed and duplex. */ + hw->fc = E1000_FC_NONE; + + e_dbg("hw->fc = %d\n", hw->fc); + + /* Read the Device Control Register. */ + ctrl = er32(CTRL); + + /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */ + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(DEVICE_SPEED_MASK); + + /* Clear the Auto Speed Detect Enable bit. */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Read the MII Control Register. */ + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg); + if (ret_val) + return ret_val; + + /* We need to disable autoneg in order to force link and duplex. */ + + mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN; + + /* Are we forcing Full or Half Duplex? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_10_full) { + /* We want to force full duplex so we SET the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl |= E1000_CTRL_FD; + mii_ctrl_reg |= MII_CR_FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + /* We want to force half duplex so we CLEAR the full duplex bits + * in the Device and MII Control Registers. + */ + ctrl &= ~E1000_CTRL_FD; + mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX; + e_dbg("Half Duplex\n"); + } + + /* Are we forcing 100Mbps??? */ + if (hw->forced_speed_duplex == e1000_100_full || + hw->forced_speed_duplex == e1000_100_half) { + /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */ + ctrl |= E1000_CTRL_SPD_100; + mii_ctrl_reg |= MII_CR_SPEED_100; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10); + e_dbg("Forcing 100mb "); + } else { + /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + mii_ctrl_reg |= MII_CR_SPEED_10; + mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100); + e_dbg("Forcing 10mb "); + } + + e1000_config_collision_dist(hw); + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + + if (hw->phy_type == e1000_phy_m88) { + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires + * MDI forced whenever speed and duplex are forced.
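For orientation, the hw->fc switch above boils down to a small mapping onto the two IEEE 802.3 advertisement bits in MII register 4. A minimal standalone restatement, using the standard NWAY_AR_* bit values; the helper name is ours, not the driver's:

static unsigned short fc_to_mii_adv(unsigned int fc)
{
	unsigned short pause = 0x0400;   /* NWAY_AR_PAUSE, bit 10 */
	unsigned short asm_dir = 0x0800; /* NWAY_AR_ASM_DIR, bit 11 */

	switch (fc) {
	case 1: /* E1000_FC_RX_PAUSE: advertise both, Tx pause disabled after link-up */
	case 3: /* E1000_FC_FULL */
		return pause | asm_dir;
	case 2: /* E1000_FC_TX_PAUSE: asymmetric toward the partner */
		return asm_dir;
	default: /* E1000_FC_NONE or invalid */
		return 0;
	}
}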
+ */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %x\n", phy_data); + + /* Need to reset the PHY or these changes will be ignored */ + mii_ctrl_reg |= MII_CR_RESET; + + /* Disable MDI-X support for 10/100 */ + } else { + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed or duplex are forced. + */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + /* Write back the modified PHY MII control register. */ + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg); + if (ret_val) + return ret_val; + + udelay(1); + + /* The wait_autoneg_complete flag may be a little misleading here. + * Since we are forcing speed and duplex, Auto-Neg is not enabled. + * But we do want to delay for a period while forcing only so we + * don't generate false No Link messages. So we will wait here + * only if the user has set wait_autoneg_complete to 1, which is + * the default. + */ + if (hw->wait_autoneg_complete) { + /* We will wait for autoneg to complete. */ + e_dbg("Waiting for forced speed/duplex link.\n"); + mii_status_reg = 0; + + /* Wait for autoneg to complete or 4.5 seconds to expire */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + if ((i == 0) && (hw->phy_type == e1000_phy_m88)) { + /* We didn't get link. Reset the DSP and wait again + * for link. + */ + ret_val = e1000_phy_reset_dsp(hw); + if (ret_val) { + e_dbg("Error Resetting PHY DSP\n"); + return ret_val; + } + } + /* This loop will early-out if the link condition has been + * met + */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set. + */ + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = + e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + } + } + + if (hw->phy_type == e1000_phy_m88) { + /* Because we reset the PHY above, we need to re-force TX_CLK in + * the Extended PHY Specific Control Register to 25MHz clock. + * This value defaults back to a 2.5MHz clock when the PHY is + * reset. + */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = + e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + + /* In addition, because of the s/w reset above, we need to + * enable CRS on Tx. This must be set for both full and half + * duplex operation. 
+ */ + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) + && (!hw->autoneg) + && (hw->forced_speed_duplex == e1000_10_full + || hw->forced_speed_duplex == e1000_10_half)) { + ret_val = e1000_polarity_reversal_workaround(hw); + if (ret_val) + return ret_val; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_config_collision_dist - set collision distance register + * @hw: Struct containing variables accessed by shared code + * + * Sets the collision distance in the Transmit Control register. + * Link should have been established previously. Reads the speed and duplex + * information from the Device Status register. + */ +void e1000_config_collision_dist(struct e1000_hw *hw) +{ + u32 tctl, coll_dist; + + if (hw->mac_type < e1000_82543) + coll_dist = E1000_COLLISION_DISTANCE_82542; + else + coll_dist = E1000_COLLISION_DISTANCE; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= coll_dist << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_config_mac_to_phy - sync phy and mac settings + * @hw: Struct containing variables accessed by shared code + * @mii_reg: data to write to the MII control register + * + * Sets MAC speed and duplex settings to reflect those in the PHY + * The contents of the PHY register containing the needed information need to + * be passed in. + */ +static s32 e1000_config_mac_to_phy(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 phy_data; + + /* 82544 or newer MAC, Auto Speed Detection takes care of + * MAC speed/duplex configuration. + */ + if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) + return E1000_SUCCESS; + + /* Read the Device Control Register and set the bits to Force Speed + * and Duplex. + */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); + + switch (hw->phy_type) { + case e1000_phy_8201: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & RTL_PHY_CTRL_FD) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + if (phy_data & RTL_PHY_CTRL_SPD_100) + ctrl |= E1000_CTRL_SPD_100; + else + ctrl |= E1000_CTRL_SPD_10; + + e1000_config_collision_dist(hw); + break; + default: + /* Set up duplex in the Device Control and Transmit Control + * registers depending on negotiated values. + */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & M88E1000_PSSR_DPLX) + ctrl |= E1000_CTRL_FD; + else + ctrl &= ~E1000_CTRL_FD; + + e1000_config_collision_dist(hw); + + /* Set up speed in the Device Control register depending on + * negotiated values. + */ + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) + ctrl |= E1000_CTRL_SPD_1000; + else if ((phy_data & M88E1000_PSSR_SPEED) == + M88E1000_PSSR_100MBS) + ctrl |= E1000_CTRL_SPD_100; + } + + /* Write the configured values back to the Device Control Reg. */ + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_force_mac_fc - force flow control settings + * @hw: Struct containing variables accessed by shared code + * + * Forces the MAC's flow control settings.
+ * Sets the TFCE and RFCE bits in the device control register to reflect + * the adapter settings. TFCE and RFCE need to be explicitly set by + * software when a Copper PHY is used because autonegotiation is managed + * by the PHY rather than the MAC. Software must also configure these + * bits when link is forced on a fiber connection. + */ +s32 e1000_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + + /* Get the current configuration of the Device Control Register */ + ctrl = er32(CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit and + * receive flow control. + * + * The "Case" statement below enables/disables flow control + * according to the "hw->fc" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause + * frames but we do not receive pause frames). + * 3: Both Rx and TX flow control (symmetric) is enabled. + * other: No other values should be possible at this point. + */ + + switch (hw->fc) { + case E1000_FC_NONE: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case E1000_FC_RX_PAUSE: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case E1000_FC_TX_PAUSE: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case E1000_FC_FULL: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + /* Disable TX Flow Control for 82542 (rev 2.0) */ + if (hw->mac_type == e1000_82542_rev2_0) + ctrl &= (~E1000_CTRL_TFCE); + + ew32(CTRL, ctrl); + return E1000_SUCCESS; +} + +/** + * e1000_config_fc_after_link_up - configure flow control after autoneg + * @hw: Struct containing variables accessed by shared code + * + * Configures flow control settings after link is established + * Should be called immediately after a valid link has been established. + * Forces MAC flow control settings if link was forced. When in MII/GMII mode + * and autonegotiation is enabled, the MAC flow control settings will be set + * based on the flow control negotiated by the PHY. In TBI mode, the TFCE + * and RFCE bits will be automatically set to the negotiated flow control mode. + */ +static s32 e1000_config_fc_after_link_up(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 mii_nway_adv_reg; + u16 mii_nway_lp_ability_reg; + u16 speed; + u16 duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) + || ((hw->media_type == e1000_media_type_internal_serdes) + && (hw->autoneg_failed)) + || ((hw->media_type == e1000_media_type_copper) + && (!hw->autoneg))) { + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner have + * flow control configured.
+ */ + if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) { + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement Register + * (Address 4) and the Auto_Negotiation Base Page + * Ability Register (Address 5) to determine how flow + * control was negotiated. + */ + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, + &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, + &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement + * Register (Address 4) and two bits in the Auto + * Negotiation Base Page Ability Register (Address 5) + * determine flow control for both the PHY and the link + * partner. The following table, taken out of the IEEE + * 802.3ab/D6.0 dated March 25, 1999, describes these + * PAUSE resolution bits and how flow control is + * determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|------------------ + * 0 | 0 | DC | DC | E1000_FC_NONE + * 0 | 1 | 0 | DC | E1000_FC_NONE + * 0 | 1 | 1 | 0 | E1000_FC_NONE + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * 1 | 0 | 0 | DC | E1000_FC_NONE + * 1 | DC | 1 | DC | E1000_FC_FULL + * 1 | 1 | 0 | 0 | E1000_FC_NONE + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + /* Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | DC | 1 | DC | E1000_FC_FULL + * + */ + if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) { + /* Now we need to check if the user selected Rx + * ONLY of pause frames. In this case, we had + * to advertise FULL flow control because we + * could not advertise Rx ONLY. Hence, we must + * now check to see if we need to turn OFF the + * TRANSMISSION of PAUSE frames. + */ + if (hw->original_fc == E1000_FC_FULL) { + hw->fc = E1000_FC_FULL; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 0 | 1 | 1 | 1 | E1000_FC_TX_PAUSE + * + */ + else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) + { + hw->fc = E1000_FC_TX_PAUSE; + e_dbg + ("Flow Control = TX PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|------------------ + * 1 | 1 | 0 | 1 | E1000_FC_RX_PAUSE + * + */ + else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) && + (mii_nway_adv_reg & NWAY_AR_ASM_DIR) && + !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) && + (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) + { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + /* Per the IEEE spec, at this point flow control should + * be disabled. However, we want to consider that we + * could be connected to a legacy switch that doesn't + * advertise desired flow control, but can be forced on + * the link partner. So if we advertised no flow + * control, that is what we will resolve to. If we + * advertised some kind of receive capability (Rx Pause + * Only or Full Flow Control) and the link partner + * advertised none, we will configure ourselves to + * enable Rx Flow Control only. We can do this safely + * for two reasons: If the link partner really + * didn't want flow control enabled, and we enable Rx, + * no harm done since we won't be receiving any PAUSE + * frames anyway. If the intent on the link partner was + * to have flow control enabled, then by us enabling Rx + * only, we can at least receive pause frames and + * process them. This is a good idea because in most + * cases, since we are predominantly a server NIC, more + * times than not we will be asked to delay transmission + * of packets than asking our link partner to pause + * transmission of frames. + */ + else if ((hw->original_fc == E1000_FC_NONE || + hw->original_fc == E1000_FC_TX_PAUSE) || + hw->fc_strict_ieee) { + hw->fc = E1000_FC_NONE; + e_dbg("Flow Control = NONE.\n"); + } else { + hw->fc = E1000_FC_RX_PAUSE; + e_dbg + ("Flow Control = RX PAUSE frames only.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc = E1000_FC_NONE; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000_force_mac_fc(hw); + if (ret_val) { + e_dbg + ("Error forcing flow control settings\n"); + return ret_val; + } + } else { + e_dbg + ("Copper PHY and Auto Neg has not completed.\n"); + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_for_serdes_link_generic - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + */ +static s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw) +{ + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val = E1000_SUCCESS; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. 
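As a cross-check of the resolution logic above, the IEEE PAUSE table can be restated as a standalone function. This is an illustration, not driver code; it omits the legacy-switch fallback to Rx-only PAUSE that the comments above discuss:

enum fc_result { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

static enum fc_result resolve_pause(int loc_pause, int loc_asm_dir,
				    int lp_pause, int lp_asm_dir,
				    int requested_full)
{
	/* both ends symmetric-capable: ASM_DIR is irrelevant */
	if (loc_pause && lp_pause)
		return requested_full ? FC_FULL : FC_RX_PAUSE;
	/* we advertised asymmetric only, partner accepts being paused */
	if (!loc_pause && loc_asm_dir && lp_pause && lp_asm_dir)
		return FC_TX_PAUSE;
	/* partner is asymmetric only, we accept being paused */
	if (loc_pause && loc_asm_dir && !lp_pause && lp_asm_dir)
		return FC_RX_PAUSE;
	return FC_NONE;
}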
+ */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) { + if (hw->autoneg_failed == 0) { + hw->autoneg_failed = 1; + goto out; + } + e_dbg("NOT RXing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (hw->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + goto out; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("RXing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, hw->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + hw->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + udelay(10); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + hw->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg " + "completed successfully.\n"); + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid " + "codewords detected in autoneg.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + hw->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + out: + return ret_val; +} + +/** + * e1000_check_for_link + * @hw: Struct containing variables accessed by shared code + * + * Checks to see if the link status of the hardware has changed. + * Called by any function that needs to check the link status of the adapter. + */ +s32 e1000_check_for_link(struct e1000_hw *hw) +{ + u32 rxcw = 0; + u32 ctrl; + u32 status; + u32 rctl; + u32 icr; + u32 signal = 0; + s32 ret_val; + u16 phy_data; + + ctrl = er32(CTRL); + status = er32(STATUS); + + /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be + * set when the optics detect a signal. On older adapters, it will be + * cleared when there is a signal. This applies to fiber media only. + */ + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) { + rxcw = er32(RXCW); + + if (hw->media_type == e1000_media_type_fiber) { + signal = + (hw->mac_type > + e1000_82544) ? E1000_CTRL_SWDPIN1 : 0; + if (status & E1000_STATUS_LU) + hw->get_link_status = false; + } + } + + /* If we have a copper PHY then we only want to go out to the PHY + * registers to see if Auto-Neg has completed and/or if our link + * status has changed.
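A side note on the back-to-back reads of PHY_STATUS used just below and throughout this file: the MII link bit is latched low, so the first read returns the stale latched value and the second the live one. A hypothetical wrapper making the idiom explicit (the name is ours, not the driver's):

static s32 e1000_read_link_up(struct e1000_hw *hw, bool *link_up)
{
	u16 status;
	s32 ret_val;

	/* first read clears the latched (sticky) link-down indication */
	ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &status);
	if (ret_val)
		return ret_val;
	/* second read reports the current link state */
	ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &status);
	if (ret_val)
		return ret_val;

	*link_up = !!(status & MII_SR_LINK_STATUS);
	return E1000_SUCCESS;
}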
The get_link_status flag will be set if we + * receive a Link Status Change interrupt or we have Rx Sequence + * Errors. + */ + if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) { + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + * Read the register twice since the link bit is sticky. + */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if (phy_data & MII_SR_LINK_STATUS) { + hw->get_link_status = false; + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000_check_downshift(hw); + + /* If we are on 82544 or 82543 silicon and speed/duplex + * are forced to 10H or 10F, then we will implement the + * polarity reversal workaround. We disable interrupts + * first, and upon returning, place the devices + * interrupt state to its previous value except for the + * link status change interrupt which will + * happen due to the execution of this workaround. + */ + + if ((hw->mac_type == e1000_82544 + || hw->mac_type == e1000_82543) && (!hw->autoneg) + && (hw->forced_speed_duplex == e1000_10_full + || hw->forced_speed_duplex == e1000_10_half)) { + ew32(IMC, 0xffffffff); + ret_val = + e1000_polarity_reversal_workaround(hw); + icr = er32(ICR); + ew32(ICS, (icr & ~E1000_ICS_LSC)); + ew32(IMS, IMS_ENABLE_MASK); + } + + } else { + /* No link detected */ + e1000_config_dsp_after_link_change(hw, false); + return 0; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!hw->autoneg) + return -E1000_ERR_CONFIG; + + /* optimize the dsp settings for the igp phy */ + e1000_config_dsp_after_link_change(hw, true); + + /* We have a M88E1000 PHY and Auto-Neg is enabled. If we + * have Si on board that is 82544 or newer, Auto + * Speed Detection takes care of MAC speed/duplex + * configuration. So we only need to configure Collision + * Distance in the MAC. Otherwise, we need to force + * speed/duplex on the MAC to the current PHY speed/duplex + * settings. + */ + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_ce4100)) + e1000_config_collision_dist(hw); + else { + ret_val = e1000_config_mac_to_phy(hw); + if (ret_val) { + e_dbg + ("Error configuring MAC to PHY settings\n"); + return ret_val; + } + } + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control settings + * because we may have had to re-autoneg with a different link + * partner. + */ + ret_val = e1000_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + /* At this point we know that we are on copper and we have + * auto-negotiated link. These are conditions for checking the + * link partner capability register. We use the link speed to + * determine if TBI compatibility needs to be turned on or off. + * If the link is not at gigabit speed, then TBI compatibility + * is not needed. If we are at gigabit speed, we turn on TBI + * compatibility. 
+ */ + if (hw->tbi_compatibility_en) { + u16 speed, duplex; + ret_val = + e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg + ("Error getting link speed and duplex\n"); + return ret_val; + } + if (speed != SPEED_1000) { + /* If link speed is not set to gigabit speed, we + * do not need to enable TBI compatibility. + */ + if (hw->tbi_compatibility_on) { + /* If we previously were in the mode, + * turn it off. + */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_SBP; + ew32(RCTL, rctl); + hw->tbi_compatibility_on = false; + } + } else { + /* If TBI compatibility was previously off, + * turn it on. For compatibility with a TBI link + * partner, we will store bad packets. Some + * frames have an additional byte on the end and + * will look like CRC errors to the hardware. + */ + if (!hw->tbi_compatibility_on) { + hw->tbi_compatibility_on = true; + rctl = er32(RCTL); + rctl |= E1000_RCTL_SBP; + ew32(RCTL, rctl); + } + } + } + } + + if ((hw->media_type == e1000_media_type_fiber) || + (hw->media_type == e1000_media_type_internal_serdes)) + e1000_check_for_serdes_link_generic(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_get_speed_and_duplex + * @hw: Struct containing variables accessed by shared code + * @speed: Speed of the connection + * @duplex: Duplex setting of the connection + * + * Detects the current speed and duplex settings of the hardware. + */ +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex) +{ + u32 status; + s32 ret_val; + u16 phy_data; + + if (hw->mac_type >= e1000_82543) { + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) { + *speed = SPEED_1000; + e_dbg("1000 Mbs, "); + } else if (status & E1000_STATUS_SPEED_100) { + *speed = SPEED_100; + e_dbg("100 Mbs, "); + } else { + *speed = SPEED_10; + e_dbg("10 Mbs, "); + } + + if (status & E1000_STATUS_FD) { + *duplex = FULL_DUPLEX; + e_dbg("Full Duplex\n"); + } else { + *duplex = HALF_DUPLEX; + e_dbg(" Half Duplex\n"); + } + } else { + e_dbg("1000 Mbs, Full Duplex\n"); + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + } + + /* IGP01 PHY may advertise full duplex operation after speed downgrade + * even if it is operating at half duplex. Here we set the duplex + * settings to match the duplex in the link partner's capabilities. + */ + if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) { + ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data); + if (ret_val) + return ret_val; + + if (!(phy_data & NWAY_ER_LP_NWAY_CAPS)) + *duplex = HALF_DUPLEX; + else { + ret_val = + e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data); + if (ret_val) + return ret_val; + if ((*speed == SPEED_100 + && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) + || (*speed == SPEED_10 + && !(phy_data & NWAY_LPAR_10T_FD_CAPS))) + *duplex = HALF_DUPLEX; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_wait_autoneg + * @hw: Struct containing variables accessed by shared code + * + * Blocks until autoneg completes or times out (~4.5 seconds) + */ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val; + u16 i; + u16 phy_data; + + e_dbg("Waiting for Auto-Neg to complete.\n"); + + /* We will wait for autoneg to complete or 4.5 seconds to expire. */ + for (i = PHY_AUTO_NEG_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Auto-Neg + * Complete bit to be set.
+ */ + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + if (phy_data & MII_SR_AUTONEG_COMPLETE) { + return E1000_SUCCESS; + } + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_raise_mdi_clk - Raises the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_raise_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Raise the clock input to the Management Data Clock (by setting the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl | E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_lower_mdi_clk - Lowers the Management Data Clock + * @hw: Struct containing variables accessed by shared code + * @ctrl: Device control register's current value + */ +static void e1000_lower_mdi_clk(struct e1000_hw *hw, u32 *ctrl) +{ + /* Lower the clock input to the Management Data Clock (by clearing the + * MDC bit), and then delay 10 microseconds. + */ + ew32(CTRL, (*ctrl & ~E1000_CTRL_MDC)); + E1000_WRITE_FLUSH(); + udelay(10); +} + +/** + * e1000_shift_out_mdi_bits - Shifts data bits out to the PHY + * @hw: Struct containing variables accessed by shared code + * @data: Data to send out to the PHY + * @count: Number of bits to shift out + * + * Bits are shifted out in MSB to LSB order. + */ +static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, u32 data, u16 count) +{ + u32 ctrl; + u32 mask; + + /* We need to shift "count" number of bits out to the PHY. So, the value + * in the "data" parameter will be shifted out to the PHY one bit at a + * time. In order to do this, "data" must be broken down into bits. + */ + mask = 0x01; + mask <<= (count - 1); + + ctrl = er32(CTRL); + + /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */ + ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR); + + while (mask) { + /* A "1" is shifted out to the PHY by setting the MDIO bit to + * "1" and then raising and lowering the Management Data Clock. + * A "0" is shifted out to the PHY by setting the MDIO bit to + * "0" and then raising and lowering the clock. + */ + if (data & mask) + ctrl |= E1000_CTRL_MDIO; + else + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + udelay(10); + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + mask = mask >> 1; + } +} + +/** + * e1000_shift_in_mdi_bits - Shifts data bits in from the PHY + * @hw: Struct containing variables accessed by shared code + * + * Bits are shifted in in MSB to LSB order. + */ +static u16 e1000_shift_in_mdi_bits(struct e1000_hw *hw) +{ + u32 ctrl; + u16 data = 0; + u8 i; + + /* In order to read a register from the PHY, we need to shift in a total + * of 18 bits from the PHY. The first two bit (turnaround) times are + * used to avoid contention on the MDIO pin when a read operation is + * performed. These two bits are ignored by us and thrown away. Bits are + * "shifted in" by raising the input to the Management Data Clock + * (setting the MDC bit), and then reading the value of the MDIO bit. + */ + ctrl = er32(CTRL); + + /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as + * input. + */ + ctrl &= ~E1000_CTRL_MDIO_DIR; + ctrl &= ~E1000_CTRL_MDIO; + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + /* Raise and Lower the clock before reading in the data. This accounts + * for the turnaround bits. 
The first clock occurred when we clocked out + * the last bit of the Register Address. + */ + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + for (data = 0, i = 0; i < 16; i++) { + data = data << 1; + e1000_raise_mdi_clk(hw, &ctrl); + ctrl = er32(CTRL); + /* Check to see if we shifted in a "1". */ + if (ctrl & E1000_CTRL_MDIO) + data |= 1; + e1000_lower_mdi_clk(hw, &ctrl); + } + + e1000_raise_mdi_clk(hw, &ctrl); + e1000_lower_mdi_clk(hw, &ctrl); + + return data; +} + + +/** + * e1000_read_phy_reg - read a phy register + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to read + * + * Reads the value from a PHY register, if the value is on a specific non zero + * page, sets the page first. + */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) { + spin_unlock_irqrestore(&e1000_phy_lock, flags); + return ret_val; + } + } + + ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 *phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, and register address in the MDI + * Control register. The MAC will take care of interfacing with + * the PHY to retrieve the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_READ) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + + mdic = readl(E1000_MDIO_STS); + if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) { + e_dbg("MDI Read Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16) mdic; + } else { + mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read + * completed + */ + for (i = 0; i < 64; i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + *phy_data = (u16) mdic; + } + } else { + /* We must first send a preamble through the MDIO pin to signal + * the beginning of an MII instruction. This is done by sending + * 32 consecutive "1" bits. + */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the next few fields that are required for a read + * operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine five different times. 
The + * format of a MII read instruction consists of a shift out of + * 14 bits and is defined as follows: + * <Preamble><SOF><Op Code><Phy Addr><Reg Addr> + * followed by a shift in of 18 bits. The first two bits + * shifted in are TurnAround bits used to avoid contention on + * the MDIO pin when a READ operation is performed. These two + * bits are thrown away followed by a shift in of 16 bits which + * contains the desired data. + */ + mdic = ((reg_addr) | (phy_addr << 5) | + (PHY_OP_READ << 10) | (PHY_SOF << 12)); + + e1000_shift_out_mdi_bits(hw, mdic, 14); + + /* Now that we've shifted out the read command to the MII, we + * need to "shift in" the 16-bit value (18 total bits) of the + * requested PHY register address. + */ + *phy_data = e1000_shift_in_mdi_bits(hw); + } + return E1000_SUCCESS; +} + +/** + * e1000_write_phy_reg - write a phy register + * + * @hw: Struct containing variables accessed by shared code + * @reg_addr: address of the PHY register to write + * @data: data to write to the PHY + * + * Writes a value to a PHY register + */ +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 phy_data) +{ + u32 ret_val; + unsigned long flags; + + spin_lock_irqsave(&e1000_phy_lock, flags); + + if ((hw->phy_type == e1000_phy_igp) && + (reg_addr > MAX_PHY_MULTI_PAGE_REG)) { + ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT, + (u16) reg_addr); + if (ret_val) { + spin_unlock_irqrestore(&e1000_phy_lock, flags); + return ret_val; + } + } + + ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr, + phy_data); + spin_unlock_irqrestore(&e1000_phy_lock, flags); + + return ret_val; +} + +static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr, + u16 phy_data) +{ + u32 i; + u32 mdic = 0; + const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1; + + if (reg_addr > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", reg_addr); + return -E1000_ERR_PARAM; + } + + if (hw->mac_type > e1000_82543) { + /* Set up Op-code, Phy Address, register address, and data + * intended for the PHY register in the MDI Control register. + * The MAC will take care of interfacing with the PHY to send + * the desired data. + */ + if (hw->mac_type == e1000_ce4100) { + mdic = (((u32) phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (INTEL_CE_GBE_MDIC_OP_WRITE) | + (INTEL_CE_GBE_MDIC_GO)); + + writel(mdic, E1000_MDIO_CMD); + + /* Poll the ready bit to see if the MDI write + * completed + */ + for (i = 0; i < 640; i++) { + udelay(5); + mdic = readl(E1000_MDIO_CMD); + if (!(mdic & INTEL_CE_GBE_MDIC_GO)) + break; + } + if (mdic & INTEL_CE_GBE_MDIC_GO) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } else { + mdic = (((u32) phy_data) | + (reg_addr << E1000_MDIC_REG_SHIFT) | + (phy_addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI write + * completed + */ + for (i = 0; i < 641; i++) { + udelay(5); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + } + } else { + /* We'll need to use the SW defined pins to shift the write + * command out to the PHY. We first send a preamble to the PHY + * to signal the beginning of the MII instruction. This is done + * by sending 32 consecutive "1" bits.
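For reference, the clause-22 management frame these bit-bang paths assemble is <Preamble:32x1><SOF:01><OpCode:2><PhyAddr:5><RegAddr:5><TA:2><Data:16>. A sketch of the 14 command bits built for a read above, assuming the usual PHY_SOF (0x1) and PHY_OP_READ (0x2) values; the helper name is ours:

static u32 mii_read_cmd(u32 phy_addr, u32 reg_addr)
{
	/* shifted out MSB-first as 14 bits; the 2 turnaround and
	 * 16 data bits are then clocked in from the PHY
	 */
	return (0x1 << 12) |		/* start of frame */
	       (0x2 << 10) |		/* opcode: read */
	       ((phy_addr & 0x1f) << 5) |
	       (reg_addr & 0x1f);
}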
+ */ + e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE); + + /* Now combine the remaining required fields that will indicate + * a write operation. We use this method instead of calling the + * e1000_shift_out_mdi_bits routine for each field in the + * command. The format of a MII write instruction is as follows: + * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>. + */ + mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) | + (PHY_OP_WRITE << 12) | (PHY_SOF << 14)); + mdic <<= 16; + mdic |= (u32) phy_data; + + e1000_shift_out_mdi_bits(hw, mdic, 32); + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_hw_reset - reset the phy, hardware style + * @hw: Struct containing variables accessed by shared code + * + * Returns the PHY to the power-on reset state + */ +s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext; + u32 led_ctrl; + + e_dbg("Resetting Phy...\n"); + + if (hw->mac_type > e1000_82543) { + /* Read the device control register and assert the + * E1000_CTRL_PHY_RST bit. Then, take it out of reset. + * For e1000 hardware, we delay for 10ms between the assert + * and de-assert. + */ + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + E1000_WRITE_FLUSH(); + + msleep(10); + + ew32(CTRL, ctrl); + E1000_WRITE_FLUSH(); + + } else { + /* Read the Extended Device Control Register, assert the + * PHY_RESET_DIR bit to put the PHY into reset. Then, take it + * out of reset. + */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR; + ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + msleep(10); + ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA; + ew32(CTRL_EXT, ctrl_ext); + E1000_WRITE_FLUSH(); + } + udelay(150); + + if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) { + /* Configure activity LED after PHY reset */ + led_ctrl = er32(LEDCTL); + led_ctrl &= IGP_ACTIVITY_LED_MASK; + led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE); + ew32(LEDCTL, led_ctrl); + } + + /* Wait for FW to finish PHY configuration. */ + return e1000_get_phy_cfg_done(hw); +} + +/** + * e1000_phy_reset - reset the phy to commit settings + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY + * Sets bit 15 of the MII Control register + */ +s32 e1000_phy_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + switch (hw->phy_type) { + case e1000_phy_igp: + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) + return ret_val; + break; + default: + ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= MII_CR_RESET; + ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + break; + } + + if (hw->phy_type == e1000_phy_igp) + e1000_phy_init_script(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_detect_gig_phy - check the phy type + * @hw: Struct containing variables accessed by shared code + * + * Probes the expected PHY address for known PHY IDs + */ +static s32 e1000_detect_gig_phy(struct e1000_hw *hw) +{ + s32 phy_init_status, ret_val; + u16 phy_id_high, phy_id_low; + bool match = false; + + if (hw->phy_id != 0) + return E1000_SUCCESS; + + /* Read the PHY ID Registers to identify which PHY is onboard.
*/ + ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high); + if (ret_val) + return ret_val; + + hw->phy_id = (u32) (phy_id_high << 16); + udelay(20); + ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low); + if (ret_val) + return ret_val; + + hw->phy_id |= (u32) (phy_id_low & PHY_REVISION_MASK); + hw->phy_revision = (u32) phy_id_low & ~PHY_REVISION_MASK; + + switch (hw->mac_type) { + case e1000_82543: + if (hw->phy_id == M88E1000_E_PHY_ID) + match = true; + break; + case e1000_82544: + if (hw->phy_id == M88E1000_I_PHY_ID) + match = true; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + if (hw->phy_id == M88E1011_I_PHY_ID) + match = true; + break; + case e1000_ce4100: + if ((hw->phy_id == RTL8211B_PHY_ID) || + (hw->phy_id == RTL8201N_PHY_ID) || + (hw->phy_id == M88E1118_E_PHY_ID)) + match = true; + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (hw->phy_id == IGP01E1000_I_PHY_ID) + match = true; + break; + default: + e_dbg("Invalid MAC type %d\n", hw->mac_type); + return -E1000_ERR_CONFIG; + } + phy_init_status = e1000_set_phy_type(hw); + + if ((match) && (phy_init_status == E1000_SUCCESS)) { + e_dbg("PHY ID 0x%X detected\n", hw->phy_id); + return E1000_SUCCESS; + } + e_dbg("Invalid PHY ID 0x%X\n", hw->phy_id); + return -E1000_ERR_PHY; +} + +/** + * e1000_phy_reset_dsp - reset DSP + * @hw: Struct containing variables accessed by shared code + * + * Resets the PHY's DSP + */ +static s32 e1000_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + do { + ret_val = e1000_write_phy_reg(hw, 29, 0x001d); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x00c1); + if (ret_val) + break; + ret_val = e1000_write_phy_reg(hw, 30, 0x0000); + if (ret_val) + break; + ret_val = E1000_SUCCESS; + } while (0); + + return ret_val; +} + +/** + * e1000_phy_igp_get_info - get igp specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for igp PHY only. + */ +static s32 e1000_phy_igp_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data, min_length, max_length, average; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + /* IGP01E1000 does not need to support it. */ + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal; + + /* IGP01E1000 always correct polarity reversal */ + phy_info->polarity_correction = e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & IGP01E1000_PSSR_MDIX) >> + IGP01E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + /* Local/Remote Receiver Information are only valid @ 1000 + * Mbps + */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + /* Get cable length */ + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + /* Translate to old method */ + average = (max_length + min_length) / 2; + + if (average <= e1000_igp_cable_length_50) + phy_info->cable_length = e1000_cable_length_50; + else if (average <= e1000_igp_cable_length_80) + phy_info->cable_length = e1000_cable_length_50_80; + else if (average <= e1000_igp_cable_length_110) + phy_info->cable_length = e1000_cable_length_80_110; + else if (average <= e1000_igp_cable_length_140) + phy_info->cable_length = e1000_cable_length_110_140; + else + phy_info->cable_length = e1000_cable_length_140; + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_m88_get_info - get m88 specific registers + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers for m88 PHY only. + */ +static s32 e1000_phy_m88_get_info(struct e1000_hw *hw, + struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + e1000_rev_polarity polarity; + + /* The downshift status is checked only once, after link is established, + * and it stored in the hw->speed_downgraded parameter. + */ + phy_info->downshift = (e1000_downshift) hw->speed_downgraded; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_info->extended_10bt_distance = + ((phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >> + M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT) ? + e1000_10bt_ext_dist_enable_lower : + e1000_10bt_ext_dist_enable_normal; + + phy_info->polarity_correction = + ((phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >> + M88E1000_PSCR_POLARITY_REVERSAL_SHIFT) ? + e1000_polarity_reversal_disabled : e1000_polarity_reversal_enabled; + + /* Check polarity status */ + ret_val = e1000_check_polarity(hw, &polarity); + if (ret_val) + return ret_val; + phy_info->cable_polarity = polarity; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->mdix_mode = + (e1000_auto_x_mode) ((phy_data & M88E1000_PSSR_MDIX) >> + M88E1000_PSSR_MDIX_SHIFT); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + /* Cable Length Estimation and Local/Remote Receiver Information + * are only valid at 1000 Mbps. + */ + phy_info->cable_length = + (e1000_cable_length) ((phy_data & + M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy_info->local_rx = ((phy_data & SR_1000T_LOCAL_RX_STATUS) >> + SR_1000T_LOCAL_RX_STATUS_SHIFT) ? + e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + phy_info->remote_rx = ((phy_data & SR_1000T_REMOTE_RX_STATUS) >> + SR_1000T_REMOTE_RX_STATUS_SHIFT) ? 
+ e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + } + + return E1000_SUCCESS; +} + +/** + * e1000_phy_get_info - request phy info + * @hw: Struct containing variables accessed by shared code + * @phy_info: PHY information structure + * + * Get PHY information from various PHY registers + */ +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info) +{ + s32 ret_val; + u16 phy_data; + + phy_info->cable_length = e1000_cable_length_undefined; + phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined; + phy_info->cable_polarity = e1000_rev_polarity_undefined; + phy_info->downshift = e1000_downshift_undefined; + phy_info->polarity_correction = e1000_polarity_reversal_undefined; + phy_info->mdix_mode = e1000_auto_x_mode_undefined; + phy_info->local_rx = e1000_1000t_rx_status_undefined; + phy_info->remote_rx = e1000_1000t_rx_status_undefined; + + if (hw->media_type != e1000_media_type_copper) { + e_dbg("PHY info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data); + if (ret_val) + return ret_val; + + if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) { + e_dbg("PHY info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + if (hw->phy_type == e1000_phy_igp) + return e1000_phy_igp_get_info(hw, phy_info); + else if ((hw->phy_type == e1000_phy_8211) || + (hw->phy_type == e1000_phy_8201)) + return E1000_SUCCESS; + else + return e1000_phy_m88_get_info(hw, phy_info); +} + +s32 e1000_validate_mdi_setting(struct e1000_hw *hw) +{ + if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) { + e_dbg("Invalid MDI setting detected\n"); + hw->mdix = 1; + return -E1000_ERR_CONFIG; + } + return E1000_SUCCESS; +} + +/** + * e1000_init_eeprom_params - initialize sw eeprom vars + * @hw: Struct containing variables accessed by shared code + * + * Sets up eeprom variables in the hw struct. Must be called after mac_type + * is configured. 
+ */ +s32 e1000_init_eeprom_params(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd = er32(EECD); + s32 ret_val = E1000_SUCCESS; + u16 eeprom_size; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + eeprom->type = e1000_eeprom_microwire; + eeprom->word_size = 64; + eeprom->opcode_bits = 3; + eeprom->address_bits = 6; + eeprom->delay_usec = 50; + break; + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_82546_rev_3: + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_SIZE) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + break; + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (eecd & E1000_EECD_TYPE) { + eeprom->type = e1000_eeprom_spi; + eeprom->opcode_bits = 8; + eeprom->delay_usec = 1; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->page_size = 32; + eeprom->address_bits = 16; + } else { + eeprom->page_size = 8; + eeprom->address_bits = 8; + } + } else { + eeprom->type = e1000_eeprom_microwire; + eeprom->opcode_bits = 3; + eeprom->delay_usec = 50; + if (eecd & E1000_EECD_ADDR_BITS) { + eeprom->word_size = 256; + eeprom->address_bits = 8; + } else { + eeprom->word_size = 64; + eeprom->address_bits = 6; + } + } + break; + default: + break; + } + + if (eeprom->type == e1000_eeprom_spi) { + /* eeprom_size will be an enum [0..8] that maps to eeprom sizes + * 128B to 32KB (incremented by powers of 2). + */ + /* Set to default value for initial eeprom read. */ + eeprom->word_size = 64; + ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size); + if (ret_val) + return ret_val; + eeprom_size = + (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT; + /* 256B eeprom size was not supported in earlier hardware, so we + * bump eeprom_size up one to ensure that "1" (which maps to + * 256B) is never the result used in the shifting logic below. + */ + if (eeprom_size) + eeprom_size++; + + eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT); + } + return ret_val; +} + +/** + * e1000_raise_ee_clk - Raises the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_raise_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Raise the clock input to the EEPROM (by setting the SK bit), and then + * wait <delay> microseconds. + */ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_lower_ee_clk - Lowers the EEPROM's clock input. + * @hw: Struct containing variables accessed by shared code + * @eecd: EECD's current value + */ +static void e1000_lower_ee_clk(struct e1000_hw *hw, u32 *eecd) +{ + /* Lower the clock input to the EEPROM (by clearing the SK bit), and + * then wait 50 microseconds. + */ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); +} + +/** + * e1000_shift_out_ee_bits - Shift data bits out to the EEPROM.
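A worked example of the SPI size decode above, assuming EEPROM_WORD_SIZE_SHIFT is 6: the (already bumped) size field n selects 1 << (n + 6) sixteen-bit words, so n = 0 gives 64 words (128 B) and n = 8 gives 16384 words (32 KB). The helper name is ours:

static u32 spi_eeprom_words(u32 size_field)
{
	/* e.g. size_field 4 -> 1 << 10 = 1024 words = 2 KB */
	return 1u << (size_field + 6);	/* 6 == EEPROM_WORD_SIZE_SHIFT */
}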
+ * @hw: Struct containing variables accessed by shared code + * @data: data to send to the EEPROM + * @count: number of bits to shift out + */ +static void e1000_shift_out_ee_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u32 mask; + + /* We need to shift "count" bits out to the EEPROM. So, value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + */ + mask = 0x01 << (count - 1); + eecd = er32(EECD); + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~E1000_EECD_DO; + } else if (eeprom->type == e1000_eeprom_spi) { + eecd |= E1000_EECD_DO; + } + do { + /* A "1" is shifted out to the EEPROM by setting bit "DI" to a + * "1", and then raising and then lowering the clock (the SK bit + * controls the clock input to the EEPROM). A "0" is shifted + * out to the EEPROM by setting "DI" to "0" and then raising and + * then lowering the clock. + */ + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(eeprom->delay_usec); + + e1000_raise_ee_clk(hw, &eecd); + e1000_lower_ee_clk(hw, &eecd); + + mask = mask >> 1; + + } while (mask); + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_ee_bits - Shift data bits in from the EEPROM + * @hw: Struct containing variables accessed by shared code + * @count: number of bits to shift in + */ +static u16 e1000_shift_in_ee_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + /* In order to read a register from the EEPROM, we need to shift 'count' + * bits in from the EEPROM. Bits are "shifted in" by raising the clock + * input to the EEPROM (setting the SK bit), and then reading the value + * of the "DO" bit. During this "shifting in" process the "DI" bit + * should always be clear. + */ + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data = data << 1; + e1000_raise_ee_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DI); + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_ee_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000_acquire_eeprom - Prepares EEPROM for access + * @hw: Struct containing variables accessed by shared code + * + * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This + * function should be called before issuing a command to the EEPROM. 
+ */ +static s32 e1000_acquire_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd, i = 0; + + eecd = er32(EECD); + + /* Request EEPROM Access */ + if (hw->mac_type > e1000_82544) { + eecd |= E1000_EECD_REQ; + ew32(EECD, eecd); + eecd = er32(EECD); + while ((!(eecd & E1000_EECD_GNT)) && + (i < E1000_EEPROM_GRANT_ATTEMPTS)) { + i++; + udelay(5); + eecd = er32(EECD); + } + if (!(eecd & E1000_EECD_GNT)) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire EEPROM grant\n"); + return -E1000_ERR_EEPROM; + } + } + + /* Setup EEPROM for Read/Write */ + + if (eeprom->type == e1000_eeprom_microwire) { + /* Clear SK and DI */ + eecd &= ~(E1000_EECD_DI | E1000_EECD_SK); + ew32(EECD, eecd); + + /* Set CS */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(1); + } + + return E1000_SUCCESS; +} + +/** + * e1000_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_standby_eeprom(struct e1000_hw *hw) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + + eecd = er32(EECD); + + if (eeprom->type == e1000_eeprom_microwire) { + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock high */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Select EEPROM */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + + /* Clock low */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } else if (eeprom->type == e1000_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(eeprom->delay_usec); + } +} + +/** + * e1000_release_eeprom - drop chip select + * @hw: Struct containing variables accessed by shared code + * + * Terminates a command by inverting the EEPROM's chip select pin + */ +static void e1000_release_eeprom(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + + if (hw->eeprom.type == e1000_eeprom_spi) { + eecd |= E1000_EECD_CS; /* Pull CS high */ + eecd &= ~E1000_EECD_SK; /* Lower SCK */ + + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + + udelay(hw->eeprom.delay_usec); + } else if (hw->eeprom.type == e1000_eeprom_microwire) { + /* cleanup eeprom */ + + /* CS on Microwire is active-high */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_DI); + + ew32(EECD, eecd); + + /* Rising edge of clock */ + eecd |= E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + + /* Falling edge of clock */ + eecd &= ~E1000_EECD_SK; + ew32(EECD, eecd); + E1000_WRITE_FLUSH(); + udelay(hw->eeprom.delay_usec); + } + + /* Stop requesting EEPROM access */ + if (hw->mac_type > e1000_82544) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + } +} + +/** + * e1000_spi_eeprom_ready - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + */ +static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw) +{ + u16 retry_count = 0; + u8 spi_stat_reg; + + /* Read "Status Register" repeatedly until the LSB is cleared. 
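The grant request in e1000_acquire_eeprom() is a classic bounded poll: assert REQ, then spin on GNT with a fixed delay and a fixed attempt budget, and back out cleanly on timeout. The shape of that loop as a generic sketch; poll_gnt(), delay_us() and the budget are placeholders, not driver API (cf. E1000_EEPROM_GRANT_ATTEMPTS and udelay()):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define GRANT_ATTEMPTS 1000 /* placeholder budget */

/* Stubs standing in for the EECD GNT read and the microsecond delay. */
static bool poll_gnt(void) { static int polls; return ++polls > 3; }
static void delay_us(unsigned us) { (void)us; }

/* Bounded poll: true once the grant arrives, false when the budget
 * expires -- at which point the caller must deassert REQ again, just
 * as the driver clears E1000_EECD_REQ before returning an error. */
static bool wait_for_grant(void)
{
    for (uint32_t i = 0; i < GRANT_ATTEMPTS; i++) {
        if (poll_gnt())
            return true;
        delay_us(5);
    }
    return false;
}

int main(void)
{
    printf("grant acquired: %d\n", wait_for_grant());
    return 0;
}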
The + * EEPROM will signal that the command has been completed by clearing + * bit 0 of the internal status register. If it's not cleared within + * 5 milliseconds, then error out. + */ + retry_count = 0; + do { + e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI, + hw->eeprom.opcode_bits); + spi_stat_reg = (u8) e1000_shift_in_ee_bits(hw, 8); + if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI)) + break; + + udelay(5); + retry_count += 5; + + e1000_standby_eeprom(hw); + } while (retry_count < EEPROM_MAX_RETRY_SPI); + + /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and + * only 0-5mSec on 5V devices) + */ + if (retry_count >= EEPROM_MAX_RETRY_SPI) { + e_dbg("SPI EEPROM Status error\n"); + return -E1000_ERR_EEPROM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_read_eeprom - Reads a 16 bit word from the EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset of word in the EEPROM to read + * @data: word read from the EEPROM + * @words: number of words to read + */ +s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + spin_lock(&e1000_eeprom_lock); + ret = e1000_do_read_eeprom(hw, offset, words, data); + spin_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 i = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words. + */ + if ((offset >= eeprom->word_size) + || (words > eeprom->word_size - offset) || (words == 0)) { + e_dbg("\"words\" parameter out of bounds. Words = %d," + "size = %d\n", offset, eeprom->word_size); + return -E1000_ERR_EEPROM; + } + + /* EEPROM's that don't use EERD to read require us to bit-bang the SPI + * directly. In this case, we need to acquire the EEPROM so that + * FW or other port software does not interrupt. + */ + /* Prepare the EEPROM for bit-bang reading */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + /* Set up the SPI or Microwire EEPROM for bit-bang reading. We have + * acquired the EEPROM at this point, so any returns should release it + */ + if (eeprom->type == e1000_eeprom_spi) { + u16 word_in; + u8 read_opcode = EEPROM_READ_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) { + e1000_release_eeprom(hw); + return -E1000_ERR_EEPROM; + } + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + read_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16) (offset * 2), + eeprom->address_bits); + + /* Read the data. The address of the eeprom internally + * increments with each byte (spi) being read, saving on the + * overhead of eeprom setup and tear-down. The address counter + * will roll over if reading beyond the size of the eeprom, thus + * allowing the entire memory to be read starting from any + * offset. 
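The SPI read loop stores each word as (word_in >> 8) | (word_in << 8) because the EEPROM delivers the high byte first. That is just a 16-bit byte swap, shown here in isolation:

#include <stdint.h>
#include <stdio.h>

/* Swap the two bytes of a 16-bit word, as the SPI read loop does. */
static uint16_t swap16(uint16_t w)
{
    return (uint16_t)((w >> 8) | (w << 8));
}

int main(void)
{
    printf("0x%04X -> 0x%04X\n", 0x12ABu, (unsigned)swap16(0x12AB)); /* 0xAB12 */
    return 0;
}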
+ */ + for (i = 0; i < words; i++) { + word_in = e1000_shift_in_ee_bits(hw, 16); + data[i] = (word_in >> 8) | (word_in << 8); + } + } else if (eeprom->type == e1000_eeprom_microwire) { + for (i = 0; i < words; i++) { + /* Send the READ command (opcode + addr) */ + e1000_shift_out_ee_bits(hw, + EEPROM_READ_OPCODE_MICROWIRE, + eeprom->opcode_bits); + e1000_shift_out_ee_bits(hw, (u16) (offset + i), + eeprom->address_bits); + + /* Read the data. For microwire, each word requires the + * overhead of eeprom setup and tear-down. + */ + data[i] = e1000_shift_in_ee_bits(hw, 16); + e1000_standby_eeprom(hw); + } + } + + /* End this read operation */ + e1000_release_eeprom(hw); + + return E1000_SUCCESS; +} + +/** + * e1000_validate_eeprom_checksum - Verifies that the EEPROM has a valid checksum + * @hw: Struct containing variables accessed by shared code + * + * Reads the first 64 16 bit words of the EEPROM and sums the values read. + * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is + * valid. + */ +s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + +#ifdef CONFIG_PARISC + /* This is a signature and not a checksum on HP c8000 */ + if ((hw->subsystem_vendor_id == 0x103C) && (eeprom_data == 0x16d6)) + return E1000_SUCCESS; + +#endif + if (checksum == (u16) EEPROM_SUM) + return E1000_SUCCESS; + else { + e_dbg("EEPROM Checksum Invalid\n"); + return -E1000_ERR_EEPROM; + } +} + +/** + * e1000_update_eeprom_checksum - Calculates/writes the EEPROM checksum + * @hw: Struct containing variables accessed by shared code + * + * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA. + * Writes the difference to word offset 63 of the EEPROM. + */ +s32 e1000_update_eeprom_checksum(struct e1000_hw *hw) +{ + u16 checksum = 0; + u16 i, eeprom_data; + + for (i = 0; i < EEPROM_CHECKSUM_REG; i++) { + if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + checksum += eeprom_data; + } + checksum = (u16) EEPROM_SUM - checksum; + if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) { + e_dbg("EEPROM Write Error\n"); + return -E1000_ERR_EEPROM; + } + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom - write words to the different EEPROM types. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word to be written to the EEPROM + * + * If e1000_update_eeprom_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + */ +s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + s32 ret; + spin_lock(&e1000_eeprom_lock); + ret = e1000_do_write_eeprom(hw, offset, words, data); + spin_unlock(&e1000_eeprom_lock); + return ret; +} + +static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + s32 status = 0; + + if (hw->mac_type == e1000_ce4100) { + GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words, + data); + return E1000_SUCCESS; + } + + /* A check for invalid values: offset too large, too many words, and + * not enough words.
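The two checksum routines are easy to sanity-check in isolation: words 0..62 plus the checksum word at offset 63 must sum (mod 2^16) to EEPROM_SUM (0xBABA). A self-contained sketch of the same arithmetic over an in-memory image:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EEPROM_SUM   0xBABA /* as in e1000_hw.h */
#define CHECKSUM_REG 63     /* cf. EEPROM_CHECKSUM_REG */

/* Mirror of e1000_update_eeprom_checksum() on a RAM image. */
static void update_checksum(uint16_t img[64])
{
    uint16_t sum = 0;
    for (int i = 0; i < CHECKSUM_REG; i++)
        sum += img[i];
    img[CHECKSUM_REG] = (uint16_t)(EEPROM_SUM - sum);
}

/* Mirror of e1000_validate_eeprom_checksum(). */
static bool checksum_ok(const uint16_t img[64])
{
    uint16_t sum = 0;
    for (int i = 0; i <= CHECKSUM_REG; i++)
        sum += img[i];
    return sum == EEPROM_SUM;
}

int main(void)
{
    uint16_t img[64] = { 0x1234, 0x5678 }; /* remaining words are zero */
    update_checksum(img);
    printf("checksum word = 0x%04X, valid = %d\n",
           (unsigned)img[CHECKSUM_REG], checksum_ok(img));
    return 0;
}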
+ */ + if ((offset >= eeprom->word_size) + || (words > eeprom->word_size - offset) || (words == 0)) { + e_dbg("\"words\" parameter out of bounds\n"); + return -E1000_ERR_EEPROM; + } + + /* Prepare the EEPROM for writing */ + if (e1000_acquire_eeprom(hw) != E1000_SUCCESS) + return -E1000_ERR_EEPROM; + + if (eeprom->type == e1000_eeprom_microwire) { + status = e1000_write_eeprom_microwire(hw, offset, words, data); + } else { + status = e1000_write_eeprom_spi(hw, offset, words, data); + msleep(10); + } + + /* Done with writing */ + e1000_release_eeprom(hw); + + return status; +} + +/** + * e1000_write_eeprom_spi - Writes a 16 bit word to a given offset in an SPI EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_spi(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u16 widx = 0; + + while (widx < words) { + u8 write_opcode = EEPROM_WRITE_OPCODE_SPI; + + if (e1000_spi_eeprom_ready(hw)) + return -E1000_ERR_EEPROM; + + e1000_standby_eeprom(hw); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI, + eeprom->opcode_bits); + + e1000_standby_eeprom(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((eeprom->address_bits == 8) && (offset >= 128)) + write_opcode |= EEPROM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16) ((offset + widx) * 2), + eeprom->address_bits); + + /* Send the data */ + + /* Loop to allow for up to whole page write (32 bytes) of + * eeprom + */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_ee_bits(hw, word_out, 16); + widx++; + + /* Some larger eeprom sizes are capable of a 32-byte + * PAGE WRITE operation, while the smaller eeproms are + * capable of an 8-byte PAGE WRITE operation. Break the + * inner loop to pass new address + */ + if ((((offset + widx) * 2) % eeprom->page_size) == 0) { + e1000_standby_eeprom(hw); + break; + } + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_write_eeprom_microwire - Writes a 16 bit word to a given offset in a Microwire EEPROM. + * @hw: Struct containing variables accessed by shared code + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: pointer to array of 8 bit words to be written to the EEPROM + */ +static s32 e1000_write_eeprom_microwire(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_eeprom_info *eeprom = &hw->eeprom; + u32 eecd; + u16 words_written = 0; + u16 i = 0; + + /* Send the write enable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 11). It's less work to include + * the 11 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This puts the + * EEPROM into write/erase mode. 
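The inner loop of e1000_write_eeprom_spi() breaks whenever the running byte address crosses a page boundary, so each burst stays inside one PAGE WRITE. The boundary test in isolation, with the page size assumed to be 32 bytes as for the larger parts described above:

#include <stdint.h>
#include <stdio.h>

/* Print how a run of words starting at 'offset' (in words) splits into
 * SPI PAGE WRITE bursts; mirrors the ((byte_addr % page_size) == 0) test. */
static void show_page_bursts(uint16_t offset, uint16_t words, uint16_t page_size)
{
    uint16_t widx = 0;
    while (widx < words) {
        printf("burst:");
        while (widx < words) {
            printf(" word %u", (unsigned)(offset + widx));
            widx++;
            if ((((offset + widx) * 2u) % page_size) == 0)
                break; /* next word would start a new page */
        }
        printf("\n");
    }
}

int main(void)
{
    show_page_bursts(14, 6, 32); /* crosses the 32-byte boundary at word 16 */
    return 0;
}

With these inputs, words 14-15 go out in one burst and words 16-19 in a second, exactly the split the driver produces.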
+ */ + e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE, + (u16) (eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); + + /* Prepare the EEPROM */ + e1000_standby_eeprom(hw); + + while (words_written < words) { + /* Send the Write command (3-bit opcode + addr) */ + e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE, + eeprom->opcode_bits); + + e1000_shift_out_ee_bits(hw, (u16) (offset + words_written), + eeprom->address_bits); + + /* Send the data */ + e1000_shift_out_ee_bits(hw, data[words_written], 16); + + /* Toggle the CS line. This in effect tells the EEPROM to + * execute the previous command. + */ + e1000_standby_eeprom(hw); + + /* Read DO repeatedly until it is high (equal to '1'). The + * EEPROM will signal that the command has been completed by + * raising the DO signal. If DO does not go high in 10 + * milliseconds, then error out. + */ + for (i = 0; i < 200; i++) { + eecd = er32(EECD); + if (eecd & E1000_EECD_DO) + break; + udelay(50); + } + if (i == 200) { + e_dbg("EEPROM Write did not complete\n"); + return -E1000_ERR_EEPROM; + } + + /* Recover from write */ + e1000_standby_eeprom(hw); + + words_written++; + } + + /* Send the write disable command to the EEPROM (3-bit opcode plus + * 6/8-bit dummy address beginning with 10). It's less work to include + * the 10 of the dummy address as part of the opcode than it is to shift + * it over the correct number of bits for the address. This takes the + * EEPROM out of write/erase mode. + */ + e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE, + (u16) (eeprom->opcode_bits + 2)); + + e1000_shift_out_ee_bits(hw, 0, (u16) (eeprom->address_bits - 2)); + + return E1000_SUCCESS; +} + +/** + * e1000_read_mac_addr - read the adapters MAC from eeprom + * @hw: Struct containing variables accessed by shared code + * + * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the + * second function of dual function devices + */ +s32 e1000_read_mac_addr(struct e1000_hw *hw) +{ + u16 offset; + u16 eeprom_data, i; + + for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) { + offset = i >> 1; + if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + hw->perm_mac_addr[i] = (u8) (eeprom_data & 0x00FF); + hw->perm_mac_addr[i + 1] = (u8) (eeprom_data >> 8); + } + + switch (hw->mac_type) { + default: + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1) + hw->perm_mac_addr[5] ^= 0x01; + break; + } + + for (i = 0; i < NODE_ADDRESS_SIZE; i++) + hw->mac_addr[i] = hw->perm_mac_addr[i]; + return E1000_SUCCESS; +} + +/** + * e1000_init_rx_addrs - Initializes receive address filters. + * @hw: Struct containing variables accessed by shared code + * + * Places the MAC address in receive address register 0 and clears the rest + * of the receive address registers. Clears the multicast table. Assumes + * the receiver is in reset when the routine is called. + */ +static void e1000_init_rx_addrs(struct e1000_hw *hw) +{ + u32 i; + u32 rar_num; + + /* Setup the receive address. */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + e1000_rar_set(hw, hw->mac_addr, 0); + + rar_num = E1000_RAR_ENTRIES; + + /* Zero out the other 15 receive addresses. 
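e1000_read_mac_addr() (just below) relies on two details worth calling out: EEPROM words hold the MAC low byte first, and on dual-port 82546 parts the second function's address is the first with the last bit flipped. A host-side sketch of the same unpacking; the sample words are made up:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Unpack three little-endian EEPROM words into 6 MAC bytes and, for the
 * second function of a dual-port part, flip the LSB of the last byte --
 * the same transform as e1000_read_mac_addr(). */
static void unpack_mac(const uint16_t words[3], bool second_port, uint8_t mac[6])
{
    for (int i = 0; i < 6; i += 2) {
        mac[i]     = (uint8_t)(words[i / 2] & 0x00FF);
        mac[i + 1] = (uint8_t)(words[i / 2] >> 8);
    }
    if (second_port)
        mac[5] ^= 0x01;
}

int main(void)
{
    const uint16_t words[3] = { 0xB4A0, 0x12C9, 0x01F0 }; /* hypothetical */
    uint8_t mac[6];
    unpack_mac(words, true, mac);
    printf("%02X:%02X:%02X:%02X:%02X:%02X\n",
           mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
    return 0;
}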
*/ + e_dbg("Clearing RAR[1-15]\n"); + for (i = 1; i < rar_num; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_hash_mc_addr - Hashes an address to determine its location in the multicast table + * @hw: Struct containing variables accessed by shared code + * @mc_addr: the multicast address to hash + */ +u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value = 0; + + /* The portion of the address that is used for the hash table is + * determined by the mc_filter_type setting. + */ + switch (hw->mc_filter_type) { + /* [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + */ + case 0: + /* [47:36] i.e. 0x563 for above example address */ + hash_value = ((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4)); + break; + case 1: + /* [46:35] i.e. 0xAC6 for above example address */ + hash_value = ((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5)); + break; + case 2: + /* [45:34] i.e. 0x5D8 for above example address */ + hash_value = ((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6)); + break; + case 3: + /* [43:32] i.e. 0x634 for above example address */ + hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8)); + break; + } + + hash_value &= 0xFFF; + return hash_value; +} + +/** + * e1000_rar_set - Puts an ethernet address into a receive address register. + * @hw: Struct containing variables accessed by shared code + * @addr: Address to put into receive address register + * @index: Receive address register to write + */ +void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | + ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); + rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); + + /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx + * unit hang. + * + * Description: + * If there are any Rx frames queued up or otherwise present in the HW + * before RSS is enabled, and then we enable RSS, the HW Rx unit will + * hang. To work around this issue, we have to disable receives and + * flush out all Rx frames before we enable RSS. To do so, we modify we + * redirect all Rx traffic to manageability and then reset the HW. + * This flushes away Rx frames, and (since the redirections to + * manageability persists across resets) keeps new ones from coming in + * while we work. Then, we clear the Address Valid AV bit for all MAC + * addresses and undo the re-direction to manageability. + * Now, frames are coming in again, but the MAC won't accept them, so + * far so good. We now proceed to initialize RSS (if necessary) and + * configure the Rx unit. Last, we re-enable the AV bits and continue + * on our merry way. + */ + switch (hw->mac_type) { + default: + /* Indicate to hardware the Address is Valid. */ + rar_high |= E1000_RAH_AV; + break; + } + + E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high); + E1000_WRITE_FLUSH(); +} + +/** + * e1000_write_vfta - Writes a value to the specified offset in the VLAN filter table. 
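The four mc_filter_type cases in e1000_hash_mc_addr() each pick a different 12-bit window from the top of the address, and the worked values in its comment can be reproduced directly. A sketch over the documented example address; note that for filter type 2 this computes 0x58D, so the 0x5D8 in the comment above appears to be a transposition:

#include <stdint.h>
#include <stdio.h>

/* Same bit windows as e1000_hash_mc_addr(), keyed by filter type. */
static uint32_t mc_hash(const uint8_t *mc, int filter_type)
{
    uint32_t h = 0;
    switch (filter_type) {
    case 0: h = (mc[4] >> 4) | ((uint16_t)mc[5] << 4); break;
    case 1: h = (mc[4] >> 3) | ((uint16_t)mc[5] << 5); break;
    case 2: h = (mc[4] >> 2) | ((uint16_t)mc[5] << 6); break;
    case 3: h = (mc[4])      | ((uint16_t)mc[5] << 8); break;
    }
    return h & 0xFFF;
}

int main(void)
{
    const uint8_t mc[6] = { 0x01, 0xAA, 0x00, 0x12, 0x34, 0x56 };
    for (int t = 0; t < 4; t++)
        printf("filter type %d -> 0x%03X\n", t, (unsigned)mc_hash(mc, t));
    return 0;
}

This prints 0x563, 0xAC6, 0x58D and 0x634 for types 0 through 3.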
+ * @hw: Struct containing variables accessed by shared code + * @offset: Offset in VLAN filer table to write + * @value: Value to write into VLAN filter table + */ +void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value) +{ + u32 temp; + + if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) { + temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1)); + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp); + E1000_WRITE_FLUSH(); + } else { + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value); + E1000_WRITE_FLUSH(); + } +} + +/** + * e1000_clear_vfta - Clears the VLAN filer table + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_vfta(struct e1000_hw *hw) +{ + u32 offset; + u32 vfta_value = 0; + u32 vfta_offset = 0; + u32 vfta_bit_in_reg = 0; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* If the offset we want to clear is the same offset of the + * manageability VLAN ID, then clear all bits except that of the + * manageability unit + */ + vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value); + E1000_WRITE_FLUSH(); + } +} + +static s32 e1000_id_led_init(struct e1000_hw *hw) +{ + u32 ledctl; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 eeprom_data, i, temp; + const u16 led_mask = 0x0F; + + if (hw->mac_type < e1000_82540) { + /* Nothing to do */ + return E1000_SUCCESS; + } + + ledctl = er32(LEDCTL); + hw->ledctl_default = ledctl; + hw->ledctl_mode1 = hw->ledctl_default; + hw->ledctl_mode2 = hw->ledctl_default; + + if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) { + e_dbg("EEPROM Read Error\n"); + return -E1000_ERR_EEPROM; + } + + if ((eeprom_data == ID_LED_RESERVED_0000) || + (eeprom_data == ID_LED_RESERVED_FFFF)) { + eeprom_data = ID_LED_DEFAULT; + } + + for (i = 0; i < 4; i++) { + temp = (eeprom_data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + hw->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_setup_led + * @hw: Struct containing variables accessed by shared code + * + * Prepares SW controlable LED for use and saves the current state of the LED. 
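e1000_id_led_init() above treats the EEPROM's ID LED word as four packed 4-bit fields, one per LED, extracted with (eeprom_data >> (i << 2)) & 0x0F. The extraction on its own; the sample word is made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint16_t id_led_word = 0x8421; /* hypothetical EEPROM value */
    const uint16_t led_mask = 0x0F;

    /* LED 0 is the low nibble, LED 3 the high nibble. */
    for (int i = 0; i < 4; i++) {
        uint16_t field = (uint16_t)((id_led_word >> (i << 2)) & led_mask);
        printf("LED %d mode nibble = 0x%X\n", i, (unsigned)field);
    }
    return 0;
}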
+ */ +s32 e1000_setup_led(struct e1000_hw *hw) +{ + u32 ledctl; + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No setup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn off PHY Smart Power Down (if enabled) */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, + &hw->phy_spd_default); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + (u16) (hw->phy_spd_default & + ~IGP01E1000_GMII_SPD)); + if (ret_val) + return ret_val; + /* Fall Through */ + default: + if (hw->media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + /* Save current LEDCTL settings */ + hw->ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | + E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->media_type == e1000_media_type_copper) + ew32(LEDCTL, hw->ledctl_mode1); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_cleanup_led - Restores the saved state of the SW controlable LED. + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_cleanup_led(struct e1000_hw *hw) +{ + s32 ret_val = E1000_SUCCESS; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + /* No cleanup necessary */ + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + /* Turn on PHY Smart Power Down (if previously enabled) */ + ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + hw->phy_spd_default); + if (ret_val) + return ret_val; + /* Fall Through */ + default: + /* Restore LEDCTL settings */ + ew32(LEDCTL, hw->ledctl_default); + break; + } + + return E1000_SUCCESS; +} + +/** + * e1000_led_on - Turns on the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_on(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn on the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn on the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode2); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_led_off - Turns off the software controllable LED + * @hw: Struct containing variables accessed by shared code + */ +s32 e1000_led_off(struct e1000_hw *hw) +{ + u32 ctrl = er32(CTRL); + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + break; + case e1000_82544: + if (hw->media_type == 
e1000_media_type_fiber) { + /* Clear SW Defineable Pin 0 to turn off the LED */ + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } + break; + default: + if (hw->media_type == e1000_media_type_fiber) { + /* Set SW Defineable Pin 0 to turn off the LED */ + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + } else if (hw->media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->ledctl_mode1); + return E1000_SUCCESS; + } + break; + } + + ew32(CTRL, ctrl); + + return E1000_SUCCESS; +} + +/** + * e1000_clear_hw_cntrs - Clears all hardware statistics counters. + * @hw: Struct containing variables accessed by shared code + */ +static void e1000_clear_hw_cntrs(struct e1000_hw *hw) +{ + volatile u32 temp; + + temp = er32(CRCERRS); + temp = er32(SYMERRS); + temp = er32(MPC); + temp = er32(SCC); + temp = er32(ECOL); + temp = er32(MCC); + temp = er32(LATECOL); + temp = er32(COLC); + temp = er32(DC); + temp = er32(SEC); + temp = er32(RLEC); + temp = er32(XONRXC); + temp = er32(XONTXC); + temp = er32(XOFFRXC); + temp = er32(XOFFTXC); + temp = er32(FCRUC); + + temp = er32(PRC64); + temp = er32(PRC127); + temp = er32(PRC255); + temp = er32(PRC511); + temp = er32(PRC1023); + temp = er32(PRC1522); + + temp = er32(GPRC); + temp = er32(BPRC); + temp = er32(MPRC); + temp = er32(GPTC); + temp = er32(GORCL); + temp = er32(GORCH); + temp = er32(GOTCL); + temp = er32(GOTCH); + temp = er32(RNBC); + temp = er32(RUC); + temp = er32(RFC); + temp = er32(ROC); + temp = er32(RJC); + temp = er32(TORL); + temp = er32(TORH); + temp = er32(TOTL); + temp = er32(TOTH); + temp = er32(TPR); + temp = er32(TPT); + + temp = er32(PTC64); + temp = er32(PTC127); + temp = er32(PTC255); + temp = er32(PTC511); + temp = er32(PTC1023); + temp = er32(PTC1522); + + temp = er32(MPTC); + temp = er32(BPTC); + + if (hw->mac_type < e1000_82543) + return; + + temp = er32(ALGNERRC); + temp = er32(RXERRC); + temp = er32(TNCRS); + temp = er32(CEXTERR); + temp = er32(TSCTC); + temp = er32(TSCTFC); + + if (hw->mac_type <= e1000_82544) + return; + + temp = er32(MGTPRC); + temp = er32(MGTPDC); + temp = er32(MGTPTC); +} + +/** + * e1000_reset_adaptive - Resets Adaptive IFS to its default state. + * @hw: Struct containing variables accessed by shared code + * + * Call this after e1000_init_hw. You may override the IFS defaults by setting + * hw->ifs_params_forced to true. However, you must initialize hw-> + * current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio + * before calling this function. + */ +void e1000_reset_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if (!hw->ifs_params_forced) { + hw->current_ifs_val = 0; + hw->ifs_min_val = IFS_MIN; + hw->ifs_max_val = IFS_MAX; + hw->ifs_step_size = IFS_STEP; + hw->ifs_ratio = IFS_RATIO; + } + hw->in_ifs_mode = false; + ew32(AIT, 0); + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_update_adaptive - update adaptive IFS + * @hw: Struct containing variables accessed by shared code + * @tx_packets: Number of transmits since last callback + * @total_collisions: Number of collisions since last callback + * + * Called during the callback/watchdog routine to update IFS value based on + * the ratio of transmits to collisions. 
+ */ +void e1000_update_adaptive(struct e1000_hw *hw) +{ + if (hw->adaptive_ifs) { + if ((hw->collision_delta *hw->ifs_ratio) > hw->tx_packet_delta) { + if (hw->tx_packet_delta > MIN_NUM_XMITS) { + hw->in_ifs_mode = true; + if (hw->current_ifs_val < hw->ifs_max_val) { + if (hw->current_ifs_val == 0) + hw->current_ifs_val = + hw->ifs_min_val; + else + hw->current_ifs_val += + hw->ifs_step_size; + ew32(AIT, hw->current_ifs_val); + } + } + } else { + if (hw->in_ifs_mode + && (hw->tx_packet_delta <= MIN_NUM_XMITS)) { + hw->current_ifs_val = 0; + hw->in_ifs_mode = false; + ew32(AIT, 0); + } + } + } else { + e_dbg("Not in Adaptive IFS mode!\n"); + } +} + +/** + * e1000_get_bus_info + * @hw: Struct containing variables accessed by shared code + * + * Gets the current PCI bus type, speed, and width of the hardware + */ +void e1000_get_bus_info(struct e1000_hw *hw) +{ + u32 status; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + hw->bus_type = e1000_bus_type_pci; + hw->bus_speed = e1000_bus_speed_unknown; + hw->bus_width = e1000_bus_width_unknown; + break; + default: + status = er32(STATUS); + hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ? + e1000_bus_type_pcix : e1000_bus_type_pci; + + if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) { + hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ? + e1000_bus_speed_66 : e1000_bus_speed_120; + } else if (hw->bus_type == e1000_bus_type_pci) { + hw->bus_speed = (status & E1000_STATUS_PCI66) ? + e1000_bus_speed_66 : e1000_bus_speed_33; + } else { + switch (status & E1000_STATUS_PCIX_SPEED) { + case E1000_STATUS_PCIX_SPEED_66: + hw->bus_speed = e1000_bus_speed_66; + break; + case E1000_STATUS_PCIX_SPEED_100: + hw->bus_speed = e1000_bus_speed_100; + break; + case E1000_STATUS_PCIX_SPEED_133: + hw->bus_speed = e1000_bus_speed_133; + break; + default: + hw->bus_speed = e1000_bus_speed_reserved; + break; + } + } + hw->bus_width = (status & E1000_STATUS_BUS64) ? + e1000_bus_width_64 : e1000_bus_width_32; + break; + } +} + +/** + * e1000_write_reg_io + * @hw: Struct containing variables accessed by shared code + * @offset: offset to write to + * @value: value to write + * + * Writes a value to one of the devices registers using port I/O (as opposed to + * memory mapped I/O). Only 82544 and newer devices support port I/O. + */ +static void e1000_write_reg_io(struct e1000_hw *hw, u32 offset, u32 value) +{ + unsigned long io_addr = hw->io_base; + unsigned long io_data = hw->io_base + 4; + + e1000_io_write(hw, io_addr, offset); + e1000_io_write(hw, io_data, value); +} + +/** + * e1000_get_cable_length - Estimates the cable length. + * @hw: Struct containing variables accessed by shared code + * @min_length: The estimated minimum length + * @max_length: The estimated maximum length + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * This function always returns a ranged length (minimum & maximum). + * So for M88 phy's, this function interprets the one value returned from the + * register to the minimum and maximum range. + * For IGP phy's, the function calculates the range by the AGC registers. 
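The adaptive-IFS update above is a small state machine: when collisions run high relative to transmits, the inter-frame spacing steps up from its minimum toward its maximum; when traffic quiets down, it snaps back to zero. The decision as a pure function, simplified by folding out the in_ifs_mode latch; the constants are placeholders standing in for IFS_MIN, IFS_MAX, IFS_STEP and MIN_NUM_XMITS from the driver headers:

#include <stdint.h>
#include <stdio.h>

#define IFS_MIN       40   /* placeholder */
#define IFS_MAX       80   /* placeholder */
#define IFS_STEP      10   /* placeholder */
#define MIN_NUM_XMITS 1000 /* placeholder */

/* Given the deltas since the last watchdog tick, return the next AIT
 * value; mirrors the branch structure of e1000_update_adaptive(). */
static uint32_t next_ifs_val(uint32_t cur, uint32_t collisions,
                             uint32_t tx_packets, uint32_t ifs_ratio)
{
    if (collisions * ifs_ratio > tx_packets) {
        if (tx_packets > MIN_NUM_XMITS && cur < IFS_MAX)
            return cur == 0 ? IFS_MIN : cur + IFS_STEP;
        return cur;
    }
    return tx_packets <= MIN_NUM_XMITS ? 0 : cur;
}

int main(void)
{
    uint32_t cur = 0;
    for (int tick = 0; tick < 4; tick++) {
        cur = next_ifs_val(cur, 800, 2000, 3); /* collision-heavy traffic */
        printf("tick %d: AIT = %u\n", tick, (unsigned)cur);
    }
    return 0;
}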
+ */ +static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length, + u16 *max_length) +{ + s32 ret_val; + u16 agc_value = 0; + u16 i, phy_data; + u16 cable_length; + + *min_length = *max_length = 0; + + /* Use old method for Phy older than IGP */ + if (hw->phy_type == e1000_phy_m88) { + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT; + + /* Convert the enum value to ranged values */ + switch (cable_length) { + case e1000_cable_length_50: + *min_length = 0; + *max_length = e1000_igp_cable_length_50; + break; + case e1000_cable_length_50_80: + *min_length = e1000_igp_cable_length_50; + *max_length = e1000_igp_cable_length_80; + break; + case e1000_cable_length_80_110: + *min_length = e1000_igp_cable_length_80; + *max_length = e1000_igp_cable_length_110; + break; + case e1000_cable_length_110_140: + *min_length = e1000_igp_cable_length_110; + *max_length = e1000_igp_cable_length_140; + break; + case e1000_cable_length_140: + *min_length = e1000_igp_cable_length_140; + *max_length = e1000_igp_cable_length_170; + break; + default: + return -E1000_ERR_PHY; + } + } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ + u16 cur_agc_value; + u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; + static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_A, + IGP01E1000_PHY_AGC_B, + IGP01E1000_PHY_AGC_C, + IGP01E1000_PHY_AGC_D + }; + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + + ret_val = + e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT; + + /* Value bound check. */ + if ((cur_agc_value >= + IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) + || (cur_agc_value == 0)) + return -E1000_ERR_PHY; + + agc_value += cur_agc_value; + + /* Update minimal AGC value. */ + if (min_agc_value > cur_agc_value) + min_agc_value = cur_agc_value; + } + + /* Remove the minimal AGC result for length < 50m */ + if (agc_value < + IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) { + agc_value -= min_agc_value; + + /* Get the average length of the remaining 3 channels */ + agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1); + } else { + /* Get the average length of all the 4 channels. */ + agc_value /= IGP01E1000_PHY_CHANNEL_NUM; + } + + /* Set the range of the calculated length. */ + *min_length = ((e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) > 0) ? + (e1000_igp_cable_length_table[agc_value] - + IGP01E1000_AGC_RANGE) : 0; + *max_length = e1000_igp_cable_length_table[agc_value] + + IGP01E1000_AGC_RANGE; + } + + return E1000_SUCCESS; +} + +/** + * e1000_check_polarity - Check the cable polarity + * @hw: Struct containing variables accessed by shared code + * @polarity: output parameter : 0 - Polarity is not reversed + * 1 - Polarity is reversed. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function simply reads the polarity bit in the + * Phy Status register. For IGP phy's, this bit is valid only if link speed is + * 10 Mbps. If the link speed is 100 Mbps there is no polarity so this bit will + * return 0. If the link speed is 1000 Mbps the polarity status is in the + * IGP01E1000_PHY_PCS_INIT_REG. 
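The IGP branch of e1000_get_cable_length() averages the four AGC channel readings, but for short cables it throws away the smallest reading first and averages the remaining three. That averaging rule in isolation, with the register reads and table lookup omitted and 50 standing in for e1000_igp_cable_length_50:

#include <stdint.h>
#include <stdio.h>

#define CHANNELS      4
#define SHORT_CABLE_M 50 /* cf. e1000_igp_cable_length_50 */

/* Average the AGC channel readings the way the IGP branch does: below
 * the short-cable threshold, drop the minimum and average the other 3. */
static uint16_t average_agc(const uint16_t agc[CHANNELS])
{
    uint32_t sum = 0;
    uint16_t min = agc[0];

    for (int i = 0; i < CHANNELS; i++) {
        sum += agc[i];
        if (agc[i] < min)
            min = agc[i];
    }

    if (sum < (uint32_t)CHANNELS * SHORT_CABLE_M)
        return (uint16_t)((sum - min) / (CHANNELS - 1));
    return (uint16_t)(sum / CHANNELS);
}

int main(void)
{
    const uint16_t short_run[CHANNELS] = { 20, 22, 8, 21 }; /* made up */
    const uint16_t long_run[CHANNELS]  = { 90, 95, 88, 92 }; /* made up */
    printf("short: %u  long: %u\n",
           (unsigned)average_agc(short_run), (unsigned)average_agc(long_run));
    return 0;
}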
+ */ +static s32 e1000_check_polarity(struct e1000_hw *hw, + e1000_rev_polarity *polarity) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_m88) { + /* return the Polarity bit in the Status register. */ + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + *polarity = ((phy_data & M88E1000_PSSR_REV_POLARITY) >> + M88E1000_PSSR_REV_POLARITY_SHIFT) ? + e1000_rev_polarity_reversed : e1000_rev_polarity_normal; + + } else if (hw->phy_type == e1000_phy_igp) { + /* Read the Status register to check the speed */ + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + /* If speed is 1000 Mbps, must read the + * IGP01E1000_PHY_PCS_INIT_REG to find the polarity status + */ + if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + + /* Read the GIG initialization PCS register (0x00B4) */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG, + &phy_data); + if (ret_val) + return ret_val; + + /* Check the polarity bits */ + *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } else { + /* For 10 Mbps, read the polarity bit in the status + * register. (for 100 Mbps this bit is always 0) + */ + *polarity = + (phy_data & IGP01E1000_PSSR_POLARITY_REVERSED) ? + e1000_rev_polarity_reversed : + e1000_rev_polarity_normal; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_check_downshift - Check if Downshift occurred + * @hw: Struct containing variables accessed by shared code + * @downshift: output parameter : 0 - No Downshift occurred. + * 1 - Downshift occurred. + * + * returns: - E1000_ERR_XXX + * E1000_SUCCESS + * + * For phy's older than IGP, this function reads the Downshift bit in the Phy + * Specific Status register. For IGP phy's, it reads the Downgrade bit in the + * Link Health register. In IGP this bit is latched high, so the driver must + * read it immediately after link is established. + */ +static s32 e1000_check_downshift(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type == e1000_phy_igp) { + ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = + (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 
1 : 0; + } else if (hw->phy_type == e1000_phy_m88) { + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >> + M88E1000_PSSR_DOWNSHIFT_SHIFT; + } + + return E1000_SUCCESS; +} + +static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = { + IGP01E1000_PHY_AGC_PARAM_A, + IGP01E1000_PHY_AGC_PARAM_B, + IGP01E1000_PHY_AGC_PARAM_C, + IGP01E1000_PHY_AGC_PARAM_D +}; + +static s32 e1000_1000Mb_check_cable_length(struct e1000_hw *hw) +{ + u16 min_length, max_length; + u16 phy_data, i; + s32 ret_val; + + ret_val = e1000_get_cable_length(hw, &min_length, &max_length); + if (ret_val) + return ret_val; + + if (hw->dsp_config_state != e1000_dsp_config_enabled) + return 0; + + if (min_length >= e1000_igp_cable_length_50) { + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + + ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + hw->dsp_config_state = e1000_dsp_config_activated; + } else { + u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20; + u32 idle_errs = 0; + + /* clear previous idle error counts */ + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data); + if (ret_val) + return ret_val; + + for (i = 0; i < ffe_idle_err_timeout; i++) { + udelay(1000); + ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, + &phy_data); + if (ret_val) + return ret_val; + + idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT); + if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) { + hw->ffe_config_state = e1000_ffe_config_active; + + ret_val = e1000_write_phy_reg(hw, + IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_CM_CP); + if (ret_val) + return ret_val; + break; + } + + if (idle_errs) + ffe_idle_err_timeout = + FFE_IDLE_ERR_COUNT_TIMEOUT_100; + } + } + + return 0; +} + +/** + * e1000_config_dsp_after_link_change + * @hw: Struct containing variables accessed by shared code + * @link_up: was link up at the time this was called + * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + * + * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a + * gigabit link is achieved to improve link quality. + */ + +static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up) +{ + s32 ret_val; + u16 phy_data, phy_saved_data, speed, duplex, i; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + if (link_up) { + ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (speed == SPEED_1000) { + ret_val = e1000_1000Mb_check_cable_length(hw); + if (ret_val) + return ret_val; + } + } else { + if (hw->dsp_config_state == e1000_dsp_config_activated) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. 
+ */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { + ret_val = + e1000_read_phy_reg(hw, dsp_reg_array[i], + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX; + phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS; + + ret_val = + e1000_write_phy_reg(hw, dsp_reg_array[i], + phy_data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->dsp_config_state = e1000_dsp_config_enabled; + } + + if (hw->ffe_config_state == e1000_ffe_config_active) { + /* Save off the current value of register 0x2F5B to be + * restored at the end of the routines. + */ + ret_val = + e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data); + + if (ret_val) + return ret_val; + + /* Disable the PHY transmitter */ + ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003); + + if (ret_val) + return ret_val; + + msleep(20); + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_FORCE_GIGA); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE, + IGP01E1000_PHY_DSP_FFE_DEFAULT); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, 0x0000, + IGP01E1000_IEEE_RESTART_AUTONEG); + if (ret_val) + return ret_val; + + msleep(20); + + /* Now enable the transmitter */ + ret_val = + e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data); + + if (ret_val) + return ret_val; + + hw->ffe_config_state = e1000_ffe_config_enabled; + } + } + return E1000_SUCCESS; +} + +/** + * e1000_set_phy_mode - Set PHY to class A mode + * @hw: Struct containing variables accessed by shared code + * + * Assumes the following operations will follow to enable the new class mode. + * 1. Do a PHY soft reset + * 2. Restart auto-negotiation or force link. + */ +static s32 e1000_set_phy_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 eeprom_data; + + if ((hw->mac_type == e1000_82545_rev_3) && + (hw->media_type == e1000_media_type_copper)) { + ret_val = + e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, + &eeprom_data); + if (ret_val) { + return ret_val; + } + + if ((eeprom_data != EEPROM_RESERVED_WORD) && + (eeprom_data & EEPROM_PHY_CLASS_A)) { + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, + 0x000B); + if (ret_val) + return ret_val; + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, + 0x8104); + if (ret_val) + return ret_val; + + hw->phy_reset_disable = false; + } + } + + return E1000_SUCCESS; +} + +/** + * e1000_set_d3_lplu_state - set d3 link power state + * @hw: Struct containing variables accessed by shared code + * @active: true to enable lplu false to disable lplu. + * + * This function sets the lplu state according to the active flag. When + * activating lplu this function also disables smart speed and vise versa. + * lplu will not be activated unless the device autonegotiation advertisement + * meets standards of either 10 or 10/100 or 10/100/1000 at all duplexes. 
+ * + * returns: - E1000_ERR_PHY if fail to read/write the PHY + * E1000_SUCCESS at any other case. + */ +static s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 phy_data; + + if (hw->phy_type != e1000_phy_igp) + return E1000_SUCCESS; + + /* During driver activity LPLU should not be used or it will attain link + * from the lowest speeds starting from 10Mbps. The capability is used + * for Dx transitions and states + */ + if (hw->mac_type == e1000_82541_rev_2 + || hw->mac_type == e1000_82547_rev_2) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data); + if (ret_val) + return ret_val; + } + + if (!active) { + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data &= ~IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (hw->smart_speed == e1000_smart_speed_on) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } else if (hw->smart_speed == e1000_smart_speed_off) { + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + } + } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) + || (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL) + || (hw->autoneg_advertised == + AUTONEG_ADVERTISE_10_100_ALL)) { + + if (hw->mac_type == e1000_82541_rev_2 || + hw->mac_type == e1000_82547_rev_2) { + phy_data |= IGP01E1000_GMII_FLEX_SPD; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, + phy_data); + if (ret_val) + return ret_val; + } + + /* When LPLU is enabled we should disable SmartSpeed */ + ret_val = + e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = + e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, + phy_data); + if (ret_val) + return ret_val; + + } + return E1000_SUCCESS; +} + +/** + * e1000_set_vco_speed + * @hw: Struct containing variables accessed by shared code + * + * Change VCO speed register to improve Bit Error Rate performance of SERDES. 
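The rule implemented in e1000_set_d3_lplu_state() is simply that LPLU and SmartSpeed are never on together: enabling LPLU always clears SmartSpeed, and with LPLU off SmartSpeed follows the configured preference. A tiny decision helper capturing that; the enum names are illustrative, not the driver's, and the "default" preference (which really leaves the register untouched) is modeled as false:

#include <stdbool.h>
#include <stdio.h>

typedef enum { SPEED_DEFAULT, SPEED_ON, SPEED_OFF } smart_speed_pref;

/* Should the SmartSpeed bit be set? LPLU active forces it off;
 * otherwise the stored preference decides. */
static bool smart_speed_enabled(bool lplu_active, smart_speed_pref pref)
{
    if (lplu_active)
        return false;
    return pref == SPEED_ON;
}

int main(void)
{
    printf("%d %d %d\n",
           smart_speed_enabled(true, SPEED_ON),
           smart_speed_enabled(false, SPEED_ON),
           smart_speed_enabled(false, SPEED_OFF));
    return 0;
}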
+ */ +static s32 e1000_set_vco_speed(struct e1000_hw *hw) +{ + s32 ret_val; + u16 default_page = 0; + u16 phy_data; + + switch (hw->mac_type) { + case e1000_82545_rev_3: + case e1000_82546_rev_3: + break; + default: + return E1000_SUCCESS; + } + + /* Set PHY register 30, page 5, bit 8 to 0 */ + + ret_val = + e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PHY_VCO_REG_BIT8; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + /* Set PHY register 30, page 4, bit 11 to 1 */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PHY_VCO_REG_BIT11; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data); + if (ret_val) + return ret_val; + + ret_val = + e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page); + if (ret_val) + return ret_val; + + return E1000_SUCCESS; +} + + +/** + * e1000_enable_mng_pass_thru - check for bmc pass through + * @hw: Struct containing variables accessed by shared code + * + * Verifies the hardware needs to allow ARPs to be processed by the host + * returns: - true/false + */ +u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + + if (hw->asf_firmware_present) { + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN) || + !(manc & E1000_MANC_EN_MAC_ADDR_FILTER)) + return false; + if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN)) + return true; + } + return false; +} + +static s32 e1000_polarity_reversal_workaround(struct e1000_hw *hw) +{ + s32 ret_val; + u16 mii_status_reg; + u16 i; + + /* Polarity reversal workaround for forced 10F/10H links. */ + + /* Disable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the NO link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be clear. 
+ */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) + break; + msleep(100); + } + + /* Recommended delay time after link has been lost */ + msleep(1000); + + /* Now we will re-enable the transmitter on the PHY */ + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00); + if (ret_val) + return ret_val; + msleep(50); + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000); + if (ret_val) + return ret_val; + + ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000); + if (ret_val) + return ret_val; + + /* This loop will early-out if the link condition has been met. */ + for (i = PHY_FORCE_TIME; i > 0; i--) { + /* Read the MII Status Register and wait for Link Status bit + * to be set. + */ + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg); + if (ret_val) + return ret_val; + + if (mii_status_reg & MII_SR_LINK_STATUS) + break; + msleep(100); + } + return E1000_SUCCESS; +} + +/** + * e1000_get_auto_rd_done + * @hw: Struct containing variables accessed by shared code + * + * Check for EEPROM Auto Read bit done. + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_auto_rd_done(struct e1000_hw *hw) +{ + msleep(5); + return E1000_SUCCESS; +} + +/** + * e1000_get_phy_cfg_done + * @hw: Struct containing variables accessed by shared code + * + * Checks if the PHY configuration is done + * returns: - E1000_ERR_RESET if fail to reset MAC + * E1000_SUCCESS at any other case. + */ +static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw) +{ + msleep(10); + return E1000_SUCCESS; +} diff --git a/addons/e1000/src/4.4.180/e1000_hw.h b/addons/e1000/src/4.4.180/e1000_hw.h new file mode 100644 index 00000000..5cf7268c --- /dev/null +++ b/addons/e1000/src/4.4.180/e1000_hw.h @@ -0,0 +1,3110 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E.
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* e1000_hw.h + * Structures, enums, and macros for the MAC + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "e1000_osdep.h" + + +/* Forward declarations of structures used by the shared code */ +struct e1000_hw; +struct e1000_hw_stats; + +/* Enumerated types specific to the e1000 hardware */ +/* Media Access Controllers */ +typedef enum { + e1000_undefined = 0, + e1000_82542_rev2_0, + e1000_82542_rev2_1, + e1000_82543, + e1000_82544, + e1000_82540, + e1000_82545, + e1000_82545_rev_3, + e1000_82546, + e1000_ce4100, + e1000_82546_rev_3, + e1000_82541, + e1000_82541_rev_2, + e1000_82547, + e1000_82547_rev_2, + e1000_num_macs +} e1000_mac_type; + +typedef enum { + e1000_eeprom_uninitialized = 0, + e1000_eeprom_spi, + e1000_eeprom_microwire, + e1000_eeprom_flash, + e1000_eeprom_none, /* No NVM support */ + e1000_num_eeprom_types +} e1000_eeprom_type; + +/* Media Types */ +typedef enum { + e1000_media_type_copper = 0, + e1000_media_type_fiber = 1, + e1000_media_type_internal_serdes = 2, + e1000_num_media_types +} e1000_media_type; + +typedef enum { + e1000_10_half = 0, + e1000_10_full = 1, + e1000_100_half = 2, + e1000_100_full = 3 +} e1000_speed_duplex_type; + +/* Flow Control Settings */ +typedef enum { + E1000_FC_NONE = 0, + E1000_FC_RX_PAUSE = 1, + E1000_FC_TX_PAUSE = 2, + E1000_FC_FULL = 3, + E1000_FC_DEFAULT = 0xFF +} e1000_fc_type; + +struct e1000_shadow_ram { + u16 eeprom_word; + bool modified; +}; + +/* PCI bus types */ +typedef enum { + e1000_bus_type_unknown = 0, + e1000_bus_type_pci, + e1000_bus_type_pcix, + e1000_bus_type_reserved +} e1000_bus_type; + +/* PCI bus speeds */ +typedef enum { + e1000_bus_speed_unknown = 0, + e1000_bus_speed_33, + e1000_bus_speed_66, + e1000_bus_speed_100, + e1000_bus_speed_120, + e1000_bus_speed_133, + e1000_bus_speed_reserved +} e1000_bus_speed; + +/* PCI bus widths */ +typedef enum { + e1000_bus_width_unknown = 0, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +} e1000_bus_width; + +/* PHY status info structure and supporting enums */ +typedef enum { + e1000_cable_length_50 = 0, + e1000_cable_length_50_80, + e1000_cable_length_80_110, + e1000_cable_length_110_140, + e1000_cable_length_140, + e1000_cable_length_undefined = 0xFF +} e1000_cable_length; + +typedef enum { + e1000_gg_cable_length_60 = 0, + e1000_gg_cable_length_60_115 = 1, + e1000_gg_cable_length_115_150 = 2, + e1000_gg_cable_length_150 = 4 +} e1000_gg_cable_length; + +typedef enum { + e1000_igp_cable_length_10 = 10, + e1000_igp_cable_length_20 = 20, + e1000_igp_cable_length_30 = 30, + e1000_igp_cable_length_40 = 40, + e1000_igp_cable_length_50 = 50, + e1000_igp_cable_length_60 = 60, + e1000_igp_cable_length_70 = 70, + e1000_igp_cable_length_80 = 80, + e1000_igp_cable_length_90 = 90, + e1000_igp_cable_length_100 = 100, + e1000_igp_cable_length_110 = 110, + e1000_igp_cable_length_115 = 115, + e1000_igp_cable_length_120 = 120, + e1000_igp_cable_length_130 = 130, + e1000_igp_cable_length_140 = 140, + e1000_igp_cable_length_150 = 150, + e1000_igp_cable_length_160 = 160, + e1000_igp_cable_length_170 = 170, + e1000_igp_cable_length_180 = 180 +} e1000_igp_cable_length; + +typedef enum { + e1000_10bt_ext_dist_enable_normal = 0, + e1000_10bt_ext_dist_enable_lower, + e1000_10bt_ext_dist_enable_undefined = 0xFF +} e1000_10bt_ext_dist_enable; + +typedef enum { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + 
e1000_rev_polarity_undefined = 0xFF +} e1000_rev_polarity; + +typedef enum { + e1000_downshift_normal = 0, + e1000_downshift_activated, + e1000_downshift_undefined = 0xFF +} e1000_downshift; + +typedef enum { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +} e1000_smart_speed; + +typedef enum { + e1000_polarity_reversal_enabled = 0, + e1000_polarity_reversal_disabled, + e1000_polarity_reversal_undefined = 0xFF +} e1000_polarity_reversal; + +typedef enum { + e1000_auto_x_mode_manual_mdi = 0, + e1000_auto_x_mode_manual_mdix, + e1000_auto_x_mode_auto1, + e1000_auto_x_mode_auto2, + e1000_auto_x_mode_undefined = 0xFF +} e1000_auto_x_mode; + +typedef enum { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +} e1000_1000t_rx_status; + +typedef enum { + e1000_phy_m88 = 0, + e1000_phy_igp, + e1000_phy_8211, + e1000_phy_8201, + e1000_phy_undefined = 0xFF +} e1000_phy_type; + +typedef enum { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +} e1000_ms_type; + +typedef enum { + e1000_ffe_config_enabled = 0, + e1000_ffe_config_active, + e1000_ffe_config_blocked +} e1000_ffe_config; + +typedef enum { + e1000_dsp_config_disabled = 0, + e1000_dsp_config_enabled, + e1000_dsp_config_activated, + e1000_dsp_config_undefined = 0xFF +} e1000_dsp_config; + +struct e1000_phy_info { + e1000_cable_length cable_length; + e1000_10bt_ext_dist_enable extended_10bt_distance; + e1000_rev_polarity cable_polarity; + e1000_downshift downshift; + e1000_polarity_reversal polarity_correction; + e1000_auto_x_mode mdix_mode; + e1000_1000t_rx_status local_rx; + e1000_1000t_rx_status remote_rx; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_eeprom_info { + e1000_eeprom_type type; + u16 word_size; + u16 opcode_bits; + u16 address_bits; + u16 delay_usec; + u16 page_size; +}; + +/* Flex ASF Information */ +#define E1000_HOST_IF_MAX_SIZE 2048 + +typedef enum { + e1000_byte_align = 0, + e1000_word_align = 1, + e1000_dword_align = 2 +} e1000_align_type; + +/* Error Codes */ +#define E1000_SUCCESS 0 +#define E1000_ERR_EEPROM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_TYPE 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 + +#define E1000_BYTE_SWAP_WORD(_value) ((((_value) & 0x00ff) << 8) | \ + (((_value) & 0xff00) >> 8)) + +/* Function prototypes */ +/* Initialization */ +s32 e1000_reset_hw(struct e1000_hw *hw); +s32 e1000_init_hw(struct e1000_hw *hw); +s32 e1000_set_mac_type(struct e1000_hw *hw); +void e1000_set_media_type(struct e1000_hw *hw); + +/* Link Configuration */ +s32 e1000_setup_link(struct e1000_hw *hw); +s32 e1000_phy_setup_autoneg(struct e1000_hw *hw); +void e1000_config_collision_dist(struct e1000_hw *hw); +s32 e1000_check_for_link(struct e1000_hw *hw); +s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 * speed, u16 * duplex); +s32 e1000_force_mac_fc(struct e1000_hw *hw); + +/* PHY */ +s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 * phy_data); +s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data); +s32 e1000_phy_hw_reset(struct e1000_hw *hw); +s32 e1000_phy_reset(struct e1000_hw *hw); +s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info); +s32 e1000_validate_mdi_setting(struct e1000_hw *hw); + +/* 
EEPROM Functions */
+s32 e1000_init_eeprom_params(struct e1000_hw *hw);
+
+/* MNG HOST IF functions */
+u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
+
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 /* Time in ms to process MNG command */
+#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 /* Cookie offset */
+#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 /* Cookie length */
+#define E1000_MNG_IAMT_MODE 0x3
+#define E1000_MNG_ICH_IAMT_MODE 0x2
+#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management Technology signature */
+
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT 0x2 /* VLAN tagging supported */
+#define E1000_VFTA_ENTRY_SHIFT 0x5
+#define E1000_VFTA_ENTRY_MASK 0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
+
+struct e1000_host_mng_command_header {
+	u8 command_id;
+	u8 checksum;
+	u16 reserved1;
+	u16 reserved2;
+	u16 command_length;
+};
+
+struct e1000_host_mng_command_info {
+	struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
+	u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data, length 0..0x658 */
+};
+#ifdef __BIG_ENDIAN
+struct e1000_host_mng_dhcp_cookie {
+	u32 signature;
+	u16 vlan_id;
+	u8 reserved0;
+	u8 status;
+	u32 reserved1;
+	u8 checksum;
+	u8 reserved3;
+	u16 reserved2;
+};
+#else
+struct e1000_host_mng_dhcp_cookie {
+	u32 signature;
+	u8 status;
+	u8 reserved0;
+	u16 vlan_id;
+	u32 reserved1;
+	u16 reserved2;
+	u8 reserved3;
+	u8 checksum;
+};
+#endif
+
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
+s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
+s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 * data);
+s32 e1000_read_mac_addr(struct e1000_hw *hw);
+
+/* Filters (multicast, vlan, receive) */
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
+void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+
+/* LED functions */
+s32 e1000_setup_led(struct e1000_hw *hw);
+s32 e1000_cleanup_led(struct e1000_hw *hw);
+s32 e1000_led_on(struct e1000_hw *hw);
+s32 e1000_led_off(struct e1000_hw *hw);
+s32 e1000_blink_led_start(struct e1000_hw *hw);
+
+/* Adaptive IFS Functions */
+
+/* Everything else */
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+void e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
+int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
+/* Port I/O is only supported on 82544 and newer */
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
+
+#define E1000_READ_REG_IO(a, reg) \
+	e1000_read_reg_io((a), E1000_##reg)
+#define E1000_WRITE_REG_IO(a, reg, val) \
+	e1000_write_reg_io((a), E1000_##reg, val)
+
+/* PCI Device IDs */
+#define E1000_DEV_ID_82542 0x1000
+#define E1000_DEV_ID_82543GC_FIBER 0x1001
+#define E1000_DEV_ID_82543GC_COPPER 0x1004
+#define E1000_DEV_ID_82544EI_COPPER 0x1008
+#define E1000_DEV_ID_82544EI_FIBER 0x1009
+#define E1000_DEV_ID_82544GC_COPPER 0x100C
+#define E1000_DEV_ID_82544GC_LOM 0x100D
+#define E1000_DEV_ID_82540EM 0x100E
+#define E1000_DEV_ID_82540EM_LOM 0x1015
+#define E1000_DEV_ID_82540EP_LOM 0x1016
+#define E1000_DEV_ID_82540EP 0x1017
+#define E1000_DEV_ID_82540EP_LP 0x101E
+#define E1000_DEV_ID_82545EM_COPPER 0x100F
+#define E1000_DEV_ID_82545EM_FIBER 0x1011
+#define E1000_DEV_ID_82545GM_COPPER 0x1026
+#define E1000_DEV_ID_82545GM_FIBER 0x1027
+#define E1000_DEV_ID_82545GM_SERDES 0x1028
+#define E1000_DEV_ID_82546EB_COPPER 0x1010
+#define E1000_DEV_ID_82546EB_FIBER 0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82541EI 0x1013
+#define E1000_DEV_ID_82541EI_MOBILE 0x1018
+#define E1000_DEV_ID_82541ER_LOM 0x1014
+#define E1000_DEV_ID_82541ER 0x1078
+#define E1000_DEV_ID_82547GI 0x1075
+#define E1000_DEV_ID_82541GI 0x1076
+#define E1000_DEV_ID_82541GI_MOBILE 0x1077
+#define E1000_DEV_ID_82541GI_LF 0x107C
+#define E1000_DEV_ID_82546GB_COPPER 0x1079
+#define E1000_DEV_ID_82546GB_FIBER 0x107A
+#define E1000_DEV_ID_82546GB_SERDES 0x107B
+#define E1000_DEV_ID_82546GB_PCIE 0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
+#define E1000_DEV_ID_82547EI 0x1019
+#define E1000_DEV_ID_82547EI_MOBILE 0x101A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
+
+#define NODE_ADDRESS_SIZE 6
+
+/* MAC decode size is 128K - This is the size of BAR0 */
+#define MAC_DECODE_SIZE (128 * 1024)
+
+#define E1000_82542_2_0_REV_ID 2
+#define E1000_82542_2_1_REV_ID 3
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* The sizes (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE 14
+#define MINIMUM_ETHERNET_FRAME_SIZE 64 /* With FCS */
+#define ETHERNET_FCS_SIZE 4
+#define MINIMUM_ETHERNET_PACKET_SIZE \
+	(MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define CRC_LENGTH ETHERNET_FCS_SIZE
+#define MAX_JUMBO_FRAME_SIZE 0x3F00
+
+/* 802.1q VLAN Packet Sizes */
+#define VLAN_TAG_SIZE 4 /* 802.3ac tag (not DMAed) */
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.3ac packet */
+#define ETHERNET_IP_TYPE 0x0800 /* IP packets */
+#define ETHERNET_ARP_TYPE 0x0806 /* Address Resolution Protocol (ARP) */
+
+/* Packet Header defines */
+#define IP_PROTOCOL_TCP 6
+#define IP_PROTOCOL_UDP 0x11
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+	E1000_IMS_RXDMT0 | \
+	E1000_IMS_RXSEQ)
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register. Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+	E1000_IMS_RXT0 | \
+	E1000_IMS_TXDW | \
+	E1000_IMS_RXDMT0 | \
+	E1000_IMS_RXSEQ | \
+	E1000_IMS_LSC)
+
+/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
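+ * Each entry is the low/high register pair laid out by struct e1000_rar
+ * below, and an entry takes effect only while its address-valid bit
+ * (E1000_RAH_AV) is set.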
We + * reserve one of these spots for our directed address, allowing us room for + * E1000_RAR_ENTRIES - 1 multicast addresses. + */ +#define E1000_RAR_ENTRIES 15 + +#define MIN_NUMBER_OF_DESCRIPTORS 8 +#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8 + +/* Receive Descriptor */ +struct e1000_rx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + __le16 length; /* Length of data DMAed into data buffer */ + __le16 csum; /* Packet checksum */ + u8 status; /* Descriptor status */ + u8 errors; /* Descriptor Errors */ + __le16 special; +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +#define E1000_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +#define E1000_RXD_STAT_IPIDV 0x200 /* IP identification valid */ +#define E1000_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +#define E1000_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +#define E1000_RXD_SPC_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +#define E1000_RXD_SPC_PRI_SHIFT 13 +#define E1000_RXD_SPC_CFI_MASK 0x1000 /* CFI is bit 12 */ +#define E1000_RXD_SPC_CFI_SHIFT 12 + +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_TCPE 0x20000000 +#define E1000_RXDEXT_STATERR_IPE 0x40000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 +#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +/* mask to 
determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_DTYP_C 0x00000000 /* Context Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; /* */ + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; /* */ + u8 cmd; /* */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; /* */ + } fields; + } upper; +}; + +/* Filters */ +#define E1000_NUM_UNICAST 16 /* Unicast filter entries */ +#define E1000_MC_TBL_SIZE 128 /* Multicast Filter Table (4096 bits) */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address Register */ +struct e1000_rar { + volatile 
__le32 low; /* receive address low */ + volatile __le32 high; /* receive address high */ +}; + +/* Number of entries in the Multicast Table Array (MTA). */ +#define E1000_NUM_MTA_REGISTERS 128 + +/* IPv4 Address Table Entry */ +struct e1000_ipv4_at_entry { + volatile u32 ipv4_addr; /* IP Address (RW) */ + volatile u32 reserved; +}; + +/* Four wakeup IP addresses are supported */ +#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4 +#define E1000_IP4AT_SIZE E1000_WAKEUP_IP_ADDRESS_COUNT_MAX +#define E1000_IP6AT_SIZE 1 + +/* IPv6 Address Table Entry */ +struct e1000_ipv6_at_entry { + volatile u8 ipv6_addr[16]; +}; + +/* Flexible Filter Length Table Entry */ +struct e1000_fflt_entry { + volatile u32 length; /* Flexible Filter Length (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Mask Table Entry */ +struct e1000_ffmt_entry { + volatile u32 mask; /* Flexible Filter Mask (RW) */ + volatile u32 reserved; +}; + +/* Flexible Filter Value Table Entry */ +struct e1000_ffvt_entry { + volatile u32 value; /* Flexible Filter Value (RW) */ + volatile u32 reserved; +}; + +/* Four Flexible Filters are supported */ +#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4 + +/* Each Flexible Filter is at most 128 (0x80) bytes in length */ +#define E1000_FLEXIBLE_FILTER_SIZE_MAX 128 + +#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX +#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX +#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX + +#define E1000_DISABLE_SERDES_LOOPBACK 0x0400 + +/* Register Set. (82543, 82544) + * + * Registers are defined to be 32 bits and should be accessed as 32 bit values. + * These registers are physically located on the NIC, but are mapped into the + * host memory address space. + * + * RW - register is both readable and writable + * RO - register is read only + * WO - register is write only + * R/clr - register is read only and is cleared when read + * A - register array + */ +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_CTRL_DUP 0x00004 /* Device Control Duplicate (Shadow) - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ + +#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt) +#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0) +#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4) +#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8) +#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC) +#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20) +#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24) + +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ + 
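+
+/* Illustrative sketch only (kept out of the build with #if 0): how the
+ * access rules above are typically exercised, assuming the er32()/ew32()
+ * helpers that e1000_osdep.h layers over readl()/writel() and hw->hw_addr.
+ * The function name is purely an example.
+ */
+#if 0
+static void e1000_example_reg_access(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	/* RW register: read, modify, write back the full 32-bit value */
+	ctrl = er32(CTRL);
+	ew32(CTRL, ctrl | E1000_CTRL_SLU); /* set link up */
+
+	/* R/clr register: a single read returns the pending causes and
+	 * clears them in hardware.
+	 */
+	(void)er32(ICR);
+}
+#endif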
+/* Auxiliary Control Register. This register is CE4100 specific, + * RMII/RGMII function is switched by this register - RW + * Following are bits definitions of the Auxiliary Control Register + */ +#define E1000_CTL_AUX 0x000E0 +#define E1000_CTL_AUX_END_SEL_SHIFT 10 +#define E1000_CTL_AUX_ENDIANESS_SHIFT 8 +#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0 + +/* descriptor and packet transfer use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use CTL_AUX.ENDIANESS, packet use default */ +#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT) +/* descriptor use default, packet use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT) +/* all use CTL_AUX.ENDIANESS */ +#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT) + +#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT) +#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT) + +/* LW little endian, Byte big endian */ +#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT) +#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT) + +#define E1000_RCTL 0x00100 /* RX Control - RW */ +#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ +#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ +#define E1000_RDBAH1 0x02904 /* RX Descriptor Base Address High (1) - RW */ +#define E1000_RDLEN1 0x02908 /* RX Descriptor Length (1) - RW */ +#define E1000_RDH1 0x02910 /* RX Descriptor Head (1) - RW */ +#define E1000_RDT1 0x02918 /* RX Descriptor Tail (1) - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* TX Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* RX Configuration Word - RO */ +#define E1000_TCTL 0x00400 /* TX Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended TX Control - RW */ +#define E1000_TIPG 0x00410 /* TX Inter-packet gap -RW */ +#define E1000_TBT 0x00448 /* TX Burst Timer - RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define FEXTNVM_SW_CONFIG 0x0001 +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_FLASH_UPDATES 1000 +#define E1000_EEARBC 0x01024 /* EEPROM Auto Read Bus Control */ +#define E1000_FLASHT 0x01028 /* FLASH Timer Register */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLSWCTL 0x01030 /* FLASH control register */ +#define E1000_FLSWDATA 0x01034 /* FLASH data register */ +#define E1000_FLSWCNT 0x01038 /* FLASH Access Counter */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* RX Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* RX Data FIFO Tail - RW */ 
+#define E1000_RDFHS 0x02420 /* RX Data FIFO Head Saved - RW */
+#define E1000_RDFTS 0x02428 /* RX Data FIFO Tail Saved - RW */
+#define E1000_RDFPC 0x02430 /* RX Data FIFO Packet Count - RW */
+#define E1000_RDBAL 0x02800 /* RX Descriptor Base Address Low - RW */
+#define E1000_RDBAH 0x02804 /* RX Descriptor Base Address High - RW */
+#define E1000_RDLEN 0x02808 /* RX Descriptor Length - RW */
+#define E1000_RDH 0x02810 /* RX Descriptor Head - RW */
+#define E1000_RDT 0x02818 /* RX Descriptor Tail - RW */
+#define E1000_RDTR 0x02820 /* RX Delay Timer - RW */
+#define E1000_RDBAL0 E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
+#define E1000_RDBAH0 E1000_RDBAH /* RX Desc Base Address High (0) - RW */
+#define E1000_RDLEN0 E1000_RDLEN /* RX Desc Length (0) - RW */
+#define E1000_RDH0 E1000_RDH /* RX Desc Head (0) - RW */
+#define E1000_RDT0 E1000_RDT /* RX Desc Tail (0) - RW */
+#define E1000_RDTR0 E1000_RDTR /* RX Delay Timer (0) - RW */
+#define E1000_RXDCTL 0x02828 /* RX Descriptor Control queue 0 - RW */
+#define E1000_RXDCTL1 0x02928 /* RX Descriptor Control queue 1 - RW */
+#define E1000_RADV 0x0282C /* RX Interrupt Absolute Delay Timer - RW */
+#define E1000_RSRPD 0x02C00 /* RX Small Packet Detect - RW */
+#define E1000_RAID 0x02C08 /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC 0x03000 /* TX DMA Control - RW */
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* TX Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* TX Data FIFO Packet Count - RW */
+#define E1000_TDBAL 0x03800 /* TX Descriptor Base Address Low - RW */
+#define E1000_TDBAH 0x03804 /* TX Descriptor Base Address High - RW */
+#define E1000_TDLEN 0x03808 /* TX Descriptor Length - RW */
+#define E1000_TDH 0x03810 /* TX Descriptor Head - RW */
+#define E1000_TDT 0x03818 /* TX Descriptor Tail - RW */
+#define E1000_TIDV 0x03820 /* TX Interrupt Delay Value - RW */
+#define E1000_TXDCTL 0x03828 /* TX Descriptor Control - RW */
+#define E1000_TADV 0x0382C /* TX Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT 0x03830 /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_TARC0 0x03840 /* TX Arbitration Count (0) */
+#define E1000_TDBAL1 0x03900 /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1 0x03904 /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1 0x03908 /* TX Desc Length (1) - RW */
+#define E1000_TDH1 0x03910 /* TX Desc Head (1) - RW */
+#define E1000_TDT1 0x03918 /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1 0x03928 /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1 0x03940 /* TX Arbitration Count (1) */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* TX-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON RX Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON TX Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF TX Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow Control RX Unsupported Count - R/clr */
+#define E1000_PRC64 0x0405C /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127 0x04060 /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255 0x04064 /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511 0x04068 /* Packets RX (256-511 bytes) - R/clr */
+#define E1000_PRC1023 0x0406C /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522 0x04070 /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC 0x04074 /* Good Packets RX Count - R/clr */
+#define E1000_BPRC 0x04078 /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC 0x0407C /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC 0x04080 /* Good Packets TX Count - R/clr */
+#define E1000_GORCL 0x04088 /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH 0x0408C /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL 0x04090 /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH 0x04094 /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC 0x040A0 /* RX No Buffers Count - R/clr */
+#define E1000_RUC 0x040A4 /* RX Undersize Count - R/clr */
+#define E1000_RFC 0x040A8 /* RX Fragment Count - R/clr */
+#define E1000_ROC 0x040AC /* RX Oversize Count - R/clr */
+#define E1000_RJC 0x040B0 /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC 0x040B4 /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC 0x040BC /* Management Packets TX Count - R/clr */
+#define E1000_TORL 0x040C0 /* Total Octets RX Low - R/clr */
+#define E1000_TORH 0x040C4 /* Total Octets RX High - R/clr */
+#define E1000_TOTL 0x040C8 /* Total Octets TX Low - R/clr */
+#define E1000_TOTH 0x040CC /* Total Octets TX High - R/clr */
+#define E1000_TPR 0x040D0 /* Total Packets RX - R/clr */
+#define E1000_TPT 0x040D4 /* Total Packets TX - R/clr */
+#define E1000_PTC64 0x040D8 /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127 0x040DC /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255 0x040E0 /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511 0x040E4 /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023 0x040E8 /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522 0x040EC /* Packets TX (1024-1522 bytes) - R/clr */
+#define E1000_MPTC 0x040F0 /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC 0x040F4 /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC 0x04100 /* Interrupt Assertion Count */
+#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count
*/ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Minimum Threshold Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Descriptor Minimum Threshold Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_RXCSUM 0x05000 /* RX Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_IPAV 0x05838 /* IP Address Valid - RW */ +#define E1000_IP4AT 0x05840 /* IPv4 Address Table - RW Array */ +#define E1000_IP6AT 0x05880 /* IPv6 Address Table - RW Array */ +#define E1000_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define E1000_WUPM 0x05A00 /* Wakeup Packet Memory - RO A */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ +#define E1000_FFMT 0x09000 /* Flexible Filter Mask Table - RW Array */ +#define E1000_FFVT 0x09800 /* Flexible Filter Value Table - RW Array */ + +#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MDPHYA 0x0003C /* PHY address - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */ + +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GSCL_1 0x05B10 /* PCI-Ex Statistic Control #1 */ +#define E1000_GSCL_2 0x05B14 /* PCI-Ex Statistic Control #2 */ +#define E1000_GSCL_3 0x05B18 /* PCI-Ex Statistic Control #3 */ +#define E1000_GSCL_4 0x05B1C /* PCI-Ex Statistic Control #4 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_CPUVEC 0x02C10 /* CPU Vector Register - RW */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA 0x05C00 /* Redirection Table - RW Array */ +#define E1000_RSSRK 0x05C80 /* RSS Random Key - RW Array */ +#define E1000_RSSIM 0x05864 /* RSS Interrupt Mask */ +#define E1000_RSSIR 0x05868 /* RSS Interrupt Request */ +/* Register Set (82542) + * + * Some of the 82542 registers are located at different offsets than they are + * in more current versions of the 8254x. Despite the difference in location, + * the registers function in the same manner. 
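+ * For example, the receive descriptor base address low register sits at
+ * offset 0x02800 (E1000_RDBAL) on those MACs, while the 82542 places it
+ * at 0x00110 (E1000_82542_RDBAL below).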
+ */ +#define E1000_82542_CTL_AUX E1000_CTL_AUX +#define E1000_82542_CTRL E1000_CTRL +#define E1000_82542_CTRL_DUP E1000_CTRL_DUP +#define E1000_82542_STATUS E1000_STATUS +#define E1000_82542_EECD E1000_EECD +#define E1000_82542_EERD E1000_EERD +#define E1000_82542_CTRL_EXT E1000_CTRL_EXT +#define E1000_82542_FLA E1000_FLA +#define E1000_82542_MDIC E1000_MDIC +#define E1000_82542_SCTL E1000_SCTL +#define E1000_82542_FEXTNVM E1000_FEXTNVM +#define E1000_82542_FCAL E1000_FCAL +#define E1000_82542_FCAH E1000_FCAH +#define E1000_82542_FCT E1000_FCT +#define E1000_82542_VET E1000_VET +#define E1000_82542_RA 0x00040 +#define E1000_82542_ICR E1000_ICR +#define E1000_82542_ITR E1000_ITR +#define E1000_82542_ICS E1000_ICS +#define E1000_82542_IMS E1000_IMS +#define E1000_82542_IMC E1000_IMC +#define E1000_82542_RCTL E1000_RCTL +#define E1000_82542_RDTR 0x00108 +#define E1000_82542_RDFH E1000_RDFH +#define E1000_82542_RDFT E1000_RDFT +#define E1000_82542_RDFHS E1000_RDFHS +#define E1000_82542_RDFTS E1000_RDFTS +#define E1000_82542_RDFPC E1000_RDFPC +#define E1000_82542_RDBAL 0x00110 +#define E1000_82542_RDBAH 0x00114 +#define E1000_82542_RDLEN 0x00118 +#define E1000_82542_RDH 0x00120 +#define E1000_82542_RDT 0x00128 +#define E1000_82542_RDTR0 E1000_82542_RDTR +#define E1000_82542_RDBAL0 E1000_82542_RDBAL +#define E1000_82542_RDBAH0 E1000_82542_RDBAH +#define E1000_82542_RDLEN0 E1000_82542_RDLEN +#define E1000_82542_RDH0 E1000_82542_RDH +#define E1000_82542_RDT0 E1000_82542_RDT +#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication + * RX Control - RW */ +#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8)) +#define E1000_82542_RDBAH3 0x02B04 /* RX Desc Base High Queue 3 - RW */ +#define E1000_82542_RDBAL3 0x02B00 /* RX Desc Low Queue 3 - RW */ +#define E1000_82542_RDLEN3 0x02B08 /* RX Desc Length Queue 3 - RW */ +#define E1000_82542_RDH3 0x02B10 /* RX Desc Head Queue 3 - RW */ +#define E1000_82542_RDT3 0x02B18 /* RX Desc Tail Queue 3 - RW */ +#define E1000_82542_RDBAL2 0x02A00 /* RX Desc Base Low Queue 2 - RW */ +#define E1000_82542_RDBAH2 0x02A04 /* RX Desc Base High Queue 2 - RW */ +#define E1000_82542_RDLEN2 0x02A08 /* RX Desc Length Queue 2 - RW */ +#define E1000_82542_RDH2 0x02A10 /* RX Desc Head Queue 2 - RW */ +#define E1000_82542_RDT2 0x02A18 /* RX Desc Tail Queue 2 - RW */ +#define E1000_82542_RDTR1 0x00130 +#define E1000_82542_RDBAL1 0x00138 +#define E1000_82542_RDBAH1 0x0013C +#define E1000_82542_RDLEN1 0x00140 +#define E1000_82542_RDH1 0x00148 +#define E1000_82542_RDT1 0x00150 +#define E1000_82542_FCRTH 0x00160 +#define E1000_82542_FCRTL 0x00168 +#define E1000_82542_FCTTV E1000_FCTTV +#define E1000_82542_TXCW E1000_TXCW +#define E1000_82542_RXCW E1000_RXCW +#define E1000_82542_MTA 0x00200 +#define E1000_82542_TCTL E1000_TCTL +#define E1000_82542_TCTL_EXT E1000_TCTL_EXT +#define E1000_82542_TIPG E1000_TIPG +#define E1000_82542_TDBAL 0x00420 +#define E1000_82542_TDBAH 0x00424 +#define E1000_82542_TDLEN 0x00428 +#define E1000_82542_TDH 0x00430 +#define E1000_82542_TDT 0x00438 +#define E1000_82542_TIDV 0x00440 +#define E1000_82542_TBT E1000_TBT +#define E1000_82542_AIT E1000_AIT +#define E1000_82542_VFTA 0x00600 +#define E1000_82542_LEDCTL E1000_LEDCTL +#define E1000_82542_PBA E1000_PBA +#define E1000_82542_PBS E1000_PBS +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_EEARBC E1000_EEARBC +#define E1000_82542_FLASHT E1000_FLASHT +#define E1000_82542_EEWR E1000_EEWR +#define E1000_82542_FLSWCTL E1000_FLSWCTL +#define E1000_82542_FLSWDATA 
E1000_FLSWDATA +#define E1000_82542_FLSWCNT E1000_FLSWCNT +#define E1000_82542_FLOP E1000_FLOP +#define E1000_82542_EXTCNF_CTRL E1000_EXTCNF_CTRL +#define E1000_82542_EXTCNF_SIZE E1000_EXTCNF_SIZE +#define E1000_82542_PHY_CTRL E1000_PHY_CTRL +#define E1000_82542_ERT E1000_ERT +#define E1000_82542_RXDCTL E1000_RXDCTL +#define E1000_82542_RXDCTL1 E1000_RXDCTL1 +#define E1000_82542_RADV E1000_RADV +#define E1000_82542_RSRPD E1000_RSRPD +#define E1000_82542_TXDMAC E1000_TXDMAC +#define E1000_82542_KABGTXD E1000_KABGTXD +#define E1000_82542_TDFHS E1000_TDFHS +#define E1000_82542_TDFTS E1000_TDFTS +#define E1000_82542_TDFPC E1000_TDFPC +#define E1000_82542_TXDCTL E1000_TXDCTL +#define E1000_82542_TADV E1000_TADV +#define E1000_82542_TSPMT E1000_TSPMT +#define E1000_82542_CRCERRS E1000_CRCERRS +#define E1000_82542_ALGNERRC E1000_ALGNERRC +#define E1000_82542_SYMERRS E1000_SYMERRS +#define E1000_82542_RXERRC E1000_RXERRC +#define E1000_82542_MPC E1000_MPC +#define E1000_82542_SCC E1000_SCC +#define E1000_82542_ECOL E1000_ECOL +#define E1000_82542_MCC E1000_MCC +#define E1000_82542_LATECOL E1000_LATECOL +#define E1000_82542_COLC E1000_COLC +#define E1000_82542_DC E1000_DC +#define E1000_82542_TNCRS E1000_TNCRS +#define E1000_82542_SEC E1000_SEC +#define E1000_82542_CEXTERR E1000_CEXTERR +#define E1000_82542_RLEC E1000_RLEC +#define E1000_82542_XONRXC E1000_XONRXC +#define E1000_82542_XONTXC E1000_XONTXC +#define E1000_82542_XOFFRXC E1000_XOFFRXC +#define E1000_82542_XOFFTXC E1000_XOFFTXC +#define E1000_82542_FCRUC E1000_FCRUC +#define E1000_82542_PRC64 E1000_PRC64 +#define E1000_82542_PRC127 E1000_PRC127 +#define E1000_82542_PRC255 E1000_PRC255 +#define E1000_82542_PRC511 E1000_PRC511 +#define E1000_82542_PRC1023 E1000_PRC1023 +#define E1000_82542_PRC1522 E1000_PRC1522 +#define E1000_82542_GPRC E1000_GPRC +#define E1000_82542_BPRC E1000_BPRC +#define E1000_82542_MPRC E1000_MPRC +#define E1000_82542_GPTC E1000_GPTC +#define E1000_82542_GORCL E1000_GORCL +#define E1000_82542_GORCH E1000_GORCH +#define E1000_82542_GOTCL E1000_GOTCL +#define E1000_82542_GOTCH E1000_GOTCH +#define E1000_82542_RNBC E1000_RNBC +#define E1000_82542_RUC E1000_RUC +#define E1000_82542_RFC E1000_RFC +#define E1000_82542_ROC E1000_ROC +#define E1000_82542_RJC E1000_RJC +#define E1000_82542_MGTPRC E1000_MGTPRC +#define E1000_82542_MGTPDC E1000_MGTPDC +#define E1000_82542_MGTPTC E1000_MGTPTC +#define E1000_82542_TORL E1000_TORL +#define E1000_82542_TORH E1000_TORH +#define E1000_82542_TOTL E1000_TOTL +#define E1000_82542_TOTH E1000_TOTH +#define E1000_82542_TPR E1000_TPR +#define E1000_82542_TPT E1000_TPT +#define E1000_82542_PTC64 E1000_PTC64 +#define E1000_82542_PTC127 E1000_PTC127 +#define E1000_82542_PTC255 E1000_PTC255 +#define E1000_82542_PTC511 E1000_PTC511 +#define E1000_82542_PTC1023 E1000_PTC1023 +#define E1000_82542_PTC1522 E1000_PTC1522 +#define E1000_82542_MPTC E1000_MPTC +#define E1000_82542_BPTC E1000_BPTC +#define E1000_82542_TSCTC E1000_TSCTC +#define E1000_82542_TSCTFC E1000_TSCTFC +#define E1000_82542_RXCSUM E1000_RXCSUM +#define E1000_82542_WUC E1000_WUC +#define E1000_82542_WUFC E1000_WUFC +#define E1000_82542_WUS E1000_WUS +#define E1000_82542_MANC E1000_MANC +#define E1000_82542_IPAV E1000_IPAV +#define E1000_82542_IP4AT E1000_IP4AT +#define E1000_82542_IP6AT E1000_IP6AT +#define E1000_82542_WUPL E1000_WUPL +#define E1000_82542_WUPM E1000_WUPM +#define E1000_82542_FFLT E1000_FFLT +#define E1000_82542_TDFH 0x08010 +#define E1000_82542_TDFT 0x08018 +#define E1000_82542_FFMT E1000_FFMT +#define 
E1000_82542_FFVT E1000_FFVT +#define E1000_82542_HOST_IF E1000_HOST_IF +#define E1000_82542_IAM E1000_IAM +#define E1000_82542_EEMNGCTL E1000_EEMNGCTL +#define E1000_82542_PSRCTL E1000_PSRCTL +#define E1000_82542_RAID E1000_RAID +#define E1000_82542_TARC0 E1000_TARC0 +#define E1000_82542_TDBAL1 E1000_TDBAL1 +#define E1000_82542_TDBAH1 E1000_TDBAH1 +#define E1000_82542_TDLEN1 E1000_TDLEN1 +#define E1000_82542_TDH1 E1000_TDH1 +#define E1000_82542_TDT1 E1000_TDT1 +#define E1000_82542_TXDCTL1 E1000_TXDCTL1 +#define E1000_82542_TARC1 E1000_TARC1 +#define E1000_82542_RFCTL E1000_RFCTL +#define E1000_82542_GCR E1000_GCR +#define E1000_82542_GSCL_1 E1000_GSCL_1 +#define E1000_82542_GSCL_2 E1000_GSCL_2 +#define E1000_82542_GSCL_3 E1000_GSCL_3 +#define E1000_82542_GSCL_4 E1000_GSCL_4 +#define E1000_82542_FACTPS E1000_FACTPS +#define E1000_82542_SWSM E1000_SWSM +#define E1000_82542_FWSM E1000_FWSM +#define E1000_82542_FFLT_DBG E1000_FFLT_DBG +#define E1000_82542_IAC E1000_IAC +#define E1000_82542_ICRXPTC E1000_ICRXPTC +#define E1000_82542_ICRXATC E1000_ICRXATC +#define E1000_82542_ICTXPTC E1000_ICTXPTC +#define E1000_82542_ICTXATC E1000_ICTXATC +#define E1000_82542_ICTXQEC E1000_ICTXQEC +#define E1000_82542_ICTXQMTC E1000_ICTXQMTC +#define E1000_82542_ICRXDMTC E1000_ICRXDMTC +#define E1000_82542_ICRXOC E1000_ICRXOC +#define E1000_82542_HICR E1000_HICR + +#define E1000_82542_CPUVEC E1000_CPUVEC +#define E1000_82542_MRQC E1000_MRQC +#define E1000_82542_RETA E1000_RETA +#define E1000_82542_RSSRK E1000_RSSRK +#define E1000_82542_RSSIM E1000_RSSIM +#define E1000_82542_RSSIR E1000_RSSIR +#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA +#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 txerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorcl; + u64 gorch; + u64 gotcl; + u64 gotch; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rlerrc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 torl; + u64 torh; + u64 totl; + u64 toth; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +/* Structure containing variables used by the shared code (e1000_hw.c) */ +struct e1000_hw { + u8 __iomem *hw_addr; + u8 __iomem *flash_address; + void __iomem *ce4100_gbe_mdio_base_virt; + e1000_mac_type mac_type; + e1000_phy_type phy_type; + u32 phy_init_script; + e1000_media_type media_type; + void *back; + struct e1000_shadow_ram *eeprom_shadow_ram; + u32 flash_bank_size; + u32 flash_base_addr; + e1000_fc_type fc; + e1000_bus_speed bus_speed; + e1000_bus_width bus_width; + e1000_bus_type bus_type; + struct e1000_eeprom_info eeprom; + e1000_ms_type master_slave; + e1000_ms_type original_master_slave; + e1000_ffe_config ffe_config_state; + u32 asf_firmware_present; + u32 eeprom_semaphore_present; + unsigned long io_base; + u32 phy_id; + u32 phy_revision; + u32 phy_addr; + u32 original_fc; + u32 txcw; + u32 autoneg_failed; + u32 max_frame_size; + u32 
min_frame_size; + u32 mc_filter_type; + u32 num_mc_addrs; + u32 collision_delta; + u32 tx_packet_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + bool tx_pkt_filtering; + struct e1000_host_mng_dhcp_cookie mng_cookie; + u16 phy_spd_default; + u16 autoneg_advertised; + u16 pci_cmd_word; + u16 fc_high_water; + u16 fc_low_water; + u16 fc_pause_time; + u16 current_ifs_val; + u16 ifs_min_val; + u16 ifs_max_val; + u16 ifs_step_size; + u16 ifs_ratio; + u16 device_id; + u16 vendor_id; + u16 subsystem_id; + u16 subsystem_vendor_id; + u8 revision_id; + u8 autoneg; + u8 mdix; + u8 forced_speed_duplex; + u8 wait_autoneg_complete; + u8 dma_fairness; + u8 mac_addr[NODE_ADDRESS_SIZE]; + u8 perm_mac_addr[NODE_ADDRESS_SIZE]; + bool disable_polarity_correction; + bool speed_downgraded; + e1000_smart_speed smart_speed; + e1000_dsp_config dsp_config_state; + bool get_link_status; + bool serdes_has_link; + bool tbi_compatibility_en; + bool tbi_compatibility_on; + bool laa_is_present; + bool phy_reset_disable; + bool initialize_hw_bits_disable; + bool fc_send_xon; + bool fc_strict_ieee; + bool report_tx_early; + bool adaptive_ifs; + bool ifs_params_forced; + bool in_ifs_mode; + bool mng_reg_access_disabled; + bool leave_av_bit_off; + bool bad_tx_carr_stats_fd; + bool has_smbus; +}; + +#define E1000_EEPROM_SWDPIN0 0x0001 /* SWDPIN 0 EEPROM Value */ +#define E1000_EEPROM_LED_LOGIC 0x0020 /* Led Logic Word */ +#define E1000_EEPROM_RW_REG_DATA 16 /* Offset to data in EEPROM read/write registers */ +#define E1000_EEPROM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_EEPROM_RW_REG_START 1 /* First bit for telling part to start operation */ +#define E1000_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_EEPROM_POLL_WRITE 1 /* Flag for polling for write complete */ +#define E1000_EEPROM_POLL_READ 0 /* Flag for polling for read complete */ +/* Register Bit Masks */ +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_BEM 0x00000002 /* Endian Mode.0=little,1=big */ +#define E1000_CTRL_PRIOR 0x00000004 /* Priority on PCI. 0=rx,1=fair */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_TME 0x00000010 /* Test mode. 
0=normal,1=test */ +#define E1000_CTRL_SLE 0x00000020 /* Serial Link on 0=dis,1=en */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_BEM32 0x00000400 /* Big Endian 32 mode */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_D_UD_EN 0x00002000 /* Dock/Undock enable */ +#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */ +#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */ +#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_SWDPIN2 0x00100000 /* SWDPIN 2 value */ +#define E1000_CTRL_SWDPIN3 0x00200000 /* SWDPIN 3 value */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_SWDPIO1 0x00800000 /* SWDPIN 1 input or output */ +#define E1000_CTRL_SWDPIO2 0x01000000 /* SWDPIN 2 input or output */ +#define E1000_CTRL_SWDPIO3 0x02000000 /* SWDPIN 3 input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_RTE 0x20000000 /* Routing tag enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ +#define E1000_CTRL_SW2FW_INT 0x02000000 /* Initiate an interrupt to manageability engine */ + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_0 0x00000000 /* Function 0 */ +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_TBIMODE 0x00000020 /* TBI mode */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion + by EEPROM/Flash */ +#define E1000_STATUS_ASDV 0x00000300 /* Auto speed detect value */ +#define E1000_STATUS_DOCK_CI 0x00000800 /* Change in Dock/Undock state. Clear on write '0'. */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. 
*/ +#define E1000_STATUS_MTXCKOK 0x00000400 /* MTX clock running OK */ +#define E1000_STATUS_PCI66 0x00000800 /* In 66Mhz slot */ +#define E1000_STATUS_BUS64 0x00001000 /* In 64 bit slot */ +#define E1000_STATUS_PCIX_MODE 0x00002000 /* PCI-X mode */ +#define E1000_STATUS_PCIX_SPEED 0x0000C000 /* PCI-X bus speed */ +#define E1000_STATUS_BMC_SKU_0 0x00100000 /* BMC USB redirect disabled */ +#define E1000_STATUS_BMC_SKU_1 0x00200000 /* BMC SRAM disabled */ +#define E1000_STATUS_BMC_SKU_2 0x00400000 /* BMC SDRAM disabled */ +#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */ +#define E1000_STATUS_BMC_LITE 0x01000000 /* BMC external code execution disabled */ +#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */ +#define E1000_STATUS_FUSE_8 0x04000000 +#define E1000_STATUS_FUSE_9 0x08000000 +#define E1000_STATUS_SERDES0_DIS 0x10000000 /* SERDES disabled on port 0 */ +#define E1000_STATUS_SERDES1_DIS 0x20000000 /* SERDES disabled on port 1 */ + +/* Constants used to interpret the masked PCI-X bus speed. */ +#define E1000_STATUS_PCIX_SPEED_66 0x00000000 /* PCI-X bus speed 50-66 MHz */ +#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed 66-100 MHz */ +#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */ + +/* EEPROM/Flash Control */ +#define E1000_EECD_SK 0x00000001 /* EEPROM Clock */ +#define E1000_EECD_CS 0x00000002 /* EEPROM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* EEPROM Data In */ +#define E1000_EECD_DO 0x00000008 /* EEPROM Data Out */ +#define E1000_EECD_FWE_MASK 0x00000030 +#define E1000_EECD_FWE_DIS 0x00000010 /* Disable FLASH writes */ +#define E1000_EECD_FWE_EN 0x00000020 /* Enable FLASH writes */ +#define E1000_EECD_FWE_SHIFT 4 +#define E1000_EECD_REQ 0x00000040 /* EEPROM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* EEPROM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* EEPROM Present */ +#define E1000_EECD_SIZE 0x00000200 /* EEPROM Size (0=64 word 1=256 word) */ +#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type + * (0-small, 1-large) */ +#define E1000_EECD_TYPE 0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */ +#ifndef E1000_EEPROM_GRANT_ATTEMPTS +#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ +#endif +#define E1000_EECD_AUTO_RD 0x00000200 /* EEPROM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* EEprom Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_NVADDS 0x00018000 /* NVM Address Size */ +#define E1000_EECD_SELSHAD 0x00020000 /* Select Shadow RAM */ +#define E1000_EECD_INITSRAM 0x00040000 /* Initialize Shadow RAM */ +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SHADV 0x00200000 /* Shadow RAM Data Valid */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SECVAL_SHIFT 22 +#define E1000_STM_OPCODE 0xDB00 +#define E1000_HICR_FW_RESET 0xC0 + +#define E1000_SHADOW_RAM_WORDS 2048 +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC0 + +/* EEPROM Read */ +#define E1000_EERD_START 0x00000001 /* Start Read */ +#define E1000_EERD_DONE 0x00000010 /* Read Done */ +#define E1000_EERD_ADDR_SHIFT 8 +#define E1000_EERD_ADDR_MASK 0x0000FF00 /* Read Address */ +#define E1000_EERD_DATA_SHIFT 16 +#define E1000_EERD_DATA_MASK 0xFFFF0000 /* Read Data */ + +/* SPI EEPROM Status Register */ +#define EEPROM_STATUS_RDY_SPI 0x01 +#define EEPROM_STATUS_WEN_SPI 
0x02 +#define EEPROM_STATUS_BP0_SPI 0x04 +#define EEPROM_STATUS_BP1_SPI 0x08 +#define EEPROM_STATUS_WPEN_SPI 0x80 + +/* Extended Device Control */ +#define E1000_CTRL_EXT_GPI0_EN 0x00000001 /* Maps SDP4 to GPI0 */ +#define E1000_CTRL_EXT_GPI1_EN 0x00000002 /* Maps SDP5 to GPI1 */ +#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN +#define E1000_CTRL_EXT_GPI2_EN 0x00000004 /* Maps SDP6 to GPI2 */ +#define E1000_CTRL_EXT_GPI3_EN 0x00000008 /* Maps SDP7 to GPI3 */ +#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Defineable Pin 4 */ +#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Defineable Pin 5 */ +#define E1000_CTRL_EXT_PHY_INT E1000_CTRL_EXT_SDP5_DATA +#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Defineable Pin 6 */ +#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ +#define E1000_CTRL_EXT_SDP4_DIR 0x00000100 /* Direction of SDP4 0=in 1=out */ +#define E1000_CTRL_EXT_SDP5_DIR 0x00000200 /* Direction of SDP5 0=in 1=out */ +#define E1000_CTRL_EXT_SDP6_DIR 0x00000400 /* Direction of SDP6 0=in 1=out */ +#define E1000_CTRL_EXT_SDP7_DIR 0x00000800 /* Direction of SDP7 0=in 1=out */ +#define E1000_CTRL_EXT_ASDCHK 0x00001000 /* Initiate an ASD sequence */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_IPS 0x00004000 /* Invert Power State */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_TBI 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_KMRN 0x00000000 +#define E1000_CTRL_EXT_LINK_MODE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 +#define E1000_CTRL_EXT_WR_WMARK_MASK 0x03000000 +#define E1000_CTRL_EXT_WR_WMARK_256 0x00000000 +#define E1000_CTRL_EXT_WR_WMARK_320 0x01000000 +#define E1000_CTRL_EXT_WR_WMARK_384 0x02000000 +#define E1000_CTRL_EXT_WR_WMARK_448 0x03000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Interrupt acknowledge Auto-mask */ +#define E1000_CTRL_EXT_INT_TIMER_CLR 0x20000000 /* Clear Interrupt timers after IMS clear */ +#define E1000_CRTL_EXT_PB_PAREN 0x01000000 /* packet buffer parity error detection enabled */ +#define E1000_CTRL_EXT_DF_PAREN 0x02000000 /* descriptor FIFO parity error detection enable */ +#define E1000_CTRL_EXT_GHOST_PAREN 0x40000000 + +/* MDI Control */ +#define E1000_MDIC_DATA_MASK 0x0000FFFF +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_MASK 0x03E00000 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_INT_EN 0x20000000 +#define E1000_MDIC_ERROR 0x40000000 + +#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000 +#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000 +#define INTEL_CE_GBE_MDIC_GO 0x80000000 +#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000 + +#define E1000_KUMCTRLSTA_MASK 0x0000FFFF +#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 +#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KUMCTRLSTA_REN 0x00200000 + +#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL 0x00000000 +#define E1000_KUMCTRLSTA_OFFSET_CTRL 0x00000001 +#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL 0x00000002 +#define E1000_KUMCTRLSTA_OFFSET_DIAG 0x00000003 +#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS 
0x00000004 +#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM 0x00000009 +#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL 0x00000010 +#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES 0x0000001E +#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES 0x0000001F + +/* FIFO Control */ +#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS 0x00000008 +#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS 0x00000800 + +/* In-Band Control */ +#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT 0x00000500 +#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING 0x00000010 + +/* Half-Duplex Control */ +#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004 +#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT 0x00000000 + +#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL 0x0000001E + +#define E1000_KUMCTRLSTA_DIAG_FELPBK 0x2000 +#define E1000_KUMCTRLSTA_DIAG_NELPBK 0x1000 + +#define E1000_KUMCTRLSTA_K0S_100_EN 0x2000 +#define E1000_KUMCTRLSTA_K0S_GBE_EN 0x1000 +#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK 0x0003 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +#define E1000_PHY_CTRL_SPD_EN 0x00000001 +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 +#define E1000_PHY_CTRL_B2B_EN 0x00000080 + +/* LED Control */ +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_BLINK_RATE 0x0000020 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 +#define E1000_LEDCTL_LED1_MODE_MASK 0x00000F00 +#define E1000_LEDCTL_LED1_MODE_SHIFT 8 +#define E1000_LEDCTL_LED1_BLINK_RATE 0x0002000 +#define E1000_LEDCTL_LED1_IVRT 0x00004000 +#define E1000_LEDCTL_LED1_BLINK 0x00008000 +#define E1000_LEDCTL_LED2_MODE_MASK 0x000F0000 +#define E1000_LEDCTL_LED2_MODE_SHIFT 16 +#define E1000_LEDCTL_LED2_BLINK_RATE 0x00200000 +#define E1000_LEDCTL_LED2_IVRT 0x00400000 +#define E1000_LEDCTL_LED2_BLINK 0x00800000 +#define E1000_LEDCTL_LED3_MODE_MASK 0x0F000000 +#define E1000_LEDCTL_LED3_MODE_SHIFT 24 +#define E1000_LEDCTL_LED3_BLINK_RATE 0x20000000 +#define E1000_LEDCTL_LED3_IVRT 0x40000000 +#define E1000_LEDCTL_LED3_BLINK 0x80000000 + +#define E1000_LEDCTL_MODE_LINK_10_1000 0x0 +#define E1000_LEDCTL_MODE_LINK_100_1000 0x1 +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_ACTIVITY 0x3 +#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4 +#define E1000_LEDCTL_MODE_LINK_10 0x5 +#define E1000_LEDCTL_MODE_LINK_100 0x6 +#define E1000_LEDCTL_MODE_LINK_1000 0x7 +#define E1000_LEDCTL_MODE_PCIX_MODE 0x8 +#define E1000_LEDCTL_MODE_FULL_DUPLEX 0x9 +#define E1000_LEDCTL_MODE_COLLISION 0xA +#define E1000_LEDCTL_MODE_BUS_SPEED 0xB +#define E1000_LEDCTL_MODE_BUS_SIZE 0xC +#define E1000_LEDCTL_MODE_PAUSED 0xD +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Receive Address */ +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_TXQE 0x00000002 /* Transmit Queue empty */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. 
threshold (0) */ +#define E1000_ICR_RXO 0x00000040 /* rx overrun */ +#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ +#define E1000_ICR_MDAC 0x00000200 /* MDIO access complete */ +#define E1000_ICR_RXCFG 0x00000400 /* RX /c/ ordered set */ +#define E1000_ICR_GPI_EN0 0x00000800 /* GP Int 0 */ +#define E1000_ICR_GPI_EN1 0x00001000 /* GP Int 1 */ +#define E1000_ICR_GPI_EN2 0x00002000 /* GP Int 2 */ +#define E1000_ICR_GPI_EN3 0x00004000 /* GP Int 3 */ +#define E1000_ICR_TXD_LOW 0x00008000 +#define E1000_ICR_SRPD 0x00010000 +#define E1000_ICR_ACK 0x00020000 /* Receive Ack frame */ +#define E1000_ICR_MNG 0x00040000 /* Manageability event */ +#define E1000_ICR_DOCK 0x00080000 /* Dock/Undock */ +#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICR_HOST_ARB_PAR 0x00400000 /* host arb read buffer parity error */ +#define E1000_ICR_PB_PAR 0x00800000 /* packet buffer parity error */ +#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICR_ALL_PARITY 0x03F00000 /* all parity error bits */ +#define E1000_ICR_DSW 0x00000020 /* FW changed the status of DISSW bit in the FWSM */ +#define E1000_ICR_PHYINT 0x00001000 /* LAN connected device generates an interrupt */ +#define E1000_ICR_EPRST 0x00100000 /* ME hardware reset occurs */ + +/* Interrupt Cause Set */ +#define E1000_ICS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_ICS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_ICS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_ICS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_ICS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_ICS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_ICS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_ICS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_ICS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_ICS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_ICS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_ICS_SRPD E1000_ICR_SRPD +#define E1000_ICS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_ICS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_ICS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_ICS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_ICS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_ICS_DSW E1000_ICR_DSW +#define E1000_ICS_PHYINT E1000_ICR_PHYINT +#define E1000_ICS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMS_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMS_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMS_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMS_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMS_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMS_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMS_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMS_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMS_SRPD E1000_ICR_SRPD +#define E1000_IMS_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMS_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMS_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMS_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMS_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMS_DSW E1000_ICR_DSW +#define E1000_IMS_PHYINT E1000_ICR_PHYINT +#define E1000_IMS_EPRST E1000_ICR_EPRST + +/* Interrupt Mask Clear */ +#define E1000_IMC_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMC_TXQE E1000_ICR_TXQE /* Transmit Queue empty */ +#define E1000_IMC_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMC_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ +#define E1000_IMC_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. 
threshold */ +#define E1000_IMC_RXO E1000_ICR_RXO /* rx overrun */ +#define E1000_IMC_RXT0 E1000_ICR_RXT0 /* rx timer intr */ +#define E1000_IMC_MDAC E1000_ICR_MDAC /* MDIO access complete */ +#define E1000_IMC_RXCFG E1000_ICR_RXCFG /* RX /c/ ordered set */ +#define E1000_IMC_GPI_EN0 E1000_ICR_GPI_EN0 /* GP Int 0 */ +#define E1000_IMC_GPI_EN1 E1000_ICR_GPI_EN1 /* GP Int 1 */ +#define E1000_IMC_GPI_EN2 E1000_ICR_GPI_EN2 /* GP Int 2 */ +#define E1000_IMC_GPI_EN3 E1000_ICR_GPI_EN3 /* GP Int 3 */ +#define E1000_IMC_TXD_LOW E1000_ICR_TXD_LOW +#define E1000_IMC_SRPD E1000_ICR_SRPD +#define E1000_IMC_ACK E1000_ICR_ACK /* Receive Ack frame */ +#define E1000_IMC_MNG E1000_ICR_MNG /* Manageability event */ +#define E1000_IMC_DOCK E1000_ICR_DOCK /* Dock/Undock */ +#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */ +#define E1000_IMC_HOST_ARB_PAR E1000_ICR_HOST_ARB_PAR /* host arb read buffer parity error */ +#define E1000_IMC_PB_PAR E1000_ICR_PB_PAR /* packet buffer parity error */ +#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */ +#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */ +#define E1000_IMC_DSW E1000_ICR_DSW +#define E1000_IMC_PHYINT E1000_ICR_PHYINT +#define E1000_IMC_EPRST E1000_ICR_EPRST + +/* Receive Control */ +#define E1000_RCTL_RST 0x00000001 /* Software reset */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_SLP 0x00000080 /* serial link loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_MASK 0x00000C00 /* Descriptor type mask */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_QUAT 0x00000100 /* rx desc min threshold size */ +#define E1000_RCTL_RDMTS_EIGTH 0x00000200 /* rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_0 0x00000000 /* multicast offset 11:0 */ +#define E1000_RCTL_MO_1 0x00001000 /* multicast offset 12:1 */ +#define E1000_RCTL_MO_2 0x00002000 /* multicast offset 13:2 */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_MDR 0x00004000 /* multicast desc ring 0 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define 
E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* discard pause frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ +#define E1000_RCTL_FLXBUF_MASK 0x78000000 /* Flexible buffer size */ +#define E1000_RCTL_FLXBUF_SHIFT 27 /* Flexible buffer shift */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SW_W_SYNC definitions */ +#define E1000_SWFW_EEP_SM 0x0001 +#define E1000_SWFW_PHY0_SM 0x0002 +#define E1000_SWFW_PHY1_SM 0x0004 +#define E1000_SWFW_MAC_CSR_SM 0x0008 + +/* Receive Descriptor */ +#define E1000_RDT_DELAY 0x0000ffff /* Delay timer (1=1024us) */ +#define E1000_RDT_FPDB 0x80000000 /* Flush descriptor block */ +#define E1000_RDLEN_LEN 0x0007ff80 /* descriptor length */ +#define E1000_RDH_RDH 0x0000ffff /* receive descriptor head */ +#define E1000_RDT_RDT 0x0000ffff /* receive descriptor tail */ + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTH_XFCE 0x80000000 /* External Flow Control Enable */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Header split receive */ +#define E1000_RFCTL_ISCSI_DIS 0x00000001 +#define E1000_RFCTL_ISCSI_DWC_MASK 0x0000003E +#define E1000_RFCTL_ISCSI_DWC_SHIFT 1 +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_NFS_VER_MASK 0x00000300 +#define E1000_RFCTL_NFS_VER_SHIFT 8 +#define E1000_RFCTL_IPV6_DIS 0x00000400 +#define E1000_RFCTL_IPV6_XSUM_DIS 0x00000800 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_ACKD_DIS 0x00002000 +#define E1000_RFCTL_IPFRSP_DIS 0x00004000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Receive Descriptor Control */ +#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */ +#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */ +#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */ +#define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH
0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc. + still to be processed. */ +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_HD 0x00000040 /* TXCW half duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_RF 0x00003000 /* TXCW remote fault */ +#define E1000_TXCW_NP 0x00008000 /* TXCW next page */ +#define E1000_TXCW_CW 0x0000ffff /* TxConfigWord mask */ +#define E1000_TXCW_TXC 0x40000000 /* Transmit Config control */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_NC 0x04000000 /* Receive config no carrier */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_CC 0x10000000 /* Receive config change */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ +#define E1000_RXCW_ANC 0x80000000 /* Auto-neg complete */ + +/* Transmit Control */ +#define E1000_TCTL_RST 0x00000001 /* software reset */ +#define E1000_TCTL_EN 0x00000002 /* enable tx */ +#define E1000_TCTL_BCE 0x00000004 /* busy check enable */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_SWXOFF 0x00400000 /* SW Xoff transmission */ +#define E1000_TCTL_PBE 0x00800000 /* Packet Burst Enable */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_NRTU 0x02000000 /* No Re-transmit on underrun */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ +/* Extended Transmit Control */ +#define E1000_TCTL_EXT_BST_MASK 0x000003FF /* Backoff Slot Time */ +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */ + +/* Receive Checksum Control */ +#define E1000_RXCSUM_PCSS_MASK 0x000000FF /* Packet Checksum Start */ +#define E1000_RXCSUM_IPOFL 0x00000100 /* IPv4 checksum offload */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPV6OFL 0x00000400 /* IPv6 checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Multiple Receive Queue Control */ +#define E1000_MRQC_ENABLE_MASK 0x00000003 +#define E1000_MRQC_ENABLE_RSS_2Q 0x00000001 +#define E1000_MRQC_ENABLE_RSS_INT 0x00000004 +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 
/* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_SPM 0x80000000 /* Enable SPM */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +#define E1000_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ +#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */ +#define E1000_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ +#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */ +#define E1000_WUS_MAG 0x00000002 /* Magic Packet Received */ +#define E1000_WUS_EX 0x00000004 /* Directed Exact Received */ +#define E1000_WUS_MC 0x00000008 /* Directed Multicast Received */ +#define E1000_WUS_BC 0x00000010 /* Broadcast Received */ +#define E1000_WUS_ARP 0x00000020 /* ARP Request Packet Received */ +#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */ +#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */ +#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */ +#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */ +#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */ +#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */ +#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */ + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_R_ON_FORCE 0x00000004 /* Reset on Force TCO - RO */ +#define E1000_MANC_RMCP_EN 0x00000100 /* Enable RCMP 026Fh Filtering */ +#define E1000_MANC_0298_EN 0x00000200 /* Enable RCMP 0298h Filtering */ +#define E1000_MANC_IPV4_EN 0x00000400 /* Enable IPv4 */ +#define E1000_MANC_IPV6_EN 0x00000800 /* Enable IPv6 */ +#define E1000_MANC_SNAP_EN 0x00001000 /* Accept LLC/SNAP */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_NEIGHBOR_EN 0x00004000 /* Enable Neighbor Discovery + * Filtering */ +#define E1000_MANC_ARP_RES_EN 0x00008000 /* Enable ARP response Filtering */ +#define E1000_MANC_TCO_RESET 0x00010000 /* TCO Reset Occurred */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */ +#define E1000_MANC_RCV_ALL 0x00080000 /* Receive All Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address + * filtering */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host + * memory */ +#define 
E1000_MANC_EN_IP_ADDR_FILTER 0x00400000 /* Enable IP address + * filtering */ +#define E1000_MANC_EN_XSUM_FILTER 0x00800000 /* Enable checksum filtering */ +#define E1000_MANC_BR_EN 0x01000000 /* Enable broadcast filtering */ +#define E1000_MANC_SMB_REQ 0x01000000 /* SMBus Request */ +#define E1000_MANC_SMB_GNT 0x02000000 /* SMBus Grant */ +#define E1000_MANC_SMB_CLK_IN 0x04000000 /* SMBus Clock In */ +#define E1000_MANC_SMB_DATA_IN 0x08000000 /* SMBus Data In */ +#define E1000_MANC_SMB_DATA_OUT 0x10000000 /* SMBus Data Out */ +#define E1000_MANC_SMB_CLK_OUT 0x20000000 /* SMBus Clock Out */ + +#define E1000_MANC_SMB_DATA_OUT_SHIFT 28 /* SMBus Data Out Shift */ +#define E1000_MANC_SMB_CLK_OUT_SHIFT 29 /* SMBus Clock Out Shift */ + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +/* FW Semaphore Register */ +#define E1000_FWSM_MODE_MASK 0x0000000E /* FW mode */ +#define E1000_FWSM_MODE_SHIFT 1 +#define E1000_FWSM_FW_VALID 0x00008000 /* FW established a valid mode */ + +#define E1000_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI reset */ +#define E1000_FWSM_DISSW 0x10000000 /* FW disable SW Write Access */ +#define E1000_FWSM_SKUSEL_MASK 0x60000000 /* LAN SKU select */ +#define E1000_FWSM_SKUEL_SHIFT 29 +#define E1000_FWSM_SKUSEL_EMB 0x0 /* Embedded SKU */ +#define E1000_FWSM_SKUSEL_CONS 0x1 /* Consumer SKU */ +#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */ +#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */ + +/* FFLT Debug Register */ +#define E1000_FFLT_DBG_INVC 0x00100000 /* Invalid /C/ code handling */ + +typedef enum { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_interface_only +} e1000_mng_mode; + +/* Host Interface Control Register */ +#define E1000_HICR_EN 0x00000001 /* Enable Bit - RO */ +#define E1000_HICR_C 0x00000002 /* Driver sets this bit when done + * to put command in RAM */ +#define E1000_HICR_SV 0x00000004 /* Status Validity */ +#define E1000_HICR_FWR 0x00000080 /* FW reset.
Set by the Host */ + +/* Host Interface Command Interface - Address range 0x8800-0x8EFF */ +#define E1000_HI_MAX_DATA_LENGTH 252 /* Host Interface data length */ +#define E1000_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Number of bytes in range */ +#define E1000_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Number of dwords in range */ +#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */ + +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; /* I/F bits for command, status for return */ + u8 checksum; +}; +struct e1000_host_command_info { + struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */ + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */ +}; + +/* Host SMB register #0 */ +#define E1000_HSMC0R_CLKIN 0x00000001 /* SMB Clock in */ +#define E1000_HSMC0R_DATAIN 0x00000002 /* SMB Data in */ +#define E1000_HSMC0R_DATAOUT 0x00000004 /* SMB Data out */ +#define E1000_HSMC0R_CLKOUT 0x00000008 /* SMB Clock out */ + +/* Host SMB register #1 */ +#define E1000_HSMC1R_CLKIN E1000_HSMC0R_CLKIN +#define E1000_HSMC1R_DATAIN E1000_HSMC0R_DATAIN +#define E1000_HSMC1R_DATAOUT E1000_HSMC0R_DATAOUT +#define E1000_HSMC1R_CLKOUT E1000_HSMC0R_CLKOUT + +/* FW Status Register */ +#define E1000_FWSTS_FWS_MASK 0x000000FF /* FW Status */ + +/* Wake Up Packet Length */ +#define E1000_WUPL_LENGTH_MASK 0x0FFF /* Only the lower 12 bits are valid */ + +#define E1000_MDALIGN 4096 + +/* PCI-Ex registers*/ + +/* PCI-Ex Control Register */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +/* Function Active and Power State to MNG */ +#define E1000_FACTPS_FUNC0_POWER_STATE_MASK 0x00000003 +#define E1000_FACTPS_LAN0_VALID 0x00000004 +#define E1000_FACTPS_FUNC0_AUX_EN 0x00000008 +#define E1000_FACTPS_FUNC1_POWER_STATE_MASK 0x000000C0 +#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT 6 +#define E1000_FACTPS_LAN1_VALID 0x00000100 +#define E1000_FACTPS_FUNC1_AUX_EN 0x00000200 +#define E1000_FACTPS_FUNC2_POWER_STATE_MASK 0x00003000 +#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT 12 +#define E1000_FACTPS_IDE_ENABLE 0x00004000 +#define E1000_FACTPS_FUNC2_AUX_EN 0x00008000 +#define E1000_FACTPS_FUNC3_POWER_STATE_MASK 0x000C0000 +#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT 18 +#define E1000_FACTPS_SP_ENABLE 0x00100000 +#define E1000_FACTPS_FUNC3_AUX_EN 0x00200000 +#define E1000_FACTPS_FUNC4_POWER_STATE_MASK 0x03000000 +#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT 24 +#define E1000_FACTPS_IPMI_ENABLE 0x04000000 +#define E1000_FACTPS_FUNC4_AUX_EN 0x08000000 +#define E1000_FACTPS_MNGCG 0x20000000 +#define E1000_FACTPS_LAN_FUNC_SEL 0x40000000 +#define E1000_FACTPS_PM_STATE_CHANGED 0x80000000 + +/* PCI-Ex Config Space */ +#define PCI_EX_LINK_STATUS 0x12 +#define PCI_EX_LINK_WIDTH_MASK 0x3F0 +#define PCI_EX_LINK_WIDTH_SHIFT 4 + +/* EEPROM Commands - Microwire */ +#define EEPROM_READ_OPCODE_MICROWIRE 0x6 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5 /* EEPROM write opcode 
*/ +#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7 /* EEPROM erase opcode */ +#define EEPROM_EWEN_OPCODE_MICROWIRE 0x13 /* EEPROM erase/write enable */ +#define EEPROM_EWDS_OPCODE_MICROWIRE 0x10 /* EEPROM erase/write disable */ + +/* EEPROM Commands - SPI */ +#define EEPROM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +#define EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +#define EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Enable latch */ +#define EEPROM_WRDI_OPCODE_SPI 0x04 /* EEPROM reset Write Enable latch */ +#define EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status register */ +#define EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status register */ +#define EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +#define EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +#define EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + +/* EEPROM Size definitions */ +#define EEPROM_WORD_SIZE_SHIFT 6 +#define EEPROM_SIZE_SHIFT 10 +#define EEPROM_SIZE_MASK 0x1C00 + +/* EEPROM Word Offsets */ +#define EEPROM_COMPAT 0x0003 +#define EEPROM_ID_LED_SETTINGS 0x0004 +#define EEPROM_VERSION 0x0005 +#define EEPROM_SERDES_AMPLITUDE 0x0006 /* For SERDES output amplitude adjustment. */ +#define EEPROM_PHY_CLASS_WORD 0x0007 +#define EEPROM_INIT_CONTROL1_REG 0x000A +#define EEPROM_INIT_CONTROL2_REG 0x000F +#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010 +#define EEPROM_INIT_CONTROL3_PORT_B 0x0014 +#define EEPROM_INIT_3GIO_3 0x001A +#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020 +#define EEPROM_INIT_CONTROL3_PORT_A 0x0024 +#define EEPROM_CFG 0x0012 +#define EEPROM_FLASH_VERSION 0x0032 +#define EEPROM_CHECKSUM_REG 0x003F + +#define E1000_EEPROM_CFG_DONE 0x00040000 /* MNG config cycle done */ +#define E1000_EEPROM_CFG_DONE_PORT_1 0x00080000 /* ...for second port */ + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */ +#define EEPROM_SERDES_AMPLITUDE_MASK 0x000F + +/* Mask bit for PHY class in Word 7 of the EEPROM */ +#define EEPROM_PHY_CLASS_A 0x8000 + +/* Mask bits for fields in Word 0x0a of the EEPROM */ +#define EEPROM_WORD0A_ILOS 0x0010 +#define EEPROM_WORD0A_SWDPIO 0x01E0 +#define EEPROM_WORD0A_LRST 0x0200 +#define EEPROM_WORD0A_FD 0x0400 +#define EEPROM_WORD0A_66MHZ 0x0800 + +/* Mask bits for fields in Word 0x0f of the EEPROM */ +#define EEPROM_WORD0F_PAUSE_MASK 0x3000 +#define EEPROM_WORD0F_PAUSE 0x1000 +#define EEPROM_WORD0F_ASM_DIR 0x2000 +#define EEPROM_WORD0F_ANE 0x0800 +#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0 +#define EEPROM_WORD0F_LPLU 0x0001 + +/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */ +#define EEPROM_WORD1020_GIGA_DISABLE 0x0010 +#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008 + +/* Mask bits for fields in Word 0x1a of the EEPROM */ +#define EEPROM_WORD1A_ASPM_MASK 0x000C + 
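/* Editor's note -- illustrative sketch, not part of the committed driver
 * sources: the E1000_EERD_* bits defined earlier in this header imply a
 * simple handshake for reading one EEPROM word through the EEPROM Read
 * register: write the word offset plus START, poll DONE, then pull the word
 * out of the DATA field.  A minimal sketch follows, in which er32()/ew32()
 * and EXAMPLE_REG_EERD are assumed stand-ins for the driver's real MMIO
 * helpers and register map, and the poll bound simply reuses
 * E1000_EEPROM_GRANT_ATTEMPTS from above.  A caller could read words 0x0000
 * through EEPROM_CHECKSUM_REG this way and compare their sum against the
 * checksum constant defined just below.
 */
extern u32 er32(u32 reg);           /* hypothetical MMIO read helper */
extern void ew32(u32 reg, u32 val); /* hypothetical MMIO write helper */
#define EXAMPLE_REG_EERD 0x00014    /* assumed offset of the EERD register */

static int example_read_eeprom_word(u16 offset, u16 *data)
{
	u32 eerd;
	int i;

	/* Kick off the read: word offset in the ADDR field, START in bit 0 */
	ew32(EXAMPLE_REG_EERD,
	     ((u32)offset << E1000_EERD_ADDR_SHIFT) | E1000_EERD_START);

	/* Bounded poll for DONE so a dead part cannot hang the caller */
	for (i = 0; i < E1000_EEPROM_GRANT_ATTEMPTS; i++) {
		eerd = er32(EXAMPLE_REG_EERD);
		if (eerd & E1000_EERD_DONE) {
			*data = (u16)((eerd & E1000_EERD_DATA_MASK) >>
				      E1000_EERD_DATA_SHIFT);
			return 0;
		}
	}
	return -1; /* timed out */
}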
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */ +#define EEPROM_SUM 0xBABA + +/* EEPROM Map defines (WORD OFFSETS)*/ +#define EEPROM_NODE_ADDRESS_BYTE_0 0 +#define EEPROM_PBA_BYTE_1 8 + +#define EEPROM_RESERVED_WORD 0xFFFF + +/* EEPROM Map Sizes (Byte Counts) */ +#define PBA_SIZE 4 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +/* Collision distance is a 0-based value that applies to + * half-duplex-capable hardware only. */ +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLLISION_DISTANCE_82542 64 +#define E1000_FDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_HDX_COLLISION_DISTANCE E1000_COLLISION_DISTANCE +#define E1000_COLD_SHIFT 12 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82542_TIPG_IPGT 10 +#define DEFAULT_82543_TIPG_IPGT_FIBER 9 +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF +#define E1000_TIPG_IPGR1_MASK 0x000FFC00 +#define E1000_TIPG_IPGR2_MASK 0x3FF00000 + +#define DEFAULT_82542_TIPG_IPGR1 2 +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82542_TIPG_IPGR2 10 +#define DEFAULT_82543_TIPG_IPGR2 6 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define E1000_TXDMAC_DPP 0x00000001 + +/* Adaptive IFS defines */ +#define TX_THRESHOLD_START 8 +#define TX_THRESHOLD_INCREMENT 10 +#define TX_THRESHOLD_DECREMENT 1 +#define TX_THRESHOLD_STOP 190 +#define TX_THRESHOLD_DISABLE 0 +#define TX_THRESHOLD_TIMER_MS 10000 +#define MIN_NUM_XMITS 1000 +#define IFS_MAX 80 +#define IFS_STEP 10 +#define IFS_MIN 40 +#define IFS_RATIO 4 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE 0x00000002 +#define E1000_EXTCNF_CTRL_D_UD_ENABLE 0x00000004 +#define E1000_EXTCNF_CTRL_D_UD_LATENCY 0x00000008 +#define E1000_EXTCNF_CTRL_D_UD_OWNER 0x00000010 +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER 0x0FFF0000 + +#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH 0x000000FF +#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH 0x0000FF00 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH 0x00FF0000 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */ +#define E1000_PBA_12K 0x000C /* 12KB, default Rx allocation */ +#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */ +#define E1000_PBA_20K 0x0014 +#define E1000_PBA_22K 0x0016 +#define E1000_PBA_24K 0x0018 +#define E1000_PBA_30K 0x001E +#define E1000_PBA_32K 0x0020 +#define E1000_PBA_34K 0x0022 +#define E1000_PBA_38K 0x0026 +#define E1000_PBA_40K 0x0028 +#define E1000_PBA_48K 0x0030 /* 48KB, default RX allocation */ + +#define E1000_PBS_16K E1000_PBA_16K + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* The historical defaults for the flow control values are given below. 
*/ +#define FC_DEFAULT_HI_THRESH (0x8000) /* 32KB */ +#define FC_DEFAULT_LO_THRESH (0x4000) /* 16KB */ +#define FC_DEFAULT_TX_TIMER (0x100) /* ~130 us */ + +/* PCIX Config space */ +#define PCIX_COMMAND_REGISTER 0xE6 +#define PCIX_STATUS_REGISTER_LO 0xE8 +#define PCIX_STATUS_REGISTER_HI 0xEA + +#define PCIX_COMMAND_MMRBC_MASK 0x000C +#define PCIX_COMMAND_MMRBC_SHIFT 0x2 +#define PCIX_STATUS_HI_MMRBC_MASK 0x0060 +#define PCIX_STATUS_HI_MMRBC_SHIFT 0x5 +#define PCIX_STATUS_HI_MMRBC_4K 0x3 +#define PCIX_STATUS_HI_MMRBC_2K 0x2 + +/* Number of bits required to shift right the "pause" bits from the + * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register. + */ +#define PAUSE_SHIFT 5 + +/* Number of bits required to shift left the "SWDPIO" bits from the + * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register. + */ +#define SWDPIO_SHIFT 17 + +/* Number of bits required to shift left the "SWDPIO_EXT" bits from the + * EEPROM word F (bits 7:4) to the bits 11:8 of The Extended CTRL register. + */ +#define SWDPIO__EXT_SHIFT 4 + +/* Number of bits required to shift left the "ILOS" bit from the EEPROM + * (bit 4) to the "ILOS" (bit 7) field in the CTRL register. + */ +#define ILOS_SHIFT 3 + +#define RECEIVE_BUFFER_ALIGN_SIZE (256) + +/* Number of milliseconds we wait for auto-negotiation to complete */ +#define LINK_UP_TIMEOUT 500 + +/* Number of milliseconds we wait for EEPROM auto read bit done after MAC reset */ +#define AUTO_READ_DONE_TIMEOUT 10 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 + +#define E1000_TX_BUFFER_SIZE ((u32)1514) + +/* The carrier extension symbol, as received by the NIC. */ +#define CARRIER_EXTENSION 0x0F + +/* TBI_ACCEPT macro definition: + * + * This macro requires: + * adapter = a pointer to struct e1000_hw + * status = the 8 bit status field of the RX descriptor with EOP set + * errors = the 8 bit error field of the RX descriptor with EOP set + * length = the sum of all the length fields of the RX descriptors that + * make up the current frame + * last_byte = the last byte of the frame DMAed by the hardware + * The macro also reads adapter->min_frame_size and adapter->max_frame_size, + * the minimum and maximum frame lengths we want to accept. + * + * This macro is a conditional that should be used in the interrupt + * handler's Rx processing routine when RxErrors have been detected. + * + * Typical use: + * ... + * if (TBI_ACCEPT(adapter, status, errors, frame_length, last_byte)) { + * accept_frame = true; + * e1000_tbi_adjust_stats(adapter, MacAddress); + * frame_length--; + * } else { + * accept_frame = false; + * } + * ... + */ + +#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \ + ((adapter)->tbi_compatibility_on && \ + (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \ + ((last_byte) == CARRIER_EXTENSION) && \ + (((status) & E1000_RXD_STAT_VP) ? \ + (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \ + ((length) <= ((adapter)->max_frame_size + 1))) : \ + (((length) > (adapter)->min_frame_size) && \ + ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1))))) + +/* Structures, enums, and macros for the PHY */ + +/* Bit definitions for the Management Data IO (MDIO) and Management Data + * Clock (MDC) pins in the Device Control Register.
+ */ +#define E1000_CTRL_PHY_RESET_DIR E1000_CTRL_SWDPIO0 +#define E1000_CTRL_PHY_RESET E1000_CTRL_SWDPIN0 +#define E1000_CTRL_MDIO_DIR E1000_CTRL_SWDPIO2 +#define E1000_CTRL_MDIO E1000_CTRL_SWDPIN2 +#define E1000_CTRL_MDC_DIR E1000_CTRL_SWDPIO3 +#define E1000_CTRL_MDC E1000_CTRL_SWDPIN3 +#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR +#define E1000_CTRL_PHY_RESET4 E1000_CTRL_EXT_SDP4_DATA + +/* PHY 1000 MII Register/Bit Definitions */ +/* PHY Registers defined by IEEE */ +#define PHY_CTRL 0x00 /* Control Register */ +#define PHY_STATUS 0x01 /* Status Register */ +#define PHY_ID1 0x02 /* Phy Id Reg (word 1) */ +#define PHY_ID2 0x03 /* Phy Id Reg (word 2) */ +#define PHY_AUTONEG_ADV 0x04 /* Autoneg Advertisement */ +#define PHY_LP_ABILITY 0x05 /* Link Partner Ability (Base Page) */ +#define PHY_AUTONEG_EXP 0x06 /* Autoneg Expansion Reg */ +#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */ +#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */ +#define PHY_1000T_CTRL 0x09 /* 1000Base-T Control Reg */ +#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define PHY_EXT_STATUS 0x0F /* Extended Status Reg */ + +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF /* Registers equal on all pages */ + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_INT_ENABLE 0x12 /* Interrupt Enable Register */ +#define M88E1000_INT_STATUS 0x13 /* Interrupt Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ +#define M88E1000_RX_ERR_CNTR 0x15 /* Receive Error Counter */ + +#define M88E1000_PHY_EXT_CTRL 0x1A /* PHY extend control register */ +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ +#define M88E1000_PHY_VCO_REG_BIT8 0x100 /* Bits 8 & 11 are adjusted for */ +#define M88E1000_PHY_VCO_REG_BIT11 0x800 /* improved BER performance */ + +#define IGP01E1000_IEEE_REGS_PAGE 0x0000 +#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300 +#define IGP01E1000_IEEE_FORCE_GIGA 0x0140 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* PHY Specific Control Register */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */ +#define IGP01E1000_GMII_FIFO 0x14 /* GMII FIFO Register */ +#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* PHY Page Select Core Register */ + +/* IGP01E1000 AGC Registers - stores the cable length values*/ +#define IGP01E1000_PHY_AGC_A 0x1172 +#define IGP01E1000_PHY_AGC_B 0x1272 +#define IGP01E1000_PHY_AGC_C 0x1472 +#define IGP01E1000_PHY_AGC_D 0x1872 + +/* IGP02E1000 AGC Registers for cable length values */ +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +/* IGP01E1000 DSP Reset Register */ +#define IGP01E1000_PHY_DSP_RESET 0x1F33 +#define IGP01E1000_PHY_DSP_SET 0x1F71 +#define IGP01E1000_PHY_DSP_FFE 0x1F35 + +#define IGP01E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_CHANNEL_NUM 4 + +#define IGP01E1000_PHY_AGC_PARAM_A 0x1171 
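/* Editor's note -- illustrative sketch, not part of the committed driver
 * sources: the E1000_MDIC_* fields defined earlier combine with the IEEE PHY
 * register map above to read a PHY register over MDIO.  The sketch programs
 * the PHY address, 5-bit register offset and READ opcode, polls READY, and
 * checks ERROR before trusting the DATA field.  It reuses the hypothetical
 * er32()/ew32() helpers declared in the earlier EEPROM sketch;
 * EXAMPLE_REG_MDIC is likewise an assumed register offset, and the poll
 * bound is arbitrary (a real driver also inserts small delays between
 * polls).  For instance, reading PHY_STATUS this way and testing the link
 * status bit defined just below gives a quick link-up check.
 */
#define EXAMPLE_REG_MDIC 0x00020 /* assumed offset of the MDIC register */

static int example_mdic_read_phy_reg(u32 phy_addr, u32 reg_addr, u16 *data)
{
	u32 mdic;
	int i;

	/* Compose the command word from the field masks defined above */
	mdic = (((reg_addr << E1000_MDIC_REG_SHIFT) & E1000_MDIC_REG_MASK) |
		((phy_addr << E1000_MDIC_PHY_SHIFT) & E1000_MDIC_PHY_MASK) |
		E1000_MDIC_OP_READ);
	ew32(EXAMPLE_REG_MDIC, mdic);

	/* Wait for the MAC to finish clocking the transaction out on MDIO */
	for (i = 0; i < 640; i++) {
		mdic = er32(EXAMPLE_REG_MDIC);
		if (mdic & E1000_MDIC_READY)
			break;
	}
	if (!(mdic & E1000_MDIC_READY) || (mdic & E1000_MDIC_ERROR))
		return -1;

	*data = (u16)(mdic & E1000_MDIC_DATA_MASK);
	return 0;
}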
+#define IGP01E1000_PHY_AGC_PARAM_B 0x1271 +#define IGP01E1000_PHY_AGC_PARAM_C 0x1471 +#define IGP01E1000_PHY_AGC_PARAM_D 0x1871 + +#define IGP01E1000_PHY_EDAC_MU_INDEX 0xC000 +#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000 + +#define IGP01E1000_PHY_ANALOG_TX_STATE 0x2890 +#define IGP01E1000_PHY_ANALOG_CLASS_A 0x2000 +#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE 0x0004 +#define IGP01E1000_PHY_DSP_FFE_CM_CP 0x0069 + +#define IGP01E1000_PHY_DSP_FFE_DEFAULT 0x002A +/* IGP01E1000 PCS Initialization register - stores the polarity status when + * speed = 1000 Mbps. */ +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_PCS_CTRL_REG 0x00B5 + +#define IGP01E1000_ANALOG_REGS_PAGE 0x20C0 + +/* PHY Control Register */ +#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */ +#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */ +#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */ +#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */ +#define MII_CR_POWER_DOWN 0x0800 /* Power down */ +#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */ +#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */ +#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */ +#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */ + +/* PHY Status Register */ +#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */ +#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */ +#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */ +#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */ +#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */ +#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */ +#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */ +#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. 
status info in Reg 0x0F */ +#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */ +#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */ +#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */ +#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */ +#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */ +#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */ +#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */ + +/* Autoneg Advertisement Register */ +#define NWAY_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */ +#define NWAY_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */ +#define NWAY_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */ +#define NWAY_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */ +#define NWAY_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */ +#define NWAY_AR_100T4_CAPS 0x0200 /* 100T4 Capable */ +#define NWAY_AR_PAUSE 0x0400 /* Pause operation desired */ +#define NWAY_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */ +#define NWAY_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */ +#define NWAY_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Link Partner Ability Register (Base Page) */ +#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */ +#define NWAY_LPAR_10T_HD_CAPS 0x0020 /* LP is 10T Half Duplex Capable */ +#define NWAY_LPAR_10T_FD_CAPS 0x0040 /* LP is 10T Full Duplex Capable */ +#define NWAY_LPAR_100TX_HD_CAPS 0x0080 /* LP is 100TX Half Duplex Capable */ +#define NWAY_LPAR_100TX_FD_CAPS 0x0100 /* LP is 100TX Full Duplex Capable */ +#define NWAY_LPAR_100T4_CAPS 0x0200 /* LP is 100T4 Capable */ +#define NWAY_LPAR_PAUSE 0x0400 /* LP Pause operation desired */ +#define NWAY_LPAR_ASM_DIR 0x0800 /* LP Asymmetric Pause Direction bit */ +#define NWAY_LPAR_REMOTE_FAULT 0x2000 /* LP has detected Remote Fault */ +#define NWAY_LPAR_ACKNOWLEDGE 0x4000 /* LP has rx'd link code word */ +#define NWAY_LPAR_NEXT_PAGE 0x8000 /* Next Page ability supported */ + +/* Autoneg Expansion Register */ +#define NWAY_ER_LP_NWAY_CAPS 0x0001 /* LP has Auto Neg Capability */ +#define NWAY_ER_PAGE_RXD 0x0002 /* New page has been received */ +#define NWAY_ER_NEXT_PAGE_CAPS 0x0004 /* Local device is Next Page able */ +#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */ +#define NWAY_ER_PAR_DETECT_FAULT 0x0010 /* Parallel Detection Fault occurred */ + +/* Next Page TX Register */ +#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define NPTX_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define NPTX_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define NPTX_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define NPTX_NEXT_PAGE 0x8000 /* 1 = additional NP will follow + * 0 = sending last NP + */ + +/* Link Partner Next Page Register */ +#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */ +#define LP_RNPR_TOGGLE 0x0800 /* Toggles between exchanges + * of different NP + */ +#define LP_RNPR_ACKNOWLDGE2 0x1000 /* 1 = will comply with msg + * 0 = cannot comply with msg + */ +#define LP_RNPR_MSG_PAGE 0x2000 /* formatted(1)/unformatted(0) pg */ +#define LP_RNPR_ACKNOWLDGE 0x4000 /* 1 = ACK / 0 = NO ACK */ +#define LP_RNPR_NEXT_PAGE 0x8000 /* 1 = additional NP will follow + * 0 = sending last NP + */ + +/* 1000BASE-T Control Register */ +#define CR_1000T_ASYM_PAUSE 0x0080 /* Advertise asymmetric pause bit */ +#define CR_1000T_HD_CAPS 0x0100
/* Advertise 1000T HD capability */ +#define CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */ +#define CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port */ + /* 0=DTE device */ +#define CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master */ + /* 0=Configure PHY as Slave */ +#define CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value */ + /* 0=Automatic Master/Slave config */ +#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */ +#define CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */ +#define CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */ +#define CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */ +#define CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */ + +/* 1000BASE-T Status Register */ +#define SR_1000T_IDLE_ERROR_CNT 0x00FF /* Num idle errors since last read */ +#define SR_1000T_ASYM_PAUSE_DIR 0x0100 /* LP asymmetric pause direction bit */ +#define SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */ +#define SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */ +#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */ +#define SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */ +#define SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */ +#define SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */ +#define SR_1000T_REMOTE_RX_STATUS_SHIFT 12 +#define SR_1000T_LOCAL_RX_STATUS_SHIFT 13 +#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_20 20 +#define FFE_IDLE_ERR_COUNT_TIMEOUT_100 100 + +/* Extended Status Register */ +#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */ +#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */ +#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */ +#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */ + +#define PHY_TX_POLARITY_MASK 0x0100 /* register 10h bit 8 (polarity bit) */ +#define PHY_TX_NORMAL_POLARITY 0 /* register 10h bit 8 (normal polarity) */ + +#define AUTO_POLARITY_DISABLE 0x0010 /* register 11h bit 4 */ + /* (0=enable, 1=disable) */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */ +#define M88E1000_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, + * 0=CLK125 toggling + */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, + * 100BASE-TX/10BASE-T: + * MDI Mode + */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled + * all speeds. 
+ */ +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080 + /* 1=Enable Extended 10BASE-T distance + * (Lower 10BASE-T RX Threshold) + * 0=Normal 10BASE-T RX Threshold */ +#define M88E1000_PSCR_MII_5BIT_ENABLE 0x0100 + /* 1=5-Bit interface in 100BASE-TX + * 0=MII interface in 100BASE-TX */ +#define M88E1000_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */ +#define M88E1000_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */ +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT 1 +#define M88E1000_PSCR_AUTO_X_MODE_SHIFT 5 +#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7 + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_JABBER 0x0001 /* 1=Jabber */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M; + * 3=110-140M;4=>140M */ +#define M88E1000_PSSR_LINK 0x0400 /* 1=Link up, 0=Link down */ +#define M88E1000_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */ +#define M88E1000_PSSR_PAGE_RCVD 0x1000 /* 1=Page received */ +#define M88E1000_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */ +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_10MBS 0x0000 /* 00=10Mbs */ +#define M88E1000_PSSR_100MBS 0x4000 /* 01=100Mbs */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_REV_POLARITY_SHIFT 1 +#define M88E1000_PSSR_DOWNSHIFT_SHIFT 5 +#define M88E1000_PSSR_MDIX_SHIFT 6 +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* M88E1000 Extended PHY Specific Control Register */ +#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */ +#define M88E1000_EPSCR_DOWN_NO_IDLE 0x8000 /* 1=Lost lock detect enabled. 
+ * Will assert lost lock and bring + * link down if idle not seen + * within 1ms in 1000BASE-T + */ +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X 0x0400 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X 0x0800 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X 0x0C00 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS 0x0000 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X 0x0200 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X 0x0300 +#define M88E1000_EPSCR_TX_CLK_2_5 0x0060 /* 2.5 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ +#define M88E1000_EPSCR_TX_CLK_0 0x0000 /* NO TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X 0x0000 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X 0x0200 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X 0x0400 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X 0x0600 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X 0x0A00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X 0x0C00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X 0x0E00 + +/* IGP01E1000 Specific Port Config Register - R/W */ +#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT 0x0010 +#define IGP01E1000_PSCFR_PRE_EN 0x0020 +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 +#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK 0x0100 +#define IGP01E1000_PSCFR_DISABLE_JABBER 0x0400 +#define IGP01E1000_PSCFR_DISABLE_TRANSMIT 0x2000 + +/* IGP01E1000 Specific Port Status Register - R/O */ +#define IGP01E1000_PSSR_AUTONEG_FAILED 0x0001 /* RO LH SC */ +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_CABLE_LENGTH 0x007C +#define IGP01E1000_PSSR_FULL_DUPLEX 0x0200 +#define IGP01E1000_PSSR_LINK_UP 0x0400 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 /* speed bits mask */ +#define IGP01E1000_PSSR_SPEED_10MBPS 0x4000 +#define IGP01E1000_PSSR_SPEED_100MBPS 0x8000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 +#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT 0x0002 /* shift right 2 */ +#define IGP01E1000_PSSR_MDIX_SHIFT 0x000B /* shift right 11 */ + +/* IGP01E1000 Specific Port Control Register - R/W */ +#define IGP01E1000_PSCR_TP_LOOPBACK 0x0010 +#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR 0x0200 +#define IGP01E1000_PSCR_TEN_CRS_SELECT 0x0400 +#define IGP01E1000_PSCR_FLIP_CHIP 0x0800 +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0-MDI, 1-MDIX */ + +/* IGP01E1000 Specific Port Link Health Register */ +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 +#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR 0x4000 +#define IGP01E1000_PLHR_MASTER_FAULT 0x2000 +#define IGP01E1000_PLHR_MASTER_RESOLUTION 0x1000 +#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK 0x0800 /* LH */ +#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW 0x0400 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_1 0x0200 /* LH */ +#define IGP01E1000_PLHR_DATA_ERR_0 0x0100 +#define IGP01E1000_PLHR_AUTONEG_FAULT 0x0040 +#define IGP01E1000_PLHR_AUTONEG_ACTIVE 0x0010 +#define IGP01E1000_PLHR_VALID_CHANNEL_D 0x0008 +#define IGP01E1000_PLHR_VALID_CHANNEL_C 0x0004 +#define 
IGP01E1000_PLHR_VALID_CHANNEL_B 0x0002 +#define IGP01E1000_PLHR_VALID_CHANNEL_A 0x0001 + +/* IGP01E1000 Channel Quality Register */ +#define IGP01E1000_MSE_CHANNEL_D 0x000F +#define IGP01E1000_MSE_CHANNEL_C 0x00F0 +#define IGP01E1000_MSE_CHANNEL_B 0x0F00 +#define IGP01E1000_MSE_CHANNEL_A 0xF000 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* Enable LPLU in non-D0a modes */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* Enable LPLU in D0a mode */ + +/* IGP01E1000 DSP reset macros */ +#define DSP_RESET_ENABLE 0x0 +#define DSP_RESET_DISABLE 0x2 +#define E1000_MAX_DSP_RESETS 10 + +/* IGP01E1000 & IGP02E1000 AGC Registers */ + +#define IGP01E1000_AGC_LENGTH_SHIFT 7 /* Coarse - 13:11, Fine - 10:7 */ +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Coarse - 15:13, Fine - 12:9 */ + +/* IGP02E1000 AGC Register Length 9-bit mask */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F + +/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */ +#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128 +#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113 + +/* The precision error of the cable length is +/- 10 meters */ +#define IGP01E1000_AGC_RANGE 10 +#define IGP02E1000_AGC_RANGE 15 + +/* IGP01E1000 PCS Initialization register */ +/* bits 3:6 in the PCS registers stores the channels polarity */ +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +/* IGP01E1000 GMII FIFO Register */ +#define IGP01E1000_GMII_FLEX_SPD 0x10 /* Enable flexible speed + * on Link-Up */ +#define IGP01E1000_GMII_SPD 0x20 /* Enable SPD */ + +/* IGP01E1000 Analog Register */ +#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS 0x20D1 +#define IGP01E1000_ANALOG_FUSE_STATUS 0x20D0 +#define IGP01E1000_ANALOG_FUSE_CONTROL 0x20DC +#define IGP01E1000_ANALOG_FUSE_BYPASS 0x20DE + +#define IGP01E1000_ANALOG_FUSE_POLY_MASK 0xF000 +#define IGP01E1000_ANALOG_FUSE_FINE_MASK 0x0F80 +#define IGP01E1000_ANALOG_FUSE_COARSE_MASK 0x0070 +#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED 0x0100 +#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002 + +#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH 0x0040 +#define IGP01E1000_ANALOG_FUSE_COARSE_10 0x0010 +#define IGP01E1000_ANALOG_FUSE_FINE_1 0x0080 +#define IGP01E1000_ANALOG_FUSE_FINE_10 0x0500 + +/* Bit definitions for valid PHY IDs. */ +/* I = Integrated + * E = External + */ +#define M88_VENDOR 0x0141 +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID +#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID +#define M88E1011_I_REV_4 0x04 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define M88E1118_E_PHY_ID 0x01410E40 +#define L1LXT971A_PHY_ID 0x001378E0 + +#define RTL8211B_PHY_ID 0x001CC910 +#define RTL8201N_PHY_ID 0x8200 +#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */ +#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */ + +/* Bits... 
+ * 15-5: page + * 4-0: register offset + */ +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) \ + (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) + +#define IGP3_PHY_PORT_CTRL \ + PHY_REG(769, 17) /* Port General Configuration */ +#define IGP3_PHY_RATE_ADAPT_CTRL \ + PHY_REG(769, 25) /* Rate Adapter Control Register */ + +#define IGP3_KMRN_FIFO_CTRL_STATS \ + PHY_REG(770, 16) /* KMRN FIFO's control/status register */ +#define IGP3_KMRN_POWER_MNG_CTRL \ + PHY_REG(770, 17) /* KMRN Power Management Control Register */ +#define IGP3_KMRN_INBAND_CTRL \ + PHY_REG(770, 18) /* KMRN Inband Control Register */ +#define IGP3_KMRN_DIAG \ + PHY_REG(770, 19) /* KMRN Diagnostic register */ +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */ +#define IGP3_KMRN_ACK_TIMEOUT \ + PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */ + +#define IGP3_VR_CTRL \ + PHY_REG(776, 18) /* Voltage regulator control register */ +#define IGP3_VR_CTRL_MODE_SHUT 0x0200 /* Enter powerdown, shutdown VRs */ +#define IGP3_VR_CTRL_MODE_MASK 0x0300 /* Shutdown VR Mask */ + +#define IGP3_CAPABILITY \ + PHY_REG(776, 19) /* IGP3 Capability Register */ + +/* Capabilities for SKU Control */ +#define IGP3_CAP_INITIATE_TEAM 0x0001 /* Able to initiate a team */ +#define IGP3_CAP_WFM 0x0002 /* Support WoL and PXE */ +#define IGP3_CAP_ASF 0x0004 /* Support ASF */ +#define IGP3_CAP_LPLU 0x0008 /* Support Low Power Link Up */ +#define IGP3_CAP_DC_AUTO_SPEED 0x0010 /* Support AC/DC Auto Link Speed */ +#define IGP3_CAP_SPD 0x0020 /* Support Smart Power Down */ +#define IGP3_CAP_MULT_QUEUE 0x0040 /* Support 2 tx & 2 rx queues */ +#define IGP3_CAP_RSS 0x0080 /* Support RSS */ +#define IGP3_CAP_8021PQ 0x0100 /* Support 802.1Q & 802.1p */ +#define IGP3_CAP_AMT_CB 0x0200 /* Support active manageability and circuit breaker */ + +#define IGP3_PPC_JORDAN_EN 0x0001 +#define IGP3_PPC_JORDAN_GIGA_SPEED 0x0002 + +#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS 0x0001 +#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK 0x001E +#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA 0x0020 +#define IGP3_KMRN_PMC_K0S_MODE1_EN_100 0x0040 + +#define IGP3E1000_PHY_MISC_CTRL 0x1B /* Misc. 
Ctrl register */ +#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET 0x1000 /* Duplex Manual Set */ + +#define IGP3_KMRN_EXT_CTRL PHY_REG(770, 18) +#define IGP3_KMRN_EC_DIS_INBAND 0x0080 + +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 /* 10/100 PHY */ +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 /* 100BaseTx Extended Status, Control and Address */ +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY special control register */ +#define IFE_PHY_RCV_FALSE_CARRIER 0x13 /* 100BaseTx Receive False Carrier Counter */ +#define IFE_PHY_RCV_DISCONNECT 0x14 /* 100BaseTx Receive Disconnect Counter */ +#define IFE_PHY_RCV_ERROT_FRAME 0x15 /* 100BaseTx Receive Error Frame Counter */ +#define IFE_PHY_RCV_SYMBOL_ERR 0x16 /* Receive Symbol Error Counter */ +#define IFE_PHY_PREM_EOF_ERR 0x17 /* 100BaseTx Receive Premature End Of Frame Error Counter */ +#define IFE_PHY_RCV_EOF_ERR 0x18 /* 10BaseT Receive End Of Frame Error Counter */ +#define IFE_PHY_TX_JABBER_DETECT 0x19 /* 10BaseT Transmit Jabber Detect Counter */ +#define IFE_PHY_EQUALIZER 0x1A /* PHY Equalizer Control and Status */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY special control and LED configuration */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control register */ +#define IFE_PHY_HWI_CONTROL 0x1D /* Hardware Integrity Control (HWI) */ + +#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE 0x2000 /* Default 1 = Disable auto reduced power down */ +#define IFE_PESC_100BTX_POWER_DOWN 0x0400 /* Indicates the power state of 100BASE-TX */ +#define IFE_PESC_10BTX_POWER_DOWN 0x0200 /* Indicates the power state of 10BASE-T */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 /* Indicates 10BASE-T polarity */ +#define IFE_PESC_PHY_ADDR_MASK 0x007C /* Bit 6:2 for sampled PHY address */ +#define IFE_PESC_SPEED 0x0002 /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */ +#define IFE_PESC_DUPLEX 0x0001 /* Auto-negotiation duplex result 1=Full, 0=Half */ +#define IFE_PESC_POLARITY_REVERSED_SHIFT 8 + +#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100 /* 1 = Dynamic Power Down disabled */ +#define IFE_PSC_FORCE_POLARITY 0x0020 /* 1=Reversed Polarity, 0=Normal */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 /* 1=Auto Polarity Disabled, 0=Enabled */ +#define IFE_PSC_JABBER_FUNC_DISABLE 0x0001 /* 1=Jabber Disabled, 0=Normal Jabber Operation */ +#define IFE_PSC_FORCE_POLARITY_SHIFT 5 +#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT 4 + +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable MDI/MDI-X feature, default 0=disabled */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDIX-X, 0=force MDI */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_AUTO_MDIX_COMPLETE 0x0010 /* Resolution algorithm is completed */ +#define IFE_PMC_MDIX_MODE_SHIFT 6 +#define IFE_PHC_MDIX_RESET_ALL_MASK 0x0000 /* Disable auto MDI-X */ + +#define IFE_PHC_HWI_ENABLE 0x8000 /* Enable the HWI feature */ +#define IFE_PHC_ABILITY_CHECK 0x4000 /* 1= Test Passed, 0=failed */ +#define IFE_PHC_TEST_EXEC 0x2000 /* PHY launch test pulses on the wire */ +#define IFE_PHC_HIGHZ 0x0200 /* 1 = Open Circuit */ +#define IFE_PHC_LOWZ 0x0400 /* 1 = Short Circuit */ +#define IFE_PHC_LOW_HIGH_Z_MASK 0x0600 /* Mask for indication type of problem on the line */ +#define IFE_PHC_DISTANCE_MASK 0x01FF /* Mask for distance to the cable problem, in 80cm granularity */ +#define IFE_PHC_RESET_ALL_MASK 0x0000 /* Disable HWI */ +#define IFE_PSCL_PROBE_MODE 0x0020 /* LED Probe mode */ +#define IFE_PSCL_PROBE_LEDS_OFF 
0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +#define ICH_FLASH_COMMAND_TIMEOUT 5000 /* 5000 uSecs - adjusted */ +#define ICH_FLASH_ERASE_TIMEOUT 3000000 /* Up to 3 seconds - worst case */ +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 /* 10 cycles */ +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define ICH_CYCLE_READ 0x0 +#define ICH_CYCLE_RESERVED 0x1 +#define ICH_CYCLE_WRITE 0x2 +#define ICH_CYCLE_ERASE 0x3 + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_FRACC 0x0050 +#define ICH_FLASH_FREG0 0x0054 +#define ICH_FLASH_FREG1 0x0058 +#define ICH_FLASH_FREG2 0x005C +#define ICH_FLASH_FREG3 0x0060 +#define ICH_FLASH_FPR0 0x0074 +#define ICH_FLASH_FPR1 0x0078 +#define ICH_FLASH_SSFSTS 0x0090 +#define ICH_FLASH_SSFCTL 0x0092 +#define ICH_FLASH_PREOP 0x0094 +#define ICH_FLASH_OPTYPE 0x0096 +#define ICH_FLASH_OPMENU 0x0098 + +#define ICH_FLASH_REG_MAPSIZE 0x00A0 +#define ICH_FLASH_SECTOR_SIZE 4096 +#define ICH_GFPREG_BASE_MASK 0x1FFF +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF + +/* Miscellaneous PHY bit definitions. */ +#define PHY_PREAMBLE 0xFFFFFFFF +#define PHY_SOF 0x01 +#define PHY_OP_READ 0x02 +#define PHY_OP_WRITE 0x01 +#define PHY_TURNAROUND 0x02 +#define PHY_PREAMBLE_SIZE 32 +#define MII_CR_SPEED_1000 0x0040 +#define MII_CR_SPEED_100 0x2000 +#define MII_CR_SPEED_10 0x0000 +#define E1000_PHY_ADDRESS 0x01 +#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +#define PHY_FORCE_TIME 20 /* 2.0 Seconds */ +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define DEVICE_SPEED_MASK 0x00000300 /* Device Ctrl Reg Speed Mask */ +#define REG4_SPEED_MASK 0x01E0 +#define REG9_SPEED_MASK 0x0300 +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 +#define ADVERTISE_1000_FULL 0x0020 +#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */ +#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */ +#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */ + +#endif /* _E1000_HW_H_ */ diff --git a/addons/e1000/src/4.4.180/e1000_main.c b/addons/e1000/src/4.4.180/e1000_main.c new file mode 100644 index 00000000..1f84f2fa --- /dev/null +++ b/addons/e1000/src/4.4.180/e1000_main.c @@ -0,0 +1,5344 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" +#include <net/ip6_checksum.h> +#include <linux/io.h> +#include <linux/prefetch.h> +#include <linux/bitops.h> +#include <linux/if_vlan.h> + +char e1000_driver_name[] = "e1000"; +static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; +#define DRV_VERSION "7.3.21-k8-NAPI" +const char e1000_driver_version[] = DRV_VERSION; +static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; + +/* e1000_pci_tbl - PCI Device ID Table + * + * Last entry must be all 0s + * + * Macro expands to... + * {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)} + */ +static const struct pci_device_id e1000_pci_tbl[] = { + INTEL_E1000_ETHERNET_DEVICE(0x1000), + INTEL_E1000_ETHERNET_DEVICE(0x1001), + INTEL_E1000_ETHERNET_DEVICE(0x1004), + INTEL_E1000_ETHERNET_DEVICE(0x1008), + INTEL_E1000_ETHERNET_DEVICE(0x1009), + INTEL_E1000_ETHERNET_DEVICE(0x100C), + INTEL_E1000_ETHERNET_DEVICE(0x100D), + INTEL_E1000_ETHERNET_DEVICE(0x100E), + INTEL_E1000_ETHERNET_DEVICE(0x100F), + INTEL_E1000_ETHERNET_DEVICE(0x1010), + INTEL_E1000_ETHERNET_DEVICE(0x1011), + INTEL_E1000_ETHERNET_DEVICE(0x1012), + INTEL_E1000_ETHERNET_DEVICE(0x1013), + INTEL_E1000_ETHERNET_DEVICE(0x1014), + INTEL_E1000_ETHERNET_DEVICE(0x1015), + INTEL_E1000_ETHERNET_DEVICE(0x1016), + INTEL_E1000_ETHERNET_DEVICE(0x1017), + INTEL_E1000_ETHERNET_DEVICE(0x1018), + INTEL_E1000_ETHERNET_DEVICE(0x1019), + INTEL_E1000_ETHERNET_DEVICE(0x101A), + INTEL_E1000_ETHERNET_DEVICE(0x101D), + INTEL_E1000_ETHERNET_DEVICE(0x101E), + INTEL_E1000_ETHERNET_DEVICE(0x1026), + INTEL_E1000_ETHERNET_DEVICE(0x1027), + INTEL_E1000_ETHERNET_DEVICE(0x1028), + INTEL_E1000_ETHERNET_DEVICE(0x1075), + INTEL_E1000_ETHERNET_DEVICE(0x1076), + INTEL_E1000_ETHERNET_DEVICE(0x1077), + INTEL_E1000_ETHERNET_DEVICE(0x1078), + INTEL_E1000_ETHERNET_DEVICE(0x1079), + INTEL_E1000_ETHERNET_DEVICE(0x107A), + INTEL_E1000_ETHERNET_DEVICE(0x107B), + INTEL_E1000_ETHERNET_DEVICE(0x107C), + INTEL_E1000_ETHERNET_DEVICE(0x108A), + INTEL_E1000_ETHERNET_DEVICE(0x1099), + INTEL_E1000_ETHERNET_DEVICE(0x10B5), + INTEL_E1000_ETHERNET_DEVICE(0x2E6E), + /* required last entry */ + {0,} +}; + +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +int e1000_up(struct e1000_adapter *adapter); +void e1000_down(struct e1000_adapter *adapter); +void e1000_reinit_locked(struct e1000_adapter *adapter); +void e1000_reset(struct e1000_adapter *adapter); +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter); +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter); +void e1000_free_all_tx_resources(struct e1000_adapter *adapter); +void e1000_free_all_rx_resources(struct e1000_adapter *adapter); +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr); +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr); +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +void e1000_update_stats(struct e1000_adapter *adapter); + +static int e1000_init_module(void); +static void e1000_exit_module(void); +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static void e1000_remove(struct pci_dev *pdev); +static int e1000_alloc_queues(struct e1000_adapter *adapter); +static int e1000_sw_init(struct e1000_adapter *adapter); +static int e1000_open(struct net_device *netdev); +static int
e1000_close(struct net_device *netdev); +static void e1000_configure_tx(struct e1000_adapter *adapter); +static void e1000_configure_rx(struct e1000_adapter *adapter); +static void e1000_setup_rctl(struct e1000_adapter *adapter); +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter); +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter); +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring); +static void e1000_set_rx_mode(struct net_device *netdev); +static void e1000_update_phy_info_task(struct work_struct *work); +static void e1000_watchdog(struct work_struct *work); +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work); +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev); +static struct net_device_stats * e1000_get_stats(struct net_device *netdev); +static int e1000_change_mtu(struct net_device *netdev, int new_mtu); +static int e1000_set_mac(struct net_device *netdev, void *p); +static irqreturn_t e1000_intr(int irq, void *data); +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring); +static int e1000_clean(struct napi_struct *napi, int budget); +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do); +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ +} +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count); +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd); +static void e1000_enter_82542_rst(struct e1000_adapter *adapter); +static void e1000_leave_82542_rst(struct e1000_adapter *adapter); +static void e1000_tx_timeout(struct net_device *dev); +static void e1000_reset_task(struct work_struct *work); +static void e1000_smartspeed(struct e1000_adapter *adapter); +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb); + +static bool e1000_vlan_used(struct e1000_adapter *adapter); +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features); +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on); +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid); +static void e1000_restore_vlan(struct e1000_adapter *adapter); + +#ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state); +static int e1000_resume(struct pci_dev *pdev); +#endif +static void e1000_shutdown(struct pci_dev *pdev); + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* for netdump / net console */ +static void e1000_netpoll (struct net_device *netdev); +#endif + +#define COPYBREAK_DEFAULT 256 +static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that 
is copied to a new buffer on receive"); + +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state); +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev); +static void e1000_io_resume(struct pci_dev *pdev); + +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static struct pci_driver e1000_driver = { + .name = e1000_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = e1000_remove, +#ifdef CONFIG_PM + /* Power Management Hooks */ + .suspend = e1000_suspend, + .resume = e1000_resume, +#endif + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +/** + * e1000_get_hw_dev - return device + * used by hardware layer to print debugging information + * + **/ +struct net_device *e1000_get_hw_dev(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return adapter->netdev; +} + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version); + + pr_info("%s\n", e1000_copyright); + + ret = pci_register_driver(&e1000_driver); + if (copybreak != COPYBREAK_DEFAULT) { + if (copybreak == 0) + pr_info("copybreak disabled\n"); + else + pr_info("copybreak enabled for " + "packets <= %u bytes\n", copybreak); + } + return ret; +} + +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory.
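+ * + * (A minimal sketch for exercising both paths, assuming an out-of-tree + * build that produces e1000.ko — the .ko filename is an assumption, the + * module name and the copybreak parameter come from this file: + * + *   insmod e1000.ko copybreak=0   # init path; should log "copybreak disabled" + *   rmmod e1000                   # exit path; unregisters the pci_driver + * )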
+ **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} + +module_exit(e1000_exit_module); + +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + irq_handler_t handler = e1000_intr; + int irq_flags = IRQF_SHARED; + int err; + + err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name, + netdev); + if (err) { + e_err(probe, "Unable to allocate interrupt Error: %d\n", err); + } + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + * @adapter: board private structure + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + synchronize_irq(adapter->pdev->irq); +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMS, IMS_ENABLE_MASK); + E1000_WRITE_FLUSH(); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u16 vid = hw->mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (!e1000_vlan_used(adapter)) + return; + + if (!test_bit(vid, adapter->active_vlans)) { + if (hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } else { + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && + (vid != old_vid) && + !test_bit(old_vid, adapter->active_vlans)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + old_vid); + } else { + adapter->mng_vlan_id = vid; + } +} + +static void e1000_init_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* disable hardware interception of ARP */ + manc &= ~(E1000_MANC_ARP_EN); + + ew32(MANC, manc); + } +} + +static void e1000_release_manageability(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->en_mng_pt) { + u32 manc = er32(MANC); + + /* re-enable hardware interception of ARP */ + manc |= E1000_MANC_ARP_EN; + + ew32(MANC, manc); + } +} + +/** + * e1000_configure - configure the hardware for RX and TX + * @adapter = private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int i; + + e1000_set_rx_mode(netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability(adapter); + + e1000_configure_tx(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + /* call E1000_DESC_UNUSED which always leaves + * at least 1 descriptor unused to make sure + * next_to_use != next_to_clean + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + struct e1000_rx_ring *ring = &adapter->rx_ring[i]; + adapter->alloc_rx_buf(adapter, ring, + E1000_DESC_UNUSED(ring)); + } +} + +int e1000_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->flags); + + 
napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_wake_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + return 0; +} + +/** + * e1000_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000_reset *** + **/ +void e1000_power_up_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 mii_reg = 0; + + /* Just clear the power down bit to wake the phy back up */ + if (hw->media_type == e1000_media_type_copper) { + /* according to the manual, the phy will retain its + * settings across a power-down/up cycle + */ + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg &= ~MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + } +} + +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* Power down the PHY so no link is implied when interface is down * + * The PHY cannot be powered down if any of the following is true * + * (a) WoL is enabled + * (b) AMT is active + * (c) SoL/IDER session is active + */ + if (!adapter->wol && hw->mac_type >= e1000_82540 && + hw->media_type == e1000_media_type_copper) { + u16 mii_reg = 0; + + switch (hw->mac_type) { + case e1000_82540: + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + case e1000_82541: + case e1000_82541_rev_2: + case e1000_82547: + case e1000_82547_rev_2: + if (er32(MANC) & E1000_MANC_SMBUS_EN) + goto out; + break; + default: + goto out; + } + e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg); + mii_reg |= MII_CR_POWER_DOWN; + e1000_write_phy_reg(hw, PHY_CTRL, mii_reg); + msleep(1); + } +out: + return; +} + +static void e1000_down_and_stop(struct e1000_adapter *adapter) +{ + set_bit(__E1000_DOWN, &adapter->flags); + + cancel_delayed_work_sync(&adapter->watchdog_task); + + /* + * Since the watchdog task can reschedule other tasks, we should cancel + * it first, otherwise we can run into the situation when a work is + * still running after the adapter has been turned down. + */ + + cancel_delayed_work_sync(&adapter->phy_info_task); + cancel_delayed_work_sync(&adapter->fifo_stall_task); + + /* Only kill reset task if adapter is not resetting */ + if (!test_bit(__E1000_RESETTING, &adapter->flags)) + cancel_work_sync(&adapter->reset_task); +} + +void e1000_down(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl, tctl; + + /* disable receives in the hardware */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_tx_disable(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + /* flush both disables and wait for them to finish */ + E1000_WRITE_FLUSH(); + msleep(10); + + /* Set the carrier off after transmits have been disabled in the + * hardware, to avoid race conditions with e1000_watchdog() (which + * may be running concurrently to us, checking for the carrier + * bit to decide whether it should enable transmits again). Such + * a race condition would result into transmission being disabled + * in the hardware until the next IFF_DOWN+IFF_UP cycle. 
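+ * + * (Teardown order below, for reference: disable receives, stop the Tx + * queue, disable transmits, flush and sleep, then netif_carrier_off(), + * napi_disable(), e1000_irq_disable(), and only then e1000_down_and_stop(), + * so no rescheduled task ever sees a half-down device.)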
+ */ + netif_carrier_off(netdev); + + napi_disable(&adapter->napi); + + e1000_irq_disable(adapter); + + /* Setting DOWN must be after irq_disable to prevent + * a screaming interrupt. Setting DOWN also prevents + * tasks from rescheduling. + */ + e1000_down_and_stop(adapter); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + e1000_reset(adapter); + e1000_clean_all_tx_rings(adapter); + e1000_clean_all_rx_rings(adapter); +} + +void e1000_reinit_locked(struct e1000_adapter *adapter) +{ + WARN_ON(in_interrupt()); + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + e1000_down(adapter); + e1000_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->flags); +} + +void e1000_reset(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 pba = 0, tx_space, min_tx_space, min_rx_space; + bool legacy_pba_adjust = false; + u16 hwm; + + /* Repartition Pba for greater than 9k mtu + * To take effect CTRL.RST is required. + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + case e1000_82544: + case e1000_82540: + case e1000_82541: + case e1000_82541_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_48K; + break; + case e1000_82545: + case e1000_82545_rev_3: + case e1000_82546: + case e1000_ce4100: + case e1000_82546_rev_3: + pba = E1000_PBA_48K; + break; + case e1000_82547: + case e1000_82547_rev_2: + legacy_pba_adjust = true; + pba = E1000_PBA_30K; + break; + case e1000_undefined: + case e1000_num_macs: + break; + } + + if (legacy_pba_adjust) { + if (hw->max_frame_size > E1000_RXBUFFER_8192) + pba -= 8; /* allocate more FIFO for Tx */ + + if (hw->mac_type == e1000_82547) { + adapter->tx_fifo_head = 0; + adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT; + adapter->tx_fifo_size = + (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT; + atomic_set(&adapter->tx_fifo_stall, 0); + } + } else if (hw->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* adjust PBA for jumbo frames */ + ew32(PBA, pba); + + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (hw->max_frame_size + + sizeof(struct e1000_tx_desc) - + ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = hw->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if (tx_space < min_tx_space && + ((min_tx_space - tx_space) < pba)) { + pba = pba - (min_tx_space - tx_space); + + /* PCI/PCIx hardware has PBA alignment constraints */ + switch (hw->mac_type) { + case e1000_82545 ... 
e1000_82546_rev_3: + pba &= ~(E1000_PBA_8K - 1); + break; + default: + break; + } + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment or use Early Receive if available + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + } + + ew32(PBA, pba); + + /* flow control settings: + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. + * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus the early receive size (for parts + * with ERT support assuming ERT set to E1000_ERT_2048), or + * - the full Rx FIFO size minus one full frame + */ + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - hw->max_frame_size)); + + hw->fc_high_water = hwm & 0xFFF8; /* 8-byte granularity */ + hw->fc_low_water = hw->fc_high_water - 8; + hw->fc_pause_time = E1000_FC_PAUSE_TIME; + hw->fc_send_xon = 1; + hw->fc = hw->original_fc; + + /* Allow time for pending master requests to run */ + e1000_reset_hw(hw); + if (hw->mac_type >= e1000_82544) + ew32(WUC, 0); + + if (e1000_init_hw(hw)) + e_dev_err("Hardware Error\n"); + e1000_update_mng_vlan(adapter); + + /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */ + if (hw->mac_type >= e1000_82544 && + hw->autoneg == 1 && + hw->autoneg_advertised == ADVERTISE_1000_FULL) { + u32 ctrl = er32(CTRL); + /* clear phy power management bit if we are in gig only mode, + * which if enabled will attempt negotiation to 100Mb, which + * can cause a loss of link at power off or driver unload + */ + ctrl &= ~E1000_CTRL_SWDPIN3; + ew32(CTRL, ctrl); + } + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETHERNET_IEEE_VLAN_TYPE); + + e1000_reset_adaptive(hw); + e1000_phy_get_info(hw, &adapter->phy_info); + + e1000_release_manageability(adapter); +} + +/* Dump the eeprom for users having checksum issues */ +static void e1000_dump_eeprom(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct ethtool_eeprom eeprom; + const struct ethtool_ops *ops = netdev->ethtool_ops; + u8 *data; + int i; + u16 csum_old, csum_new = 0; + + eeprom.len = ops->get_eeprom_len(netdev); + eeprom.offset = 0; + + data = kmalloc(eeprom.len, GFP_KERNEL); + if (!data) + return; + + ops->get_eeprom(netdev, &eeprom, data); + + csum_old = (data[EEPROM_CHECKSUM_REG * 2]) + + (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8); + for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2) + csum_new += data[i] + (data[i + 1] << 8); + csum_new = EEPROM_SUM - csum_new; + + pr_err("/*********************/\n"); + pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old); + pr_err("Calculated : 0x%04x\n", csum_new); + + pr_err("Offset Values\n"); + pr_err("======== ======\n"); + print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0); + + pr_err("Include this output when contacting your support provider.\n"); + pr_err("This is not a software error! Something bad happened to\n"); + pr_err("your hardware or EEPROM image. Ignoring this problem could\n"); + pr_err("result in further problems, possibly loss of data,\n"); + pr_err("corruption or system hangs!\n"); + pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n"); + pr_err("which is invalid and requires you to set the proper MAC\n"); + pr_err("address manually before continuing to enable this network\n"); + pr_err("device. 
Please inspect the EEPROM dump and report the\n"); + pr_err("issue to your hardware vendor or Intel Customer Support.\n"); + pr_err("/*********************/\n"); + + kfree(data); +} + +/** + * e1000_is_need_ioport - determine if an adapter needs ioport resources or not + * @pdev: PCI device information struct + * + * Return true if an adapter needs ioport resources + **/ +static int e1000_is_need_ioport(struct pci_dev *pdev) +{ + switch (pdev->device) { + case E1000_DEV_ID_82540EM: + case E1000_DEV_ID_82540EM_LOM: + case E1000_DEV_ID_82540EP: + case E1000_DEV_ID_82540EP_LOM: + case E1000_DEV_ID_82540EP_LP: + case E1000_DEV_ID_82541EI: + case E1000_DEV_ID_82541EI_MOBILE: + case E1000_DEV_ID_82541ER: + case E1000_DEV_ID_82541ER_LOM: + case E1000_DEV_ID_82541GI: + case E1000_DEV_ID_82541GI_LF: + case E1000_DEV_ID_82541GI_MOBILE: + case E1000_DEV_ID_82544EI_COPPER: + case E1000_DEV_ID_82544EI_FIBER: + case E1000_DEV_ID_82544GC_COPPER: + case E1000_DEV_ID_82544GC_LOM: + case E1000_DEV_ID_82545EM_COPPER: + case E1000_DEV_ID_82545EM_FIBER: + case E1000_DEV_ID_82546EB_COPPER: + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546EB_QUAD_COPPER: + return true; + default: + return false; + } +} + +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + /* Since there is no support for separate Rx/Tx vlan accel + * enable/disable make sure Tx flag is always in same state as Rx. + */ + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_CTAG_TX; + else + features &= ~NETIF_F_HW_VLAN_CTAG_TX; + + return features; +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & NETIF_F_HW_VLAN_CTAG_RX) + e1000_vlan_mode(netdev, features); + + if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL))) + return 0; + + netdev->features = features; + adapter->rx_csum = !!(features & NETIF_F_RXCSUM); + + if (netif_running(netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + + return 0; +} + +static const struct net_device_ops e1000_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_get_stats = e1000_get_stats, + .ndo_set_rx_mode = e1000_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_change_mtu = e1000_change_mtu, + .ndo_do_ioctl = e1000_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_fix_features = e1000_fix_features, + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_init_hw_struct - initialize members of hw struct + * @adapter: board private struct + * @hw: structure used by e1000_hw.c + * + * Factors out initialization of the e1000_hw struct to its own function + * that can be called very early at init (just after struct allocation). + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + * Returns negative error codes if MAC type setup fails. 
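+ * + * (Worked example: with the default 1500-byte MTU, max_frame_size below + * works out to 1500 + ENET_HEADER_SIZE (14) + ETHERNET_FCS_SIZE (4) = + * 1518 bytes, the classic maximum Ethernet frame.)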
+ */ +static int e1000_init_hw_struct(struct e1000_adapter *adapter, + struct e1000_hw *hw) +{ + struct pci_dev *pdev = adapter->pdev; + + /* PCI config space info */ + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_id = pdev->subsystem_device; + hw->revision_id = pdev->revision; + + pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word); + + hw->max_frame_size = adapter->netdev->mtu + + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE; + + /* identify the MAC */ + if (e1000_set_mac_type(hw)) { + e_err(probe, "Unknown MAC Type\n"); + return -EIO; + } + + switch (hw->mac_type) { + default: + break; + case e1000_82541: + case e1000_82547: + case e1000_82541_rev_2: + case e1000_82547_rev_2: + hw->phy_init_script = 1; + break; + } + + e1000_set_media_type(hw); + e1000_get_bus_info(hw); + + hw->wait_autoneg_complete = false; + hw->tbi_compatibility_en = true; + hw->adaptive_ifs = true; + + /* Copper options */ + + if (hw->media_type == e1000_media_type_copper) { + hw->mdix = AUTO_ALL_MODES; + hw->disable_polarity_correction = false; + hw->master_slave = E1000_MASTER_SLAVE; + } + + return 0; +} + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter = NULL; + struct e1000_hw *hw; + + static int cards_found = 0; + static int global_quad_port_a = 0; /* global ksp3 port a indication */ + int i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 tmp = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + int bars, need_ioport; + bool disable_dev = false; + + /* do not allocate ioport bars when not needed */ + need_ioport = e1000_is_need_ioport(pdev); + if (need_ioport) { + bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO); + err = pci_enable_device(pdev); + } else { + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_enable_device_mem(pdev); + } + if (err) + return err; + + err = pci_request_selected_regions(pdev, bars, e1000_driver_name); + if (err) + goto err_pci_reg; + + pci_set_master(pdev); + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + adapter->bars = bars; + adapter->need_ioport = need_ioport; + + hw = &adapter->hw; + hw->back = adapter; + + err = -EIO; + hw->hw_addr = pci_ioremap_bar(pdev, BAR_0); + if (!hw->hw_addr) + goto err_ioremap; + + if (adapter->need_ioport) { + for (i = BAR_1; i <= BAR_5; i++) { + if (pci_resource_len(pdev, i) == 0) + continue; + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + hw->io_base = pci_resource_start(pdev, i); + break; + } + } + } + + /* make ready for any if (hw->...) 
below */ + err = e1000_init_hw_struct(adapter, hw); + if (err) + goto err_sw_init; + + /* there is a workaround being applied below that limits + * 64-bit DMA addresses to 64-bit hardware. There are some + * 32-bit adapters that Tx hang when given 64-bit DMA addresses + */ + pci_using_dac = 0; + if ((hw->bus_type == e1000_bus_type_pcix) && + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + pr_err("No usable DMA config, aborting\n"); + goto err_dma; + } + } + + netdev->netdev_ops = &e1000_netdev_ops; + e1000_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000_clean, 64); + + strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + + /* setup the private structure */ + + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + err = -EIO; + if (hw->mac_type == e1000_ce4100) { + hw->ce4100_gbe_mdio_base_virt = + ioremap(pci_resource_start(pdev, BAR_1), + pci_resource_len(pdev, BAR_1)); + + if (!hw->ce4100_gbe_mdio_base_virt) + goto err_mdio_ioremap; + } + + if (hw->mac_type >= e1000_82543) { + netdev->hw_features = NETIF_F_SG | + NETIF_F_HW_CSUM | + NETIF_F_HW_VLAN_CTAG_RX; + netdev->features = NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_FILTER; + } + + if ((hw->mac_type >= e1000_82544) && + (hw->mac_type != e1000_82547)) + netdev->hw_features |= NETIF_F_TSO; + + netdev->priv_flags |= IFF_SUPP_NOFCS; + + netdev->features |= netdev->hw_features; + netdev->hw_features |= (NETIF_F_RXCSUM | + NETIF_F_RXALL | + NETIF_F_RXFCS); + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + netdev->vlan_features |= (NETIF_F_TSO | + NETIF_F_HW_CSUM | + NETIF_F_SG); + + /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */ + if (hw->device_id != E1000_DEV_ID_82545EM_COPPER || + hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE) + netdev->priv_flags |= IFF_UNICAST_FLT; + + adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); + + /* initialize eeprom parameters */ + if (e1000_init_eeprom_params(hw)) { + e_err(probe, "EEPROM initialization failed\n"); + goto err_eeprom; + } + + /* before reading the EEPROM, reset the controller to + * put the device in a known good starting state + */ + + e1000_reset_hw(hw); + + /* make sure the EEPROM is good */ + if (e1000_validate_eeprom_checksum(hw) < 0) { + e_err(probe, "The EEPROM Checksum Is Not Valid\n"); + e1000_dump_eeprom(adapter); + /* set MAC address to all zeroes to invalidate and temporary + * disable this device for the user. 
This blocks regular + * traffic while still permitting ethtool ioctls from reaching + * the hardware as well as allowing the user to run the + * interface after manually setting a hw addr using + * `ip set address` + */ + memset(hw->mac_addr, 0, netdev->addr_len); + } else { + /* copy the MAC address out of the EEPROM */ + if (e1000_read_mac_addr(hw)) + e_err(probe, "EEPROM Read Error\n"); + } + /* don't block initialization here due to bad MAC address */ + memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) + e_err(probe, "Invalid MAC Address\n"); + + + INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog); + INIT_DELAYED_WORK(&adapter->fifo_stall_task, + e1000_82547_tx_fifo_stall_task); + INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task); + INIT_WORK(&adapter->reset_task, e1000_reset_task); + + e1000_check_options(adapter); + + /* Initial Wake on LAN setting + * If APM wake is enabled in the EEPROM, + * enable the ACPI Magic Packet filter + */ + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + case e1000_82543: + break; + case e1000_82544: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data); + eeprom_apme_mask = E1000_EEPROM_82544_APM; + break; + case e1000_82546: + case e1000_82546_rev_3: + if (er32(STATUS) & E1000_STATUS_FUNC_1){ + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); + break; + } + /* Fall Through */ + default: + e1000_read_eeprom(hw, + EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data); + break; + } + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + switch (pdev->device) { + case E1000_DEV_ID_82546GB_PCIE: + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546EB_FIBER: + case E1000_DEV_ID_82546GB_FIBER: + /* Wake events only supported on port A for dual fiber + * regardless of eeprom setting + */ + if (er32(STATUS) & E1000_STATUS_FUNC_1) + adapter->eeprom_wol = 0; + break; + case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3: + /* if quad port adapter, disable WoL on all but port A */ + if (global_quad_port_a != 0) + adapter->eeprom_wol = 0; + else + adapter->quad_port_a = true; + /* Reset for multiple quad port adapters */ + if (++global_quad_port_a == 4) + global_quad_port_a = 0; + break; + } + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + /* Auto detect PHY address */ + if (hw->mac_type == e1000_ce4100) { + for (i = 0; i < 32; i++) { + hw->phy_addr = i; + e1000_read_phy_reg(hw, PHY_ID2, &tmp); + if (tmp == 0 || tmp == 0xFF) { + if (i == 31) + goto err_eeprom; + continue; + } else + break; + } + } + + /* reset the hardware with the new settings */ + e1000_reset(adapter); + + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + + e1000_vlan_filter_on_off(adapter, false); + + /* print bus type/speed/width info */ + e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n", + ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""), + ((hw->bus_speed == e1000_bus_speed_133) ? 133 : + (hw->bus_speed == e1000_bus_speed_120) ? 120 : + (hw->bus_speed == e1000_bus_speed_100) ? 100 : + (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33), + ((hw->bus_width == e1000_bus_width_64) ? 
64 : 32), + netdev->dev_addr); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + e_info(probe, "Intel(R) PRO/1000 Network Connection\n"); + + cards_found++; + return 0; + +err_register: +err_eeprom: + e1000_phy_hw_reset(hw); + + if (hw->flash_address) + iounmap(hw->flash_address); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_dma: +err_sw_init: +err_mdio_ioremap: + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); +err_ioremap: + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, bars); +err_pci_reg: + if (!adapter || disable_dev) + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + bool disable_dev; + + e1000_down_and_stop(adapter); + e1000_release_manageability(adapter); + + unregister_netdev(netdev); + + e1000_phy_hw_reset(hw); + + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + if (hw->mac_type == e1000_ce4100) + iounmap(hw->ce4100_gbe_mdio_base_virt); + iounmap(hw->hw_addr); + if (hw->flash_address) + iounmap(hw->flash_address); + pci_release_selected_regions(pdev, adapter->bars); + + disable_dev = !test_and_set_bit(__E1000_DISABLED, &adapter->flags); + free_netdev(netdev); + + if (disable_dev) + pci_disable_device(pdev); +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * e1000_init_hw_struct MUST be called before this function + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + adapter->num_tx_queues = 1; + adapter->num_rx_queues = 1; + + if (e1000_alloc_queues(adapter)) { + e_err(probe, "Unable to allocate memory for queues\n"); + return -ENOMEM; + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + spin_lock_init(&adapter->stats_lock); + + set_bit(__E1000_DOWN, &adapter->flags); + + return 0; +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + * + * We allocate one ring per queue at run-time since we don't know the + * number of queues at compile-time. + **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + adapter->tx_ring = kcalloc(adapter->num_tx_queues, + sizeof(struct e1000_tx_ring), GFP_KERNEL); + if (!adapter->tx_ring) + return -ENOMEM; + + adapter->rx_ring = kcalloc(adapter->num_rx_queues, + sizeof(struct e1000_rx_ring), GFP_KERNEL); + if (!adapter->rx_ring) { + kfree(adapter->tx_ring); + return -ENOMEM; + } + + return E1000_SUCCESS; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). 
At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog task is started, + * and the stack is notified that the interface is ready. + **/ +static int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->flags)) + return -EBUSY; + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000_setup_all_tx_resources(adapter); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000_setup_all_rx_resources(adapter); + if (err) + goto err_setup_rx; + + e1000_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { + e1000_update_mng_vlan(adapter); + } + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* From here on the code is the same as e1000_up() */ + clear_bit(__E1000_DOWN, &adapter->flags); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + netif_start_queue(netdev); + + /* fire a link status change interrupt to start the watchdog */ + ew32(ICS, E1000_ICS_LSC); + + return E1000_SUCCESS; + +err_req_irq: + e1000_power_down_phy(adapter); + e1000_free_all_rx_resources(adapter); +err_setup_rx: + e1000_free_all_tx_resources(adapter); +err_setup_tx: + e1000_reset(adapter); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
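+ * + * (Before any of that, the body first polls __E1000_RESETTING in + * 10-20 ms steps, up to E1000_CHECK_RESET_COUNT iterations, so a + * concurrent e1000_reinit_locked() can finish instead of racing the + * teardown.)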
+ **/ +static int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + e1000_power_down_phy(adapter); + e1000_free_irq(adapter); + + e1000_free_all_tx_resources(adapter); + e1000_free_all_rx_resources(adapter); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + } + + return 0; +} + +/** + * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary + * @adapter: address of board private structure + * @start: address of beginning of memory + * @len: length of memory + **/ +static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start, + unsigned long len) +{ + struct e1000_hw *hw = &adapter->hw; + unsigned long begin = (unsigned long)start; + unsigned long end = begin + len; + + /* First rev 82545 and 82546 need to not allow any memory + * write location to cross 64k boundary due to errata 23 + */ + if (hw->mac_type == e1000_82545 || + hw->mac_type == e1000_ce4100 || + hw->mac_type == e1000_82546) { + return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; + } + + return true; +} + +/** + * e1000_setup_tx_resources - allocate Tx resources (Descriptors) + * @adapter: board private structure + * @txdr: tx descriptor ring (for a specific queue) to setup + * + * Return 0 on success, negative on failure + **/ +static int e1000_setup_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *txdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size; + + size = sizeof(struct e1000_tx_buffer) * txdr->count; + txdr->buffer_info = vzalloc(size); + if (!txdr->buffer_info) + return -ENOMEM; + + /* round up to nearest 4K */ + + txdr->size = txdr->count * sizeof(struct e1000_tx_desc); + txdr->size = ALIGN(txdr->size, 4096); + + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma, + GFP_KERNEL); + if (!txdr->desc) { +setup_tx_desc_die: + vfree(txdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + void *olddesc = txdr->desc; + dma_addr_t olddma = txdr->dma; + e_err(tx_err, "txdr align check failed: %u bytes at %p\n", + txdr->size, txdr->desc); + /* Try again, without freeing the previous */ + txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, + &txdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!txdr->desc) { + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + goto setup_tx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, txdr->size, txdr->desc, + txdr->dma); + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory " + "for the transmit descriptor ring\n"); + vfree(txdr->buffer_info); + return -ENOMEM; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, txdr->size, olddesc, + olddma); + } + } + memset(txdr->desc, 0, 
txdr->size); + + txdr->next_to_use = 0; + txdr->next_to_clean = 0; + + return 0; +} + +/** + * e1000_setup_all_tx_resources - wrapper to allocate Tx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_tx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { + err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); + if (err) { + e_err(probe, "Allocation for Tx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_tx_resources(adapter, + &adapter->tx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_configure_tx - Configure 8254x Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + u64 tdba; + struct e1000_hw *hw = &adapter->hw; + u32 tdlen, tctl, tipg; + u32 ipgr1, ipgr2; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + + switch (adapter->num_tx_queues) { + case 1: + default: + tdba = adapter->tx_ring[0].dma; + tdlen = adapter->tx_ring[0].count * + sizeof(struct e1000_tx_desc); + ew32(TDLEN, tdlen); + ew32(TDBAH, (tdba >> 32)); + ew32(TDBAL, (tdba & 0x00000000ffffffffULL)); + ew32(TDT, 0); + ew32(TDH, 0); + adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ? + E1000_TDH : E1000_82542_TDH); + adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ? + E1000_TDT : E1000_82542_TDT); + break; + } + + /* Set the default values for the Tx Inter Packet Gap timer */ + if ((hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes)) + tipg = DEFAULT_82543_TIPG_IPGT_FIBER; + else + tipg = DEFAULT_82543_TIPG_IPGT_COPPER; + + switch (hw->mac_type) { + case e1000_82542_rev2_0: + case e1000_82542_rev2_1: + tipg = DEFAULT_82542_TIPG_IPGT; + ipgr1 = DEFAULT_82542_TIPG_IPGR1; + ipgr2 = DEFAULT_82542_TIPG_IPGR2; + break; + default: + ipgr1 = DEFAULT_82543_TIPG_IPGR1; + ipgr2 = DEFAULT_82543_TIPG_IPGR2; + break; + } + tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT; + tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT; + ew32(TIPG, tipg); + + /* Set the Tx Interrupt Delay register */ + + ew32(TIDV, adapter->tx_int_delay); + if (hw->mac_type >= e1000_82540) + ew32(TADV, adapter->tx_abs_int_delay); + + /* Program the Transmit Control Register */ + + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + e1000_config_collision_dist(hw); + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + if (hw->mac_type < e1000_82543) + adapter->txd_cmd |= E1000_TXD_CMD_RPS; + else + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + /* Cache if we're 82544 running in PCI-X because we'll + * need this to apply a workaround later in the send path. 
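+ * + * (When the flag is set, the Tx mapping path avoids letting a DMA + * buffer end on certain dword alignments, carrying the trailing bytes + * into an extra descriptor; on unaffected parts the flag stays false + * and the check is skipped.)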
+ */ + if (hw->mac_type == e1000_82544 && + hw->bus_type == e1000_bus_type_pcix) + adapter->pcix_82544 = true; + + ew32(TCTL, tctl); + +} + +/** + * e1000_setup_rx_resources - allocate Rx resources (Descriptors) + * @adapter: board private structure + * @rxdr: rx descriptor ring (for a specific queue) to setup + * + * Returns 0 on success, negative on failure + **/ +static int e1000_setup_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rxdr) +{ + struct pci_dev *pdev = adapter->pdev; + int size, desc_len; + + size = sizeof(struct e1000_rx_buffer) * rxdr->count; + rxdr->buffer_info = vzalloc(size); + if (!rxdr->buffer_info) + return -ENOMEM; + + desc_len = sizeof(struct e1000_rx_desc); + + /* Round up to nearest 4K */ + + rxdr->size = rxdr->count * desc_len; + rxdr->size = ALIGN(rxdr->size, 4096); + + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, + GFP_KERNEL); + if (!rxdr->desc) { +setup_rx_desc_die: + vfree(rxdr->buffer_info); + return -ENOMEM; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + void *olddesc = rxdr->desc; + dma_addr_t olddma = rxdr->dma; + e_err(rx_err, "rxdr align check failed: %u bytes at %p\n", + rxdr->size, rxdr->desc); + /* Try again, without freeing the previous */ + rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, + &rxdr->dma, GFP_KERNEL); + /* Failed allocation, critical failure */ + if (!rxdr->desc) { + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + goto setup_rx_desc_die; + } + + if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) { + /* give up */ + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc, + rxdr->dma); + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + e_err(probe, "Unable to allocate aligned memory for " + "the Rx descriptor ring\n"); + goto setup_rx_desc_die; + } else { + /* Free old allocation, new allocation was successful */ + dma_free_coherent(&pdev->dev, rxdr->size, olddesc, + olddma); + } + } + memset(rxdr->desc, 0, rxdr->size); + + rxdr->next_to_clean = 0; + rxdr->next_to_use = 0; + rxdr->rx_skb_top = NULL; + + return 0; +} + +/** + * e1000_setup_all_rx_resources - wrapper to allocate Rx resources + * (Descriptors) for all queues + * @adapter: board private structure + * + * Return 0 on success, negative on failure + **/ +int e1000_setup_all_rx_resources(struct e1000_adapter *adapter) +{ + int i, err = 0; + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); + if (err) { + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + for (i-- ; i >= 0; i--) + e1000_free_rx_resources(adapter, + &adapter->rx_ring[i]); + break; + } + } + + return err; +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + rctl = er32(RCTL); + + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + + rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO | + E1000_RCTL_RDMTS_HALF | + (hw->mc_filter_type << E1000_RCTL_MO_SHIFT); + + if (hw->tbi_compatibility_on == 1) + rctl |= E1000_RCTL_SBP; + else + rctl &= ~E1000_RCTL_SBP; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case E1000_RXBUFFER_2048: + default: + 
rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case E1000_RXBUFFER_4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case E1000_RXBUFFER_8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case E1000_RXBUFFER_16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); +} + +/** + * e1000_configure_rx - Configure 8254x Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + u64 rdba; + struct e1000_hw *hw = &adapter->hw; + u32 rdlen, rctl, rxcsum; + + if (adapter->netdev->mtu > ETH_DATA_LEN) { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = adapter->rx_ring[0].count * + sizeof(struct e1000_rx_desc); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + if (hw->mac_type >= e1000_82540) { + ew32(RADV, adapter->rx_abs_int_delay); + if (adapter->itr_setting != 0) + ew32(ITR, 1000000000 / (adapter->itr * 256)); + } + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + switch (adapter->num_rx_queues) { + case 1: + default: + rdba = adapter->rx_ring[0].dma; + ew32(RDLEN, rdlen); + ew32(RDBAH, (rdba >> 32)); + ew32(RDBAL, (rdba & 0x00000000ffffffffULL)); + ew32(RDT, 0); + ew32(RDH, 0); + adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ? + E1000_RDH : E1000_82542_RDH); + adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ? 
+ E1000_RDT : E1000_82542_RDT); + break; + } + + /* Enable 82543 Receive Checksum Offload for TCP and UDP */ + if (hw->mac_type >= e1000_82543) { + rxcsum = er32(RXCSUM); + if (adapter->rx_csum) + rxcsum |= E1000_RXCSUM_TUOFL; + else + /* don't need to clear IPPCSE as it defaults to 0 */ + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + } + + /* Enable Receives */ + ew32(RCTL, rctl | E1000_RCTL_EN); +} + +/** + * e1000_free_tx_resources - Free Tx Resources per Queue + * @adapter: board private structure + * @tx_ring: Tx descriptor ring for a specific queue + * + * Free all transmit software resources + **/ +static void e1000_free_tx_resources(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(adapter, tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + + tx_ring->desc = NULL; +} + +/** + * e1000_free_all_tx_resources - Free Tx Resources for All Queues + * @adapter: board private structure + * + * Free all transmit software resources + **/ +void e1000_free_all_tx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); +} + +static void +e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, + struct e1000_tx_buffer *buffer_info) +{ + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; + /* buffer_info must be completely set up in the transmit path */ +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @adapter: board private structure + * @tx_ring: ring to be cleaned + **/ +static void e1000_clean_tx_ring(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_buffer *buffer_info; + unsigned long size; + unsigned int i; + + /* Free all the Tx ring sk_buffs */ + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_tx_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + tx_ring->last_tx_tso = false; + + writel(0, hw->hw_addr + tx_ring->tdh); + writel(0, hw->hw_addr + tx_ring->tdt); +} + +/** + * e1000_clean_all_tx_rings - Free Tx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) + e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); +} + +/** + * e1000_free_rx_resources - Free Rx Resources + * @adapter: board private structure + * @rx_ring: ring to clean the resources from + * + * Free all receive software resources + **/ +static void e1000_free_rx_resources(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_rx_ring(adapter, rx_ring); + 
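+ /* Teardown mirrors e1000_setup_rx_resources() in reverse: unmap + * and free the receive buffers first, then the buffer_info + * array, then the coherent descriptor ring. + */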
+ vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + + rx_ring->desc = NULL; +} + +/** + * e1000_free_all_rx_resources - Free Rx Resources for All Queues + * @adapter: board private structure + * + * Free all receive software resources + **/ +void e1000_free_all_rx_resources(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); +} + +#define E1000_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) +static unsigned int e1000_frag_len(const struct e1000_adapter *a) +{ + return SKB_DATA_ALIGN(a->rx_buffer_len + E1000_HEADROOM) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static void *e1000_alloc_frag(const struct e1000_adapter *a) +{ + unsigned int len = e1000_frag_len(a); + u8 *data = netdev_alloc_frag(len); + + if (likely(data)) + data += E1000_HEADROOM; + return data; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @adapter: board private structure + * @rx_ring: ring to free buffers from + **/ +static void e1000_clean_rx_ring(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_rx_buffer *buffer_info; + struct pci_dev *pdev = adapter->pdev; + unsigned long size; + unsigned int i; + + /* Free all the Rx netfrags */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (adapter->clean_rx == e1000_clean_rx_irq) { + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.data) { + skb_free_frag(buffer_info->rxbuf.data); + buffer_info->rxbuf.data = NULL; + } + } else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) { + if (buffer_info->dma) + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (buffer_info->rxbuf.page) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + } + } + + buffer_info->dma = 0; + } + + /* there also may be some cached data from a chained receive */ + napi_free_frags(&adapter->napi); + rx_ring->rx_skb_top = NULL; + + size = sizeof(struct e1000_rx_buffer) * rx_ring->count; + memset(rx_ring->buffer_info, 0, size); + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + + writel(0, hw->hw_addr + rx_ring->rdh); + writel(0, hw->hw_addr + rx_ring->rdt); +} + +/** + * e1000_clean_all_rx_rings - Free Rx Buffers for all queues + * @adapter: board private structure + **/ +static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_rx_queues; i++) + e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); +} + +/* The 82542 2.0 (revision 2) needs to have the receive unit in reset + * and memory write and invalidate disabled for certain operations + */ +static void e1000_enter_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + e1000_pci_clear_mwi(hw); + + rctl = er32(RCTL); + rctl |= E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (netif_running(netdev)) + e1000_clean_all_rx_rings(adapter); +} + +static void e1000_leave_82542_rst(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 rctl; + + rctl = 
er32(RCTL); + rctl &= ~E1000_RCTL_RST; + ew32(RCTL, rctl); + E1000_WRITE_FLUSH(); + mdelay(5); + + if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE) + e1000_pci_set_mwi(hw); + + if (netif_running(netdev)) { + /* No need to loop, because 82542 supports only 1 queue */ + struct e1000_rx_ring *ring = &adapter->rx_ring[0]; + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring)); + } +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len); + + e1000_rar_set(hw, hw->mac_addr, 0); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + return 0; +} + +/** + * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The set_rx_mode entry point is called whenever the unicast or multicast + * address lists or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + bool use_uc = false; + u32 rctl; + u32 hash_value; + int i, rar_entries = E1000_RAR_ENTRIES; + int mta_reg_count = E1000_NUM_MTA_REGISTERS; + u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC); + + if (!mcarray) + return; + + /* Check for Promiscuous and All Multicast modes */ + + rctl = er32(RCTL); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + rctl &= ~E1000_RCTL_VFE; + } else { + if (netdev->flags & IFF_ALLMULTI) + rctl |= E1000_RCTL_MPE; + else + rctl &= ~E1000_RCTL_MPE; + /* Enable VLAN filter if there is a VLAN */ + if (e1000_vlan_used(adapter)) + rctl |= E1000_RCTL_VFE; + } + + if (netdev_uc_count(netdev) > rar_entries - 1) { + rctl |= E1000_RCTL_UPE; + } else if (!(netdev->flags & IFF_PROMISC)) { + rctl &= ~E1000_RCTL_UPE; + use_uc = true; + } + + ew32(RCTL, rctl); + + /* 82542 2.0 needs to be in reset to write receive address registers */ + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_enter_82542_rst(adapter); + + /* load the first 14 addresses into the exact filters 1-14. Unicast + * addresses take precedence to avoid disabling unicast filtering + * when possible. 
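+ * (With E1000_RAR_ENTRIES == 15, RAR[0] holds the station address + * and RAR[1]..RAR[14] the exact-match entries; anything beyond + * that spills into the MTA hash handled below.)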
+ * + * RAR 0 is used for the station MAC address. + * If there are not 14 addresses, go ahead and clear the filters + */ + i = 1; + if (use_uc) + netdev_for_each_uc_addr(ha, netdev) { + if (i == rar_entries) + break; + e1000_rar_set(hw, ha->addr, i++); + } + + netdev_for_each_mc_addr(ha, netdev) { + if (i == rar_entries) { + /* load any remaining addresses into the hash table */ + u32 hash_reg, hash_bit, mta; + hash_value = e1000_hash_mc_addr(hw, ha->addr); + hash_reg = (hash_value >> 5) & 0x7F; + hash_bit = hash_value & 0x1F; + mta = (1 << hash_bit); + mcarray[hash_reg] |= mta; + } else { + e1000_rar_set(hw, ha->addr, i++); + } + } + + for (; i < rar_entries; i++) { + E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0); + E1000_WRITE_FLUSH(); + E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0); + E1000_WRITE_FLUSH(); + } + + /* write the hash table completely; write from the bottom to avoid + * both stupid write-combining chipsets and flushing each write + */ + for (i = mta_reg_count - 1; i >= 0; i--) { + /* The 82544 has an errata where writing odd + * offsets overwrites the previous even offset; writing + * backwards over the range solves the issue by always + * writing the odd offset first + */ + E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]); + } + E1000_WRITE_FLUSH(); + + if (hw->mac_type == e1000_82542_rev2_0) + e1000_leave_82542_rst(adapter); + + kfree(mcarray); +} + +/** + * e1000_update_phy_info_task - get phy info + * @work: work struct contained inside adapter struct + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + */ +static void e1000_update_phy_info_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + phy_info_task.work); + + e1000_phy_get_info(&adapter->hw, &adapter->phy_info); +} + +/** + * e1000_82547_tx_fifo_stall_task - task to complete work + * @work: work struct contained inside adapter struct + **/ +static void e1000_82547_tx_fifo_stall_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + fifo_stall_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 tctl; + + if (atomic_read(&adapter->tx_fifo_stall)) { + if ((er32(TDT) == er32(TDH)) && + (er32(TDFT) == er32(TDFH)) && + (er32(TDFTS) == er32(TDFHS))) { + tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + ew32(TDFT, adapter->tx_head_addr); + ew32(TDFH, adapter->tx_head_addr); + ew32(TDFTS, adapter->tx_head_addr); + ew32(TDFHS, adapter->tx_head_addr); + ew32(TCTL, tctl); + E1000_WRITE_FLUSH(); + + adapter->tx_fifo_head = 0; + atomic_set(&adapter->tx_fifo_stall, 0); + netif_wake_queue(netdev); + } else if (!test_bit(__E1000_DOWN, &adapter->flags)) { + schedule_delayed_work(&adapter->fifo_stall_task, 1); + } + } +} + +bool e1000_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + + /* get_link_status is set on LSC (link status) interrupt or rx + * sequence error interrupt (except on intel ce4100).
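+ * (The ce4100 doesn't raise that interrupt, so the copper path + * below forces get_link_status to 1 before every check.)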
+ * get_link_status will stay false until the + * e1000_check_for_link establishes link for copper adapters + * ONLY + */ + switch (hw->media_type) { + case e1000_media_type_copper: + if (hw->mac_type == e1000_ce4100) + hw->get_link_status = 1; + if (hw->get_link_status) { + e1000_check_for_link(hw); + link_active = !hw->get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + e1000_check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + e1000_check_for_link(hw); + link_active = hw->serdes_has_link; + break; + default: + break; + } + + return link_active; +} + +/** + * e1000_watchdog - work function + * @work: work struct contained inside adapter struct + **/ +static void e1000_watchdog(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task.work); + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_ring *txdr = adapter->tx_ring; + u32 link, tctl; + + link = e1000_has_link(adapter); + if ((netif_carrier_ok(netdev)) && link) + goto link_up; + + if (link) { + if (!netif_carrier_ok(netdev)) { + u32 ctrl; + bool txb2b = true; + /* update snapshot of PHY registers on LSC */ + e1000_get_speed_and_duplex(hw, + &adapter->link_speed, + &adapter->link_duplex); + + ctrl = er32(CTRL); + pr_info("%s NIC Link is Up %d Mbps %s, " + "Flow Control: %s\n", + netdev->name, + adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? + "Full Duplex" : "Half Duplex", + ((ctrl & E1000_CTRL_TFCE) && (ctrl & + E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & + E1000_CTRL_RFCE) ? "RX" : ((ctrl & + E1000_CTRL_TFCE) ? "TX" : "None"))); + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = false; + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + txb2b = false; + /* maybe add some timeout factor ? */ + break; + } + + /* enable transmits in the hardware */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + netif_carrier_on(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + adapter->smartspeed = 0; + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + pr_info("%s NIC Link is Down\n", + netdev->name); + netif_carrier_off(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->phy_info_task, + 2 * HZ); + } + + e1000_smartspeed(adapter); + } + +link_up: + e1000_update_stats(adapter); + + hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + hw->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old; + adapter->gorcl_old = adapter->stats.gorcl; + adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old; + adapter->gotcl_old = adapter->stats.gotcl; + + e1000_update_adaptive(hw); + + if (!netif_carrier_ok(netdev)) { + if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). 
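+ * (schedule_work() below hands the reset to e1000_reset_task(), + * which calls e1000_reinit_locked() from process context.)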
+ */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); + /* exit immediately since reset is imminent */ + return; + } + } + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotcl + adapter->gorcl) / 10000; + u32 dif = (adapter->gotcl > adapter->gorcl ? + adapter->gotcl - adapter->gorcl : + adapter->gorcl - adapter->gotcl) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + ew32(ITR, 1000000000 / (itr * 256)); + } + + /* Cause software interrupt to ensure rx ring is cleaned */ + ew32(ICS, E1000_ICS_RXDMT0); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* Reschedule the task */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 2 * HZ); +} + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
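+ * For example, 4 packets totalling 48000 bytes in one interval + * give bytes/packets = 12000, which is above the 8000 jumbo + * threshold, so the interval is classed as bulk traffic.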
+ * this functionality is controlled by the InterruptThrottleRate module + * parameter (see e1000_param.c) + **/ +static unsigned int e1000_update_itr(struct e1000_adapter *adapter, + u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + struct e1000_hw *hw = &adapter->hw; + + if (unlikely(hw->mac_type < e1000_82540)) + goto update_itr_done; + + if (packets == 0) + goto update_itr_done; + + switch (itr_setting) { + case lowest_latency: + /* jumbo frames get bulk treatment*/ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* jumbo frames need bulk latency setting */ + if (bytes/packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes/packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes/packets > 2000) + retval = bulk_latency; + else if (packets <= 2 && bytes < 512) + retval = lowest_latency; + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + +update_itr_done: + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 current_itr; + u32 new_itr = adapter->itr; + + if (unlikely(hw->mac_type < e1000_82540)) + return; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (unlikely(adapter->link_speed != SPEED_1000)) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter, adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter, adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + switch (current_itr) { + /* counts and packets in update_itr are dependent on these numbers */ + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? 
+ min(adapter->itr + (new_itr >> 2), new_itr) : + new_itr; + adapter->itr = new_itr; + ew32(ITR, 1000000000 / (new_itr * 256)); + } +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, tucse, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + + if (skb_is_gso(skb)) { + int err; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, + iph->daddr, 0, + IPPROTO_TCP, + 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = + ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + tucse = 0; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse); + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (++i == tx_ring->count) i = 0; + tx_ring->next_to_use = i; + + return true; + } + return false; +} + +static bool e1000_tx_csum(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn(drv, "checksum_partial proto=%x!\n", + skb->protocol); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + 
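+ /* A partial checksum needs three offsets: tucss marks where + * checksumming starts (the L4 header), tucso where the result is + * written back (css + skb->csum_offset, i.e. the check field), + * and tucse = 0 means checksum to the end of the packet. + */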
context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = + css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(cmd_len); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + if (unlikely(++i == tx_ring->count)) i = 0; + tx_ring->next_to_use = i; + + return true; +} + +#define E1000_MAX_TXD_PWR 12 +#define E1000_MAX_DATA_PER_TXD (1<<E1000_MAX_TXD_PWR) + +static int e1000_tx_map(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, + struct sk_buff *skb, unsigned int first, + unsigned int max_per_txd, unsigned int nr_frags, + unsigned int mss) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_tx_buffer *buffer_info; + unsigned int len = skb_headlen(skb); + unsigned int offset = 0, size, count = 0, i; + unsigned int f, bytecount, segs; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + /* Workaround for Controller erratum -- + * descriptor for non-tso packet in a linear SKB that follows a + * tso gets written back prematurely before the data is fully + * DMA'd to the controller + */ + if (!skb->data_len && tx_ring->last_tx_tso && + !skb_is_gso(skb)) { + tx_ring->last_tx_tso = false; + size -= 4; + } + + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc + */ + if (unlikely(mss && !nr_frags && size == len && size > 8)) + size -= 4; + /* work-around for errata 10 and it applies + * to all controllers in PCI-X mode + * The fix is to make sure that the first descriptor of a + * packet is smaller than 2048 - 16 - 16 (or 2016) bytes + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (size > 2015) && count == 0)) + size = 2015; + + /* Workaround for potential 82544 hang in PCI-X. Avoid + * terminating buffers within evenly-aligned dwords. + */ + if (unlikely(adapter->pcix_82544 && + !((unsigned long)(skb->data + offset + size - 1) & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + /* set time_stamp *before* dma to help avoid a possible race */ + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = false; + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + if (len) { + i++; + if (unlikely(i == tx_ring->count)) + i = 0; + } + } + + for (f = 0; f < nr_frags; f++) { + const struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = skb_frag_size(frag); + offset = 0; + + while (len) { + unsigned long bufend; + i++; + if (unlikely(i == tx_ring->count)) + i = 0; + + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + /* Workaround for premature desc write-backs + * in TSO mode. Append 4-byte sentinel desc + */ + if (unlikely(mss && f == (nr_frags-1) && + size == len && size > 8)) + size -= 4; + /* Workaround for potential 82544 hang in PCI-X. + * Avoid terminating buffers within evenly-aligned + * dwords.
+ */ + bufend = (unsigned long) + page_to_phys(skb_frag_page(frag)); + bufend += offset + size - 1; + if (unlikely(adapter->pcix_82544 && + !(bufend & 4) && + size > 4)) + size -= 4; + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->mapped_as_page = true; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + buffer_info->next_to_watch = i; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ?: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "TX DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i==0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring, int tx_flags, + int count) +{ + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_tx_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (likely(tx_flags & E1000_TX_FLAGS_TSO)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (likely(tx_flags & E1000_TX_FLAGS_IPV4)) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + i = tx_ring->next_to_use; + + while (count--) { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = + cpu_to_le32(txd_lower | buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + if (unlikely(++i == tx_ring->count)) i = 0; + } + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + + tx_ring->next_to_use = i; +} + +/* 82547 workaround to avoid controller hang in half-duplex environment. + * The workaround is to avoid queuing a large packet that would span + * the internal Tx FIFO ring boundary by notifying the stack to resend + * the packet at a later time. This gives the Tx FIFO an opportunity to + * flush all packets. When that occurs, we reset the Tx FIFO pointers + * to the beginning of the Tx FIFO. 
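+ * Worked example with the constants below: a 1514-byte frame plus + * the 16-byte FIFO header rounds up to skb_fifo_len = 1536; with + * E1000_82547_PAD_LEN = 0x3E0 (992), the frame is held back once + * free FIFO space drops to 1536 - 992 = 544 bytes or less.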
+ */ + +#define E1000_FIFO_HDR 0x10 +#define E1000_82547_PAD_LEN 0x3E0 + +static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head; + u32 skb_fifo_len = skb->len + E1000_FIFO_HDR; + + skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR); + + if (adapter->link_duplex != HALF_DUPLEX) + goto no_fifo_stall_required; + + if (atomic_read(&adapter->tx_fifo_stall)) + return 1; + + if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) { + atomic_set(&adapter->tx_fifo_stall, 1); + return 1; + } + +no_fifo_stall_required: + adapter->tx_fifo_head += skb_fifo_len; + if (adapter->tx_fifo_head >= adapter->tx_fifo_size) + adapter->tx_fifo_head -= adapter->tx_fifo_size; + return 0; +} + +static int __e1000_maybe_stop_tx(struct net_device *netdev, int size) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + + netif_stop_queue(netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (likely(E1000_DESC_UNUSED(tx_ring) < size)) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct net_device *netdev, + struct e1000_tx_ring *tx_ring, int size) +{ + if (likely(E1000_DESC_UNUSED(tx_ring) >= size)) + return 0; + return __e1000_maybe_stop_tx(netdev, size); +} + +#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1 ) +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_ring *tx_ring; + unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; + unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + __be16 protocol = vlan_get_protocol(skb); + + /* This goes back to the question of how to logically map a Tx queue + * to a flow. Right now, performance is impacted slightly negatively + * if using multiple Tx queues. If the stack breaks away from a + * single qdisc implementation, we can look at this again. + */ + tx_ring = adapter->tx_ring; + + /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, + * packets may get corrupted during padding by HW. + * To WA this issue, pad all small packets manually. + */ + if (eth_skb_pad(skb)) + return NETDEV_TX_OK; + + mss = skb_shinfo(skb)->gso_size; + /* The controller does a simple calculation to + * make sure there is enough room in the FIFO before + * initiating the DMA for each buffer. The calc is: + * 4 = ceil(buffer len/mss). To make sure we don't + * overrun the FIFO, adjust the max buffer len if mss + * drops. 
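+ * E.g. with mss = 536, max_per_txd becomes min(536 << 2, 4096) + * = 2144 and max_txd_pwr = fls(2144) - 1 = 11, so TXD_USE_COUNT() + * then counts descriptors in 2048-byte chunks.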
+ */ + if (mss) { + u8 hdr_len; + max_per_txd = min(mss << 2, max_per_txd); + max_txd_pwr = fls(max_per_txd) - 1; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + if (skb->data_len && hdr_len == len) { + switch (hw->mac_type) { + unsigned int pull_size; + case e1000_82544: + /* Make sure we have room to chop off 4 bytes, + * and that the end alignment will work out to + * this hardware's requirements + * NOTE: this is a TSO only workaround + * if end byte alignment not correct move us + * into the next dword + */ + if ((unsigned long)(skb_tail_pointer(skb) - 1) + & 4) + break; + /* fall through */ + pull_size = min((unsigned int)4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err(drv, "__pskb_pull_tail " + "failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + break; + default: + /* do nothing */ + break; + } + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + /* Controller Erratum workaround */ + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) + count++; + + count += TXD_USE_COUNT(len, max_txd_pwr); + + if (adapter->pcix_82544) + count++; + + /* work-around for errata 10 and it applies to all controllers + * in PCI-X mode, so add one more descriptor to the count + */ + if (unlikely((hw->bus_type == e1000_bus_type_pcix) && + (len > 2015))) + count++; + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), + max_txd_pwr); + if (adapter->pcix_82544) + count += nr_frags; + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) + return NETDEV_TX_BUSY; + + if (unlikely((hw->mac_type == e1000_82547) && + (e1000_82547_fifo_workaround(adapter, skb)))) { + netif_stop_queue(netdev); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->fifo_stall_task, 1); + return NETDEV_TX_BUSY; + } + + if (skb_vlan_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(adapter, tx_ring, skb, protocol); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (likely(tso)) { + if (likely(hw->mac_type != e1000_82544)) + tx_ring->last_tx_tso = true; + tx_flags |= E1000_TX_FLAGS_TSO; + } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) + tx_flags |= E1000_TX_FLAGS_CSUM; + + if (protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd, + nr_frags, mss); + + if (count) { + netdev_sent_queue(netdev, skb->len); + skb_tx_timestamp(skb); + + e1000_tx_queue(adapter, tx_ring, tx_flags, count); + /* Make sure there is space in the ring for the next send. 
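+ * The MAX_SKB_FRAGS + 2 reservation below stops the queue early: + * room for a worst-case fragmented skb plus the 2-descriptor gap + * that keeps tail from touching head.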
*/ + e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2); + + if (!skb->xmit_more || + netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { + writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt); + /* we need this if more than one processor can write to + * our tail at a time, it synchronizes IO on IA64/Altix + * systems + */ + mmiowb(); + } + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +#define NUM_REGS 38 /* 1 based count */ +static void e1000_regdump(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 regs[NUM_REGS]; + u32 *regs_buff = regs; + int i = 0; + + static const char * const reg_name[] = { + "CTRL", "STATUS", + "RCTL", "RDLEN", "RDH", "RDT", "RDTR", + "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT", + "TIDV", "TXDCTL", "TADV", "TARC0", + "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1", + "TXDCTL1", "TARC1", + "CTRL_EXT", "ERT", "RDBAL", "RDBAH", + "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC", + "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC" + }; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN); + regs_buff[4] = er32(RDH); + regs_buff[5] = er32(RDT); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDBAL); + regs_buff[9] = er32(TDBAH); + regs_buff[10] = er32(TDLEN); + regs_buff[11] = er32(TDH); + regs_buff[12] = er32(TDT); + regs_buff[13] = er32(TIDV); + regs_buff[14] = er32(TXDCTL); + regs_buff[15] = er32(TADV); + regs_buff[16] = er32(TARC0); + + regs_buff[17] = er32(TDBAL1); + regs_buff[18] = er32(TDBAH1); + regs_buff[19] = er32(TDLEN1); + regs_buff[20] = er32(TDH1); + regs_buff[21] = er32(TDT1); + regs_buff[22] = er32(TXDCTL1); + regs_buff[23] = er32(TARC1); + regs_buff[24] = er32(CTRL_EXT); + regs_buff[25] = er32(ERT); + regs_buff[26] = er32(RDBAL0); + regs_buff[27] = er32(RDBAH0); + regs_buff[28] = er32(TDFH); + regs_buff[29] = er32(TDFT); + regs_buff[30] = er32(TDFHS); + regs_buff[31] = er32(TDFTS); + regs_buff[32] = er32(TDFPC); + regs_buff[33] = er32(RDFH); + regs_buff[34] = er32(RDFT); + regs_buff[35] = er32(RDFHS); + regs_buff[36] = er32(RDFTS); + regs_buff[37] = er32(RDFPC); + + pr_info("Register dump\n"); + for (i = 0; i < NUM_REGS; i++) + pr_info("%-15s %08x\n", reg_name[i], regs_buff[i]); +} + +/* + * e1000_dump: Print registers, tx ring and rx ring + */ +static void e1000_dump(struct e1000_adapter *adapter) +{ + /* this code doesn't handle multiple rings */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + + if (!netif_msg_hw(adapter)) + return; + + /* Print Registers */ + e1000_regdump(adapter); + + /* transmit dump */ + pr_info("TX Desc ring0 dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * 
+----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestmp bi->skb\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestmp bi->skb\n"); + + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct e1000_tx_buffer *buffer_info = &tx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)tx_desc; + const char *type; + + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + type = "NTC/U"; + else if (i == tx_ring->next_to_use) + type = "NTU"; + else if (i == tx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p %s\n", + ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i, + le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->length, + buffer_info->next_to_watch, + (u64)buffer_info->time_stamp, buffer_info->skb, type); + } + +rx_ring_summary: + /* receive dump */ + pr_info("\nRX Desc ring dump\n"); + + /* Legacy Receive Descriptor Format + * + * +-----------------------------------------------------+ + * | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * | VLAN Tag | Errors | Status 0 | Packet csum | Length | + * +-----------------------------------------------------+ + * 63 48 47 40 39 32 31 16 15 0 + */ + pr_info("R[desc] [address 63:0 ] [vl er S cks ln] [bi->dma ] [bi->skb]\n"); + + if (!netif_msg_rx_status(adapter)) + goto exit; + + for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) { + struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); + struct e1000_rx_buffer *buffer_info = &rx_ring->buffer_info[i]; + struct my_u { __le64 a; __le64 b; }; + struct my_u *u = (struct my_u *)rx_desc; + const char *type; + + if (i == rx_ring->next_to_use) + type = "NTU"; + else if (i == rx_ring->next_to_clean) + type = "NTC"; + else + type = ""; + + pr_info("R[0x%03X] %016llX %016llX %016llX %p %s\n", + i, le64_to_cpu(u->a), le64_to_cpu(u->b), + (u64)buffer_info->dma, buffer_info->rxbuf.data, type); + } /* for */ + + /* dump the descriptor caches */ + /* rx */ + pr_info("Rx descriptor cache in 64bit format\n"); + for (i = 0x6000; i <= 0x63FF ; i += 0x10) { + pr_info("R%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } + /* tx */ + pr_info("Tx descriptor cache in 64bit format\n"); + for (i = 0x7000; i <= 0x73FF ; i += 0x10) { + pr_info("T%04X: %08X|%08X %08X|%08X\n", + i, + readl(adapter->hw.hw_addr + i+4), + readl(adapter->hw.hw_addr + i), + readl(adapter->hw.hw_addr + i+12), + readl(adapter->hw.hw_addr + i+8)); + } +exit: + return; +} + +/** + * e1000_tx_timeout - 
Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void e1000_tx_timeout(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = + container_of(work, struct e1000_adapter, reset_task); + + e_err(drv, "Reset adapter\n"); + e1000_reinit_locked(adapter); +} + +/** + * e1000_get_stats - Get System Network Statistics + * @netdev: network interface device structure + * + * Returns the address of the device statistics structure. + * The statistics are actually updated from the watchdog. + **/ +static struct net_device_stats *e1000_get_stats(struct net_device *netdev) +{ + /* only return the current stats */ + return &netdev->stats; +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE; + + if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || + (max_frame > MAX_JUMBO_FRAME_SIZE)) { + e_err(probe, "Invalid MTU setting\n"); + return -EINVAL; + } + + /* Adapter-specific max frame size limits. */ + switch (hw->mac_type) { + case e1000_undefined ... e1000_82542_rev2_1: + if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) { + e_err(probe, "Jumbo Frames not supported.\n"); + return -EINVAL; + } + break; + default: + /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */ + break; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->flags)) + msleep(1); + /* e1000_down has a dependency on max_frame_size */ + hw->max_frame_size = max_frame; + if (netif_running(netdev)) { + /* prevent buffers from being reallocated */ + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers; + e1000_down(adapter); + } + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. 
RXBUFFER_2048 --> size-4096 slab + * however with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= E1000_RXBUFFER_2048) + adapter->rx_buffer_len = E1000_RXBUFFER_2048; + else +#if (PAGE_SIZE >= E1000_RXBUFFER_16384) + adapter->rx_buffer_len = E1000_RXBUFFER_16384; +#elif (PAGE_SIZE >= E1000_RXBUFFER_4096) + adapter->rx_buffer_len = PAGE_SIZE; +#endif + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (!hw->tbi_compatibility_on && + ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) || + (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))) + adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; + + pr_info("%s changing MTU from %d to %d\n", + netdev->name, netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + if (netif_running(netdev)) + e1000_up(adapter); + else + e1000_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->flags); + + return 0; +} + +/** + * e1000_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +void e1000_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + unsigned long flags; + u16 phy_tmp; + +#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. + */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + spin_lock_irqsave(&adapter->stats_lock, flags); + + /* these counters are modified from e1000_tbi_adjust_stats, + * called from the interrupt context, so they must only + * be written while holding adapter->stats_lock + */ + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorcl += er32(GORCL); + adapter->stats.gorch += er32(GORCH); + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.prc64 += er32(PRC64); + adapter->stats.prc127 += er32(PRC127); + adapter->stats.prc255 += er32(PRC255); + adapter->stats.prc511 += er32(PRC511); + adapter->stats.prc1023 += er32(PRC1023); + adapter->stats.prc1522 += er32(PRC1522); + + adapter->stats.symerrs += er32(SYMERRS); + adapter->stats.mpc += er32(MPC); + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + adapter->stats.sec += er32(SEC); + adapter->stats.rlec += er32(RLEC); + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.fcruc += er32(FCRUC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotcl += er32(GOTCL); + adapter->stats.gotch += er32(GOTCH); + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + adapter->stats.rfc += er32(RFC); + adapter->stats.rjc += er32(RJC); + adapter->stats.torl += er32(TORL); + adapter->stats.torh += er32(TORH); + adapter->stats.totl += er32(TOTL); + adapter->stats.toth += er32(TOTH); + adapter->stats.tpr += er32(TPR); + + adapter->stats.ptc64 += er32(PTC64); + adapter->stats.ptc127 += er32(PTC127); + adapter->stats.ptc255 += er32(PTC255); + adapter->stats.ptc511 += er32(PTC511); + adapter->stats.ptc1023 += er32(PTC1023); + adapter->stats.ptc1522 += er32(PTC1522); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* 
used for adaptive IFS */ + + hw->tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->tx_packet_delta; + hw->collision_delta = er32(COLC); + adapter->stats.colc += hw->collision_delta; + + if (hw->mac_type >= e1000_82543) { + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.tncrs += er32(TNCRS); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + } + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + + adapter->stats.cexterr; + adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; + netdev->stats.rx_length_errors = adapter->stats.rlerrc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_errors = adapter->stats.txerrc; + netdev->stats.tx_aborted_errors = adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + if (hw->bad_tx_carr_stats_fd && + adapter->link_duplex == FULL_DUPLEX) { + netdev->stats.tx_carrier_errors = 0; + adapter->stats.tncrs = 0; + } + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Phy Stats */ + if (hw->media_type == e1000_media_type_copper) { + if ((adapter->link_speed == SPEED_1000) && + (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) { + phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK; + adapter->phy_stats.idle_errors += phy_tmp; + } + + if ((hw->mac_type <= e1000_82546) && + (hw->phy_type == e1000_phy_m88) && + !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp)) + adapter->phy_stats.receive_errors += phy_tmp; + } + + /* Management Stats */ + if (hw->has_smbus) { + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + } + + spin_unlock_irqrestore(&adapter->stats_lock, flags); +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (unlikely((!icr))) + return IRQ_NONE; /* Not our interrupt */ + + /* we might have caused the interrupt, but the above + * read cleared it, and just in case the driver is + * down there is nothing to do so return handled + */ + if (unlikely(test_bit(__E1000_DOWN, &adapter->flags))) + return IRQ_HANDLED; + + if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { + hw->get_link_status = 1; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + schedule_delayed_work(&adapter->watchdog_task, 1); + } + + /* disable interrupts, without the synchronize_irq bit */ + ew32(IMC, ~0); + E1000_WRITE_FLUSH(); + + if (likely(napi_schedule_prep(&adapter->napi))) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + 
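+ /* These per-poll accumulators feed e1000_set_itr() once the + * NAPI poll in e1000_clean() completes. + */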
adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } else { + /* this really should not happen! if it does it is basically a + * bug, but not a hard error, so enable ints and continue + */ + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return IRQ_HANDLED; +} + +/** + * e1000_clean - NAPI Rx polling callback + * @adapter: board private structure + **/ +static int e1000_clean(struct napi_struct *napi, int budget) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + int tx_clean_complete = 0, work_done = 0; + + tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]); + + adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); + + if (!tx_clean_complete) + work_done = budget; + + /* If budget not fully consumed, exit the polling mode */ + if (work_done < budget) { + if (likely(adapter->itr_setting & 3)) + e1000_set_itr(adapter); + napi_complete_done(napi, work_done); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + } + + return work_done; +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @adapter: board private structure + **/ +static bool e1000_clean_tx_irq(struct e1000_adapter *adapter, + struct e1000_tx_ring *tx_ring) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_tx_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes=0, total_tx_packets=0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + dma_rmb(); /* read buffer_info after eop_desc */ + for ( ; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + + } + e1000_unmap_and_free_tx_resource(adapter, buffer_info); + tx_desc->upper.data = 0; + + if (unlikely(++i == tx_ring->count)) i = 0; + } + + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + /* Synchronize with E1000_DESC_UNUSED called from e1000_xmit_frame, + * which will reuse the cleaned buffers. + */ + smp_store_release(&tx_ring->next_to_clean, i); + + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + +#define TX_WAKE_THRESHOLD 32 + if (unlikely(count && netif_carrier_ok(netdev) && + E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
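+ * (The smp_mb() below orders the new next_to_clean value before the + * netif_queue_stopped() check that follows.)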
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->flags))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[eop].time_stamp && + time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) { + + /* detected Tx unit hang */ + e_err(drv, "Detected Tx Unit Hang\n" + " Tx Queue <%lu>\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n", + (unsigned long)(tx_ring - adapter->tx_ring), + readl(hw->hw_addr + tx_ring->tdh), + readl(hw->hw_addr + tx_ring->tdt), + tx_ring->next_to_use, + tx_ring->next_to_clean, + tx_ring->buffer_info[eop].time_stamp, + eop, + jiffies, + eop_desc->upper.fields.status); + e1000_dump(adapter); + netif_stop_queue(netdev); + } + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + netdev->stats.tx_bytes += total_tx_bytes; + netdev->stats.tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_rx_checksum - Receive Checksum Offload for 82543 + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @sk_buff: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + u32 csum, struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* 82543 or newer only */ + if (unlikely(hw->mac_type < e1000_82543)) return; + /* Ignore Checksum bit is set */ + if (unlikely(status & E1000_RXD_STAT_IXSM)) return; + /* TCP/UDP checksum error bit is set */ + if (unlikely(errors & E1000_RXD_ERR_TCPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + /* TCP/UDP Checksum has not been calculated */ + if (!(status & E1000_RXD_STAT_TCPCS)) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + if (likely(status & E1000_RXD_STAT_TCPCS)) { + /* TCP checksum is good */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + adapter->hw_csum_good++; +} + +/** + * e1000_consume_page - helper function for jumbo Rx path + **/ +static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->rxbuf.page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_receive_skb - helper function to handle rx indications + * @adapter: board private structure + * @status: descriptor status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + */ +static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status, + __le16 vlan, struct sk_buff *skb) +{ + skb->protocol = eth_type_trans(skb, adapter->netdev); + + if (status & E1000_RXD_STAT_VP) { + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + napi_gro_receive(&adapter->napi, skb); +} + +/** + * 
e1000_tbi_adjust_stats + * @hw: Struct containing variables accessed by shared code + * @stats: hw statistics counters to adjust + * @frame_len: The length of the frame in question + * @mac_addr: The Ethernet destination address of the frame in question + * + * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT + */ +static void e1000_tbi_adjust_stats(struct e1000_hw *hw, + struct e1000_hw_stats *stats, + u32 frame_len, const u8 *mac_addr) +{ + u64 carry_bit; + + /* First adjust the frame length. */ + frame_len--; + /* We need to adjust the statistics counters, since the hardware + * counters overcount this packet as a CRC error and undercount + * the packet as a good packet + */ + /* This packet should not be counted as a CRC error. */ + stats->crcerrs--; + /* This packet does count as a Good Packet Received. */ + stats->gprc++; + + /* Adjust the Good Octets received counters */ + carry_bit = 0x80000000 & stats->gorcl; + stats->gorcl += frame_len; + /* If the high bit of Gorcl (the low 32 bits of the Good Octets + * Received Count) was one before the addition, + * AND it is zero after, then we lost the carry out, + * need to add one to Gorch (Good Octets Received Count High). + * This could be simplified if all environments supported + * 64-bit integers. + */ + if (carry_bit && ((stats->gorcl & 0x80000000) == 0)) + stats->gorch++; + /* Is this a broadcast or multicast? Check broadcast first, + * since the test for a multicast frame will test positive on + * a broadcast frame. + */ + if (is_broadcast_ether_addr(mac_addr)) + stats->bprc++; + else if (is_multicast_ether_addr(mac_addr)) + stats->mprc++; + + if (frame_len == hw->max_frame_size) { + /* In this case, the hardware has overcounted the number of + * oversize frames. + */ + if (stats->roc > 0) + stats->roc--; + } + + /* Adjust the bin counters when the extra byte put the frame in the + * wrong bin. Remember that the frame_len was adjusted above. 
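+ * For example, a frame the hardware counted as 65 bytes (prc127 bin) + * is really a 64-byte frame after the adjustment, so it is moved from + * prc127 into prc64.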
+ */ + if (frame_len == 64) { + stats->prc64++; + stats->prc127--; + } else if (frame_len == 127) { + stats->prc127++; + stats->prc255--; + } else if (frame_len == 255) { + stats->prc255++; + stats->prc511--; + } else if (frame_len == 511) { + stats->prc511++; + stats->prc1023--; + } else if (frame_len == 1023) { + stats->prc1023++; + stats->prc1522--; + } else if (frame_len == 1522) { + stats->prc1522++; + } +} + +static bool e1000_tbi_should_accept(struct e1000_adapter *adapter, + u8 status, u8 errors, + u32 length, const u8 *data) +{ + struct e1000_hw *hw = &adapter->hw; + u8 last_byte = *(data + length - 1); + + if (TBI_ACCEPT(hw, status, errors, length, last_byte)) { + unsigned long irq_flags; + + spin_lock_irqsave(&adapter->stats_lock, irq_flags); + e1000_tbi_adjust_stats(hw, &adapter->stats, length, data); + spin_unlock_irqrestore(&adapter->stats_lock, irq_flags); + + return true; + } + + return false; +} + +static struct sk_buff *e1000_alloc_rx_skb(struct e1000_adapter *adapter, + unsigned int bufsz) +{ + struct sk_buff *skb = napi_alloc_skb(&adapter->napi, bufsz); + + if (unlikely(!skb)) + adapter->alloc_rx_buff_failed++; + return skb; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi work completed this call + * @work_to_do: max amount of work allowed for this call to do + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + */ +static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + + if (++i == rx_ring->count) i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((status & E1000_RXD_STAT_EOP) && + (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) { + u8 *mapped = page_address(buffer_info->rxbuf.page); + + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, mapped)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + /* an error means any chain goes out the window + * too + */ + if (rx_ring->rx_skb_top) + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } + } + +#define rxtop rx_ring->rx_skb_top +process_skb: + if (!(status & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = napi_get_frags(&adapter->napi); + 
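/* napi_get_frags() hands back a NAPI-owned skb that accumulates + * this chain's page fragments; NULL means the allocation failed */ +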
if (!rxtop) + break; + + skb_fill_page_desc(rxtop, 0, + buffer_info->rxbuf.page, + 0, length); + } else { + /* this is the middle of a chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + skb_fill_page_desc(rxtop, + skb_shinfo(rxtop)->nr_frags, + buffer_info->rxbuf.page, 0, length); + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + struct page *p; + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + p = buffer_info->rxbuf.page; + if (length <= copybreak) { + u8 *vaddr; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + length -= 4; + skb = e1000_alloc_rx_skb(adapter, + length); + if (!skb) + break; + + vaddr = kmap_atomic(p); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->rxbuf.page + */ + skb_put(skb, length); + e1000_rx_checksum(adapter, + status | rx_desc->errors << 24, + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_receive_skb(adapter, status, + rx_desc->special, skb); + goto next_desc; + } else { + skb = napi_get_frags(&adapter->napi); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + skb_fill_page_desc(skb, 0, p, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload XXX recompute due to CRC strip? */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + total_rx_bytes += (skb->len - 4); /* don't count FCS */ + if (likely(!(netdev->features & NETIF_F_RXFCS))) + pskb_trim(skb, skb->len - 4); + total_rx_packets++; + + if (status & E1000_RXD_STAT_VP) { + __le16 vlan = rx_desc->special; + u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK; + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); + } + + napi_gro_frags(&adapter->napi); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/* this should improve performance for small packets with large amounts + * of reassembly being done in the stack + */ +static struct sk_buff *e1000_copybreak(struct e1000_adapter *adapter, + struct e1000_rx_buffer *buffer_info, + u32 length, const void *data) +{ + struct sk_buff *skb; + + if (length > copybreak) + return NULL; + + skb = e1000_alloc_rx_skb(adapter, length); + if (!skb) + return NULL; + + dma_sync_single_for_cpu(&adapter->pdev->dev, buffer_info->dma, + length, DMA_FROM_DEVICE); + + memcpy(skb_put(skb, length), data, length); + + return skb; +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * @rx_ring: ring to clean + * @work_done: amount of napi 
work completed this call + * @work_to_do: max amount of work allowed for this call to do + */ +static bool e1000_clean_rx_irq(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int *work_done, int work_to_do) +{ + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc, *next_rxd; + struct e1000_rx_buffer *buffer_info, *next_buffer; + u32 length; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes=0, total_rx_packets=0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC(*rx_ring, i); + buffer_info = &rx_ring->buffer_info[i]; + + while (rx_desc->status & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + u8 *data; + u8 status; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + status = rx_desc->status; + length = le16_to_cpu(rx_desc->length); + + data = buffer_info->rxbuf.data; + prefetch(data); + skb = e1000_copybreak(adapter, buffer_info, length, data); + if (!skb) { + unsigned int frag_len = e1000_frag_len(adapter); + + skb = build_skb(data - E1000_HEADROOM, frag_len); + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + skb_reserve(skb, E1000_HEADROOM); + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + buffer_info->rxbuf.data = NULL; + } + + if (++i == rx_ring->count) i = 0; + next_rxd = E1000_RX_DESC(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + + /* !EOP means multiple descriptors were used to store a single + * packet, if that's the case we need to toss it. In fact, we need + * to toss every packet with the EOP bit clear and the next + * frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(status & E1000_RXD_STAT_EOP))) + adapter->discarding = true; + + if (adapter->discarding) { + /* All receives must fit into a single buffer */ + netdev_dbg(netdev, "Receive packet consumed multiple buffers\n"); + dev_kfree_skb(skb); + if (status & E1000_RXD_STAT_EOP) + adapter->discarding = false; + goto next_desc; + } + + if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) { + if (e1000_tbi_should_accept(adapter, status, + rx_desc->errors, + length, data)) { + length--; + } else if (netdev->features & NETIF_F_RXALL) { + goto process_skb; + } else { + dev_kfree_skb(skb); + goto next_desc; + } + } + +process_skb: + total_rx_bytes += (length - 4); /* don't count FCS */ + total_rx_packets++; + + if (likely(!(netdev->features & NETIF_F_RXFCS))) + /* adjust length to remove Ethernet CRC, this must be + * done after the TBI_ACCEPT workaround above + */ + length -= 4; + + if (buffer_info->rxbuf.data == NULL) + skb_put(skb, length); + else /* copybreak skb */ + skb_trim(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, + (u32)(status) | + ((u32)(rx_desc->errors) << 24), + le16_to_cpu(rx_desc->csum), skb); + + e1000_receive_skb(adapter, status, rx_desc->special, skb); + +next_desc: + rx_desc->status = 0; + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + } + rx_ring->next_to_clean = i; + + cleaned_count = 
E1000_DESC_UNUSED(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count); + + adapter->total_rx_packets += total_rx_packets; + adapter->total_rx_bytes += total_rx_bytes; + netdev->stats.rx_bytes += total_rx_bytes; + netdev->stats.rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @adapter: address of board private structure + * @rx_ring: pointer to receive ring structure + * @cleaned_count: number of buffers to allocate this pass + **/ +static void +e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, int cleaned_count) +{ + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + /* allocate a new page if necessary */ + if (!buffer_info->rxbuf.page) { + buffer_info->rxbuf.page = alloc_page(GFP_ATOMIC); + if (unlikely(!buffer_info->rxbuf.page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->rxbuf.page, 0, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + put_page(buffer_info->rxbuf.page); + buffer_info->rxbuf.page = NULL; + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + } + + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
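+ * Note the i-- above: the tail register is deliberately left one + * entry behind next_to_use, so the ring never appears completely + * full to the hardware.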
+ */ + wmb(); + writel(i, adapter->hw.hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended + * @adapter: address of board private structure + **/ +static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter, + struct e1000_rx_ring *rx_ring, + int cleaned_count) +{ + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + struct e1000_rx_desc *rx_desc; + struct e1000_rx_buffer *buffer_info; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + void *data; + + if (buffer_info->rxbuf.data) + goto skip; + + data = e1000_alloc_frag(adapter); + if (!data) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + void *olddata = data; + e_err(rx_err, "skb align check failed: %u bytes at " + "%p\n", bufsz, data); + /* Try again, without freeing the previous */ + data = e1000_alloc_frag(adapter); + /* Failed allocation, critical failure */ + if (!data) { + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + if (!e1000_check_64k_bound(adapter, data, bufsz)) { + /* give up */ + skb_free_frag(data); + skb_free_frag(olddata); + adapter->alloc_rx_buff_failed++; + break; + } + + /* Use new allocation */ + skb_free_frag(olddata); + } + buffer_info->dma = dma_map_single(&pdev->dev, + data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + skb_free_frag(data); + buffer_info->dma = 0; + adapter->alloc_rx_buff_failed++; + break; + } + + /* XXX if it was allocated cleanly it will never map to a + * boundary crossing + */ + + /* Fix for errata 23, can't cross 64kB boundary */ + if (!e1000_check_64k_bound(adapter, + (void *)(unsigned long)buffer_info->dma, + adapter->rx_buffer_len)) { + e_err(rx_err, "dma align check failed: %u bytes at " + "%p\n", adapter->rx_buffer_len, + (void *)(unsigned long)buffer_info->dma); + + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + + skb_free_frag(data); + buffer_info->rxbuf.data = NULL; + buffer_info->dma = 0; + + adapter->alloc_rx_buff_failed++; + break; + } + buffer_info->rxbuf.data = data; + skip: + rx_desc = E1000_RX_DESC(*rx_ring, i); + rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). + */ + wmb(); + writel(i, hw->hw_addr + rx_ring->rdt); + } +} + +/** + * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers. 
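+ * If the PHY reports a Master/Slave configuration fault on two + * consecutive reads, the forced master setting is dropped and + * auto-negotiation is restarted; it is turned back on after + * E1000_SMARTSPEED_DOWNSHIFT attempts without link.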
+ * @adapter: board private structure + **/ +static void e1000_smartspeed(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u16 phy_status; + u16 phy_ctrl; + + if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg || + !(hw->autoneg_advertised & ADVERTISE_1000_FULL)) + return; + + if (adapter->smartspeed == 0) { + /* If Master/Slave config fault is asserted twice, + * we assume back-to-back + */ + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status); + if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return; + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + if (phy_ctrl & CR_1000T_MS_ENABLE) { + phy_ctrl &= ~CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, + phy_ctrl); + adapter->smartspeed++; + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, + &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, + phy_ctrl); + } + } + return; + } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) { + /* If still no link, perhaps using 2/3 pair cable */ + e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl); + phy_ctrl |= CR_1000T_MS_ENABLE; + e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl); + if (!e1000_phy_setup_autoneg(hw) && + !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) { + phy_ctrl |= (MII_CR_AUTO_NEG_EN | + MII_CR_RESTART_AUTO_NEG); + e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl); + } + } + /* Restart process after E1000_SMARTSPEED_MAX iterations */ + if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX) + adapter->smartspeed = 0; +} + +/** + * e1000_ioctl - entry point for device ioctl calls + * @netdev: network interface device structure + * @ifr: interface request structure + * @cmd: ioctl command + **/ +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * e1000_mii_ioctl - handle MII (PHY register) ioctls + * @netdev: network interface device structure + * @ifr: interface request structure + * @cmd: ioctl command + **/ +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct mii_ioctl_data *data = if_mii(ifr); + int retval; + u16 mii_reg; + unsigned long flags; + + if (hw->media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + break; + case SIOCGMIIREG: + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_read_phy_reg(hw, data->reg_num & 0x1F, + &data->val_out)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + break; + case SIOCSMIIREG: + if (data->reg_num & ~(0x1F)) + return -EFAULT; + mii_reg = data->val_in; + spin_lock_irqsave(&adapter->stats_lock, flags); + if (e1000_write_phy_reg(hw, data->reg_num, + mii_reg)) { + spin_unlock_irqrestore(&adapter->stats_lock, flags); + return -EIO; + } + spin_unlock_irqrestore(&adapter->stats_lock, flags); + if (hw->media_type == e1000_media_type_copper) { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (mii_reg & MII_CR_AUTO_NEG_EN) { + hw->autoneg = 1; + hw->autoneg_advertised = 0x2F; + } else { + u32 speed; + if (mii_reg & 0x40) + speed = SPEED_1000; + else if (mii_reg & 0x2000) + speed = SPEED_100; + else + speed = SPEED_10; + retval = e1000_set_spd_dplx( + adapter, speed, + ((mii_reg & 0x100) + ? 
DUPLEX_FULL : + DUPLEX_HALF)); + if (retval) + return retval; + } + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + case M88E1000_PHY_SPEC_CTRL: + case M88E1000_EXT_PHY_SPEC_CTRL: + if (e1000_phy_reset(hw)) + return -EIO; + break; + } + } else { + switch (data->reg_num) { + case PHY_CTRL: + if (mii_reg & MII_CR_POWER_DOWN) + break; + if (netif_running(adapter->netdev)) + e1000_reinit_locked(adapter); + else + e1000_reset(adapter); + break; + } + } + break; + default: + return -EOPNOTSUPP; + } + return E1000_SUCCESS; +} + +void e1000_pci_set_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + int ret_val = pci_set_mwi(adapter->pdev); + + if (ret_val) + e_err(probe, "Error in setting MWI\n"); +} + +void e1000_pci_clear_mwi(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + + pci_clear_mwi(adapter->pdev); +} + +int e1000_pcix_get_mmrbc(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->back; + return pcix_get_mmrbc(adapter->pdev); +} + +void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc) +{ + struct e1000_adapter *adapter = hw->back; + pcix_set_mmrbc(adapter->pdev, mmrbc); +} + +void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value) +{ + outl(value, port); +} + +static bool e1000_vlan_used(struct e1000_adapter *adapter) +{ + u16 vid; + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + return true; + return false; +} + +static void __e1000_vlan_mode(struct e1000_adapter *adapter, + netdev_features_t features) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + ctrl = er32(CTRL); + if (features & NETIF_F_HW_VLAN_CTAG_RX) { + /* enable VLAN tag insert/strip */ + ctrl |= E1000_CTRL_VME; + } else { + /* disable VLAN tag insert/strip */ + ctrl &= ~E1000_CTRL_VME; + } + ew32(CTRL, ctrl); +} +static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter, + bool filter_on) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, adapter->netdev->features); + if (filter_on) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_CFIEN; + if (!(adapter->netdev->flags & IFF_PROMISC)) + rctl |= E1000_RCTL_VFE; + ew32(RCTL, rctl); + e1000_update_mng_vlan(adapter); + } else { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~E1000_RCTL_VFE; + ew32(RCTL, rctl); + } + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static void e1000_vlan_mode(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + + __e1000_vlan_mode(adapter, features); + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((hw->mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && + (vid == adapter->mng_vlan_id)) + return 0; + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, true); + + /* add VID to filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta |= (1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + set_bit(vid, 
adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_disable(adapter); + if (!test_bit(__E1000_DOWN, &adapter->flags)) + e1000_irq_enable(adapter); + + /* remove VID from filter table */ + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + e1000_write_vfta(hw, index, vfta); + + clear_bit(vid, adapter->active_vlans); + + if (!e1000_vlan_used(adapter)) + e1000_vlan_filter_on_off(adapter, false); + + return 0; +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + if (!e1000_vlan_used(adapter)) + return; + + e1000_vlan_filter_on_off(adapter, true); + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_hw *hw = &adapter->hw; + + hw->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 Mbps full duplex */ + if ((hw->media_type == e1000_media_type_fiber) && + spd != SPEED_1000 && + dplx != DUPLEX_FULL) + goto err_inval; + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_10_half; + break; + case SPEED_10 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_10_full; + break; + case SPEED_100 + DUPLEX_HALF: + hw->forced_speed_duplex = e1000_100_half; + break; + case SPEED_100 + DUPLEX_FULL: + hw->forced_speed_duplex = e1000_100_full; + break; + case SPEED_1000 + DUPLEX_FULL: + hw->autoneg = 1; + hw->autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + hw->mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err(probe, "Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + u32 wufc = adapter->wol; +#ifdef CONFIG_PM + int retval = 0; +#endif + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->flags) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags)); + e1000_down(adapter); + } + +#ifdef CONFIG_PM + retval = pci_save_state(pdev); + if (retval) + return retval; +#endif + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000_set_rx_mode(netdev); + + rctl = er32(RCTL); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) + rctl |= E1000_RCTL_MPE; + + /* enable receives in the hardware */ + ew32(RCTL, rctl | E1000_RCTL_EN); + + if (hw->mac_type >= e1000_82540) { + ctrl = er32(CTRL); + /* advertise wake from D3Cold */ + #define E1000_CTRL_ADVD3WUC 0x00100000 + /* phy power management enable */ + #define 
E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 + ctrl |= E1000_CTRL_ADVD3WUC | + E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + } + + if (hw->media_type == e1000_media_type_fiber || + hw->media_type == e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + ew32(WUC, E1000_WUC_PME_EN); + ew32(WUFC, wufc); + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + e1000_release_manageability(adapter); + + *enable_wake = !!wufc; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->en_mng_pt) + *enable_wake = true; + + if (netif_running(netdev)) + e1000_free_irq(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + return 0; +} + +#ifdef CONFIG_PM +static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) +{ + int retval; + bool wake; + + retval = __e1000_shutdown(pdev, &wake); + if (retval) + return retval; + + if (wake) { + pci_prepare_to_sleep(pdev); + } else { + pci_wake_from_d3(pdev, false); + pci_set_power_state(pdev, PCI_D3hot); + } + + return 0; +} + +static int e1000_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot enable PCI device from suspend\n"); + return err; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + e1000_power_up_phy(adapter); + e1000_reset(adapter); + ew32(WUS, ~0); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) + e1000_up(adapter); + + netif_device_attach(netdev); + + return 0; +} +#endif + +static void e1000_shutdown(struct pci_dev *pdev) +{ + bool wake; + + __e1000_shutdown(pdev, &wake); + + if (system_state == SYSTEM_POWER_OFF) { + pci_wake_from_d3(pdev, wake); + pci_set_power_state(pdev, PCI_D3hot); + } +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void e1000_netpoll(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + disable_irq(adapter->pdev->irq); + e1000_intr(adapter->pdev->irq, netdev); + enable_irq(adapter->pdev->irq); +} +#endif + +/** + * e1000_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device + * @state: The current pci connection state + * + * This function is called after a PCI bus error affecting + * this device has been detected. 
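+ * Returns PCI_ERS_RESULT_DISCONNECT when the failure is permanent, + * otherwise PCI_ERS_RESULT_NEED_RESET to request a slot reset.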
+ */ +static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + if (netif_running(netdev)) + e1000_down(adapter); + + if (!test_and_set_bit(__E1000_DISABLED, &adapter->flags)) + pci_disable_device(pdev); + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * e1000_io_slot_reset - called after the pci bus has been reset. + * @pdev: Pointer to PCI device + * + * Restart the card from scratch, as if from a cold-boot. Implementation + * resembles the first-half of the e1000_resume routine. + */ +static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int err; + + if (adapter->need_ioport) + err = pci_enable_device(pdev); + else + err = pci_enable_device_mem(pdev); + if (err) { + pr_err("Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* flush memory to make sure state is correct */ + smp_mb__before_atomic(); + clear_bit(__E1000_DISABLED, &adapter->flags); + pci_set_master(pdev); + + pci_enable_wake(pdev, PCI_D3hot, 0); + pci_enable_wake(pdev, PCI_D3cold, 0); + + e1000_reset(adapter); + ew32(WUS, ~0); + + return PCI_ERS_RESULT_RECOVERED; +} + +/** + * e1000_io_resume - called when traffic can start flowing again. + * @pdev: Pointer to PCI device + * + * This callback is called when the error recovery driver tells us that + * it's OK to resume normal operation. Implementation resembles the + * second-half of the e1000_resume routine. + */ +static void e1000_io_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000_init_manageability(adapter); + + if (netif_running(netdev)) { + if (e1000_up(adapter)) { + pr_info("can't bring device back up after reset\n"); + return; + } + } + + netif_device_attach(netdev); +} + +/* e1000_main.c */ diff --git a/addons/e1000/src/4.4.180/e1000_osdep.h b/addons/e1000/src/4.4.180/e1000_osdep.h new file mode 100644 index 00000000..33e7c45a --- /dev/null +++ b/addons/e1000/src/4.4.180/e1000_osdep.h @@ -0,0 +1,109 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + + +/* glue for the OS independent part of e1000 + * includes register access macros + */ + +#ifndef _E1000_OSDEP_H_ +#define _E1000_OSDEP_H_ + +#include <asm/io.h> + +#define CONFIG_RAM_BASE 0x60000 +#define GBE_CONFIG_OFFSET 0x0 + +#define GBE_CONFIG_RAM_BASE \ + ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) + +#define GBE_CONFIG_BASE_VIRT \ + ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) + +#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ + (iowrite16_rep(base + offset, data, count)) + +#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \ + (ioread16_rep(base + (offset << 1), data, count)) + +#define er32(reg) \ + (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg))) + +#define ew32(reg, value) \ + (writel((value), (hw->hw_addr + ((hw->mac_type >= e1000_82543) \ + ? E1000_##reg : E1000_82542_##reg)))) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \ + writel((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2)))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) ( \ + readl((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 2))) + +#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY +#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY + +#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \ + writew((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1)))) + +#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \ + readw((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + ((offset) << 1))) + +#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \ + writeb((value), ((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset)))) + +#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \ + readb((a)->hw_addr + \ + (((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \ + (offset))) + +#define E1000_WRITE_FLUSH() er32(STATUS) + +#define E1000_WRITE_ICH_FLASH_REG(a, reg, value) ( \ + writel((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG(a, reg) ( \ + readl((a)->flash_address + reg)) + +#define E1000_WRITE_ICH_FLASH_REG16(a, reg, value) ( \ + writew((value), ((a)->flash_address + reg))) + +#define E1000_READ_ICH_FLASH_REG16(a, reg) ( \ + readw((a)->flash_address + reg)) + +#endif /* _E1000_OSDEP_H_ */ diff --git a/addons/e1000/src/4.4.180/e1000_param.c b/addons/e1000/src/4.4.180/e1000_param.c new file mode 100644 index 00000000..c9cde352 --- /dev/null +++ b/addons/e1000/src/4.4.180/e1000_param.c @@ -0,0 +1,754 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2006 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ + +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ + +#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(TxDescriptors, "Number of transmit descriptors"); + +/* Receive Descriptor Count + * + * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers + * Valid Range: 80-4096 for 82544 and newer + * + * Default Value: 256 + */ +E1000_PARAM(RxDescriptors, "Number of receive descriptors"); + +/* User Specified Speed Override + * + * Valid Range: 0, 10, 100, 1000 + * - 0 - auto-negotiate at all supported speeds + * - 10 - only link at 10 Mbps + * - 100 - only link at 100 Mbps + * - 1000 - only link at 1000 Mbps + * + * Default Value: 0 + */ +E1000_PARAM(Speed, "Speed setting"); + +/* User Specified Duplex Override + * + * Valid Range: 0-2 + * - 0 - auto-negotiate for duplex + * - 1 - only link at half duplex + * - 2 - only link at full duplex + * + * Default Value: 0 + */ +E1000_PARAM(Duplex, "Duplex setting"); + +/* Auto-negotiation Advertisement Override + * + * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber) + * + * The AutoNeg value is a bit mask describing which speed and duplex + * combinations should be advertised during auto-negotiation. 
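+ * For example (hypothetical values), loading the driver with + * modprobe e1000 AutoNeg=0x0B,0x2F + * advertises 100/FD, 10/FD and 10/HD on the first NIC and all copper + * modes on the second; array entries map to boards in bd_number order.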
+ * The supported speed and duplex bits are listed below + * + * Bit 5: 1000 Mbps Full, Bit 3: 100 Mbps Full, Bit 2: 100 Mbps Half, + * Bit 1: 10 Mbps Full, Bit 0: 10 Mbps Half (bits 4, 6 and 7 are unused) + * + * Default Value: 0x2F (copper); 0x20 (fiber) + */ +E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting"); +#define AUTONEG_ADV_DEFAULT 0x2F +#define AUTONEG_ADV_MASK 0x2F + +/* User Specified Flow Control Override + * + * Valid Range: 0-3 + * - 0 - No Flow Control + * - 1 - Rx only, respond to PAUSE frames but do not generate them + * - 2 - Tx only, generate PAUSE frames but ignore them on receive + * - 3 - Full Flow Control Support + * + * Default Value: Read flow control settings from the EEPROM + */ +E1000_PARAM(FlowControl, "Flow Control setting"); +#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL + +/* XsumRX - Receive Checksum Offload Enable/Disable + * + * Valid Range: 0, 1 + * - 0 - disables all checksum offload + * - 1 - enables receive IP/TCP/UDP checksum offload + * on 82543 and newer NICs + * + * Default Value: 1 + */ +E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload"); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay typically needs to be set to something non-zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define DEFAULT_RDTR 0 +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define DEFAULT_RADV 8 +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative) + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + struct { /* range_option info */ + int min; + int max; + } r; + struct { /* list_option info */ + int nr; + const struct e1000_opt_list { int i; char *str; } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + e_dev_info("%s Enabled\n", opt->name); + return 0; + case OPTION_DISABLED: + e_dev_info("%s Disabled\n", opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + e_dev_info("%s set to %i\n", opt->name, *value); + return 0; + } + break; + case 
list_option: { + int i; + const struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + e_dev_info("%s\n", ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + e_dev_info("Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +static void e1000_check_fiber_options(struct e1000_adapter *adapter); +static void e1000_check_copper_options(struct e1000_adapter *adapter); + +/** + * e1000_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. + **/ +void e1000_check_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + e_dev_warn("Warning: no configuration for board #%i " + "using defaults for all values\n", bd); + } + + { /* Transmit Descriptor Count */ + struct e1000_tx_ring *tx_ring = adapter->tx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_TXD), + .def = E1000_DEFAULT_TXD, + .arg = { .r = { + .min = E1000_MIN_TXD, + .max = mac_type < e1000_82544 ? E1000_MAX_TXD : E1000_MAX_82544_TXD + }} + }; + + if (num_TxDescriptors > bd) { + tx_ring->count = TxDescriptors[bd]; + e1000_validate_option(&tx_ring->count, &opt, adapter); + tx_ring->count = ALIGN(tx_ring->count, + REQ_TX_DESCRIPTOR_MULTIPLE); + } else { + tx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_tx_queues; i++) + tx_ring[i].count = tx_ring->count; + } + { /* Receive Descriptor Count */ + struct e1000_rx_ring *rx_ring = adapter->rx_ring; + int i; + e1000_mac_type mac_type = adapter->hw.mac_type; + + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Descriptors", + .err = "using default of " + __MODULE_STRING(E1000_DEFAULT_RXD), + .def = E1000_DEFAULT_RXD, + .arg = { .r = { + .min = E1000_MIN_RXD, + .max = mac_type < e1000_82544 ? 
E1000_MAX_RXD : + E1000_MAX_82544_RXD + }} + }; + + if (num_RxDescriptors > bd) { + rx_ring->count = RxDescriptors[bd]; + e1000_validate_option(&rx_ring->count, &opt, adapter); + rx_ring->count = ALIGN(rx_ring->count, + REQ_RX_DESCRIPTOR_MULTIPLE); + } else { + rx_ring->count = opt.def; + } + for (i = 0; i < adapter->num_rx_queues; i++) + rx_ring[i].count = rx_ring->count; + } + { /* Checksum Offload Enable/Disable */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "Checksum Offload", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_XsumRX > bd) { + unsigned int rx_csum = XsumRX[bd]; + e1000_validate_option(&rx_csum, &opt, adapter); + adapter->rx_csum = rx_csum; + } else { + adapter->rx_csum = opt.def; + } + } + { /* Flow Control */ + + static const struct e1000_opt_list fc_list[] = { + { E1000_FC_NONE, "Flow Control Disabled" }, + { E1000_FC_RX_PAUSE, "Flow Control Receive Only" }, + { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" }, + { E1000_FC_FULL, "Flow Control Enabled" }, + { E1000_FC_DEFAULT, "Flow Control Hardware Default" } + }; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Flow Control", + .err = "reading default settings from EEPROM", + .def = E1000_FC_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(fc_list), + .p = fc_list }} + }; + + if (num_FlowControl > bd) { + unsigned int fc = FlowControl[bd]; + e1000_validate_option(&fc, &opt, adapter); + adapter->hw.fc = adapter->hw.original_fc = fc; + } else { + adapter->hw.fc = adapter->hw.original_fc = opt.def; + } + } + { /* Transmit Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY }} + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + { /* Transmit Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY }} + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + { /* Receive Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY }} + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + { /* Receive Absolute Interrupt Delay */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY }} + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + { /* Interrupt 
Throttling Rate */ + opt = (struct e1000_option) { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR }} + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + switch (adapter->itr) { + case 0: + e_dev_info("%s turned off\n", opt.name); + break; + case 1: + e_dev_info("%s set to dynamic mode\n", + opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 3: + e_dev_info("%s set to dynamic conservative " + "mode\n", opt.name); + adapter->itr_setting = adapter->itr; + adapter->itr = 20000; + break; + case 4: + e_dev_info("%s set to simplified " + "(2000-8000) ints mode\n", opt.name); + adapter->itr_setting = adapter->itr; + break; + default: + e1000_validate_option(&adapter->itr, &opt, + adapter); + /* save the setting, because the dynamic bits + * change itr. + * clear the lower two bits because they are + * used as control + */ + adapter->itr_setting = adapter->itr & ~3; + break; + } + } else { + adapter->itr_setting = opt.def; + adapter->itr = 20000; + } + } + { /* Smart Power Down */ + opt = (struct e1000_option) { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + adapter->smart_power_down = spd; + } else { + adapter->smart_power_down = opt.def; + } + } + + switch (adapter->hw.media_type) { + case e1000_media_type_fiber: + case e1000_media_type_internal_serdes: + e1000_check_fiber_options(adapter); + break; + case e1000_media_type_copper: + e1000_check_copper_options(adapter); + break; + default: + BUG(); + } +} + +/** + * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version + * @adapter: board private structure + * + * Handles speed and duplex options on fiber adapters + **/ +static void e1000_check_fiber_options(struct e1000_adapter *adapter) +{ + int bd = adapter->bd_number; + if (num_Speed > bd) { + e_dev_info("Speed not valid for fiber adapters, parameter " + "ignored\n"); + } + + if (num_Duplex > bd) { + e_dev_info("Duplex not valid for fiber adapters, parameter " + "ignored\n"); + } + + if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) { + e_dev_info("AutoNeg other than 1000/Full is not valid for fiber " + "adapters, parameter ignored\n"); + } +} + +/** + * e1000_check_copper_options - Range Checking for Link Options, Copper Version + * @adapter: board private structure + * + * Handles speed and duplex options on copper adapters + **/ +static void e1000_check_copper_options(struct e1000_adapter *adapter) +{ + struct e1000_option opt; + unsigned int speed, dplx, an; + int bd = adapter->bd_number; + + { /* Speed */ + static const struct e1000_opt_list speed_list[] = { + { 0, "" }, + { SPEED_10, "" }, + { SPEED_100, "" }, + { SPEED_1000, "" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "Speed", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(speed_list), + .p = speed_list }} + }; + + if (num_Speed > bd) { + speed = Speed[bd]; + e1000_validate_option(&speed, &opt, adapter); + } else { + speed = opt.def; + } + } + { /* Duplex */ + static const struct e1000_opt_list dplx_list[] = { + { 0, "" }, + { HALF_DUPLEX, "" }, + { FULL_DUPLEX, "" }}; + + opt = (struct e1000_option) { + .type = 
list_option, + .name = "Duplex", + .err = "parameter ignored", + .def = 0, + .arg = { .l = { .nr = ARRAY_SIZE(dplx_list), + .p = dplx_list }} + }; + + if (num_Duplex > bd) { + dplx = Duplex[bd]; + e1000_validate_option(&dplx, &opt, adapter); + } else { + dplx = opt.def; + } + } + + if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) { + e_dev_info("AutoNeg specified along with Speed or Duplex, " + "parameter ignored\n"); + adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT; + } else { /* Autoneg */ + static const struct e1000_opt_list an_list[] = + #define AA "AutoNeg advertising " + {{ 0x01, AA "10/HD" }, + { 0x02, AA "10/FD" }, + { 0x03, AA "10/FD, 10/HD" }, + { 0x04, AA "100/HD" }, + { 0x05, AA "100/HD, 10/HD" }, + { 0x06, AA "100/HD, 10/FD" }, + { 0x07, AA "100/HD, 10/FD, 10/HD" }, + { 0x08, AA "100/FD" }, + { 0x09, AA "100/FD, 10/HD" }, + { 0x0a, AA "100/FD, 10/FD" }, + { 0x0b, AA "100/FD, 10/FD, 10/HD" }, + { 0x0c, AA "100/FD, 100/HD" }, + { 0x0d, AA "100/FD, 100/HD, 10/HD" }, + { 0x0e, AA "100/FD, 100/HD, 10/FD" }, + { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" }, + { 0x20, AA "1000/FD" }, + { 0x21, AA "1000/FD, 10/HD" }, + { 0x22, AA "1000/FD, 10/FD" }, + { 0x23, AA "1000/FD, 10/FD, 10/HD" }, + { 0x24, AA "1000/FD, 100/HD" }, + { 0x25, AA "1000/FD, 100/HD, 10/HD" }, + { 0x26, AA "1000/FD, 100/HD, 10/FD" }, + { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" }, + { 0x28, AA "1000/FD, 100/FD" }, + { 0x29, AA "1000/FD, 100/FD, 10/HD" }, + { 0x2a, AA "1000/FD, 100/FD, 10/FD" }, + { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" }, + { 0x2c, AA "1000/FD, 100/FD, 100/HD" }, + { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" }, + { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" }, + { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }}; + + opt = (struct e1000_option) { + .type = list_option, + .name = "AutoNeg", + .err = "parameter ignored", + .def = AUTONEG_ADV_DEFAULT, + .arg = { .l = { .nr = ARRAY_SIZE(an_list), + .p = an_list }} + }; + + if (num_AutoNeg > bd) { + an = AutoNeg[bd]; + e1000_validate_option(&an, &opt, adapter); + } else { + an = opt.def; + } + adapter->hw.autoneg_advertised = an; + } + + switch (speed + dplx) { + case 0: + adapter->hw.autoneg = adapter->fc_autoneg = 1; + if ((num_Speed > bd) && (speed != 0 || dplx != 0)) + e_dev_info("Speed and duplex autonegotiation " + "enabled\n"); + break; + case HALF_DUPLEX: + e_dev_info("Half Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Half Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_100_HALF; + break; + case FULL_DUPLEX: + e_dev_info("Full Duplex specified without Speed\n"); + e_dev_info("Using Autonegotiation at Full Duplex only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_FULL | + ADVERTISE_100_FULL | + ADVERTISE_1000_FULL; + break; + case SPEED_10: + e_dev_info("10 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 10 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_10_HALF | + ADVERTISE_10_FULL; + break; + case SPEED_10 + HALF_DUPLEX: + e_dev_info("Forcing to 10 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_10_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_10 + FULL_DUPLEX: + e_dev_info("Forcing to 10 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + 
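+		/* Forcing a fixed speed/duplex pair: both link and flow
+		 * control autonegotiation are switched off and the
+		 * advertisement mask is cleared, so the hardware is
+		 * programmed with exactly the requested mode below.
+		 */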
adapter->hw.forced_speed_duplex = e1000_10_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100: + e_dev_info("100 Mbps Speed specified without Duplex\n"); + e_dev_info("Using Autonegotiation at 100 Mbps only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_100_HALF | + ADVERTISE_100_FULL; + break; + case SPEED_100 + HALF_DUPLEX: + e_dev_info("Forcing to 100 Mbps Half Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_half; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_100 + FULL_DUPLEX: + e_dev_info("Forcing to 100 Mbps Full Duplex\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 0; + adapter->hw.forced_speed_duplex = e1000_100_full; + adapter->hw.autoneg_advertised = 0; + break; + case SPEED_1000: + e_dev_info("1000 Mbps Speed specified without Duplex\n"); + goto full_duplex_only; + case SPEED_1000 + HALF_DUPLEX: + e_dev_info("Half Duplex is not supported at 1000 Mbps\n"); + /* fall through */ + case SPEED_1000 + FULL_DUPLEX: +full_duplex_only: + e_dev_info("Using Autonegotiation at 1000 Mbps Full Duplex " + "only\n"); + adapter->hw.autoneg = adapter->fc_autoneg = 1; + adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL; + break; + default: + BUG(); + } + + /* Speed, AutoNeg and MDI/MDI-X must all play nice */ + if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) { + e_dev_info("Speed, AutoNeg and MDI-X specs are incompatible. " + "Setting MDI-X to a compatible value.\n"); + } +} + diff --git a/addons/e1000e/install.sh b/addons/e1000e/install.sh new file mode 100644 index 00000000..716c13d5 --- /dev/null +++ b/addons/e1000e/install.sh @@ -0,0 +1,4 @@ +if [ "${1}" = "rd" ]; then + echo "Installing module for Intel(R) PRO/1000 Gigabit Ethernet PCI-e adapter" + ${INSMOD} "/modules/e1000e.ko" ${PARAMS} +fi diff --git a/addons/e1000e/manifest.yml b/addons/e1000e/manifest.yml new file mode 100644 index 00000000..fe6ad631 --- /dev/null +++ b/addons/e1000e/manifest.yml @@ -0,0 +1,29 @@ +version: 1 +name: e1000e +description: "Driver for Intel(R) PRO/1000 Gigabit Ethernet PCI-e adapters" +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true + diff --git a/addons/e1000e/src/3.10.108/80003es2lan.c b/addons/e1000e/src/3.10.108/80003es2lan.c new file mode 100644 index 00000000..b71c8502 --- /dev/null +++ b/addons/e1000e/src/3.10.108/80003es2lan.c @@ -0,0 +1,1421 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* 80003ES2LAN Gigabit Ethernet Controller (Copper) + * 80003ES2LAN Gigabit Ethernet Controller (Serdes) + */ + +#include "e1000.h" + +/* A table for the GG82563 cable length where the range is defined + * with a lower bound at "index" and the upper bound at + * "index + 5". + */ +static const u16 e1000_gg82563_cable_length_table[] = { + 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF +}; + +#define GG82563_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_gg82563_cable_length_table) + +static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); +static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data); +static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } else { + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + phy->type = e1000_phy_gg82563; + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000e_get_phy_id(hw); + + /* Verify phy id */ + if (phy->id != GG82563_E_PHY_ID) + return -E1000_ERR_PHY; + + return ret_val; +} + +/** + * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. 
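+	 * For example, assuming the usual e1000e value of
+	 * NVM_WORD_SIZE_BASE_SHIFT (6), a size field of 2 read from EECD
+	 * gives 2 + 6 = 8 and thus word_size = 1 << 8 = 256 16-bit words.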
+ */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + + return 0; +} + +/** + * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + /* Set media type and media-dependent function pointers */ + switch (hw->adapter->pdev->device) { + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + hw->phy.media_type = e1000_media_type_internal_serdes; + mac->ops.check_for_link = e1000e_check_for_serdes_link; + mac->ops.setup_physical_interface = + e1000e_setup_fiber_serdes_link; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + mac->ops.check_for_link = e1000e_check_for_copper_link; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_80003es2lan; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK); + /* Adaptive IFS not supported */ + mac->adaptive_ifs = false; + + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + + return 0; +} + +static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_80003es2lan(hw); + if (rc) + return rc; + + rc = e1000_init_nvm_params_80003es2lan(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_80003es2lan(hw); + if (rc) + return rc; + + return 0; +} + +/** + * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to acquire access rights to the correct PHY. + **/ +static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_phy_80003es2lan - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. + **/ +static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_mac_csr_80003es2lan - Acquire right to access Kumeran register + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the Kumeran interface. + * + **/ +static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = E1000_SWFW_CSR_SM; + + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_mac_csr_80003es2lan - Release right to access Kumeran Register + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the Kumeran interface + **/ +static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = E1000_SWFW_CSR_SM; + + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the EEPROM. 
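+ *
+ * Two locks are taken in order: the SW/FW sync semaphore (EEP_SM) first,
+ * then the generic EEPROM grant; if the grant fails, the sync semaphore
+ * is released again before returning the error.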
+ **/ +static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + if (ret_val) + return ret_val; + + ret_val = e1000e_acquire_nvm(hw); + + if (ret_val) + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the EEPROM. + **/ +static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 i = 0; + s32 timeout = 50; + + while (i < timeout) { + if (e1000e_get_hw_semaphore(hw)) + return -E1000_ERR_SWFW_SYNC; + + swfw_sync = er32(SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000e_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + return -E1000_ERR_SWFW_SYNC; + } + + swfw_sync |= swmask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); + + return 0; +} + +/** + * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (e1000e_get_hw_semaphore(hw) != 0) + ; /* Empty */ + + swfw_sync = er32(SW_FW_SYNC); + swfw_sync &= ~mask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); +} + +/** + * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: pointer to the data returned from the operation + * + * Read the GG82563 PHY register. + **/ +static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 *data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... 
+ */ + usleep_range(200, 400); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + e1000_release_phy_80003es2lan(hw); + return -E1000_ERR_PHY; + } + + usleep_range(200, 400); + + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + usleep_range(200, 400); + } else { + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: value to write to the register + * + * Write to the GG82563 PHY register. + **/ +static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + usleep_range(200, 400); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + e1000_release_phy_80003es2lan(hw); + return -E1000_ERR_PHY; + } + + usleep_range(200, 400); + + ret_val = e1000e_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & + offset, data); + + usleep_range(200, 400); + } else { + ret_val = e1000e_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & + offset, data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_nvm_80003es2lan - Write to ESB2 NVM + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @words: number of words to write + * @data: buffer of data to write to the NVM + * + * Write "words" of data to the ESB2 NVM. + **/ +static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + return e1000e_write_nvm_spi(hw, offset, words, data); +} + +/** + * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete + * @hw: pointer to the HW structure + * + * Wait a specific amount of time for manageability processes to complete. + * This is a function pointer entry point called by the phy module. 
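+ *
+ * Polls the port-specific CFG_DONE bit in EEMNGCTL (port 0 or port 1,
+ * selected by bus.func) roughly once per millisecond for up to
+ * PHY_CFG_TIMEOUT iterations, returning -E1000_ERR_RESET if it never sets.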
+ **/ +static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + + while (timeout) { + if (er32(EEMNGCTL) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) { + e_dbg("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex + * @hw: pointer to the HW structure + * + * Force the speed and duplex settings onto the PHY. This is a + * function pointer entry point called by the phy module. + **/ +static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + bool link; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("GG82563 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. */ + phy_data |= BMCR_RESET; + + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + + if (hw->phy.autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1000e_phy_reset_dsp(hw); + if (ret_val) + return ret_val; + } + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to verify the TX_CLK corresponds + * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. + */ + phy_data &= ~GG82563_MSCR_TX_CLK_MASK; + if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED) + phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5; + else + phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_get_cable_length_80003es2lan - Set approximate cable length + * @hw: pointer to the HW structure + * + * Find the approximate cable length as measured by the GG82563 PHY. + * This is a function pointer entry point called by the phy module. 
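+ *
+ * The DSP distance bits index e1000_gg82563_cable_length_table[]: the
+ * entry at [index] is the minimum and the entry at [index + 5] the
+ * maximum length, and the reported estimate is their average; e.g. an
+ * index of 2 yields (115 + 150) / 2 = 132 meters.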
+ **/ +static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); + if (ret_val) + return ret_val; + + index = phy_data & GG82563_DSPD_CABLE_LENGTH; + + if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_gg82563_cable_length_table[index]; + phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return 0; +} + +/** + * e1000_get_link_up_info_80003es2lan - Report speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to speed buffer + * @duplex: pointer to duplex buffer + * + * Retrieve the current speed and duplex configuration. + **/ +static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->phy.media_type == e1000_media_type_copper) { + ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); + hw->phy.ops.cfg_on_link_up(hw); + } else { + ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, + speed, + duplex); + } + + return ret_val; +} + +/** + * e1000_reset_hw_80003es2lan - Reset the ESB2 controller + * @hw: pointer to the HW structure + * + * Perform a global reset to the ESB2 controller. + **/ +static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 kum_reg_data; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + usleep_range(10000, 20000); + + ctrl = er32(CTRL); + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + e_dbg("Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + e1000_release_phy_80003es2lan(hw); + + /* Disable IBIST slave mode (far-end loopback) */ + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Clear any pending interrupt events. */ + ew32(IMC, 0xffffffff); + er32(ICR); + + return e1000_check_alt_mac_addr_generic(hw); +} + +/** + * e1000_init_hw_80003es2lan - Initialize the ESB2 controller + * @hw: pointer to the HW structure + * + * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. + **/ +static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 kum_reg_data; + u16 i; + + e1000_initialize_hw_bits_80003es2lan(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + e_dbg("Error initializing identification LED\n"); + + /* Disabling VLAN filtering */ + e_dbg("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. 
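+	 * e1000e_init_rx_addrs() loads RAR[0] with the current MAC address
+	 * and zeroes the remaining receive address registers.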
*/ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + if (ret_val) + return ret_val; + + /* Disable IBIST slave mode (far-end loopback) */ + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL(0)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + ew32(TXDCTL(0), reg_data); + + /* ...for both queues. */ + reg_data = er32(TXDCTL(1)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + ew32(TXDCTL(1), reg_data); + + /* Enable retransmit on late collisions */ + reg_data = er32(TCTL); + reg_data |= E1000_TCTL_RTLC; + ew32(TCTL, reg_data); + + /* Configure Gigabit Carry Extend Padding */ + reg_data = er32(TCTL_EXT); + reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; + reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN; + ew32(TCTL_EXT, reg_data); + + /* Configure Transmit Inter-Packet Gap */ + reg_data = er32(TIPG); + reg_data &= ~E1000_TIPG_IPGT_MASK; + reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + ew32(TIPG, reg_data); + + reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001); + reg_data &= ~0x00100000; + E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); + + /* default to true to enable the MDIC W/A */ + hw->dev_spec.e80003es2lan.mdic_wa_enable = true; + + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >> + E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i); + if (!ret_val) { + if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == + E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) + hw->dev_spec.e80003es2lan.mdic_wa_enable = false; + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2 + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. + **/ +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + if (hw->phy.media_type != e1000_media_type_copper) + reg &= ~(1 << 20); + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + ew32(TARC(1), reg); + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. 
+ */ + reg = er32(RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + ew32(RFCTL, reg); +} + +/** + * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link + * @hw: pointer to the HW structure + * + * Setup some GG82563 PHY registers for obtaining link + **/ +static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 reg; + u16 data; + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + /* Use 25MHz for both link down and 1000Base-T for Tx clock. */ + data |= GG82563_MSCR_TX_CLK_1000MBPS_25; + + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; + + switch (phy->mdix) { + case 1: + data |= GG82563_PSCR_CROSSOVER_MODE_MDI; + break; + case 2: + data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; + break; + case 0: + default: + data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + if (phy->disable_polarity_correction) + data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* SW Reset the PHY so all changes take effect */ + ret_val = hw->phy.ops.commit(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Bypass Rx and Tx FIFO's */ + reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL; + data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); + if (ret_val) + return ret_val; + + reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE; + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data); + if (ret_val) + return ret_val; + data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data); + if (ret_val) + return ret_val; + + reg = er32(CTRL_EXT); + reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + ew32(CTRL_EXT, reg); + + ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); + if (ret_val) + return ret_val; + + /* Do not init these registers when the HW is in IAMT mode, since the + * firmware will have already initialized them. We only initialize + * them if the HW is not in IAMT mode. 
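+	 * Outside of IAMT mode, the block below enables Electrical Idle on
+	 * the PHY and clears the pass-false-carrier bit in the Kumeran mode
+	 * control register.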
+	 */
+	if (!hw->mac.ops.check_mng_mode(hw)) {
+		/* Enable Electrical Idle on the PHY */
+		data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+		ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+		ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Workaround: Disable padding in Kumeran interface in the MAC
+	 * and in the PHY to avoid CRC errors.
+	 */
+	ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data |= GG82563_ICR_DIS_PADDING;
+	ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data);
+	if (ret_val)
+		return ret_val;
+
+	return 0;
+}
+
+/**
+ * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Essentially a wrapper for setting up all things "copper" related.
+ * This is a function pointer entry point called by the mac module.
+ **/
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	u16 reg_data;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ew32(CTRL, ctrl);
+
+	/* Set the mac to wait the maximum time between each
+	 * iteration and increase the max iterations when
+	 * polling the phy; this fixes erroneous timeouts at 10Mbps.
+	 */
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
+						   0xFFFF);
+	if (ret_val)
+		return ret_val;
+	ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+						  &reg_data);
+	if (ret_val)
+		return ret_val;
+	reg_data |= 0x3F;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+						   reg_data);
+	if (ret_val)
+		return ret_val;
+	ret_val =
+	    e1000_read_kmrn_reg_80003es2lan(hw,
+					    E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+					    &reg_data);
+	if (ret_val)
+		return ret_val;
+	reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+	ret_val =
+	    e1000_write_kmrn_reg_80003es2lan(hw,
+					     E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+					     reg_data);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	return e1000e_setup_copper_link(hw);
+}
+
+/**
+ * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 or gigabit operation, depending on the negotiated link speed.
+ **/
+static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 speed;
+	u16 duplex;
+
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
+							     &duplex);
+		if (ret_val)
+			return ret_val;
+
+		if (speed == SPEED_1000)
+			ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
+		else
+			ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ * @hw: pointer to the HW structure
+ * @duplex: current duplex setting
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
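+ *
+ * The Kumeran mode control register is read twice per iteration, up to
+ * GG82563_MAX_KMRN_RETRY times, until both reads return the same value;
+ * only then is the pass-false-carrier bit updated for the current duplex.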
+ **/
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+	s32 ret_val;
+	u32 tipg;
+	u32 i = 0;
+	u16 reg_data, reg_data2;
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+	ret_val =
+	    e1000_write_kmrn_reg_80003es2lan(hw,
+					     E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+					     reg_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = er32(TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+	ew32(TIPG, tipg);
+
+	do {
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+		if (ret_val)
+			return ret_val;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	if (duplex == HALF_DUPLEX)
+		reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+	else
+		reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+	return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+}
+
+/**
+ * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * gigabit operation.
+ **/
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 reg_data, reg_data2;
+	u32 tipg;
+	u32 i = 0;
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+	ret_val =
+	    e1000_write_kmrn_reg_80003es2lan(hw,
+					     E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+					     reg_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = er32(TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+	ew32(TIPG, tipg);
+
+	do {
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+		if (ret_val)
+			return ret_val;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+	return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+}
+
+/**
+ * e1000_read_kmrn_reg_80003es2lan - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquire semaphore, then read the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release the semaphore before exiting.
+ **/
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+					   u16 *data)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val;
+
+	ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+		       E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+	ew32(KMRNCTRLSTA, kmrnctrlsta);
+	e1e_flush();
+
+	udelay(2);
+
+	kmrnctrlsta = er32(KMRNCTRLSTA);
+	*data = (u16)kmrnctrlsta;
+
+	e1000_release_mac_csr_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_write_kmrn_reg_80003es2lan - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquire semaphore, then write the data to PHY register
+ * at the offset using the kumeran interface. Release semaphore
+ * before exiting.
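+ *
+ * The KMRNCTRLSTA register packs the target offset (shifted by
+ * E1000_KMRNCTRLSTA_OFFSET_SHIFT) together with the 16-bit payload in the
+ * low word; reads set E1000_KMRNCTRLSTA_REN instead of a payload, and the
+ * 2us delay gives the hardware time to complete the access.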
+ **/ +static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + ret_val = e1000_acquire_mac_csr_80003es2lan(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + e1000_release_mac_csr_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_read_mac_addr_80003es2lan - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) +{ + e1000e_clear_hw_cntrs_base(hw); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + er32(ICRXPTC); + er32(ICRXATC); + er32(ICTXPTC); + er32(ICTXATC); + er32(ICTXQEC); + er32(ICTXQMTC); + er32(ICRXDMTC); +} + +static const struct e1000_mac_operations es2_mac_ops = { + .read_mac_addr = e1000_read_mac_addr_80003es2lan, + .id_led_init = e1000e_id_led_init_generic, + .blink_led = e1000e_blink_led_generic, + .check_mng_mode = e1000e_check_mng_mode_generic, + /* check_for_link dependent on media type */ + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, + .get_bus_info = e1000e_get_bus_info_pcie, + .set_lan_id = e1000_set_lan_id_multi_port_pcie, + .get_link_up_info = e1000_get_link_up_info_80003es2lan, + .led_on = e1000e_led_on_generic, + .led_off = e1000e_led_off_generic, + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .write_vfta = e1000_write_vfta_generic, + .clear_vfta = e1000_clear_vfta_generic, + .reset_hw = e1000_reset_hw_80003es2lan, + .init_hw = e1000_init_hw_80003es2lan, + .setup_link = e1000e_setup_link_generic, + /* setup_physical_interface dependent on media type */ + .setup_led = e1000e_setup_led_generic, + .config_collision_dist = e1000e_config_collision_dist_generic, + .rar_set = e1000e_rar_set_generic, +}; + +static const struct e1000_phy_operations es2_phy_ops = { + .acquire = e1000_acquire_phy_80003es2lan, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = 
e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, + .get_cfg_done = e1000_get_cfg_done_80003es2lan, + .get_cable_length = e1000_get_cable_length_80003es2lan, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, + .release = e1000_release_phy_80003es2lan, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = NULL, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, + .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, +}; + +static const struct e1000_nvm_operations es2_nvm_ops = { + .acquire = e1000_acquire_nvm_80003es2lan, + .read = e1000e_read_nvm_eerd, + .release = e1000_release_nvm_80003es2lan, + .reload = e1000e_reload_nvm_generic, + .update = e1000e_update_nvm_checksum_generic, + .valid_led_default = e1000e_valid_led_default, + .validate = e1000e_validate_nvm_checksum_generic, + .write = e1000_write_nvm_80003es2lan, +}; + +const struct e1000_info e1000_es2_info = { + .mac = e1000_80003es2lan, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_RX_NEEDS_RESTART /* errata */ + | FLAG_TARC_SET_BIT_ZERO /* errata */ + | FLAG_APME_CHECK_PORT_B + | FLAG_DISABLE_FC_PAUSE_TIME, /* errata */ + .flags2 = FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_80003es2lan, + .mac_ops = &es2_mac_ops, + .phy_ops = &es2_phy_ops, + .nvm_ops = &es2_nvm_ops, +}; diff --git a/addons/e1000e/src/3.10.108/80003es2lan.h b/addons/e1000e/src/3.10.108/80003es2lan.h new file mode 100644 index 00000000..90d363b2 --- /dev/null +++ b/addons/e1000e/src/3.10.108/80003es2lan.h @@ -0,0 +1,95 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000E_80003ES2LAN_H_
+#define _E1000E_80003ES2LAN_H_
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000
+
+#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26)
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-100M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define GG82563_DSPD_CABLE_LENGTH 0x0007
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY 0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+/* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
+
+#endif
diff --git a/addons/e1000e/src/3.10.108/82571.c b/addons/e1000e/src/3.10.108/82571.c
new file mode 100644
index 00000000..7380442a
--- /dev/null
+++ b/addons/e1000e/src/3.10.108/82571.c
@@ -0,0 +1,2067 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2013 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+  more details.
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* 82571EB Gigabit Ethernet Controller + * 82571EB Gigabit Ethernet Controller (Copper) + * 82571EB Gigabit Ethernet Controller (Fiber) + * 82571EB Dual Port Gigabit Mezzanine Adapter + * 82571EB Quad Port Gigabit Mezzanine Adapter + * 82571PT Gigabit PT Quad Port Server ExpressModule + * 82572EI Gigabit Ethernet Controller (Copper) + * 82572EI Gigabit Ethernet Controller (Fiber) + * 82572EI Gigabit Ethernet Controller + * 82573V Gigabit Ethernet Controller (Copper) + * 82573E Gigabit Ethernet Controller (Copper) + * 82573L Gigabit Ethernet Controller + * 82574L Gigabit Network Connection + * 82583V Gigabit Network Connection + */ + +#include "e1000.h" + +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); +static bool e1000_check_mng_mode_82574(struct e1000_hw *hw); +static s32 e1000_led_on_82574(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active); +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active); + +/** + * e1000_init_phy_params_82571 - Init PHY func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82571; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + phy->type = e1000_phy_igp_2; + break; + case e1000_82573: + phy->type = e1000_phy_m88; + break; + case e1000_82574: + case e1000_82583: + phy->type = e1000_phy_bm; + phy->ops.acquire = e1000_get_hw_semaphore_82574; + phy->ops.release = e1000_put_hw_semaphore_82574; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; + break; + default: + return -E1000_ERR_PHY; + break; + } + + /* This can only be done after all function pointers are setup. 
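+	 * In particular, the e1e_rphy() calls used to read the 82574/82583
+	 * PHY ID resolve through the ops (including the semaphore hooks)
+	 * assigned just above.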
*/ + ret_val = e1000_get_phy_id_82571(hw); + if (ret_val) { + e_dbg("Error getting PHY ID\n"); + return ret_val; + } + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + if (phy->id != IGP01E1000_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82573: + if (phy->id != M88E1111_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82574: + case e1000_82583: + if (phy->id != BME1000_E_PHY_ID_R2) + ret_val = -E1000_ERR_PHY; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id); + + return ret_val; +} + +/** + * e1000_init_nvm_params_82571 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (((eecd >> 15) & 0x3) == 0x3) { + nvm->type = e1000_nvm_flash_hw; + nvm->word_size = 2048; + /* Autonomous Flash update bit must be cleared due + * to Flash update issue. + */ + eecd &= ~E1000_EECD_AUPDEN; + ew32(EECD, eecd); + break; + } + /* Fall Through */ + default: + nvm->type = e1000_nvm_eeprom_spi; + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + break; + } + + /* Function Pointers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + nvm->ops.acquire = e1000_get_hw_semaphore_82574; + nvm->ops.release = e1000_put_hw_semaphore_82574; + break; + default: + break; + } + + return 0; +} + +/** + * e1000_init_mac_params_82571 - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 swsm = 0; + u32 swsm2 = 0; + bool force_clear_smbi = false; + + /* Set media type and media-dependent function pointers */ + switch (hw->adapter->pdev->device) { + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + mac->ops.check_for_link = e1000e_check_for_fiber_link; + mac->ops.get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; + break; + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + case E1000_DEV_ID_82572EI_SERDES: + hw->phy.media_type = e1000_media_type_internal_serdes; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + mac->ops.check_for_link = e1000_check_for_serdes_link_82571; + mac->ops.get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_82571; + mac->ops.check_for_link = e1000e_check_for_copper_link; + mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* MAC-specific function pointers */ + switch (hw->mac.type) { + case e1000_82573: + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; + mac->ops.led_on = e1000e_led_on_generic; + mac->ops.blink_led = e1000e_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are + * enabled. + */ + mac->arc_subsystem_valid = !!(er32(FWSM) & + E1000_FWSM_MODE_MASK); + break; + case e1000_82574: + case e1000_82583: + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + mac->ops.check_mng_mode = e1000_check_mng_mode_82574; + mac->ops.led_on = e1000_led_on_82574; + break; + default: + mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; + mac->ops.led_on = e1000e_led_on_generic; + mac->ops.blink_led = e1000e_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + break; + } + + /* Ensure that the inter-port SWSM.SMBI lock bit is clear before + * first NVM or PHY access. This should be done for single-port + * devices, and for one port only on dual-port devices so that + * for those devices we can still use the SMBI lock to synchronize + * inter-port accesses to the PHY & NVM. 
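+	 * On 82571/82572 the first port to initialize claims SWSM2.LOCK and
+	 * clears a possibly stale SMBI bit once; the second port sees LOCK
+	 * already set and leaves SMBI alone.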
+ */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + swsm2 = er32(SWSM2); + + if (!(swsm2 & E1000_SWSM2_LOCK)) { + /* Only do this for the first interface on this card */ + ew32(SWSM2, swsm2 | E1000_SWSM2_LOCK); + force_clear_smbi = true; + } else { + force_clear_smbi = false; + } + break; + default: + force_clear_smbi = true; + break; + } + + if (force_clear_smbi) { + /* Make sure SWSM.SMBI is clear */ + swsm = er32(SWSM); + if (swsm & E1000_SWSM_SMBI) { + /* This bit should not be set on a first interface, and + * indicates that the bootagent or EFI code has + * improperly left this bit enabled + */ + e_dbg("Please update your 82571 Bootagent\n"); + } + ew32(SWSM, swsm & ~E1000_SWSM_SMBI); + } + + /* Initialize device specific counter of SMBI acquisition timeouts. */ + hw->dev_spec.e82571.smb_counter = 0; + + return 0; +} + +static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + static int global_quad_port_a; /* global port a indication */ + struct pci_dev *pdev = adapter->pdev; + int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; + s32 rc; + + rc = e1000_init_mac_params_82571(hw); + if (rc) + return rc; + + rc = e1000_init_nvm_params_82571(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_82571(hw); + if (rc) + return rc; + + /* tag quad port adapters first, it's used below */ + switch (pdev->device) { + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: + case E1000_DEV_ID_82571PT_QUAD_COPPER: + adapter->flags |= FLAG_IS_QUAD_PORT; + /* mark the first port */ + if (global_quad_port_a == 0) + adapter->flags |= FLAG_IS_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + global_quad_port_a++; + if (global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + break; + } + + switch (adapter->hw.mac.type) { + case e1000_82571: + /* these dual ports don't have WoL on port B at all */ + if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) || + (pdev->device == E1000_DEV_ID_82571EB_SERDES) || + (pdev->device == E1000_DEV_ID_82571EB_COPPER)) && + (is_port_b)) + adapter->flags &= ~FLAG_HAS_WOL; + /* quad ports only support WoL on port A */ + if (adapter->flags & FLAG_IS_QUAD_PORT && + (!(adapter->flags & FLAG_IS_QUAD_PORT_A))) + adapter->flags &= ~FLAG_HAS_WOL; + /* Does not support WoL on any port */ + if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) + adapter->flags &= ~FLAG_HAS_WOL; + break; + case e1000_82573: + if (pdev->device == E1000_DEV_ID_82573L) { + adapter->flags |= FLAG_HAS_JUMBO_FRAMES; + adapter->max_hw_frame_size = DEFAULT_JUMBO; + } + break; + default: + break; + } + + return 0; +} + +/** + * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_id = 0; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* The 82571 firmware may still be configuring the PHY. + * In this case, we cannot access the PHY until the + * configuration is done. So we explicitly set the + * PHY ID. 
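+ * + * For 82574/82583, by contrast, the ID is assembled from the two MII + * ID registers; roughly (a sketch of the code below, with physid1 and + * physid2 standing in for the values read from MII_PHYSID1 and + * MII_PHYSID2): + * + * phy->id = ((u32)physid1 << 16) | (u32)physid2; + * phy->revision = (u32)(physid2 & ~PHY_REVISION_MASK);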
*/ + phy->id = IGP01E1000_I_PHY_ID; + break; + case e1000_82573: + return e1000e_get_phy_id(hw); + case e1000_82574: + case e1000_82583: + ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + usleep_range(20, 40); + ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + break; + default: + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + s32 sw_timeout = hw->nvm.word_size + 1; + s32 fw_timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* If we have timed out 3 times on trying to acquire + * the inter-port SMBI semaphore, there is old code + * operating on the other port, and it is not + * releasing SMBI. Modify the number of times that + * we try for the semaphore to interwork with this + * older code. + */ + if (hw->dev_spec.e82571.smb_counter > 2) + sw_timeout = 1; + + /* Get the SW semaphore */ + while (i < sw_timeout) { + swsm = er32(SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usleep_range(50, 100); + i++; + } + + if (i == sw_timeout) { + e_dbg("Driver can't access device - SMBI bit is set.\n"); + hw->dev_spec.e82571.smb_counter++; + } + /* Get the FW semaphore. */ + for (i = 0; i < fw_timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + usleep_range(50, 100); + } + + if (i == fw_timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_82571(hw); + e_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_put_hw_semaphore_82571 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + ew32(SWSM, swsm); +} + +/** + * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore during reset. + * + **/ +static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + s32 i = 0; + + extcnf_ctrl = er32(EXTCNF_CTRL); + do { + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + ew32(EXTCNF_CTRL, extcnf_ctrl); + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) + break; + + usleep_range(2000, 4000); + i++; + } while (i < MDIO_OWNERSHIP_TIMEOUT); + + if (i == MDIO_OWNERSHIP_TIMEOUT) { + /* Release semaphores */ + e1000_put_hw_semaphore_82573(hw); + e_dbg("Driver can't access the PHY\n"); + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * e1000_put_hw_semaphore_82573 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used during reset.
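+ * + * Callers pair this with e1000_get_hw_semaphore_82573(); for example, + * the 82574/82583 reset path acquires MDIO ownership around the MAC + * reset and releases it again afterwards (see e1000_reset_hw_82571() + * below).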
+ * + **/ +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + ew32(EXTCNF_CTRL, extcnf_ctrl); +} + +static DEFINE_MUTEX(swflag_mutex); + +/** + * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM. + * + **/ +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw) +{ + s32 ret_val; + + mutex_lock(&swflag_mutex); + ret_val = e1000_get_hw_semaphore_82573(hw); + if (ret_val) + mutex_unlock(&swflag_mutex); + return ret_val; +} + +/** + * e1000_put_hw_semaphore_82574 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + * + **/ +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) +{ + e1000_put_hw_semaphore_82573(hw); + mutex_unlock(&swflag_mutex); +} + +/** + * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. + * LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u32 data = er32(POEMB); + + if (active) + data |= E1000_PHY_CTRL_D0A_LPLU; + else + data &= ~E1000_PHY_CTRL_D0A_LPLU; + + ew32(POEMB, data); + return 0; +} + +/** + * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * The low power link up (lplu) state is set to the power management level D3 + * when active is true, else clear lplu for D3. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u32 data = er32(POEMB); + + if (!active) { + data &= ~E1000_PHY_CTRL_NOND0A_LPLU; + } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || + (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_PHY_CTRL_NOND0A_LPLU; + } + + ew32(POEMB, data); + return 0; +} + +/** + * e1000_acquire_nvm_82571 - Request for access to the EEPROM + * @hw: pointer to the HW structure + * + * To gain access to the EEPROM, first we must obtain a hardware semaphore. + * Then for non-82573 hardware, set the EEPROM access request bit and wait + * for EEPROM access grant bit. If the access grant bit is not set, release + * hardware semaphore. + **/ +static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_get_hw_semaphore_82571(hw); + if (ret_val) + return ret_val; + + switch (hw->mac.type) { + case e1000_82573: + break; + default: + ret_val = e1000e_acquire_nvm(hw); + break; + } + + if (ret_val) + e1000_put_hw_semaphore_82571(hw); + + return ret_val; +} + +/** + * e1000_release_nvm_82571 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. 
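+ * + * Release is done in the reverse order of e1000_acquire_nvm_82571(): + * the EEPROM request bit is dropped first, then the hardware + * semaphore.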
+ **/ +static void e1000_release_nvm_82571(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_put_hw_semaphore_82571(hw); +} + +/** + * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * For non-82573 silicon, write data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 ret_val; + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); + break; + case e1000_82571: + case e1000_82572: + ret_val = e1000e_write_nvm_spi(hw, offset, words, data); + break; + default: + ret_val = -E1000_ERR_NVM; + break; + } + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82571 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) +{ + u32 eecd; + s32 ret_val; + u16 i; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + return ret_val; + + /* If our nvm is an EEPROM, then we're done + * otherwise, commit the checksum to the flash NVM. + */ + if (hw->nvm.type != e1000_nvm_flash_hw) + return 0; + + /* Check for pending operations. */ + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + usleep_range(1000, 2000); + if (!(er32(EECD) & E1000_EECD_FLUPD)) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + /* Reset the firmware if using STM opcode. */ + if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { + /* The enabling of and the actual reset must be done + * in two write cycles. + */ + ew32(HICR, E1000_HICR_FW_RESET_ENABLE); + e1e_flush(); + ew32(HICR, E1000_HICR_FW_RESET); + } + + /* Commit the write to flash */ + eecd = er32(EECD) | E1000_EECD_FLUPD; + ew32(EECD, eecd); + + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + usleep_range(1000, 2000); + if (!(er32(EECD) & E1000_EECD_FLUPD)) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) +{ + if (hw->nvm.type == e1000_nvm_flash_hw) + e1000_fix_nvm_checksum_82571(hw); + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * After checking for invalid values, poll the EEPROM to ensure the previous + * command has completed before trying to write the next word. After write + * poll for completion. 
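+ * + * Each write packs the 16-bit data word, the word offset and a start + * bit into a single EEWR value; for example, writing 0xBEEF to word + * 0x10 would use (a sketch with the field names from the code below): + * + * eewr = (0xBEEF << E1000_NVM_RW_REG_DATA) | + * (0x10 << E1000_NVM_RW_ADDR_SHIFT) | + * E1000_NVM_RW_REG_START;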
+ * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eewr = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eewr = ((data[i] << E1000_NVM_RW_REG_DATA) | + ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START); + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + + ew32(EEWR, eewr); + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + } + + return ret_val; +} + +/** + * e1000_get_cfg_done_82571 - Poll for configuration done + * @hw: pointer to the HW structure + * + * Reads the management control register for the config done bit to be set. + **/ +static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + + while (timeout) { + if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) { + e_dbg("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When activating LPLU + * this function also disables smart speed and vice versa. LPLU will not be + * activated unless the device autonegotiation advertisement meets standards + * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function + * pointer entry point only called by PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
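+ * + * Note the asymmetry implemented below: enabling LPLU always clears + * SmartSpeed, while disabling LPLU restores SmartSpeed only when + * phy->smart_speed is explicitly on or off; the default setting leaves + * the port configuration register untouched.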
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_reset_hw_82571 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +static s32 e1000_reset_hw_82571(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext, eecd, tctl; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + e1e_flush(); + + usleep_range(10000, 20000); + + /* Must acquire the MDIO ownership before MAC reset. + * Ownership defaults to firmware after a reset. + */ + switch (hw->mac.type) { + case e1000_82573: + ret_val = e1000_get_hw_semaphore_82573(hw); + break; + case e1000_82574: + case e1000_82583: + ret_val = e1000_get_hw_semaphore_82574(hw); + break; + default: + break; + } + if (ret_val) + e_dbg("Cannot acquire MDIO ownership\n"); + + ctrl = er32(CTRL); + + e_dbg("Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + /* Must release MDIO ownership and mutex after MAC reset. */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + e1000_put_hw_semaphore_82574(hw); + break; + default: + break; + } + + if (hw->nvm.type == e1000_nvm_flash_hw) { + usleep_range(10, 20); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + } + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Phy configuration from NVM just starts after EECD_AUTO_RD is set. + * Need to wait for Phy configuration completion before accessing + * NVM and Phy. + */ + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* REQ and GNT bits need to be cleared when using AUTO_RD + * to access the EEPROM. + */ + eecd = er32(EECD); + eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT); + ew32(EECD, eecd); + break; + case e1000_82573: + case e1000_82574: + case e1000_82583: + msleep(25); + break; + default: + break; + } + + /* Clear any pending interrupt events. */ + ew32(IMC, 0xffffffff); + er32(ICR); + + if (hw->mac.type == e1000_82571) { + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + + e1000e_set_laa_state_82571(hw, true); + } + + /* Reinitialize the 82571 serdes link state machine */ + if (hw->phy.media_type == e1000_media_type_internal_serdes) + hw->mac.serdes_link_state = e1000_serdes_link_down; + + return 0; +} + +/** + * e1000_init_hw_82571 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. 
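+ * + * Roughly, in order: initialize the ID LED, clear the VLAN filter + * table, program the receive address registers (reserving the last RAR + * when a locally administered address is active), zero the multicast + * table, set up link and flow control, set the Tx descriptor + * write-back policy, and finally clear the statistics counters.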
+ **/ +static s32 e1000_init_hw_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + e1000_initialize_hw_bits_82571(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + e_dbg("Error initializing identification LED\n"); + + /* Disabling VLAN filtering */ + e_dbg("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. + * If, however, a locally administered address was assigned to the + * 82571, we must reserve a RAR for it to work around an issue where + * resetting one port will reload the MAC on the other port. + */ + if (e1000e_get_laa_state_82571(hw)) + rar_count--; + e1000e_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL(0)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + ew32(TXDCTL(0), reg_data); + + /* ...for both queues. */ + switch (mac->type) { + case e1000_82573: + e1000e_enable_tx_pkt_filtering(hw); + /* fall through */ + case e1000_82574: + case e1000_82583: + reg_data = er32(GCR); + reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + ew32(GCR, reg_data); + break; + default: + reg_data = er32(TXDCTL(1)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC); + ew32(TXDCTL(1), reg_data); + break; + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82571(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
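+ * + * These are largely errata workarounds: per-MAC-type magic bits in + * TXDCTL, TARC, CTRL, CTRL_EXT, PBA_ECC, RFCTL and GCR/GCR2 that must + * be forced to known values before normal operation.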
**/ +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw) +{ + u32 reg; + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26); + break; + case e1000_82574: + case e1000_82583: + reg |= (1 << 26); + break; + default: + break; + } + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + reg &= ~((1 << 29) | (1 << 30)); + reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + ew32(TARC(1), reg); + break; + default: + break; + } + + /* Device Control */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + reg = er32(CTRL); + reg &= ~(1 << 29); + ew32(CTRL, reg); + break; + default: + break; + } + + /* Extended Device Control */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + reg = er32(CTRL_EXT); + reg &= ~(1 << 23); + reg |= (1 << 22); + ew32(CTRL_EXT, reg); + break; + default: + break; + } + + if (hw->mac.type == e1000_82571) { + reg = er32(PBA_ECC); + reg |= E1000_PBA_ECC_CORR_EN; + ew32(PBA_ECC, reg); + } + + /* Workaround for hardware errata. + * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572 + */ + if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) { + reg = er32(CTRL_EXT); + reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN; + ew32(CTRL_EXT, reg); + } + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + if (hw->mac.type <= e1000_82573) { + reg = er32(RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + ew32(RFCTL, reg); + } + + /* PCI-Ex Control Registers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + reg = er32(GCR); + reg |= (1 << 22); + ew32(GCR, reg); + + /* Workaround for the hardware errata documented in + * the errata docs: fixes an issue where error-prone + * or unreliable PCIe completions occur, particularly + * with ASPM enabled. Without the fix, the issue can + * cause Tx timeouts. + */ + reg = er32(GCR2); + reg |= 1; + ew32(GCR2, reg); + break; + default: + break; + } +} + +/** + * e1000_clear_vfta_82571 - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +static void e1000_clear_vfta_82571(struct e1000_hw *hw) +{ + u32 offset; + u32 vfta_value = 0; + u32 vfta_offset = 0; + u32 vfta_bit_in_reg = 0; + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (hw->mng_cookie.vlan_id != 0) { + /* The VFTA is a 4096-bit field; each bit identifies + * a single VLAN ID. The following operations + * determine which 32-bit entry (i.e. offset) of the + * array, and which bit within it, correspond to the + * VLAN ID of the manageability unit.
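+ * + * A worked example, assuming the usual field widths (128 32-bit + * entries, i.e. a 7-bit entry index and a 5-bit bit index): a + * manageability VLAN ID of 291 (0x123) yields + * + * vfta_offset = (291 >> 5) & 0x7F = 9 + * vfta_bit_in_reg = 1 << (291 & 0x1F) = 1 << 3 + * + * so bit 3 of VFTA[9] is preserved while the rest of the table is + * cleared.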
*/ + vfta_offset = (hw->mng_cookie.vlan_id >> + E1000_VFTA_ENTRY_SHIFT) & + E1000_VFTA_ENTRY_MASK; + vfta_bit_in_reg = + 1 << (hw->mng_cookie.vlan_id & + E1000_VFTA_ENTRY_BIT_SHIFT_MASK); + } + break; + default: + break; + } + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + /* If the offset we want to clear is the same offset of the + * manageability VLAN ID, then clear all bits except that of + * the manageability unit. + */ + vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0; + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value); + e1e_flush(); + } +} + +/** + * e1000_check_mng_mode_82574 - Check manageability is enabled + * @hw: pointer to the HW structure + * + * Reads the NVM Initialization Control Word 2 and returns true + * (>0) if any manageability is enabled, else false (0). + **/ +static bool e1000_check_mng_mode_82574(struct e1000_hw *hw) +{ + u16 data; + + e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0; +} + +/** + * e1000_led_on_82574 - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +static s32 e1000_led_on_82574(struct e1000_hw *hw) +{ + u32 ctrl; + u32 i; + + ctrl = hw->mac.ledctl_mode2; + if (!(E1000_STATUS_LU & er32(STATUS))) { + /* If no link, then turn LED on by setting the invert bit + * for each LED that's "on" (0x0E) in ledctl_mode2. + */ + for (i = 0; i < 4; i++) + if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) == + E1000_LEDCTL_MODE_LED_ON) + ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8)); + } + ew32(LEDCTL, ctrl); + + return 0; +} + +/** + * e1000_check_phy_82574 - check 82574 phy hung state + * @hw: pointer to the HW structure + * + * Returns whether the PHY is hung or not + **/ +bool e1000_check_phy_82574(struct e1000_hw *hw) +{ + u16 status_1kbt = 0; + u16 receive_errors = 0; + s32 ret_val; + + /* Read the PHY Receive Error counter first; if it is max (all F's), + * then read the Base1000T status register. If both are max, the PHY + * is hung. + */ + ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors); + if (ret_val) + return false; + if (receive_errors == E1000_RECEIVE_ERROR_MAX) { + ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt); + if (ret_val) + return false; + if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) == + E1000_IDLE_ERROR_COUNT_MASK) + return true; + } + + return false; +} + +/** + * e1000_setup_link_82571 - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_82571(struct e1000_hw *hw) +{ + /* 82573 does not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (hw->fc.requested_mode == e1000_fc_default) + hw->fc.requested_mode = e1000_fc_full; + break; + default: + break; + } + + return e1000e_setup_link_generic(hw); +} + +/** + * e1000_setup_copper_link_82571 - Configure copper link settings + * @hw: pointer to the HW structure + * + * Configures the link for auto-neg or forced speed and duplex.
Link is then + * checked; once link is established, collision distance and flow + * control are configured. + **/ +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + switch (hw->phy.type) { + case e1000_phy_m88: + case e1000_phy_bm: + ret_val = e1000e_copper_link_setup_m88(hw); + break; + case e1000_phy_igp_2: + ret_val = e1000e_copper_link_setup_igp(hw); + break; + default: + return -E1000_ERR_PHY; + } + + if (ret_val) + return ret_val; + + return e1000e_setup_copper_link(hw); +} + +/** + * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) +{ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* If SerDes loopback mode is entered, there is no form + * of reset to take the adapter out of that mode. So we + * have to explicitly take the adapter out of loopback + * mode. This prevents drivers from twiddling their thumbs + * if another tool failed to take it out of loopback mode. + */ + ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + break; + default: + break; + } + + return e1000e_setup_fiber_serdes_link(hw); +} + +/** + * e1000_check_for_serdes_link_82571 - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Reports the link state as up or down. + * + * If autonegotiation is supported by the link partner, the link state is + * determined by the result of autonegotiation. This is the most likely case. + * If autonegotiation is not supported by the link partner, and the link + * has a valid signal, force the link up. + * + * The link state is represented internally here by 4 states: + * + * 1) down + * 2) autoneg_progress + * 3) autoneg_complete (the link successfully autonegotiated) + * 4) forced_up (the link has been forced up, it did not autonegotiate) + * + **/ +static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + u32 txcw; + u32 i; + s32 ret_val = 0; + + ctrl = er32(CTRL); + status = er32(STATUS); + er32(RXCW); + /* SYNCH bit and IV bit are sticky */ + usleep_range(10, 20); + rxcw = er32(RXCW); + + if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { + /* Receiver is synchronized with no invalid bits. */ + switch (mac->serdes_link_state) { + case e1000_serdes_link_autoneg_complete: + if (!(status & E1000_STATUS_LU)) { + /* We have lost link, retry autoneg before + * reporting link failure + */ + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("AN_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_forced_up: + /* If we are receiving /C/ ordered sets, re-enable + * auto-negotiation in the TXCW register and disable + * forced link in the Device Control register in an + * attempt to auto-negotiate with our link partner.
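+ * + * The transitions implemented below match the e_dbg trace tags: + * AN_UP -> AN_PROG (link lost), FORCED_UP -> AN_PROG (/C/ ordered sets + * received), AN_PROG -> AN_UP, DOWN or FORCED_UP (depending on the + * partner), and ANYSTATE -> DOWN or AN_PROG based on the SYNCH and IV + * bits.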
*/ + if (rxcw & E1000_RXCW_C) { + /* Enable autoneg, and unforce link up */ + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("FORCED_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_autoneg_progress: + if (rxcw & E1000_RXCW_C) { + /* We received /C/ ordered sets, meaning the + * link partner has autonegotiated, and we can + * trust the Link Up (LU) status bit. + */ + if (status & E1000_STATUS_LU) { + mac->serdes_link_state = + e1000_serdes_link_autoneg_complete; + e_dbg("AN_PROG -> AN_UP\n"); + mac->serdes_has_link = true; + } else { + /* Autoneg completed, but failed. */ + mac->serdes_link_state = + e1000_serdes_link_down; + e_dbg("AN_PROG -> DOWN\n"); + } + } else { + /* The link partner did not autoneg. + * Force link up and full duplex, and change + * state to forced. + */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error config flow control\n"); + break; + } + mac->serdes_link_state = + e1000_serdes_link_forced_up; + mac->serdes_has_link = true; + e_dbg("AN_PROG -> FORCED_UP\n"); + } + break; + + case e1000_serdes_link_down: + default: + /* The link was down but the receiver has now gained + * valid sync, so let's see if we can bring the link + * up. + */ + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("DOWN -> AN_PROG\n"); + break; + } + } else { + if (!(rxcw & E1000_RXCW_SYNCH)) { + mac->serdes_has_link = false; + mac->serdes_link_state = e1000_serdes_link_down; + e_dbg("ANYSTATE -> DOWN\n"); + } else { + /* Check several times; if the SYNCH bit and the + * CONFIG bit are both consistently 1, simply ignore + * the IV bit and restart autoneg. + */ + for (i = 0; i < AN_RETRY_COUNT; i++) { + usleep_range(10, 20); + rxcw = er32(RXCW); + if ((rxcw & E1000_RXCW_SYNCH) && + (rxcw & E1000_RXCW_C)) + continue; + + if (rxcw & E1000_RXCW_IV) { + mac->serdes_has_link = false; + mac->serdes_link_state = + e1000_serdes_link_down; + e_dbg("ANYSTATE -> DOWN\n"); + break; + } + } + + if (i == AN_RETRY_COUNT) { + txcw = er32(TXCW); + txcw |= E1000_TXCW_ANE; + ew32(TXCW, txcw); + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("ANYSTATE -> AN_PROG\n"); + } + } + } + + return ret_val; +} + +/** + * e1000_valid_led_default_82571 - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration.
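+ * + * "Not valid" here means one of the reserved sentinel words: 0xF746 on + * 82573/82574/82583 (replaced with ID_LED_DEFAULT_82573), or + * 0x0000/0xFFFF on the other MAC types (replaced with ID_LED_DEFAULT).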
**/ +static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (*data == ID_LED_RESERVED_F746) + *data = ID_LED_DEFAULT_82573; + break; + default: + if (*data == ID_LED_RESERVED_0000 || + *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + break; + } + + return 0; +} + +/** + * e1000e_get_laa_state_82571 - Get locally administered address state + * @hw: pointer to the HW structure + * + * Retrieve and return the current locally administered address state. + **/ +bool e1000e_get_laa_state_82571(struct e1000_hw *hw) +{ + if (hw->mac.type != e1000_82571) + return false; + + return hw->dev_spec.e82571.laa_is_present; +} + +/** + * e1000e_set_laa_state_82571 - Set locally administered address state + * @hw: pointer to the HW structure + * @state: enable/disable locally administered address + * + * Enable/Disable the current locally administered address state. + **/ +void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state) +{ + if (hw->mac.type != e1000_82571) + return; + + hw->dev_spec.e82571.laa_is_present = state; + + /* If workaround is activated... */ + if (state) + /* Hold a copy of the LAA in RAR[14]. This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed, the actual LAA is in one of the RARs and no + * incoming packets directed to this port are dropped. + * Eventually the LAA will be in RAR[0] and RAR[14]. + */ + hw->mac.ops.rar_set(hw, hw->mac.addr, + hw->mac.rar_entry_count - 1); +} + +/** + * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum + * @hw: pointer to the HW structure + * + * Verifies that the EEPROM has completed the update. After updating the + * EEPROM, we need to check bit 15 in word 0x23 for the checksum fix. If + * the checksum fix is not implemented, we need to set the bit and update + * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect, + * we need to return bad checksum. + **/ +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val; + u16 data; + + if (nvm->type != e1000_nvm_flash_hw) + return 0; + + /* Check bit 4 of word 10h. If it is 0, firmware is done updating + * 10h-12h. Checksum may need to be fixed. + */ + ret_val = e1000_read_nvm(hw, 0x10, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x10)) { + /* Read 0x23 and check bit 15. This bit is a 1 + * when the checksum has already been fixed. If + * the checksum is still wrong and this bit is a + * 1, we need to return bad checksum. Otherwise, + * we need to set this bit to a 1 and update the + * checksum. + */ + ret_val = e1000_read_nvm(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & 0x8000)) { + data |= 0x8000; + ret_val = e1000_write_nvm(hw, 0x23, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_read_mac_addr_82571 - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) +{ + if (hw->mac.type == e1000_82571) { + s32 ret_val; + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address.
+ */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + } + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_power_down_phy_copper_82571 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_mac_info *mac = &hw->mac; + + if (!phy->ops.check_reset_block) + return; + + /* If the management interface is not enabled, then power down */ + if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) +{ + e1000e_clear_hw_cntrs_base(hw); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + er32(ICRXPTC); + er32(ICRXATC); + er32(ICTXPTC); + er32(ICTXATC); + er32(ICTXQEC); + er32(ICTXQMTC); + er32(ICRXDMTC); +} + +static const struct e1000_mac_operations e82571_mac_ops = { + /* .check_mng_mode: mac type dependent */ + /* .check_for_link: media type dependent */ + .id_led_init = e1000e_id_led_init_generic, + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, + .get_bus_info = e1000e_get_bus_info_pcie, + .set_lan_id = e1000_set_lan_id_multi_port_pcie, + /* .get_link_up_info: media type dependent */ + /* .led_on: mac type dependent */ + .led_off = e1000e_led_off_generic, + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .write_vfta = e1000_write_vfta_generic, + .clear_vfta = e1000_clear_vfta_82571, + .reset_hw = e1000_reset_hw_82571, + .init_hw = e1000_init_hw_82571, + .setup_link = e1000_setup_link_82571, + /* .setup_physical_interface: media type dependent */ + .setup_led = e1000e_setup_led_generic, + .config_collision_dist = e1000e_config_collision_dist_generic, + .read_mac_addr = e1000_read_mac_addr_82571, + .rar_set = e1000e_rar_set_generic, +}; + +static const struct e1000_phy_operations e82_phy_ops_igp = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_igp, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = NULL, + .force_speed_duplex = e1000e_phy_force_speed_duplex_igp, + .get_cfg_done = e1000_get_cfg_done_82571, + .get_cable_length = e1000e_get_cable_length_igp_2, + .get_info = e1000e_get_phy_info_igp, + .read_reg = e1000e_read_phy_reg_igp, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_igp, + .cfg_on_link_up = NULL, +}; + +static const struct e1000_phy_operations e82_phy_ops_m88 = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = 
e1000e_phy_sw_reset, + .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, + .get_cfg_done = e1000e_get_cfg_done_generic, + .get_cable_length = e1000e_get_cable_length_m88, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000e_read_phy_reg_m88, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_m88, + .cfg_on_link_up = NULL, +}; + +static const struct e1000_phy_operations e82_phy_ops_bm = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, + .get_cfg_done = e1000e_get_cfg_done_generic, + .get_cable_length = e1000e_get_cable_length_m88, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000e_read_phy_reg_bm2, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_bm2, + .cfg_on_link_up = NULL, +}; + +static const struct e1000_nvm_operations e82571_nvm_ops = { + .acquire = e1000_acquire_nvm_82571, + .read = e1000e_read_nvm_eerd, + .release = e1000_release_nvm_82571, + .reload = e1000e_reload_nvm_generic, + .update = e1000_update_nvm_checksum_82571, + .valid_led_default = e1000_valid_led_default_82571, + .validate = e1000_validate_nvm_checksum_82571, + .write = e1000_write_nvm_82571, +}; + +const struct e1000_info e1000_82571_info = { + .mac = e1000_82571, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_RESET_OVERWRITES_LAA /* errata */ + | FLAG_TARC_SPEED_MODE_BIT /* errata */ + | FLAG_APME_CHECK_PORT_B, + .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ + | FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82572_info = { + .mac = e1000_82572, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_TARC_SPEED_MODE_BIT, /* errata */ + .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ + | FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82573_info = { + .mac = e1000_82573, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_SWSM_ON_LOAD, + .flags2 = FLAG2_DISABLE_ASPM_L1 + | FLAG2_DISABLE_ASPM_L0S, + .pba = 20, + .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_m88, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82574_info = { + .mac = e1000_82574, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_MSIX + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_CTRLEXT_ON_LOAD, + .flags2 = 
FLAG2_CHECK_PHY_HANG + | FLAG2_DISABLE_ASPM_L0S + | FLAG2_DISABLE_ASPM_L1 + | FLAG2_NO_DISABLE_RX + | FLAG2_DMA_BURST, + .pba = 32, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_bm, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82583_info = { + .mac = e1000_82583, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_CTRLEXT_ON_LOAD, + .flags2 = FLAG2_DISABLE_ASPM_L0S + | FLAG2_NO_DISABLE_RX, + .pba = 32, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_bm, + .nvm_ops = &e82571_nvm_ops, +}; diff --git a/addons/e1000e/src/3.10.108/82571.h b/addons/e1000e/src/3.10.108/82571.h new file mode 100644 index 00000000..08e24dc3 --- /dev/null +++ b/addons/e1000e/src/3.10.108/82571.h @@ -0,0 +1,60 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_82571_H_ +#define _E1000E_82571_H_ + +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ + +/* Intr Throttling - RW */ +#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n))) + +#define E1000_EIAC_82574 0x000DC /* Ext. 
Interrupt Auto Clear - RW */ +#define E1000_EIAC_MASK_82574 0x01F00000 + +#define E1000_IVAR_INT_ALLOC_VALID 0x8 + +/* Manageability Operation Mode mask */ +#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 + +#define E1000_BASE1000T_STATUS 10 +#define E1000_IDLE_ERROR_COUNT_MASK 0xFF +#define E1000_RECEIVE_ERROR_COUNTER 21 +#define E1000_RECEIVE_ERROR_MAX 0xFFFF +bool e1000_check_phy_82574(struct e1000_hw *hw); +bool e1000e_get_laa_state_82571(struct e1000_hw *hw); +void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); + +#endif diff --git a/addons/e1000e/src/3.10.108/Makefile b/addons/e1000e/src/3.10.108/Makefile new file mode 100644 index 00000000..9393a936 --- /dev/null +++ b/addons/e1000e/src/3.10.108/Makefile @@ -0,0 +1,5 @@ +obj-m += e1000e.o +e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \ + mac.o manage.o nvm.o phy.o \ + param.o ethtool.o netdev.o ptp.o + diff --git a/addons/e1000e/src/3.10.108/defines.h b/addons/e1000e/src/3.10.108/defines.h new file mode 100644 index 00000000..351c94a0 --- /dev/null +++ b/addons/e1000e/src/3.10.108/defines.h @@ -0,0 +1,803 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC + +/* Extended Device Control */ +#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ +#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_LSECCK 0x00001000 +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + 
E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 + +#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ +#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ +#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ +#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ + +/* Receive Control */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ +#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * 
E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) & + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x1 +#define E1000_SWFW_PHY0_SM 0x2 +#define E1000_SWFW_PHY1_SM 0x4 +#define E1000_SWFW_CSR_SM 0x8 + +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ +#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ +#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ + +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 + +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */ + +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + 
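/* Editorial illustration, not part of the original commit: a minimal
 * sketch of how the Device Status masks above decode link state. The
 * helper names here are invented for this example; the driver itself
 * open-codes the same cascade, e.g. in ethtool's e1000_get_settings()
 * further below.
 */
static inline unsigned int e1000_status_speed_mbps(u32 status)
{
	if (!(status & E1000_STATUS_LU))
		return 0;			/* no link */
	if (status & E1000_STATUS_SPEED_1000)
		return 1000;
	if (status & E1000_STATUS_SPEED_100)
		return 100;
	return 10;			/* E1000_STATUS_SPEED_10 is the all-zero pattern */
}

static inline unsigned int e1000_status_duplex(u32 status)
{
	return (status & E1000_STATUS_FD) ? FULL_DUPLEX : HALF_DUPLEX;
}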
+#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. */ +#define E1000_ALL_SPEED_DUPLEX ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_PHY_LED0_MODE_MASK 0x00000007 +#define E1000_PHY_LED0_IVRT 0x00000008 +#define E1000_PHY_LED0_MASK 0x0000001F + +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable Tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 +#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ +#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF + +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define MAX_JUMBO_FRAME_SIZE 0x3F00 + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* Low Power IDLE Control */ +#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */ + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ + +#define E1000_PBA_RXA_MASK 0xFFFF + +#define E1000_PBS_16K E1000_PBA_16K + +/* Uncorrectable/correctable ECC Error counts and enable bits */ +#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF +#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 +#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 +#define E1000_PBECCSTS_ECC_ENABLE 0x00010000 + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. 
threshold (0) */ +#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ +#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ +#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ +#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ +#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ + +/* PBA ECC Register */ +#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ +#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */ +#define E1000_PBA_ECC_CORR_EN 0x00000001 /* ECC correction enable */ +#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ +#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ +#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ +#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ +#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ +#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ +#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +/* Enable the counting of desc. still to be processed. */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. 
However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. + */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ + +#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000 + +#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000 + +#define E1000_TIMINCA_INCPERIOD_SHIFT 24 +#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF + +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 
+#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +/* NVM Addressing bits based on type (0-small, 1-large) */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) + +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */ +#define E1000_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_FUTURE_INIT_WORD1 0x0019 +#define NVM_COMPAT_VALID_CSUM 0x0001 +#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 + +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F + +#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + +/* length of string needed to store PBA number */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. 
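 * In practice that means the checksum word at offset NVM_CHECKSUM_REG
 * (0x3F) is written as 0xBABA minus the 16-bit sum of words 0x00-0x3E;
 * for example, if those words sum to 0x1234, the stored checksum word is
 * 0xBABA - 0x1234 = 0xA886.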
*/ +#define NVM_SUM 0xBABA + +/* PBA (printed board assembly) number words */ +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 + +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. + * I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define BME1000_E_PHY_ID 0x01410CB0 +#define BME1000_E_PHY_ID_R2 0x01410CB1 +#define I82577_E_PHY_ID 0x01540050 +#define I82578_E_PHY_ID 0x004DD040 +#define I82579_E_PHY_ID 0x01540090 +#define I217_E_PHY_ID 0x015400A0 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define 
M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020 +#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C + +/* BME1000 PHY Specific Control Register */ +#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ + +/* Bits... + * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL \ + GG82563_REG(0, 16) /* PHY Specific Control */ +#define GG82563_PHY_PAGE_SELECT \ + GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 \ + GG82563_REG(0, 26) /* PHY Specific Control 2 */ +#define GG82563_PHY_PAGE_SELECT_ALT \ + GG82563_REG(0, 29) /* Alternate Page Select */ + +#define GG82563_PHY_MAC_SPEC_CTRL \ + GG82563_REG(2, 21) /* MAC Specific Control Register */ + +#define GG82563_PHY_DSP_DISTANCE \ + GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +#define GG82563_PHY_KMRN_MODE_CTRL \ + GG82563_REG(193, 16) /* Kumeran Mode Control */ +#define GG82563_PHY_PWR_MGMT_CTRL \ + GG82563_REG(193, 20) /* Power Management Control */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_INBAND_CTRL \ + GG82563_REG(194, 18) /* Inband Control */ + +/* MDI Control */ +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_ERROR 0x40000000 + +/* SerDes Control */ +#define E1000_GEN_POLL_TIMEOUT 640 + +#endif /* _E1000_DEFINES_H_ */ diff --git a/addons/e1000e/src/3.10.108/e1000.h b/addons/e1000e/src/3.10.108/e1000.h new file mode 100644 index 00000000..ffbc08f5 --- /dev/null +++ b/addons/e1000e/src/3.10.108/e1000.h @@ -0,0 +1,626 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* Linux PRO/1000 Ethernet Driver main header file */ + +#ifndef _E1000_H_ +#define _E1000_H_ + +#include <linux/bitops.h> +#include <linux/types.h> +#include <linux/timer.h> +#include <linux/workqueue.h> +#include <linux/io.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/pci-aspm.h> +#include <linux/crc32.h> +#include <linux/if_vlan.h> +#include <linux/clocksource.h> +#include <linux/net_tstamp.h> +#include <linux/ptp_clock_kernel.h> +#include <linux/ptp_classify.h> +#include <linux/mii.h> +#include <linux/mdio.h> +#include "hw.h" + +struct e1000_info; + +#define e_dbg(format, arg...) \ + netdev_dbg(hw->adapter->netdev, format, ## arg) +#define e_err(format, arg...) \ + netdev_err(adapter->netdev, format, ## arg) +#define e_info(format, arg...) \ + netdev_info(adapter->netdev, format, ## arg) +#define e_warn(format, arg...) \ + netdev_warn(adapter->netdev, format, ## arg) +#define e_notice(format, arg...) \ + netdev_notice(adapter->netdev, format, ## arg) + +/* Interrupt modes, as used by the IntMode parameter */ +#define E1000E_INT_MODE_LEGACY 0 +#define E1000E_INT_MODE_MSI 1 +#define E1000E_INT_MODE_MSIX 2 + +/* Tx/Rx descriptor defines */ +#define E1000_DEFAULT_TXD 256 +#define E1000_MAX_TXD 4096 +#define E1000_MIN_TXD 64 + +#define E1000_DEFAULT_RXD 256 +#define E1000_MAX_RXD 4096 +#define E1000_MIN_RXD 64 + +#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */ +#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */ + +#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */ + +/* How many Tx Descriptors do we need to call netif_wake_queue ? */ +/* How many Rx Buffers do we bundle into one write to the hardware ? */ +#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + +#define AUTO_ALL_MODES 0 +#define E1000_EEPROM_APME 0x0400 + +#define E1000_MNG_VLAN_NONE (-1) + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +#define DEFAULT_JUMBO 9234 + +/* Time to wait before putting the device into D3 if there's no link (in ms). */ +#define LINK_TIMEOUT 100 + +/* Count for polling __E1000_RESET condition every 10-20msec. + * Experimentation has shown the reset can take approximately 210msec. 
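 * At E1000_CHECK_RESET_COUNT = 25 polls (defined just below) of 10-20 msec
 * each, the driver therefore waits roughly 250-500 msec in total,
 * comfortably above that observed worst case.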
+ */ +#define E1000_CHECK_RESET_COUNT 25 + +#define DEFAULT_RDTR 0 +#define DEFAULT_RADV 8 +#define BURST_RDTR 0x20 +#define BURST_RADV 0x20 + +/* in the case of WTHRESH, it appears at least the 82571/2 hardware + * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when + * WTHRESH=4, so a setting of 5 gives the most efficient bus + * utilization but to avoid possible Tx stalls, set it to 1 + */ +#define E1000_TXDCTL_DMA_BURST_ENABLE \ + (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ + E1000_TXDCTL_COUNT_DESC | \ + (1 << 16) | /* wthresh must be +1 more than desired */\ + (1 << 8) | /* hthresh */ \ + 0x1f) /* pthresh */ + +#define E1000_RXDCTL_DMA_BURST_ENABLE \ + (0x01000000 | /* set descriptor granularity */ \ + (4 << 16) | /* set writeback threshold */ \ + (4 << 8) | /* set prefetch threshold */ \ + 0x20) /* set hthresh */ + +#define E1000_TIDV_FPD (1 << 31) +#define E1000_RDTR_FPD (1 << 31) + +enum e1000_boards { + board_82571, + board_82572, + board_82573, + board_82574, + board_82583, + board_80003es2lan, + board_ich8lan, + board_ich9lan, + board_ich10lan, + board_pchlan, + board_pch2lan, + board_pch_lpt, +}; + +struct e1000_ps_page { + struct page *page; + u64 dma; /* must be u64 - written to hw */ +}; + +/* wrappers around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + /* Tx */ + struct { + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + unsigned int segs; + unsigned int bytecount; + u16 mapped_as_page; + }; + /* Rx */ + struct { + /* arrays of page information for packet split */ + struct e1000_ps_page *ps_pages; + struct page *page; + }; + }; +}; + +struct e1000_ring { + struct e1000_adapter *adapter; /* back pointer to adapter */ + void *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. 
in ring */ + + u16 next_to_use; + u16 next_to_clean; + + void __iomem *head; + void __iomem *tail; + + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + + char name[IFNAMSIZ + 5]; + u32 ims_val; + u32 itr_val; + void __iomem *itr_register; + int set_itr; + + struct sk_buff *rx_skb_top; +}; + +/* PHY register snapshot values */ +struct e1000_phy_regs { + u16 bmcr; /* basic mode control register */ + u16 bmsr; /* basic mode status register */ + u16 advertise; /* auto-negotiation advertisement */ + u16 lpa; /* link partner ability register */ + u16 expansion; /* auto-negotiation expansion reg */ + u16 ctrl1000; /* 1000BASE-T control register */ + u16 stat1000; /* 1000BASE-T status register */ + u16 estatus; /* extended status register */ +}; + +/* board specific private data structure */ +struct e1000_adapter { + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + struct timer_list blink_timer; + + struct work_struct reset_task; + struct work_struct watchdog_task; + + const struct e1000_info *ei; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 bd_number; + u32 rx_buffer_len; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + u16 eeprom_vers; + + /* track device up/down/testing state */ + unsigned long state; + + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* Tx - one ring per active queue */ + struct e1000_ring *tx_ring ____cacheline_aligned_in_smp; + u32 tx_fifo_limit; + + struct napi_struct napi; + + unsigned int uncorr_errors; /* uncorrectable ECC errors */ + unsigned int corr_errors; /* correctable ECC errors */ + unsigned int restart_queue; + u32 txd_cmd; + + bool detect_tx_hung; + bool tx_hang_recheck; + u8 tx_timeout_factor; + + u32 tx_int_delay; + u32 tx_abs_int_delay; + + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + + /* Tx stats */ + u64 tpt_old; + u64 colc_old; + u32 gotc; + u64 gotc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + + /* Rx */ + bool (*clean_rx) (struct e1000_ring *ring, int *work_done, + int work_to_do) ____cacheline_aligned_in_smp; + void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count, + gfp_t gfp); + struct e1000_ring *rx_ring; + + u32 rx_int_delay; + u32 rx_abs_int_delay; + + /* Rx stats */ + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 gorc; + u64 gorc_old; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 rx_hwtstamp_cleared; + + unsigned int rx_ps_pages; + u16 rx_ps_bsize0; + u32 max_frame_size; + u32 min_frame_size; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + + spinlock_t stats64_lock; /* protects statistics counters */ + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + /* Snapshot of PHY registers */ + struct e1000_phy_regs phy_regs; + + struct e1000_ring test_tx_ring; + struct e1000_ring test_rx_ring; + u32 test_icr; + + u32 msg_enable; + unsigned int num_vectors; + struct msix_entry *msix_entries; + int int_mode; + u32 eiac_mask; + + u32 eeprom_wol; + u32 wol; + u32 pba; + u32 max_hw_frame_size; + + bool fc_autoneg; + + unsigned int flags; + unsigned int flags2; + struct work_struct downshift_task; + struct work_struct update_phy_task; + struct work_struct print_hang_task; + + bool idle_check; + int 
phy_hang_count; + + u16 tx_ring_count; + u16 rx_ring_count; + + struct hwtstamp_config hwtstamp_config; + struct delayed_work systim_overflow_work; + struct sk_buff *tx_hwtstamp_skb; + struct work_struct tx_hwtstamp_work; + spinlock_t systim_lock; /* protects SYSTIML/H registers */ + struct cyclecounter cc; + struct timecounter tc; + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_clock_info; + + u16 eee_advert; +}; + +struct e1000_info { + enum e1000_mac_type mac; + unsigned int flags; + unsigned int flags2; + u32 pba; + u32 max_hw_frame_size; + s32 (*get_variants)(struct e1000_adapter *); + const struct e1000_mac_operations *mac_ops; + const struct e1000_phy_operations *phy_ops; + const struct e1000_nvm_operations *nvm_ops; +}; + +s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca); + +/* The system time is maintained by a 64-bit counter comprised of the 32-bit + * SYSTIMH and SYSTIML registers. How the counter increments (and therefore + * its resolution) is based on the contents of the TIMINCA register - it + * increments every incperiod (bits 31:24) clock ticks by incvalue (bits 23:0). + * For the best accuracy, the incperiod should be as small as possible. The + * incvalue is scaled by a factor as large as possible (while still fitting + * in bits 23:0) so that relatively small clock corrections can be made. + * + * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of + * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n) + * bits to count nanoseconds leaving the rest for fractional nanoseconds. + */ +#define INCVALUE_96MHz 125 +#define INCVALUE_SHIFT_96MHz 17 +#define INCPERIOD_SHIFT_96MHz 2 +#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz) + +#define INCVALUE_25MHz 40 +#define INCVALUE_SHIFT_25MHz 18 +#define INCPERIOD_25MHz 1 + +/* Another drawback of scaling the incvalue by a large factor is the + * 64-bit SYSTIM register overflows more quickly. This is dealt with + * by simply reading the clock before it overflows. 
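 * As a worked check of the 96 MHz row in the table below:
 * INCVALUE_SHIFT_96MHz = 17 leaves 32+8+(24-17) = 47 bits counting whole
 * nanoseconds, and the counter overflows after
 * 2^(47-INCPERIOD_SHIFT_96MHz) = 2^45 ns, i.e. 2^45 / 10^9 / 3600 = 9.77
 * hours.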
+ * + * Clock ns bits Overflows after + * ~~~~~~ ~~~~~~~ ~~~~~~~~~~~~~~~ + * 96MHz 47-bit 2^(47-INCPERIOD_SHIFT_96MHz) / 10^9 / 3600 = 9.77 hrs + * 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours + */ +#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4) + +/* hardware capability, feature, and workaround flags */ +#define FLAG_HAS_AMT (1 << 0) +#define FLAG_HAS_FLASH (1 << 1) +#define FLAG_HAS_HW_VLAN_FILTER (1 << 2) +#define FLAG_HAS_WOL (1 << 3) +/* reserved bit4 */ +#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) +#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) +#define FLAG_HAS_JUMBO_FRAMES (1 << 7) +#define FLAG_READ_ONLY_NVM (1 << 8) +#define FLAG_IS_ICH (1 << 9) +#define FLAG_HAS_MSIX (1 << 10) +#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) +#define FLAG_IS_QUAD_PORT_A (1 << 12) +#define FLAG_IS_QUAD_PORT (1 << 13) +#define FLAG_HAS_HW_TIMESTAMP (1 << 14) +#define FLAG_APME_IN_WUC (1 << 15) +#define FLAG_APME_IN_CTRL3 (1 << 16) +#define FLAG_APME_CHECK_PORT_B (1 << 17) +#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18) +#define FLAG_NO_WAKE_UCAST (1 << 19) +#define FLAG_MNG_PT_ENABLED (1 << 20) +#define FLAG_RESET_OVERWRITES_LAA (1 << 21) +#define FLAG_TARC_SPEED_MODE_BIT (1 << 22) +#define FLAG_TARC_SET_BIT_ZERO (1 << 23) +#define FLAG_RX_NEEDS_RESTART (1 << 24) +#define FLAG_LSC_GIG_SPEED_DROP (1 << 25) +#define FLAG_SMART_POWER_DOWN (1 << 26) +#define FLAG_MSI_ENABLED (1 << 27) +/* reserved (1 << 28) */ +#define FLAG_TSO_FORCE (1 << 29) +#define FLAG_RESTART_NOW (1 << 30) +#define FLAG_MSI_TEST_FAILED (1 << 31) + +#define FLAG2_CRC_STRIPPING (1 << 0) +#define FLAG2_HAS_PHY_WAKEUP (1 << 1) +#define FLAG2_IS_DISCARDING (1 << 2) +#define FLAG2_DISABLE_ASPM_L1 (1 << 3) +#define FLAG2_HAS_PHY_STATS (1 << 4) +#define FLAG2_HAS_EEE (1 << 5) +#define FLAG2_DMA_BURST (1 << 6) +#define FLAG2_DISABLE_ASPM_L0S (1 << 7) +#define FLAG2_DISABLE_AIM (1 << 8) +#define FLAG2_CHECK_PHY_HANG (1 << 9) +#define FLAG2_NO_DISABLE_RX (1 << 10) +#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) +#define FLAG2_DFLT_CRC_STRIPPING (1 << 12) +#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13) + +#define E1000_RX_DESC_PS(R, i) \ + (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_ACCESS_SHARED_RESOURCE, + __E1000_DOWN +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +extern char e1000e_driver_name[]; +extern const char e1000e_driver_version[]; + +extern void e1000e_check_options(struct e1000_adapter *adapter); +extern void e1000e_set_ethtool_ops(struct net_device *netdev); + +extern int e1000e_up(struct e1000_adapter *adapter); +extern void e1000e_down(struct e1000_adapter *adapter); +extern void e1000e_reinit_locked(struct e1000_adapter *adapter); +extern void e1000e_reset(struct e1000_adapter *adapter); +extern void e1000e_power_up_phy(struct e1000_adapter *adapter); +extern int e1000e_setup_rx_resources(struct e1000_ring *ring); +extern int e1000e_setup_tx_resources(struct e1000_ring *ring); +extern void e1000e_free_rx_resources(struct e1000_ring *ring); +extern void e1000e_free_tx_resources(struct e1000_ring *ring); +extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, + 
struct rtnl_link_stats64 + *stats); +extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); +extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); +extern void e1000e_get_hw_control(struct e1000_adapter *adapter); +extern void e1000e_release_hw_control(struct e1000_adapter *adapter); +extern void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr); + +extern unsigned int copybreak; + +extern const struct e1000_info e1000_82571_info; +extern const struct e1000_info e1000_82572_info; +extern const struct e1000_info e1000_82573_info; +extern const struct e1000_info e1000_82574_info; +extern const struct e1000_info e1000_82583_info; +extern const struct e1000_info e1000_ich8_info; +extern const struct e1000_info e1000_ich9_info; +extern const struct e1000_info e1000_ich10_info; +extern const struct e1000_info e1000_pch_info; +extern const struct e1000_info e1000_pch2_info; +extern const struct e1000_info e1000_pch_lpt_info; +extern const struct e1000_info e1000_es2_info; + +extern void e1000e_ptp_init(struct e1000_adapter *adapter); +extern void e1000e_ptp_remove(struct e1000_adapter *adapter); + +static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + return hw->phy.ops.reset(hw); +} + +static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_reg(hw, offset, data); +} + +static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_reg_locked(hw, offset, data); +} + +static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_reg(hw, offset, data); +} + +static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_reg_locked(hw, offset, data); +} + +extern void e1000e_reload_nvm_generic(struct e1000_hw *hw); + +static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->mac.ops.read_mac_addr) + return hw->mac.ops.read_mac_addr(hw); + + return e1000_read_mac_addr_generic(hw); +} + +static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.validate(hw); +} + +static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.update(hw); +} + +static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + return hw->nvm.ops.read(hw, offset, words, data); +} + +static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + return hw->nvm.ops.write(hw, offset, words, data); +} + +static inline s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + return hw->phy.ops.get_info(hw); +} + +static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->hw_addr + reg); +} + +#define er32(reg) __er32(hw, E1000_##reg) + +/** + * __ew32_prepare - prepare to write to MAC CSR register on certain parts + * @hw: pointer to the HW structure + * + * When updating the MAC CSR registers, the Manageability Engine (ME) could + * be accessing the registers at the same time. Normally, this is handled in + * h/w by an arbiter but on some parts there is a bug that acknowledges Host + * accesses later than it should which could result in the register to have + * an incorrect value. Workaround this by checking the FWSM register which + * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set + * and try again a number of times. 
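 * (The loop below bounds that wait: at most E1000_ICH_FWSM_PCIM2PCI_COUNT
 * reads of the FWSM register, with a 50 microsecond delay between reads,
 * returning 0 if the bit never cleared.)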
+ **/ +static inline s32 __ew32_prepare(struct e1000_hw *hw) +{ + s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; + + while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) + udelay(50); + + return i; +} + +static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + __ew32_prepare(hw); + + writel(val, hw->hw_addr + reg); +} + +#define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) + +#define e1e_flush() er32(STATUS) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ + (__ew32((a), (reg + ((offset) << 2)), (value))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) \ + (readl((a)->hw_addr + reg + ((offset) << 2))) + +#endif /* _E1000_H_ */ diff --git a/addons/e1000e/src/3.10.108/ethtool.c b/addons/e1000e/src/3.10.108/ethtool.c new file mode 100644 index 00000000..7c8ca658 --- /dev/null +++ b/addons/e1000e/src/3.10.108/ethtool.c @@ -0,0 +1,2262 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* ethtool support for e1000 */ + +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/ethtool.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/vmalloc.h> +#include <linux/mdio.h> + +#include "e1000.h" + +enum { NETDEV_STATS, E1000_STATS }; + +struct e1000_stats { + char stat_string[ETH_GSTRING_LEN]; + int type; + int sizeof_stat; + int stat_offset; +}; + +#define E1000_STAT(str, m) { \ + .stat_string = str, \ + .type = E1000_STATS, \ + .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \ + .stat_offset = offsetof(struct e1000_adapter, m) } +#define E1000_NETDEV_STAT(str, m) { \ + .stat_string = str, \ + .type = NETDEV_STATS, \ + .sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \ + .stat_offset = offsetof(struct rtnl_link_stats64, m) } + +static const struct e1000_stats e1000_gstrings_stats[] = { + E1000_STAT("rx_packets", stats.gprc), + E1000_STAT("tx_packets", stats.gptc), + E1000_STAT("rx_bytes", stats.gorc), + E1000_STAT("tx_bytes", stats.gotc), + E1000_STAT("rx_broadcast", stats.bprc), + E1000_STAT("tx_broadcast", stats.bptc), + E1000_STAT("rx_multicast", stats.mprc), + E1000_STAT("tx_multicast", stats.mptc), + E1000_NETDEV_STAT("rx_errors", rx_errors), + E1000_NETDEV_STAT("tx_errors", tx_errors), + E1000_NETDEV_STAT("tx_dropped", tx_dropped), + E1000_STAT("multicast", stats.mprc), + E1000_STAT("collisions", stats.colc), + E1000_NETDEV_STAT("rx_length_errors", rx_length_errors), + E1000_NETDEV_STAT("rx_over_errors", rx_over_errors), + E1000_STAT("rx_crc_errors", stats.crcerrs), + E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors), + E1000_STAT("rx_no_buffer_count", stats.rnbc), + E1000_STAT("rx_missed_errors", stats.mpc), + E1000_STAT("tx_aborted_errors", stats.ecol), + E1000_STAT("tx_carrier_errors", stats.tncrs), + E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors), + E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors), + E1000_STAT("tx_window_errors", stats.latecol), + E1000_STAT("tx_abort_late_coll", stats.latecol), + E1000_STAT("tx_deferred_ok", stats.dc), + E1000_STAT("tx_single_coll_ok", stats.scc), + E1000_STAT("tx_multi_coll_ok", stats.mcc), + E1000_STAT("tx_timeout_count", tx_timeout_count), + E1000_STAT("tx_restart_queue", restart_queue), + E1000_STAT("rx_long_length_errors", stats.roc), + E1000_STAT("rx_short_length_errors", stats.ruc), + E1000_STAT("rx_align_errors", stats.algnerrc), + E1000_STAT("tx_tcp_seg_good", stats.tsctc), + E1000_STAT("tx_tcp_seg_failed", stats.tsctfc), + E1000_STAT("rx_flow_control_xon", stats.xonrxc), + E1000_STAT("rx_flow_control_xoff", stats.xoffrxc), + E1000_STAT("tx_flow_control_xon", stats.xontxc), + E1000_STAT("tx_flow_control_xoff", stats.xofftxc), + E1000_STAT("rx_csum_offload_good", hw_csum_good), + E1000_STAT("rx_csum_offload_errors", hw_csum_err), + E1000_STAT("rx_header_split", rx_hdr_split), + E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), + E1000_STAT("tx_smbus", stats.mgptc), + E1000_STAT("rx_smbus", stats.mgprc), + E1000_STAT("dropped_smbus", stats.mgpdc), + E1000_STAT("rx_dma_failed", rx_dma_failed), + E1000_STAT("tx_dma_failed", tx_dma_failed), + E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + E1000_STAT("uncorr_ecc_errors", uncorr_errors), + E1000_STAT("corr_ecc_errors", corr_errors), +}; + +#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN) +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test 
(offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) + +static int e1000_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 speed; + + if (hw->phy.media_type == e1000_media_type_copper) { + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->phy.type == e1000_phy_ife) + ecmd->supported &= ~SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->phy.autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + ecmd->transceiver = XCVR_INTERNAL; + + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + } + + speed = -1; + ecmd->duplex = -1; + + if (netif_running(netdev)) { + if (netif_carrier_ok(netdev)) { + speed = adapter->link_speed; + ecmd->duplex = adapter->link_duplex - 1; + } + } else { + u32 status = er32(STATUS); + if (status & E1000_STATUS_LU) { + if (status & E1000_STATUS_SPEED_1000) + speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + speed = SPEED_100; + else + speed = SPEED_10; + + if (status & E1000_STATUS_FD) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } + } + + ethtool_cmd_speed_set(ecmd, speed); + ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || + hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if ((hw->phy.media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + ecmd->eth_tp_mdix = hw->phy.is_mdix ? 
ETH_TP_MDI_X : ETH_TP_MDI; + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 Mbps Full duplex */ + if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && + (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) { + goto err_inval; + } + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err("Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int e1000_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed + */ + if (hw->phy.ops.check_reset_block && + hw->phy.ops.check_reset_block(hw)) { + e_err("Cannot change link characteristics when SoL/IDER is active.\n"); + return -EINVAL; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
+ */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + return -EINVAL; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + if (hw->phy.media_type == e1000_media_type_fiber) + hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | ADVERTISED_Autoneg; + else + hw->phy.autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | ADVERTISED_Autoneg; + ecmd->advertising = hw->phy.autoneg_advertised; + if (adapter->fc_autoneg) + hw->fc.requested_mode = e1000_fc_default; + } else { + u32 speed = ethtool_cmd_speed(ecmd); + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { + clear_bit(__E1000_RESETTING, &adapter->state); + return -EINVAL; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + e1000e_down(adapter); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + + clear_bit(__E1000_RESETTING, &adapter->state); + return 0; +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == e1000_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == e1000_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = e1000_fc_default; + if (netif_running(adapter->netdev)) { + e1000e_down(adapter); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + if (hw->phy.media_type == e1000_media_type_fiber) { + retval = hw->mac.ops.setup_link(hw); + /* implicit goto out */ + } else { + retval = e1000e_force_mac_fc(hw); + if (retval) + goto out; + e1000e_set_fc_watermarks(hw); + } + } + +out: + clear_bit(__E1000_RESETTING, &adapter->state); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device __always_unused *netdev) +{ +#define E1000_REGS_LEN 32 /* overestimate */ + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (adapter->pdev->revision << 16) | + adapter->pdev->device; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN(0)); + regs_buff[4] = er32(RDH(0)); + regs_buff[5] = er32(RDT(0)); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN(0)); + regs_buff[9] = er32(TDH(0)); + regs_buff[10] = er32(TDT(0)); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ + + /* ethtool doesn't use anything past this point, so all this + * code is likely legacy junk for apps that may or may not exist + */ + if (hw->phy.type == e1000_phy_m88) { + e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy 
(to align w/ IGP phy reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = 0; /* was idle_errors */ + e1e_rphy(hw, MII_STAT1000, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + ret_val = e1000_read_nvm(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_nvm(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + if (ret_val) { + /* a read error occurred, throw away the result */ + memset(eeprom_buff, 0xff, sizeof(u16) * + (last_word - first_word + 1)); + } else { + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != + (adapter->pdev->vendor | (adapter->pdev->device << 16))) + return -EFAULT; + + if (adapter->flags & FLAG_READ_ONLY_NVM) + return -EINVAL; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (!ret_val)) + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + + if (ret_val) + goto out; + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = e1000_write_nvm(hw, 
first_word, + last_word - first_word + 1, eeprom_buff); + + if (ret_val) + goto out; + + /* Update the checksum over the first part of the EEPROM if needed + * and flush shadow RAM for applicable controllers + */ + if ((first_word <= NVM_CHECKSUM_REG) || + (hw->mac.type == e1000_82583) || + (hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82573)) + ret_val = e1000e_update_nvm_checksum(hw); + +out: + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, e1000e_driver_version, + sizeof(drvinfo->version)); + + /* EEPROM image version # is reported as firmware version # for + * PCI-E controllers + */ + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d-%d", + (adapter->eeprom_vers & 0xF000) >> 12, + (adapter->eeprom_vers & 0x0FF0) >> 4, + (adapter->eeprom_vers & 0x000F)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); + drvinfo->regdump_len = e1000_get_regs_len(netdev); + drvinfo->eedump_len = e1000_get_eeprom_len(netdev); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = E1000_MAX_RXD; + ring->tx_max_pending = E1000_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *temp_tx = NULL, *temp_rx = NULL; + int err = 0, size = sizeof(struct e1000_ring); + bool set_tx = false, set_rx = false; + u16 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = clamp_t(u32, ring->rx_pending, E1000_MIN_RXD, + E1000_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = clamp_t(u32, ring->tx_pending, E1000_MIN_TXD, + E1000_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) + /* nothing to do */ + return 0; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + /* Set counts now and allocate resources during open() */ + adapter->tx_ring->count = new_tx_count; + adapter->rx_ring->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + set_tx = (new_tx_count != adapter->tx_ring_count); + set_rx = (new_rx_count != adapter->rx_ring_count); + + /* Allocate temporary storage for ring updates */ + if (set_tx) { + temp_tx = vmalloc(size); + if (!temp_tx) { + err = -ENOMEM; + goto free_temp; + } + } + if (set_rx) { + temp_rx = vmalloc(size); + if (!temp_rx) { + err = -ENOMEM; + goto free_temp; + } + } + + e1000e_down(adapter); + + /* We can't just free everything and then setup again, because the + * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring + * structs. First, attempt to allocate new resources... 
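+	 * (Editor's note: this is why the function memcpy()s the live ring
+	 * struct into vmalloc()ed scratch space and calls
+	 * e1000e_setup_*_resources() on the copy. Only once the new
+	 * descriptor memory exists are the old resources freed and the
+	 * scratch struct copied back over the original, so the pointers the
+	 * interrupt handlers hold stay valid at every step.)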
+ */ + if (set_tx) { + memcpy(temp_tx, adapter->tx_ring, size); + temp_tx->count = new_tx_count; + err = e1000e_setup_tx_resources(temp_tx); + if (err) + goto err_setup; + } + if (set_rx) { + memcpy(temp_rx, adapter->rx_ring, size); + temp_rx->count = new_rx_count; + err = e1000e_setup_rx_resources(temp_rx); + if (err) + goto err_setup_rx; + } + + /* ...then free the old resources and copy back any new ring data */ + if (set_tx) { + e1000e_free_tx_resources(adapter->tx_ring); + memcpy(adapter->tx_ring, temp_tx, size); + adapter->tx_ring_count = new_tx_count; + } + if (set_rx) { + e1000e_free_rx_resources(adapter->rx_ring); + memcpy(adapter->rx_ring, temp_rx, size); + adapter->rx_ring_count = new_rx_count; + } + +err_setup_rx: + if (err && set_tx) + e1000e_free_tx_resources(temp_tx); +err_setup: + e1000e_up(adapter); +free_temp: + vfree(temp_tx); + vfree(temp_rx); +clear_reset: + clear_bit(__E1000_RESETTING, &adapter->state); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, + int reg, int offset, u32 mask, u32 write) +{ + u32 pat, val; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + for (pat = 0; pat < ARRAY_SIZE(test); pat++) { + E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, + (test[pat] & write)); + val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); + if (val != (test[pat] & write & mask)) { + e_err("pattern test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n", + reg + (offset << 2), val, + (test[pat] & write & mask)); + *data = reg; + return 1; + } + } + return 0; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + u32 val; + __ew32(&adapter->hw, reg, write & mask); + val = __er32(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + return 1; + } + return 0; +} + +#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ + return 1; \ + } while (0) +#define REG_PATTERN_TEST(reg, mask, write) \ + REG_PATTERN_TEST_ARRAY(reg, 0, mask, write) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &adapter->hw.mac; + u32 value; + u32 before; + u32 after; + u32 i; + u32 toggle; + u32 mask; + u32 wlock_mac = 0; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. There are several bits + * on newer hardware that are r/w. 
+ */ + switch (mac->type) { + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + toggle = 0x7FFFF3FF; + break; + default: + toggle = 0x7FFFF033; + break; + } + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + if (!(adapter->flags & FLAG_IS_ICH)) { + REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF); + } + + REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); + + before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE); + REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); + + REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); + if (!(adapter->flags & FLAG_IS_ICH)) + REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); + mask = 0x8003FFFF; + switch (mac->type) { + case e1000_ich10lan: + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + mask |= (1 << 18); + break; + default: + break; + } + + if (mac->type == e1000_pch_lpt) + wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> + E1000_FWSM_WLOCK_MAC_SHIFT; + + for (i = 0; i < mac->rar_entry_count; i++) { + if (mac->type == e1000_pch_lpt) { + /* Cannot test write-protected SHRAL[n] registers */ + if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) + continue; + + /* SHRAH[9] different than the others */ + if (i == 10) + mask |= (1 << 30); + else + mask &= ~(1 << 30); + } + + REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask, + 0xFFFFFFFF); + } + + for (i = 0; i < mac->mta_reg_count; i++) + REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { + *data = 1; + return *data; + } + checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)NVM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + 
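+/* Editor's sketch (not part of the upstream driver): e1000_intr_test()
+ * below repeats one basic handshake per cause bit -- clear the
+ * accumulator that e1000_test_intr() above fills, unmask and
+ * software-trigger the cause, then check whether the ISR latched it.
+ * A hypothetical helper showing just that handshake:
+ */
+static bool e1000_cause_was_posted(struct e1000_adapter *adapter, u32 mask)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	adapter->test_icr = 0;
+	ew32(IMS, mask);		/* allow this cause to assert */
+	ew32(ICS, mask);		/* force the interrupt in software */
+	e1e_flush();
+	usleep_range(10000, 20000);	/* give the ISR time to run */
+
+	return (adapter->test_icr & mask) != 0;
+}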
+static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 mask; + u32 shared_int = 1; + u32 irq = adapter->pdev->irq; + int i; + int ret_val = 0; + int int_mode = E1000E_INT_MODE_LEGACY; + + *data = 0; + + /* NOTE: we don't test MSI/MSI-X interrupts here, yet */ + if (adapter->int_mode == E1000E_INT_MODE_MSIX) { + int_mode = adapter->int_mode; + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e1000e_set_interrupt_capability(adapter); + } + /* Hook up test interrupt handler just for this test */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) { + shared_int = 0; + } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name, + netdev)) { + *data = 1; + ret_val = -1; + goto out; + } + e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + e1e_flush(); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (i = 0; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (adapter->flags & FLAG_IS_ICH) { + switch (mask) { + case E1000_ICR_RXSEQ: + continue; + case 0x00000100: + if (adapter->hw.mac.type == e1000_ich8lan || + adapter->hw.mac.type == e1000_ich9lan) + continue; + break; + default: + break; + } + } + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + e1e_flush(); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
+ */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + e1e_flush(); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + +out: + if (int_mode == E1000E_INT_MODE_MSIX) { + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = int_mode; + e1000e_set_interrupt_capability(adapter); + } + + return ret_val; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info; + int i; + + if (tx_ring->desc && tx_ring->buffer_info) { + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + if (buffer_info->skb) + dev_kfree_skb(buffer_info->skb); + } + } + + if (rx_ring->desc && rx_ring->buffer_info) { + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, + buffer_info->dma, + 2048, DMA_FROM_DEVICE); + if (buffer_info->skb) + dev_kfree_skb(buffer_info->skb); + } + } + + if (tx_ring->desc) { + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; + } + if (rx_ring->desc) { + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; + } + + kfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + kfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + int i; + int ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!tx_ring->count) + tx_ring->count = E1000_DEFAULT_TXD; + + tx_ring->buffer_info = kcalloc(tx_ring->count, + sizeof(struct e1000_buffer), GFP_KERNEL); + if (!tx_ring->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + ret_val = 2; + goto err_nomem; + } + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH(0), ((u64)tx_ring->dma >> 32)); + ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc)); + ew32(TDH(0), 0); + ew32(TDT(0), 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < tx_ring->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct sk_buff *skb; + unsigned int skb_size = 1024; + + skb = alloc_skb(skb_size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, skb_size); + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].length = skb->len; + tx_ring->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, 
skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, + tx_ring->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rx_ring->count) + rx_ring->count = E1000_DEFAULT_RXD; + + rx_ring->buffer_info = kcalloc(rx_ring->count, + sizeof(struct e1000_buffer), GFP_KERNEL); + if (!rx_ring->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rx_ring->size = rx_ring->count * sizeof(union e1000_rx_desc_extended); + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) { + ret_val = 6; + goto err_nomem; + } + rx_ring->next_to_use = 0; + rx_ring->next_to_clean = 0; + + rctl = er32(RCTL); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF)); + ew32(RDBAH(0), ((u64)rx_ring->dma >> 32)); + ew32(RDLEN(0), rx_ring->size); + ew32(RDH(0), 0); + ew32(RDT(0), 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | + E1000_RCTL_SBP | E1000_RCTL_SECRC | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rx_ring->count; i++) { + union e1000_rx_desc_extended *rx_desc; + struct sk_buff *skb; + + skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL); + if (!skb) { + ret_val = 7; + goto err_nomem; + } + skb_reserve(skb, NET_IP_ALIGN); + rx_ring->buffer_info[i].skb = skb; + rx_ring->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, 2048, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + rx_ring->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + rx_desc->read.buffer_addr = + cpu_to_le64(rx_ring->buffer_info[i].dma); + memset(skb->data, 0x00, skb->len); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1e_wphy(&adapter->hw, 29, 0x001F); + e1e_wphy(&adapter->hw, 30, 0x8FFC); + e1e_wphy(&adapter->hw, 29, 0x001A); + e1e_wphy(&adapter->hw, 30, 0x8FF0); +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u16 phy_reg = 0; + s32 ret_val = 0; + + hw->mac.autoneg = 0; + + if (hw->phy.type == e1000_phy_ife) { + /* force 100, set loopback */ + e1e_wphy(hw, MII_BMCR, 0x6100); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
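+	 * (Editor's note: with autoneg disabled, E1000_CTRL_FRCSPD and
+	 * E1000_CTRL_FRCDPX make the MAC take speed and duplex from the
+	 * SPD_* and FD bits of CTRL rather than from what the PHY
+	 * negotiated, which is what the forced-100 loopback path here
+	 * relies on.)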
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_100 |/* Force Speed to 100 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + e1e_flush(); + usleep_range(500, 1000); + + return 0; + } + + /* Specific PHY configuration for loopback */ + switch (hw->phy.type) { + case e1000_phy_m88: + /* Auto-MDI/MDIX Off */ + e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1e_wphy(hw, MII_BMCR, 0x9140); + /* autoneg off */ + e1e_wphy(hw, MII_BMCR, 0x8140); + break; + case e1000_phy_gg82563: + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); + break; + case e1000_phy_bm: + /* Set Default MAC Interface speed to 1GB */ + e1e_rphy(hw, PHY_REG(2, 21), &phy_reg); + phy_reg &= ~0x0007; + phy_reg |= 0x006; + e1e_wphy(hw, PHY_REG(2, 21), phy_reg); + /* Assert SW reset for above settings to take effect */ + hw->phy.ops.commit(hw); + usleep_range(1000, 2000); + /* Force Full Duplex */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); + /* Set Link Up (in force link) */ + e1e_rphy(hw, PHY_REG(776, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040); + /* Force Link */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040); + /* Set Early Link Enable */ + e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); + break; + case e1000_phy_82577: + case e1000_phy_82578: + /* Workaround: K1 must be disabled for stable 1Gbps operation */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_err("Cannot setup 1Gbps loopback.\n"); + return ret_val; + } + e1000_configure_k1_ich8lan(hw, false); + hw->phy.ops.release(hw); + break; + case e1000_phy_82579: + /* Disable PHY energy detect power down */ + e1e_rphy(hw, PHY_REG(0, 21), &phy_reg); + e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3)); + /* Disable full chip energy detect */ + e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); + e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); + /* Enable loopback on the PHY */ + e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001); + break; + default: + break; + } + + /* force 1000, set loopback */ + e1e_wphy(hw, MII_BMCR, 0x4140); + msleep(250); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (adapter->flags & FLAG_IS_ICH) + ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ + + if (hw->phy.media_type == e1000_media_type_copper && + hw->phy.type == e1000_phy_m88) { + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + } else { + /* Set the ILOS bit on the fiber Nic if half duplex link is + * detected. + */ + if ((er32(STATUS) & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
+ */ + if (hw->phy.type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + usleep_range(500, 1000); + + return 0; +} + +static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + int link; + + /* special requirements for 82571/82572 fiber adapters */ + + /* jump through hoops to make sure link is up because serdes + * link is hardwired up + */ + ctrl |= E1000_CTRL_SLU; + ew32(CTRL, ctrl); + + /* disable autoneg */ + ctrl = er32(TXCW); + ctrl &= ~(1 << 31); + ew32(TXCW, ctrl); + + link = (er32(STATUS) & E1000_STATUS_LU); + + if (!link) { + /* set invert loss of signal */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_ILOS; + ew32(CTRL, ctrl); + } + + /* special write to serdes control register to enable SerDes analog + * loopback + */ + ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK); + e1e_flush(); + usleep_range(10000, 20000); + + return 0; +} + +/* only call this for fiber/serdes connections to es2lan */ +static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrlext = er32(CTRL_EXT); + u32 ctrl = er32(CTRL); + + /* save CTRL_EXT to restore later, reuse an empty variable (unused + * on mac_type 80003es2lan) + */ + adapter->tx_fifo_head = ctrlext; + + /* clear the serdes mode bits, putting the device into mac loopback */ + ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + ew32(CTRL_EXT, ctrlext); + + /* force speed to 1000/FD, link up */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | + E1000_CTRL_SPD_1000 | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* set mac loopback */ + ctrl = er32(RCTL); + ctrl |= E1000_RCTL_LBM_MAC; + ew32(RCTL, ctrl); + + /* set testing mode parameters (no need to reset later) */ +#define KMRNCTRLSTA_OPMODE (0x1F << 16) +#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 + ew32(KMRNCTRLSTA, + (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); + + return 0; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + switch (hw->mac.type) { + case e1000_80003es2lan: + return e1000_set_es2lan_mac_loopback(adapter); + break; + case e1000_82571: + case e1000_82572: + return e1000_set_82571_fiber_loopback(adapter); + break; + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->phy.media_type == e1000_media_type_copper) { + return e1000_integrated_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac.type) { + case e1000_80003es2lan: + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + /* restore CTRL_EXT, stealing space from tx_fifo_head */ + ew32(CTRL_EXT, adapter->tx_fifo_head); + adapter->tx_fifo_head = 0; + } + /* fall through */ + case e1000_82571: + case e1000_82572: + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + e1e_flush(); + usleep_range(10000, 20000); + break; + } + /* Fall 
Through */ + default: + hw->mac.autoneg = 1; + if (hw->phy.type == e1000_phy_gg82563) + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); + e1e_rphy(hw, MII_BMCR, &phy_reg); + if (phy_reg & BMCR_LOOPBACK) { + phy_reg &= ~BMCR_LOOPBACK; + e1e_wphy(hw, MII_BMCR, phy_reg); + if (hw->phy.ops.commit) + hw->phy.ops.commit(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int e1000_check_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(skb->data + 3) == 0xFF) + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) + return 0; + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_buffer *buffer_info; + int i, j, k, l; + int lc; + int good_cnt; + int ret_val = 0; + unsigned long time; + + ew32(RDT(0), rx_ring->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + k = 0; + l = 0; + /* loop count loop */ + for (j = 0; j <= lc; j++) { + /* send the packets */ + for (i = 0; i < 64; i++) { + buffer_info = &tx_ring->buffer_info[k]; + + e1000_create_lbtest_frame(buffer_info->skb, 1024); + dma_sync_single_for_device(&pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + k++; + if (k == tx_ring->count) + k = 0; + } + ew32(TDT(0), k); + e1e_flush(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + /* receive the sent packets */ + do { + buffer_info = &rx_ring->buffer_info[l]; + + dma_sync_single_for_cpu(&pdev->dev, + buffer_info->dma, 2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame(buffer_info->skb, + 1024); + if (!ret_val) + good_cnt++; + l++; + if (l == rx_ring->count) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (jiffies >= (time + 20)) { + ret_val = 14; /* error code for time out error */ + break; + } + } + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + /* PHY loopback cannot be performed if SoL/IDER sessions are active */ + if (hw->phy.ops.check_reset_block && + hw->phy.ops.check_reset_block(hw)) { + e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + 
return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + if (hw->phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; + hw->mac.serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + hw->mac.ops.check_for_link(hw); + if (hw->mac.serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + hw->mac.ops.check_for_link(hw); + if (hw->mac.autoneg) + /* On some Phy/switch combinations, link establishment + * can take a few seconds more than expected. + */ + msleep_interruptible(5000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000e_get_sset_count(struct net_device __always_unused *netdev, + int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex; + u8 autoneg; + bool if_running = netif_running(netdev); + + set_bit(__E1000_TESTING, &adapter->state); + + if (!if_running) { + /* Get control of and reset hardware */ + if (adapter->flags & FLAG_HAS_AMT) + e1000e_get_hw_control(adapter); + + e1000e_power_up_phy(adapter); + + adapter->hw.phy.autoneg_wait_to_complete = 1; + e1000e_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = 0; + } + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + e_info("offline testing starting\n"); + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.autoneg_wait_to_complete = 1; + e1000e_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = 0; + + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + e1000e_reset(adapter); + + clear_bit(__E1000_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + /* Online tests */ + + e_info("online testing starting\n"); + + /* register, eeprom, intr and loopback tests not run online */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + clear_bit(__E1000_TESTING, &adapter->state); + } + + if (!if_running) { + e1000e_reset(adapter); + + if (adapter->flags & FLAG_HAS_AMT) + e1000e_release_hw_control(adapter); + } + + msleep_interruptible(4 * 1000); +} 
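+/* Editor's note: the five data[] slots filled in above map, in order, to
+ * the register, EEPROM, interrupt, loopback and link tests. From
+ * userspace the suite is driven through the standard ethtool self-test
+ * interface (the interface name is just an example):
+ *
+ *	ethtool -t eth0 offline		# full suite, link goes down
+ *	ethtool -t eth0 online		# link test only, non-disruptive
+ */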
+ +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + wol->supported = 0; + wol->wolopts = 0; + + if (!(adapter->flags & FLAG_HAS_WOL) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | WAKE_PHY; + + /* apply any specific unsupported masks here */ + if (adapter->flags & FLAG_NO_WAKE_UCAST) { + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err("Interface does not support directed (unicast) frame wake-up packets\n"); + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & E1000_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!(adapter->flags & FLAG_HAS_WOL) || + !device_can_wakeup(&adapter->pdev->dev) || + (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | + WAKE_MAGIC | WAKE_PHY))) + return -EOPNOTSUPP; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= E1000_WUFC_LNKC; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + if (!hw->mac.ops.blink_led) + return 2; /* cycle on/off twice per second */ + + hw->mac.ops.blink_led(hw); + break; + + case ETHTOOL_ID_INACTIVE: + if (hw->phy.type == e1000_phy_ife) + e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); + hw->mac.ops.led_off(hw); + hw->mac.ops.cleanup_led(hw); + break; + + case ETHTOOL_ID_ON: + hw->mac.ops.led_on(hw); + break; + + case ETHTOOL_ID_OFF: + hw->mac.ops.led_off(hw); + break; + } + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr_setting = 4; + adapter->itr = adapter->itr_setting; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + if (adapter->itr_setting != 0) + e1000e_write_itr(adapter, adapter->itr); + else + 
e1000e_write_itr(adapter, 0); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EAGAIN; + + if (!adapter->hw.mac.autoneg) + return -EINVAL; + + e1000e_reinit_locked(adapter); + + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 net_stats; + int i; + char *p = NULL; + + e1000e_get_stats64(netdev, &net_stats); + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + switch (e1000_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *)&net_stats + + e1000_gstrings_stats[i].stat_offset; + break; + case E1000_STATS: + p = (char *)adapter + + e1000_gstrings_stats[i].stat_offset; + break; + default: + data[i] = 0; + continue; + } + + data[i] = (e1000_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } +} + +static void e1000_get_strings(struct net_device __always_unused *netdev, + u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int e1000_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *info, + u32 __always_unused *rule_locs) +{ + info->data = 0; + + switch (info->cmd) { + case ETHTOOL_GRXFH: { + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 mrqc = er32(MRQC); + + if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK)) + return 0; + + switch (info->flow_type) { + case TCP_V4_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4) + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6) + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + break; + } + return 0; + } + default: + return -EOPNOTSUPP; + } +} + +static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 cap_addr, lpa_addr, pcs_stat_addr, phy_data; + u32 ret_val; + + if (!(adapter->flags2 & FLAG2_HAS_EEE)) + return -EOPNOTSUPP; + + switch (hw->phy.type) { + case e1000_phy_82579: + cap_addr = I82579_EEE_CAPABILITY; + lpa_addr = I82579_EEE_LP_ABILITY; + pcs_stat_addr = I82579_EEE_PCS_STATUS; + break; + case e1000_phy_i217: + cap_addr = I217_EEE_CAPABILITY; + lpa_addr = I217_EEE_LP_ABILITY; + pcs_stat_addr = I217_EEE_PCS_STATUS; + break; + default: + return -EOPNOTSUPP; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return -EBUSY; + + /* EEE Capability */ + ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data); + if (ret_val) + goto release; + edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data); + + /* EEE Advertised */ + edata->advertised = 
mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + /* EEE Link Partner Advertised */ + ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data); + if (ret_val) + goto release; + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + + /* EEE PCS Status */ + ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data); + if (hw->phy.type == e1000_phy_82579) + phy_data <<= 8; + +release: + hw->phy.ops.release(hw); + if (ret_val) + return -ENODATA; + + /* Result of the EEE auto negotiation - there is no register that + * has the status of the EEE negotiation so do a best-guess based + * on whether Tx or Rx LPI indications have been received. + */ + if (phy_data & (E1000_EEE_TX_LPI_RCVD | E1000_EEE_RX_LPI_RCVD)) + edata->eee_active = true; + + edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable; + edata->tx_lpi_enabled = true; + edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT; + + return 0; +} + +static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + s32 ret_val; + + ret_val = e1000e_get_eee(netdev, &eee_curr); + if (ret_val) + return ret_val; + + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_err("Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_curr.tx_lpi_timer != edata->tx_lpi_timer) { + e_err("Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { + e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n"); + return -EINVAL; + } + + adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + + hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled; + + /* reset the link */ + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + + return 0; +} + +static int e1000e_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + ethtool_op_get_ts_info(netdev, info); + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return 0; + + info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE); + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_ALL)); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + + return 0; +} + +static int e1000e_ethtool_begin(struct net_device *netdev) +{ + return pm_runtime_get_sync(netdev->dev.parent); +} + +static void e1000e_ethtool_complete(struct net_device *netdev) +{ + pm_runtime_put_sync(netdev->dev.parent); +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .begin = e1000e_ethtool_begin, + .complete = e1000e_ethtool_complete, + .get_settings = e1000_get_settings, + .set_settings = e1000_set_settings, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = 
e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000e_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_rxnfc = e1000_get_rxnfc, + .get_ts_info = e1000e_get_ts_info, + .get_eee = e1000e_get_eee, + .set_eee = e1000e_set_eee, +}; + +void e1000e_set_ethtool_ops(struct net_device *netdev) +{ + SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops); +} diff --git a/addons/e1000e/src/3.10.108/hw.h b/addons/e1000e/src/3.10.108/hw.h new file mode 100644 index 00000000..84850f7a --- /dev/null +++ b/addons/e1000e/src/3.10.108/hw.h @@ -0,0 +1,681 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "regs.h" +#include "defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 +#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC +#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 +#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82572EI 0x10B9 +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A +#define E1000_DEV_ID_82574L 0x10D3 +#define E1000_DEV_ID_82574LA 0x10F6 +#define E1000_DEV_ID_82583V 0x150C +#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB +#define E1000_DEV_ID_ICH8_82567V_3 0x1501 +#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A +#define E1000_DEV_ID_ICH8_IGP_C 0x104B +#define E1000_DEV_ID_ICH8_IFE 0x104C +#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 +#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 +#define E1000_DEV_ID_ICH8_IGP_M 0x104D +#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD +#define E1000_DEV_ID_ICH9_BM 0x10E5 +#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 +#define E1000_DEV_ID_ICH9_IGP_M 0x10BF +#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB +#define E1000_DEV_ID_ICH9_IGP_C 0x294C +#define E1000_DEV_ID_ICH9_IFE 0x10C0 +#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 +#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 +#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC +#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD +#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE +#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE +#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF +#define E1000_DEV_ID_ICH10_D_BM_V 0x1525 +#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA +#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB +#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF +#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 +#define E1000_DEV_ID_PCH2_LV_LM 0x1502 +#define E1000_DEV_ID_PCH2_LV_V 0x1503 +#define E1000_DEV_ID_PCH_LPT_I217_LM 0x153A +#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B +#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A +#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 + +#define E1000_REVISION_4 4 + +#define E1000_FUNC_1 1 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 + +enum e1000_mac_type { + e1000_82571, + e1000_82572, + e1000_82573, + e1000_82574, + e1000_82583, + e1000_80003es2lan, + e1000_ich8lan, + e1000_ich9lan, + e1000_ich10lan, + e1000_pchlan, + e1000_pch2lan, + e1000_pch_lpt, +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + 
e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_bm, + e1000_phy_82578, + e1000_phy_82577, + e1000_phy_82579, + e1000_phy_i217, +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress, + e1000_serdes_link_autoneg_complete, + e1000_serdes_link_forced_up +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + __le16 length[3]; /* length of buffers 1-3 */ + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ 
+struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "mac.h" +#include "phy.h" +#include "nvm.h" +#include "manage.h" + +/* Function pointers for the MAC. 
*/ +struct e1000_mac_operations { + s32 (*id_led_init)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + void (*set_lan_id)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*config_collision_dist)(struct e1000_hw *); + void (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); +}; + +/* When to use various PHY register access functions: + * + * Func Caller + * Function Does Does When to use + * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * X_reg L,P,A n/a for simple PHY reg accesses + * X_reg_locked P,A L for multiple accesses of different regs + * on different pages + * X_reg_page A L,P for multiple accesses of different regs + * on the same page + * + * Where X=[read|write], L=locking, P=sets page, A=register access + * + */ +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*cfg_on_link_up)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_info)(struct e1000_hw *); + s32 (*set_page)(struct e1000_hw *, u16); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw *, u32, u16); + void (*power_up)(struct e1000_hw *); + void (*power_down)(struct e1000_hw *); +}; + +/* Function pointers for the NVM. 
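+ *
+ * Same indirection pattern as the MAC and PHY tables above. NVM accesses
+ * are bracketed by acquire/release; a minimal sketch of a guarded
+ * single-word read (error handling trimmed, offset/word are placeholders):
+ *
+ *	u16 word;
+ *	if (!hw->nvm.ops.acquire(hw)) {
+ *		hw->nvm.ops.read(hw, offset, 1, &word);
+ *		hw->nvm.ops.release(hw);
+ *	}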
*/ +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + void (*reload)(struct e1000_hw *); + s32 (*update)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum e1000_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool serdes_has_link; + bool tx_pkt_filtering; + enum e1000_serdes_link_state serdes_link_state; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_width width; + + u16 func; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* FC mode in effect */ + enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +struct e1000_dev_spec_82571 { + bool laa_is_present; + u32 smb_counter; +}; + +struct e1000_dev_spec_80003es2lan { + bool mdic_wa_enable; +}; + +struct e1000_shadow_ram { + u16 value; + bool modified; +}; + +#define E1000_ICH8_SHADOW_RAM_WORDS 2048 + +struct e1000_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; + bool nvm_k1_enabled; + bool eee_disable; + u16 eee_lp_ability; +}; + +struct e1000_hw { + struct e1000_adapter *adapter; + + void __iomem *hw_addr; + void __iomem *flash_address; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82571 e82571; + 
struct e1000_dev_spec_80003es2lan e80003es2lan; + struct e1000_dev_spec_ich8lan ich8lan; + } dev_spec; +}; + +#include "82571.h" +#include "80003es2lan.h" +#include "ich8lan.h" + +#endif diff --git a/addons/e1000e/src/3.10.108/ich8lan.c b/addons/e1000e/src/3.10.108/ich8lan.c new file mode 100644 index 00000000..ad9d8f2d --- /dev/null +++ b/addons/e1000e/src/3.10.108/ich8lan.c @@ -0,0 +1,4738 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* 82562G 10/100 Network Connection + * 82562G-2 10/100 Network Connection + * 82562GT 10/100 Network Connection + * 82562GT-2 10/100 Network Connection + * 82562V 10/100 Network Connection + * 82562V-2 10/100 Network Connection + * 82566DC-2 Gigabit Network Connection + * 82566DC Gigabit Network Connection + * 82566DM-2 Gigabit Network Connection + * 82566DM Gigabit Network Connection + * 82566MC Gigabit Network Connection + * 82566MM Gigabit Network Connection + * 82567LM Gigabit Network Connection + * 82567LF Gigabit Network Connection + * 82567V Gigabit Network Connection + * 82567LM-2 Gigabit Network Connection + * 82567LF-2 Gigabit Network Connection + * 82567V-2 Gigabit Network Connection + * 82567LF-3 Gigabit Network Connection + * 82567LM-3 Gigabit Network Connection + * 82567LM-4 Gigabit Network Connection + * 82577LM Gigabit Network Connection + * 82577LC Gigabit Network Connection + * 82578DM Gigabit Network Connection + * 82578DC Gigabit Network Connection + * 82579LM Gigabit Network Connection + * 82579V Gigabit Network Connection + */ + +#include "e1000.h" + +/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ +/* Offset 04h HSFSTS */ +union ich8_hws_flash_status { + struct ich8_hsfsts { + u16 flcdone:1; /* bit 0 Flash Cycle Done */ + u16 flcerr:1; /* bit 1 Flash Cycle Error */ + u16 dael:1; /* bit 2 Direct Access error Log */ + u16 berasesz:2; /* bit 4:3 Sector Erase Size */ + u16 flcinprog:1; /* bit 5 flash cycle in Progress */ + u16 reserved1:2; /* bit 13:6 Reserved */ + u16 reserved2:6; /* bit 13:6 Reserved */ + u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */ + u16 flockdn:1; /* bit 15 Flash Config Lock-Down */ + } hsf_status; + u16 regval; +}; + +/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ +/* Offset 06h FLCTL */ +union ich8_hws_flash_ctrl { + struct ich8_hsflctl { + u16 flcgo:1; /* 0 Flash Cycle Go */ + u16 flcycle:2; /* 2:1 Flash Cycle */ + u16 reserved:5; /* 7:3 Reserved */ + u16 fldbcount:2; 
/* 9:8 Flash Data Byte Count */ + u16 flockdn:6; /* 15:10 Reserved */ + } hsf_ctrl; + u16 regval; +}; + +/* ICH Flash Region Access Permissions */ +union ich8_hws_flash_regacc { + struct ich8_flracc { + u32 grra:8; /* 0:7 GbE region Read Access */ + u32 grwa:8; /* 8:15 GbE region Write Access */ + u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */ + u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */ + } hsf_flregacc; + u16 regval; +}; + +/* ICH Flash Protected Region */ +union ich8_flash_protected_range { + struct ich8_pr { + u32 base:13; /* 0:12 Protected Range Base */ + u32 reserved1:2; /* 13:14 Reserved */ + u32 rpe:1; /* 15 Read Protection Enable */ + u32 limit:13; /* 16:28 Protected Range Limit */ + u32 reserved2:2; /* 29:30 Reserved */ + u32 wpe:1; /* 31 Write Protection Enable */ + } range; + u32 regval; +}; + +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte); +static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 *data); +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data); +static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); +static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); +static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); +static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); +static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); +static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); +static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); +static s32 e1000_led_on_pchlan(struct e1000_hw *hw); +static s32 e1000_led_off_pchlan(struct e1000_hw *hw); +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); +static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); +static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); +static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); +static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); +static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); +static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); +static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); +static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); + +static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) +{ + return readw(hw->flash_address + reg); +} + +static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->flash_address + reg); +} + +static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val) +{ + writew(val, hw->flash_address + reg); +} + +static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->flash_address + reg); +} + +#define er16flash(reg) __er16flash(hw, (reg)) +#define er32flash(reg) __er32flash(hw, (reg)) +#define ew16flash(reg, val) __ew16flash(hw, (reg), (val)) +#define ew32flash(reg, val) __ew32flash(hw, (reg), (val)) + +/** + * e1000_phy_is_accessible_pchlan - Check if able to access PHY 
registers + * @hw: pointer to the HW structure + * + * Test access to the PHY registers by reading the PHY ID registers. If + * the PHY ID is already known (e.g. resume path) compare it with known ID, + * otherwise assume the read PHY ID is correct if it is valid. + * + * Assumes the sw/fw/hw semaphore is already acquired. + **/ +static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) +{ + u16 phy_reg = 0; + u32 phy_id = 0; + s32 ret_val; + u16 retry_count; + + for (retry_count = 0; retry_count < 2; retry_count++) { + ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg); + if (ret_val || (phy_reg == 0xFFFF)) + continue; + phy_id = (u32)(phy_reg << 16); + + ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg); + if (ret_val || (phy_reg == 0xFFFF)) { + phy_id = 0; + continue; + } + phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); + break; + } + + if (hw->phy.id) { + if (hw->phy.id == phy_id) + return true; + } else if (phy_id) { + hw->phy.id = phy_id; + hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); + return true; + } + + /* In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ + hw->phy.ops.release(hw); + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (!ret_val) + ret_val = e1000e_get_phy_id(hw); + hw->phy.ops.acquire(hw); + + return !ret_val; +} + +/** + * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds + * @hw: pointer to the HW structure + * + * Workarounds/flow necessary for PHY initialization during driver load + * and resume paths. + **/ +static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) +{ + u32 mac_reg, fwsm = er32(FWSM); + s32 ret_val; + u16 phy_reg; + + /* Gate automatic PHY configuration by hardware on managed and + * non-managed 82579 and newer adapters. + */ + e1000_gate_hw_phy_config_ich8lan(hw, true); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to initialize PHY flow\n"); + goto out; + } + + /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is + * inaccessible and resetting the PHY is not blocked, toggle the + * LANPHYPC Value bit to force the interconnect to PCIe mode. + */ + switch (hw->mac.type) { + case e1000_pch_lpt: + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* Before toggling LANPHYPC, see if PHY is accessible by + * forcing MAC to SMBus mode first. 
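+ * If the ME left the PHY in SMBus mode, reads over the PCIe
+ * interconnect can fail, so the MAC is matched to SMBus mode here and
+ * accessibility is re-tested before falling back to the more disruptive
+ * LANPHYPC toggle below.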
+ */ + mac_reg = er32(CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + /* fall-through */ + case e1000_pch2lan: + if (e1000_phy_is_accessible_pchlan(hw)) { + if (hw->mac.type == e1000_pch_lpt) { + /* Unforce SMBus mode in PHY */ + e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + } + break; + } + + /* fall-through */ + case e1000_pchlan: + if ((hw->mac.type == e1000_pchlan) && + (fwsm & E1000_ICH_FWSM_FW_VALID)) + break; + + if (hw->phy.ops.check_reset_block(hw)) { + e_dbg("Required LANPHYPC toggle blocked by ME\n"); + break; + } + + e_dbg("Toggling LANPHYPC\n"); + + /* Set Phy Config Counter to 50msec */ + mac_reg = er32(FEXTNVM3); + mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + ew32(FEXTNVM3, mac_reg); + + if (hw->mac.type == e1000_pch_lpt) { + /* Toggling LANPHYPC brings the PHY out of SMBus mode + * So ensure that the MAC is also out of SMBus mode + */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + } + + /* Toggle LANPHYPC Value bit */ + mac_reg = er32(CTRL); + mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; + mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; + ew32(CTRL, mac_reg); + e1e_flush(); + usleep_range(10, 20); + mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; + ew32(CTRL, mac_reg); + e1e_flush(); + if (hw->mac.type < e1000_pch_lpt) { + msleep(50); + } else { + u16 count = 20; + do { + usleep_range(5000, 10000); + } while (!(er32(CTRL_EXT) & + E1000_CTRL_EXT_LPCD) && count--); + } + break; + default: + break; + } + + hw->phy.ops.release(hw); + + /* Reset the PHY before any access to it. Doing so, ensures + * that the PHY is in a known good state before we read/write + * PHY registers. The generic reset is sufficient here, + * because we haven't determined the PHY type yet. + */ + ret_val = e1000e_phy_hw_reset_generic(hw); + +out: + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + return ret_val; +} + +/** + * e1000_init_phy_params_pchlan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. 
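+ *
+ * Rough order of operations in the body below: wire up the HV PHY
+ * read/write ops, run e1000_init_phy_workarounds_pchlan() (which may
+ * toggle LANPHYPC), identify the PHY, then patch in the type-specific
+ * polarity/cable-length/force-speed handlers.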
+ **/ +static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.set_page = e1000_set_page_igp; + phy->ops.read_reg = e1000_read_phy_reg_hv; + phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; + phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; + phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.write_reg = e1000_write_phy_reg_hv; + phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; + phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + phy->id = e1000_phy_unknown; + + ret_val = e1000_init_phy_workarounds_pchlan(hw); + if (ret_val) + return ret_val; + + if (phy->id == e1000_phy_unknown) + switch (hw->mac.type) { + default: + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) + break; + /* fall-through */ + case e1000_pch2lan: + case e1000_pch_lpt: + /* In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + break; + } + phy->type = e1000e_get_phy_type_from_id(phy->id); + + switch (phy->type) { + case e1000_phy_82577: + case e1000_phy_82579: + case e1000_phy_i217: + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.commit = e1000e_phy_sw_reset; + break; + case e1000_phy_82578: + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000e_get_cable_length_m88; + phy->ops.get_info = e1000e_get_phy_info_m88; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + return ret_val; +} + +/** + * e1000_init_phy_params_ich8lan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. + **/ +static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 i = 0; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + + /* We may need to do this twice - once for IGP and if that fails, + * we'll set BM func pointers and try again + */ + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + phy->ops.write_reg = e1000e_write_phy_reg_bm; + phy->ops.read_reg = e1000e_read_phy_reg_bm; + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + e_dbg("Cannot determine PHY addr. 
Erroring out\n"); + return ret_val; + } + } + + phy->id = 0; + while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && + (i++ < 100)) { + usleep_range(1000, 2000); + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + } + + /* Verify phy id */ + switch (phy->id) { + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; + phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; + phy->ops.get_info = e1000e_get_phy_info_igp; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy->type = e1000_phy_ife; + phy->autoneg_mask = E1000_ALL_NOT_GIG; + phy->ops.get_info = e1000_get_phy_info_ife; + phy->ops.check_polarity = e1000_check_polarity_ife; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; + break; + case BME1000_E_PHY_ID: + phy->type = e1000_phy_bm; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg = e1000e_read_phy_reg_bm; + phy->ops.write_reg = e1000e_write_phy_reg_bm; + phy->ops.commit = e1000e_phy_sw_reset; + phy->ops.get_info = e1000e_get_phy_info_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; + break; + default: + return -E1000_ERR_PHY; + break; + } + + return 0; +} + +/** + * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific NVM parameters and function + * pointers. + **/ +static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 gfpreg, sector_base_addr, sector_end_addr; + u16 i; + + /* Can't read flash registers if the register set isn't mapped. */ + if (!hw->flash_address) { + e_dbg("ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } + + nvm->type = e1000_nvm_flash_sw; + + gfpreg = er32flash(ICH_FLASH_GFPREG); + + /* sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT; + + /* find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. + */ + nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT); + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + + nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; + + /* Clear shadow ram */ + for (i = 0; i < nvm->word_size; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + + return 0; +} + +/** + * e1000_init_mac_params_ich8lan - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific MAC parameters and function + * pointers. 
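+ *
+ * Notable per-family quirks handled below: ICH8 exposes one RAR entry
+ * fewer than E1000_ICH_RAR_ENTRIES, 82579 (pch2lan) and LPT install
+ * their own rar_set implementations, and ICH8 also enables the Kumeran
+ * lock-loss workaround.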
+ **/ +static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + /* Set media type function pointer */ + hw->phy.media_type = e1000_media_type_copper; + + /* Set mta register count */ + mac->mta_reg_count = 32; + /* Set rar entry count */ + mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; + if (mac->type == e1000_ich8lan) + mac->rar_entry_count--; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC subsystem not supported */ + mac->arc_subsystem_valid = false; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* LED and other operations */ + switch (mac->type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; + /* ID LED init */ + mac->ops.id_led_init = e1000e_id_led_init_generic; + /* blink LED */ + mac->ops.blink_led = e1000e_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000e_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_ich8lan; + mac->ops.led_off = e1000_led_off_ich8lan; + break; + case e1000_pch2lan: + mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch2lan; + /* fall-through */ + case e1000_pch_lpt: + case e1000_pchlan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_pchlan; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_pchlan; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_pchlan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_pchlan; + mac->ops.led_off = e1000_led_off_pchlan; + break; + default: + break; + } + + if (mac->type == e1000_pch_lpt) { + mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch_lpt; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_pch_lpt; + } + + /* Enable PCS Lock-loss workaround for ICH8 */ + if (mac->type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); + + return 0; +} + +/** + * __e1000_access_emi_reg_locked - Read/write EMI register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + * + * This helper function assumes the SW/FW/HW Semaphore is already acquired. + **/ +static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val; + + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data); + else + ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data); + + return ret_val; +} + +/** + * e1000_read_emi_reg_locked - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + * + * Assumes the SW/FW/HW Semaphore is already acquired. 
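+ *
+ * A typical guarded call sequence, matching how e1000_set_eee_pchlan()
+ * below uses it:
+ *
+ *	ret_val = hw->phy.ops.acquire(hw);
+ *	if (!ret_val) {
+ *		ret_val = e1000_read_emi_reg_locked(hw, addr, &data);
+ *		hw->phy.ops.release(hw);
+ *	}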
+ **/ +s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) +{ + return __e1000_access_emi_reg_locked(hw, addr, data, true); +} + +/** + * e1000_write_emi_reg_locked - Write Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be written to the EMI address + * + * Assumes the SW/FW/HW Semaphore is already acquired. + **/ +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) +{ + return __e1000_access_emi_reg_locked(hw, addr, &data, false); +} + +/** + * e1000_set_eee_pchlan - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure, the duplex of + * the link and the EEE capabilities of the link partner. The LPI Control + * register bits will remain set only if/when link is up. + **/ +static s32 e1000_set_eee_pchlan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + s32 ret_val; + u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data; + + switch (hw->phy.type) { + case e1000_phy_82579: + lpa = I82579_EEE_LP_ABILITY; + pcs_status = I82579_EEE_PCS_STATUS; + adv_addr = I82579_EEE_ADVERTISEMENT; + break; + case e1000_phy_i217: + lpa = I217_EEE_LP_ABILITY; + pcs_status = I217_EEE_PCS_STATUS; + adv_addr = I217_EEE_ADVERTISEMENT; + break; + default: + return 0; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl); + if (ret_val) + goto release; + + /* Clear bits that enable EEE in various speeds */ + lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK; + + /* Enable EEE if not disabled by user */ + if (!dev_spec->eee_disable) { + /* Save off link partner's EEE ability */ + ret_val = e1000_read_emi_reg_locked(hw, lpa, + &dev_spec->eee_lp_ability); + if (ret_val) + goto release; + + /* Read EEE advertisement */ + ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv); + if (ret_val) + goto release; + + /* Enable EEE only for speeds in which the link partner is + * EEE capable and for which we advertise EEE. + */ + if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; + + if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { + e1e_rphy_locked(hw, MII_LPA, &data); + if (data & LPA_100FULL) + lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; + else + /* EEE is not supported in 100Half, so ignore + * partner's EEE in 100 ability if full-duplex + * is not advertised. + */ + dev_spec->eee_lp_ability &= + ~I82579_EEE_100_SUPPORTED; + } + } + + /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ + ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); + if (ret_val) + goto release; + + ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications + * preventing further DMA write requests. Work around the issue by disabling + * the de-assertion of the clock request when in 1Gbps mode.
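+ *
+ * Concretely, the body below temporarily clears the K1 enable bit in
+ * E1000_KMRNCTRLSTA_K1_CONFIG while it sets the FEXTNVM6 PLL-clock-request
+ * bit for 1 Gbps links, and clears that FEXTNVM6 bit again on link down
+ * or at 10/100 speeds.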
+ **/ +static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) +{ + u32 fextnvm6 = er32(FEXTNVM6); + s32 ret_val = 0; + + if (link && (er32(STATUS) & E1000_STATUS_SPEED_1000)) { + u16 kmrn_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = + e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + goto release; + + ret_val = + e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg & + ~E1000_KMRNCTRLSTA_K1_ENABLE); + if (ret_val) + goto release; + + usleep_range(10, 20); + + ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); + + ret_val = + e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); +release: + hw->phy.ops.release(hw); + } else { + /* clear FEXTNVM6 bit 8 on link down or 10/100 */ + ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + } + + return ret_val; +} + +/** + * e1000_platform_pm_pch_lpt - Set platform power management values + * @hw: pointer to the HW structure + * @link: bool indicating link status + * + * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like" + * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed + * when link is up (which must not exceed the maximum latency supported + * by the platform), otherwise specify there is no LTR requirement. + * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop + * latencies in the LTR Extended Capability Structure in the PCIe Extended + * Capability register set, on this device LTR is set by writing the + * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and + * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB) + * message to the PMC. + **/ +static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) +{ + u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | + link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; + u16 lat_enc = 0; /* latency encoded */ + + if (link) { + u16 speed, duplex, scale = 0; + u16 max_snoop, max_nosnoop; + u16 max_ltr_enc; /* max LTR latency encoded */ + s64 lat_ns; /* latency (ns) */ + s64 value; + u32 rxa; + + if (!hw->adapter->max_frame_size) { + e_dbg("max_frame_size not set.\n"); + return -E1000_ERR_CONFIG; + } + + hw->mac.ops.get_link_up_info(hw, &speed, &duplex); + if (!speed) { + e_dbg("Speed not set.\n"); + return -E1000_ERR_CONFIG; + } + + /* Rx Packet Buffer Allocation size (KB) */ + rxa = er32(PBA) & E1000_PBA_RXA_MASK; + + /* Determine the maximum latency tolerated by the device. + * + * Per the PCIe spec, the tolerated latencies are encoded as + * a 3-bit encoded scale (only 0-5 are valid) multiplied by + * a 10-bit value (0-1023) to provide a range from 1 ns to + * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, + * 1=2^5ns, 2=2^10ns,...5=2^25ns. 
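+ *
+ * Worked example with illustrative numbers: rxa = 24 KB and a
+ * 1522-byte max frame at 1000 Mbps give
+ * lat_ns = (24 * 1024 - 2 * 1522) * 8 * 1000 / 1000 = 172256 ns.
+ * 172256 exceeds the 10-bit value range, so divide by 2^5 twice
+ * (scale = 2, value = 169) and lat_enc = (2 << PCI_LTR_SCALE_SHIFT) | 169.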
+ */ + lat_ns = ((s64)rxa * 1024 - + (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000; + if (lat_ns < 0) + lat_ns = 0; + else + do_div(lat_ns, speed); + + value = lat_ns; + while (value > PCI_LTR_VALUE_MASK) { + scale++; + value = DIV_ROUND_UP(value, (1 << 5)); + } + if (scale > E1000_LTRV_SCALE_MAX) { + e_dbg("Invalid LTR latency scale %d\n", scale); + return -E1000_ERR_CONFIG; + } + lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value); + + /* Determine the maximum latency tolerated by the platform */ + pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT, + &max_snoop); + pci_read_config_word(hw->adapter->pdev, + E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); + max_ltr_enc = max_t(u16, max_snoop, max_nosnoop); + + if (lat_enc > max_ltr_enc) + lat_enc = max_ltr_enc; + } + + /* Set Snoop and No-Snoop latencies the same */ + reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT); + ew32(LTRV, reg); + + return 0; +} + +/** + * e1000_check_for_copper_link_ich8lan - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see if the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + u16 phy_reg; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return 0; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pchlan) { + ret_val = e1000_k1_gig_workaround_hv(hw, link); + if (ret_val) + return ret_val; + } + + /* When connected at 10Mbps half-duplex, 82579 parts are excessively + * aggressive, resulting in many collisions. To avoid this, increase + * the IPG and reduce Rx latency in the PHY.
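+ * (The code below widens the TIPG IPGT field to 0xFF and zeroes the
+ * analog Rx configuration through the I82579_RX_CONFIG EMI register.)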
+ */ + if ((hw->mac.type == e1000_pch2lan) && link) { + u32 reg; + reg = er32(STATUS); + if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { + reg = er32(TIPG); + reg &= ~E1000_TIPG_IPGT_MASK; + reg |= 0xFF; + ew32(TIPG, reg); + + /* Reduce Rx latency in analog PHY */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = + e1000_write_emi_reg_locked(hw, I82579_RX_CONFIG, 0); + + hw->phy.ops.release(hw); + + if (ret_val) + return ret_val; + } + } + + /* Work-around I218 hang issue */ + if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + ret_val = e1000_k1_workaround_lpt_lp(hw, link); + if (ret_val) + return ret_val; + } + + if (hw->mac.type == e1000_pch_lpt) { + /* Set platform power management values for + * Latency Tolerance Reporting (LTR) + */ + ret_val = e1000_platform_pm_pch_lpt(hw, link); + if (ret_val) + return ret_val; + } + + /* Clear link partner's EEE ability */ + hw->dev_spec.ich8lan.eee_lp_ability = 0; + + if (!link) + return 0; /* No link detected */ + + mac->get_link_status = false; + + switch (hw->mac.type) { + case e1000_pch2lan: + ret_val = e1000_k1_workaround_lv(hw); + if (ret_val) + return ret_val; + /* fall-thru */ + case e1000_pchlan: + if (hw->phy.type == e1000_phy_82578) { + ret_val = e1000_link_stall_workaround_hv(hw); + if (ret_val) + return ret_val; + } + + /* Workaround for PCHx parts in half-duplex: + * Set the number of preambles removed from the packet + * when it is passed from the PHY to the MAC to prevent + * the MAC from misinterpreting the packet type. + */ + e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); + phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; + + if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) + phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); + + e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); + break; + default: + break; + } + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000e_check_downshift(hw); + + /* Enable/Disable EEE after link up */ + ret_val = e1000_set_eee_pchlan(hw); + if (ret_val) + return ret_val; + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) + return -E1000_ERR_CONFIG; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. 
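+ * (Pause capabilities are resolved per link partner, so the requested
+ * mode in hw->fc is re-applied after every autonegotiation.)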
+ */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) + e_dbg("Error configuring flow control\n"); + + return ret_val; +} + +static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_ich8lan(hw); + if (rc) + return rc; + + rc = e1000_init_nvm_params_ich8lan(hw); + if (rc) + return rc; + + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + rc = e1000_init_phy_params_ich8lan(hw); + break; + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + rc = e1000_init_phy_params_pchlan(hw); + break; + default: + break; + } + if (rc) + return rc; + + /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or + * on parts with MACsec enabled in NVM (reflected in CTRL_EXT). + */ + if ((adapter->hw.phy.type == e1000_phy_ife) || + ((adapter->hw.mac.type >= e1000_pch2lan) && + (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) { + adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; + adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN; + + hw->mac.ops.blink_led = NULL; + } + + if ((adapter->hw.mac.type == e1000_ich8lan) && + (adapter->hw.phy.type != e1000_phy_ife)) + adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; + + /* Enable workaround for 82579 w/ ME enabled */ + if ((adapter->hw.mac.type == e1000_pch2lan) && + (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; + + return 0; +} + +static DEFINE_MUTEX(nvm_mutex); + +/** + * e1000_acquire_nvm_ich8lan - Acquire NVM mutex + * @hw: pointer to the HW structure + * + * Acquires the mutex for performing NVM operations. + **/ +static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw) +{ + mutex_lock(&nvm_mutex); + + return 0; +} + +/** + * e1000_release_nvm_ich8lan - Release NVM mutex + * @hw: pointer to the HW structure + * + * Releases the mutex used while performing NVM operations. + **/ +static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw) +{ + mutex_unlock(&nvm_mutex); +} + +/** + * e1000_acquire_swflag_ich8lan - Acquire software control flag + * @hw: pointer to the HW structure + * + * Acquires the software control flag for performing PHY and select + * MAC CSR accesses. 
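+ *
+ * Callers must pair this with e1000_release_swflag_ich8lan(); the rar_set
+ * implementations below show the expected shape:
+ *
+ *	if (!e1000_acquire_swflag_ich8lan(hw)) {
+ *		... guarded CSR writes ...
+ *		e1000_release_swflag_ich8lan(hw);
+ *	}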
+ **/ +static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; + s32 ret_val = 0; + + if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE, + &hw->adapter->state)) { + e_dbg("contention for Phy access\n"); + return -E1000_ERR_PHY; + } + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) + break; + + mdelay(1); + timeout--; + } + + if (!timeout) { + e_dbg("SW has already locked the resource.\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + timeout = SW_FLAG_TIMEOUT; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; + + mdelay(1); + timeout--; + } + + if (!timeout) { + e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n", + er32(FWSM), extcnf_ctrl); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + if (ret_val) + clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); + + return ret_val; +} + +/** + * e1000_release_swflag_ich8lan - Release software control flag + * @hw: pointer to the HW structure + * + * Releases the software control flag for performing PHY and select + * MAC CSR accesses. + **/ +static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + } else { + e_dbg("Semaphore unexpectedly released by sw/fw/hw\n"); + } + + clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); +} + +/** + * e1000_check_mng_mode_ich8lan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has any manageability enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + return ((fwsm & E1000_ICH_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))); +} + +/** + * e1000_check_mng_mode_pchlan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has iAMT enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_rar_set_pch2lan - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. For 82579, RAR[0] is the base address register that is to + * contain the MAC address but RAR[1-6] are reserved for manageability (ME). + * Use SHRA[0-3] in place of those reserved for ME. 
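+ *
+ * Index mapping used below: index 0 programs RAL/RAH(0) directly, while
+ * index n >= 1 lands in SHRAL/SHRAH(n - 1) under the software control
+ * flag, with a read-back check since the ME can lock individual SHRA
+ * registers.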
+ **/ +static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); + return; + } + + if (index < hw->mac.rar_entry_count) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + goto out; + + ew32(SHRAL(index - 1), rar_low); + e1e_flush(); + ew32(SHRAH(index - 1), rar_high); + e1e_flush(); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((er32(SHRAL(index - 1)) == rar_low) && + (er32(SHRAH(index - 1)) == rar_high)) + return; + + e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", + (index - 1), er32(FWSM)); + } + +out: + e_dbg("Failed to write receive address at index %d\n", index); +} + +/** + * e1000_rar_set_pch_lpt - Set receive address registers + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address register array at index to the address passed + * in by addr. For LPT, RAR[0] is the base address register that is to + * contain the MAC address. SHRA[0-10] are the shared receive address + * registers that are shared between the Host and manageability engine (ME). + **/ +static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + u32 wlock_mac; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); + return; + } + + /* The manageability engine (ME) can lock certain SHRAR registers that + * it is using - those registers are unavailable for use. + */ + if (index < hw->mac.rar_entry_count) { + wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; + wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; + + /* Check if all SHRAR registers are locked */ + if (wlock_mac == 1) + goto out; + + if ((wlock_mac == 0) || (index <= wlock_mac)) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + + if (ret_val) + goto out; + + ew32(SHRAL_PCH_LPT(index - 1), rar_low); + e1e_flush(); + ew32(SHRAH_PCH_LPT(index - 1), rar_high); + e1e_flush(); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) && + (er32(SHRAH_PCH_LPT(index - 1)) == rar_high)) + return; + } + } + +out: + e_dbg("Failed to write receive address at index %d\n", index); +} + +/** + * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Checks if firmware is blocking the reset of the PHY. + * This is a function pointer entry point only called by + * reset routines. 
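+ *
+ * Returns 0 when the FWSM RSPCIPHY bit permits the reset, otherwise
+ * E1000_BLK_PHY_RESET.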
+ **/ +static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + + return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET; +} + +/** + * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states + * @hw: pointer to the HW structure + * + * Assumes semaphore already acquired. + * + **/ +static s32 e1000_write_smbus_addr(struct e1000_hw *hw) +{ + u16 phy_data; + u32 strap = er32(STRAP); + u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> + E1000_STRAP_SMT_FREQ_SHIFT; + s32 ret_val; + + strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; + + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~HV_SMB_ADDR_MASK; + phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); + phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + + if (hw->phy.type == e1000_phy_i217) { + /* Restore SMBus frequency */ + if (freq--) { + phy_data &= ~HV_SMB_ADDR_FREQ_MASK; + phy_data |= (freq & (1 << 0)) << + HV_SMB_ADDR_FREQ_LOW_SHIFT; + phy_data |= (freq & (1 << 1)) << + (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); + } else { + e_dbg("Unsupported SMB frequency in PHY\n"); + } + } + + return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); +} + +/** + * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * + * SW should configure the LCD from the NVM extended configuration region + * as a workaround for certain parts. + **/ +static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; + s32 ret_val = 0; + u16 word_addr, reg_data, reg_addr, phy_page = 0; + + /* Initialize the PHY from the NVM on ICH platforms. This + * is needed due to an issue where the NVM configuration is + * not properly autoloaded after power transitions. + * Therefore, after each PHY reset, we will load the + * configuration data out of the NVM manually. + */ + switch (hw->mac.type) { + case e1000_ich8lan: + if (phy->type != e1000_phy_igp_3) + return ret_val; + + if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || + (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + break; + } + /* Fall-thru */ + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; + break; + default: + return ret_val; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + data = er32(FEXTNVM); + if (!(data & sw_cfg_mask)) + goto release; + + /* Make sure HW does not configure LCD from PHY + * extended configuration before SW configuration + */ + data = er32(EXTCNF_CTRL); + if ((hw->mac.type < e1000_pch2lan) && + (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) + goto release; + + cnf_size = er32(EXTCNF_SIZE); + cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; + cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; + if (!cnf_size) + goto release; + + cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; + cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; + + if (((hw->mac.type == e1000_pchlan) && + !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || + (hw->mac.type > e1000_pchlan)) { + /* HW configures the SMBus address and LEDs when the + * OEM and LCD Write Enable bits are set in the NVM. + * When both NVM bits are cleared, SW will configure + * them instead. 
+ */ + ret_val = e1000_write_smbus_addr(hw); + if (ret_val) + goto release; + + data = er32(LEDCTL); + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, + (u16)data); + if (ret_val) + goto release; + } + + /* Configure LCD from extended configuration region. */ + + /* cnf_base_addr is in DWORD */ + word_addr = (u16)(cnf_base_addr << 1); + + for (i = 0; i < cnf_size; i++) { + ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, ®_data); + if (ret_val) + goto release; + + ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), + 1, ®_addr); + if (ret_val) + goto release; + + /* Save off the PHY page for future writes. */ + if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { + phy_page = reg_data; + continue; + } + + reg_addr &= PHY_REG_MASK; + reg_addr |= phy_page; + + ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data); + if (ret_val) + goto release; + } + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_k1_gig_workaround_hv - K1 Si workaround + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * If K1 is enabled for 1Gbps, the MAC might stall when transitioning + * from a lower speed. This workaround disables K1 whenever link is at 1Gig + * If link is down, the function will restore the default K1 setting located + * in the NVM. + **/ +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) +{ + s32 ret_val = 0; + u16 status_reg = 0; + bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; + + if (hw->mac.type != e1000_pchlan) + return 0; + + /* Wrap the whole flow with the sw flag */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ + if (link) { + if (hw->phy.type == e1000_phy_82578) { + ret_val = e1e_rphy_locked(hw, BM_CS_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); + + if (status_reg == (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + k1_enable = false; + } + + if (hw->phy.type == e1000_phy_82577) { + ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg); + if (ret_val) + goto release; + + status_reg &= (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_MASK); + + if (status_reg == (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_1000)) + k1_enable = false; + } + + /* Link stall fix for link up */ + ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100); + if (ret_val) + goto release; + + } else { + /* Link stall fix for link down */ + ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100); + if (ret_val) + goto release; + } + + ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); + +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_configure_k1_ich8lan - Configure K1 power state + * @hw: pointer to the HW structure + * @enable: K1 state to configure + * + * Configure the K1 power state based on the provided parameter. + * Assumes semaphore already acquired. 
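+ *
+ * The sequence below updates E1000_KMRNCTRLSTA_K1_CONFIG, then briefly
+ * forces the MAC speed (FRCSPD together with the SPD_BYPS bypass) so the
+ * new K1 setting takes effect, restoring CTRL/CTRL_EXT afterwards.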
+ * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + **/ +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) +{ + s32 ret_val; + u32 ctrl_reg = 0; + u32 ctrl_ext = 0; + u32 reg = 0; + u16 kmrn_reg = 0; + + ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + return ret_val; + + if (k1_enable) + kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; + else + kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; + + ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); + if (ret_val) + return ret_val; + + usleep_range(20, 40); + ctrl_ext = er32(CTRL_EXT); + ctrl_reg = er32(CTRL); + + reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + reg |= E1000_CTRL_FRCSPD; + ew32(CTRL, reg); + + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); + e1e_flush(); + usleep_range(20, 40); + ew32(CTRL, ctrl_reg); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + usleep_range(20, 40); + + return 0; +} + +/** + * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * @d0_state: boolean if entering d0 or d3 device state + * + * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are + * collectively called OEM bits. The OEM Write Enable bit and SW Config bit + * in NVM determines whether HW should configure LPLU and Gbe Disable. + **/ +static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) +{ + s32 ret_val = 0; + u32 mac_reg; + u16 oem_reg; + + if (hw->mac.type < e1000_pchlan) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pchlan) { + mac_reg = er32(EXTCNF_CTRL); + if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) + goto release; + } + + mac_reg = er32(FEXTNVM); + if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) + goto release; + + mac_reg = er32(PHY_CTRL); + + ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto release; + + oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); + + if (d0_state) { + if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + } else { + if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_NOND0A_LPLU)) + oem_reg |= HV_OEM_BITS_LPLU; + } + + /* Set Restart auto-neg to activate the bits */ + if ((d0_state || (hw->mac.type != e1000_pchlan)) && + !hw->phy.ops.check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + + ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg); + +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode + * @hw: pointer to the HW structure + **/ +static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data); + if (ret_val) + return ret_val; + + data |= HV_KMRN_MDIO_SLOW; + + ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data); + + return ret_val; +} + +/** + * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. 
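+ *
+ * On pchlan parts this covers, in order: MDIO slow mode for 82577, early
+ * preamble and SSC tuning on affected 82577/82578 revisions, an 82578
+ * soft reset, page-0 selection, the K1 gig workaround, a busy-hub
+ * half-duplex fix, and a raised MSE threshold.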
+ **/ +static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_data; + + if (hw->mac.type != e1000_pchlan) + return 0; + + /* Set MDIO slow mode before any other MDIO access */ + if (hw->phy.type == e1000_phy_82577) { + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + } + + if (((hw->phy.type == e1000_phy_82577) && + ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || + ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { + /* Disable generation of early preamble */ + ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431); + if (ret_val) + return ret_val; + + /* Preamble tuning for SSC */ + ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204); + if (ret_val) + return ret_val; + } + + if (hw->phy.type == e1000_phy_82578) { + /* Return registers to default by doing a soft reset then + * writing 0x3140 to the control register. + */ + if (hw->phy.revision < 2) { + e1000e_phy_sw_reset(hw); + ret_val = e1e_wphy(hw, MII_BMCR, 0x3140); + } + } + + /* Select page 0 */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + hw->phy.addr = 1; + ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + + /* Configure the K1 Si workaround during phy reset assuming there is + * link so that it disables K1 if link is in 1Gbps. + */ + ret_val = e1000_k1_gig_workaround_hv(hw, true); + if (ret_val) + return ret_val; + + /* Workaround for link disconnects on a busy hub in half duplex */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data); + if (ret_val) + goto release; + ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF); + if (ret_val) + goto release; + + /* set MSE higher to enable link to stay up when noise is high */ + ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY + * @hw: pointer to the HW structure + **/ +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) +{ + u32 mac_reg; + u16 i, phy_reg = 0; + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) + goto release; + + /* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */ + for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { + mac_reg = er32(RAL(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), + (u16)((mac_reg >> 16) & 0xFFFF)); + + mac_reg = er32(RAH(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), + (u16)((mac_reg & E1000_RAH_AV) + >> 16)); + } + + e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + +release: + hw->phy.ops.release(hw); +} + +/** + * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation + * with 82579 PHY + * @hw: pointer to the HW structure + * @enable: flag to enable/disable workaround when enabling/disabling jumbos + **/ +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) +{ + s32 ret_val = 0; + u16 phy_reg, data; + u32 mac_reg; + u16 i; + + if (hw->mac.type < e1000_pch2lan) + return 0; + + /* disable Rx path while enabling/disabling workaround */ + e1e_rphy(hw, PHY_REG(769, 20), 
&phy_reg); + ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); + if (ret_val) + return ret_val; + + if (enable) { + /* Write Rx addresses (rar_entry_count for RAL/H, +4 for + * SHRAL/H) and initial CRC values to the MAC + */ + for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) { + u8 mac_addr[ETH_ALEN] = { 0 }; + u32 addr_high, addr_low; + + addr_high = er32(RAH(i)); + if (!(addr_high & E1000_RAH_AV)) + continue; + addr_low = er32(RAL(i)); + mac_addr[0] = (addr_low & 0xFF); + mac_addr[1] = ((addr_low >> 8) & 0xFF); + mac_addr[2] = ((addr_low >> 16) & 0xFF); + mac_addr[3] = ((addr_low >> 24) & 0xFF); + mac_addr[4] = (addr_high & 0xFF); + mac_addr[5] = ((addr_high >> 8) & 0xFF); + + ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr)); + } + + /* Write Rx addresses to the PHY */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + /* Enable jumbo frame workaround in the MAC */ + mac_reg = er32(FFLT_DBG); + mac_reg &= ~(1 << 14); + mac_reg |= (7 << 15); + ew32(FFLT_DBG, mac_reg); + + mac_reg = er32(RCTL); + mac_reg |= E1000_RCTL_SECRC; + ew32(RCTL, mac_reg); + + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + return ret_val; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data | (1 << 0)); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + return ret_val; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + return ret_val; + + /* Enable jumbo frame workaround in the PHY */ + e1e_rphy(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + data |= (0x37 << 5); + ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); + if (ret_val) + return ret_val; + e1e_rphy(hw, PHY_REG(769, 16), &data); + data &= ~(1 << 13); + ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); + if (ret_val) + return ret_val; + e1e_rphy(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (0x1A << 2); + ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); + if (ret_val) + return ret_val; + ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100); + if (ret_val) + return ret_val; + e1e_rphy(hw, HV_PM_CTRL, &data); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10)); + if (ret_val) + return ret_val; + } else { + /* Write MAC register values back to h/w defaults */ + mac_reg = er32(FFLT_DBG); + mac_reg &= ~(0xF << 14); + ew32(FFLT_DBG, mac_reg); + + mac_reg = er32(RCTL); + mac_reg &= ~E1000_RCTL_SECRC; + ew32(RCTL, mac_reg); + + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + return ret_val; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data & ~(1 << 0)); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + return ret_val; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + return ret_val; + + /* Write PHY register values back to h/w defaults */ + e1e_rphy(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); + if (ret_val) + return ret_val; + e1e_rphy(hw, PHY_REG(769, 16), &data); + data |= (1 << 13); + ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); + if (ret_val) + return ret_val; + e1e_rphy(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (0x8 << 2); + ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); + 
if (ret_val) + return ret_val; + ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00); + if (ret_val) + return ret_val; + e1e_rphy(hw, HV_PM_CTRL, &data); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); + if (ret_val) + return ret_val; + } + + /* re-enable Rx path after enabling/disabling workaround */ + return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); +} + +/** + * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. + **/ +static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + if (hw->mac.type != e1000_pch2lan) + return 0; + + /* Set MDIO slow mode before any other MDIO access */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + /* set MSE higher to enable link to stay up when noise is high */ + ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034); + if (ret_val) + goto release; + /* drop link after 5 times MSE threshold was reached */ + ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_k1_gig_workaround_lv - K1 Si workaround + * @hw: pointer to the HW structure + * + * Workaround to set the K1 beacon duration for 82579 parts + **/ +static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 status_reg = 0; + u32 mac_reg; + u16 phy_reg; + + if (hw->mac.type != e1000_pch2lan) + return 0; + + /* Set K1 beacon duration based on 1Gbps speed or otherwise */ + ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); + if (ret_val) + return ret_val; + + if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) + == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { + mac_reg = er32(FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + + ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg); + if (ret_val) + return ret_val; + + if (status_reg & HV_M_STATUS_SPEED_1000) { + u16 pm_phy_reg; + + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; + phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; + /* LV 1G Packet drop issue wa */ + ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg); + if (ret_val) + return ret_val; + pm_phy_reg &= ~HV_PM_CTRL_PLL_STOP_IN_K1_GIGA; + ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg); + if (ret_val) + return ret_val; + } else { + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; + phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT; + } + ew32(FEXTNVM4, mac_reg); + ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg); + } + + return ret_val; +} + +/** + * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware + * @hw: pointer to the HW structure + * @gate: boolean set to true to gate, false to ungate + * + * Gate/ungate the automatic PHY configuration via hardware; perform + * the configuration via software instead. + **/ +static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) +{ + u32 extcnf_ctrl; + + if (hw->mac.type < e1000_pch2lan) + return; + + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (gate) + extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; + else + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; + + ew32(EXTCNF_CTRL, extcnf_ctrl); +} + +/** + * e1000_lan_init_done_ich8lan - Check for PHY config completion + * @hw: pointer to the HW structure + * + * Check the appropriate indication the MAC has finished configuring the + * PHY after a software reset. 
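+ *
+ * As implemented below, this polls STATUS.LAN_INIT_DONE for up to
+ * E1000_ICH8_LAN_INIT_TIMEOUT iterations, warns if the bit never set,
+ * and then clears it so the next init event starts from a clean state.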
+ **/
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
+{
+	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
+
+	/* Wait for basic configuration to complete before proceeding */
+	do {
+		data = er32(STATUS);
+		data &= E1000_STATUS_LAN_INIT_DONE;
+		usleep_range(100, 200);
+	} while ((!data) && --loop);
+
+	/* If basic configuration is incomplete before the above loop
+	 * count reaches 0, loading the configuration from NVM will
+	 * leave the PHY in a bad state possibly resulting in no link.
+	 */
+	if (loop == 0)
+		e_dbg("LAN_INIT_DONE not set, increase timeout\n");
+
+	/* Clear the Init Done bit for the next init event */
+	data = er32(STATUS);
+	data &= ~E1000_STATUS_LAN_INIT_DONE;
+	ew32(STATUS, data);
+}
+
+/**
+ * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 reg;
+
+	if (hw->phy.ops.check_reset_block(hw))
+		return 0;
+
+	/* Allow time for h/w to get to quiescent state after reset */
+	usleep_range(10000, 20000);
+
+	/* Perform any necessary post-reset workarounds */
+	switch (hw->mac.type) {
+	case e1000_pchlan:
+		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+		if (ret_val)
+			return ret_val;
+		break;
+	case e1000_pch2lan:
+		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
+		if (ret_val)
+			return ret_val;
+		break;
+	default:
+		break;
+	}
+
+	/* Clear the host wakeup bit after lcd reset */
+	if (hw->mac.type >= e1000_pchlan) {
+		e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
+		reg &= ~BM_WUC_HOST_WU_BIT;
+		e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
+	}
+
+	/* Configure the LCD with the extended configuration region in NVM */
+	ret_val = e1000_sw_lcd_config_ich8lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Configure the LCD with the OEM bits in NVM */
+	ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+
+	if (hw->mac.type == e1000_pch2lan) {
+		/* Ungate automatic PHY configuration on non-managed 82579 */
+		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+			usleep_range(10000, 20000);
+			e1000_gate_hw_phy_config_ich8lan(hw, false);
+		}
+
+		/* Set EEE LPI Update Timer to 200usec */
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+		ret_val = e1000_write_emi_reg_locked(hw,
+						     I82579_LPI_UPDATE_TIMER,
+						     0x1387);
+		hw->phy.ops.release(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Resets the PHY.
+ * This is a function pointer entry point called by drivers
+ * or other shared routines.
+ **/
+static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
+	if ((hw->mac.type == e1000_pch2lan) &&
+	    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+		e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+	ret_val = e1000e_phy_hw_reset_generic(hw);
+	if (ret_val)
+		return ret_val;
+
+	return e1000_post_phy_reset_ich8lan(hw);
+}
+
+/**
+ * e1000_set_lplu_state_pchlan - Set Low Power Link Up state
+ * @hw: pointer to the HW structure
+ * @active: true to enable LPLU, false to disable
+ *
+ * Sets the LPLU state according to the active flag. For PCH, if the OEM
+ * write bit is disabled in the NVM, writing the LPLU bits in the MAC will
+ * not set the phy speed. This function will manually set the LPLU bit and
+ * restart auto-neg as hw would do. D3 and D0 LPLU will call the same
+ * function since it configures the same bit.
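+ *
+ * Concretely, per the body below: HV_OEM_BITS_LPLU is set or cleared in
+ * HV_OEM_BITS and, unless a reset block is active, HV_OEM_BITS_RESTART_AN
+ * is also set so auto-negotiation restarts with the new setting.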
+ **/ +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 oem_reg; + + ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + return ret_val; + + if (active) + oem_reg |= HV_OEM_BITS_LPLU; + else + oem_reg &= ~HV_OEM_BITS_LPLU; + + if (!hw->phy.ops.check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + + return e1e_wphy(hw, HV_OEM_BITS, oem_reg); +} + +/** + * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = 0; + u16 data; + + if (phy->type == e1000_phy_ife) + return 0; + + phy_ctrl = er32(PHY_CTRL); + + if (active) { + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D3 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. 
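+ *
+ * For D3 (see the body below) LPLU is only engaged when the advertised
+ * speeds are E1000_ALL_SPEED_DUPLEX, E1000_ALL_NOT_GIG or
+ * E1000_ALL_10_SPEED; when deactivating, SmartSpeed is restored per its
+ * configured mode instead.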
+ **/ +static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = 0; + u16 data; + + phy_ctrl = er32(PHY_CTRL); + + if (!active) { + phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + } + + return ret_val; +} + +/** + * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 + * @hw: pointer to the HW structure + * @bank: pointer to the variable that returns the active bank + * + * Reads signature byte from the NVM using the flash access registers. + * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. 
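+ *
+ * Worked example, matching the constants used below: bank 0's signature
+ * byte sits at byte offset E1000_ICH_NVM_SIG_WORD * 2 + 1, and bank 1's
+ * copy one flash_bank_size (in words, doubled to bytes) higher; a byte
+ * that equals E1000_ICH_NVM_SIG_VALUE under E1000_ICH_NVM_VALID_SIG_MASK
+ * marks that bank valid.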
+ **/ +static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) +{ + u32 eecd; + struct e1000_nvm_info *nvm = &hw->nvm; + u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); + u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; + u8 sig_byte = 0; + s32 ret_val; + + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + eecd = er32(EECD); + if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == + E1000_EECD_SEC1VAL_VALID_MASK) { + if (eecd & E1000_EECD_SEC1VAL) + *bank = 1; + else + *bank = 0; + + return 0; + } + e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n"); + /* fall-thru */ + default: + /* set bank to 0 in case flash read fails */ + *bank = 0; + + /* Check bank 0 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 0; + return 0; + } + + /* Check bank 1 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + + bank1_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 1; + return 0; + } + + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } +} + +/** + * e1000_read_nvm_ich8lan - Read word(s) from the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words + * @data: Pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM using the flash access registers. + **/ +static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = 0; + u32 bank = 0; + u16 i, word; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = 0; + for (i = 0; i < words; i++) { + if (dev_spec->shadow_ram[offset + i].modified) { + data[i] = dev_spec->shadow_ram[offset + i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, + act_offset + i, + &word); + if (ret_val) + break; + data[i] = word; + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + e_dbg("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_flash_cycle_init_ich8lan - Initialize flash + * @hw: pointer to the HW structure + * + * This function does initial flash setup so that a new read/write/erase cycle + * can be started. + **/ +static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) +{ + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* Check if the flash descriptor is valid */ + if (!hsfsts.hsf_status.fldesvalid) { + e_dbg("Flash descriptor invalid. 
SW Sequencing must be used.\n"); + return -E1000_ERR_NVM; + } + + /* Clear FCERR and DAEL in hw status by writing 1 */ + hsfsts.hsf_status.flcerr = 1; + hsfsts.hsf_status.dael = 1; + + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + + /* Either we should have a hardware SPI cycle in progress + * bit to check against, in order to start a new cycle or + * FDONE bit should be changed in the hardware so that it + * is 1 after hardware reset, which can then be used as an + * indication whether a cycle is in progress or has been + * completed. + */ + + if (!hsfsts.hsf_status.flcinprog) { + /* There is no cycle running at present, + * so we can start a cycle. + * Begin by setting Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + ret_val = 0; + } else { + s32 i; + + /* Otherwise poll for sometime so the current + * cycle has a chance to end before giving up. + */ + for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (!hsfsts.hsf_status.flcinprog) { + ret_val = 0; + break; + } + udelay(1); + } + if (!ret_val) { + /* Successful in waiting for previous cycle to timeout, + * now set the Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + } else { + e_dbg("Flash controller busy, cannot get access\n"); + } + } + + return ret_val; +} + +/** + * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) + * @hw: pointer to the HW structure + * @timeout: maximum time to wait for completion + * + * This function starts a flash cycle and waits for its completion. + **/ +static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) +{ + union ich8_hws_flash_ctrl hsflctl; + union ich8_hws_flash_status hsfsts; + u32 i = 0; + + /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcgo = 1; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* wait till FDONE bit is set to 1 */ + do { + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcdone) + break; + udelay(1); + } while (i++ < timeout); + + if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr) + return 0; + + return -E1000_ERR_NVM; +} + +/** + * e1000_read_flash_word_ich8lan - Read word from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash word at offset into data. Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + /* Must convert offset into bytes. */ + offset <<= 1; + + return e1000_read_flash_data_ich8lan(hw, offset, 2, data); +} + +/** + * e1000_read_flash_byte_ich8lan - Read byte from flash + * @hw: pointer to the HW structure + * @offset: The offset of the byte to read. + * @data: Pointer to a byte to store the value read. + * + * Reads a single byte from the NVM using the flash access registers. + **/ +static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 *data) +{ + s32 ret_val; + u16 word = 0; + + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + if (ret_val) + return ret_val; + + *data = (u8)word; + + return 0; +} + +/** + * e1000_read_flash_data_ich8lan - Read byte or word from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte or word to read. 
+ * @size: Size of data to read, 1=byte 2=word + * @data: Pointer to the word to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. + **/ +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (!ret_val) { + flash_data = er32flash(ICH_FLASH_FDATA0); + if (size == 1) + *data = (u8)(flash_data & 0x000000FF); + else if (size == 2) + *data = (u16)(flash_data & 0x0000FFFF); + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) { + /* Repeat for some time before giving up. */ + continue; + } else if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_nvm_ich8lan - Write word(s) to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to write. + * @words: Size of data to write in words + * @data: Pointer to the word(s) to write at offset. + * + * Writes a byte or word to the NVM using the flash access registers. + **/ +static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + nvm->ops.acquire(hw); + + for (i = 0; i < words; i++) { + dev_spec->shadow_ram[offset + i].modified = true; + dev_spec->shadow_ram[offset + i].value = data[i]; + } + + nvm->ops.release(hw); + + return 0; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. 
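+ *
+ * In short, per the body below: the inactive bank is erased, every shadow
+ * RAM word (modified, or copied from the old bank) is written to it, the
+ * new bank's signature word is then set to 10b and the old bank's
+ * signature is invalidated, so the banks are effectively swapped.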
+ **/ +static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u16 data; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + /* Determine whether to write the value stored + * in the other NVM bank or a modified value stored + * in the shadow RAM + */ + if (dev_spec->shadow_ram[i].modified) { + data = dev_spec->shadow_ram[i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, i + + old_bank_offset, + &data); + if (ret_val) + break; + } + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD) + data |= E1000_ICH_NVM_SIG_MASK; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + usleep_range(100, 200); + /* Write the bytes to the new bank. */ + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset, + (u8)data); + if (ret_val) + break; + + usleep_range(100, 200); + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset + 1, + (u8)(data >> 8)); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. + */ + if (ret_val) { + /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ + e_dbg("Flash commit failed.\n"); + goto release; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); + if (ret_val) + goto release; + + data &= 0xBFFF; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset * 2 + 1, + (u8)(data >> 8)); + if (ret_val) + goto release; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. 
 */
+	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+		dev_spec->shadow_ram[i].modified = false;
+		dev_spec->shadow_ram[i].value = 0xFFFF;
+	}
+
+release:
+	nvm->ops.release(hw);
+
+	/* Reload the EEPROM, or else modifications will not appear
+	 * until after the next adapter reset.
+	 */
+	if (!ret_val) {
+		nvm->ops.reload(hw);
+		usleep_range(10000, 20000);
+	}
+
+out:
+	if (ret_val)
+		e_dbg("NVM update error: %d\n", ret_val);
+
+	return ret_val;
+}
+
+/**
+ * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Check to see if the checksum needs to be fixed by reading bit 6 in word
+ * 0x19. If the bit is 0, the EEPROM has been modified but the checksum was
+ * not recalculated, in which case we need to calculate the checksum and set
+ * bit 6.
+ **/
+static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 data;
+	u16 word;
+	u16 valid_csum_mask;
+
+	/* Read NVM and check Invalid Image CSUM bit. If this bit is 0,
+	 * the checksum needs to be fixed. This bit is an indication that
+	 * the NVM was prepared by OEM software and did not calculate
+	 * the checksum...a likely scenario.
+	 */
+	switch (hw->mac.type) {
+	case e1000_pch_lpt:
+		word = NVM_COMPAT;
+		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
+		break;
+	default:
+		word = NVM_FUTURE_INIT_WORD1;
+		valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM;
+		break;
+	}
+
+	ret_val = e1000_read_nvm(hw, word, 1, &data);
+	if (ret_val)
+		return ret_val;
+
+	if (!(data & valid_csum_mask)) {
+		data |= valid_csum_mask;
+		ret_val = e1000_write_nvm(hw, word, 1, &data);
+		if (ret_val)
+			return ret_val;
+		ret_val = e1000e_update_nvm_checksum(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	return e1000e_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
+ * @hw: pointer to the HW structure
+ *
+ * To prevent malicious write/erase of the NVM, set it to be read-only
+ * so that the hardware ignores all write/erase cycles of the NVM via
+ * the flash control registers. The shadow-ram copy of the NVM will
+ * still be updated; however, any updates to this copy will not stick
+ * across driver reloads.
+ **/
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	union ich8_flash_protected_range pr0;
+	union ich8_hws_flash_status hsfsts;
+	u32 gfpreg;
+
+	nvm->ops.acquire(hw);
+
+	gfpreg = er32flash(ICH_FLASH_GFPREG);
+
+	/* Write-protect GbE Sector of NVM */
+	pr0.regval = er32flash(ICH_FLASH_PR0);
+	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
+	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
+	pr0.range.wpe = true;
+	ew32flash(ICH_FLASH_PR0, pr0.regval);
+
+	/* Lock down a subset of GbE Flash Control Registers, e.g.
+	 * PR0 to prevent the write-protection from being lifted.
+	 * Once FLOCKDN is set, the registers protected by it cannot
+	 * be written until FLOCKDN is cleared by a hardware reset.
+	 */
+	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+	hsfsts.hsf_status.flockdn = true;
+	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+
+	nvm->ops.release(hw);
+}
+
+/**
+ * e1000_write_flash_data_ich8lan - Writes bytes to the NVM
+ * @hw: pointer to the HW structure
+ * @offset: The offset (in bytes) of the byte/word to write.
+ * @size: Size of data to write, 1=byte 2=word
+ * @data: The byte(s) to write to the NVM.
+ *
+ * Writes one/two bytes to the NVM using the flash access registers.
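+ *
+ * The cycle below mirrors the read path: init the flash cycle, program
+ * the byte count and ICH_CYCLE_WRITE in HSFCTL, load FADDR and FDATA0,
+ * start the cycle, and retry up to ICH_FLASH_CYCLE_REPEAT_COUNT times
+ * when FCERR is raised.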
+ **/ +static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val; + u8 count = 0; + + if (size < 1 || size > 2 || data > size * 0xff || + offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + if (size == 1) + flash_data = (u32)data & 0x00FF; + else + flash_data = (u32)data; + + ew32flash(ICH_FLASH_FDATA0, flash_data); + + /* check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + if (!ret_val) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) + /* Repeat for some time before giving up. */ + continue; + if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_flash_byte_ich8lan - Write a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The index of the byte to read. + * @data: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + **/ +static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 data) +{ + u16 word = (u16)data; + + return e1000_write_flash_data_ich8lan(hw, offset, 1, word); +} + +/** + * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The offset of the byte to write. + * @byte: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + * Goes through a retry algorithm before giving up. + **/ +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte) +{ + s32 ret_val; + u16 program_retries; + + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + return ret_val; + + for (program_retries = 0; program_retries < 100; program_retries++) { + e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); + usleep_range(100, 200); + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM + * @hw: pointer to the HW structure + * @bank: 0 for first bank, 1 for second bank, etc. + * + * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. + * bank N is 4096 * N + flash_reg_addr. 
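+ *
+ * Example, assuming a 4 KB hardware sector size (BERASE = 01b): erasing
+ * bank 1 issues one block-erase cycle at flash_base_addr + 4096, while a
+ * 256-byte sector size takes 16 consecutive erase cycles for the same
+ * bank.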
+ **/ +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + /* bank size is in 16bit words - adjust to bytes */ + u32 flash_bank_size = nvm->flash_bank_size * 2; + s32 ret_val; + s32 count = 0; + s32 j, iteration, sector_size; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* Determine HW Sector size: Read BERASE bits of hw flash status + * register + * 00: The Hw sector is 256 bytes, hence we need to erase 16 + * consecutive sectors. The start index for the nth Hw sector + * can be calculated as = bank * 4096 + n * 256 + * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. + * The start index for the nth Hw sector can be calculated + * as = bank * 4096 + * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 + * (ich9 only, otherwise error condition) + * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 + */ + switch (hsfsts.hsf_status.berasesz) { + case 0: + /* Hw sector size 256 */ + sector_size = ICH_FLASH_SEG_SIZE_256; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; + break; + case 1: + sector_size = ICH_FLASH_SEG_SIZE_4K; + iteration = 1; + break; + case 2: + sector_size = ICH_FLASH_SEG_SIZE_8K; + iteration = 1; + break; + case 3: + sector_size = ICH_FLASH_SEG_SIZE_64K; + iteration = 1; + break; + default: + return -E1000_ERR_NVM; + } + + /* Start with the base address, then add the sector offset. */ + flash_linear_addr = hw->nvm.flash_base_addr; + flash_linear_addr += (bank) ? flash_bank_size : 0; + + for (j = 0; j < iteration; j++) { + do { + u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; + + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + return ret_val; + + /* Write a value 11 (block Erase) in Flash + * Cycle field in hw flash control + */ + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* Write the last 24 bits of an index within the + * block into Flash Linear address field in Flash + * Address. + */ + flash_linear_addr += (j * sector_size); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, timeout); + if (!ret_val) + break; + + /* Check if FCERR is set to 1. If 1, + * clear it and try the whole sequence + * a few more times else Done + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) + /* repeat for some time before giving up */ + continue; + else if (!hsfsts.hsf_status.flcdone) + return ret_val; + } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); + } + + return 0; +} + +/** + * e1000_valid_led_default_ich8lan - Set the default LED settings + * @hw: pointer to the HW structure + * @data: Pointer to the LED settings + * + * Reads the LED default settings from the NVM to data. If the NVM LED + * settings is all 0's or F's, set the LED default to a valid LED default + * setting. 
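+ *
+ * I.e., per the body below, ID_LED_RESERVED_0000 and ID_LED_RESERVED_FFFF
+ * read from NVM_ID_LED_SETTINGS are replaced with ID_LED_DEFAULT_ICH8LAN.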
+ **/
+static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		return ret_val;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+		*data = ID_LED_DEFAULT_ICH8LAN;
+
+	return 0;
+}
+
+/**
+ * e1000_id_led_init_pchlan - store LED configurations
+ * @hw: pointer to the HW structure
+ *
+ * PCH does not control LEDs via the LEDCTL register, rather it uses
+ * the PHY LED configuration register.
+ *
+ * PCH also does not have an "always on" or "always off" mode which
+ * complicates the ID feature. Instead of using the "on" mode to indicate
+ * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()),
+ * use "link_up" mode. The LEDs will still ID on request if there is no
+ * link based on logic in e1000_led_[on|off]_pchlan().
+ **/
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
+	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
+	u16 data, i, temp, shift;
+
+	/* Get default ID LED modes */
+	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+	if (ret_val)
+		return ret_val;
+
+	mac->ledctl_default = er32(LEDCTL);
+	mac->ledctl_mode1 = mac->ledctl_default;
+	mac->ledctl_mode2 = mac->ledctl_default;
+
+	for (i = 0; i < 4; i++) {
+		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
+		shift = (i * 5);
+		switch (temp) {
+		case ID_LED_ON1_DEF2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_ON1_OFF2:
+			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode1 |= (ledctl_on << shift);
+			break;
+		case ID_LED_OFF1_DEF2:
+		case ID_LED_OFF1_ON2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode1 |= (ledctl_off << shift);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		switch (temp) {
+		case ID_LED_DEF1_ON2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_OFF1_ON2:
+			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode2 |= (ledctl_on << shift);
+			break;
+		case ID_LED_DEF1_OFF2:
+		case ID_LED_ON1_OFF2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode2 |= (ledctl_off << shift);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * e1000_get_bus_info_ich8lan - Get/Set the bus type and width
+ * @hw: pointer to the HW structure
+ *
+ * ICH8 uses the PCI Express bus, but does not contain a PCI Express
+ * Capability register, so the bus width is hardcoded.
+ **/
+static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+
+	ret_val = e1000e_get_bus_info_pcie(hw);
+
+	/* ICH devices are "PCI Express"-ish. They have
+	 * a configuration space, but do not contain
+	 * PCI Express Capability registers, so bus width
+	 * must be hardcoded.
+	 */
+	if (bus->width == e1000_bus_width_unknown)
+		bus->width = e1000_bus_width_pcie_x1;
+
+	return ret_val;
+}
+
+/**
+ * e1000_reset_hw_ich8lan - Reset the hardware
+ * @hw: pointer to the HW structure
+ *
+ * Does a full reset of the hardware which includes a reset of the PHY and
+ * MAC.
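+ *
+ * Sequence, as implemented below: mask interrupts, stop Tx/Rx, apply the
+ * ICH8 packet-buffer sizing workaround, latch the NVM K1 setting on PCH,
+ * then issue E1000_CTRL_RST (together with E1000_CTRL_PHY_RST when the
+ * PHY is not reset-blocked) and run the post-reset PHY configuration.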
+ **/ +static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 kum_cfg; + u32 ctrl, reg; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC + * with the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + usleep_range(10000, 20000); + + /* Workaround for ICH8 bit corruption issue in FIFO memory */ + if (hw->mac.type == e1000_ich8lan) { + /* Set Tx and Rx buffer allocation to 8k apiece. */ + ew32(PBA, E1000_PBA_8K); + /* Set Packet Buffer Size to 16k. */ + ew32(PBS, E1000_PBS_16K); + } + + if (hw->mac.type == e1000_pchlan) { + /* Save the NVM K1 bit setting */ + ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg); + if (ret_val) + return ret_val; + + if (kum_cfg & E1000_NVM_K1_ENABLE) + dev_spec->nvm_k1_enabled = true; + else + dev_spec->nvm_k1_enabled = false; + } + + ctrl = er32(CTRL); + + if (!hw->phy.ops.check_reset_block(hw)) { + /* Full-chip reset requires MAC and PHY reset at the same + * time to make sure the interface between MAC and the + * external PHY is reset. + */ + ctrl |= E1000_CTRL_PHY_RST; + + /* Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if ((hw->mac.type == e1000_pch2lan) && + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + } + ret_val = e1000_acquire_swflag_ich8lan(hw); + e_dbg("Issuing a global reset to ich8lan\n"); + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + /* cannot issue a flush here because it hangs the hardware */ + msleep(20); + + /* Set Phy Config Counter to 50msec */ + if (hw->mac.type == e1000_pch2lan) { + reg = er32(FEXTNVM3); + reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + ew32(FEXTNVM3, reg); + } + + if (!ret_val) + clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); + + if (ctrl & E1000_CTRL_PHY_RST) { + ret_val = hw->phy.ops.get_cfg_done(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_post_phy_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + + /* For PCH, this write will make sure that any noise + * will be detected as a CRC error and be dropped rather than show up + * as a bad packet to the DMA engine. 
+ */ + if (hw->mac.type == e1000_pchlan) + ew32(CRC_OFFSET, 0x65656565); + + ew32(IMC, 0xffffffff); + er32(ICR); + + reg = er32(KABGTXD); + reg |= E1000_KABGTXD_BGSQLBIAS; + ew32(KABGTXD, reg); + + return 0; +} + +/** + * e1000_init_hw_ich8lan - Initialize the hardware + * @hw: pointer to the HW structure + * + * Prepares the hardware for transmit and receive by doing the following: + * - initialize hardware bits + * - initialize LED identification + * - setup receive address registers + * - setup flow control + * - setup transmit descriptors + * - clear statistics + **/ +static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl_ext, txdctl, snoop; + s32 ret_val; + u16 i; + + e1000_initialize_hw_bits_ich8lan(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + e_dbg("Error initializing identification LED\n"); + + /* Setup the receive address. */ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* The 82578 Rx buffer will stall if wakeup is enabled in host and + * the ME. Disable wakeup by clearing the host wakeup bit. + * Reset the phy after disabling host wakeup to reset the Rx buffer. + */ + if (hw->phy.type == e1000_phy_82578) { + e1e_rphy(hw, BM_PORT_GEN_CFG, &i); + i &= ~BM_WUC_HOST_WU_BIT; + e1e_wphy(hw, BM_PORT_GEN_CFG, i); + ret_val = e1000_phy_hw_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the transmit descriptor write-back policy for both queues */ + txdctl = er32(TXDCTL(0)); + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); + ew32(TXDCTL(0), txdctl); + txdctl = er32(TXDCTL(1)); + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); + ew32(TXDCTL(1), txdctl); + + /* ICH8 has opposite polarity of no_snoop bits. + * By default, we should use snoop behavior. + */ + if (mac->type == e1000_ich8lan) + snoop = PCIE_ICH8_SNOOP_ALL; + else + snoop = (u32)~(PCIE_NO_SNOOP_ALL); + e1000e_set_pcie_no_snoop(hw, snoop); + + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_ich8lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits + * @hw: pointer to the HW structure + * + * Sets/Clears required hardware bits necessary for correctly setting up the + * hardware for transmit and receive. 
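+ *
+ * The body below mostly sets raw, position-only bits (e.g. bit 22 of
+ * CTRL_EXT and of both TXDCTL registers) and additionally disables NFS
+ * filtering and, on ICH8, IPv6 extension header parsing to avoid Rx
+ * hangs.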
+ **/ +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Extended Device Control */ + reg = er32(CTRL_EXT); + reg |= (1 << 22); + /* Enable PHY low-power state when MAC is at D3 w/o WoL */ + if (hw->mac.type >= e1000_pchlan) + reg |= E1000_CTRL_EXT_PHYPDEN; + ew32(CTRL_EXT, reg); + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + if (hw->mac.type == e1000_ich8lan) + reg |= (1 << 28) | (1 << 29); + reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + reg |= (1 << 24) | (1 << 26) | (1 << 30); + ew32(TARC(1), reg); + + /* Device Status */ + if (hw->mac.type == e1000_ich8lan) { + reg = er32(STATUS); + reg &= ~(1 << 31); + ew32(STATUS, reg); + } + + /* work-around descriptor data corruption issue during nfs v2 udp + * traffic, just disable the nfs filtering capability + */ + reg = er32(RFCTL); + reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. + */ + if (hw->mac.type == e1000_ich8lan) + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + ew32(RFCTL, reg); + + /* Enable ECC on Lynxpoint */ + if (hw->mac.type == e1000_pch_lpt) { + reg = er32(PBECCSTS); + reg |= E1000_PBECCSTS_ECC_ENABLE; + ew32(PBECCSTS, reg); + + reg = er32(CTRL); + reg |= E1000_CTRL_MEHE; + ew32(CTRL, reg); + } +} + +/** + * e1000_setup_link_ich8lan - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + + if (hw->phy.ops.check_reset_block(hw)) + return 0; + + /* ICH parts do not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + /* Workaround h/w hang when Tx flow control enabled */ + if (hw->mac.type == e1000_pchlan) + hw->fc.requested_mode = e1000_fc_rx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + } + + /* Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Continue to configure the copper link. 
*/ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + ew32(FCTTV, hw->fc.pause_time); + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || + (hw->phy.type == e1000_phy_82577)) { + ew32(FCRTV_PCH, hw->fc.refresh_time); + + ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), + hw->fc.pause_time); + if (ret_val) + return ret_val; + } + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Configures the kumeran interface to the PHY to wait the appropriate time + * when polling the PHY, then call the generic setup_copper_link to finish + * configuring the copper link. + **/ +static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + /* Set the mac to wait the maximum time between each iteration + * and increase the max iterations when polling the phy; + * this fixes erroneous timeouts at 10Mbps. + */ + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + ®_data); + if (ret_val) + return ret_val; + reg_data |= 0x3F; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + reg_data); + if (ret_val) + return ret_val; + + switch (hw->phy.type) { + case e1000_phy_igp_3: + ret_val = e1000e_copper_link_setup_igp(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_bm: + case e1000_phy_82578: + ret_val = e1000e_copper_link_setup_m88(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_82577: + case e1000_phy_82579: + ret_val = e1000_copper_link_setup_82577(hw); + if (ret_val) + return ret_val; + break; + case e1000_phy_ife: + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, ®_data); + if (ret_val) + return ret_val; + + reg_data &= ~IFE_PMC_AUTO_MDIX; + + switch (hw->phy.mdix) { + case 1: + reg_data &= ~IFE_PMC_FORCE_MDIX; + break; + case 2: + reg_data |= IFE_PMC_FORCE_MDIX; + break; + case 0: + default: + reg_data |= IFE_PMC_AUTO_MDIX; + break; + } + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data); + if (ret_val) + return ret_val; + break; + default: + break; + } + + return e1000e_setup_copper_link(hw); +} + +/** + * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Calls the PHY specific link setup function and then calls the + * generic setup_copper_link to finish configuring the link for + * Lynxpoint PCH devices + **/ +static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + ret_val = e1000_copper_link_setup_82577(hw); + if (ret_val) + return ret_val; + + return e1000e_setup_copper_link(hw); +} + +/** + * e1000_get_link_up_info_ich8lan - Get current link speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to store current link speed + * @duplex: pointer to store the current link duplex + * + * Calls the generic get_speed_and_duplex to retrieve the current link + * information and then calls the Kumeran lock loss workaround for links at + * gigabit speeds. 
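+ *
+ * Per the body below, the workaround only runs on ICH8 with an IGP3 PHY
+ * and only when the resolved speed is SPEED_1000.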
+ **/ +static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); + if (ret_val) + return ret_val; + + if ((hw->mac.type == e1000_ich8lan) && + (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) { + ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw); + } + + return ret_val; +} + +/** + * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround + * @hw: pointer to the HW structure + * + * Work-around for 82566 Kumeran PCS lock loss: + * On link status change (i.e. PCI reset, speed change) and link is up and + * speed is gigabit- + * 0) if workaround is optionally disabled do nothing + * 1) wait 1ms for Kumeran link to come up + * 2) check Kumeran Diagnostic register PCS lock loss bit + * 3) if not set the link is locked (all is good), otherwise... + * 4) reset the PHY + * 5) repeat up to 10 times + * Note: this is only called for IGP3 copper when speed is 1gb. + **/ +static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 phy_ctrl; + s32 ret_val; + u16 i, data; + bool link; + + if (!dev_spec->kmrn_lock_loss_workaround_enabled) + return 0; + + /* Make sure link is up before proceeding. If not just return. + * Attempting this while link is negotiating fouled up link + * stability + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (!link) + return 0; + + for (i = 0; i < 10; i++) { + /* read once to clear */ + ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + /* and again to get new status */ + ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data); + if (ret_val) + return ret_val; + + /* check for PCS lock */ + if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) + return 0; + + /* Issue PHY reset */ + e1000_phy_hw_reset(hw); + mdelay(5); + } + /* Disable GigE link negotiation */ + phy_ctrl = er32(PHY_CTRL); + phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + ew32(PHY_CTRL, phy_ctrl); + + /* Call gig speed drop workaround on Gig disable before accessing + * any PHY registers + */ + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* unable to acquire PCS lock */ + return -E1000_ERR_PHY; +} + +/** + * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state + * @hw: pointer to the HW structure + * @state: boolean value used to set the current Kumeran workaround state + * + * If ICH8, set the current Kumeran workaround state (enabled - true + * /disabled - false). 
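+ *
+ * A usage sketch (hypothetical caller, not taken from this file): code
+ * that wants the PCS lock-loss recovery in
+ * e1000_kmrn_lock_loss_workaround_ich8lan() active on an ICH8 part would
+ * call
+ *   e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
+ * and pass false to suppress it.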
+ **/ +void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + + if (hw->mac.type != e1000_ich8lan) { + e_dbg("Workaround applies to ICH8 only.\n"); + return; + } + + dev_spec->kmrn_lock_loss_workaround_enabled = state; +} + +/** + * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3 + * @hw: pointer to the HW structure + * + * Workaround for 82566 power-down on D3 entry: + * 1) disable gigabit link + * 2) write VR power-down enable + * 3) read it back + * Continue if successful, else issue LCD reset and repeat + **/ +void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + u16 data; + u8 retry = 0; + + if (hw->phy.type != e1000_phy_igp_3) + return; + + /* Try the workaround twice (if needed) */ + do { + /* Disable link */ + reg = er32(PHY_CTRL); + reg |= (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE); + ew32(PHY_CTRL, reg); + + /* Call gig speed drop workaround on Gig disable before + * accessing any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* Write VR power-down enable */ + e1e_rphy(hw, IGP3_VR_CTRL, &data); + data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN); + + /* Read it back and test */ + e1e_rphy(hw, IGP3_VR_CTRL, &data); + data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK; + if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry) + break; + + /* Issue PHY reset and repeat at most one more time */ + reg = er32(CTRL); + ew32(CTRL, reg | E1000_CTRL_PHY_RST); + retry++; + } while (retry); +} + +/** + * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working + * @hw: pointer to the HW structure + * + * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC), + * LPLU, Gig disable, MDIC PHY reset): + * 1) Set Kumeran Near-end loopback + * 2) Clear Kumeran Near-end loopback + * Should only be called for ICH8[m] devices with any 1G Phy. + **/ +void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 reg_data; + + if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife)) + return; + + ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + &reg_data); + if (ret_val) + return; + reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK; + ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, + reg_data); + if (ret_val) + return; + reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK; + e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data); +} + +/** + * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx + * @hw: pointer to the HW structure + * + * During S0 to Sx transition, it is possible the link remains at gig + * instead of negotiating to a lower speed. Before going to Sx, set + * 'Gig Disable' to force link speed negotiation to a lower speed based on + * the LPLU setting in the NVM or custom setting. For PCH and newer parts, + * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also + * needs to be written. + * Parts that support (and are linked to a partner which support) EEE in + * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power + * than 10Mbps w/o EEE.
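+ *
+ * (That LPLU-vs-EEE trade-off is the eee_advert/eee_lp_ability check in
+ * the body below, which clears the LPLU bits in PHY_CTRL when both link
+ * partners advertise 100BaseT EEE.)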
+ **/ +void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 phy_ctrl; + s32 ret_val; + + phy_ctrl = er32(PHY_CTRL); + phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE; + + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg, device_id = hw->adapter->pdev->device; + + if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V)) { + u32 fextnvm6 = er32(FEXTNVM6); + + ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK); + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (!dev_spec->eee_disable) { + u16 eee_advert; + + ret_val = + e1000_read_emi_reg_locked(hw, + I217_EEE_ADVERTISEMENT, + &eee_advert); + if (ret_val) + goto release; + + /* Disable LPLU if both link partners support 100BaseT + * EEE and 100Full is advertised on both ends of the + * link. + */ + if ((eee_advert & I82579_EEE_100_SUPPORTED) && + (dev_spec->eee_lp_ability & + I82579_EEE_100_SUPPORTED) && + (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) + phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_NOND0A_LPLU); + } + + /* For i217 Intel Rapid Start Technology support, + * when the system is going into Sx and no manageability engine + * is present, the driver must configure proxy to reset only on + * power good. LPI (Low Power Idle) state must also reset only + * on power good, as well as the MTA (Multicast table array). + * The SMBus release must also be disabled on LCD reset. + */ + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + /* Enable proxy to reset only on power good. */ + e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg); + phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; + e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg); + + /* Set bit enable LPI (EEE) to reset only on + * power good. + */ + e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); + phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; + e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); + + /* Disable the SMB release on LCD reset. */ + e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); + phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; + e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); + } + + /* Enable MTA to reset for Intel Rapid Start Technology + * Support + */ + e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); + phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; + e1e_wphy_locked(hw, I217_CGFREG, phy_reg); + +release: + hw->phy.ops.release(hw); + } +out: + ew32(PHY_CTRL, phy_ctrl); + + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + if (hw->mac.type >= e1000_pchlan) { + e1000_oem_bits_config_ich8lan(hw, false); + + /* Reset PHY to activate OEM bits on 82577/8 */ + if (hw->mac.type == e1000_pchlan) + e1000e_phy_hw_reset_generic(hw); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + e1000_write_smbus_addr(hw); + hw->phy.ops.release(hw); + } +} + +/** + * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 + * @hw: pointer to the HW structure + * + * During Sx to S0 transitions on non-managed devices or managed devices + * on which PHY resets are not blocked, if the PHY registers cannot be + * accessed properly by the s/w toggle the LANPHYPC value to power cycle + * the PHY. + * On i217, setup Intel Rapid Start Technology. 
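+ *
+ * (Presumably invoked from the driver's PM resume path; note the early
+ * return below for MAC types older than pch2lan.)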
+ **/ +void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) +{ + s32 ret_val; + + if (hw->mac.type < e1000_pch2lan) + return; + + ret_val = e1000_init_phy_workarounds_pchlan(hw); + if (ret_val) { + e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val); + return; + } + + /* For i217 Intel Rapid Start Technology support when the system + * is transitioning from Sx and no manageability engine is present + * configure SMBus to restore on reset, disable proxy, and enable + * the reset on MTA (Multicast table array). + */ + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to setup iRST\n"); + return; + } + + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + /* Restore clear on SMB if no manageability engine + * is present + */ + ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); + if (ret_val) + goto release; + phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; + e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); + + /* Disable Proxy */ + e1e_wphy_locked(hw, I217_PROXY_CTRL, 0); + } + /* Enable reset on MTA */ + ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); + if (ret_val) + goto release; + phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; + e1e_wphy_locked(hw, I217_CGFREG, phy_reg); +release: + if (ret_val) + e_dbg("Error %d in resume workarounds\n", ret_val); + hw->phy.ops.release(hw); + } +} + +/** + * e1000_cleanup_led_ich8lan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. + **/ +static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); + + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000_led_on_ich8lan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. + **/ +static s32 e1000_led_on_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON)); + + ew32(LEDCTL, hw->mac.ledctl_mode2); + return 0; +} + +/** + * e1000_led_off_ich8lan - Turn LEDs off + * @hw: pointer to the HW structure + * + * Turn off the LEDs. + **/ +static s32 e1000_led_off_ich8lan(struct e1000_hw *hw) +{ + if (hw->phy.type == e1000_phy_ife) + return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, + (IFE_PSCL_PROBE_MODE | + IFE_PSCL_PROBE_LEDS_OFF)); + + ew32(LEDCTL, hw->mac.ledctl_mode1); + return 0; +} + +/** + * e1000_setup_led_pchlan - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use. + **/ +static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) +{ + return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1); +} + +/** + * e1000_cleanup_led_pchlan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. + **/ +static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) +{ + return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default); +} + +/** + * e1000_led_on_pchlan - Turn LEDs on + * @hw: pointer to the HW structure + * + * Turn on the LEDs. + **/ +static s32 e1000_led_on_pchlan(struct e1000_hw *hw) +{ + u16 data = (u16)hw->mac.ledctl_mode2; + u32 i, led; + + /* If no link, then turn LED on by setting the invert bit + * for each LED whose mode is "link_up" in ledctl_mode2.
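+ * Each of the three LEDs occupies a 5-bit field in the config word,
+ * hence the (i * 5) shifts below.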
+ */ + if (!(er32(STATUS) & E1000_STATUS_LU)) { + for (i = 0; i < 3; i++) { + led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; + if ((led & E1000_PHY_LED0_MODE_MASK) != + E1000_LEDCTL_MODE_LINK_UP) + continue; + if (led & E1000_PHY_LED0_IVRT) + data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); + else + data |= (E1000_PHY_LED0_IVRT << (i * 5)); + } + } + + return e1e_wphy(hw, HV_LED_CONFIG, data); +} + +/** + * e1000_led_off_pchlan - Turn LEDs off + * @hw: pointer to the HW structure + * + * Turn off the LEDs. + **/ +static s32 e1000_led_off_pchlan(struct e1000_hw *hw) +{ + u16 data = (u16)hw->mac.ledctl_mode1; + u32 i, led; + + /* If no link, then turn LED off by clearing the invert bit + * for each LED whose mode is "link_up" in ledctl_mode1. + */ + if (!(er32(STATUS) & E1000_STATUS_LU)) { + for (i = 0; i < 3; i++) { + led = (data >> (i * 5)) & E1000_PHY_LED0_MASK; + if ((led & E1000_PHY_LED0_MODE_MASK) != + E1000_LEDCTL_MODE_LINK_UP) + continue; + if (led & E1000_PHY_LED0_IVRT) + data &= ~(E1000_PHY_LED0_IVRT << (i * 5)); + else + data |= (E1000_PHY_LED0_IVRT << (i * 5)); + } + } + + return e1e_wphy(hw, HV_LED_CONFIG, data); +} + +/** + * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset + * @hw: pointer to the HW structure + * + * Read appropriate register for the config done bit for completion status + * and configure the PHY through s/w for EEPROM-less parts. + * + * NOTE: some silicon which is EEPROM-less will fail trying to read the + * config done bit, so the error is only logged and execution continues. + * If we were to return with error, EEPROM-less silicon would not be able + * to be reset or change link. + **/ +static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 bank = 0; + u32 status; + + e1000e_get_cfg_done_generic(hw); + + /* Wait for indication from h/w that it has completed basic config */ + if (hw->mac.type >= e1000_ich10lan) { + e1000_lan_init_done_ich8lan(hw); + } else { + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + e_dbg("Auto Read Done did not complete\n"); + ret_val = 0; + } + } + + /* Clear PHY Reset Asserted bit */ + status = er32(STATUS); + if (status & E1000_STATUS_PHYRA) + ew32(STATUS, status & ~E1000_STATUS_PHYRA); + else + e_dbg("PHY Reset Asserted not set - needs delay\n"); + + /* If EEPROM is not marked present, init the IGP 3 PHY manually */ + if (hw->mac.type <= e1000_ich9lan) { + if (!(er32(EECD) & E1000_EECD_PRES) && + (hw->phy.type == e1000_phy_igp_3)) { + e1000e_phy_init_script_igp3(hw); + } + } else { + if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { + /* Maybe we should do a basic PHY config */ + e_dbg("EEPROM not present\n"); + ret_val = -E1000_ERR_CONFIG; + } + } + + return ret_val; +} + +/** + * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link.
+ **/ +static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters + * @hw: pointer to the HW structure + * + * Clears hardware counters specific to the silicon family and calls + * clear_hw_cntrs_generic to clear all general purpose counters. + **/ +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) +{ + u16 phy_data; + s32 ret_val; + + e1000e_clear_hw_cntrs_base(hw); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + /* Clear PHY statistics registers */ + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || + (hw->phy.type == e1000_phy_82577)) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = hw->phy.ops.set_page(hw, + HV_STATS_PAGE << IGP_PAGE_SHIFT); + if (ret_val) + goto release; + hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); +release: + hw->phy.ops.release(hw); + } +} + +static const struct e1000_mac_operations ich8_mac_ops = { + /* check_mng_mode dependent on mac type */ + .check_for_link = e1000_check_for_copper_link_ich8lan, + /* cleanup_led dependent on mac type */ + .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, + .get_bus_info = e1000_get_bus_info_ich8lan, + .set_lan_id = e1000_set_lan_id_single_port, + .get_link_up_info = e1000_get_link_up_info_ich8lan, + /* led_on dependent on mac type */ + /* led_off dependent on mac type */ + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .reset_hw = e1000_reset_hw_ich8lan, + .init_hw = e1000_init_hw_ich8lan, + .setup_link = e1000_setup_link_ich8lan, + .setup_physical_interface = e1000_setup_copper_link_ich8lan, + /* id_led_init dependent on mac type */ + .config_collision_dist = e1000e_config_collision_dist_generic, + .rar_set = e1000e_rar_set_generic, +}; + +static const struct e1000_phy_operations ich8_phy_ops = { + .acquire = e1000_acquire_swflag_ich8lan, + .check_reset_block = e1000_check_reset_block_ich8lan, + .commit = NULL, + .get_cfg_done = e1000_get_cfg_done_ich8lan, + .get_cable_length = e1000e_get_cable_length_igp_2, + .read_reg = e1000e_read_phy_reg_igp, + .release = e1000_release_swflag_ich8lan, + .reset = e1000_phy_hw_reset_ich8lan, + .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, + .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, + .write_reg = e1000e_write_phy_reg_igp, +}; + +static const struct e1000_nvm_operations ich8_nvm_ops = { + 
.acquire = e1000_acquire_nvm_ich8lan, + .read = e1000_read_nvm_ich8lan, + .release = e1000_release_nvm_ich8lan, + .reload = e1000e_reload_nvm_generic, + .update = e1000_update_nvm_checksum_ich8lan, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate = e1000_validate_nvm_checksum_ich8lan, + .write = e1000_write_nvm_ich8lan, +}; + +const struct e1000_info e1000_ich8_info = { + .mac = e1000_ich8lan, + .flags = FLAG_HAS_WOL + | FLAG_IS_ICH + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 8, + .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_ich9_info = { + .mac = e1000_ich9lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 18, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_ich10_info = { + .mac = e1000_ich10lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 18, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch_info = { + .mac = e1000_pchlan, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS, + .pba = 26, + .max_hw_frame_size = 4096, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch2_info = { + .mac = e1000_pch2lan, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9018, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch_lpt_info = { + .mac = e1000_pch_lpt, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9018, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; diff --git a/addons/e1000e/src/3.10.108/ich8lan.h b/addons/e1000e/src/3.10.108/ich8lan.h new file mode 100644 index 00000000..80034a2b --- /dev/null +++ b/addons/e1000e/src/3.10.108/ich8lan.h @@ -0,0 +1,265 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_ICH8LAN_H_ +#define _E1000E_ICH8LAN_H_ + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_PR0 0x0074 + +/* Requires up to 10 seconds when MNG might be accessing part. */ +#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 + +#define ICH_CYCLE_READ 0 +#define ICH_CYCLE_WRITE 2 +#define ICH_CYCLE_ERASE 3 + +#define FLASH_GFPREG_BASE_MASK 0x1FFF +#define FLASH_SECTOR_ADDR_SHIFT 12 + +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_8K 8192 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ +/* FW established a valid mode */ +#define E1000_ICH_FWSM_FW_VALID 0x00008000 +#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ +#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 + +#define E1000_ICH_MNG_IAMT_MODE 0x2 + +#define E1000_FWSM_WLOCK_MAC_MASK 0x0380 +#define E1000_FWSM_WLOCK_MAC_SHIFT 7 + +/* Shared Receive Address Registers */ +#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) +#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8)) + +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_OFF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC000 +#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 +#define E1000_ICH_NVM_SIG_VALUE 0x80 + +#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 + +#define E1000_FEXTNVM_SW_CONFIG 1 +#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */ + +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000 +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000 + +#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 + +#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 + +#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL + +#define E1000_ICH_RAR_ENTRIES 7 +#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ +#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic 
*/ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ + +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 +#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 +#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 + +/* PHY Wakeup Registers and defines */ +#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) +#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) +#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) +#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) +#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) +#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) +#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) +#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) +#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) +#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) + +#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ +#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ +#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ +#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ +#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ +#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ +#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ + +#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ +#define HV_MUX_DATA_CTRL PHY_REG(776, 16) +#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 +#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 +#define HV_STATS_PAGE 778 +/* Half-duplex collision counts */ +#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */ +#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) +#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. */ +#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) +#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */ +#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) +#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */ +#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) +#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */ +#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) +#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ +#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) +#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */ +#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) + +#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ + +#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ +#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ + +/* SMBus Control Phy Register */ +#define CV_SMB_CTRL PHY_REG(769, 23) +#define CV_SMB_CTRL_FORCE_SMBUS 0x0001 + +/* SMBus Address Phy Register */ +#define HV_SMB_ADDR PHY_REG(768, 26) +#define HV_SMB_ADDR_MASK 0x007F +#define HV_SMB_ADDR_PEC_EN 0x0200 +#define HV_SMB_ADDR_VALID 0x0080 +#define HV_SMB_ADDR_FREQ_MASK 0x1100 +#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8 +#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12 + +/* Strapping Option Register - RO */ +#define E1000_STRAP 0x0000C +#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 +#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 +#define E1000_STRAP_SMT_FREQ_MASK 0x00003000 +#define E1000_STRAP_SMT_FREQ_SHIFT 12 + +/* OEM Bits Phy Register */ +#define HV_OEM_BITS PHY_REG(768, 25) +#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ +#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ +#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ + +/* KMRN Mode Control */ +#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) +#define HV_KMRN_MDIO_SLOW 0x0400 + +/* KMRN 
FIFO Control and Status */ +#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 + +/* PHY Power Management Control */ +#define HV_PM_CTRL PHY_REG(770, 17) +#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 + +#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ + +/* PHY Low Power Idle Control */ +#define I82579_LPI_CTRL PHY_REG(772, 20) +#define I82579_LPI_CTRL_100_ENABLE 0x2000 +#define I82579_LPI_CTRL_1000_ENABLE 0x4000 +#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 +#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80 + +/* Extended Management Interface (EMI) Registers */ +#define I82579_EMI_ADDR 0x10 +#define I82579_EMI_DATA 0x11 +#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ +#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */ +#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ +#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ +#define I82579_RX_CONFIG 0x3412 /* Receive configuration */ +#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ +#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ +#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ +#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ +#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ +#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ +#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ +#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ +#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ +#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ + +#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ +#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */ + +/* Intel Rapid Start Technology Support */ +#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70) +#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 +#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) +#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000 +#define I217_CGFREG PHY_REG(772, 29) +#define I217_CGFREG_ENABLE_MTA_RESET 0x0002 +#define I217_MEMPWR PHY_REG(772, 26) +#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 + +/* Receive Address Initial CRC Calculation */ +#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) + +/* Latency Tolerance Reporting */ +#define E1000_LTRV 0x000F8 +#define E1000_LTRV_SCALE_MAX 5 +#define E1000_LTRV_SCALE_FACTOR 5 +#define E1000_LTRV_REQ_SHIFT 15 +#define E1000_LTRV_NOSNOOP_SHIFT 16 +#define E1000_LTRV_SEND (1 << 30) + +/* Proprietary Latency Tolerance Reporting PCI Capability */ +#define E1000_PCI_LTR_CAP_LPT 0xA8 + +void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); +void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state); +void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); +void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); +void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); +void e1000_resume_workarounds_pchlan(struct e1000_hw *hw); +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); +s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data); +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data); +#endif /* _E1000E_ICH8LAN_H_ */ 
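As a quick sanity check of the register map above, the PHY_REG() macro packs a PHY page number and a register index into one address: the page is shifted left by PHY_PAGE_SHIFT and the low bits select the register within the page. Below is a minimal standalone sketch (not part of the vendored driver); MAX_PHY_REG_ADDRESS is assumed to be 0x1F, as in the e1000e PHY headers.

#include <stdio.h>

#define MAX_PHY_REG_ADDRESS 0x1F	/* assumed value, per e1000e PHY headers */
#define PHY_PAGE_SHIFT 5
#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
			    ((reg) & MAX_PHY_REG_ADDRESS))

int main(void)
{
	/* HV_LED_CONFIG is defined above as PHY_REG(768, 30) */
	printf("HV_LED_CONFIG = 0x%04x\n", PHY_REG(768, 30));	/* 0x601e */
	/* IGP3_VR_CTRL is defined above as PHY_REG(776, 18) */
	printf("IGP3_VR_CTRL  = 0x%04x\n", PHY_REG(776, 18));	/* 0x6112 */
	return 0;
}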
diff --git a/addons/e1000e/src/3.10.108/mac.c b/addons/e1000e/src/3.10.108/mac.c new file mode 100644 index 00000000..2480c109 --- /dev/null +++ b/addons/e1000e/src/3.10.108/mac.c @@ -0,0 +1,1801 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" + +/** + * e1000e_get_bus_info_pcie - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + struct e1000_adapter *adapter = hw->adapter; + u16 pcie_link_status, cap_offset; + + cap_offset = adapter->pdev->pcie_cap; + if (!cap_offset) { + bus->width = e1000_bus_width_unknown; + } else { + pci_read_config_word(adapter->pdev, + cap_offset + PCIE_LINK_STATUS, + &pcie_link_status); + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> + PCIE_LINK_WIDTH_SHIFT); + } + + mac->ops.set_lan_id(hw); + + return 0; +} + +/** + * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u32 reg; + + /* The status register reports the correct function number + * for the device regardless of function swap state. + */ + reg = er32(STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; +} + +/** + * e1000_set_lan_id_single_port - Set LAN id for a single port device + * @hw: pointer to the HW structure + * + * Sets the LAN function id to zero for a single port device. + **/ +void e1000_set_lan_id_single_port(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + + bus->func = 0; +} + +/** + * e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. 
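+ *
+ * (The table is E1000_VLAN_FILTER_TBL_SIZE 32-bit registers -- 128 in the
+ * e1000e headers -- one filter bit per possible 12-bit VLAN ID.)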
+ **/ +void e1000_clear_vfta_generic(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + e1e_flush(); + } +} + +/** + * e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. + **/ +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value) +{ + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value); + e1e_flush(); +} + +/** + * e1000e_init_rx_addrs - Initialize receive addresses + * @hw: pointer to the HW structure + * @rar_count: receive address registers + * + * Setup the receive address registers by setting the base receive address + * register to the device's MAC address and clearing all the other receive + * address registers to 0. + **/ +void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) +{ + u32 i; + u8 mac_addr[ETH_ALEN] = { 0 }; + + /* Setup the receive address */ + e_dbg("Programming MAC Address into RAR[0]\n"); + + hw->mac.ops.rar_set(hw, hw->mac.addr, 0); + + /* Zero out the other (rar_entry_count - 1) receive addresses */ + e_dbg("Clearing RAR[1-%u]\n", rar_count - 1); + for (i = 1; i < rar_count; i++) + hw->mac.ops.rar_set(hw, mac_addr, i); +} + +/** + * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr + * @hw: pointer to the HW structure + * + * Checks the nvm for an alternate MAC address. An alternate MAC address + * can be setup by pre-boot software and must be treated like a permanent + * address and must override the actual permanent MAC address. If an + * alternate MAC address is found it is programmed into RAR0, replacing + * the permanent address that was installed into RAR0 by the Si on reset. + * This function will return SUCCESS unless it encounters an error while + * reading the EEPROM. + **/ +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) +{ + u32 i; + s32 ret_val; + u16 offset, nvm_alt_mac_addr_offset, nvm_data; + u8 alt_mac_addr[ETH_ALEN]; + + ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data); + if (ret_val) + return ret_val; + + /* not supported on 82573 */ + if (hw->mac.type == e1000_82573) + return 0; + + ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, + &nvm_alt_mac_addr_offset); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if ((nvm_alt_mac_addr_offset == 0xFFFF) || + (nvm_alt_mac_addr_offset == 0x0000)) + /* There is no Alternate MAC Address */ + return 0; + + if (hw->bus.func == E1000_FUNC_1) + nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1; + for (i = 0; i < ETH_ALEN; i += 2) { + offset = nvm_alt_mac_addr_offset + (i >> 1); + ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + alt_mac_addr[i] = (u8)(nvm_data & 0xFF); + alt_mac_addr[i + 1] = (u8)(nvm_data >> 8); + } + + /* if multicast bit is set, the alternate address will not be used */ + if (is_multicast_ether_addr(alt_mac_addr)) { + e_dbg("Ignoring Alternate Mac Address with MC bit set\n"); + return 0; + } + + /* We have a valid alternate MAC address, and we want to treat it the + * same as the normal permanent MAC address stored by the HW into the + * RAR. Do this by mapping this address into RAR0.
+ */ + hw->mac.ops.rar_set(hw, alt_mac_addr, 0); + + return 0; +} + +/** + * e1000e_rar_set_generic - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. + **/ +void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); +} + +/** + * e1000_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. + **/ +static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... 
+ * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000e_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. + **/ +void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count) +{ + u32 hash_value, hash_bit, hash_reg; + int i; + + /* clear mta_shadow */ + memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); + + /* update mta_shadow from mc_addr_list */ + for (i = 0; (u32)i < mc_addr_count; i++) { + hash_value = e1000_hash_mc_addr(hw, mc_addr_list); + + hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1); + hash_bit = hash_value & 0x1F; + + hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit); + mc_addr_list += (ETH_ALEN); + } + + /* replace the entire MTA table */ + for (i = hw->mac.mta_reg_count - 1; i >= 0; i--) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]); + e1e_flush(); +} + +/** + * e1000e_clear_hw_cntrs_base - Clear base hardware counters + * @hw: pointer to the HW structure + * + * Clears the base hardware counters by reading the counter registers. + **/ +void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw) +{ + er32(CRCERRS); + er32(SYMERRS); + er32(MPC); + er32(SCC); + er32(ECOL); + er32(MCC); + er32(LATECOL); + er32(COLC); + er32(DC); + er32(SEC); + er32(RLEC); + er32(XONRXC); + er32(XONTXC); + er32(XOFFRXC); + er32(XOFFTXC); + er32(FCRUC); + er32(GPRC); + er32(BPRC); + er32(MPRC); + er32(GPTC); + er32(GORCL); + er32(GORCH); + er32(GOTCL); + er32(GOTCH); + er32(RNBC); + er32(RUC); + er32(RFC); + er32(ROC); + er32(RJC); + er32(TORL); + er32(TORH); + er32(TOTL); + er32(TOTH); + er32(TPR); + er32(TPT); + er32(MPTC); + er32(BPTC); +} + +/** + * e1000e_check_for_copper_link - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see if the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + **/ +s32 e1000e_check_for_copper_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + bool link; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return 0; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY.
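+ * Below, e1000e_phy_has_link_generic(hw, 1, 0, &link) requests a single
+ * poll of the PHY status register with no inter-read delay.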
+ */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) + return 0; /* No link detected */ + + mac->get_link_status = false; + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000e_check_downshift(hw); + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) + return -E1000_ERR_CONFIG; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) + e_dbg("Error configuring flow control\n"); + + return ret_val; +} + +/** + * e1000e_check_for_fiber_link - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && + !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return 0; + } + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } + + return 0; +} + +/** + * e1000e_check_for_serdes_link - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. 
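+ *
+ * (Unlike the fiber variant above, this version does not gate link
+ * forcing on the SWDPIN1 signal bit, and additionally derives link state
+ * from MAC synchronization -- the RXCW SYNCH/IV bits -- when
+ * auto-negotiation is disabled in TXCW.)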
+ **/ +s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return 0; + } + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + usleep_range(10, 20); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + usleep_range(10, 20); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg completed successfully.\n"); + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid codewords detected in autoneg.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + return 0; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. 
If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); + + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + + return 0; +} + +/** + * e1000e_setup_link_generic - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +s32 e1000e_setup_link_generic(struct e1000_hw *hw) +{ + s32 ret_val; + + /* In the case of the phy reset being blocked, we already have a link. + * We do not need to set it up again. + */ + if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) + return 0; + + /* If requested flow control is set to default, set flow control + * based on the EEPROM flow control settings. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + ret_val = e1000_set_default_fc_generic(hw); + if (ret_val) + return ret_val; + } + + /* Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Call the necessary media_type subroutine to configure the link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + /* Initialize the flow control address, type, and PAUSE timer + * registers to their default values. This is done even if flow + * control is disabled, because it does not hurt anything to + * initialize these registers. + */ + e_dbg("Initializing the Flow Control address, type and timer regs\n"); + ew32(FCT, FLOW_CONTROL_TYPE); + ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH); + ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW); + + ew32(FCTTV, hw->fc.pause_time); + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_commit_fc_settings_generic - Configure flow control + * @hw: pointer to the HW structure + * + * Write the flow control settings to the Transmit Config Word Register (TXCW) + * based on the flow control settings in e1000_mac_info. + **/ +static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 txcw; + + /* Check for a software override of the flow control settings, and + * setup the device accordingly. If auto-negotiation is enabled, then + * software will have to set the "PAUSE" bits to the correct value in + * the Transmit Config Word Register (TXCW) and re-start auto- + * negotiation. However, if auto-negotiation is disabled, then + * software will have to manually configure the two flow control enable + * bits in the CTRL register. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames).
+ * 2: Tx flow control is enabled (we can send pause frames but we + * do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control completely disabled by a software over-ride. */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is disabled + * by a software over-ride. Since there really isn't a way to + * advertise that we are capable of Rx Pause ONLY, we will + * advertise that we support both symmetric and asymmetric Rx + * PAUSE. Later, we will disable the adapter's ability to send + * PAUSE frames. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is disabled, + * by a software over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR); + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + break; + } + + ew32(TXCW, txcw); + mac->txcw = txcw; + + return 0; +} + +/** + * e1000_poll_fiber_serdes_link_generic - Poll for link up + * @hw: pointer to the HW structure + * + * Polls for link up by reading the status register, if link fails to come + * up with auto-negotiation, then the link is forced if a signal is detected. + **/ +static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 i, status; + s32 ret_val; + + /* If we have a signal (the cable is plugged in, or assumed true for + * serdes media) then poll for a "Link-Up" indication in the Device + * Status Register. Time-out if a link isn't seen in 500 milliseconds + * (Auto-negotiation should complete in less than 500 + * milliseconds even if the other end is doing it in SW). + */ + for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) { + usleep_range(10000, 20000); + status = er32(STATUS); + if (status & E1000_STATUS_LU) + break; + } + if (i == FIBER_LINK_UP_LIMIT) { + e_dbg("Never got a valid link from auto-neg!!!\n"); + mac->autoneg_failed = true; + /* AutoNeg failed to achieve a link, so we'll call + * mac->check_for_link. This routine will force the + * link up if we detect a signal. This will allow us to + * communicate with non-autonegotiating link partners. + */ + ret_val = mac->ops.check_for_link(hw); + if (ret_val) { + e_dbg("Error while checking for link\n"); + return ret_val; + } + mac->autoneg_failed = false; + } else { + mac->autoneg_failed = false; + e_dbg("Valid Link Found\n"); + } + + return 0; +} + +/** + * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes + * links. Upon successful setup, poll for link. + **/ +s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + + /* Take the link out of reset */ + ctrl &= ~E1000_CTRL_LRST; + + hw->mac.ops.config_collision_dist(hw); + + ret_val = e1000_commit_fc_settings_generic(hw); + if (ret_val) + return ret_val; + + /* Since auto-negotiation is enabled, take the link out of reset (the + * link will be in reset, because we previously reset the chip). This + * will restart auto-negotiation.
If auto-negotiation is successful + * then the link-up status bit will be set and the flow control enable + * bits (RFCE and TFCE) will be set according to their negotiated value. + */ + e_dbg("Auto-negotiation enabled\n"); + + ew32(CTRL, ctrl); + e1e_flush(); + usleep_range(1000, 2000); + + /* For these adapters, the SW definable pin 1 is set when the optics + * detect a signal. If we have a signal, then poll for a "Link-Up" + * indication. + */ + if (hw->phy.media_type == e1000_media_type_internal_serdes || + (er32(CTRL) & E1000_CTRL_SWDPIN1)) { + ret_val = e1000_poll_fiber_serdes_link_generic(hw); + } else { + e_dbg("No signal detected\n"); + } + + return ret_val; +} + +/** + * e1000e_config_collision_dist_generic - Configure collision distance + * @hw: pointer to the HW structure + * + * Configures the collision distance to the default value and is used + * during link setup. + **/ +void e1000e_config_collision_dist_generic(struct e1000_hw *hw) +{ + u32 tctl; + + tctl = er32(TCTL); + + tctl &= ~E1000_TCTL_COLD; + tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT; + + ew32(TCTL, tctl); + e1e_flush(); +} + +/** + * e1000e_set_fc_watermarks - Set flow control high/low watermarks + * @hw: pointer to the HW structure + * + * Sets the flow control high/low threshold (watermark) registers. If + * flow control XON frame transmission is enabled, then set XON frame + * transmission as well. + **/ +s32 e1000e_set_fc_watermarks(struct e1000_hw *hw) +{ + u32 fcrtl = 0, fcrth = 0; + + /* Set the flow control receive threshold registers. Normally, + * these registers will be set to a default threshold that may be + * adjusted later by the driver's runtime code. However, if the + * ability to transmit pause frames is not enabled, then these + * registers will be set to 0. + */ + if (hw->fc.current_mode & e1000_fc_tx_pause) { + /* We need to set up the Receive Threshold high and low water + * marks as well as (optionally) enabling the transmission of + * XON frames. + */ + fcrtl = hw->fc.low_water; + if (hw->fc.send_xon) + fcrtl |= E1000_FCRTL_XONE; + + fcrth = hw->fc.high_water; + } + ew32(FCRTL, fcrtl); + ew32(FCRTH, fcrth); + + return 0; +} + +/** + * e1000e_force_mac_fc - Force the MAC's flow control settings + * @hw: pointer to the HW structure + * + * Force the MAC's flow control settings. Sets the TFCE and RFCE bits in the + * device control register to reflect the adapter settings. TFCE and RFCE + * need to be explicitly set by software when a copper PHY is used because + * autonegotiation is managed by the PHY rather than the MAC. Software must + * also configure these bits when link is forced on a fiber connection. + **/ +s32 e1000e_force_mac_fc(struct e1000_hw *hw) +{ + u32 ctrl; + + ctrl = er32(CTRL); + + /* Because we didn't get link via the internal auto-negotiation + * mechanism (we either forced link or we got link via PHY + * auto-neg), we have to manually enable/disable transmit and + * receive flow control. + * + * The "Case" statement below enables/disables flow control + * according to the "hw->fc.current_mode" parameter. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause + * frames but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not receive pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No other values should be possible at this point.
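+ * + * For example, when hw->fc.current_mode == e1000_fc_rx_pause, the + * switch below clears TFCE and sets RFCE: the MAC honors PAUSE frames + * from the link partner but never transmits its own.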
+ */ + e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode); + + switch (hw->fc.current_mode) { + case e1000_fc_none: + ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE)); + break; + case e1000_fc_rx_pause: + ctrl &= (~E1000_CTRL_TFCE); + ctrl |= E1000_CTRL_RFCE; + break; + case e1000_fc_tx_pause: + ctrl &= (~E1000_CTRL_RFCE); + ctrl |= E1000_CTRL_TFCE; + break; + case e1000_fc_full: + ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ew32(CTRL, ctrl); + + return 0; +} + +/** + * e1000e_config_fc_after_link_up - Configures flow control after link + * @hw: pointer to the HW structure + * + * Checks the status of auto-negotiation after link up to ensure that the + * speed and duplex were not forced. If the link needed to be forced, then + * flow control needs to be forced also. If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = e1000e_force_mac_fc(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = e1000e_force_mac_fc(hw); + } + + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg); + if (ret_val) + return ret_val; + + if (!(mii_status_reg & BMSR_ANEGCOMPLETE)) { + e_dbg("Copper PHY and Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1e_rphy(hw, MII_LPA, &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. 
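+ * (The same resolution table is applied again in the SerDes branch + * further below, with the bits read from the PCS registers instead of + * the PHY.)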
+ * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && + (mii_nway_lp_ability_reg & LPA_PAUSE_CAP)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && + (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) && + (mii_nway_lp_ability_reg & LPA_PAUSE_CAP) && + (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) { + hw->fc.current_mode = e1000_fc_tx_pause; + e_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && + (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) && + !(mii_nway_lp_ability_reg & LPA_PAUSE_CAP) && + (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + e_dbg("Flow Control = NONE.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000e_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured.
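+ * The logic mirrors the copper branch above; only the source of the + * PAUSE/ASM_DIR bits differs (PCS_ANADV/PCS_LPAB instead of the MII + * advertisement registers).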
+ */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) && + mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ + pcs_status_reg = er32(PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + e_dbg("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto_Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. + */ + pcs_adv_reg = er32(PCS_ANADV); + pcs_lp_ability_reg = er32(PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + e_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. 
+ * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + e_dbg("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + pcs_ctrl_reg = er32(PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + ew32(PCS_LCTL, pcs_ctrl_reg); + + ret_val = e1000e_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) + *speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + if (status & E1000_STATUS_FD) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + e_dbg("%u Mbps, %s Duplex\n", + *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10, + *duplex == FULL_DUPLEX ? "Full" : "Half"); + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. + **/ +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw __always_unused + *hw, u16 *speed, u16 *duplex) +{ + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return 0; +} + +/** + * e1000e_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = er32(SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usleep_range(50, 100); + i++; + } + + if (i == timeout) { + e_dbg("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + + /* Get the FW semaphore. 
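+ * Set SWESMBI and read it back; the bit only latches if firmware is + * not holding the semaphore, so a successful read-back means the + * driver owns it.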
*/ + for (i = 0; i < timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + usleep_range(50, 100); + } + + if (i == timeout) { + /* Release semaphores */ + e1000e_put_hw_semaphore(hw); + e_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void e1000e_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + ew32(SWSM, swsm); +} + +/** + * e1000e_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. + **/ +s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (er32(EECD) & E1000_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + e_dbg("Auto read by HW from NVM has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000e_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return 0; +} + +/** + * e1000e_id_led_init_generic - Initialize ID LED config + * @hw: pointer to the HW structure + * + * Reads the ID LED settings from the NVM and derives the LEDCTL values + * used for the driver's LED modes 1 and 2. + **/ +s32 e1000e_id_led_init_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = er32(LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + + return 0; +} + +/** + * e1000e_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the
current state + * of the LED so it can be later restored. + **/ +s32 e1000e_setup_led_generic(struct e1000_hw *hw) +{ + u32 ledctl; + + if (hw->mac.ops.setup_led != e1000e_setup_led_generic) + return -E1000_ERR_CONFIG; + + if (hw->phy.media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->phy.media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->mac.ledctl_mode1); + } + + return 0; +} + +/** + * e1000e_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 e1000e_cleanup_led_generic(struct e1000_hw *hw) +{ + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000e_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * + * Blink the LEDs which are set to be on. + **/ +s32 e1000e_blink_led_generic(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } + + ew32(LEDCTL, ledctl_blink); + + return 0; +} + +/** + * e1000e_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000e_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 e1000e_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_set_pcie_no_snoop - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. 
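+ * All PCIE_NO_SNOOP_ALL bits are cleared first, so the caller's bitmap + * fully replaces any previously configured no-snoop events.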
+ **/ +void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + if (no_snoop) { + gcr = er32(GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + ew32(GCR, gcr); + } +} + +/** + * e1000e_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 e1000e_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + ew32(CTRL, ctrl); + + while (timeout) { + if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) + break; + usleep_range(100, 200); + timeout--; + } + + if (!timeout) { + e_dbg("Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + + return 0; +} + +/** + * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +void e1000e_reset_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + if (!mac->adaptive_ifs) { + e_dbg("Not in Adaptive IFS mode!\n"); + return; + } + + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + + mac->in_ifs_mode = false; + ew32(AIT, 0); +} + +/** + * e1000e_update_adaptive - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. + **/ +void e1000e_update_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + if (!mac->adaptive_ifs) { + e_dbg("Not in Adaptive IFS mode!\n"); + return; + } + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = true; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + ew32(AIT, mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = false; + ew32(AIT, 0); + } + } +} diff --git a/addons/e1000e/src/3.10.108/mac.h b/addons/e1000e/src/3.10.108/mac.h new file mode 100644 index 00000000..a61fee40 --- /dev/null +++ b/addons/e1000e/src/3.10.108/mac.h @@ -0,0 +1,74 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_MAC_H_ +#define _E1000E_MAC_H_ + +s32 e1000e_blink_led_generic(struct e1000_hw *hw); +s32 e1000e_check_for_copper_link(struct e1000_hw *hw); +s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); +s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); +s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); +s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); +s32 e1000e_disable_pcie_master(struct e1000_hw *hw); +s32 e1000e_force_mac_fc(struct e1000_hw *hw); +s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); +s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); +void e1000_set_lan_id_single_port(struct e1000_hw *hw); +s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); +s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +s32 e1000e_id_led_init_generic(struct e1000_hw *hw); +s32 e1000e_led_on_generic(struct e1000_hw *hw); +s32 e1000e_led_off_generic(struct e1000_hw *hw); +void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); +s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); +s32 e1000e_setup_led_generic(struct e1000_hw *hw); +s32 e1000e_setup_link_generic(struct e1000_hw *hw); +s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw); +s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); + +void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); +void e1000_clear_vfta_generic(struct e1000_hw *hw); +void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +void e1000e_put_hw_semaphore(struct e1000_hw *hw); +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +void e1000e_reset_adaptive(struct e1000_hw *hw); +void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); +void e1000e_update_adaptive(struct e1000_hw *hw); +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); + +void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); +void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); +void e1000e_config_collision_dist_generic(struct e1000_hw *hw); + +#endif diff --git a/addons/e1000e/src/3.10.108/manage.c b/addons/e1000e/src/3.10.108/manage.c new file mode 100644 index 00000000..e4b0f1ef --- /dev/null +++ b/addons/e1000e/src/3.10.108/manage.c @@ -0,0 +1,351 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum over a buffer of the specified length. The + * checksum calculated is returned. + **/ +static u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8)(0 - sum); +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command has completed. It busy-waits + * if the previous command has not completed. + **/ +static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + if (!hw->mac.arc_subsystem_valid) { + e_dbg("ARC subsystem not valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = er32(HICR); + if (!(hicr & E1000_HICR_EN)) { + e_dbg("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = er32(HICR); + if (!(hicr & E1000_HICR_C)) + break; + mdelay(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + e_dbg("Previous command timeout failed.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return 0; +} + +/** + * e1000e_check_mng_mode_generic - Generic check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool e1000e_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm = er32(FWSM); + + return (fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); +} + +/** + * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + **/ +bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + hw->mac.tx_pkt_filtering = true; + + /* No manageability, no filtering */ + if (!hw->mac.ops.check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* If we can't read from the host interface for whatever + * reason, disable filtering.
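+ * The cookie checks below take the opposite, fail-safe stance: if the + * checksum or signature looks wrong, filtering is left enabled.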
+ */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* Read in the header. Length and offset are in dwords. */ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, + offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. + */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = true; + return hw->mac.tx_pkt_filtering; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) + hw->mac.tx_pkt_filtering = false; + + return hw->mac.tx_pkt_filtering; +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after computing its checksum. + **/ +static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, *((u32 *)hdr + i)); + e1e_flush(); + } + + return 0; +} + +/** + * e1000_mng_host_if_write - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the given offset on the host + * interface. It handles alignment so the writes are done in the most + * efficient way, and it accumulates the sum of the buffer in the *sum + * parameter. + **/ +static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + /* sum is the plain sum of the data, not a checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block into the + * ram area.
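+ * Data moves a dword at a time: the loop below copies the aligned + * dwords, and any trailing partial dword is zero-padded before being + * written out.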
+ */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + + return 0; +} + +/** + * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. + **/ +s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = er32(HICR); + ew32(HICR, hicr | E1000_HICR_C); + + return 0; +} + +/** + * e1000e_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.has_fwsm) { + fwsm = er32(FWSM); + factps = er32(FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) + return true; + } else if ((hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82583)) { + u16 data; + + factps = er32(FACTPS); + e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((data & E1000_NVM_INIT_CTRL2_MNGM) == + (e1000_mng_mode_pt << 13))) + return true; + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + return true; + } + + return false; +} diff --git a/addons/e1000e/src/3.10.108/manage.h b/addons/e1000e/src/3.10.108/manage.h new file mode 100644 index 00000000..326897c2 --- /dev/null +++ b/addons/e1000e/src/3.10.108/manage.h @@ -0,0 +1,72 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_MANAGE_H_ +#define _E1000E_MANAGE_H_ + +bool e1000e_check_mng_mode_generic(struct e1000_hw *hw); +bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); +s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); +bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define E1000_HICR_C 0x02 +#define E1000_HICR_SV 0x04 /* Status Validity */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +#endif diff --git a/addons/e1000e/src/3.10.108/netdev.c b/addons/e1000e/src/3.10.108/netdev.c new file mode 100644 index 00000000..a27e3bcc --- /dev/null +++ b/addons/e1000e/src/3.10.108/netdev.c @@ -0,0 +1,7043 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> +#include <linux/pagemap.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/tcp.h> +#include <linux/ipv6.h> +#include <linux/slab.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/cpu.h> +#include <linux/smp.h> +#include <linux/pm_qos.h> +#include <linux/pm_runtime.h> +#include <linux/aer.h> +#include <linux/prefetch.h> + +#include "e1000.h" + +#define DRV_EXTRAVERSION "-k" + +#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION +char e1000e_driver_name[] = "e1000e"; +const char e1000e_driver_version[] = DRV_VERSION; + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); + +static const struct e1000_info *e1000_info_tbl[] = { + [board_82571] = &e1000_82571_info, + [board_82572] = &e1000_82572_info, + [board_82573] = &e1000_82573_info, + [board_82574] = &e1000_82574_info, + [board_82583] = &e1000_82583_info, + [board_80003es2lan] = &e1000_es2_info, + [board_ich8lan] = &e1000_ich8_info, + [board_ich9lan] = &e1000_ich9_info, + [board_ich10lan] = &e1000_ich10_info, + [board_pchlan] = &e1000_pch_info, + [board_pch2lan] = &e1000_pch2_info, + [board_pch_lpt] = &e1000_pch_lpt_info, +}; + +struct e1000_reg_info { + u32 ofs; + char *name; +}; + +static const struct e1000_reg_info e1000_reg_info_tbl[] = { + /* General Registers */ + {E1000_CTRL, "CTRL"}, + {E1000_STATUS, "STATUS"}, + {E1000_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {E1000_ICR, "ICR"}, + + /* Rx Registers */ + {E1000_RCTL, "RCTL"}, + {E1000_RDLEN(0), "RDLEN"}, + {E1000_RDH(0), "RDH"}, + {E1000_RDT(0), "RDT"}, + {E1000_RDTR, "RDTR"}, + {E1000_RXDCTL(0), "RXDCTL"}, + {E1000_ERT, "ERT"}, + {E1000_RDBAL(0), "RDBAL"}, + {E1000_RDBAH(0), "RDBAH"}, + {E1000_RDFH, "RDFH"}, + {E1000_RDFT, "RDFT"}, + {E1000_RDFHS, "RDFHS"}, + {E1000_RDFTS, "RDFTS"}, + {E1000_RDFPC, "RDFPC"}, + + /* Tx Registers */ + {E1000_TCTL, "TCTL"}, + {E1000_TDBAL(0), "TDBAL"}, + {E1000_TDBAH(0), "TDBAH"}, + {E1000_TDLEN(0), "TDLEN"}, + {E1000_TDH(0), "TDH"}, + {E1000_TDT(0), "TDT"}, + {E1000_TIDV, "TIDV"}, + {E1000_TXDCTL(0), "TXDCTL"}, + {E1000_TADV, "TADV"}, + {E1000_TARC(0), "TARC"}, + {E1000_TDFH, "TDFH"}, + {E1000_TDFT, "TDFT"}, + {E1000_TDFHS, "TDFHS"}, + {E1000_TDFTS, "TDFTS"}, + {E1000_TDFPC, "TDFPC"}, + + /* List Terminator */ + {0, NULL} +}; + +/** + * e1000_regdump - register printout routine + * @hw: pointer to the HW structure + * @reginfo: pointer to the register info table + **/ +static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) +{ + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case E1000_RXDCTL(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_RXDCTL(n)); + break; + case E1000_TXDCTL(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_TXDCTL(n)); + break; + case E1000_TARC(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_TARC(n)); + break; + default: + pr_info("%-15s %08x\n", + reginfo->name, __er32(hw, reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); + pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); +} + +static void e1000e_dump_ps_pages(struct e1000_adapter *adapter, + struct e1000_buffer *bi) +{ + int i; + struct e1000_ps_page *ps_page; + + for (i = 0; i < adapter->rx_ps_pages; i++) { + ps_page = &bi->ps_pages[i]; + + if (ps_page->page) {
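+ /* hex-dump the entire page backing this packet-split buffer */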
pr_info("packet dump for ps_page %d:\n", i); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, page_address(ps_page->page), + PAGE_SIZE, true); + } + } +} + +/** + * e1000e_dump - Print registers, Tx-ring and Rx-ring + * @adapter: board private structure + **/ +static void e1000e_dump(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_reg_info *reginfo; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc; + struct my_u0 { + __le64 a; + __le64 b; + } *u0; + struct e1000_buffer *buffer_info; + struct e1000_ring *rx_ring = adapter->rx_ring; + union e1000_rx_desc_packet_split *rx_desc_ps; + union e1000_rx_desc_extended *rx_desc; + struct my_u1 { + __le64 a; + __le64 b; + __le64 c; + __le64 d; + } *u1; + u32 staterr; + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + pr_info("Device Name state trans_start last_rx\n"); + pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, + netdev->state, netdev->trans_start, netdev->last_rx); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; + reginfo->name; reginfo++) { + e1000_regdump(hw, reginfo); + } + + /* Print Tx Ring Summary */ + if (!netdev || !netif_running(netdev)) + return; + + dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", + 0, tx_ring->next_to_use, tx_ring->next_to_clean, + (unsigned long long)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp); + + /* Print Tx Ring */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n"); + 
pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n"); + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n", + (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : + ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), + i, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->length, buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + buffer_info->skb->len, true); + } + + /* Print Rx Ring Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + pr_info(" %5d %5X %5X\n", + 0, rx_ring->next_to_use, rx_ring->next_to_clean); + + /* Print Rx Ring */ + if (!netif_msg_rx_status(adapter)) + return; + + dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); + switch (adapter->rx_ps_pages) { + case 1: + case 2: + case 3: + /* [Extended] Packet Split Receive Descriptor Format + * + * +-----------------------------------------------------+ + * 0 | Buffer Address 0 [63:0] | + * +-----------------------------------------------------+ + * 8 | Buffer Address 1 [63:0] | + * +-----------------------------------------------------+ + * 16 | Buffer Address 2 [63:0] | + * +-----------------------------------------------------+ + * 24 | Buffer Address 3 [63:0] | + * +-----------------------------------------------------+ + */ + pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n"); + /* [Extended] Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 13 12 8 7 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS | + * | Checksum | Ident | | Queue | | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n"); + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + buffer_info = &rx_ring->buffer_info[i]; + rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); + u1 = (struct my_u1 *)rx_desc_ps; + staterr = + le32_to_cpu(rx_desc_ps->wb.middle.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", + "RWB", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long 
long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + buffer_info->skb, next_desc); + } else { + pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n", + "R ", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + (unsigned long long)buffer_info->dma, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter)) + e1000e_dump_ps_pages(adapter, + buffer_info); + } + } + break; + default: + case 0: + /* Extended Receive Descriptor (Read) Format + * + * +-----------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * 8 | Reserved | + * +-----------------------------------------------------+ + */ + pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n"); + /* Extended Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 24 23 4 3 0 + * +------------------------------------------------------+ + * | RSS Hash | | | | + * 0 +-------------------+ Rsvd | Reserved | MRQ RSS | + * | Packet | IP | | | Type | + * | Checksum | Ident | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + + buffer_info = &rx_ring->buffer_info[i]; + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + u1 = (struct my_u1 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", + "RWB", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + buffer_info->skb, next_desc); + } else { + pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n", + "R ", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)buffer_info->dma, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, + 1, + buffer_info->skb->data, + adapter->rx_buffer_len, + true); + } + } + } +} + +/** + * e1000_desc_unused - calculate if we have unused descriptors + **/ +static int e1000_desc_unused(struct e1000_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp + * @adapter: board private structure + * @hwtstamps: time stamp structure to update + * @systim: unsigned 64bit system time value. + * + * Convert the system time value stored in the RX/TXSTMP registers into a + * hwtstamp which can be used by the upper level time stamping functions. + * + * The 'systim_lock' spinlock is used to protect the consistency of the + * system time value. This is needed because reading the 64 bit time + * value involves reading two 32 bit registers. 
The first read latches the + * value. + **/ +static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + u64 ns; + unsigned long flags; + + spin_lock_irqsave(&adapter->systim_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, systim); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +/** + * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp + * @adapter: board private structure + * @status: descriptor extended error and status field + * @skb: particular skb to include time stamp + * + * If the time stamp is valid, convert it into the timecounter ns value + * and store that result into the shhwtstamps structure which is passed + * up the network stack. + **/ +static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status, + struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u64 rxstmp; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) || + !(status & E1000_RXDEXT_STATERR_TST) || + !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + /* The Rx time stamp registers contain the time stamp. No other + * received packet will be time stamped until the Rx time stamp + * registers are read. Because only one packet can be time stamped + * at a time, the register values must belong to this packet and + * therefore none of the other additional attributes need to be + * compared. + */ + rxstmp = (u64)er32(RXSTMPL); + rxstmp |= (u64)er32(RXSTMPH) << 32; + e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp); + + adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP; +} + +/** + * e1000_receive_skb - helper function to handle Rx indications + * @adapter: board private structure + * @netdev: pointer to netdev struct + * @skb: pointer to sk_buff to be indicated to stack + * @staterr: descriptor extended error and status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + **/ +static void e1000_receive_skb(struct e1000_adapter *adapter, + struct net_device *netdev, struct sk_buff *skb, + u32 staterr, __le16 vlan) +{ + u16 tag = le16_to_cpu(vlan); + + e1000e_rx_hwtstamp(adapter, staterr, skb); + + skb->protocol = eth_type_trans(skb, netdev); + + if (staterr & E1000_RXD_STAT_VP) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); + + napi_gro_receive(&adapter->napi, skb); +} + +/** + * e1000_rx_checksum - Receive Checksum Offload + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @skb: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + struct sk_buff *skb) +{ + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* Rx checksum disabled */ + if (!(adapter->netdev->features & NETIF_F_RXCSUM)) + return; + + /* Ignore Checksum bit is set */ + if (status & E1000_RXD_STAT_IXSM) + return; + + /* TCP/UDP checksum error bit or IP checksum error bit is set */ + if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + + /* TCP/UDP Checksum has not been calculated */ + if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_good++; +} + 
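+#if 0 /* Illustrative sketch, not part of the driver and never compiled: + * a standalone demonstration of the circular-ring arithmetic used by + * e1000_desc_unused() above. demo_desc_unused() and the sample indices + * are invented for illustration; only the arithmetic mirrors the driver. + * One descriptor slot is always left unused so that + * next_to_clean == next_to_use can only mean "ring empty", never "full". + */ +#include <assert.h> + +static int demo_desc_unused(int count, int next_to_clean, int next_to_use) +{ + if (next_to_clean > next_to_use) + return next_to_clean - next_to_use - 1; + return count + next_to_clean - next_to_use - 1; +} + +int main(void) +{ + assert(demo_desc_unused(256, 10, 200) == 65); /* 256 + 10 - 200 - 1 */ + assert(demo_desc_unused(256, 200, 10) == 189); /* 200 - 10 - 1 */ + assert(demo_desc_unused(256, 0, 0) == 255); /* one slot always unused */ + return 0; +} +#endif + 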
+static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + s32 ret_val = __ew32_prepare(hw); + + writel(i, rx_ring->tail); + + if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { + u32 rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e_err("ME firmware caused invalid RDT - resetting\n"); + schedule_work(&adapter->reset_task); + } +} + +static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + s32 ret_val = __ew32_prepare(hw); + + writel(i, tx_ring->tail); + + if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { + u32 tctl = er32(TCTL); + ew32(TCTL, tctl & ~E1000_TCTL_EN); + e_err("ME firmware caused invalid TDT - resetting\n"); + schedule_work(&adapter->reset_task); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: Rx descriptor ring + **/ +static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring, + int cleaned_count, gfp_t gfp) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_extended *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto map_skb; + } + + skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); + if (!skb) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; +map_skb: + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + dev_err(&pdev->dev, "Rx DMA map failed\n"); + adapter->rx_dma_failed++; + break; + } + + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, i); + else + writel(i, rx_ring->tail); + } + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + rx_ring->next_to_use = i; +} + +/** + * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @rx_ring: Rx descriptor ring + **/ +static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring, + int cleaned_count, gfp_t gfp) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_packet_split *rx_desc; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &buffer_info->ps_pages[j]; + if (j >= adapter->rx_ps_pages) { + /* all unused desc entries get hw null ptr */ + rx_desc->read.buffer_addr[j + 1] = + ~cpu_to_le64(0); + continue; + } + if (!ps_page->page) { + ps_page->page = alloc_page(gfp); + if (!ps_page->page) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + ps_page->dma = dma_map_page(&pdev->dev, + ps_page->page, + 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + ps_page->dma)) { + dev_err(&adapter->pdev->dev, + "Rx DMA page map failed\n"); + adapter->rx_dma_failed++; + goto no_buffers; + } + } + /* Refresh the desc even if buffer_addrs + * didn't change because each write-back + * erases this info. + */ + rx_desc->read.buffer_addr[j + 1] = + cpu_to_le64(ps_page->dma); + } + + skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, + gfp); + + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, + adapter->rx_ps_bsize0, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + dev_err(&pdev->dev, "Rx DMA map failed\n"); + adapter->rx_dma_failed++; + /* cleanup skb */ + dev_kfree_skb_any(skb); + buffer_info->skb = NULL; + break; + } + + rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); + + if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, i << 1); + else + writel(i << 1, rx_ring->tail); + } + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + +no_buffers: + rx_ring->next_to_use = i; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @rx_ring: Rx descriptor ring + * @cleaned_count: number of buffers to allocate this pass + **/ + +static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring, + int cleaned_count, gfp_t gfp) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_extended *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = 256 - 16; /* for skb_reserve */ + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto check_page; + } + + skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; +check_page: + /* allocate a new page if necessary */ + if (!buffer_info->page) { + buffer_info->page = alloc_page(gfp); + if (unlikely(!buffer_info->page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->page, 0, + PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
+ */ + wmb(); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, i); + else + writel(i, rx_ring->tail); + } +} + +static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss, + struct sk_buff *skb) +{ + if (netdev->features & NETIF_F_RXHASH) + skb->rxhash = le32_to_cpu(rss); +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack + * @rx_ring: Rx descriptor ring + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, + int work_to_do) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + union e1000_rx_desc_extended *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length, staterr; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + skb = buffer_info->skb; + buffer_info->skb = NULL; + + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->wb.upper.length); + + /* !EOP means multiple descriptors were used to store a single + * packet, if that's the case we need to toss it. 
In fact, we + * need to toss every packet with the EOP bit clear and the + * next frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) + adapter->flags2 |= FLAG2_IS_DISCARDING; + + if (adapter->flags2 & FLAG2_IS_DISCARDING) { + /* All receives must fit into a single buffer */ + e_dbg("Receive packet consumed multiple buffers\n"); + /* recycle */ + buffer_info->skb = skb; + if (staterr & E1000_RXD_STAT_EOP) + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + goto next_desc; + } + + if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + + /* adjust length to remove Ethernet CRC */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { + /* If configured to store CRC, don't subtract FCS, + * but keep the FCS bytes out of the total_rx_bytes + * counter + */ + if (netdev->features & NETIF_F_RXFCS) + total_rx_bytes -= 4; + else + length -= 4; + } + + total_rx_bytes += length; + total_rx_packets++; + + /* code added for copybreak, this should improve + * performance for small packets with large amounts + * of reassembly being done in the stack + */ + if (length < copybreak) { + struct sk_buff *new_skb = + netdev_alloc_skb_ip_align(netdev, length); + if (new_skb) { + skb_copy_to_linear_data_offset(new_skb, + -NET_IP_ALIGN, + (skb->data - + NET_IP_ALIGN), + (length + + NET_IP_ALIGN)); + /* save the skb in buffer_info as good */ + buffer_info->skb = skb; + skb = new_skb; + } + /* else just continue with the old one */ + } + /* end copybreak code */ + skb_put(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, staterr, skb); + + e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); + + e1000_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.upper.vlan); + +next_desc: + rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(rx_ring, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +static void e1000_put_txbuf(struct e1000_ring *tx_ring, + struct e1000_buffer *buffer_info) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; +} + +static void e1000_print_hw_hang(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + print_hang_task); + struct net_device *netdev = adapter->netdev; + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int i = tx_ring->next_to_clean; + unsigned int eop = tx_ring->buffer_info[i].next_to_watch; + struct e1000_tx_desc 
*eop_desc = E1000_TX_DESC(*tx_ring, eop); + struct e1000_hw *hw = &adapter->hw; + u16 phy_status, phy_1000t_status, phy_ext_status; + u16 pci_status; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { + /* May be block on write-back, flush and detect again + * flush pending descriptor writebacks to memory + */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + /* execute the writes immediately */ + e1e_flush(); + /* Due to rare timing issues, write to TIDV again to ensure + * the write is successful + */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + /* execute the writes immediately */ + e1e_flush(); + adapter->tx_hang_recheck = true; + return; + } + /* Real hang detected */ + adapter->tx_hang_recheck = false; + netif_stop_queue(netdev); + + e1e_rphy(hw, MII_BMSR, &phy_status); + e1e_rphy(hw, MII_STAT1000, &phy_1000t_status); + e1e_rphy(hw, MII_ESTATUS, &phy_ext_status); + + pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); + + /* detected Hardware unit hang */ + e_err("Detected Hardware Unit Hang:\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]:\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n" + "MAC Status <%x>\n" + "PHY Status <%x>\n" + "PHY 1000BASE-T Status <%x>\n" + "PHY Extended Status <%x>\n" + "PCI Status <%x>\n", + readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, + tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, + eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), + phy_status, phy_1000t_status, phy_ext_status, pci_status); + + /* Suggest workaround for known h/w issue */ + if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) + e_err("Try turning off Tx pause (flow control) via ethtool\n"); +} + +/** + * e1000e_tx_hwtstamp_work - check for Tx time stamp + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. The timestamp must + * be for this skb because only one such packet is allowed in the queue. 
+ */ +static void e1000e_tx_hwtstamp_work(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, + tx_hwtstamp_work); + struct e1000_hw *hw = &adapter->hw; + + if (!adapter->tx_hwtstamp_skb) + return; + + if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { + struct skb_shared_hwtstamps shhwtstamps; + u64 txstmp; + + txstmp = er32(TXSTMPL); + txstmp |= (u64)er32(TXSTMPH) << 32; + + e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp); + + skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps); + dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + adapter->tx_hwtstamp_skb = NULL; + } else { + /* reschedule to check later */ + schedule_work(&adapter->tx_hwtstamp_work); + } +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @tx_ring: Tx descriptor ring + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + rmb(); /* read buffer_info after eop_desc */ + for (; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + } + + e1000_put_txbuf(tx_ring, buffer_info); + tx_desc->upper.data = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + + if (i == tx_ring->next_to_use) + break; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + +#define TX_WAKE_THRESHOLD 32 + if (count && netif_carrier_ok(netdev) && + e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->state))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware; this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[i].time_stamp && + time_after(jiffies, tx_ring->buffer_info[i].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) + schedule_work(&adapter->print_hang_task); + else + adapter->tx_hang_recheck = false; + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split + * @rx_ring: Rx descriptor ring + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, + int work_to_do) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + union e1000_rx_desc_packet_split *rx_desc, *next_rxd; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info, *next_buffer; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + u32 length, staterr; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + if (*work_done >= work_to_do) + break; + (*work_done)++; + skb = buffer_info->skb; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + /* in the packet split case this is header only */ + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_PS(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_bsize0, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + /* see !EOP comment in other Rx routine */ + if (!(staterr & E1000_RXD_STAT_EOP)) + adapter->flags2 |= FLAG2_IS_DISCARDING; + + if (adapter->flags2 & FLAG2_IS_DISCARDING) { + e_dbg("Packet Split buffers didn't pick up the full packet\n"); + dev_kfree_skb_irq(skb); + if (staterr & E1000_RXD_STAT_EOP) + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + goto next_desc; + } + + if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + length = le16_to_cpu(rx_desc->wb.middle.length0); + + if (!length) { + e_dbg("Last part of the packet spanning multiple descriptors\n"); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + /* Good Receive */ + skb_put(skb, length); + + { + /* this looks ugly, but it seems compiler issues make + * it more efficient than reusing j + */ + int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); + + /* page alloc/put takes too long and affects small + * packet throughput, so unsplit small packets and + * save the alloc/put; this is only valid in softirq + * (napi) context, where kmap_atomic can be called + */ + if (l1 && (l1 <= copybreak) && + ((length + l1) <=
adapter->rx_ps_bsize0)) { + u8 *vaddr; + + ps_page = &buffer_info->ps_pages[0]; + + /* there is no documentation about how to call + * kmap_atomic, so we can't hold the mapping + * very long + */ + dma_sync_single_for_cpu(&pdev->dev, + ps_page->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + vaddr = kmap_atomic(ps_page->page); + memcpy(skb_tail_pointer(skb), vaddr, l1); + kunmap_atomic(vaddr); + dma_sync_single_for_device(&pdev->dev, + ps_page->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + + /* remove the CRC */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { + if (!(netdev->features & NETIF_F_RXFCS)) + l1 -= 4; + } + + skb_put(skb, l1); + goto copydone; + } /* if */ + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + length = le16_to_cpu(rx_desc->wb.upper.length[j]); + if (!length) + break; + + ps_page = &buffer_info->ps_pages[j]; + dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + ps_page->dma = 0; + skb_fill_page_desc(skb, j, ps_page->page, 0, length); + ps_page->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; + } + + /* strip the ethernet crc, problem is we're using pages now so + * this whole operation can get a little cpu intensive + */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { + if (!(netdev->features & NETIF_F_RXFCS)) + pskb_trim(skb, skb->len - 4); + } + +copydone: + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_rx_checksum(adapter, staterr, skb); + + e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); + + if (rx_desc->wb.upper.header_status & + cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) + adapter->rx_hdr_split++; + + e1000_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.middle.vlan); + +next_desc: + rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); + buffer_info->skb = NULL; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(rx_ring, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_consume_page - helper function + **/ +static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @rx_ring: Rx descriptor ring + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, + int work_to_do) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_extended *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length, staterr; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct skb_shared_info *shinfo; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + staterr = 
le32_to_cpu(rx_desc->wb.upper.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + rmb(); /* read descriptor and rx_buffer_info after status DD */ + + skb = buffer_info->skb; + buffer_info->skb = NULL; + + ++i; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->wb.upper.length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((staterr & E1000_RXD_STAT_EOP) && + ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL)))) { + /* recycle both page and skb */ + buffer_info->skb = skb; + /* an error means any chain goes out the window too */ + if (rx_ring->rx_skb_top) + dev_kfree_skb_irq(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } +#define rxtop (rx_ring->rx_skb_top) + if (!(staterr & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = skb; + skb_fill_page_desc(rxtop, 0, buffer_info->page, + 0, length); + } else { + /* this is the middle of a chain */ + shinfo = skb_shinfo(rxtop); + skb_fill_page_desc(rxtop, shinfo->nr_frags, + buffer_info->page, 0, + length); + /* re-use the skb, only consumed the page */ + buffer_info->skb = skb; + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + shinfo = skb_shinfo(rxtop); + skb_fill_page_desc(rxtop, shinfo->nr_frags, + buffer_info->page, 0, + length); + /* re-use the current skb, we only consumed the + * page + */ + buffer_info->skb = skb; + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + if (length <= copybreak && + skb_tailroom(skb) >= length) { + u8 *vaddr; + vaddr = kmap_atomic(buffer_info->page); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->page + */ + skb_put(skb, length); + } else { + skb_fill_page_desc(skb, 0, + buffer_info->page, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, staterr, skb); + + e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + /* eth type trans needs skb->data to point to something */ + if (!pskb_may_pull(skb, ETH_HLEN)) { + e_err("pskb_may_pull failed.\n"); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + e1000_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.upper.vlan); + +next_desc: + rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(rx_ring, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = 
e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: Rx descriptor ring + **/ +static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct pci_dev *pdev = adapter->pdev; + unsigned int i, j; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + if (adapter->clean_rx == e1000_clean_rx_irq) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) + dma_unmap_page(&pdev->dev, buffer_info->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + else if (adapter->clean_rx == e1000_clean_rx_irq_ps) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_bsize0, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->page) { + put_page(buffer_info->page); + buffer_info->page = NULL; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &buffer_info->ps_pages[j]; + if (!ps_page->page) + break; + dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + ps_page->dma = 0; + put_page(ps_page->page); + ps_page->page = NULL; + } + } + + /* there also may be some cached data from a chained receive */ + if (rx_ring->rx_skb_top) { + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + } + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + + writel(0, rx_ring->head); + if (rx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, 0); + else + writel(0, rx_ring->tail); +} + +static void e1000e_downshift_workaround(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + downshift_task); + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); +} + +/** + * e1000_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + /* read ICR disables interrupts using IAM */ + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = true; + /* ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers + */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + schedule_work(&adapter->downshift_task); + + /* 80003ES2LAN workaround-- For packet buffer work-around on + * link down event; disable receives here in the ISR and reset + * adapter in watchdog + */ + if (netif_carrier_ok(netdev) && + adapter->flags & FLAG_RX_NEEDS_RESTART) { + /* disable receives */ + u32 rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= FLAG_RESTART_NOW; + } + /* guard against 
interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + /* Reset on uncorrectable ECC error */ + if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + u32 pbeccsts = er32(PBECCSTS); + + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); + + /* return immediately since reset is imminent */ + return IRQ_HANDLED; + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + + return IRQ_HANDLED; +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 rctl, icr = er32(ICR); + + if (!icr || test_bit(__E1000_DOWN, &adapter->state)) + return IRQ_NONE; /* Not our interrupt */ + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + /* Interrupt Auto-Mask...upon reading ICR, + * interrupts are masked. No need for the + * IMC write + */ + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = true; + /* ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers + */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + schedule_work(&adapter->downshift_task); + + /* 80003ES2LAN workaround-- + * For packet buffer work-around on link down event; + * disable receives here in the ISR and + * reset adapter in watchdog + */ + if (netif_carrier_ok(netdev) && + (adapter->flags & FLAG_RX_NEEDS_RESTART)) { + /* disable receives */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= FLAG_RESTART_NOW; + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + /* Reset on uncorrectable ECC error */ + if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) { + u32 pbeccsts = er32(PBECCSTS); + + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); + + /* return immediately since reset is imminent */ + return IRQ_HANDLED; + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (!(icr & E1000_ICR_INT_ASSERTED)) { + if (!test_bit(__E1000_DOWN, 
&adapter->state)) + ew32(IMS, E1000_IMS_OTHER); + return IRQ_NONE; + } + + if (icr & adapter->eiac_mask) + ew32(ICS, (icr & adapter->eiac_mask)); + + if (icr & E1000_ICR_OTHER) { + if (!(icr & E1000_ICR_LSC)) + goto no_link_interrupt; + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + +no_link_interrupt: + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + + if (!e1000_clean_tx_irq(tx_ring)) + /* Ring was not completely cleaned, so fire another interrupt */ + ew32(ICS, tx_ring->ims_val); + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *rx_ring = adapter->rx_ring; + + /* Write the ITR value calculated at the end of the + * previous interrupt. + */ + if (rx_ring->set_itr) { + writel(1000000000 / (rx_ring->itr_val * 256), + rx_ring->itr_register); + rx_ring->set_itr = 0; + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + return IRQ_HANDLED; +} + +/** + * e1000_configure_msix - Configure MSI-X hardware + * + * e1000_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void e1000_configure_msix(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_ring *tx_ring = adapter->tx_ring; + int vector = 0; + u32 ctrl_ext, ivar = 0; + + adapter->eiac_mask = 0; + + /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ + if (hw->mac.type == e1000_82574) { + u32 rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_ACK_DIS; + ew32(RFCTL, rfctl); + } + + /* Configure Rx vector */ + rx_ring->ims_val = E1000_IMS_RXQ0; + adapter->eiac_mask |= rx_ring->ims_val; + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + rx_ring->itr_register); + else + writel(1, rx_ring->itr_register); + ivar = E1000_IVAR_INT_ALLOC_VALID | vector; + + /* Configure Tx vector */ + tx_ring->ims_val = E1000_IMS_TXQ0; + vector++; + if (tx_ring->itr_val) + writel(1000000000 / (tx_ring->itr_val * 256), + tx_ring->itr_register); + else + writel(1, tx_ring->itr_register); + adapter->eiac_mask |= tx_ring->ims_val; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); + + /* set vector for Other Causes, e.g. 
link changes */ + vector++; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + hw->hw_addr + E1000_EITR_82574(vector)); + else + writel(1, hw->hw_addr + E1000_EITR_82574(vector)); + + /* Cause Tx interrupts on every write back */ + ivar |= (1 << 31); + + ew32(IVAR, ivar); + + /* enable MSI-X PBA support */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask Other interrupts upon ICR read */ + ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); + ctrl_ext |= E1000_CTRL_EXT_EIAME; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} + +void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) +{ + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~FLAG_MSI_ENABLED; + } +} + +/** + * e1000e_set_interrupt_capability - set MSI or MSI-X if supported + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) +{ + int err; + int i; + + switch (adapter->int_mode) { + case E1000E_INT_MODE_MSIX: + if (adapter->flags & FLAG_HAS_MSIX) { + adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ + adapter->msix_entries = kcalloc(adapter->num_vectors, + sizeof(struct + msix_entry), + GFP_KERNEL); + if (adapter->msix_entries) { + for (i = 0; i < adapter->num_vectors; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix(adapter->pdev, + adapter->msix_entries, + adapter->num_vectors); + if (err == 0) + return; + } + /* MSI-X failed, so fall through and try MSI */ + e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); + e1000e_reset_interrupt_capability(adapter); + } + adapter->int_mode = E1000E_INT_MODE_MSI; + /* Fall through */ + case E1000E_INT_MODE_MSI: + if (!pci_enable_msi(adapter->pdev)) { + adapter->flags |= FLAG_MSI_ENABLED; + } else { + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); + } + /* Fall through */ + case E1000E_INT_MODE_LEGACY: + /* Don't do anything; this is the system default */ + break; + } + + /* store the number of vectors being used */ + adapter->num_vectors = 1; +} + +/** + * e1000_request_msix - Initialize MSI-X interrupts + * + * e1000_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
+ **/ +static int e1000_request_msix(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0, vector = 0; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + snprintf(adapter->rx_ring->name, + sizeof(adapter->rx_ring->name) - 1, + "%s-rx-0", netdev->name); + else + memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[vector].vector, + e1000_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); + if (err) + return err; + adapter->rx_ring->itr_register = adapter->hw.hw_addr + + E1000_EITR_82574(vector); + adapter->rx_ring->itr_val = adapter->itr; + vector++; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + snprintf(adapter->tx_ring->name, + sizeof(adapter->tx_ring->name) - 1, + "%s-tx-0", netdev->name); + else + memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[vector].vector, + e1000_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); + if (err) + return err; + adapter->tx_ring->itr_register = adapter->hw.hw_addr + + E1000_EITR_82574(vector); + adapter->tx_ring->itr_val = adapter->itr; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + e1000_msix_other, 0, netdev->name, netdev); + if (err) + return err; + + e1000_configure_msix(adapter); + + return 0; +} + +/** + * e1000_request_irq - initialize interrupts + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->msix_entries) { + err = e1000_request_msix(adapter); + if (!err) + return err; + /* fall back to MSI */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_MSI; + e1000e_set_interrupt_capability(adapter); + } + if (adapter->flags & FLAG_MSI_ENABLED) { + err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, + netdev->name, netdev); + if (!err) + return err; + + /* fall back to legacy interrupt */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_LEGACY; + } + + err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, + netdev->name, netdev); + if (err) + e_err("Unable to allocate interrupt, Error: %d\n", err); + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->msix_entries) { + int vector = 0; + + free_irq(adapter->msix_entries[vector].vector, netdev); + vector++; + + free_irq(adapter->msix_entries[vector].vector, netdev); + vector++; + + /* Other Causes interrupt vector */ + free_irq(adapter->msix_entries[vector].vector, netdev); + return; + } + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + if (adapter->msix_entries) + ew32(EIAC_82574, 0); + e1e_flush(); + + if (adapter->msix_entries) { + int i; + for (i = 0; i < adapter->num_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574); + 
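/* Bits set in EIAC_82574 (Extended Interrupt Auto Clear) are cleared + * from ICR by hardware when the matching MSI-X vector fires, which is + * why the Rx/Tx queue handlers above never acknowledge those causes + * themselves. + */ +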
ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); + } else if (hw->mac.type == e1000_pch_lpt) { + ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); + } else { + ew32(IMS, IMS_ENABLE_MASK); + } + e1e_flush(); +} + +/** + * e1000e_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. For AMT version (only with 82573) + * of the f/w this means that the network i/f is open. + **/ +void e1000e_get_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware know the driver has taken over */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + } +} + +/** + * e1000e_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. For AMT version (only with 82573) + * of the f/w this means that the network i/f is closed. + * + **/ +void e1000e_release_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware take over control of h/w */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + } +} + +/** + * e1000_alloc_ring_dma - allocate memory for a ring structure + **/ +static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, + struct e1000_ring *ring) +{ + struct pci_dev *pdev = adapter->pdev; + + ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma, + GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + return 0; +} + +/** + * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: Tx descriptor ring + * + * Return 0 on success, negative on failure + **/ +int e1000e_setup_tx_resources(struct e1000_ring *tx_ring) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + int err = -ENOMEM, size; + + size = sizeof(struct e1000_buffer) * tx_ring->count; + tx_ring->buffer_info = vzalloc(size); + if (!tx_ring->buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, tx_ring); + if (err) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; +err: + vfree(tx_ring->buffer_info); + e_err("Unable to allocate memory for the transmit descriptor ring\n"); + return err; +} + +/** + * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: Rx descriptor ring + * + * Returns 0 on success, negative on failure + **/ +int e1000e_setup_rx_resources(struct e1000_ring *rx_ring) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_buffer *buffer_info; + int i, size, desc_len, err = -ENOMEM; + + size = sizeof(struct e1000_buffer) * rx_ring->count; + rx_ring->buffer_info = vzalloc(size); + if (!rx_ring->buffer_info)
+ goto err; + + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, + sizeof(struct e1000_ps_page), + GFP_KERNEL); + if (!buffer_info->ps_pages) + goto err_pages; + } + + desc_len = sizeof(union e1000_rx_desc_packet_split); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, rx_ring); + if (err) + goto err_pages; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->rx_skb_top = NULL; + + return 0; + +err_pages: + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + kfree(buffer_info->ps_pages); + } +err: + vfree(rx_ring->buffer_info); + e_err("Unable to allocate memory for the receive descriptor ring\n"); + return err; +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @tx_ring: Tx descriptor ring + **/ +static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_buffer *buffer_info; + unsigned long size; + unsigned int i; + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_put_txbuf(tx_ring, buffer_info); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + writel(0, tx_ring->head); + if (tx_ring->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_tdt_wa(tx_ring, 0); + else + writel(0, tx_ring->tail); +} + +/** + * e1000e_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring + * + * Free all transmit software resources + **/ +void e1000e_free_tx_resources(struct e1000_ring *tx_ring) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * e1000e_free_rx_resources - Free Rx Resources + * @rx_ring: Rx descriptor ring + * + * Free all receive software resources + **/ +void e1000e_free_rx_resources(struct e1000_ring *rx_ring) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct pci_dev *pdev = adapter->pdev; + int i; + + e1000_clean_rx_ring(rx_ring); + + for (i = 0; i < rx_ring->count; i++) + kfree(rx_ring->buffer_info[i].ps_pages); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. 
This functionality is controlled + * by the InterruptThrottleRate module parameter. + **/ +static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + + if (packets == 0) + return itr_setting; + + switch (itr_setting) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes / packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes / packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes / packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes / packets > 2000) { + retval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + retval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + u16 current_itr; + u32 new_itr = adapter->itr; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + if (adapter->flags2 & FLAG2_DISABLE_AIM) { + new_itr = 0; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + /* counts and packets in update_itr are dependent on these numbers */ + switch (current_itr) { + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? + min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; + adapter->itr = new_itr; + adapter->rx_ring->itr_val = new_itr; + if (adapter->msix_entries) + adapter->rx_ring->set_itr = 1; + else + e1000e_write_itr(adapter, new_itr); + } +} + +/** + * e1000e_write_itr - write the ITR value to the appropriate registers + * @adapter: address of board private structure + * @itr: new ITR value to program + * + * e1000e_write_itr determines if the adapter is in MSI-X mode + * and, if so, writes the EITR registers with the ITR value. + * Otherwise, it writes the ITR value into the ITR register. + **/ +void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr) +{ + struct e1000_hw *hw = &adapter->hw; + u32 new_itr = itr ? 
1000000000 / (itr * 256) : 0; + + if (adapter->msix_entries) { + int vector; + + for (vector = 0; vector < adapter->num_vectors; vector++) + writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector)); + } else { + ew32(ITR, new_itr); + } +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + int size = sizeof(struct e1000_ring); + + adapter->tx_ring = kzalloc(size, GFP_KERNEL); + if (!adapter->tx_ring) + goto err; + adapter->tx_ring->count = adapter->tx_ring_count; + adapter->tx_ring->adapter = adapter; + + adapter->rx_ring = kzalloc(size, GFP_KERNEL); + if (!adapter->rx_ring) + goto err; + adapter->rx_ring->count = adapter->rx_ring_count; + adapter->rx_ring->adapter = adapter; + + return 0; +err: + e_err("Unable to allocate memory for queues\n"); + kfree(adapter->rx_ring); + kfree(adapter->tx_ring); + return -ENOMEM; +} + +/** + * e1000e_poll - NAPI Rx polling callback + * @napi: struct associated with this polling callback + * @weight: number of packets driver is allowed to process this poll + **/ +static int e1000e_poll(struct napi_struct *napi, int weight) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + struct e1000_hw *hw = &adapter->hw; + struct net_device *poll_dev = adapter->netdev; + int tx_cleaned = 1, work_done = 0; + + adapter = netdev_priv(poll_dev); + + if (!adapter->msix_entries || + (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) + tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); + + adapter->clean_rx(adapter->rx_ring, &work_done, weight); + + if (!tx_cleaned) + work_done = weight; + + /* If weight not fully consumed, exit the polling mode */ + if (work_done < weight) { + if (adapter->itr_setting & 3) + e1000_set_itr(adapter); + napi_complete(napi); + if (!test_bit(__E1000_DOWN, &adapter->state)) { + if (adapter->msix_entries) + ew32(IMS, adapter->rx_ring->ims_val); + else + e1000_irq_enable(adapter); + } + } + + return work_done; +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + /* don't update vlan cookie if already programmed */ + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) + return 0; + + /* add VID to filter table */ + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta |= (1 << (vid & 0x1F)); + hw->mac.ops.write_vfta(hw, index, vfta); + } + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) { + /* release control to f/w */ + e1000e_release_hw_control(adapter); + return 0; + } + + /* remove VID from filter table */ + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + hw->mac.ops.write_vfta(hw, index, vfta); + } + + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +/** + * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering + * 
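+ *
+ * [Editor's annotation, not part of the original driver: the VFTA maths
+ *  in e1000_vlan_rx_add_vid()/e1000_vlan_rx_kill_vid() above treats the
+ *  12-bit VID as an index into 128 x 32-bit registers:
+ *
+ *	index = (vid >> 5) & 0x7F;	selects the VFTA register
+ *	mask  = 1 << (vid & 0x1F);	selects the bit within it
+ *
+ *  e.g. vid = 100 -> VFTA[3], mask 0x10.]
+ *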
@adapter: board private structure to initialize + **/ +static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); + ew32(RCTL, rctl); + + if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + } +} + +/** + * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl |= E1000_RCTL_VFE; + rctl &= ~E1000_RCTL_CFIEN; + ew32(RCTL, rctl); + } +} + +/** + * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + /* disable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_VME; + ew32(CTRL, ctrl); +} + +/** + * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + /* enable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_VME; + ew32(CTRL, ctrl); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } + + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid); +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +static void e1000_init_manageability_pt(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 manc, manc2h, mdef, i, j; + + if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) + return; + + manc = er32(MANC); + + /* enable receiving management packets to the host. this will probably + * generate destination unreachable messages from the host OS, but + * the packets will be handled on SMBUS + */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h = er32(MANC2H); + + switch (hw->mac.type) { + default: + manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); + break; + case e1000_82574: + case e1000_82583: + /* Check if IPMI pass-through decision filter already exists; + * if so, enable it. 
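+ * [Editor's note, not in the original source: UDP ports 623 and 664 are
+ * the standard RMCP/IPMI and secure-RMCP ports, so a decision filter
+ * whose MDEF register carries only those port bits is already an IPMI
+ * pass-through filter and can simply be turned on in MANC2H.]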
+ */ + for (i = 0, j = 0; i < 8; i++) { + mdef = er32(MDEF(i)); + + /* Ignore filters with anything other than IPMI ports */ + if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) + continue; + + /* Enable this decision filter in MANC2H */ + if (mdef) + manc2h |= (1 << i); + + j |= mdef; + } + + if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) + break; + + /* Create new decision filter in an empty filter */ + for (i = 0, j = 0; i < 8; i++) + if (er32(MDEF(i)) == 0) { + ew32(MDEF(i), (E1000_MDEF_PORT_623 | + E1000_MDEF_PORT_664)); + manc2h |= (1 << 1); + j++; + break; + } + + if (!j) + e_warn("Unable to create IPMI pass-through filter\n"); + break; + } + + ew32(MANC2H, manc2h); + ew32(MANC, manc); +} + +/** + * e1000_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + u64 tdba; + u32 tdlen, tarc; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + tdba = tx_ring->dma; + tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); + ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); + ew32(TDBAH(0), (tdba >> 32)); + ew32(TDLEN(0), tdlen); + ew32(TDH(0), 0); + ew32(TDT(0), 0); + tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); + tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); + + /* Set the Tx Interrupt Delay register */ + ew32(TIDV, adapter->tx_int_delay); + /* Tx irq moderation */ + ew32(TADV, adapter->tx_abs_int_delay); + + if (adapter->flags2 & FLAG2_DMA_BURST) { + u32 txdctl = er32(TXDCTL(0)); + txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | + E1000_TXDCTL_WTHRESH); + /* set up some performance related parameters to encourage the + * hardware to use the bus more efficiently in bursts, depends + * on the tx_int_delay to be enabled, + * wthresh = 1 ==> burst write is disabled to avoid Tx stalls + * hthresh = 1 ==> prefetch when one or more available + * pthresh = 0x1f ==> prefetch if internal cache 31 or less + * BEWARE: this seems to work but should be considered first if + * there are Tx hangs or other Tx related bugs + */ + txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; + ew32(TXDCTL(0), txdctl); + } + /* erratum work around: set txdctl the same for both queues */ + ew32(TXDCTL(1), er32(TXDCTL(0))); + + if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { + tarc = er32(TARC(0)); + /* set the speed mode bit, we'll clear it if we're not at + * gigabit link later + */ +#define SPEED_MODE_BIT (1 << 21) + tarc |= SPEED_MODE_BIT; + ew32(TARC(0), tarc); + } + + /* errata: program both queues to unweighted RR */ + if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { + tarc = er32(TARC(0)); + tarc |= 1; + ew32(TARC(0), tarc); + tarc = er32(TARC(1)); + tarc |= 1; + ew32(TARC(1), tarc); + } + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + /* enable Report Status bit */ + adapter->txd_cmd |= E1000_TXD_CMD_RS; + + hw->mac.ops.config_collision_dist(hw); +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 
1 : 0)) +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl, rfctl; + u32 pages = 0; + + /* Workaround Si errata on PCHx - configure jumbo frame flow */ + if (hw->mac.type >= e1000_pch2lan) { + s32 ret_val; + + if (adapter->netdev->mtu > ETH_DATA_LEN) + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); + else + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); + + if (ret_val) + e_dbg("failed to enable jumbo frame workaround mode\n"); + } + + /* Program MC offset vector base */ + rctl = er32(RCTL); + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Do not Store bad packets */ + rctl &= ~E1000_RCTL_SBP; + + /* Enable Long Packet receive */ + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Some systems expect that the CRC is included in SMBUS traffic. The + * hardware strips the CRC before sending to both SMBUS (BMC) and to + * host memory when this is enabled + */ + if (adapter->flags2 & FLAG2_CRC_STRIPPING) + rctl |= E1000_RCTL_SECRC; + + /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ + if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { + u16 phy_data; + + e1e_rphy(hw, PHY_REG(770, 26), &phy_data); + phy_data &= 0xfff8; + phy_data |= (1 << 2); + e1e_wphy(hw, PHY_REG(770, 26), phy_data); + + e1e_rphy(hw, 22, &phy_data); + phy_data &= 0x0fff; + phy_data |= (1 << 14); + e1e_wphy(hw, 0x10, 0x2823); + e1e_wphy(hw, 0x11, 0x0003); + e1e_wphy(hw, 22, phy_data); + } + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case 2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case 4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case 8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case 16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* Enable Extended Status in all Receive Descriptors */ + rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_EXTEN; + ew32(RFCTL, rfctl); + + /* 82571 and greater support packet-split where the protocol + * header is placed in skb->data and the packet data is + * placed in pages hanging off of skb_shinfo(skb)->nr_frags. + * In the case of a non-split, skb->data is linearly filled, + * followed by the page buffers. Therefore, skb->data is + * sized to hold the largest protocol header. + * + * allocations using alloc_page take too long for regular MTU + * so only enable packet split for jumbo frames + * + * Using pages when the page size is greater than 16k wastes + * a lot of memory, since we allocate 3 pages at all times + * per packet. + */ + pages = PAGE_USE_COUNT(adapter->netdev->mtu); + if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) + adapter->rx_ps_pages = pages; + else + adapter->rx_ps_pages = 0; + + if (adapter->rx_ps_pages) { + u32 psrctl = 0; + + /* Enable Packet split descriptors */ + rctl |= E1000_RCTL_DTYP_PS; + + psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; + + switch (adapter->rx_ps_pages) { + case 3: + psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT; + /* fall-through */ + case 2: + psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT; + /* fall-through */ + case 1: + psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT; + break; + } + + ew32(PSRCTL, psrctl); + } + + /* This is useful for sniffing bad packets. 
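+ * [Editor's note, not part of the original driver: NETIF_F_RXALL is
+ * toggled from user space through the "rx-all" ethtool feature, e.g.
+ * "ethtool -K eth0 rx-all on", with the interface name assumed.]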
*/ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); + /* just started the receive unit, no need to restart */ + adapter->flags &= ~FLAG_RESTART_NOW; +} + +/** + * e1000_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + u64 rdba; + u32 rdlen, rctl, rxcsum, ctrl_ext; + + if (adapter->rx_ps_pages) { + /* this is a 32 byte descriptor */ + rdlen = rx_ring->count * + sizeof(union e1000_rx_desc_packet_split); + adapter->clean_rx = e1000_clean_rx_irq_ps; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; + } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { + rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->flags2 & FLAG2_DMA_BURST) { + /* set the writeback threshold (only takes effect if the RDTR + * is set). 
set GRAN=1 and write back up to 0x4 worth, and + * enable prefetching of 0x20 Rx descriptors + * granularity = 01 + * wthresh = 04, + * hthresh = 04, + * pthresh = 0x20 + */ + ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); + ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); + + /* override the delay timers for enabling bursting, only if + * the value was not set by the user via module options + */ + if (adapter->rx_int_delay == DEFAULT_RDTR) + adapter->rx_int_delay = BURST_RDTR; + if (adapter->rx_abs_int_delay == DEFAULT_RADV) + adapter->rx_abs_int_delay = BURST_RADV; + } + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + /* irq moderation */ + ew32(RADV, adapter->rx_abs_int_delay); + if ((adapter->itr_setting != 0) && (adapter->itr != 0)) + e1000e_write_itr(adapter, adapter->itr); + + ctrl_ext = er32(CTRL_EXT); + /* Auto-Mask interrupts upon ICR access */ + ctrl_ext |= E1000_CTRL_EXT_IAME; + ew32(IAM, 0xffffffff); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + rdba = rx_ring->dma; + ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); + ew32(RDBAH(0), (rdba >> 32)); + ew32(RDLEN(0), rdlen); + ew32(RDH(0), 0); + ew32(RDT(0), 0); + rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); + rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); + + /* Enable Receive Checksum Offload for TCP and UDP */ + rxcsum = er32(RXCSUM); + if (adapter->netdev->features & NETIF_F_RXCSUM) + rxcsum |= E1000_RXCSUM_TUOFL; + else + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + + /* With jumbo frames, excessive C-state transition latencies result + * in dropped transactions. + */ + if (adapter->netdev->mtu > ETH_DATA_LEN) { + u32 lat = + ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - + adapter->max_frame_size) * 8 / 1000; + + if (adapter->flags & FLAG_IS_ICH) { + u32 rxdctl = er32(RXDCTL(0)); + ew32(RXDCTL(0), rxdctl | 0x3); + } + + pm_qos_update_request(&adapter->netdev->pm_qos_req, lat); + } else { + pm_qos_update_request(&adapter->netdev->pm_qos_req, + PM_QOS_DEFAULT_VALUE); + } + + /* Enable Receives */ + ew32(RCTL, rctl); +} + +/** + * e1000e_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + */ +static int e1000e_write_mc_addr_list(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + hw->mac.ops.update_mc_addr_list(hw, NULL, 0); + return 0; + } + + mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* update_mc_addr_list expects a packed array of only addresses. */ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + hw->mac.ops.update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +/** + * e1000e_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. 
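+ *
+ * [Editor's annotation, not in the original source: a worked example of
+ *  the accounting below, assuming hw->mac.rar_entry_count == 15:
+ *  15 RARs - 1 for the adapter's own MAC - 1 for the LAA copy (when
+ *  FLAG_RESET_OVERWRITES_LAA is set) leaves 13 slots; a longer unicast
+ *  list makes this return -ENOMEM, and e1000e_set_rx_mode() then falls
+ *  back to unicast promiscuous mode.]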
+ * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int e1000e_write_uc_addr_list(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int rar_entries = hw->mac.rar_entry_count; + int count = 0; + + /* save a rar entry for our hardware address */ + rar_entries--; + + /* save a rar entry for the LAA workaround */ + if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) + rar_entries--; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > rar_entries) + return -ENOMEM; + + if (!netdev_uc_empty(netdev) && rar_entries) { + struct netdev_hw_addr *ha; + + /* write the addresses in reverse order to avoid write + * combining + */ + netdev_for_each_uc_addr(ha, netdev) { + if (!rar_entries) + break; + hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); + count++; + } + } + + /* zero out the remaining RAR entries not used above */ + for (; rar_entries > 0; rar_entries--) { + ew32(RAH(rar_entries), 0); + ew32(RAL(rar_entries), 0); + } + e1e_flush(); + + return count; +} + +/** + * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The ndo_set_rx_mode entry point is called whenever the unicast or multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000e_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + /* Check for Promiscuous and All Multicast modes */ + rctl = er32(RCTL); + + /* clear the affected bits */ + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + /* Do not hardware filter VLANs in promisc mode */ + e1000e_vlan_filter_disable(adapter); + } else { + int count; + + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = e1000e_write_mc_addr_list(netdev); + if (count < 0) + rctl |= E1000_RCTL_MPE; + } + e1000e_vlan_filter_enable(adapter); + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = e1000e_write_uc_addr_list(netdev); + if (count < 0) + rctl |= E1000_RCTL_UPE; + } + + ew32(RCTL, rctl); + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + e1000e_vlan_strip_enable(adapter); + else + e1000e_vlan_strip_disable(adapter); +} + +static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + int i; + static const u32 rsskey[10] = { + 0xda565a6d, 0xc20e5b25, 0x3d256741, 0xb08fa343, 0xcb2bcad0, + 0xb4307bae, 0xa32dcb77, 0x0cf23080, 0x3bb7426a, 0xfa01acbe + }; + + /* Fill out hash function seed */ + for (i = 0; i < 10; i++) + ew32(RSSRK(i), rsskey[i]); + + /* Direct all traffic to queue 0 */ + for (i = 0; i < 32; i++) + ew32(RETA(i), 0); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
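+ * [Editor's note, not part of the original driver: the loop above clears
+ * all 32 RETA registers (128 table entries), so every hash bucket
+ * resolves to Rx queue 0 and the RSS hash is only reported in the
+ * descriptor for the rx hash (NETIF_F_RXHASH), not used to spread
+ * load across queues.]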
+ */ + rxcsum = er32(RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + ew32(RXCSUM, rxcsum); + + mrqc = (E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); + + ew32(MRQC, mrqc); +} + +/** + * e1000e_get_base_timinca - get default SYSTIM time increment attributes + * @adapter: board private structure + * @timinca: pointer to returned time increment attributes + * + * Get attributes for incrementing the System Time Register SYSTIML/H at + * the default base frequency, and set the cyclecounter shift value. + **/ +s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) +{ + struct e1000_hw *hw = &adapter->hw; + u32 incvalue, incperiod, shift; + + /* Make sure clock is enabled on I217 before checking the frequency */ + if ((hw->mac.type == e1000_pch_lpt) && + !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && + !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { + u32 fextnvm7 = er32(FEXTNVM7); + + if (!(fextnvm7 & (1 << 0))) { + ew32(FEXTNVM7, fextnvm7 | (1 << 0)); + e1e_flush(); + } + } + + switch (hw->mac.type) { + case e1000_pch2lan: + case e1000_pch_lpt: + /* On I217, the clock frequency is 25MHz or 96MHz as + * indicated by the System Clock Frequency Indication + */ + if ((hw->mac.type != e1000_pch_lpt) || + (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { + /* Stable 96MHz frequency */ + incperiod = INCPERIOD_96MHz; + incvalue = INCVALUE_96MHz; + shift = INCVALUE_SHIFT_96MHz; + adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + break; + } + /* fall-through */ + case e1000_82574: + case e1000_82583: + /* Stable 25MHz frequency */ + incperiod = INCPERIOD_25MHz; + incvalue = INCVALUE_25MHz; + shift = INCVALUE_SHIFT_25MHz; + adapter->cc.shift = shift; + break; + default: + return -EINVAL; + } + + *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) | + ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK)); + + return 0; +} + +/** + * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable + * @adapter: board private structure + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware filters. + * Not all combinations are supported, in particular event type has to be + * specified. Matching the kind of event packet is not supported, with the + * exception of "all V2 events regardless of level 2 or 4". 
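+ *
+ * [Editor's annotation, not part of the original driver: the config
+ *  normally arrives from user space via the standard SIOCSHWTSTAMP
+ *  ioctl; a minimal sketch, interface name and socket assumed:
+ *
+ *	struct hwtstamp_config cfg = {
+ *		.tx_type   = HWTSTAMP_TX_ON,
+ *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
+ *	};
+ *	struct ifreq ifr = { .ifr_data = (void *)&cfg };
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
+ *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
+ *
+ *  headers: linux/net_tstamp.h, linux/sockios.h, net/if.h]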
+ **/ +static int e1000e_config_hwtstamp(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct hwtstamp_config *config = &adapter->hwtstamp_config; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + u32 rxmtrl = 0; + u16 rxudp = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + s32 ret_val; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return -EINVAL; + + /* flags reserved for future extensions - must be zero */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + break; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + /* Also time stamps V2 L2 Path Delay Request/Response */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; + is_l2 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + /* Also time stamps V2 L2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; + is_l2 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + /* Hardware cannot filter just V2 L4 Sync messages; + * fall-through to V2 (both L2 and L4) Sync. + */ + case HWTSTAMP_FILTER_PTP_V2_SYNC: + /* Also time stamps V2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + /* Hardware cannot filter just V2 L4 Delay Request messages; + * fall-through to V2 (both L2 and L4) Delay Request. + */ + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* Also time stamps V2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + /* Hardware cannot filter just V2 L4 or L2 Event messages; + * fall-through to all V2 (both L2 and L4) Events. + */ + case HWTSTAMP_FILTER_PTP_V2_EVENT: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + /* For V1, the hardware can only filter Sync messages or + * Delay Request messages but not both so fall-through to + * time stamp all packets. 
+ */ + case HWTSTAMP_FILTER_ALL: + is_l2 = true; + is_l4 = true; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + /* enable/disable Tx h/w time stamping */ + regval = er32(TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + ew32(TSYNCTXCTL, regval); + if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) != + (regval & E1000_TSYNCTXCTL_ENABLED)) { + e_err("Timesync Tx Control register not set as expected\n"); + return -EAGAIN; + } + + /* enable/disable Rx h/w time stamping */ + regval = er32(TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + ew32(TSYNCRXCTL, regval); + if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED | + E1000_TSYNCRXCTL_TYPE_MASK)) != + (regval & (E1000_TSYNCRXCTL_ENABLED | + E1000_TSYNCRXCTL_TYPE_MASK))) { + e_err("Timesync Rx Control register not set as expected\n"); + return -EAGAIN; + } + + /* L2: define ethertype filter for time stamped packets */ + if (is_l2) + rxmtrl |= ETH_P_1588; + + /* define which PTP packets get time stamped */ + ew32(RXMTRL, rxmtrl); + + /* Filter by destination port */ + if (is_l4) { + rxudp = PTP_EV_PORT; + cpu_to_be16s(&rxudp); + } + ew32(RXUDP, rxudp); + + e1e_flush(); + + /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */ + er32(RXSTMPH); + er32(TXSTMPH); + + /* Get and set the System Time Register SYSTIM base frequency */ + ret_val = e1000e_get_base_timinca(adapter, ®val); + if (ret_val) + return ret_val; + ew32(TIMINCA, regval); + + /* reset the ns time counter */ + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + + return 0; +} + +/** + * e1000_configure - configure the hardware for Rx and Tx + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct e1000_ring *rx_ring = adapter->rx_ring; + + e1000e_set_rx_mode(adapter->netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability_pt(adapter); + + e1000_configure_tx(adapter); + + if (adapter->netdev->features & NETIF_F_RXHASH) + e1000e_setup_rss_hash(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); +} + +/** + * e1000e_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000e_reset *** + **/ +void e1000e_power_up_phy(struct e1000_adapter *adapter) +{ + if (adapter->hw.phy.ops.power_up) + adapter->hw.phy.ops.power_up(&adapter->hw); + + adapter->hw.mac.ops.setup_link(&adapter->hw); +} + +/** + * e1000_power_down_phy - Power down the PHY + * + * Power down the PHY so no link is implied when interface is down. + * The PHY cannot be powered down if management or WoL is active. + */ +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + /* WoL is enabled */ + if (adapter->wol) + return; + + if (adapter->hw.phy.ops.power_down) + adapter->hw.phy.ops.power_down(&adapter->hw); +} + +/** + * e1000e_reset - bring the hardware into a known good state + * + * This function boots the hardware and enables some settings that + * require a configuration cycle of the hardware - those cannot be + * set/changed during runtime. 
After reset the device needs to be + * properly configured for Rx, Tx etc. + */ +void e1000e_reset(struct e1000_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_fc_info *fc = &adapter->hw.fc; + struct e1000_hw *hw = &adapter->hw; + u32 tx_space, min_tx_space, min_rx_space; + u32 pba = adapter->pba; + u16 hwm; + + /* reset Packet Buffer Allocation to default */ + ew32(PBA, pba); + + if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) { + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits has Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits has Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx fifo also stores 16 bytes of information about the Tx + * but don't include ethernet FCS because hardware appends it + */ + min_tx_space = (adapter->max_frame_size + + sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = adapter->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if ((tx_space < min_tx_space) && + ((min_tx_space - tx_space) < pba)) { + pba -= min_tx_space - tx_space; + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + + ew32(PBA, pba); + } + + /* flow control settings + * + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. + * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus one full frame + */ + if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) + fc->pause_time = 0xFFFF; + else + fc->pause_time = E1000_FC_PAUSE_TIME; + fc->send_xon = true; + fc->current_mode = fc->requested_mode; + + switch (hw->mac.type) { + case e1000_ich9lan: + case e1000_ich10lan: + if (adapter->netdev->mtu > ETH_DATA_LEN) { + pba = 14; + ew32(PBA, pba); + fc->high_water = 0x2800; + fc->low_water = fc->high_water - 8; + break; + } + /* fall-through */ + default: + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - adapter->max_frame_size)); + + fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ + fc->low_water = fc->high_water - 8; + break; + case e1000_pchlan: + /* Workaround PCH LOM adapter hangs with certain network + * loads. If hangs persist, try disabling Tx flow control. 
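+ * [Editor's note, not in the original source: for the default branch
+ * above the watermark arithmetic works out to, e.g. with pba = 20 (KB)
+ * and a 1522-byte max frame:
+ *	hwm = min(20480 * 9 / 10, 20480 - 1522)
+ *	    = min(18432, 18958) = 18432
+ * so the 90%-of-FIFO bound wins and low_water sits 8 bytes below.]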
+ */ + if (adapter->netdev->mtu > ETH_DATA_LEN) { + fc->high_water = 0x3500; + fc->low_water = 0x1500; + } else { + fc->high_water = 0x5000; + fc->low_water = 0x3000; + } + fc->refresh_time = 0x1000; + break; + case e1000_pch2lan: + case e1000_pch_lpt: + fc->refresh_time = 0x0400; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) { + fc->high_water = 0x05C20; + fc->low_water = 0x05048; + fc->pause_time = 0x0650; + break; + } + + fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; + fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; + break; + } + + /* Alignment of Tx data is on an arbitrary byte boundary with the + * maximum size per Tx descriptor limited only to the transmit + * allocation of the packet buffer minus 96 bytes with an upper + * limit of 24KB due to receive synchronization limitations. + */ + adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, + 24 << 10); + + /* Disable Adaptive Interrupt Moderation if 2 full packets cannot + * fit in receive buffer. + */ + if (adapter->itr_setting & 0x3) { + if ((adapter->max_frame_size * 2) > (pba << 10)) { + if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { + dev_info(&adapter->pdev->dev, + "Interrupt Throttle Rate off\n"); + adapter->flags2 |= FLAG2_DISABLE_AIM; + e1000e_write_itr(adapter, 0); + } + } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { + dev_info(&adapter->pdev->dev, + "Interrupt Throttle Rate on\n"); + adapter->flags2 &= ~FLAG2_DISABLE_AIM; + adapter->itr = 20000; + e1000e_write_itr(adapter, adapter->itr); + } + } + + /* Allow time for pending master requests to run */ + mac->ops.reset_hw(hw); + + /* For parts with AMT enabled, let the firmware know + * that the network interface is in control + */ + if (adapter->flags & FLAG_HAS_AMT) + e1000e_get_hw_control(adapter); + + ew32(WUC, 0); + + if (mac->ops.init_hw(hw)) + e_err("Hardware Error\n"); + + e1000_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETH_P_8021Q); + + e1000e_reset_adaptive(hw); + + /* initialize systim and reset the ns time counter */ + e1000e_config_hwtstamp(adapter); + + /* Set EEE advertisement as appropriate */ + if (adapter->flags2 & FLAG2_HAS_EEE) { + s32 ret_val; + u16 adv_addr; + + switch (hw->phy.type) { + case e1000_phy_82579: + adv_addr = I82579_EEE_ADVERTISEMENT; + break; + case e1000_phy_i217: + adv_addr = I217_EEE_ADVERTISEMENT; + break; + default: + dev_err(&adapter->pdev->dev, + "Invalid PHY type setting EEE advertisement\n"); + return; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + dev_err(&adapter->pdev->dev, + "EEE advertisement - unable to acquire PHY\n"); + return; + } + + e1000_write_emi_reg_locked(hw, adv_addr, + hw->dev_spec.ich8lan.eee_disable ? 
+ 0 : adapter->eee_advert); + + hw->phy.ops.release(hw); + } + + if (!netif_running(adapter->netdev) && + !test_bit(__E1000_TESTING, &adapter->state)) { + e1000_power_down_phy(adapter); + return; + } + + e1000_get_phy_info(hw); + + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && + !(adapter->flags & FLAG_SMART_POWER_DOWN)) { + u16 phy_data = 0; + /* speed up time to link by disabling smart power down, ignore + * the return value of this function because there is nothing + * different we would do if it failed + */ + e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); + phy_data &= ~IGP02E1000_PM_SPD; + e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); + } +} + +int e1000e_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->state); + + if (adapter->msix_entries) + e1000_configure_msix(adapter); + e1000_irq_enable(adapter); + + netif_start_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); + else + ew32(ICS, E1000_ICS_LSC); + + return 0; +} + +static void e1000e_flush_descriptors(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (!(adapter->flags2 & FLAG2_DMA_BURST)) + return; + + /* flush pending descriptor writebacks to memory */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); + + /* execute the writes immediately */ + e1e_flush(); + + /* due to rare timing issues, write to TIDV/RDTR again to ensure the + * write is successful + */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); + + /* execute the writes immediately */ + e1e_flush(); +} + +static void e1000e_update_stats(struct e1000_adapter *adapter); + +void e1000e_down(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__E1000_DOWN, &adapter->state); + + /* disable receives in the hardware */ + rctl = er32(RCTL); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_stop_queue(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + + /* flush both disables and wait for them to finish */ + e1e_flush(); + usleep_range(10000, 20000); + + e1000_irq_disable(adapter); + + napi_synchronize(&adapter->napi); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + netif_carrier_off(netdev); + + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + e1000e_flush_descriptors(adapter); + e1000_clean_tx_ring(adapter->tx_ring); + e1000_clean_rx_ring(adapter->rx_ring); + + adapter->link_speed = 0; + adapter->link_duplex = 0; + + if (!pci_channel_offline(adapter->pdev)) + e1000e_reset(adapter); + + /* TODO: for power management, we could drop the link and + * pci_disable_device here. 
+ */ +} + +void e1000e_reinit_locked(struct e1000_adapter *adapter) +{ + might_sleep(); + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + e1000e_down(adapter); + e1000e_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->state); +} + +/** + * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) + * @cc: cyclecounter structure + **/ +static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) +{ + struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, + cc); + struct e1000_hw *hw = &adapter->hw; + cycle_t systim; + + /* latch SYSTIMH on read of SYSTIML */ + systim = (cycle_t)er32(SYSTIML); + systim |= (cycle_t)er32(SYSTIMH) << 32; + + return systim; +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN; + adapter->rx_ps_bsize0 = 128; + adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + adapter->tx_ring_count = E1000_DEFAULT_TXD; + adapter->rx_ring_count = E1000_DEFAULT_RXD; + + spin_lock_init(&adapter->stats64_lock); + + e1000e_set_interrupt_capability(adapter); + + if (e1000_alloc_queues(adapter)) + return -ENOMEM; + + /* Setup hardware time stamping cyclecounter */ + if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { + adapter->cc.read = e1000e_cyclecounter_read; + adapter->cc.mask = CLOCKSOURCE_MASK(64); + adapter->cc.mult = 1; + /* cc.shift set in e1000e_get_base_tininca() */ + + spin_lock_init(&adapter->systim_lock); + INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + set_bit(__E1000_DOWN, &adapter->state); + return 0; +} + +/** + * e1000_intr_msi_test - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + e_dbg("icr is %08X\n", icr); + if (icr & E1000_ICR_RXSEQ) { + adapter->flags &= ~FLAG_MSI_TEST_FAILED; + /* Force memory writes to complete before acknowledging the + * interrupt is handled. 
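+ * [Editor's note, not part of the original driver: the wmb() below pairs
+ * with the rmb() in e1000_test_msi_interrupt(), which re-reads
+ * adapter->flags only after the test interrupt has had a chance to
+ * fire and clear FLAG_MSI_TEST_FAILED here.]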
+ */ + wmb(); + } + + return IRQ_HANDLED; +} + +/** + * e1000_test_msi_interrupt - Returns 0 for successful test + * @adapter: board private struct + * + * code flow taken from tg3.c + **/ +static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + int err; + + /* poll_enable hasn't been called yet, so don't need disable */ + /* clear any pending events */ + er32(ICR); + + /* free the real vector and request a test handler */ + e1000_free_irq(adapter); + e1000e_reset_interrupt_capability(adapter); + + /* Assume that the test fails, if it succeeds then the test + * MSI irq handler will unset this flag + */ + adapter->flags |= FLAG_MSI_TEST_FAILED; + + err = pci_enable_msi(adapter->pdev); + if (err) + goto msi_test_failed; + + err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, + netdev->name, netdev); + if (err) { + pci_disable_msi(adapter->pdev); + goto msi_test_failed; + } + + /* Force memory writes to complete before enabling and firing an + * interrupt. + */ + wmb(); + + e1000_irq_enable(adapter); + + /* fire an unusual interrupt on the test handler */ + ew32(ICS, E1000_ICS_RXSEQ); + e1e_flush(); + msleep(100); + + e1000_irq_disable(adapter); + + rmb(); /* read flags after interrupt has been fired */ + + if (adapter->flags & FLAG_MSI_TEST_FAILED) { + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e_info("MSI interrupt test failed, using legacy interrupt.\n"); + } else { + e_dbg("MSI interrupt test succeeded!\n"); + } + + free_irq(adapter->pdev->irq, netdev); + pci_disable_msi(adapter->pdev); + +msi_test_failed: + e1000e_set_interrupt_capability(adapter); + return e1000_request_irq(adapter); +} + +/** + * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored + * @adapter: board private struct + * + * code flow taken from tg3.c, called with e1000 interrupts disabled. + **/ +static int e1000_test_msi(struct e1000_adapter *adapter) +{ + int err; + u16 pci_cmd; + + if (!(adapter->flags & FLAG_MSI_ENABLED)) + return 0; + + /* disable SERR in case the MSI write causes a master abort */ + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_SERR) + pci_write_config_word(adapter->pdev, PCI_COMMAND, + pci_cmd & ~PCI_COMMAND_SERR); + + err = e1000_test_msi_interrupt(adapter); + + /* re-enable SERR */ + if (pci_cmd & PCI_COMMAND_SERR) { + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_SERR; + pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); + } + + return err; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. 
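+ *
+ * [Editor's annotation, not in the original source: this is the driver's
+ *  ndo_open callback, so it runs when the interface is brought up, e.g.
+ *  via "ip link set eth0 up" (interface name assumed).]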
+ **/ +static int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->state)) + return -EBUSY; + + pm_runtime_get_sync(&pdev->dev); + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000e_setup_tx_resources(adapter->tx_ring); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000e_setup_rx_resources(adapter->rx_ring); + if (err) + goto err_setup_rx; + + /* If AMT is enabled, let the firmware know that the network + * interface is now open and reset the part to a known state. + */ + if (adapter->flags & FLAG_HAS_AMT) { + e1000e_get_hw_control(adapter); + e1000e_reset(adapter); + } + + e1000e_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) + e1000_update_mng_vlan(adapter); + + /* DMA latency requirement to workaround jumbo issue */ + pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. + */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Work around PCIe errata with MSI interrupts causing some chipsets to + * ignore e1000e MSI messages, which means we need to test our MSI + * interrupt now + */ + if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { + err = e1000_test_msi(adapter); + if (err) { + e_err("Interrupt allocation failed\n"); + goto err_req_irq; + } + } + + /* From here on the code is the same as e1000e_up() */ + clear_bit(__E1000_DOWN, &adapter->state); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + adapter->tx_hang_recheck = false; + netif_start_queue(netdev); + + adapter->idle_check = true; + hw->mac.get_link_status = true; + pm_runtime_put(&pdev->dev); + + /* fire a link status change interrupt to start the watchdog */ + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); + else + ew32(ICS, E1000_ICS_LSC); + + return 0; + +err_req_irq: + e1000e_release_hw_control(adapter); + e1000_power_down_phy(adapter); + e1000e_free_rx_resources(adapter->rx_ring); +err_setup_rx: + e1000e_free_tx_resources(adapter->tx_ring); +err_setup_tx: + e1000e_reset(adapter); + pm_runtime_put_sync(&pdev->dev); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. 
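+ *
+ * [Editor's note, not part of the original driver: the counterpart of
+ *  e1000_open() above, reached through ndo_stop when the interface is
+ *  taken down, e.g. "ip link set eth0 down".]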
+ **/ +static int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->state) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + + pm_runtime_get_sync(&pdev->dev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) { + e1000e_down(adapter); + e1000_free_irq(adapter); + } + + napi_disable(&adapter->napi); + + e1000_power_down_phy(adapter); + + e1000e_free_tx_resources(adapter->tx_ring); + e1000e_free_rx_resources(adapter->rx_ring); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + + /* If AMT is enabled, let the firmware know that the network + * interface is now closed + */ + if ((adapter->flags & FLAG_HAS_AMT) && + !test_bit(__E1000_TESTING, &adapter->state)) + e1000e_release_hw_control(adapter); + + pm_qos_remove_request(&adapter->netdev->pm_qos_req); + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); + + hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); + + if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { + /* activate the work around */ + e1000e_set_laa_state_82571(&adapter->hw, 1); + + /* Hold a copy of the LAA in RAR[14] This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed (in e1000_watchdog), the actual LAA is in one + * of the RARs and no incoming packets directed to this port + * are dropped. Eventually the LAA will be in RAR[0] and + * RAR[14] + */ + hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, + adapter->hw.mac.rar_entry_count - 1); + } + + return 0; +} + +/** + * e1000e_update_phy_task - work thread to update phy + * @work: pointer to our work struct + * + * this worker thread exists because we must acquire a + * semaphore to read the phy, which we could msleep while + * waiting for it, and we can't msleep in a timer. 
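+ *
+ * [Editor's annotation, not in the original source: the classic
+ *  timer -> workqueue bounce: e1000_update_phy_info() below runs in
+ *  timer (softirq) context where sleeping is forbidden, so it only
+ *  calls schedule_work() and the PHY access that may sleep on a
+ *  semaphore happens here in process context.]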
+ **/ +static void e1000e_update_phy_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + update_phy_task); + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1000_get_phy_info(&adapter->hw); +} + +/** + * e1000_update_phy_info - timre call-back to update PHY info + * @data: pointer to adapter cast into an unsigned long + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + **/ +static void e1000_update_phy_info(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *)data; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + schedule_work(&adapter->update_phy_task); +} + +/** + * e1000e_update_phy_stats - Update the PHY statistics counters + * @adapter: board private structure + * + * Read/clear the upper 16-bit PHY registers and read/accumulate lower + **/ +static void e1000e_update_phy_stats(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 ret_val; + u16 phy_data; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + + /* A page set is expensive so check if already on desired page. + * If not, set to the page with the PHY status registers. + */ + hw->phy.addr = 1; + ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + &phy_data); + if (ret_val) + goto release; + if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { + ret_val = hw->phy.ops.set_page(hw, + HV_STATS_PAGE << IGP_PAGE_SHIFT); + if (ret_val) + goto release; + } + + /* Single Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.scc += phy_data; + + /* Excessive Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); + if (!ret_val) + adapter->stats.ecol += phy_data; + + /* Multiple Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.mcc += phy_data; + + /* Late Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); + if (!ret_val) + adapter->stats.latecol += phy_data; + + /* Collision Count - also used for adaptive IFS */ + hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); + if (!ret_val) + hw->mac.collision_delta = phy_data; + + /* Defer Count */ + hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.dc += phy_data; + + /* Transmit with no CRS */ + hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); + if (!ret_val) + adapter->stats.tncrs += phy_data; + +release: + hw->phy.ops.release(hw); +} + +/** + * e1000e_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +static void e1000e_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
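+ * [Editor's note, not part of the original driver: callers serialize
+ * this function with adapter->stats64_lock (see e1000e_down() above);
+ * the early returns below just avoid touching hardware counters while
+ * link is down or the PCI channel is offline.]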
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorc += er32(GORCL); + er32(GORCH); /* Clear gorc */ + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.mpc += er32(MPC); + + /* Half-duplex statistics */ + if (adapter->link_duplex == HALF_DUPLEX) { + if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { + e1000e_update_phy_stats(adapter); + } else { + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + + hw->mac.collision_delta = er32(COLC); + + if ((hw->mac.type != e1000_82574) && + (hw->mac.type != e1000_82583)) + adapter->stats.tncrs += er32(TNCRS); + } + adapter->stats.colc += hw->mac.collision_delta; + } + + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotc += er32(GOTCL); + er32(GOTCH); /* Clear gotc */ + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->mac.tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->mac.tx_packet_delta; + + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; + netdev->stats.rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_aborted_errors = adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + + /* Correctable ECC Errors */ + if (hw->mac.type == e1000_pch_lpt) { + u32 pbeccsts = er32(PBECCSTS); + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + } +} + +/** + * e1000_phy_read_status - Update the PHY register status snapshot + * @adapter: board private structure + **/ +static void e1000_phy_read_status(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_phy_regs *phy = &adapter->phy_regs; + + if ((er32(STATUS) & E1000_STATUS_LU) && + (adapter->hw.phy.media_type == 
e1000_media_type_copper)) { + int ret_val; + + pm_runtime_get_sync(&adapter->pdev->dev); + ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); + ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); + ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); + ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); + ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); + ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); + ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); + ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); + if (ret_val) + e_warn("Error reading PHY register\n"); + pm_runtime_put_sync(&adapter->pdev->dev); + } else { + /* Do not read PHY registers if link is not up + * Set values to typical power-on defaults + */ + phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); + phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | + BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | + BMSR_ERCAP); + phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | + ADVERTISE_ALL | ADVERTISE_CSMA); + phy->lpa = 0; + phy->expansion = EXPANSION_ENABLENPAGE; + phy->ctrl1000 = ADVERTISE_1000FULL; + phy->stat1000 = 0; + phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); + } +} + +static void e1000_print_link_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + + /* Link status message must follow this format for user tools */ + pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->netdev->name, adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", + (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : + (ctrl & E1000_CTRL_RFCE) ? "Rx" : + (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"); +} + +static bool e1000e_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + s32 ret_val = 0; + + /* get_link_status is set on LSC (link status) interrupt or + * Rx sequence error interrupt. 
get_link_status will stay + * false until the check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (hw->mac.get_link_status) { + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + ret_val = hw->mac.ops.check_for_link(hw); + link_active = adapter->hw.mac.serdes_has_link; + break; + default: + case e1000_media_type_unknown: + break; + } + + if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && + (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { + /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ + e_info("Gigabit has been disabled, downgrading speed\n"); + } + + return link_active; +} + +static void e1000e_enable_receives(struct e1000_adapter *adapter) +{ + /* make sure the receive unit is started */ + if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && + (adapter->flags & FLAG_RESTART_NOW)) { + struct e1000_hw *hw = &adapter->hw; + u32 rctl = er32(RCTL); + ew32(RCTL, rctl | E1000_RCTL_EN); + adapter->flags &= ~FLAG_RESTART_NOW; + } +} + +static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* With 82574 controllers, PHY needs to be checked periodically + * for hung state and reset, if two calls return true + */ + if (e1000_check_phy_82574(hw)) + adapter->phy_hang_count++; + else + adapter->phy_hang_count = 0; + + if (adapter->phy_hang_count > 1) { + adapter->phy_hang_count = 0; + schedule_work(&adapter->reset_task); + } +} + +/** + * e1000_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void e1000_watchdog(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *)data; + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); + + /* TODO: make this use queue_delayed_work() */ +} + +static void e1000_watchdog_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_phy_info *phy = &adapter->hw.phy; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_hw *hw = &adapter->hw; + u32 link, tctl; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + link = e1000e_has_link(adapter); + if ((netif_carrier_ok(netdev)) && link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + e1000e_enable_receives(adapter); + goto link_up; + } + + if ((e1000e_enable_tx_pkt_filtering(hw)) && + (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) + e1000_update_mng_vlan(adapter); + + if (link) { + if (!netif_carrier_ok(netdev)) { + bool txb2b = true; + + /* Cancel scheduled suspend requests. 
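+			 * (e1000_idle() may have queued a runtime suspend
+			 * while link was down; resuming the parent keeps
+			 * the device in D0 while carrier is up.)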
*/ + pm_runtime_resume(netdev->dev.parent); + + /* update snapshot of PHY registers on LSC */ + e1000_phy_read_status(adapter); + mac->ops.get_link_up_info(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + e1000_print_link_info(adapter); + + /* check if SmartSpeed worked */ + e1000e_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, + "Link Speed was downgraded by SmartSpeed\n"); + + /* On supported PHYs, check for duplex mismatch only + * if link has autonegotiated at 10/100 half + */ + if ((hw->phy.type == e1000_phy_igp_3 || + hw->phy.type == e1000_phy_bm) && + (hw->mac.autoneg == true) && + (adapter->link_speed == SPEED_10 || + adapter->link_speed == SPEED_100) && + (adapter->link_duplex == HALF_DUPLEX)) { + u16 autoneg_exp; + + e1e_rphy(hw, MII_EXPANSION, &autoneg_exp); + + if (!(autoneg_exp & EXPANSION_NWAY)) + e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); + } + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = false; + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + txb2b = false; + adapter->tx_timeout_factor = 10; + break; + } + + /* workaround: re-program speed mode bit after + * link-up event + */ + if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && + !txb2b) { + u32 tarc0; + tarc0 = er32(TARC(0)); + tarc0 &= ~SPEED_MODE_BIT; + ew32(TARC(0), tarc0); + } + + /* disable TSO for pcie and 10/100 speeds, to avoid + * some hardware issues + */ + if (!(adapter->flags & FLAG_TSO_FORCE)) { + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + e_info("10/100 speed: disabling TSO\n"); + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + break; + case SPEED_1000: + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + break; + default: + /* oops */ + break; + } + } + + /* enable transmits in the hardware, need to do this + * after setting TARC(0) + */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + /* Perform any post-link-up configuration before + * reporting link up. + */ + if (phy->ops.cfg_on_link_up) + phy->ops.cfg_on_link_up(hw); + + netif_carrier_on(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + /* Link status message must follow this format */ + pr_info("%s NIC Link is Down\n", adapter->netdev->name); + netif_carrier_off(netdev); + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* The link is lost so the controller stops DMA. + * If there is queued Tx work that cannot be done + * or if on an 8000ES2LAN which requires a Rx packet + * buffer work-around on link down event, reset the + * controller to flush the Tx/Rx packet buffers. + * (Do the reset outside of interrupt context). 
+ */ + if ((adapter->flags & FLAG_RX_NEEDS_RESTART) || + (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) + adapter->flags |= FLAG_RESTART_NOW; + else + pm_schedule_suspend(netdev->dev.parent, + LINK_TIMEOUT); + } + } + +link_up: + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + + mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + mac->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorc = adapter->stats.gorc - adapter->gorc_old; + adapter->gorc_old = adapter->stats.gorc; + adapter->gotc = adapter->stats.gotc - adapter->gotc_old; + adapter->gotc_old = adapter->stats.gotc; + spin_unlock(&adapter->stats64_lock); + + if (adapter->flags & FLAG_RESTART_NOW) { + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + + e1000e_update_adaptive(&adapter->hw); + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotc + adapter->gorc) / 10000; + u32 dif = (adapter->gotc > adapter->gorc ? + adapter->gotc - adapter->gorc : + adapter->gorc - adapter->gotc) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + e1000e_write_itr(adapter, itr); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->msix_entries) + ew32(ICS, adapter->rx_ring->ims_val); + else + ew32(ICS, E1000_ICS_RXDMT0); + + /* flush pending descriptors to memory before detecting Tx hang */ + e1000e_flush_descriptors(adapter); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* With 82571 controllers, LAA may be overwritten due to controller + * reset from the other port. 
Set the appropriate LAA in RAR[0] + */ + if (e1000e_get_laa_state_82571(hw)) + hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); + + if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) + e1000e_check_82574_phy_workaround(adapter); + + /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */ + if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { + if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && + (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) { + er32(RXSTMPH); + adapter->rx_hwtstamp_cleared++; + } else { + adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; + } + } + + /* Reset the timer */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_HWTSTAMP 0x00000020 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) +{ + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + + if (!skb_is_gso(skb)) + return 0; + + if (skb_header_cloned(skb)) { + int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); + + if (err) + return err; + } + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (skb->protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return 1; +} + +static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + __be16 protocol; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return 0; + + if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) + protocol = 
vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; + else + protocol = skb->protocol; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn("checksum_partial proto=%x!\n", + be16_to_cpu(protocol)); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(cmd_len); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return 1; +} + +static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, + unsigned int first, unsigned int max_per_txd, + unsigned int nr_frags) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info; + unsigned int len = skb_headlen(skb); + unsigned int offset = 0, size, count = 0, i; + unsigned int f, bytecount, segs; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + buffer_info->mapped_as_page = false; + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + + len -= size; + offset += size; + count++; + + if (len) { + i++; + if (i == tx_ring->count) + i = 0; + } + } + + for (f = 0; f < nr_frags; f++) { + const struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = skb_frag_size(frag); + offset = 0; + + while (len) { + i++; + if (i == tx_ring->count) + i = 0; + + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, + DMA_TO_DEVICE); + buffer_info->mapped_as_page = true; + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ? 
: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "Tx DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_put_txbuf(tx_ring, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (tx_flags & E1000_TX_FLAGS_TSO) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (tx_flags & E1000_TX_FLAGS_IPV4) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_CSUM) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_VLAN) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_EXTCMD_TSTAMP; + } + + i = tx_ring->next_to_use; + + do { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = cpu_to_le32(txd_lower | + buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + + i++; + if (i == tx_ring->count) + i = 0; + } while (--count > 0); + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
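+	 * Concretely: the descriptor stores above must be visible before
+	 * the tail write below, or the hardware could fetch a descriptor
+	 * whose fields are still stale.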
+ */ + wmb(); + + tx_ring->next_to_use = i; + + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_tdt_wa(tx_ring, i); + else + writel(i, tx_ring->tail); + + /* we need this if more than one processor can write to our tail + * at a time, it synchronizes IO on IA64/Altix systems + */ + mmiowb(); +} + +#define MINIMUM_DHCP_PACKET_SIZE 282 +static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 length, offset; + + if (vlan_tx_tag_present(skb) && + !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && + (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) + return 0; + + if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) + return 0; + + if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) + return 0; + + { + const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); + struct udphdr *udp; + + if (ip->protocol != IPPROTO_UDP) + return 0; + + udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); + if (ntohs(udp->dest) != 67) + return 0; + + offset = (u8 *)udp + 8 - skb->data; + length = skb->len - offset; + return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); + } + + return 0; +} + +static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + + netif_stop_queue(adapter->netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (e1000_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(adapter->netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) +{ + BUG_ON(size > tx_ring->count); + + if (e1000_desc_unused(tx_ring) >= size) + return 0; + return __e1000_maybe_stop_tx(tx_ring, size); +} + +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int first; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + + if (test_bit(__E1000_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* The minimum packet size with TCTL.PSP set is 17 bytes so + * pad skb in order to meet this minimum size requirement + */ + if (unlikely(skb->len < 17)) { + if (skb_pad(skb, 17 - skb->len)) + return NETDEV_TX_OK; + skb->len = 17; + skb_set_tail_pointer(skb, 17); + } + + mss = skb_shinfo(skb)->gso_size; + if (mss) { + u8 hdr_len; + + /* TSO Workaround for 82571/2/3 Controllers -- if skb->data + * points to just header, pull a few bytes of payload from + * frags into skb->data + */ + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + /* we do this workaround for ES2LAN, but it is un-necessary, + * avoiding it could save a lot of cycles + */ + if (skb->data_len && (hdr_len == len)) { + unsigned int pull_size; + + pull_size = min_t(unsigned int, 4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err("__pskb_pull_tail failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + } + } + + /* reserve a descriptor for the 
offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), + adapter->tx_fifo_limit); + + if (adapter->hw.mac.tx_pkt_filtering) + e1000_transfer_dhcp_info(adapter, skb); + + /* need: count + 2 desc gap to keep tail from touching + * head, otherwise try next time + */ + if (e1000_maybe_stop_tx(tx_ring, count + 2)) + return NETDEV_TX_BUSY; + + if (vlan_tx_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(tx_ring, skb); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= E1000_TX_FLAGS_TSO; + else if (e1000_tx_csum(tx_ring, skb)) + tx_flags |= E1000_TX_FLAGS_CSUM; + + /* Old method was to assume IPv4 packet by default if TSO was enabled. + * 82571 hardware supports TSO capabilities for IPv6 as well... + * no longer assume, we must. + */ + if (skb->protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + /* if count is 0 then mapping error has occurred */ + count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, + nr_frags); + if (count) { + if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + !adapter->tx_hwtstamp_skb)) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= E1000_TX_FLAGS_HWTSTAMP; + adapter->tx_hwtstamp_skb = skb_get(skb); + schedule_work(&adapter->tx_hwtstamp_work); + } else { + skb_tx_timestamp(skb); + } + + netdev_sent_queue(netdev, skb->len); + e1000_tx_queue(tx_ring, tx_flags, count); + /* Make sure there is space in the ring for the next send. */ + e1000_maybe_stop_tx(tx_ring, + (MAX_SKB_FRAGS * + DIV_ROUND_UP(PAGE_SIZE, + adapter->tx_fifo_limit) + 2)); + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void e1000_tx_timeout(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter; + adapter = container_of(work, struct e1000_adapter, reset_task); + + /* don't run the task if already down */ + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + if (!(adapter->flags & FLAG_RESTART_NOW)) { + e1000e_dump(adapter); + e_err("Reset adapter unexpectedly\n"); + } + e1000e_reinit_locked(adapter); +} + +/** + * e1000_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + * Returns the address of the device statistics structure. 
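+ * Reached via dev_get_stats(); a minimal in-kernel usage sketch:
+ *
+ *	struct rtnl_link_stats64 stats;
+ *
+ *	dev_get_stats(netdev, &stats);	/* ends up in this routine */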
+ **/ +struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + memset(stats, 0, sizeof(struct rtnl_link_stats64)); + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + /* Fill out the OS statistics structure */ + stats->rx_bytes = adapter->stats.gorc; + stats->rx_packets = adapter->stats.gprc; + stats->tx_bytes = adapter->stats.gotc; + stats->tx_packets = adapter->stats.gptc; + stats->multicast = adapter->stats.mprc; + stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; + stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; + stats->rx_crc_errors = adapter->stats.crcerrs; + stats->rx_frame_errors = adapter->stats.algnerrc; + stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; + stats->tx_aborted_errors = adapter->stats.ecol; + stats->tx_window_errors = adapter->stats.latecol; + stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + spin_unlock(&adapter->stats64_lock); + return stats; +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Jumbo frame support */ + if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && + !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { + e_err("Jumbo Frames not supported.\n"); + return -EINVAL; + } + + /* Supported frame sizes */ + if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) || + (max_frame > adapter->max_hw_frame_size)) { + e_err("Unsupported MTU setting\n"); + return -EINVAL; + } + + /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ + if ((adapter->hw.mac.type >= e1000_pch2lan) && + !(adapter->flags2 & FLAG2_CRC_STRIPPING) && + (new_mtu > ETH_DATA_LEN)) { + e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ + adapter->max_frame_size = max_frame; + e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + if (netif_running(netdev)) + e1000e_down(adapter); + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. 
RXBUFFER_2048 --> size-4096 slab + * However with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= 2048) + adapter->rx_buffer_len = 2048; + else + adapter->rx_buffer_len = 4096; + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || + (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN)) + adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + + ETH_FCS_LEN; + + if (netif_running(netdev)) + e1000e_up(adapter); + else + e1000e_reset(adapter); + + clear_bit(__E1000_RESETTING, &adapter->state); + + return 0; +} + +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (adapter->hw.phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + e1000_phy_read_status(adapter); + + switch (data->reg_num & 0x1F) { + case MII_BMCR: + data->val_out = adapter->phy_regs.bmcr; + break; + case MII_BMSR: + data->val_out = adapter->phy_regs.bmsr; + break; + case MII_PHYSID1: + data->val_out = (adapter->hw.phy.id >> 16); + break; + case MII_PHYSID2: + data->val_out = (adapter->hw.phy.id & 0xFFFF); + break; + case MII_ADVERTISE: + data->val_out = adapter->phy_regs.advertise; + break; + case MII_LPA: + data->val_out = adapter->phy_regs.lpa; + break; + case MII_EXPANSION: + data->val_out = adapter->phy_regs.expansion; + break; + case MII_CTRL1000: + data->val_out = adapter->phy_regs.ctrl1000; + break; + case MII_STAT1000: + data->val_out = adapter->phy_regs.stat1000; + break; + case MII_ESTATUS: + data->val_out = adapter->phy_regs.estatus; + break; + default: + return -EIO; + } + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * e1000e_hwtstamp_ioctl - control hardware time stamping + * @netdev: network interface device structure + * @ifreq: interface request + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware filters. + * Not all combinations are supported, in particular event type has to be + * specified. Matching the kind of event packet is not supported, with the + * exception of "all V2 events regardless of level 2 or 4". 
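+ *
+ * A minimal userspace sketch of driving this path (interface name and
+ * filter choice are illustrative only; sock_fd is any open socket):
+ *
+ *	struct hwtstamp_config cfg = {
+ *		.tx_type = HWTSTAMP_TX_ON,
+ *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
+ *	};
+ *	struct ifreq ifr = { 0 };
+ *
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	ifr.ifr_data = (void *)&cfg;
+ *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);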
+ **/ +static int e1000e_hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int ret_val; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + adapter->hwtstamp_config = config; + + ret_val = e1000e_config_hwtstamp(adapter); + if (ret_val) + return ret_val; + + config = adapter->hwtstamp_config; + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* With V2 type filters which specify a Sync or Delay Request, + * Path Delay Request/Response messages are also time stamped + * by hardware so notify the caller the requested packets plus + * some others are time stamped. + */ + config.rx_filter = HWTSTAMP_FILTER_SOME; + break; + default: + break; + } + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + case SIOCSHWTSTAMP: + return e1000e_hwtstamp_ioctl(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) +{ + struct e1000_hw *hw = &adapter->hw; + u32 i, mac_reg; + u16 phy_reg, wuc_enable; + int retval; + + /* copy MAC RARs to PHY RARs */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + retval = hw->phy.ops.acquire(hw); + if (retval) { + e_err("Could not acquire PHY\n"); + return retval; + } + + /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ + retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); + if (retval) + goto release; + + /* copy MAC MTA to PHY MTA - only needed for pchlan */ + for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { + mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); + hw->phy.ops.write_reg_page(hw, BM_MTA(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, + (u16)((mac_reg >> 16) & 0xFFFF)); + } + + /* configure PHY Rx Control register */ + hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); + mac_reg = er32(RCTL); + if (mac_reg & E1000_RCTL_UPE) + phy_reg |= BM_RCTL_UPE; + if (mac_reg & E1000_RCTL_MPE) + phy_reg |= BM_RCTL_MPE; + phy_reg &= ~(BM_RCTL_MO_MASK); + if (mac_reg & E1000_RCTL_MO_3) + phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) + << BM_RCTL_MO_SHIFT); + if (mac_reg & E1000_RCTL_BAM) + phy_reg |= BM_RCTL_BAM; + if (mac_reg & E1000_RCTL_PMCF) + phy_reg |= BM_RCTL_PMCF; + mac_reg = er32(CTRL); + if (mac_reg & E1000_CTRL_RFCE) + phy_reg |= BM_RCTL_RFCE; + hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); + + /* enable PHY wakeup in MAC register */ + ew32(WUFC, wufc); + ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN); + + /* configure and enable PHY wakeup in PHY registers */ + hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc); + hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN); + + /* activate PHY wakeup */ + wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; + retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); + if (retval) + e_err("Could not set PHY Host Wakeup bit\n"); +release: + hw->phy.ops.release(hw); + + return retval; +} + +static int 
__e1000_shutdown(struct pci_dev *pdev, bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + /* Runtime suspend should only enable wakeup for link changes */ + u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; + int retval = 0; + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->state) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + e1000e_down(adapter); + e1000_free_irq(adapter); + } + e1000e_reset_interrupt_capability(adapter); + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000e_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = er32(RCTL); + rctl |= E1000_RCTL_MPE; + ew32(RCTL, rctl); + } + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_ADVD3WUC; + if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) + ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + + if (adapter->hw.phy.media_type == e1000_media_type_fiber || + adapter->hw.phy.media_type == + e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + if (adapter->flags & FLAG_IS_ICH) + e1000_suspend_workarounds_ich8lan(&adapter->hw); + + /* Allow time for pending master requests to run */ + e1000e_disable_pcie_master(&adapter->hw); + + if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { + /* enable wakeup by the PHY */ + retval = e1000_init_phy_wakeup(adapter, wufc); + if (retval) + return retval; + } else { + /* enable wakeup by the MAC */ + ew32(WUFC, wufc); + ew32(WUC, E1000_WUC_PME_EN); + } + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + } + + if (adapter->hw.phy.type == e1000_phy_igp_3) + e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + e1000e_release_hw_control(adapter); + + /* The pci-e switch on some quad port adapters will report a + * correctable error when the MAC transitions from D0 to D3. To + * prevent this we need to mask off the correctable errors on the + * downstream port of the pci-e switch. + */ + if (adapter->flags & FLAG_IS_QUAD_PORT) { + struct pci_dev *us_dev = pdev->bus->self; + u16 devctl; + + pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl); + pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, + (devctl & ~PCI_EXP_DEVCTL_CERE)); + + pci_save_state(pdev); + pci_prepare_to_sleep(pdev); + + pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); + } + + return 0; +} + +#ifdef CONFIG_PCIEASPM +static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + pci_disable_link_state_locked(pdev, state); +} +#else +static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + u16 aspm_ctl = 0; + + if (state & PCIE_LINK_STATE_L0S) + aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L0S; + if (state & PCIE_LINK_STATE_L1) + aspm_ctl |= PCI_EXP_LNKCTL_ASPM_L1; + + /* Both device and parent should have the same ASPM setting. + * Disable ASPM in downstream component first and then upstream. 
+ */ + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_ctl); + + if (pdev->bus->self) + pcie_capability_clear_word(pdev->bus->self, PCI_EXP_LNKCTL, + aspm_ctl); +} +#endif +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + dev_info(&pdev->dev, "Disabling ASPM %s %s\n", + (state & PCIE_LINK_STATE_L0S) ? "L0s" : "", + (state & PCIE_LINK_STATE_L1) ? "L1" : ""); + + __e1000e_disable_aspm(pdev, state); +} + +#ifdef CONFIG_PM +static bool e1000e_pm_ready(struct e1000_adapter *adapter) +{ + return !!adapter->tx_ring->buffer_info; +} + +static int __e1000_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 aspm_disable_flag = 0; + u32 err; + + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + pci_set_master(pdev); + + e1000e_set_interrupt_capability(adapter); + if (netif_running(netdev)) { + err = e1000_request_irq(adapter); + if (err) + return err; + } + + if (hw->mac.type >= e1000_pch2lan) + e1000_resume_workarounds_pchlan(&adapter->hw); + + e1000e_power_up_phy(adapter); + + /* report the system wakeup cause from S3/S4 */ + if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { + u16 phy_data; + + e1e_rphy(&adapter->hw, BM_WUS, &phy_data); + if (phy_data) { + e_info("PHY Wakeup cause - %s\n", + phy_data & E1000_WUS_EX ? "Unicast Packet" : + phy_data & E1000_WUS_MC ? "Multicast Packet" : + phy_data & E1000_WUS_BC ? "Broadcast Packet" : + phy_data & E1000_WUS_MAG ? "Magic Packet" : + phy_data & E1000_WUS_LNKC ? + "Link Status Change" : "other"); + } + e1e_wphy(&adapter->hw, BM_WUS, ~0); + } else { + u32 wus = er32(WUS); + if (wus) { + e_info("MAC Wakeup cause - %s\n", + wus & E1000_WUS_EX ? "Unicast Packet" : + wus & E1000_WUS_MC ? "Multicast Packet" : + wus & E1000_WUS_BC ? "Broadcast Packet" : + wus & E1000_WUS_MAG ? "Magic Packet" : + wus & E1000_WUS_LNKC ? "Link Status Change" : + "other"); + } + ew32(WUS, ~0); + } + + e1000e_reset(adapter); + + e1000_init_manageability_pt(adapter); + + if (netif_running(netdev)) + e1000e_up(adapter); + + netif_device_attach(netdev); + + /* If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. 
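+	 * (DRV_LOAD is the bit e1000e_get_hw_control() sets in CTRL_EXT
+	 * or SWSM so the firmware knows the driver owns the hardware.)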
+ */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int e1000_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + return __e1000_shutdown(pdev, false); +} + +static int e1000_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (e1000e_pm_ready(adapter)) + adapter->idle_check = true; + + return __e1000_resume(pdev); +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM_RUNTIME +static int e1000_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!e1000e_pm_ready(adapter)) + return 0; + + return __e1000_shutdown(pdev, true); +} + +static int e1000_idle(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!e1000e_pm_ready(adapter)) + return 0; + + if (adapter->idle_check) { + adapter->idle_check = false; + if (!e1000e_has_link(adapter)) + pm_schedule_suspend(dev, MSEC_PER_SEC); + } + + return -EBUSY; +} + +static int e1000_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!e1000e_pm_ready(adapter)) + return 0; + + adapter->idle_check = !dev->power.runtime_auto; + return __e1000_resume(pdev); +} +#endif /* CONFIG_PM_RUNTIME */ +#endif /* CONFIG_PM */ + +static void e1000_shutdown(struct pci_dev *pdev) +{ + __e1000_shutdown(pdev, false); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->msix_entries) { + int vector, msix_irq; + + vector = 0; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_intr_msix_rx(msix_irq, netdev); + enable_irq(msix_irq); + + vector++; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_intr_msix_tx(msix_irq, netdev); + enable_irq(msix_irq); + + vector++; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_msix_other(msix_irq, netdev); + enable_irq(msix_irq); + } + + return IRQ_HANDLED; +} + +/** + * e1000_netpoll + * @netdev: network interface device structure + * + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. 
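+ *
+ * For reference, a typical netconsole setup that exercises this hook
+ * looks something like (addresses and names illustrative):
+ *
+ *	netconsole=4444@10.0.0.1/eth0,9353@10.0.0.2/12:34:56:78:9a:bc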
+ */
+static void e1000_netpoll(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	switch (adapter->int_mode) {
+	case E1000E_INT_MODE_MSIX:
+		e1000_intr_msix(adapter->pdev->irq, netdev);
+		break;
+	case E1000E_INT_MODE_MSI:
+		disable_irq(adapter->pdev->irq);
+		e1000_intr_msi(adapter->pdev->irq, netdev);
+		enable_irq(adapter->pdev->irq);
+		break;
+	default: /* E1000E_INT_MODE_LEGACY */
+		disable_irq(adapter->pdev->irq);
+		e1000_intr(adapter->pdev->irq, netdev);
+		enable_irq(adapter->pdev->irq);
+		break;
+	}
+}
+#endif
+
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (netif_running(netdev))
+		e1000e_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 aspm_disable_flag = 0;
+	int err;
+	pci_ers_result_t result;
+
+	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
+		aspm_disable_flag = PCIE_LINK_STATE_L0S;
+	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+		aspm_disable_flag |= PCIE_LINK_STATE_L1;
+	if (aspm_disable_flag)
+		e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pdev->state_saved = true;
+		pci_restore_state(pdev);
+		pci_set_master(pdev);
+
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
+
+		e1000e_reset(adapter);
+		ew32(WUS, ~0);
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	return result;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	e1000_init_manageability_pt(adapter);
+
+	if (netif_running(netdev)) {
+		if (e1000e_up(adapter)) {
+			dev_err(&pdev->dev,
+				"can't bring device back up after reset\n");
+			return;
+		}
+	}
+
+	netif_device_attach(netdev);
+
+	/* If the controller has AMT, do not set DRV_LOAD until the interface
+	 * is up. For all other cases, let the f/w know that the h/w is now
+	 * under the control of the driver.
+ */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); +} + +static void e1000_print_device_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 ret_val; + u8 pba_str[E1000_PBANUM_LENGTH]; + + /* print bus type/speed/width info */ + e_info("(PCI Express:2.5GT/s:%s) %pM\n", + /* bus width */ + ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : + "Width x1"), + /* MAC address */ + netdev->dev_addr); + e_info("Intel(R) PRO/%s Network Connection\n", + (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); + ret_val = e1000_read_pba_string_generic(hw, pba_str, + E1000_PBANUM_LENGTH); + if (ret_val) + strlcpy((char *)pba_str, "Unknown", sizeof(pba_str)); + e_info("MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, pba_str); +} + +static void e1000_eeprom_checks(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int ret_val; + u16 buf = 0; + + if (hw->mac.type != e1000_82573) + return; + + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); + le16_to_cpus(&buf); + if (!ret_val && (!(buf & (1 << 0)))) { + /* Deep Smart Power Down (DSPD) */ + dev_warn(&adapter->pdev->dev, + "Warning: detected DSPD enabled in EEPROM\n"); + } +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) + adapter->flags |= FLAG_TSO_FORCE; + + if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS | + NETIF_F_RXALL))) + return 0; + + if (changed & NETIF_F_RXFCS) { + if (features & NETIF_F_RXFCS) { + adapter->flags2 &= ~FLAG2_CRC_STRIPPING; + } else { + /* We need to take it back to defaults, which might mean + * stripping is still disabled at the adapter level. + */ + if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING) + adapter->flags2 |= FLAG2_CRC_STRIPPING; + else + adapter->flags2 &= ~FLAG2_CRC_STRIPPING; + } + } + + netdev->features = features; + + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + + return 0; +} + +static const struct net_device_ops e1000e_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_get_stats64 = e1000e_get_stats64, + .ndo_set_rx_mode = e1000e_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_change_mtu = e1000_change_mtu, + .ndo_do_ioctl = e1000_ioctl, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_set_features = e1000_set_features, +}; + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. 
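+ *
+ * It is invoked by the PCI core for each device matching e1000_pci_tbl
+ * once the driver has been registered via pci_register_driver().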
+ **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter; + struct e1000_hw *hw; + const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; + resource_size_t mmio_start, mmio_len; + resource_size_t flash_start, flash_len; + static int cards_found; + u16 aspm_disable_flag = 0; + int bars, i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + + if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) + pci_using_dac = 1; + } else { + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + err = dma_set_coherent_mask(&pdev->dev, + DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + } + + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_request_selected_regions_exclusive(pdev, bars, + e1000e_driver_name); + if (err) + goto err_pci_reg; + + /* AER (Advanced Error Reporting) hooks */ + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + /* PCI config space info */ + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + netdev->irq = pdev->irq; + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + hw = &adapter->hw; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ei = ei; + adapter->pba = ei->pba; + adapter->flags = ei->flags; + adapter->flags2 = ei->flags2; + adapter->hw.adapter = adapter; + adapter->hw.mac.type = ei->mac; + adapter->max_hw_frame_size = ei->max_hw_frame_size; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + mmio_start = pci_resource_start(pdev, 0); + mmio_len = pci_resource_len(pdev, 0); + + err = -EIO; + adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); + if (!adapter->hw.hw_addr) + goto err_ioremap; + + if ((adapter->flags & FLAG_HAS_FLASH) && + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) { + flash_start = pci_resource_start(pdev, 1); + flash_len = pci_resource_len(pdev, 1); + adapter->hw.flash_address = ioremap(flash_start, flash_len); + if (!adapter->hw.flash_address) + goto err_flashmap; + } + + /* Set default EEE advertisement */ + if (adapter->flags2 & FLAG2_HAS_EEE) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + + /* construct the net_device struct */ + netdev->netdev_ops = &e1000e_netdev_ops; + e1000e_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); + strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + + adapter->bd_number = cards_found++; + + e1000e_check_options(adapter); + + /* setup adapter struct */ + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, 
sizeof(hw->phy.ops)); + + err = ei->get_variants(adapter); + if (err) + goto err_hw_init; + + if ((adapter->flags & FLAG_IS_ICH) && + (adapter->flags & FLAG_READ_ONLY_NVM)) + e1000e_write_protect_nvm_ich8lan(&adapter->hw); + + hw->mac.ops.get_bus_info(&adapter->hw); + + adapter->hw.phy.autoneg_wait_to_complete = 0; + + /* Copper options */ + if (adapter->hw.phy.media_type == e1000_media_type_copper) { + adapter->hw.phy.mdix = AUTO_ALL_MODES; + adapter->hw.phy.disable_polarity_correction = 0; + adapter->hw.phy.ms_type = e1000_ms_hw_default; + } + + if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) + dev_info(&pdev->dev, + "PHY reset is blocked due to SOL/IDER session.\n"); + + /* Set initial default active device features */ + netdev->features = (NETIF_F_SG | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM); + + /* Set user-changeable features (subset of all device features) */ + netdev->hw_features = netdev->features; + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->vlan_features |= (NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_HW_CSUM); + + netdev->priv_flags |= IFF_UNICAST_FLT; + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (e1000e_enable_mng_pass_thru(&adapter->hw)) + adapter->flags |= FLAG_MNG_PT_ENABLED; + + /* before reading the NVM, reset the controller to + * put the device in a known good starting state + */ + adapter->hw.mac.ops.reset_hw(&adapter->hw); + + /* systems with ASPM and others may see the checksum fail on the first + * attempt. Let's give it a few tries + */ + for (i = 0;; i++) { + if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) + break; + if (i == 2) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + e1000_eeprom_checks(adapter); + + /* copy the MAC address */ + if (e1000e_read_mac_addr(&adapter->hw)) + dev_err(&pdev->dev, + "NVM Read Error while reading MAC address\n"); + + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", + netdev->dev_addr); + err = -EIO; + goto err_eeprom; + } + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = e1000_watchdog; + adapter->watchdog_timer.data = (unsigned long)adapter; + + init_timer(&adapter->phy_info_timer); + adapter->phy_info_timer.function = e1000_update_phy_info; + adapter->phy_info_timer.data = (unsigned long)adapter; + + INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); + INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); + INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); + INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); + + /* Initialize link parameters. 
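+	 * The 0x2f advertisement mask below decodes to 10/100 half+full
+	 * plus 1000-full, i.e. everything except 1000-half.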
User can change them with ethtool */ + adapter->hw.mac.autoneg = 1; + adapter->fc_autoneg = true; + adapter->hw.fc.requested_mode = e1000_fc_default; + adapter->hw.fc.current_mode = e1000_fc_default; + adapter->hw.phy.autoneg_advertised = 0x2f; + + /* ring size defaults */ + adapter->rx_ring->count = E1000_DEFAULT_RXD; + adapter->tx_ring->count = E1000_DEFAULT_TXD; + + /* Initial Wake on LAN setting - If APM wake is enabled in + * the EEPROM, enable the ACPI Magic Packet filter + */ + if (adapter->flags & FLAG_APME_IN_WUC) { + /* APME bit in EEPROM is mapped to WUC.APME */ + eeprom_data = er32(WUC); + eeprom_apme_mask = E1000_WUC_APME; + if ((hw->mac.type > e1000_ich10lan) && + (eeprom_data & E1000_WUC_PHY_WAKE)) + adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; + } else if (adapter->flags & FLAG_APME_IN_CTRL3) { + if (adapter->flags & FLAG_APME_CHECK_PORT_B && + (adapter->hw.bus.func == 1)) + e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B, + 1, &eeprom_data); + else + e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A, + 1, &eeprom_data); + } + + /* fetch WoL from EEPROM */ + if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + if (!(adapter->flags & FLAG_HAS_WOL)) + adapter->eeprom_wol = 0; + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || + (hw->mac.ops.check_mng_mode(hw))) + device_wakeup_enable(&pdev->dev); + + /* save off EEPROM version number */ + e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); + + /* reset the hardware with the new settings */ + e1000e_reset(adapter); + + /* If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. + */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); + + strlcpy(netdev->name, "eth%d", sizeof(netdev->name)); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + /* init PTP hardware clock */ + e1000e_ptp_init(adapter); + + e1000_print_device_info(adapter); + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_register: + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_release_hw_control(adapter); +err_eeprom: + if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw)) + e1000_phy_hw_reset(&adapter->hw); +err_hw_init: + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_sw_init: + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); + e1000e_reset_interrupt_capability(adapter); +err_flashmap: + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. 
This could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + bool down = test_bit(__E1000_DOWN, &adapter->state); + + e1000e_ptp_remove(adapter); + + /* The timers may be rescheduled, so explicitly disable them + * from being rescheduled. + */ + if (!down) + set_bit(__E1000_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + cancel_work_sync(&adapter->downshift_task); + cancel_work_sync(&adapter->update_phy_task); + cancel_work_sync(&adapter->print_hang_task); + + if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { + cancel_work_sync(&adapter->tx_hwtstamp_work); + if (adapter->tx_hwtstamp_skb) { + dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + adapter->tx_hwtstamp_skb = NULL; + } + } + + if (!(netdev->flags & IFF_UP)) + e1000_power_down_phy(adapter); + + /* Don't lie to e1000_close() down the road. */ + if (!down) + clear_bit(__E1000_DOWN, &adapter->state); + unregister_netdev(netdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + e1000e_release_hw_control(adapter); + + e1000e_reset_interrupt_capability(adapter); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(adapter->hw.hw_addr); + if (adapter->hw.flash_address) + iounmap(adapter->hw.flash_address); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + free_netdev(netdev); + + /* AER disable */ + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +/* PCI Error Recovery (ERS) */ +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), + board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), + board_80003es2lan }, + {
PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), + board_80003es2lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, + + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +#ifdef CONFIG_PM +static const struct dev_pm_ops e1000_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume) + SET_RUNTIME_PM_OPS(e1000_runtime_suspend, e1000_runtime_resume, + e1000_idle) +}; +#endif + +/* PCI Device API Driver */ +static struct pci_driver e1000_driver = { + .name = e1000e_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = e1000_remove, +#ifdef CONFIG_PM + .driver = { + .pm = &e1000_pm_ops, + }, +#endif + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. 
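+ * + * e.g. a plain "modprobe e1000e InterruptThrottleRate=3" ends up here, with the per-board option arrays already filled in by the module loader (they are validated later in param.c).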
+ **/ +static int __init e1000_init_module(void) +{ + int ret; + pr_info("Intel(R) PRO/1000 Network Driver - %s\n", + e1000e_driver_version); + pr_info("Copyright(c) 1999 - 2013 Intel Corporation.\n"); + ret = pci_register_driver(&e1000_driver); + + return ret; +} +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. + **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} +module_exit(e1000_exit_module); + + +MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>"); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* netdev.c */ diff --git a/addons/e1000e/src/3.10.108/nvm.c b/addons/e1000e/src/3.10.108/nvm.c new file mode 100644 index 00000000..44ddc0a0 --- /dev/null +++ b/addons/e1000e/src/3.10.108/nvm.c @@ -0,0 +1,638 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS <linux.nics@intel.com> + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" + +/** + * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_shift_out_eec_bits - Shift data bits out to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits.
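+ * + * e.g. data=0xA5 with count=8: the mask starts at 0x80 and walks down to 0x01, driving DI to 1,0,1,0,0,1,0,1 (MSB first) with one SK clock pulse per bit.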
+ **/ +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u32 mask; + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + e1e_flush(); + + udelay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. + **/ +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = er32(EECD); + + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = er32(EERD); + else + reg = er32(EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) + return 0; + + udelay(5); + } + + return -E1000_ERR_NVM; +} + +/** + * e1000e_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 e1000e_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = er32(EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + + ew32(EECD, eecd | E1000_EECD_REQ); + eecd = er32(EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = er32(EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. 
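+ * + * SPI chip select is active-low, so the "pull CS high" below deselects the part and aborts any command in flight.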
+ **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } +} + +/** + * e1000e_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void e1000e_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = er32(EECD); + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); +} + +/** + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. + **/ +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u8 spi_stat_reg; + + if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + e1e_flush(); + udelay(1); + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + e_dbg("SPI NVM Status error\n"); + return -E1000_ERR_NVM; + } + } + + return 0; +} + +/** + * e1000e_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + ew32(EERD, eerd); + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) + break; + + data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA); + } + + return ret_val; +} + +/** + * e1000e_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function , the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
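+ * e.g. with word_size=64, offset=60 and words=8 fails the second check, since only 4 words remain past the offset.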
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + usleep_range(10000, 20000); + nvm->ops.release(hw); + } + + return ret_val; +} + +/** + * e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + **/ +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + if (pba_num == NULL) { + e_dbg("PBA string buffer was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + e_dbg("NVM PBA number is not stored as string\n"); + + /* make sure callers buffer is big enough to store the PBA */ + if (pba_num_size < E1000_PBANUM_LENGTH) { + e_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return 0; + } + + ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + 
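+ /* 0xFFFF is the erased/unprogrammed NVM value; 0 would be an empty string */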
e_dbg("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + e_dbg("PBA string buffer too small\n"); + return -E1000_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return 0; +} + +/** + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = er32(RAH(0)); + rar_low = er32(RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + e_dbg("NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + e_dbg("NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * e1000e_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. 
+ **/ +void e1000e_reload_nvm_generic(struct e1000_hw *hw) +{ + u32 ctrl_ext; + + usleep_range(10, 20); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} diff --git a/addons/e1000e/src/3.10.108/nvm.h b/addons/e1000e/src/3.10.108/nvm.h new file mode 100644 index 00000000..45fc6956 --- /dev/null +++ b/addons/e1000e/src/3.10.108/nvm.h @@ -0,0 +1,47 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_NVM_H_ +#define _E1000E_NVM_H_ + +s32 e1000e_acquire_nvm(struct e1000_hw *hw); + +s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg); +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw); +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size); +s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data); +s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw); +s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); +s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw); +void e1000e_release_nvm(struct e1000_hw *hw); + +#define E1000_STM_OPCODE 0xDB00 + +#endif diff --git a/addons/e1000e/src/3.10.108/param.c b/addons/e1000e/src/3.10.108/param.c new file mode 100644 index 00000000..c16bd75b --- /dev/null +++ b/addons/e1000e/src/3.10.108/param.c @@ -0,0 +1,530 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include <linux/netdevice.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "e1000.h" + +/* This is the only thing that needs to be changed to adjust the + * maximum number of ports that the driver can manage. + */ +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define COPYBREAK_DEFAULT 256 +unsigned int copybreak = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ +#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non-zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* IntMode (Interrupt Mode) + * + * Valid Range: varies depending on kernel configuration & hardware support + * + * legacy=0, MSI=1, MSI-X=2 + * + * When MSI/MSI-X support is enabled in kernel- + * Default Value: 2 (MSI-X) when supported by hardware, 1 (MSI) otherwise + * When MSI/MSI-X support is not enabled in kernel- + * Default Value: 0 (legacy) + * + * When a mode is specified that is not allowed/supported, it will be + * demoted to the most advanced interrupt mode available.
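+ * + * e.g. IntMode=2 (MSI-X) on hardware without MSI-X support is demoted to 1 (MSI), and with CONFIG_PCI_MSI unset any request is demoted to 0 (legacy).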
+ */ +E1000_PARAM(IntMode, "Interrupt Mode"); +#define MAX_INTMODE 2 +#define MIN_INTMODE 0 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +/* Enable Kumeran Lock Loss workaround + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); + +/* Write Protect NVM + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(WriteProtectNVM, + "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); + +/* Enable CRC Stripping + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(CrcStripping, + "Enable CRC Stripping, disable if your BMC needs the CRC"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + /* range_option info */ + struct { + int min; + int max; + } r; + /* list_option info */ + struct { + int nr; + struct e1000_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + dev_info(&adapter->pdev->dev, "%s Enabled\n", + opt->name); + return 0; + case OPTION_DISABLED: + dev_info(&adapter->pdev->dev, "%s Disabled\n", + opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + dev_info(&adapter->pdev->dev, "%s set to %i\n", + opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + dev_info(&adapter->pdev->dev, "%s\n", + ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/** + * e1000e_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
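+ * + * e.g. TxIntDelay=16,8 gives board 0 a delay of 16 and board 1 a delay of 8; any further boards fall back to the default (DEFAULT_TIDV).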
+ **/ +void e1000e_check_options(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + dev_notice(&adapter->pdev->dev, + "Warning: no configuration for board #%i\n", bd); + dev_notice(&adapter->pdev->dev, + "Using defaults for all values\n"); + } + + /* Transmit Interrupt Delay */ + { + static const struct e1000_option opt = { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY } } + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + /* Transmit Absolute Interrupt Delay */ + { + static const struct e1000_option opt = { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY } } + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + /* Receive Interrupt Delay */ + { + static struct e1000_option opt = { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY } } + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + /* Receive Absolute Interrupt Delay */ + { + static const struct e1000_option opt = { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY } } + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + /* Interrupt Throttling Rate */ + { + static const struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " + __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + + /* Make sure a message is printed for non-special + * values. 
And in case of an invalid option, display + * warning, use default and go through itr/itr_setting + * adjustment logic below + */ + if ((adapter->itr > 4) && + e1000_validate_option(&adapter->itr, &opt, adapter)) + adapter->itr = opt.def; + } else { + /* If no option specified, use default value and go + * through the logic below to adjust itr/itr_setting + */ + adapter->itr = opt.def; + + /* Make sure a message is printed for non-special + * default values + */ + if (adapter->itr > 4) + dev_info(&adapter->pdev->dev, + "%s set to default %d\n", opt.name, + adapter->itr); + } + + adapter->itr_setting = adapter->itr; + switch (adapter->itr) { + case 0: + dev_info(&adapter->pdev->dev, "%s turned off\n", + opt.name); + break; + case 1: + dev_info(&adapter->pdev->dev, + "%s set to dynamic mode\n", opt.name); + adapter->itr = 20000; + break; + case 3: + dev_info(&adapter->pdev->dev, + "%s set to dynamic conservative mode\n", + opt.name); + adapter->itr = 20000; + break; + case 4: + dev_info(&adapter->pdev->dev, + "%s set to simplified (2000-8000 ints) mode\n", + opt.name); + break; + default: + /* Save the setting, because the dynamic bits + * change itr. + * + * Clear the lower two bits because + * they are used as control. + */ + adapter->itr_setting &= ~3; + break; + } + } + /* Interrupt Mode */ + { + static struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Mode", +#ifndef CONFIG_PCI_MSI + .err = "defaulting to 0 (legacy)", + .def = E1000E_INT_MODE_LEGACY, + .arg = { .r = { .min = 0, + .max = 0 } } +#endif + }; + +#ifdef CONFIG_PCI_MSI + if (adapter->flags & FLAG_HAS_MSIX) { + opt.err = kstrdup("defaulting to 2 (MSI-X)", + GFP_KERNEL); + opt.def = E1000E_INT_MODE_MSIX; + opt.arg.r.max = E1000E_INT_MODE_MSIX; + } else { + opt.err = kstrdup("defaulting to 1 (MSI)", GFP_KERNEL); + opt.def = E1000E_INT_MODE_MSI; + opt.arg.r.max = E1000E_INT_MODE_MSI; + } + + if (!opt.err) { + dev_err(&adapter->pdev->dev, + "Failed to allocate memory\n"); + return; + } +#endif + + if (num_IntMode > bd) { + unsigned int int_mode = IntMode[bd]; + e1000_validate_option(&int_mode, &opt, adapter); + adapter->int_mode = int_mode; + } else { + adapter->int_mode = opt.def; + } + +#ifdef CONFIG_PCI_MSI + kfree(opt.err); +#endif + } + /* Smart Power Down */ + { + static const struct e1000_option opt = { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + e1000_validate_option(&spd, &opt, adapter); + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd) + adapter->flags |= FLAG_SMART_POWER_DOWN; + } + } + /* CRC Stripping */ + { + static const struct e1000_option opt = { + .type = enable_option, + .name = "CRC Stripping", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_CrcStripping > bd) { + unsigned int crc_stripping = CrcStripping[bd]; + e1000_validate_option(&crc_stripping, &opt, adapter); + if (crc_stripping == OPTION_ENABLED) { + adapter->flags2 |= FLAG2_CRC_STRIPPING; + adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; + } + } else { + adapter->flags2 |= FLAG2_CRC_STRIPPING; + adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; + } + } + /* Kumeran Lock Loss Workaround */ + { + static const struct e1000_option opt = { + .type = enable_option, + .name = "Kumeran Lock Loss Workaround", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + bool enabled = opt.def; + + if (num_KumeranLockLoss > bd) { + unsigned 
int kmrn_lock_loss = KumeranLockLoss[bd]; + e1000_validate_option(&kmrn_lock_loss, &opt, adapter); + enabled = kmrn_lock_loss; + } + + if (hw->mac.type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, + enabled); + } + /* Write-protect NVM */ + { + static const struct e1000_option opt = { + .type = enable_option, + .name = "Write-protect NVM", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (adapter->flags & FLAG_IS_ICH) { + if (num_WriteProtectNVM > bd) { + unsigned int write_protect_nvm = + WriteProtectNVM[bd]; + e1000_validate_option(&write_protect_nvm, &opt, + adapter); + if (write_protect_nvm) + adapter->flags |= FLAG_READ_ONLY_NVM; + } else { + if (opt.def) + adapter->flags |= FLAG_READ_ONLY_NVM; + } + } + } +} diff --git a/addons/e1000e/src/3.10.108/phy.c b/addons/e1000e/src/3.10.108/phy.c new file mode 100644 index 00000000..be4d7c1e --- /dev/null +++ b/addons/e1000e/src/3.10.108/phy.c @@ -0,0 +1,3243 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#include "e1000.h" + +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read, bool page_set); +static u32 e1000_get_phy_addr_for_hv_page(u32 page); +static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, + u16 *data, bool read); + +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED +}; + +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_m88_cable_length_table) + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124 +}; + +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_igp_2_cable_length_table) + +/** + * e1000e_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. 
If a reset is not blocked return 0, otherwise + * return E1000_BLK_PHY_RESET (12). + **/ +s32 e1000e_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + manc = er32(MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; +} + +/** + * e1000e_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 e1000e_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + u16 retry_count = 0; + + if (!phy->ops.read_reg) + return 0; + + while (retry_count < 2) { + ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + usleep_range(20, 40); + ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + + if (phy->id != 0 && phy->id != PHY_REVISION_MASK) + return 0; + + retry_count++; + } + + return 0; +} + +/** + * e1000e_phy_reset_dsp - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + return ret_val; + + return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0); +} + +/** + * e1000e_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + e_dbg("MDI Read offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + *data = (u16)mdic; + + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == e1000_pch2lan) + udelay(100); + + return 0; +} + +/** + * e1000e_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
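+ * + * The MDIC word packs the 16-bit data with the register offset, the PHY address and the write opcode, and the READY bit is then polled exactly as in the read path.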
+ **/ +s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + e_dbg("MDI Write offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == e1000_pch2lan) + udelay(100); + + return 0; +} + +/** + * e1000e_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000e_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_set_page_igp - Set page as on IGP-like PHY(s) + * @hw: pointer to the HW structure + * @page: page to set (shifted left when necessary) + * + * Sets PHY page required for PHY register access. Assumes semaphore is + * already acquired. Note, this function sets phy.addr to 1 so the caller + * must set it appropriately (if necessary) after this function returns. 
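+ * + * Registers above MAX_PHY_MULTI_PAGE_REG get a page-select write like this one first in the igp helpers below; lower offsets go straight to the MDIC access.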
+ **/ +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) +{ + e_dbg("Setting page 0x%x\n", page); + + hw->phy.addr = 1; + + return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); +} + +/** + * __e1000e_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + s32 ret_val = 0; + + if (!locked) { + if (!hw->phy.ops.acquire) + return 0; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000e_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000e_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores the + * retrieved information in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000e_read_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000e_read_phy_reg_igp_locked - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000e_read_phy_reg_igp(hw, offset, data, true); +} + +/** + * e1000e_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + s32 ret_val = 0; + + if (!locked) { + if (!hw->phy.ops.acquire) + return 0; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000e_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & + offset, data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000e_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
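+ * + * This is the unlocked entry point; the _locked variant below is for callers that already hold the PHY semaphore.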
+ **/ +s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000e_write_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000e_write_phy_reg_igp_locked - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. + * Assumes semaphore already acquired. + **/ +s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000e_write_phy_reg_igp(hw, offset, data, true); +} + +/** + * __e1000_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. + **/ +static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + u32 kmrnctrlsta; + + if (!locked) { + s32 ret_val = 0; + + if (!hw->phy.ops.acquire) + return 0; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + kmrnctrlsta = er32(KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + if (!locked) + hw->phy.ops.release(hw); + + return 0; +} + +/** + * e1000e_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset using the + * kumeran interface. The information retrieved is stored in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000e_read_kmrn_reg_locked - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the kumeran interface. The + * information retrieved is stored in data. + * Assumes semaphore already acquired. + **/ +s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, true); +} + +/** + * __e1000_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. 
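+ * + * Unlike the kumeran read path there is no E1000_KMRNCTRLSTA_REN bit here: the shifted offset is OR'd directly with the 16-bit data before the register write.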
+ **/ +static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + u32 kmrnctrlsta; + + if (!locked) { + s32 ret_val = 0; + + if (!hw->phy.ops.acquire) + return 0; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + if (!locked) + hw->phy.ops.release(hw); + + return 0; +} + +/** + * e1000e_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. + **/ +s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000e_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Write the data to PHY register at the offset using the kumeran interface. + * Assumes semaphore already acquired. + **/ +s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, true); +} + +/** + * e1000_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = e1e_rphy(hw, MII_CTRL1000, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CTL1000_ENABLE_MASTER) ? + ((phy_data & CTL1000_AS_MASTER) ? + e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); + break; + case e1000_ms_force_slave: + phy_data |= CTL1000_ENABLE_MASTER; + phy_data &= ~(CTL1000_AS_MASTER); + break; + case e1000_ms_auto: + phy_data &= ~CTL1000_ENABLE_MASTER; + /* fall-through */ + default: + break; + } + + return e1e_wphy(hw, MII_CTRL1000, phy_data); +} + +/** + * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Enable CRS on Tx. This must be set for half-duplex operation. 
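/*
 * A minimal sketch, not from the original driver: phy.ms_type drives the
 * switch in e1000_set_master_slave_mode() above, and the pre-existing role
 * is captured in phy.original_ms_type. Forcing the 1000BASE-T master role
 * would look like the hypothetical helper below; since the function is
 * static, in practice it is reached through the copper link setup paths.
 */
static s32 force_master_role(struct e1000_hw *hw)
{
    hw->phy.ms_type = e1000_ms_force_master;  /* or e1000_ms_force_slave / e1000_ms_auto */
    return e1000_set_master_slave_mode(hw);   /* sets CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER */
}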
*/ + ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; + + ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); + if (ret_val) + return ret_val; + + /* Set MDI/MDIX mode */ + ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data); + if (ret_val) + return ret_val; + + return e1000_set_master_slave_mode(hw); +} + +/** + * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* For BM PHY this bit is downshift enable */ + if (phy->type != e1000_phy_bm) + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift on BM (disabled by default) */ + if (phy->type == e1000_phy_bm) { + /* For 82574/82583, first disable then enable downshift */ + if (phy->id == BME1000_E_PHY_ID_R2) { + phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + e_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; + } + + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((phy->type == e1000_phy_m88) && + (phy->revision < E1000_REVISION_4) && + (phy->id != BME1000_E_PHY_ID_R2)) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. 
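/*
 * A minimal sketch, not from the original driver: phy.mdix selects the
 * crossover mode in the switch inside e1000e_copper_link_setup_m88() above
 * (0 = auto, 1 = MDI, 2 = MDI-X, 3 = auto for 1000BASE-T only).
 * force_mdix_m88() is a hypothetical helper for illustration.
 */
static s32 force_mdix_m88(struct e1000_hw *hw)
{
    hw->phy.mdix = 2;                       /* force MDI-X */
    return e1000e_copper_link_setup_m88(hw);
}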
*/ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) { + /* Set PHY page 0, register 29 to 0x0003 */ + ret_val = e1e_wphy(hw, 29, 0x0003); + if (ret_val) + return ret_val; + + /* Set PHY page 0, register 30 to 0x0000 */ + ret_val = e1e_wphy(hw, 30, 0x0000); + if (ret_val) + return ret_val; + } + + /* Commit the changes. */ + if (phy->ops.commit) { + ret_val = phy->ops.commit(hw); + if (ret_val) { + e_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + + if (phy->type == e1000_phy_82578) { + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* 82578 PHY - set the downshift count to 1x. */ + phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; + phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * e1000e_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) { + e_dbg("Error resetting the PHY.\n"); + return ret_val; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msleep(100); + + /* disable lplu d0 during driver init */ + if (hw->phy.ops.set_d0_lplu_state) { + ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D0\n"); + return ret_val; + } + } + /* Configure mdi-mdix settings */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
+ */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = e1e_rphy(hw, MII_CTRL1000, &data); + if (ret_val) + return ret_val; + + data &= ~CTL1000_ENABLE_MASTER; + ret_val = e1e_wphy(hw, MII_CTRL1000, data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_set_master_slave_mode(hw); + } + + return ret_val; +} + +/** + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1e_rphy(hw, MII_CTRL1000, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(ADVERTISE_100FULL | + ADVERTISE_100HALF | + ADVERTISE_10FULL | ADVERTISE_10HALF); + mii_1000t_ctrl_reg &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); + + e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= ADVERTISE_10HALF; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= ADVERTISE_10FULL; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= ADVERTISE_100HALF; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= ADVERTISE_100FULL; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + e_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? 
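/*
 * A minimal sketch, not from the original driver: the driver-side
 * ADVERTISE_* masks in phy.autoneg_advertised are translated by
 * e1000_phy_setup_autoneg() into the standard mii.h bits, e.g.
 * ADVERTISE_100_FULL sets ADVERTISE_100FULL in MII_ADVERTISE and leaves
 * MII_CTRL1000 advertising nothing. advertise_100_full_only() is a
 * hypothetical helper for illustration.
 */
static void advertise_100_full_only(struct e1000_hw *hw)
{
    hw->phy.autoneg_advertised = ADVERTISE_100_FULL;
    /* e1000_copper_link_autoneg() masks this against phy.autoneg_mask and
     * then calls e1000_phy_setup_autoneg() to program the PHY registers.
     */
}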
*/ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= ADVERTISE_1000FULL; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (MII_ADVERTISE) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= + ~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000e_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= + (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM; + mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP; + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= + (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_wphy(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = e1e_wphy(hw, MII_CTRL1000, mii_1000t_ctrl_reg); + + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. 
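/*
 * A condensed view of the flow-control switch above, not from the original
 * driver and assuming the driver's enum e1000_fc_mode; rx_pause advertises
 * both PAUSE bits because "Rx only" cannot be advertised directly.
 * fc_mode_to_pause_bits() is a hypothetical helper for illustration.
 */
static u16 fc_mode_to_pause_bits(enum e1000_fc_mode mode)
{
    switch (mode) {
    case e1000_fc_rx_pause:
    case e1000_fc_full:
        return ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
    case e1000_fc_tx_pause:
        return ADVERTISE_PAUSE_ASYM;
    case e1000_fc_none:
    default:
        return 0;
    }
}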
+ */ + if (!phy->autoneg_advertised) + phy->autoneg_advertised = phy->autoneg_mask; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART); + ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->mac.get_link_status = true; + + return ret_val; +} + +/** + * e1000e_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 e1000e_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + e_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, + &link); + if (ret_val) + return ret_val; + + if (link) { + e_dbg("Valid link established!!!\n"); + hw->mac.ops.config_collision_dist(hw); + ret_val = e1000e_config_fc_after_link_up(hw); + } else { + e_dbg("Unable to establish link!!!\n"); + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. 
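/*
 * A minimal sketch, not from the original driver: disabling autoneg and
 * forcing 100 Mb/s full duplex through e1000e_setup_copper_link() above.
 * The assumption that mac.forced_speed_duplex takes the ADVERTISE_* masks
 * mirrors the driver's ethtool path and is worth verifying; force_100_full()
 * is a hypothetical helper for illustration.
 */
static s32 force_100_full(struct e1000_hw *hw)
{
    hw->mac.autoneg = 0;                    /* skip e1000_copper_link_autoneg() */
    hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
    return e1000e_setup_copper_link(hw);    /* dispatches to force_speed_duplex */
}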
+ */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + e_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); + if (ret_val) + return ret_val; + + /* Reset the phy to commit changes. */ + if (hw->phy.ops.commit) { + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + return ret_val; + } + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + if (hw->phy.type != e1000_phy_m88) { + e_dbg("Link taking longer than expected.\n"); + } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + return ret_val; + ret_val = e1000e_phy_reset_dsp(hw); + if (ret_val) + return ret_val; + } + } + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + if (hw->phy.type != e1000_phy_m88) + return 0; + + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. 
+ */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1e_rphy(hw, MII_BMCR, &data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &data); + + ret_val = e1e_wphy(hw, MII_BMCR, data); + if (ret_val) + return ret_val; + + /* Disable MDI-X support for 10/100 */ + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + data &= ~IFE_PMC_AUTO_MDIX; + data &= ~IFE_PMC_FORCE_MDIX; + + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); + if (ret_val) + return ret_val; + + e_dbg("IFE PMC: %X\n", data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + e_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of MII_BMCR + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the MII_BMCR register for these settings to + * take effect. + **/ +void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~BMCR_ANENABLE; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~BMCR_FULLDPLX; + e_dbg("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= BMCR_FULLDPLX; + e_dbg("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb?
*/ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= BMCR_SPEED100; + *phy_ctrl &= ~BMCR_SPEED1000; + e_dbg("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl &= ~(BMCR_SPEED1000 | BMCR_SPEED100); + e_dbg("Forcing 10mb\n"); + } + + hw->mac.ops.config_collision_dist(hw); + + ew32(CTRL, ctrl); +} + +/** + * e1000e_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + } + + return ret_val; +} + +/** + * e1000e_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. 
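/*
 * The documented calling convention for e1000e_phy_force_speed_duplex_setup()
 * above, as a sketch (not from the original driver): the helper only edits
 * the MAC CTRL register and the caller's BMCR image; the caller must write
 * BMCR back itself, exactly as the _igp/_m88/_ife forcers do.
 * apply_forced_speed_duplex() is a hypothetical helper for illustration.
 */
static s32 apply_forced_speed_duplex(struct e1000_hw *hw)
{
    u16 bmcr;
    s32 ret_val;

    ret_val = e1e_rphy(hw, MII_BMCR, &bmcr);        /* read current BMCR */
    if (ret_val)
        return ret_val;

    e1000e_phy_force_speed_duplex_setup(hw, &bmcr); /* edit MAC CTRL + bmcr image */
    return e1e_wphy(hw, MII_BMCR, bmcr);            /* commit the forced bits */
}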
+ **/ +s32 e1000e_check_downshift(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + switch (phy->type) { + case e1000_phy_m88: + case e1000_phy_gg82563: + case e1000_phy_bm: + case e1000_phy_82578: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + return 0; + } + + ret_val = e1e_rphy(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = !!(phy_data & mask); + + return ret_val; +} + +/** + * e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 e1000_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +s32 e1000_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + /* Polarity is determined based on the speed of + * our connection. + */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = e1e_rphy(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = ((data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_check_polarity_ife - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * + * Polarity is determined based on the polarity reversal feature being enabled. + **/ +s32 e1000_check_polarity_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + /* Polarity is determined based on the reversal feature being enabled. + */ + if (phy->polarity_correction) { + offset = IFE_PHY_EXTENDED_STATUS_CONTROL; + mask = IFE_PESC_POLARITY_REVERSED; + } else { + offset = IFE_PHY_SPECIAL_CONTROL; + mask = IFE_PSC_FORCE_POLARITY; + } + + ret_val = e1e_rphy(hw, offset, &phy_data); + + if (!ret_val) + phy->cable_polarity = ((phy_data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, whichever happens first.
+ **/ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 i, phy_status; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); + if (ret_val) + break; + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); + if (ret_val) + break; + if (phy_status & BMSR_ANEGCOMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * e1000e_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = 0; + u16 i, phy_status; + + for (i = 0; i < iterations; i++) { + /* Some PHYs require the MII_BMSR register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); + if (ret_val) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + msleep(usec_interval / 1000); + else + udelay(usec_interval); + } + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); + if (ret_val) + break; + if (phy_status & BMSR_LSTATUS) + break; + if (usec_interval >= 1000) + msleep(usec_interval / 1000); + else + udelay(usec_interval); + } + + *success = (i < iterations); + + return ret_val; +} + +/** + * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. The m88 PHY has four + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return 0; +} + +/** + * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. 
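/*
 * Editor's note: e1000_m88_cable_length_table[] is defined earlier in this
 * file; from the register-value/length mapping documented above it is
 * expected to look like the sketch below (values in meters, the last real
 * bucket repeated to cap the ">140 m" case). Shown for reference only; do
 * not re-define the symbol.
 */
static const u16 e1000_m88_cable_length_table[] = {
    0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED
};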
+ **/ +s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK); + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) + return -E1000_ERR_PHY; + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0); + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return 0; +} + +/** + * e1000e_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. 
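/*
 * Editor's worked example for the AGC averaging in
 * e1000e_get_cable_length_igp_2() above, with hypothetical per-channel
 * table lookups of 40, 50, 60 and 90 meters:
 *
 *     sum              = 40 + 50 + 60 + 90 = 240
 *     drop min and max = 240 - (40 + 90)   = 110
 *     average          = 110 / (4 - 2)     = 55
 *
 * Assuming the usual IGP02E1000_AGC_RANGE of 15, this reports a 40..70 m
 * range with phy->cable_length = 55.
 */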
+ **/ +s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + if (phy->media_type != e1000_media_type_copper) { + e_dbg("Phy info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy->polarity_correction = !!(phy_data & + M88E1000_PSCR_POLARITY_REVERSAL); + + ret_val = e1000_check_polarity_m88(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, MII_STAT1000, &phy_data); + if (ret_val) + return ret_val; + + phy->local_rx = (phy_data & LPA_1000LOCALRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & LPA_1000REMRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000e_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine the cable length, local and remote receiver. + **/ +s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_igp(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, MII_STAT1000, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & LPA_1000LOCALRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & LPA_1000REMRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000_get_phy_info_ife - Retrieves various IFE PHY states + * @hw: pointer to the HW structure + * + * Populates "phy" structure with various feature states.
+ **/ +s32 e1000_get_phy_info_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); + if (ret_val) + return ret_val; + phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); + + if (phy->polarity_correction) { + ret_val = e1000_check_polarity_ife(hw); + if (ret_val) + return ret_val; + } else { + /* Polarity is forced */ + phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + } + + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); + + /* The following parameters are undefined for 10/100 operation. */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + + return 0; +} + +/** + * e1000e_phy_sw_reset - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/writing the control register reset bit to the PHY. + **/ +s32 e1000e_phy_sw_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_ctrl; + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= BMCR_RESET; + ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl); + if (ret_val) + return ret_val; + + udelay(1); + + return ret_val; +} + +/** + * e1000e_phy_hw_reset_generic - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + if (phy->ops.check_reset_block) { + ret_val = phy->ops.check_reset_block(hw); + if (ret_val) + return 0; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + return ret_val; + + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + e1e_flush(); + + udelay(phy->reset_delay_us); + + ew32(CTRL, ctrl); + e1e_flush(); + + usleep_range(150, 300); + + phy->ops.release(hw); + + return phy->ops.get_cfg_done(hw); +} + +/** + * e1000e_get_cfg_done_generic - Generic configuration done + * @hw: pointer to the HW structure + * + * Generic function to wait 10 milliseconds for configuration to complete + * and return success. + **/ +s32 e1000e_get_cfg_done_generic(struct e1000_hw __always_unused *hw) +{ + mdelay(10); + + return 0; +} + +/** + * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/ +s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) +{ + e_dbg("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + e1e_wphy(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + e1e_wphy(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + e1e_wphy(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + e1e_wphy(hw, 0x2FB2, 0xF8F0); + /* Add 4% to Tx amplitude in Gig mode */ + e1e_wphy(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + e1e_wphy(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + e1e_wphy(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + e1e_wphy(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + e1e_wphy(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + e1e_wphy(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + e1e_wphy(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + e1e_wphy(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + e1e_wphy(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + e1e_wphy(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + e1e_wphy(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + e1e_wphy(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + e1e_wphy(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + e1e_wphy(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + e1e_wphy(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + e1e_wphy(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + e1e_wphy(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + e1e_wphy(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + e1e_wphy(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + e1e_wphy(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + e1e_wphy(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + e1e_wphy(hw, 0x1798, 0xD008); + /* Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + e1e_wphy(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + e1e_wphy(hw, 0x187A, 0x0800); + /* Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + e1e_wphy(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + e1e_wphy(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + e1e_wphy(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + e1e_wphy(hw, 0x0000, 0x1340); + + return 0; +} + +/** + * e1000e_get_phy_type_from_id - Get PHY type from id + * @phy_id: phy_id read from the phy + * + * Returns the phy type from the id. 
+ **/ +enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) +{ + enum e1000_phy_type phy_type = e1000_phy_unknown; + + switch (phy_id) { + case M88E1000_I_PHY_ID: + case M88E1000_E_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1011_I_PHY_ID: + phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ + phy_type = e1000_phy_igp_2; + break; + case GG82563_E_PHY_ID: + phy_type = e1000_phy_gg82563; + break; + case IGP03E1000_E_PHY_ID: + phy_type = e1000_phy_igp_3; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy_type = e1000_phy_ife; + break; + case BME1000_E_PHY_ID: + case BME1000_E_PHY_ID_R2: + phy_type = e1000_phy_bm; + break; + case I82578_E_PHY_ID: + phy_type = e1000_phy_82578; + break; + case I82577_E_PHY_ID: + phy_type = e1000_phy_82577; + break; + case I82579_E_PHY_ID: + phy_type = e1000_phy_82579; + break; + case I217_E_PHY_ID: + phy_type = e1000_phy_i217; + break; + default: + phy_type = e1000_phy_unknown; + break; + } + return phy_type; +} + +/** + * e1000e_determine_phy_address - Determines PHY address. + * @hw: pointer to the HW structure + * + * This uses a trial and error method to loop through possible PHY + * addresses. It tests each by reading the PHY ID registers and + * checking for a match. + **/ +s32 e1000e_determine_phy_address(struct e1000_hw *hw) +{ + u32 phy_addr = 0; + u32 i; + enum e1000_phy_type phy_type = e1000_phy_unknown; + + hw->phy.id = phy_type; + + for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { + hw->phy.addr = phy_addr; + i = 0; + + do { + e1000e_get_phy_id(hw); + phy_type = e1000e_get_phy_type_from_id(hw->phy.id); + + /* If phy_type is valid, break - we found our + * PHY address + */ + if (phy_type != e1000_phy_unknown) + return 0; + + usleep_range(1000, 2000); + i++; + } while (i < 10); + } + + return -E1000_ERR_PHY_TYPE; +} + +/** + * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address + * @page: page to access + * + * Returns the phy address for the page requested. + **/ +static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) +{ + u32 phy_addr = 2; + + if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) + phy_addr = 1; + + return phy_addr; +} + +/** + * e1000e_write_phy_reg_bm - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto release; + } + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. 
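/*
 * A minimal sketch, not from the original driver: multi-page PHY offsets
 * pack the page number above the 5-bit register field, which is why the
 * accessors in this file compute page = offset >> IGP_PAGE_SHIFT and mask
 * the register with MAX_PHY_REG_ADDRESS. split_phy_offset() is a
 * hypothetical helper for illustration.
 */
static void split_phy_offset(u32 offset, u32 *page, u32 *reg)
{
    *page = offset >> IGP_PAGE_SHIFT;       /* which 32-register page */
    *reg = offset & MAX_PHY_REG_ADDRESS;    /* register within the page */
}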
+ */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto release; + } + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000e_read_phy_reg_bm - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto release; + } + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. + */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto release; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000e_read_phy_reg_bm2 - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto release; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto release; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000e_write_phy_reg_bm2 - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ **/ +s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto release; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto release; + } + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers + * @hw: pointer to the HW structure + * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG + * + * Assumes semaphore already acquired and phy_reg points to a valid memory + * address to store contents of the BM_WUC_ENABLE_REG register. + **/ +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + u16 temp; + + /* All page select, port ctrl and wakeup registers use phy address 1 */ + hw->phy.addr = 1; + + /* Select Port Control Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + e_dbg("Could not set Port Control page\n"); + return ret_val; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + if (ret_val) { + e_dbg("Could not read PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + return ret_val; + } + + /* Enable both PHY wakeup mode and Wakeup register page writes. + * Prevent a power state change by disabling ME and Host PHY wakeup. + */ + temp = *phy_reg; + temp |= BM_WUC_ENABLE_BIT; + temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); + + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp); + if (ret_val) { + e_dbg("Could not write PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + return ret_val; + } + + /* Select Host Wakeup Registers page - caller now able to write + * registers on the Wakeup registers page + */ + return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); +} + +/** + * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs + * @hw: pointer to the HW structure + * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG + * + * Restore BM_WUC_ENABLE_REG to its original value. + * + * Assumes semaphore already acquired and *phy_reg is the contents of the + * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by + * caller. 
+ **/ +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + + /* Select Port Control Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + e_dbg("Could not set Port Control page\n"); + return ret_val; + } + + /* Restore 769.17 to its original value */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg); + if (ret_val) + e_dbg("Could not restore PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + + return ret_val; +} + +/** + * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to read or write + * @read: determines if operation is read or write + * @page_set: BM_WUC_PAGE already set and access enabled + * + * Read the PHY register at offset and store the retrieved information in + * data, or write data to PHY register at offset. Note the procedure to + * access the PHY wakeup registers is different than reading the other PHY + * registers. It works as such: + * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1 + * 2) Set page to 800 for host (801 if we were manageability) + * 3) Write the address using the address opcode (0x11) + * 4) Read or write the data using the data opcode (0x12) + * 5) Restore 769.17.2 to its original value + * + * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and + * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm(). + * + * Assumes semaphore is already acquired. When page_set==true, assumes + * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack + * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_bm()). 
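/*
 * A sketch of the five-step wakeup-register procedure documented above,
 * not from the original driver; assumes the semaphore is already held.
 * wuc_read() is a hypothetical helper for illustration.
 */
static s32 wuc_read(struct e1000_hw *hw, u16 reg, u16 *data)
{
    u16 saved;
    s32 ret_val;

    /* steps 1-2: set 769.17.2 and select page 800 */
    ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &saved);
    if (ret_val)
        return ret_val;

    /* step 3: write the register number using the address opcode (0x11) */
    ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);

    /* step 4: read the value using the data opcode (0x12) */
    if (!ret_val)
        ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, data);

    /* step 5: restore 769.17 to its original value */
    if (!ret_val)
        ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &saved);

    return ret_val;
}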
+ **/
+static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read, bool page_set)
+{
+ s32 ret_val;
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 phy_reg = 0;
+
+ /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
+ if ((hw->mac.type == e1000_pchlan) &&
+ (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
+ e_dbg("Attempting to access page %d while gig enabled.\n",
+ page);
+
+ if (!page_set) {
+ /* Enable access to PHY wakeup registers */
+ ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+ if (ret_val) {
+ e_dbg("Could not enable PHY wakeup reg access\n");
+ return ret_val;
+ }
+ }
+
+ e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg);
+
+ /* Write the Wakeup register page offset value using opcode 0x11 */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
+ if (ret_val) {
+ e_dbg("Could not write address opcode to page %d\n", page);
+ return ret_val;
+ }
+
+ if (read) {
+ /* Read the Wakeup register page value using opcode 0x12 */
+ ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+ data);
+ } else {
+ /* Write the Wakeup register page value using opcode 0x12 */
+ ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+ *data);
+ }
+
+ if (ret_val) {
+ e_dbg("Could not access PHY reg %d.%d\n", page, reg);
+ return ret_val;
+ }
+
+ if (!page_set)
+ ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+ return ret_val;
+}
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down (to save power, to turn off link during a
+ * driver unload, or because wake on LAN is not enabled), restore the link to
+ * its previous settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ e1e_rphy(hw, MII_BMCR, &mii_reg);
+ mii_reg &= ~BMCR_PDOWN;
+ e1e_wphy(hw, MII_BMCR, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down the PHY to save power, or to turn off link during a driver
+ * unload when wake on LAN is not enabled.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ e1e_rphy(hw, MII_BMCR, &mii_reg);
+ mii_reg |= BMCR_PDOWN;
+ e1e_wphy(hw, MII_BMCR, mii_reg);
+ usleep_range(1000, 2000);
+}
+
+/**
+ * __e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ * @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Acquires semaphore, if necessary, then reads the PHY register at offset
+ * and stores the retrieved information in data. Release any acquired
+ * semaphore before exiting.
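+ *
+ * Locked-variant sketch (illustrative): a caller already holding the PHY
+ * semaphore would reach this routine as
+ *   ret_val = __e1000_read_phy_reg_hv(hw, offset, &data, true, false);
+ * via the e1000_read_phy_reg_hv_locked() wrapper defined below.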
+ **/
+static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
+ bool locked, bool page_set)
+{
+ s32 ret_val;
+ u16 page = BM_PHY_REG_PAGE(offset);
+ u16 reg = BM_PHY_REG_NUM(offset);
+ u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+ if (!locked) {
+ ret_val = hw->phy.ops.acquire(hw);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Page 800 works differently than the rest so it has its own func */
+ if (page == BM_WUC_PAGE) {
+ ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+ true, page_set);
+ goto out;
+ }
+
+ if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+ ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+ data, true);
+ goto out;
+ }
+
+ if (!page_set) {
+ if (page == HV_INTC_FC_PAGE_START)
+ page = 0;
+
+ if (reg > MAX_PHY_MULTI_PAGE_REG) {
+ /* Page is shifted left, PHY expects (page x 32) */
+ ret_val = e1000_set_page_igp(hw,
+ (page << IGP_PAGE_SHIFT));
+
+ hw->phy.addr = phy_addr;
+
+ if (ret_val)
+ goto out;
+ }
+ }
+
+ e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+ page << IGP_PAGE_SHIFT, reg);
+
+ ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data);
+out:
+ if (!locked)
+ hw->phy.ops.release(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_read_phy_reg_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquires semaphore then reads the PHY register at offset and stores
+ * the retrieved information in data. Release the acquired semaphore
+ * before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ * e1000_read_phy_reg_hv_locked - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ * e1000_read_phy_reg_page_hv - Read HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data. Assumes semaphore already acquired and page already set.
+ **/
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+ return __e1000_read_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ * __e1000_write_phy_reg_hv - Write HV PHY register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ * @locked: semaphore has already been acquired or not
+ * @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ * Acquires semaphore, if necessary, then writes the data to PHY register
+ * at the offset. Release any acquired semaphores before exiting.
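+ *
+ * Offset encoding note (illustrative): an offset built as
+ * BM_PHY_REG(769, 17) decodes back through BM_PHY_REG_PAGE() == 769 and
+ * BM_PHY_REG_NUM() == 17, which is how the page and register numbers
+ * travel together in the single u32 offset these routines take.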
+ **/ +static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, + bool locked, bool page_set) +{ + s32 ret_val; + u16 page = BM_PHY_REG_PAGE(offset); + u16 reg = BM_PHY_REG_NUM(offset); + u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, page_set); + goto out; + } + + if (page > 0 && page < HV_INTC_FC_PAGE_START) { + ret_val = e1000_access_phy_debug_regs_hv(hw, offset, + &data, false); + goto out; + } + + if (!page_set) { + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + /* Workaround MDIO accesses being disabled after entering IEEE + * Power Down (when bit 11 of the PHY Control register is set) + */ + if ((hw->phy.type == e1000_phy_82578) && + (hw->phy.revision >= 1) && + (hw->phy.addr == 2) && + !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) { + u16 data2 = 0x7EFF; + ret_val = e1000_access_phy_debug_regs_hv(hw, + (1 << 6) | 0x3, + &data2, false); + if (ret_val) + goto out; + } + + if (reg > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_set_page_igp(hw, + (page << IGP_PAGE_SHIFT)); + + hw->phy.addr = phy_addr; + + if (ret_val) + goto out; + } + } + + e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page, + page << IGP_PAGE_SHIFT, reg); + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, + data); + +out: + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register at the offset. + * Release the acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, false, false); +} + +/** + * e1000_write_phy_reg_hv_locked - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired. + **/ +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, true, false); +} + +/** + * e1000_write_phy_reg_page_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired and page already set. 
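+ *
+ * Typical bracket when the target is a wakeup register (a sketch):
+ * e1000_enable_phy_wakeup_reg_access_bm() selects BM_WUC_PAGE and enables
+ * access, page-variant accessors such as this one then run with
+ * page_set == true, and e1000_disable_phy_wakeup_reg_access_bm() restores
+ * 769.17 afterwards.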
+ **/
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+ return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ * e1000_get_phy_addr_for_hv_page - Get PHY address based on page
+ * @page: page to be accessed
+ **/
+static u32 e1000_get_phy_addr_for_hv_page(u32 page)
+{
+ u32 phy_addr = 2;
+
+ if (page >= HV_INTC_FC_PAGE_START)
+ phy_addr = 1;
+
+ return phy_addr;
+}
+
+/**
+ * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read or written
+ * @data: pointer to the data to be read or written
+ * @read: determines if operation is read or write
+ *
+ * Reads the PHY register at offset and stores the retrieved information
+ * in data, or writes data to the register. Assumes semaphore already
+ * acquired. Note that the procedure to access these regs uses the address
+ * port and data port to read/write. These accesses are done with PHY
+ * address 2 and without using pages.
+ **/
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+ u16 *data, bool read)
+{
+ s32 ret_val;
+ u32 addr_reg;
+ u32 data_reg;
+
+ /* This takes care of the difference with desktop vs mobile phy */
+ addr_reg = ((hw->phy.type == e1000_phy_82578) ?
+ I82578_ADDR_REG : I82577_ADDR_REG);
+ data_reg = addr_reg + 1;
+
+ /* All operations in this function are phy address 2 */
+ hw->phy.addr = 2;
+
+ /* masking with 0x3F to remove the page from offset */
+ ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
+ if (ret_val) {
+ e_dbg("Could not write the Address Offset port register\n");
+ return ret_val;
+ }
+
+ /* Read or write the data value next */
+ if (read)
+ ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data);
+ else
+ ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
+
+ if (ret_val)
+ e_dbg("Could not access the Data port register\n");
+
+ return ret_val;
+}
+
+/**
+ * e1000_link_stall_workaround_hv - Si workaround
+ * @hw: pointer to the HW structure
+ *
+ * This function works around a Si bug where the link partner can get
+ * a link up indication before the PHY does. If small packets are sent
+ * by the link partner they can be placed in the packet buffer without
+ * being properly accounted for by the PHY and will stall preventing
+ * further packets from being received. The workaround is to clear the
+ * packet buffer after the PHY detects link up.
+ **/
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 data;
+
+ if (hw->phy.type != e1000_phy_82578)
+ return 0;
+
+ /* Do not apply the workaround if PHY loopback (BMCR bit 14) is set */
+ e1e_rphy(hw, MII_BMCR, &data);
+ if (data & BMCR_LOOPBACK)
+ return 0;
+
+ /* check if link is up and at 1Gbps */
+ ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_MASK);
+
+ if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED |
+ BM_CS_STATUS_SPEED_1000))
+ return 0;
+
+ msleep(200);
+
+ /* flush the packets in the fifo buffer */
+ ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL,
+ (HV_MUX_DATA_CTRL_GEN_TO_MAC |
+ HV_MUX_DATA_CTRL_FORCE_SPEED));
+ if (ret_val)
+ return ret_val;
+
+ return e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
+}
+
+/**
+ * e1000_check_polarity_82577 - Checks the polarity.
+ * @hw: pointer to the HW structure
+ *
+ * Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ * Polarity is determined based on the PHY specific status register.
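+ *
+ * For reference (illustrative): on success, hw->phy.cable_polarity is set
+ * from bit 10 (I82577_PHY_STATUS2_REV_POLARITY) of I82577_PHY_STATUS_2 to
+ * either e1000_rev_polarity_normal or e1000_rev_polarity_reversed.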
+ **/
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+
+ ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
+
+ if (!ret_val)
+ phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY)
+ ? e1000_rev_polarity_reversed
+ : e1000_rev_polarity_normal);
+
+ return ret_val;
+}
+
+/**
+ * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY setup function to force speed and duplex.
+ **/
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 phy_data;
+ bool link;
+
+ ret_val = e1e_rphy(hw, MII_BMCR, &phy_data);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+ ret_val = e1e_wphy(hw, MII_BMCR, phy_data);
+ if (ret_val)
+ return ret_val;
+
+ udelay(1);
+
+ if (phy->autoneg_wait_to_complete) {
+ e_dbg("Waiting for forced speed/duplex link on 82577 phy\n");
+
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link)
+ e_dbg("Link taking longer than expected.\n");
+
+ /* Try once more */
+ ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+ 100000, &link);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ * @hw: pointer to the HW structure
+ *
+ * Read PHY status to determine if link is up. If link is up, then
+ * set/determine 10base-T extended distance and polarity correction. Read
+ * PHY port status to determine MDI/MDI-X and speed. Based on the speed,
+ * determine the cable length and the local and remote receiver status.
+ **/
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
+{
+ struct e1000_phy_info *phy = &hw->phy;
+ s32 ret_val;
+ u16 data;
+ bool link;
+
+ ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+ if (ret_val)
+ return ret_val;
+
+ if (!link) {
+ e_dbg("Phy info is only valid if link is up\n");
+ return -E1000_ERR_CONFIG;
+ }
+
+ phy->polarity_correction = true;
+
+ ret_val = e1000_check_polarity_82577(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX);
+
+ if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+ I82577_PHY_STATUS2_SPEED_1000MBPS) {
+ ret_val = hw->phy.ops.get_cable_length(hw);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, MII_STAT1000, &data);
+ if (ret_val)
+ return ret_val;
+
+ phy->local_rx = (data & LPA_1000LOCALRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+
+ phy->remote_rx = (data & LPA_1000REMRXOK)
+ ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok;
+ } else {
+ phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+ phy->local_rx = e1000_1000t_rx_status_undefined;
+ phy->remote_rx = e1000_1000t_rx_status_undefined;
+ }
+
+ return 0;
+}
+
+/**
+ * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ * @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies result is valid before
+ * placing it in the phy_cable_length field.
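+ *
+ * Worked example (illustrative): a diag status value of 0x0128 yields
+ * (0x0128 & I82577_DSTATUS_CABLE_LENGTH) >>
+ * I82577_DSTATUS_CABLE_LENGTH_SHIFT == 0x4A == 74, while a decoded value
+ * of E1000_CABLE_LENGTH_UNDEFINED (0xFF) makes the routine return
+ * -E1000_ERR_PHY instead.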
+ **/ +s32 e1000_get_cable_length_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + return ret_val; + + length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + I82577_DSTATUS_CABLE_LENGTH_SHIFT); + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + return -E1000_ERR_PHY; + + phy->cable_length = length; + + return 0; +} diff --git a/addons/e1000e/src/3.10.108/phy.h b/addons/e1000e/src/3.10.108/phy.h new file mode 100644 index 00000000..f4f71b99 --- /dev/null +++ b/addons/e1000e/src/3.10.108/phy.h @@ -0,0 +1,242 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_PHY_H_ +#define _E1000E_PHY_H_ + +s32 e1000e_check_downshift(struct e1000_hw *hw); +s32 e1000_check_polarity_m88(struct e1000_hw *hw); +s32 e1000_check_polarity_igp(struct e1000_hw *hw); +s32 e1000_check_polarity_ife(struct e1000_hw *hw); +s32 e1000e_check_reset_block_generic(struct e1000_hw *hw); +s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); +s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); +s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); +s32 e1000e_get_cable_length_m88(struct e1000_hw *hw); +s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); +s32 e1000e_get_cfg_done_generic(struct e1000_hw *hw); +s32 e1000e_get_phy_id(struct e1000_hw *hw); +s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); +s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); +s32 e1000_get_phy_info_ife(struct e1000_hw *hw); +s32 e1000e_phy_sw_reset(struct e1000_hw *hw); +void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); +s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); +s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); +s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000e_setup_copper_link(struct e1000_hw *hw); 
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw); +enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); +s32 e1000e_determine_phy_address(struct e1000_hw *hw); +s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); +void e1000_power_up_phy_copper(struct e1000_hw *hw); +void e1000_power_down_phy_copper(struct e1000_hw *hw); +s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); +s32 e1000_check_polarity_82577(struct e1000_hw *hw); +s32 e1000_get_phy_info_82577(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); +s32 e1000_get_cable_length_82577(struct e1000_hw *hw); + +#define E1000_MAX_PHY_ADDR 8 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +/* BM/HV Specific Registers */ +#define BM_PORT_CTRL_PAGE 769 +#define BM_WUC_PAGE 800 +#define BM_WUC_ADDRESS_OPCODE 0x11 +#define BM_WUC_DATA_OPCODE 0x12 +#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE +#define BM_WUC_ENABLE_REG 17 +#define BM_WUC_ENABLE_BIT (1 << 2) +#define BM_WUC_HOST_WU_BIT (1 << 4) +#define BM_WUC_ME_WU_BIT (1 << 5) + +#define PHY_UPPER_SHIFT 21 +#define BM_PHY_REG(page, reg) \ + (((reg) & MAX_PHY_REG_ADDRESS) |\ + (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\ + (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT))) +#define BM_PHY_REG_PAGE(offset) \ + ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) +#define BM_PHY_REG_NUM(offset) \ + ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ + (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ + ~MAX_PHY_REG_ADDRESS))) + +#define HV_INTC_FC_PAGE_START 768 +#define 
I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ +#define I82577_CTRL_REG 23 + +/* 82577 specific PHY registers */ +#define I82577_PHY_CTRL_2 18 +#define I82577_PHY_LBK_CTRL 19 +#define I82577_PHY_STATUS_2 26 +#define I82577_PHY_DIAG_STATUS 31 + +/* I82577 PHY Status 2 */ +#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82577_PHY_STATUS2_MDIX 0x0800 +#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 + +/* I82577 PHY Control 2 */ +#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82577 PHY Diagnostics Status */ +#define I82577_DSTATUS_CABLE_LENGTH 0x03FC +#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* BM PHY Copper Specific Control 1 */ +#define BM_CS_CTRL1 16 + +/* BM PHY Copper Specific Status */ +#define BM_CS_STATUS 17 +#define BM_CS_STATUS_LINK_UP 0x0400 +#define BM_CS_STATUS_RESOLVED 0x0800 +#define BM_CS_STATUS_SPEED_MASK 0xC000 +#define BM_CS_STATUS_SPEED_1000 0x8000 + +/* 82577 Mobile Phy Status Register */ +#define HV_M_STATUS 26 +#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 +#define HV_M_STATUS_SPEED_MASK 0x0300 +#define HV_M_STATUS_SPEED_1000 0x0200 +#define HV_M_STATUS_LINK_UP 0x0040 + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */ +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ +#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ +#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ +#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 +#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */ +#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define 
IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ + +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ + +#endif diff --git a/addons/e1000e/src/3.10.108/ptp.c b/addons/e1000e/src/3.10.108/ptp.c new file mode 100644 index 00000000..065f8c80 --- /dev/null +++ b/addons/e1000e/src/3.10.108/ptp.c @@ -0,0 +1,276 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +/* PTP 1588 Hardware Clock (PHC) + * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb) + * Copyright (C) 2011 Richard Cochran + */ + +#include "e1000.h" + +/** + * e1000e_phc_adjfreq - adjust the frequency of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired frequency change in parts per billion + * + * Adjust the frequency of the PHC cycle counter by the indicated delta from + * the base frequency. + **/ +static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + struct e1000_hw *hw = &adapter->hw; + bool neg_adj = false; + u64 adjustment; + u32 timinca, incvalue; + s32 ret_val; + + if ((delta > ptp->max_adj) || (delta <= -1000000000)) + return -EINVAL; + + if (delta < 0) { + neg_adj = true; + delta = -delta; + } + + /* Get the System Time Register SYSTIM base frequency */ + ret_val = e1000e_get_base_timinca(adapter, &timinca); + if (ret_val) + return ret_val; + + incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK; + + adjustment = incvalue; + adjustment *= delta; + adjustment = div_u64(adjustment, 1000000000); + + incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment); + + timinca &= ~E1000_TIMINCA_INCVALUE_MASK; + timinca |= incvalue; + + ew32(TIMINCA, timinca); + + return 0; +} + +/** + * e1000e_phc_adjtime - Shift the time of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired change in nanoseconds + * + * Adjust the timer by resetting the timecounter structure. 
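+ *
+ * Example (illustrative): delta = 1000000 shifts the PHC forward by 1 ms
+ * and a negative delta shifts it back; only the timecounter base is
+ * re-initialized, while the underlying cyclecounter keeps running
+ * untouched.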
+ **/ +static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + unsigned long flags; + s64 now; + + spin_lock_irqsave(&adapter->systim_lock, flags); + now = timecounter_read(&adapter->tc); + now += delta; + timecounter_init(&adapter->tc, &adapter->cc, now); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + return 0; +} + +/** + * e1000e_phc_gettime - Reads the current time from the hardware clock + * @ptp: ptp clock structure + * @ts: timespec structure to hold the current time value + * + * Read the timecounter and return the correct value in ns after converting + * it into a struct timespec. + **/ +static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec *ts) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + unsigned long flags; + u32 remainder; + u64 ns; + + spin_lock_irqsave(&adapter->systim_lock, flags); + ns = timecounter_read(&adapter->tc); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + ts->tv_sec = div_u64_rem(ns, NSEC_PER_SEC, &remainder); + ts->tv_nsec = remainder; + + return 0; +} + +/** + * e1000e_phc_settime - Set the current time on the hardware clock + * @ptp: ptp clock structure + * @ts: timespec containing the new time for the cycle counter + * + * Reset the timecounter to use a new base value instead of the kernel + * wall timer value. + **/ +static int e1000e_phc_settime(struct ptp_clock_info *ptp, + const struct timespec *ts) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + unsigned long flags; + u64 ns; + + ns = timespec_to_ns(ts); + + /* reset the timecounter */ + spin_lock_irqsave(&adapter->systim_lock, flags); + timecounter_init(&adapter->tc, &adapter->cc, ns); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + return 0; +} + +/** + * e1000e_phc_enable - enable or disable an ancillary feature + * @ptp: ptp clock structure + * @request: Desired resource to enable or disable + * @on: Caller passes one to enable or zero to disable + * + * Enable (or disable) ancillary features of the PHC subsystem. + * Currently, no ancillary features are supported. + **/ +static int e1000e_phc_enable(struct ptp_clock_info __always_unused *ptp, + struct ptp_clock_request __always_unused *request, + int __always_unused on) +{ + return -EOPNOTSUPP; +} + +static void e1000e_systim_overflow_work(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, + systim_overflow_work.work); + struct e1000_hw *hw = &adapter->hw; + struct timespec ts; + + adapter->ptp_clock_info.gettime(&adapter->ptp_clock_info, &ts); + + e_dbg("SYSTIM overflow check at %ld.%09lu\n", ts.tv_sec, ts.tv_nsec); + + schedule_delayed_work(&adapter->systim_overflow_work, + E1000_SYSTIM_OVERFLOW_PERIOD); +} + +static const struct ptp_clock_info e1000e_ptp_clock_info = { + .owner = THIS_MODULE, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .pps = 0, + .adjfreq = e1000e_phc_adjfreq, + .adjtime = e1000e_phc_adjtime, + .gettime = e1000e_phc_gettime, + .settime = e1000e_phc_settime, + .enable = e1000e_phc_enable, +}; + +/** + * e1000e_ptp_init - initialize PTP for devices which support it + * @adapter: board private structure + * + * This function performs the required steps for enabling PTP support. + * If PTP support has already been loaded it simply calls the cyclecounter + * init routine and exits. 
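+ *
+ * Once registration succeeds, the PHC shows up in user space as a
+ * /dev/ptpN character device; tooling such as the linuxptp phc_ctl
+ * utility can then read and steer it (a usage note, with the exact
+ * tooling outside the scope of this driver).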
+ **/ +void e1000e_ptp_init(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + adapter->ptp_clock = NULL; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return; + + adapter->ptp_clock_info = e1000e_ptp_clock_info; + + snprintf(adapter->ptp_clock_info.name, + sizeof(adapter->ptp_clock_info.name), "%pm", + adapter->netdev->perm_addr); + + switch (hw->mac.type) { + case e1000_pch2lan: + case e1000_pch_lpt: + if ((hw->mac.type != e1000_pch_lpt) || + (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { + adapter->ptp_clock_info.max_adj = 24000000 - 1; + break; + } + /* fall-through */ + case e1000_82574: + case e1000_82583: + adapter->ptp_clock_info.max_adj = 600000000 - 1; + break; + default: + break; + } + + INIT_DELAYED_WORK(&adapter->systim_overflow_work, + e1000e_systim_overflow_work); + + schedule_delayed_work(&adapter->systim_overflow_work, + E1000_SYSTIM_OVERFLOW_PERIOD); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + e_err("ptp_clock_register failed\n"); + } else { + e_info("registered PHC clock\n"); + } +} + +/** + * e1000e_ptp_remove - disable PTP device and stop the overflow check + * @adapter: board private structure + * + * Stop the PTP support, and cancel the delayed work. + **/ +void e1000e_ptp_remove(struct e1000_adapter *adapter) +{ + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return; + + cancel_delayed_work_sync(&adapter->systim_overflow_work); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_info("removed PHC\n"); + } +} diff --git a/addons/e1000e/src/3.10.108/regs.h b/addons/e1000e/src/3.10.108/regs.h new file mode 100644 index 00000000..a7e6a3e3 --- /dev/null +++ b/addons/e1000e/src/3.10.108/regs.h @@ -0,0 +1,253 @@ +/******************************************************************************* + + Intel PRO/1000 Linux driver + Copyright(c) 1999 - 2013 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, + version 2, as published by the Free Software Foundation. + + This program is distributed in the hope it will be useful, but WITHOUT + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + + You should have received a copy of the GNU General Public License along with + this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + + The full GNU General Public License is included in this distribution in + the file called "COPYING". + + Contact Information: + Linux NICS + e1000-devel Mailing List + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + +*******************************************************************************/ + +#ifndef _E1000E_REGS_H_ +#define _E1000E_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ +#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ +#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ +#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ +#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */ +#define E1000_SVCR 0x000F0 +#define E1000_SVT 0x000F4 +#define E1000_LPIC 0x000FC /* Low Power IDLE control */ +#define E1000_RCTL 0x00100 /* Rx Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */ +#define E1000_TCTL 0x00400 /* Tx Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ +/* Split 
and Replication Rx Control - RW */
+#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */
+#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \
+ (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \
+ (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \
+ (0x0C008 + ((_n) * 0x40)))
+#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \
+ (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \
+ (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \
+ (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \
+ (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \
+ (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \
+ (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \
+ (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \
+ (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \
+ (0x0E028 + ((_n) * 0x40)))
+#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100))
+#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */
+#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+ (0x054E0 + ((_i - 16) * 8)))
+#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+ (0x054E4 + ((_i - 16) * 8)))
+#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8))
+#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8))
+#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */
+#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */
+#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */
+#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */
+#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */
+#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */
+#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */
+#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */
+#define E1000_COLC 0x04028 /* Collision Count - R/clr */
+#define E1000_DC 0x04030 /* Defer Count - R/clr */
+#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */
+#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */
+#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC 0x04058 /* Flow
Control Rx Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define E1000_TOTH 0x040CC /* Total Octets Tx High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */ + +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C 
/* Link Partner Ability - RW */ +#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ + +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +/* Management Decision Filters */ +#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) +#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +/* Driver-only SW semaphore (not used by BOOT agents) */ +#define E1000_SWSM2 0x05B58 +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ +#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ + +#endif diff --git a/addons/e1000e/src/4.4.180/80003es2lan.c b/addons/e1000e/src/4.4.180/80003es2lan.c new file mode 100644 index 00000000..2af603f3 --- /dev/null +++ b/addons/e1000e/src/4.4.180/80003es2lan.c @@ -0,0 +1,1418 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* 80003ES2LAN Gigabit Ethernet Controller (Copper) + * 80003ES2LAN Gigabit Ethernet Controller (Serdes) + */ + +#include "e1000.h" + +/* A table for the GG82563 cable length where the range is defined + * with a lower bound at "index" and the upper bound at + * "index + 5". + */ +static const u16 e1000_gg82563_cable_length_table[] = { + 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF +}; + +#define GG82563_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_gg82563_cable_length_table) + +static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw); +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask); +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw); +static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex); +static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data); +static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw); + +/** + * e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } else { + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + phy->type = e1000_phy_gg82563; + + /* This can only be done after all function pointers are setup. */ + ret_val = e1000e_get_phy_id(hw); + + /* Verify phy id */ + if (phy->id != GG82563_E_PHY_ID) + return -E1000_ERR_PHY; + + return ret_val; +} + +/** + * e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + nvm->type = e1000_nvm_eeprom_spi; + + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + + return 0; +} + +/** + * e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + /* Set media type and media-dependent function pointers */ + switch (hw->adapter->pdev->device) { + case E1000_DEV_ID_80003ES2LAN_SERDES_DPT: + hw->phy.media_type = e1000_media_type_internal_serdes; + mac->ops.check_for_link = e1000e_check_for_serdes_link; + mac->ops.setup_physical_interface = + e1000e_setup_fiber_serdes_link; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + mac->ops.check_for_link = e1000e_check_for_copper_link; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_80003es2lan; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are enabled. */ + mac->arc_subsystem_valid = !!(er32(FWSM) & E1000_FWSM_MODE_MASK); + /* Adaptive IFS not supported */ + mac->adaptive_ifs = false; + + /* set lan id for port to determine which phy lock to use */ + hw->mac.ops.set_lan_id(hw); + + return 0; +} + +static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_80003es2lan(hw); + if (rc) + return rc; + + rc = e1000_init_nvm_params_80003es2lan(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_80003es2lan(hw); + if (rc) + return rc; + + return 0; +} + +/** + * e1000_acquire_phy_80003es2lan - Acquire rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to acquire access rights to the correct PHY. + **/ +static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_phy_80003es2lan - Release rights to access PHY + * @hw: pointer to the HW structure + * + * A wrapper to release access rights to the correct PHY. + **/ +static void e1000_release_phy_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_mac_csr_80003es2lan - Acquire right to access Kumeran register + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the Kumeran interface. + * + **/ +static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = E1000_SWFW_CSR_SM; + + return e1000_acquire_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_release_mac_csr_80003es2lan - Release right to access Kumeran Register + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the Kumeran interface + **/ +static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw) +{ + u16 mask; + + mask = E1000_SWFW_CSR_SM; + + e1000_release_swfw_sync_80003es2lan(hw, mask); +} + +/** + * e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM + * @hw: pointer to the HW structure + * + * Acquire the semaphore to access the EEPROM. 
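+ *
+ * Expected pairing (a sketch, mirroring the acquire/release functions
+ * below):
+ *   ret_val = e1000_acquire_nvm_80003es2lan(hw);
+ *   if (!ret_val) {
+ *           /* ... EEPROM reads/writes ... */
+ *           e1000_release_nvm_80003es2lan(hw);
+ *   }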
+ **/ +static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + if (ret_val) + return ret_val; + + ret_val = e1000e_acquire_nvm(hw); + + if (ret_val) + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); + + return ret_val; +} + +/** + * e1000_release_nvm_80003es2lan - Relinquish rights to access NVM + * @hw: pointer to the HW structure + * + * Release the semaphore used to access the EEPROM. + **/ +static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM); +} + +/** + * e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Acquire the SW/FW semaphore to access the PHY or NVM. The mask + * will also specify which port we're acquiring the lock for. + **/ +static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + u32 swmask = mask; + u32 fwmask = mask << 16; + s32 i = 0; + s32 timeout = 50; + + while (i < timeout) { + if (e1000e_get_hw_semaphore(hw)) + return -E1000_ERR_SWFW_SYNC; + + swfw_sync = er32(SW_FW_SYNC); + if (!(swfw_sync & (fwmask | swmask))) + break; + + /* Firmware currently using resource (fwmask) + * or other software thread using resource (swmask) + */ + e1000e_put_hw_semaphore(hw); + mdelay(5); + i++; + } + + if (i == timeout) { + e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n"); + return -E1000_ERR_SWFW_SYNC; + } + + swfw_sync |= swmask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); + + return 0; +} + +/** + * e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore + * @hw: pointer to the HW structure + * @mask: specifies which semaphore to acquire + * + * Release the SW/FW semaphore used to access the PHY or NVM. The mask + * will also specify which port we're releasing the lock for. + **/ +static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask) +{ + u32 swfw_sync; + + while (e1000e_get_hw_semaphore(hw) != 0) + ; /* Empty */ + + swfw_sync = er32(SW_FW_SYNC); + swfw_sync &= ~mask; + ew32(SW_FW_SYNC, swfw_sync); + + e1000e_put_hw_semaphore(hw); +} + +/** + * e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: pointer to the data returned from the operation + * + * Read the GG82563 PHY register. + **/ +static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 *data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... 
+ */ + usleep_range(200, 400); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + e1000_release_phy_80003es2lan(hw); + return -E1000_ERR_PHY; + } + + usleep_range(200, 400); + + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + + usleep_range(200, 400); + } else { + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @data: value to write to the register + * + * Write to the GG82563 PHY register. + **/ +static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw, + u32 offset, u16 data) +{ + s32 ret_val; + u32 page_select; + u16 temp; + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + /* Select Configuration Page */ + if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) { + page_select = GG82563_PHY_PAGE_SELECT; + } else { + /* Use Alternative Page Select register to access + * registers 30 and 31 + */ + page_select = GG82563_PHY_PAGE_SELECT_ALT; + } + + temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT); + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp); + if (ret_val) { + e1000_release_phy_80003es2lan(hw); + return ret_val; + } + + if (hw->dev_spec.e80003es2lan.mdic_wa_enable) { + /* The "ready" bit in the MDIC register may be incorrectly set + * before the device has completed the "Page Select" MDI + * transaction. So we wait 200us after each MDI command... + */ + usleep_range(200, 400); + + /* ...and verify the command was successful. */ + ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp); + + if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) { + e1000_release_phy_80003es2lan(hw); + return -E1000_ERR_PHY; + } + + usleep_range(200, 400); + + ret_val = e1000e_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & + offset, data); + + usleep_range(200, 400); + } else { + ret_val = e1000e_write_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & + offset, data); + } + + e1000_release_phy_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_write_nvm_80003es2lan - Write to ESB2 NVM + * @hw: pointer to the HW structure + * @offset: offset of the register to read + * @words: number of words to write + * @data: buffer of data to write to the NVM + * + * Write "words" of data to the ESB2 NVM. + **/ +static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + return e1000e_write_nvm_spi(hw, offset, words, data); +} + +/** + * e1000_get_cfg_done_80003es2lan - Wait for configuration to complete + * @hw: pointer to the HW structure + * + * Wait a specific amount of time for manageability processes to complete. + * This is a function pointer entry point called by the phy module. 
+ **/ +static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + u32 mask = E1000_NVM_CFG_DONE_PORT_0; + + if (hw->bus.func == 1) + mask = E1000_NVM_CFG_DONE_PORT_1; + + while (timeout) { + if (er32(EEMNGCTL) & mask) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) { + e_dbg("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex + * @hw: pointer to the HW structure + * + * Force the speed and duplex settings onto the PHY. This is a + * function pointer entry point called by the phy module. + **/ +static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + bool link; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("GG82563 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + /* Reset the phy to commit changes. */ + phy_data |= BMCR_RESET; + + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + + if (hw->phy.autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on GG82563 phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1000e_phy_reset_dsp(hw); + if (ret_val) + return ret_val; + } + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to verify the TX_CLK corresponds + * to the link speed. 10Mbps -> 2.5MHz, else 25MHz. + */ + phy_data &= ~GG82563_MSCR_TX_CLK_MASK; + if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED) + phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5; + else + phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. + */ + phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_get_cable_length_80003es2lan - Set approximate cable length + * @hw: pointer to the HW structure + * + * Find the approximate cable length as measured by the GG82563 PHY. + * This is a function pointer entry point called by the phy module. 
+ **/ +static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data); + if (ret_val) + return ret_val; + + index = phy_data & GG82563_DSPD_CABLE_LENGTH; + + if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_gg82563_cable_length_table[index]; + phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return 0; +} + +/** + * e1000_get_link_up_info_80003es2lan - Report speed and duplex + * @hw: pointer to the HW structure + * @speed: pointer to speed buffer + * @duplex: pointer to duplex buffer + * + * Retrieve the current speed and duplex configuration. + **/ +static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + s32 ret_val; + + if (hw->phy.media_type == e1000_media_type_copper) { + ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex); + hw->phy.ops.cfg_on_link_up(hw); + } else { + ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw, + speed, + duplex); + } + + return ret_val; +} + +/** + * e1000_reset_hw_80003es2lan - Reset the ESB2 controller + * @hw: pointer to the HW structure + * + * Perform a global reset to the ESB2 controller. + **/ +static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 kum_reg_data; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + usleep_range(10000, 20000); + + ctrl = er32(CTRL); + + ret_val = e1000_acquire_phy_80003es2lan(hw); + if (ret_val) + return ret_val; + + e_dbg("Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + e1000_release_phy_80003es2lan(hw); + + /* Disable IBIST slave mode (far-end loopback) */ + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + if (ret_val) + return ret_val; + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Clear any pending interrupt events. */ + ew32(IMC, 0xffffffff); + er32(ICR); + + return e1000_check_alt_mac_addr_generic(hw); +} + +/** + * e1000_init_hw_80003es2lan - Initialize the ESB2 controller + * @hw: pointer to the HW structure + * + * Initialize the hw bits, LED, VFTA, MTA, link and hw counters. + **/ +static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 kum_reg_data; + u16 i; + + e1000_initialize_hw_bits_80003es2lan(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + e_dbg("Error initializing identification LED\n"); + + /* Disabling VLAN filtering */ + e_dbg("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. 
*/ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + if (ret_val) + return ret_val; + + /* Disable IBIST slave mode (far-end loopback) */ + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + &kum_reg_data); + kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE; + e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM, + kum_reg_data); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL(0)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + ew32(TXDCTL(0), reg_data); + + /* ...for both queues. */ + reg_data = er32(TXDCTL(1)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + ew32(TXDCTL(1), reg_data); + + /* Enable retransmit on late collisions */ + reg_data = er32(TCTL); + reg_data |= E1000_TCTL_RTLC; + ew32(TCTL, reg_data); + + /* Configure Gigabit Carry Extend Padding */ + reg_data = er32(TCTL_EXT); + reg_data &= ~E1000_TCTL_EXT_GCEX_MASK; + reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN; + ew32(TCTL_EXT, reg_data); + + /* Configure Transmit Inter-Packet Gap */ + reg_data = er32(TIPG); + reg_data &= ~E1000_TIPG_IPGT_MASK; + reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN; + ew32(TIPG, reg_data); + + reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001); + reg_data &= ~0x00100000; + E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data); + + /* default to true to enable the MDIC W/A */ + hw->dev_spec.e80003es2lan.mdic_wa_enable = true; + + ret_val = + e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_OFFSET >> + E1000_KMRNCTRLSTA_OFFSET_SHIFT, &i); + if (!ret_val) { + if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) == + E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO) + hw->dev_spec.e80003es2lan.mdic_wa_enable = false; + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2 + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. + **/ +static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + reg &= ~(0xF << 27); /* 30:27 */ + if (hw->phy.media_type != e1000_media_type_copper) + reg &= ~(1 << 20); + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + ew32(TARC(1), reg); + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. 
+ */ + reg = er32(RFCTL); + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + ew32(RFCTL, reg); +} + +/** + * e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link + * @hw: pointer to the HW structure + * + * Setup some GG82563 PHY registers for obtaining link + **/ +static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 reg; + u16 data; + + ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data |= GG82563_MSCR_ASSERT_CRS_ON_TX; + /* Use 25MHz for both link down and 1000Base-T for Tx clock. */ + data |= GG82563_MSCR_TX_CLK_1000MBPS_25; + + ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK; + + switch (phy->mdix) { + case 1: + data |= GG82563_PSCR_CROSSOVER_MODE_MDI; + break; + case 2: + data |= GG82563_PSCR_CROSSOVER_MODE_MDIX; + break; + case 0: + default: + data |= GG82563_PSCR_CROSSOVER_MODE_AUTO; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + if (phy->disable_polarity_correction) + data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE; + + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data); + if (ret_val) + return ret_val; + + /* SW Reset the PHY so all changes take effect */ + ret_val = hw->phy.ops.commit(hw); + if (ret_val) { + e_dbg("Error Resetting the PHY\n"); + return ret_val; + } + + /* Bypass Rx and Tx FIFO's */ + reg = E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL; + data = (E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS | + E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS); + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); + if (ret_val) + return ret_val; + + reg = E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE; + ret_val = e1000_read_kmrn_reg_80003es2lan(hw, reg, &data); + if (ret_val) + return ret_val; + data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE; + ret_val = e1000_write_kmrn_reg_80003es2lan(hw, reg, data); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data); + if (ret_val) + return ret_val; + + data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG; + ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data); + if (ret_val) + return ret_val; + + reg = er32(CTRL_EXT); + reg &= ~E1000_CTRL_EXT_LINK_MODE_MASK; + ew32(CTRL_EXT, reg); + + ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data); + if (ret_val) + return ret_val; + + /* Do not init these registers when the HW is in IAMT mode, since the + * firmware will have already initialized them. We only initialize + * them if the HW is not in IAMT mode. 
+ */
+ if (!hw->mac.ops.check_mng_mode(hw)) {
+ /* Enable Electrical Idle on the PHY */
+ data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+ ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+ ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
+ if (ret_val)
+ return ret_val;
+ }
+
+ /* Workaround: Disable padding in Kumeran interface in the MAC
+ * and in the PHY to avoid CRC errors.
+ */
+ ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
+ if (ret_val)
+ return ret_val;
+
+ data |= GG82563_ICR_DIS_PADDING;
+ ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data);
+ if (ret_val)
+ return ret_val;
+
+ return 0;
+}
+
+/**
+ * e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ * @hw: pointer to the HW structure
+ *
+ * Essentially a wrapper for setting up all things "copper" related.
+ * This is a function pointer entry point called by the mac module.
+ **/
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+ u32 ctrl;
+ s32 ret_val;
+ u16 reg_data;
+
+ ctrl = er32(CTRL);
+ ctrl |= E1000_CTRL_SLU;
+ ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+ ew32(CTRL, ctrl);
+
+ /* Set the mac to wait the maximum time between each
+ * iteration and increase the max iterations when
+ * polling the phy; this fixes erroneous timeouts at 10Mbps.
+ */
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
+ 0xFFFF);
+ if (ret_val)
+ return ret_val;
+ ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= 0x3F;
+ ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+ reg_data);
+ if (ret_val)
+ return ret_val;
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ &reg_data);
+ if (ret_val)
+ return ret_val;
+ reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ return e1000e_setup_copper_link(hw);
+}
+
+/**
+ * e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
+ **/
+static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+ u16 speed;
+ u16 duplex;
+
+ if (hw->phy.media_type == e1000_media_type_copper) {
+ ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
+ &duplex);
+ if (ret_val)
+ return ret_val;
+
+ if (speed == SPEED_1000)
+ ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
+ else
+ ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
+ }
+
+ return ret_val;
+}
+
+/**
+ * e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ * @hw: pointer to the HW structure
+ * @duplex: current duplex setting
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * 10/100 operation.
+ **/
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+ s32 ret_val;
+ u32 tipg;
+ u32 i = 0;
+ u16 reg_data, reg_data2;
+
+ reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = er32(TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+ ew32(TIPG, tipg);
+
+ do {
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+ if (ret_val)
+ return ret_val;
+ i++;
+ } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+ if (duplex == HALF_DUPLEX)
+ reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+ else
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+}
+
+/**
+ * e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ * @hw: pointer to the HW structure
+ *
+ * Configure the KMRN interface by applying last minute quirks for
+ * gigabit operation.
+ **/
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val;
+ u16 reg_data, reg_data2;
+ u32 tipg;
+ u32 i = 0;
+
+ reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+ ret_val =
+ e1000_write_kmrn_reg_80003es2lan(hw,
+ E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+ reg_data);
+ if (ret_val)
+ return ret_val;
+
+ /* Configure Transmit Inter-Packet Gap */
+ tipg = er32(TIPG);
+ tipg &= ~E1000_TIPG_IPGT_MASK;
+ tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+ ew32(TIPG, tipg);
+
+ do {
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+ if (ret_val)
+ return ret_val;
+
+ ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+ if (ret_val)
+ return ret_val;
+ i++;
+ } while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+ reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+ return e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+}
+
+/**
+ * e1000_read_kmrn_reg_80003es2lan - Read kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to be read
+ * @data: pointer to the read data
+ *
+ * Acquire semaphore, then read the PHY register at offset
+ * using the kumeran interface. The information retrieved is stored in data.
+ * Release the semaphore before exiting.
+ **/
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+ u16 *data)
+{
+ u32 kmrnctrlsta;
+ s32 ret_val;
+
+ ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+ if (ret_val)
+ return ret_val;
+
+ kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+ E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+ ew32(KMRNCTRLSTA, kmrnctrlsta);
+ e1e_flush();
+
+ udelay(2);
+
+ kmrnctrlsta = er32(KMRNCTRLSTA);
+ *data = (u16)kmrnctrlsta;
+
+ e1000_release_mac_csr_80003es2lan(hw);
+
+ return ret_val;
+}
+
+/**
+ * e1000_write_kmrn_reg_80003es2lan - Write kumeran register
+ * @hw: pointer to the HW structure
+ * @offset: register offset to write to
+ * @data: data to write at register offset
+ *
+ * Acquire semaphore, then write the data to PHY register
+ * at the offset using the kumeran interface. Release semaphore
+ * before exiting.
+ **/ +static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset, + u16 data) +{ + u32 kmrnctrlsta; + s32 ret_val; + + ret_val = e1000_acquire_mac_csr_80003es2lan(hw); + if (ret_val) + return ret_val; + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + e1000_release_mac_csr_80003es2lan(hw); + + return ret_val; +} + +/** + * e1000_read_mac_addr_80003es2lan - Read device MAC address + * @hw: pointer to the HW structure + **/ +static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw) +{ + s32 ret_val; + + /* If there's an alternate MAC address place it in RAR0 + * so that it will override the Si installed default perm + * address. + */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw) +{ + e1000e_clear_hw_cntrs_base(hw); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + er32(ICRXPTC); + er32(ICRXATC); + er32(ICTXPTC); + er32(ICTXATC); + er32(ICTXQEC); + er32(ICTXQMTC); + er32(ICRXDMTC); +} + +static const struct e1000_mac_operations es2_mac_ops = { + .read_mac_addr = e1000_read_mac_addr_80003es2lan, + .id_led_init = e1000e_id_led_init_generic, + .blink_led = e1000e_blink_led_generic, + .check_mng_mode = e1000e_check_mng_mode_generic, + /* check_for_link dependent on media type */ + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan, + .get_bus_info = e1000e_get_bus_info_pcie, + .set_lan_id = e1000_set_lan_id_multi_port_pcie, + .get_link_up_info = e1000_get_link_up_info_80003es2lan, + .led_on = e1000e_led_on_generic, + .led_off = e1000e_led_off_generic, + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .write_vfta = e1000_write_vfta_generic, + .clear_vfta = e1000_clear_vfta_generic, + .reset_hw = e1000_reset_hw_80003es2lan, + .init_hw = e1000_init_hw_80003es2lan, + .setup_link = e1000e_setup_link_generic, + /* setup_physical_interface dependent on media type */ + .setup_led = e1000e_setup_led_generic, + .config_collision_dist = e1000e_config_collision_dist_generic, + .rar_set = e1000e_rar_set_generic, + .rar_get_count = e1000e_rar_get_count_generic, +}; + +static const struct e1000_phy_operations es2_phy_ops = { + .acquire = e1000_acquire_phy_80003es2lan, + .check_polarity = e1000_check_polarity_m88, + 
.check_reset_block = e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan, + .get_cfg_done = e1000_get_cfg_done_80003es2lan, + .get_cable_length = e1000_get_cable_length_80003es2lan, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000_read_phy_reg_gg82563_80003es2lan, + .release = e1000_release_phy_80003es2lan, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = NULL, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000_write_phy_reg_gg82563_80003es2lan, + .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, +}; + +static const struct e1000_nvm_operations es2_nvm_ops = { + .acquire = e1000_acquire_nvm_80003es2lan, + .read = e1000e_read_nvm_eerd, + .release = e1000_release_nvm_80003es2lan, + .reload = e1000e_reload_nvm_generic, + .update = e1000e_update_nvm_checksum_generic, + .valid_led_default = e1000e_valid_led_default, + .validate = e1000e_validate_nvm_checksum_generic, + .write = e1000_write_nvm_80003es2lan, +}; + +const struct e1000_info e1000_es2_info = { + .mac = e1000_80003es2lan, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_RX_NEEDS_RESTART /* errata */ + | FLAG_TARC_SET_BIT_ZERO /* errata */ + | FLAG_APME_CHECK_PORT_B + | FLAG_DISABLE_FC_PAUSE_TIME, /* errata */ + .flags2 = FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_80003es2lan, + .mac_ops = &es2_mac_ops, + .phy_ops = &es2_phy_ops, + .nvm_ops = &es2_nvm_ops, +}; diff --git a/addons/e1000e/src/4.4.180/80003es2lan.h b/addons/e1000e/src/4.4.180/80003es2lan.h new file mode 100644 index 00000000..a2162e11 --- /dev/null +++ b/addons/e1000e/src/4.4.180/80003es2lan.h @@ -0,0 +1,88 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_80003ES2LAN_H_ +#define _E1000E_80003ES2LAN_H_ + +#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL 0x00 +#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL 0x02 +#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL 0x10 +#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE 0x1F + +#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS 0x0008 +#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS 0x0800 +#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING 0x0010 + +#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004 +#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT 0x0000 +#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE 0x2000 + +#define E1000_KMRNCTRLSTA_OPMODE_MASK 0x000C +#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO 0x0004 + +#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gig Carry Extend Padding */ +#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN 0x00010000 + +#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN 0x8 +#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN 0x9 + +/* GG82563 PHY Specific Status Register (Page 0, Register 16 */ +#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE 0x0002 /* 1=Reversal Dis */ +#define GG82563_PSCR_CROSSOVER_MODE_MASK 0x0060 +#define GG82563_PSCR_CROSSOVER_MODE_MDI 0x0000 /* 00=Manual MDI */ +#define GG82563_PSCR_CROSSOVER_MODE_MDIX 0x0020 /* 01=Manual MDIX */ +#define GG82563_PSCR_CROSSOVER_MODE_AUTO 0x0060 /* 11=Auto crossover */ + +/* PHY Specific Control Register 2 (Page 0, Register 26) */ +#define GG82563_PSCR2_REVERSE_AUTO_NEG 0x2000 /* 1=Reverse Auto-Neg */ + +/* MAC Specific Control Register (Page 2, Register 21) */ +/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */ +#define GG82563_MSCR_TX_CLK_MASK 0x0007 +#define GG82563_MSCR_TX_CLK_10MBPS_2_5 0x0004 +#define GG82563_MSCR_TX_CLK_100MBPS_25 0x0005 +#define GG82563_MSCR_TX_CLK_1000MBPS_25 0x0007 + +#define GG82563_MSCR_ASSERT_CRS_ON_TX 0x0010 /* 1=Assert */ + +/* DSP Distance Register (Page 5, Register 26) + * 0 = <50M + * 1 = 50-80M + * 2 = 80-100M + * 3 = 110-140M + * 4 = >140M + */ +#define GG82563_DSPD_CABLE_LENGTH 0x0007 + +/* Kumeran Mode Control Register (Page 193, Register 16) */ +#define GG82563_KMCR_PASS_FALSE_CARRIER 0x0800 + +/* Max number of times Kumeran read/write should be validated */ +#define GG82563_MAX_KMRN_RETRY 0x5 + +/* Power Management Control Register (Page 193, Register 20) */ +/* 1=Enable SERDES Electrical Idle */ +#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE 0x0001 + +/* In-Band Control Register (Page 194, Register 18) */ +#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */ + +#endif diff --git a/addons/e1000e/src/4.4.180/82571.c b/addons/e1000e/src/4.4.180/82571.c new file mode 100644 index 00000000..5f701644 --- /dev/null +++ b/addons/e1000e/src/4.4.180/82571.c @@ -0,0 +1,2063 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* 82571EB Gigabit Ethernet Controller + * 82571EB Gigabit Ethernet Controller (Copper) + * 82571EB Gigabit Ethernet Controller (Fiber) + * 82571EB Dual Port Gigabit Mezzanine Adapter + * 82571EB Quad Port Gigabit Mezzanine Adapter + * 82571PT Gigabit PT Quad Port Server ExpressModule + * 82572EI Gigabit Ethernet Controller (Copper) + * 82572EI Gigabit Ethernet Controller (Fiber) + * 82572EI Gigabit Ethernet Controller + * 82573V Gigabit Ethernet Controller (Copper) + * 82573E Gigabit Ethernet Controller (Copper) + * 82573L Gigabit Ethernet Controller + * 82574L Gigabit Network Connection + * 82583V Gigabit Network Connection + */ + +#include "e1000.h" + +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw); +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw); +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw); +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data); +static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw); +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw); +static bool e1000_check_mng_mode_82574(struct e1000_hw *hw); +static s32 e1000_led_on_82574(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); +static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw); +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw); +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw); +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active); +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active); + +/** + * e1000_init_phy_params_82571 - Init PHY func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_phy_params_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + if (hw->phy.media_type != e1000_media_type_copper) { + phy->type = e1000_phy_none; + return 0; + } + + phy->addr = 1; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->reset_delay_us = 100; + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_82571; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + phy->type = e1000_phy_igp_2; + break; + case e1000_82573: + phy->type = e1000_phy_m88; + break; + case e1000_82574: + case e1000_82583: + phy->type = e1000_phy_bm; + phy->ops.acquire = e1000_get_hw_semaphore_82574; + phy->ops.release = e1000_put_hw_semaphore_82574; + phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574; + phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574; + break; + default: + return -E1000_ERR_PHY; + } + + /* This can only be done after all function pointers are setup. 
*/ + ret_val = e1000_get_phy_id_82571(hw); + if (ret_val) { + e_dbg("Error getting PHY ID\n"); + return ret_val; + } + + /* Verify phy id */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + if (phy->id != IGP01E1000_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82573: + if (phy->id != M88E1111_I_PHY_ID) + ret_val = -E1000_ERR_PHY; + break; + case e1000_82574: + case e1000_82583: + if (phy->id != BME1000_E_PHY_ID_R2) + ret_val = -E1000_ERR_PHY; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + if (ret_val) + e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id); + + return ret_val; +} + +/** + * e1000_init_nvm_params_82571 - Init NVM func ptrs. + * @hw: pointer to the HW structure + **/ +static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u16 size; + + nvm->opcode_bits = 8; + nvm->delay_usec = 1; + switch (nvm->override) { + case e1000_nvm_override_spi_large: + nvm->page_size = 32; + nvm->address_bits = 16; + break; + case e1000_nvm_override_spi_small: + nvm->page_size = 8; + nvm->address_bits = 8; + break; + default: + nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8; + nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8; + break; + } + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + if (((eecd >> 15) & 0x3) == 0x3) { + nvm->type = e1000_nvm_flash_hw; + nvm->word_size = 2048; + /* Autonomous Flash update bit must be cleared due + * to Flash update issue. + */ + eecd &= ~E1000_EECD_AUPDEN; + ew32(EECD, eecd); + break; + } + /* Fall Through */ + default: + nvm->type = e1000_nvm_eeprom_spi; + size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >> + E1000_EECD_SIZE_EX_SHIFT); + /* Added to a constant, "size" becomes the left-shift value + * for setting word_size. + */ + size += NVM_WORD_SIZE_BASE_SHIFT; + + /* EEPROM access above 16k is unsupported */ + if (size > 14) + size = 14; + nvm->word_size = 1 << size; + break; + } + + /* Function Pointers */ + switch (hw->mac.type) { + case e1000_82574: + case e1000_82583: + nvm->ops.acquire = e1000_get_hw_semaphore_82574; + nvm->ops.release = e1000_put_hw_semaphore_82574; + break; + default: + break; + } + + return 0; +} + +/** + * e1000_init_mac_params_82571 - Init MAC func ptrs. 
+ * @hw: pointer to the HW structure + **/ +static s32 e1000_init_mac_params_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 swsm = 0; + u32 swsm2 = 0; + bool force_clear_smbi = false; + + /* Set media type and media-dependent function pointers */ + switch (hw->adapter->pdev->device) { + case E1000_DEV_ID_82571EB_FIBER: + case E1000_DEV_ID_82572EI_FIBER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + hw->phy.media_type = e1000_media_type_fiber; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + mac->ops.check_for_link = e1000e_check_for_fiber_link; + mac->ops.get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; + break; + case E1000_DEV_ID_82571EB_SERDES: + case E1000_DEV_ID_82571EB_SERDES_DUAL: + case E1000_DEV_ID_82571EB_SERDES_QUAD: + case E1000_DEV_ID_82572EI_SERDES: + hw->phy.media_type = e1000_media_type_internal_serdes; + mac->ops.setup_physical_interface = + e1000_setup_fiber_serdes_link_82571; + mac->ops.check_for_link = e1000_check_for_serdes_link_82571; + mac->ops.get_link_up_info = + e1000e_get_speed_and_duplex_fiber_serdes; + break; + default: + hw->phy.media_type = e1000_media_type_copper; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_82571; + mac->ops.check_for_link = e1000e_check_for_copper_link; + mac->ops.get_link_up_info = e1000e_get_speed_and_duplex_copper; + break; + } + + /* Set mta register count */ + mac->mta_reg_count = 128; + /* Set rar entry count */ + mac->rar_entry_count = E1000_RAR_ENTRIES; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* MAC-specific function pointers */ + switch (hw->mac.type) { + case e1000_82573: + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; + mac->ops.led_on = e1000e_led_on_generic; + mac->ops.blink_led = e1000e_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + /* ARC supported; valid only if manageability features are + * enabled. + */ + mac->arc_subsystem_valid = !!(er32(FWSM) & + E1000_FWSM_MODE_MASK); + break; + case e1000_82574: + case e1000_82583: + mac->ops.set_lan_id = e1000_set_lan_id_single_port; + mac->ops.check_mng_mode = e1000_check_mng_mode_82574; + mac->ops.led_on = e1000_led_on_82574; + break; + default: + mac->ops.check_mng_mode = e1000e_check_mng_mode_generic; + mac->ops.led_on = e1000e_led_on_generic; + mac->ops.blink_led = e1000e_blink_led_generic; + + /* FWSM register */ + mac->has_fwsm = true; + break; + } + + /* Ensure that the inter-port SWSM.SMBI lock bit is clear before + * first NVM or PHY access. This should be done for single-port + * devices, and for one port only on dual-port devices so that + * for those devices we can still use the SMBI lock to synchronize + * inter-port accesses to the PHY & NVM. 
+ */ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + swsm2 = er32(SWSM2); + + if (!(swsm2 & E1000_SWSM2_LOCK)) { + /* Only do this for the first interface on this card */ + ew32(SWSM2, swsm2 | E1000_SWSM2_LOCK); + force_clear_smbi = true; + } else { + force_clear_smbi = false; + } + break; + default: + force_clear_smbi = true; + break; + } + + if (force_clear_smbi) { + /* Make sure SWSM.SMBI is clear */ + swsm = er32(SWSM); + if (swsm & E1000_SWSM_SMBI) { + /* This bit should not be set on a first interface, and + * indicates that the bootagent or EFI code has + * improperly left this bit enabled + */ + e_dbg("Please update your 82571 Bootagent\n"); + } + ew32(SWSM, swsm & ~E1000_SWSM_SMBI); + } + + /* Initialize device specific counter of SMBI acquisition timeouts. */ + hw->dev_spec.e82571.smb_counter = 0; + + return 0; +} + +static s32 e1000_get_variants_82571(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + static int global_quad_port_a; /* global port a indication */ + struct pci_dev *pdev = adapter->pdev; + int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1; + s32 rc; + + rc = e1000_init_mac_params_82571(hw); + if (rc) + return rc; + + rc = e1000_init_nvm_params_82571(hw); + if (rc) + return rc; + + rc = e1000_init_phy_params_82571(hw); + if (rc) + return rc; + + /* tag quad port adapters first, it's used below */ + switch (pdev->device) { + case E1000_DEV_ID_82571EB_QUAD_COPPER: + case E1000_DEV_ID_82571EB_QUAD_FIBER: + case E1000_DEV_ID_82571EB_QUAD_COPPER_LP: + case E1000_DEV_ID_82571PT_QUAD_COPPER: + adapter->flags |= FLAG_IS_QUAD_PORT; + /* mark the first port */ + if (global_quad_port_a == 0) + adapter->flags |= FLAG_IS_QUAD_PORT_A; + /* Reset for multiple quad port adapters */ + global_quad_port_a++; + if (global_quad_port_a == 4) + global_quad_port_a = 0; + break; + default: + break; + } + + switch (adapter->hw.mac.type) { + case e1000_82571: + /* these dual ports don't have WoL on port B at all */ + if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) || + (pdev->device == E1000_DEV_ID_82571EB_SERDES) || + (pdev->device == E1000_DEV_ID_82571EB_COPPER)) && + (is_port_b)) + adapter->flags &= ~FLAG_HAS_WOL; + /* quad ports only support WoL on port A */ + if (adapter->flags & FLAG_IS_QUAD_PORT && + (!(adapter->flags & FLAG_IS_QUAD_PORT_A))) + adapter->flags &= ~FLAG_HAS_WOL; + /* Does not support WoL on any port */ + if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) + adapter->flags &= ~FLAG_HAS_WOL; + break; + case e1000_82573: + if (pdev->device == E1000_DEV_ID_82573L) { + adapter->flags |= FLAG_HAS_JUMBO_FRAMES; + adapter->max_hw_frame_size = DEFAULT_JUMBO; + } + break; + default: + break; + } + + return 0; +} + +/** + * e1000_get_phy_id_82571 - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +static s32 e1000_get_phy_id_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_id = 0; + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* The 82571 firmware may still be configuring the PHY. + * In this case, we cannot access the PHY until the + * configuration is done. So we explicitly set the + * PHY ID. 
+ */ + phy->id = IGP01E1000_I_PHY_ID; + break; + case e1000_82573: + return e1000e_get_phy_id(hw); + case e1000_82574: + case e1000_82583: + ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + usleep_range(20, 40); + ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + break; + default: + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * e1000_get_hw_semaphore_82571 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + s32 sw_timeout = hw->nvm.word_size + 1; + s32 fw_timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* If we have timedout 3 times on trying to acquire + * the inter-port SMBI semaphore, there is old code + * operating on the other port, and it is not + * releasing SMBI. Modify the number of times that + * we try for the semaphore to interwork with this + * older code. + */ + if (hw->dev_spec.e82571.smb_counter > 2) + sw_timeout = 1; + + /* Get the SW semaphore */ + while (i < sw_timeout) { + swsm = er32(SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usleep_range(50, 100); + i++; + } + + if (i == sw_timeout) { + e_dbg("Driver can't access device - SMBI bit is set.\n"); + hw->dev_spec.e82571.smb_counter++; + } + /* Get the FW semaphore. */ + for (i = 0; i < fw_timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + usleep_range(50, 100); + } + + if (i == fw_timeout) { + /* Release semaphores */ + e1000_put_hw_semaphore_82571(hw); + e_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_put_hw_semaphore_82571 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + ew32(SWSM, swsm); +} + +/** + * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore during reset. + * + **/ +static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + s32 i = 0; + + extcnf_ctrl = er32(EXTCNF_CTRL); + do { + extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + ew32(EXTCNF_CTRL, extcnf_ctrl); + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP) + break; + + usleep_range(2000, 4000); + i++; + } while (i < MDIO_OWNERSHIP_TIMEOUT); + + if (i == MDIO_OWNERSHIP_TIMEOUT) { + /* Release semaphores */ + e1000_put_hw_semaphore_82573(hw); + e_dbg("Driver can't access the PHY\n"); + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * e1000_put_hw_semaphore_82573 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used during reset. 
+ * + **/ +static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP; + ew32(EXTCNF_CTRL, extcnf_ctrl); +} + +static DEFINE_MUTEX(swflag_mutex); + +/** + * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM. + * + **/ +static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw) +{ + s32 ret_val; + + mutex_lock(&swflag_mutex); + ret_val = e1000_get_hw_semaphore_82573(hw); + if (ret_val) + mutex_unlock(&swflag_mutex); + return ret_val; +} + +/** + * e1000_put_hw_semaphore_82574 - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + * + **/ +static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw) +{ + e1000_put_hw_semaphore_82573(hw); + mutex_unlock(&swflag_mutex); +} + +/** + * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. + * LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u32 data = er32(POEMB); + + if (active) + data |= E1000_PHY_CTRL_D0A_LPLU; + else + data &= ~E1000_PHY_CTRL_D0A_LPLU; + + ew32(POEMB, data); + return 0; +} + +/** + * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * The low power link up (lplu) state is set to the power management level D3 + * when active is true, else clear lplu for D3. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active) +{ + u32 data = er32(POEMB); + + if (!active) { + data &= ~E1000_PHY_CTRL_NOND0A_LPLU; + } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) || + (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= E1000_PHY_CTRL_NOND0A_LPLU; + } + + ew32(POEMB, data); + return 0; +} + +/** + * e1000_acquire_nvm_82571 - Request for access to the EEPROM + * @hw: pointer to the HW structure + * + * To gain access to the EEPROM, first we must obtain a hardware semaphore. + * Then for non-82573 hardware, set the EEPROM access request bit and wait + * for EEPROM access grant bit. If the access grant bit is not set, release + * hardware semaphore. + **/ +static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1000_get_hw_semaphore_82571(hw); + if (ret_val) + return ret_val; + + switch (hw->mac.type) { + case e1000_82573: + break; + default: + ret_val = e1000e_acquire_nvm(hw); + break; + } + + if (ret_val) + e1000_put_hw_semaphore_82571(hw); + + return ret_val; +} + +/** + * e1000_release_nvm_82571 - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. 
+ **/ +static void e1000_release_nvm_82571(struct e1000_hw *hw) +{ + e1000e_release_nvm(hw); + e1000_put_hw_semaphore_82571(hw); +} + +/** + * e1000_write_nvm_82571 - Write to EEPROM using appropriate interface + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * For non-82573 silicon, write data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + s32 ret_val; + + switch (hw->mac.type) { + case e1000_82573: + case e1000_82574: + case e1000_82583: + ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data); + break; + case e1000_82571: + case e1000_82572: + ret_val = e1000e_write_nvm_spi(hw, offset, words, data); + break; + default: + ret_val = -E1000_ERR_NVM; + break; + } + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_82571 - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw) +{ + u32 eecd; + s32 ret_val; + u16 i; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + return ret_val; + + /* If our nvm is an EEPROM, then we're done + * otherwise, commit the checksum to the flash NVM. + */ + if (hw->nvm.type != e1000_nvm_flash_hw) + return 0; + + /* Check for pending operations. */ + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + usleep_range(1000, 2000); + if (!(er32(EECD) & E1000_EECD_FLUPD)) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + /* Reset the firmware if using STM opcode. */ + if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) { + /* The enabling of and the actual reset must be done + * in two write cycles. + */ + ew32(HICR, E1000_HICR_FW_RESET_ENABLE); + e1e_flush(); + ew32(HICR, E1000_HICR_FW_RESET); + } + + /* Commit the write to flash */ + eecd = er32(EECD) | E1000_EECD_FLUPD; + ew32(EECD, eecd); + + for (i = 0; i < E1000_FLASH_UPDATES; i++) { + usleep_range(1000, 2000); + if (!(er32(EECD) & E1000_EECD_FLUPD)) + break; + } + + if (i == E1000_FLASH_UPDATES) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw) +{ + if (hw->nvm.type == e1000_nvm_flash_hw) + e1000_fix_nvm_checksum_82571(hw); + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * After checking for invalid values, poll the EEPROM to ensure the previous + * command has completed before trying to write the next word. After write + * poll for completion. 
+ * + * If e1000e_update_nvm_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset, + u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eewr = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eewr = ((data[i] << E1000_NVM_RW_REG_DATA) | + ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) | + E1000_NVM_RW_REG_START); + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + + ew32(EEWR, eewr); + + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE); + if (ret_val) + break; + } + + return ret_val; +} + +/** + * e1000_get_cfg_done_82571 - Poll for configuration done + * @hw: pointer to the HW structure + * + * Reads the management control register for the config done bit to be set. + **/ +static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw) +{ + s32 timeout = PHY_CFG_TIMEOUT; + + while (timeout) { + if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0) + break; + usleep_range(1000, 2000); + timeout--; + } + if (!timeout) { + e_dbg("MNG configuration cycle has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When activating LPLU + * this function also disables smart speed and vice versa. LPLU will not be + * activated unless the device autonegotiation advertisement meets standards + * of either 10 or 10/100 or 10/100/1000 at all duplexes. This is a function + * pointer entry point only called by PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (active) { + data |= IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + data &= ~IGP02E1000_PM_D0_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. 
+ */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_reset_hw_82571 - Reset hardware + * @hw: pointer to the HW structure + * + * This resets the hardware into a known state. + **/ +static s32 e1000_reset_hw_82571(struct e1000_hw *hw) +{ + u32 ctrl, ctrl_ext, eecd, tctl; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + ew32(RCTL, 0); + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + e1e_flush(); + + usleep_range(10000, 20000); + + /* Must acquire the MDIO ownership before MAC reset. + * Ownership defaults to firmware after a reset. + */ + switch (hw->mac.type) { + case e1000_82573: + ret_val = e1000_get_hw_semaphore_82573(hw); + break; + case e1000_82574: + case e1000_82583: + ret_val = e1000_get_hw_semaphore_82574(hw); + break; + default: + break; + } + + ctrl = er32(CTRL); + + e_dbg("Issuing a global reset to MAC\n"); + ew32(CTRL, ctrl | E1000_CTRL_RST); + + /* Must release MDIO ownership and mutex after MAC reset. */ + switch (hw->mac.type) { + case e1000_82573: + /* Release mutex only if the hw semaphore is acquired */ + if (!ret_val) + e1000_put_hw_semaphore_82573(hw); + break; + case e1000_82574: + case e1000_82583: + /* Release mutex only if the hw semaphore is acquired */ + if (!ret_val) + e1000_put_hw_semaphore_82574(hw); + break; + default: + break; + } + + if (hw->nvm.type == e1000_nvm_flash_hw) { + usleep_range(10, 20); + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_EE_RST; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + } + + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) + /* We don't want to continue accessing MAC registers. */ + return ret_val; + + /* Phy configuration from NVM just starts after EECD_AUTO_RD is set. + * Need to wait for Phy configuration completion before accessing + * NVM and Phy. + */ + + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* REQ and GNT bits need to be cleared when using AUTO_RD + * to access the EEPROM. + */ + eecd = er32(EECD); + eecd &= ~(E1000_EECD_REQ | E1000_EECD_GNT); + ew32(EECD, eecd); + break; + case e1000_82573: + case e1000_82574: + case e1000_82583: + msleep(25); + break; + default: + break; + } + + /* Clear any pending interrupt events. 
*/ + ew32(IMC, 0xffffffff); + er32(ICR); + + if (hw->mac.type == e1000_82571) { + /* Install any alternate MAC address into RAR0 */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + + e1000e_set_laa_state_82571(hw, true); + } + + /* Reinitialize the 82571 serdes link state machine */ + if (hw->phy.media_type == e1000_media_type_internal_serdes) + hw->mac.serdes_link_state = e1000_serdes_link_down; + + return 0; +} + +/** + * e1000_init_hw_82571 - Initialize hardware + * @hw: pointer to the HW structure + * + * This inits the hardware readying it for operation. + **/ +static s32 e1000_init_hw_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 reg_data; + s32 ret_val; + u16 i, rar_count = mac->rar_entry_count; + + e1000_initialize_hw_bits_82571(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + e_dbg("Error initializing identification LED\n"); + + /* Disabling VLAN filtering */ + e_dbg("Initializing the IEEE VLAN\n"); + mac->ops.clear_vfta(hw); + + /* Setup the receive address. + * If, however, a locally administered address was assigned to the + * 82571, we must reserve a RAR for it to work around an issue where + * resetting one port will reload the MAC on the other port. + */ + if (e1000e_get_laa_state_82571(hw)) + rar_count--; + e1000e_init_rx_addrs(hw, rar_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the transmit descriptor write-back policy */ + reg_data = er32(TXDCTL(0)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC); + ew32(TXDCTL(0), reg_data); + + /* ...for both queues. */ + switch (mac->type) { + case e1000_82573: + e1000e_enable_tx_pkt_filtering(hw); + /* fall through */ + case e1000_82574: + case e1000_82583: + reg_data = er32(GCR); + reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX; + ew32(GCR, reg_data); + break; + default: + reg_data = er32(TXDCTL(1)); + reg_data = ((reg_data & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB | + E1000_TXDCTL_COUNT_DESC); + ew32(TXDCTL(1), reg_data); + break; + } + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_82571(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits + * @hw: pointer to the HW structure + * + * Initializes required hardware-dependent bits needed for normal operation. 
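+ * (The raw bit numbers used below are reserved/errata bits called out in
+ * the 82571-family specification updates; the switch statements apply the
+ * per-MAC-type differences.)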
+ **/
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
+{
+    u32 reg;
+
+    /* Transmit Descriptor Control 0 */
+    reg = er32(TXDCTL(0));
+    reg |= (1 << 22);
+    ew32(TXDCTL(0), reg);
+
+    /* Transmit Descriptor Control 1 */
+    reg = er32(TXDCTL(1));
+    reg |= (1 << 22);
+    ew32(TXDCTL(1), reg);
+
+    /* Transmit Arbitration Control 0 */
+    reg = er32(TARC(0));
+    reg &= ~(0xF << 27);    /* 30:27 */
+    switch (hw->mac.type) {
+    case e1000_82571:
+    case e1000_82572:
+        reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+        break;
+    case e1000_82574:
+    case e1000_82583:
+        reg |= (1 << 26);
+        break;
+    default:
+        break;
+    }
+    ew32(TARC(0), reg);
+
+    /* Transmit Arbitration Control 1 */
+    reg = er32(TARC(1));
+    switch (hw->mac.type) {
+    case e1000_82571:
+    case e1000_82572:
+        reg &= ~((1 << 29) | (1 << 30));
+        reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+        if (er32(TCTL) & E1000_TCTL_MULR)
+            reg &= ~(1 << 28);
+        else
+            reg |= (1 << 28);
+        ew32(TARC(1), reg);
+        break;
+    default:
+        break;
+    }
+
+    /* Device Control */
+    switch (hw->mac.type) {
+    case e1000_82573:
+    case e1000_82574:
+    case e1000_82583:
+        reg = er32(CTRL);
+        reg &= ~(1 << 29);
+        ew32(CTRL, reg);
+        break;
+    default:
+        break;
+    }
+
+    /* Extended Device Control */
+    switch (hw->mac.type) {
+    case e1000_82573:
+    case e1000_82574:
+    case e1000_82583:
+        reg = er32(CTRL_EXT);
+        reg &= ~(1 << 23);
+        reg |= (1 << 22);
+        ew32(CTRL_EXT, reg);
+        break;
+    default:
+        break;
+    }
+
+    if (hw->mac.type == e1000_82571) {
+        reg = er32(PBA_ECC);
+        reg |= E1000_PBA_ECC_CORR_EN;
+        ew32(PBA_ECC, reg);
+    }
+
+    /* Workaround for hardware errata: ensure that DMA Dynamic Clock
+     * gating is disabled on 82571 and 82572.
+     */
+    if ((hw->mac.type == e1000_82571) || (hw->mac.type == e1000_82572)) {
+        reg = er32(CTRL_EXT);
+        reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+        ew32(CTRL_EXT, reg);
+    }
+
+    /* Disable IPv6 extension header parsing because some malformed
+     * IPv6 headers can hang the Rx.
+     */
+    if (hw->mac.type <= e1000_82573) {
+        reg = er32(RFCTL);
+        reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS);
+        ew32(RFCTL, reg);
+    }
+
+    /* PCI-Ex Control Registers */
+    switch (hw->mac.type) {
+    case e1000_82574:
+    case e1000_82583:
+        reg = er32(GCR);
+        reg |= (1 << 22);
+        ew32(GCR, reg);
+
+        /* Workaround for a hardware erratum documented in the errata
+         * sheets: some error-prone or unreliable PCIe completions can
+         * occur, particularly with ASPM enabled. Without the fix, the
+         * issue can cause Tx timeouts.
+         */
+        reg = er32(GCR2);
+        reg |= 1;
+        ew32(GCR2, reg);
+        break;
+    default:
+        break;
+    }
+}
+
+/**
+ * e1000_clear_vfta_82571 - Clear VLAN filter table
+ * @hw: pointer to the HW structure
+ *
+ * Clears the register array which contains the VLAN filter table by
+ * setting all the values to 0.
+ **/
+static void e1000_clear_vfta_82571(struct e1000_hw *hw)
+{
+    u32 offset;
+    u32 vfta_value = 0;
+    u32 vfta_offset = 0;
+    u32 vfta_bit_in_reg = 0;
+
+    switch (hw->mac.type) {
+    case e1000_82573:
+    case e1000_82574:
+    case e1000_82583:
+        if (hw->mng_cookie.vlan_id != 0) {
+            /* The VFTA is a 4096-bit field; each bit identifies
+             * a single VLAN ID. The following operations
+             * determine which 32-bit entry (i.e. offset) into the
+             * array we want to set the VLAN ID (i.e. bit) of
+             * the manageability unit.
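+             * For example, with the usual entry shift of 5 and
+             * bit mask of 0x1F, manageability VLAN ID 100 maps to
+             * dword offset 3 (100 >> 5), bit 4 (100 & 0x1F).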
+             */
+            vfta_offset = (hw->mng_cookie.vlan_id >>
+                           E1000_VFTA_ENTRY_SHIFT) &
+                          E1000_VFTA_ENTRY_MASK;
+            vfta_bit_in_reg =
+                1 << (hw->mng_cookie.vlan_id &
+                      E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+        }
+        break;
+    default:
+        break;
+    }
+    for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+        /* If the offset we want to clear is the same offset of the
+         * manageability VLAN ID, then clear all bits except that of
+         * the manageability unit.
+         */
+        vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+        E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
+        e1e_flush();
+    }
+}
+
+/**
+ * e1000_check_mng_mode_82574 - Check manageability is enabled
+ * @hw: pointer to the HW structure
+ *
+ * Reads the NVM Initialization Control Word 2 and returns true
+ * (>0) if any manageability is enabled, else false (0).
+ **/
+static bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
+{
+    u16 data;
+
+    e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+    return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
+}
+
+/**
+ * e1000_led_on_82574 - Turn LED on
+ * @hw: pointer to the HW structure
+ *
+ * Turn LED on.
+ **/
+static s32 e1000_led_on_82574(struct e1000_hw *hw)
+{
+    u32 ctrl;
+    u32 i;
+
+    ctrl = hw->mac.ledctl_mode2;
+    if (!(E1000_STATUS_LU & er32(STATUS))) {
+        /* If no link, then turn LED on by setting the invert bit
+         * for each LED that's "on" (0x0E) in ledctl_mode2.
+         */
+        for (i = 0; i < 4; i++)
+            if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+                E1000_LEDCTL_MODE_LED_ON)
+                ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
+    }
+    ew32(LEDCTL, ctrl);
+
+    return 0;
+}
+
+/**
+ * e1000_check_phy_82574 - check 82574 phy hung state
+ * @hw: pointer to the HW structure
+ *
+ * Returns whether the PHY is hung or not.
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+    u16 status_1kbt = 0;
+    u16 receive_errors = 0;
+    s32 ret_val;
+
+    /* Read the PHY Receive Error counter first; if it is at its max
+     * (all F's), then read the Base1000T status register. If both are
+     * at their max, the PHY is hung.
+     */
+    ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
+    if (ret_val)
+        return false;
+    if (receive_errors == E1000_RECEIVE_ERROR_MAX) {
+        ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
+        if (ret_val)
+            return false;
+        if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+            E1000_IDLE_ERROR_COUNT_MASK)
+            return true;
+    }
+
+    return false;
+}
+
+/**
+ * e1000_setup_link_82571 - Setup flow control and link settings
+ * @hw: pointer to the HW structure
+ *
+ * Determines which flow control settings to use, then configures flow
+ * control. Calls the appropriate media-specific link configuration
+ * function. Assuming the adapter has a valid link partner, a valid link
+ * should be established. Assumes the hardware has previously been reset
+ * and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_82571(struct e1000_hw *hw)
+{
+    /* 82573 does not have a word in the NVM to determine
+     * the default flow control setting, so we explicitly
+     * set it to full.
+     */
+    switch (hw->mac.type) {
+    case e1000_82573:
+    case e1000_82574:
+    case e1000_82583:
+        if (hw->fc.requested_mode == e1000_fc_default)
+            hw->fc.requested_mode = e1000_fc_full;
+        break;
+    default:
+        break;
+    }
+
+    return e1000e_setup_link_generic(hw);
+}
+
+/**
+ * e1000_setup_copper_link_82571 - Configure copper link settings
+ * @hw: pointer to the HW structure
+ *
+ * Configures the link for auto-neg or forced speed and duplex.
Then we check + * for link, once link is established calls to configure collision distance + * and flow control are called. + **/ +static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + switch (hw->phy.type) { + case e1000_phy_m88: + case e1000_phy_bm: + ret_val = e1000e_copper_link_setup_m88(hw); + break; + case e1000_phy_igp_2: + ret_val = e1000e_copper_link_setup_igp(hw); + break; + default: + return -E1000_ERR_PHY; + } + + if (ret_val) + return ret_val; + + return e1000e_setup_copper_link(hw); +} + +/** + * e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes + * @hw: pointer to the HW structure + * + * Configures collision distance and flow control for fiber and serdes links. + * Upon successful setup, poll for link. + **/ +static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw) +{ + switch (hw->mac.type) { + case e1000_82571: + case e1000_82572: + /* If SerDes loopback mode is entered, there is no form + * of reset to take the adapter out of that mode. So we + * have to explicitly take the adapter out of loopback + * mode. This prevents drivers from twiddling their thumbs + * if another tool failed to take it out of loopback mode. + */ + ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + break; + default: + break; + } + + return e1000e_setup_fiber_serdes_link(hw); +} + +/** + * e1000_check_for_serdes_link_82571 - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Reports the link state as up or down. + * + * If autonegotiation is supported by the link partner, the link state is + * determined by the result of autonegotiation. This is the most likely case. + * If autonegotiation is not supported by the link partner, and the link + * has a valid signal, force the link up. + * + * The link state is represented internally here by 4 states: + * + * 1) down + * 2) autoneg_progress + * 3) autoneg_complete (the link successfully autonegotiated) + * 4) forced_up (the link has been forced up, it did not autonegotiate) + * + **/ +static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + u32 txcw; + u32 i; + s32 ret_val = 0; + + ctrl = er32(CTRL); + status = er32(STATUS); + er32(RXCW); + /* SYNCH bit and IV bit are sticky */ + usleep_range(10, 20); + rxcw = er32(RXCW); + + if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) { + /* Receiver is synchronized with no invalid bits. */ + switch (mac->serdes_link_state) { + case e1000_serdes_link_autoneg_complete: + if (!(status & E1000_STATUS_LU)) { + /* We have lost link, retry autoneg before + * reporting link failure + */ + mac->serdes_link_state = + e1000_serdes_link_autoneg_progress; + mac->serdes_has_link = false; + e_dbg("AN_UP -> AN_PROG\n"); + } else { + mac->serdes_has_link = true; + } + break; + + case e1000_serdes_link_forced_up: + /* If we are receiving /C/ ordered sets, re-enable + * auto-negotiation in the TXCW register and disable + * forced link in the Device Control register in an + * attempt to auto-negotiate with our link partner. 
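+             * (/C/ ordered sets are the 8b/10b configuration code
+             * groups a 1000BASE-X partner transmits while it is
+             * autonegotiating.)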
+             */
+            if (rxcw & E1000_RXCW_C) {
+                /* Enable autoneg, and unforce link up */
+                ew32(TXCW, mac->txcw);
+                ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+                mac->serdes_link_state =
+                    e1000_serdes_link_autoneg_progress;
+                mac->serdes_has_link = false;
+                e_dbg("FORCED_UP -> AN_PROG\n");
+            } else {
+                mac->serdes_has_link = true;
+            }
+            break;
+
+        case e1000_serdes_link_autoneg_progress:
+            if (rxcw & E1000_RXCW_C) {
+                /* We received /C/ ordered sets, meaning the
+                 * link partner has autonegotiated, and we can
+                 * trust the Link Up (LU) status bit.
+                 */
+                if (status & E1000_STATUS_LU) {
+                    mac->serdes_link_state =
+                        e1000_serdes_link_autoneg_complete;
+                    e_dbg("AN_PROG -> AN_UP\n");
+                    mac->serdes_has_link = true;
+                } else {
+                    /* Autoneg completed, but failed. */
+                    mac->serdes_link_state =
+                        e1000_serdes_link_down;
+                    e_dbg("AN_PROG -> DOWN\n");
+                }
+            } else {
+                /* The link partner did not autoneg.
+                 * Force link up and full duplex, and change
+                 * state to forced.
+                 */
+                ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+                ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+                ew32(CTRL, ctrl);
+
+                /* Configure Flow Control after link up. */
+                ret_val = e1000e_config_fc_after_link_up(hw);
+                if (ret_val) {
+                    e_dbg("Error config flow control\n");
+                    break;
+                }
+                mac->serdes_link_state =
+                    e1000_serdes_link_forced_up;
+                mac->serdes_has_link = true;
+                e_dbg("AN_PROG -> FORCED_UP\n");
+            }
+            break;
+
+        case e1000_serdes_link_down:
+        default:
+            /* The link was down but the receiver has now gained
+             * valid sync, so let's see if we can bring the link
+             * up.
+             */
+            ew32(TXCW, mac->txcw);
+            ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+            mac->serdes_link_state =
+                e1000_serdes_link_autoneg_progress;
+            mac->serdes_has_link = false;
+            e_dbg("DOWN -> AN_PROG\n");
+            break;
+        }
+    } else {
+        if (!(rxcw & E1000_RXCW_SYNCH)) {
+            mac->serdes_has_link = false;
+            mac->serdes_link_state = e1000_serdes_link_down;
+            e_dbg("ANYSTATE -> DOWN\n");
+        } else {
+            /* Check several times; if the SYNCH bit and the CONFIG
+             * bit are both consistently 1, simply ignore the IV bit
+             * and restart autoneg.
+             */
+            for (i = 0; i < AN_RETRY_COUNT; i++) {
+                usleep_range(10, 20);
+                rxcw = er32(RXCW);
+                if ((rxcw & E1000_RXCW_SYNCH) &&
+                    (rxcw & E1000_RXCW_C))
+                    continue;
+
+                if (rxcw & E1000_RXCW_IV) {
+                    mac->serdes_has_link = false;
+                    mac->serdes_link_state =
+                        e1000_serdes_link_down;
+                    e_dbg("ANYSTATE -> DOWN\n");
+                    break;
+                }
+            }
+
+            if (i == AN_RETRY_COUNT) {
+                txcw = er32(TXCW);
+                txcw |= E1000_TXCW_ANE;
+                ew32(TXCW, txcw);
+                mac->serdes_link_state =
+                    e1000_serdes_link_autoneg_progress;
+                mac->serdes_has_link = false;
+                e_dbg("ANYSTATE -> AN_PROG\n");
+            }
+        }
+    }
+
+    return ret_val;
+}
+
+/**
+ * e1000_valid_led_default_82571 - Verify a valid default LED config
+ * @hw: pointer to the HW structure
+ * @data: pointer to the NVM (EEPROM)
+ *
+ * Read the EEPROM for the current default LED configuration. If the
+ * LED configuration is not valid, set to a valid LED configuration.
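+ * (The valid encodings are the ID_LED_* words defined in defines.h.)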
+ **/
+static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
+{
+    s32 ret_val;
+
+    ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+    if (ret_val) {
+        e_dbg("NVM Read Error\n");
+        return ret_val;
+    }
+
+    switch (hw->mac.type) {
+    case e1000_82573:
+    case e1000_82574:
+    case e1000_82583:
+        if (*data == ID_LED_RESERVED_F746)
+            *data = ID_LED_DEFAULT_82573;
+        break;
+    default:
+        if (*data == ID_LED_RESERVED_0000 ||
+            *data == ID_LED_RESERVED_FFFF)
+            *data = ID_LED_DEFAULT;
+        break;
+    }
+
+    return 0;
+}
+
+/**
+ * e1000e_get_laa_state_82571 - Get locally administered address state
+ * @hw: pointer to the HW structure
+ *
+ * Retrieve and return the current locally administered address state.
+ **/
+bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
+{
+    if (hw->mac.type != e1000_82571)
+        return false;
+
+    return hw->dev_spec.e82571.laa_is_present;
+}
+
+/**
+ * e1000e_set_laa_state_82571 - Set locally administered address state
+ * @hw: pointer to the HW structure
+ * @state: enable/disable locally administered address
+ *
+ * Enable/Disable the current locally administered address state.
+ **/
+void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
+{
+    if (hw->mac.type != e1000_82571)
+        return;
+
+    hw->dev_spec.e82571.laa_is_present = state;
+
+    /* If workaround is activated... */
+    if (state)
+        /* Hold a copy of the LAA in RAR[14]. This is done so that
+         * between the time RAR[0] gets clobbered and the time it
+         * gets fixed, the actual LAA is in one of the RARs and no
+         * incoming packets directed to this port are dropped.
+         * Eventually the LAA will be in RAR[0] and RAR[14].
+         */
+        hw->mac.ops.rar_set(hw, hw->mac.addr,
+                            hw->mac.rar_entry_count - 1);
+}
+
+/**
+ * e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
+ * @hw: pointer to the HW structure
+ *
+ * Verifies that the EEPROM has completed the update. After updating the
+ * EEPROM, we need to check bit 15 in word 0x23 for the checksum fix. If
+ * the checksum fix is not implemented, we need to set the bit and update
+ * the checksum. Otherwise, if bit 15 is set and the checksum is incorrect,
+ * we need to return bad checksum.
+ **/
+static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
+{
+    struct e1000_nvm_info *nvm = &hw->nvm;
+    s32 ret_val;
+    u16 data;
+
+    if (nvm->type != e1000_nvm_flash_hw)
+        return 0;
+
+    /* Check bit 4 of word 10h. If it is 0, firmware is done updating
+     * 10h-12h. The checksum may need to be fixed.
+     */
+    ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
+    if (ret_val)
+        return ret_val;
+
+    if (!(data & 0x10)) {
+        /* Read word 0x23 and check bit 15. This bit is a 1
+         * when the checksum has already been fixed. If
+         * the checksum is still wrong and this bit is a
+         * 1, we need to return bad checksum. Otherwise,
+         * we need to set this bit to a 1 and update the
+         * checksum.
+         */
+        ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
+        if (ret_val)
+            return ret_val;
+
+        if (!(data & 0x8000)) {
+            data |= 0x8000;
+            ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000e_update_nvm_checksum(hw);
+            if (ret_val)
+                return ret_val;
+        }
+    }
+
+    return 0;
+}
+
+/**
+ * e1000_read_mac_addr_82571 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+    if (hw->mac.type == e1000_82571) {
+        s32 ret_val;
+
+        /* If there's an alternate MAC address, place it in RAR0
+         * so that it will override the Si installed default perm
+         * address.
+ */ + ret_val = e1000_check_alt_mac_addr_generic(hw); + if (ret_val) + return ret_val; + } + + return e1000_read_mac_addr_generic(hw); +} + +/** + * e1000_power_down_phy_copper_82571 - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + struct e1000_mac_info *mac = &hw->mac; + + if (!phy->ops.check_reset_block) + return; + + /* If the management interface is not enabled, then power down */ + if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters + * @hw: pointer to the HW structure + * + * Clears the hardware counters by reading the counter registers. + **/ +static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw) +{ + e1000e_clear_hw_cntrs_base(hw); + + er32(PRC64); + er32(PRC127); + er32(PRC255); + er32(PRC511); + er32(PRC1023); + er32(PRC1522); + er32(PTC64); + er32(PTC127); + er32(PTC255); + er32(PTC511); + er32(PTC1023); + er32(PTC1522); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + er32(ICRXPTC); + er32(ICRXATC); + er32(ICTXPTC); + er32(ICTXATC); + er32(ICTXQEC); + er32(ICTXQMTC); + er32(ICRXDMTC); +} + +static const struct e1000_mac_operations e82571_mac_ops = { + /* .check_mng_mode: mac type dependent */ + /* .check_for_link: media type dependent */ + .id_led_init = e1000e_id_led_init_generic, + .cleanup_led = e1000e_cleanup_led_generic, + .clear_hw_cntrs = e1000_clear_hw_cntrs_82571, + .get_bus_info = e1000e_get_bus_info_pcie, + .set_lan_id = e1000_set_lan_id_multi_port_pcie, + /* .get_link_up_info: media type dependent */ + /* .led_on: mac type dependent */ + .led_off = e1000e_led_off_generic, + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .write_vfta = e1000_write_vfta_generic, + .clear_vfta = e1000_clear_vfta_82571, + .reset_hw = e1000_reset_hw_82571, + .init_hw = e1000_init_hw_82571, + .setup_link = e1000_setup_link_82571, + /* .setup_physical_interface: media type dependent */ + .setup_led = e1000e_setup_led_generic, + .config_collision_dist = e1000e_config_collision_dist_generic, + .read_mac_addr = e1000_read_mac_addr_82571, + .rar_set = e1000e_rar_set_generic, + .rar_get_count = e1000e_rar_get_count_generic, +}; + +static const struct e1000_phy_operations e82_phy_ops_igp = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_igp, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = NULL, + .force_speed_duplex = e1000e_phy_force_speed_duplex_igp, + .get_cfg_done = e1000_get_cfg_done_82571, + .get_cable_length = e1000e_get_cable_length_igp_2, + .get_info = e1000e_get_phy_info_igp, + .read_reg = e1000e_read_phy_reg_igp, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_igp, + .cfg_on_link_up = NULL, +}; + +static const struct e1000_phy_operations e82_phy_ops_m88 = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = 
e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, + .get_cfg_done = e1000e_get_cfg_done_generic, + .get_cable_length = e1000e_get_cable_length_m88, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000e_read_phy_reg_m88, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_m88, + .cfg_on_link_up = NULL, +}; + +static const struct e1000_phy_operations e82_phy_ops_bm = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, + .commit = e1000e_phy_sw_reset, + .force_speed_duplex = e1000e_phy_force_speed_duplex_m88, + .get_cfg_done = e1000e_get_cfg_done_generic, + .get_cable_length = e1000e_get_cable_length_m88, + .get_info = e1000e_get_phy_info_m88, + .read_reg = e1000e_read_phy_reg_bm2, + .release = e1000_put_hw_semaphore_82571, + .reset = e1000e_phy_hw_reset_generic, + .set_d0_lplu_state = e1000_set_d0_lplu_state_82571, + .set_d3_lplu_state = e1000e_set_d3_lplu_state, + .write_reg = e1000e_write_phy_reg_bm2, + .cfg_on_link_up = NULL, +}; + +static const struct e1000_nvm_operations e82571_nvm_ops = { + .acquire = e1000_acquire_nvm_82571, + .read = e1000e_read_nvm_eerd, + .release = e1000_release_nvm_82571, + .reload = e1000e_reload_nvm_generic, + .update = e1000_update_nvm_checksum_82571, + .valid_led_default = e1000_valid_led_default_82571, + .validate = e1000_validate_nvm_checksum_82571, + .write = e1000_write_nvm_82571, +}; + +const struct e1000_info e1000_82571_info = { + .mac = e1000_82571, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_RESET_OVERWRITES_LAA /* errata */ + | FLAG_TARC_SPEED_MODE_BIT /* errata */ + | FLAG_APME_CHECK_PORT_B, + .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ + | FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82572_info = { + .mac = e1000_82572, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_TARC_SPEED_MODE_BIT, /* errata */ + .flags2 = FLAG2_DISABLE_ASPM_L1 /* errata 13 */ + | FLAG2_DMA_BURST, + .pba = 38, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_igp, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82573_info = { + .mac = e1000_82573, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_WOL + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_SWSM_ON_LOAD, + .flags2 = FLAG2_DISABLE_ASPM_L1 + | FLAG2_DISABLE_ASPM_L0S, + .pba = 20, + .max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_m88, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82574_info = { + .mac = e1000_82574, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_MSIX + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | 
FLAG_HAS_CTRLEXT_ON_LOAD, + .flags2 = FLAG2_CHECK_PHY_HANG + | FLAG2_DISABLE_ASPM_L0S + | FLAG2_DISABLE_ASPM_L1 + | FLAG2_NO_DISABLE_RX + | FLAG2_DMA_BURST, + .pba = 32, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_bm, + .nvm_ops = &e82571_nvm_ops, +}; + +const struct e1000_info e1000_82583_info = { + .mac = e1000_82583, + .flags = FLAG_HAS_HW_VLAN_FILTER + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_APME_IN_CTRL3 + | FLAG_HAS_SMART_POWER_DOWN + | FLAG_HAS_AMT + | FLAG_HAS_JUMBO_FRAMES + | FLAG_HAS_CTRLEXT_ON_LOAD, + .flags2 = FLAG2_DISABLE_ASPM_L0S + | FLAG2_DISABLE_ASPM_L1 + | FLAG2_NO_DISABLE_RX, + .pba = 32, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_82571, + .mac_ops = &e82571_mac_ops, + .phy_ops = &e82_phy_ops_bm, + .nvm_ops = &e82571_nvm_ops, +}; diff --git a/addons/e1000e/src/4.4.180/82571.h b/addons/e1000e/src/4.4.180/82571.h new file mode 100644 index 00000000..abc6a9ab --- /dev/null +++ b/addons/e1000e/src/4.4.180/82571.h @@ -0,0 +1,53 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_82571_H_ +#define _E1000E_82571_H_ + +#define ID_LED_RESERVED_F746 0xF746 +#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_ON2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 +#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */ + +/* Intr Throttling - RW */ +#define E1000_EITR_82574(_n) (0x000E8 + (0x4 * (_n))) + +#define E1000_EIAC_82574 0x000DC /* Ext. Interrupt Auto Clear - RW */ +#define E1000_EIAC_MASK_82574 0x01F00000 + +#define E1000_IVAR_INT_ALLOC_VALID 0x8 + +/* Manageability Operation Mode mask */ +#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 + +#define E1000_BASE1000T_STATUS 10 +#define E1000_IDLE_ERROR_COUNT_MASK 0xFF +#define E1000_RECEIVE_ERROR_COUNTER 21 +#define E1000_RECEIVE_ERROR_MAX 0xFFFF +bool e1000_check_phy_82574(struct e1000_hw *hw); +bool e1000e_get_laa_state_82571(struct e1000_hw *hw); +void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state); + +#endif diff --git a/addons/e1000e/src/4.4.180/Makefile b/addons/e1000e/src/4.4.180/Makefile new file mode 100644 index 00000000..9393a936 --- /dev/null +++ b/addons/e1000e/src/4.4.180/Makefile @@ -0,0 +1,5 @@ +obj-m += e1000e.o +e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \ + mac.o manage.o nvm.o phy.o \ + param.o ethtool.o netdev.o ptp.o + diff --git a/addons/e1000e/src/4.4.180/defines.h b/addons/e1000e/src/4.4.180/defines.h new file mode 100644 index 00000000..133d4074 --- /dev/null +++ b/addons/e1000e/src/4.4.180/defines.h @@ -0,0 +1,800 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_DEFINES_H_ +#define _E1000_DEFINES_H_ + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define REQ_TX_DESCRIPTOR_MULTIPLE 8 +#define REQ_RX_DESCRIPTOR_MULTIPLE 8 + +/* Definitions for power management and wakeup registers */ +/* Wake Up Control */ +#define E1000_WUC_APME 0x00000001 /* APM Enable */ +#define E1000_WUC_PME_EN 0x00000002 /* PME Enable */ +#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */ +#define E1000_WUC_APMPME 0x00000008 /* Assert PME on APM Wakeup */ +#define E1000_WUC_PHY_WAKE 0x00000100 /* if PHY supports wakeup */ + +/* Wake Up Filter Control */ +#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define E1000_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define E1000_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define E1000_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define E1000_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define E1000_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ + +/* Wake Up Status */ +#define E1000_WUS_LNKC E1000_WUFC_LNKC +#define E1000_WUS_MAG E1000_WUFC_MAG +#define E1000_WUS_EX E1000_WUFC_EX +#define E1000_WUS_MC E1000_WUFC_MC +#define E1000_WUS_BC E1000_WUFC_BC + +/* Extended Device Control */ +#define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */ +#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */ +#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */ +#define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */ +#define E1000_CTRL_EXT_SPD_BYPS 0x00008000 /* Speed Select Bypass */ +#define E1000_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */ +#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 +#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 +#define E1000_CTRL_EXT_EIAME 0x01000000 +#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ +#define E1000_CTRL_EXT_IAME 0x08000000 /* Int ACK Auto-mask */ +#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ +#define E1000_CTRL_EXT_LSECCK 0x00001000 +#define E1000_CTRL_EXT_PHYPDEN 0x00100000 + +/* Receive Descriptor bit definitions */ +#define E1000_RXD_STAT_DD 0x01 /* Descriptor Done */ +#define E1000_RXD_STAT_EOP 0x02 /* End of Packet */ +#define E1000_RXD_STAT_IXSM 0x04 /* Ignore checksum */ +#define E1000_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +#define E1000_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +#define E1000_RXD_STAT_TCPCS 0x20 /* TCP xsum calculated */ +#define E1000_RXD_ERR_CE 0x01 /* CRC Error */ +#define E1000_RXD_ERR_SE 0x02 /* Symbol Error */ +#define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ +#define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error 
*/ +#define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ +#define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ +#define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ + +#define E1000_RXDEXT_STATERR_TST 0x00000100 /* Time Stamp taken */ +#define E1000_RXDEXT_STATERR_CE 0x01000000 +#define E1000_RXDEXT_STATERR_SE 0x02000000 +#define E1000_RXDEXT_STATERR_SEQ 0x04000000 +#define E1000_RXDEXT_STATERR_CXE 0x10000000 +#define E1000_RXDEXT_STATERR_RXE 0x80000000 + +/* mask to determine if packets should be dropped due to frame errors */ +#define E1000_RXD_ERR_FRAME_ERR_MASK ( \ + E1000_RXD_ERR_CE | \ + E1000_RXD_ERR_SE | \ + E1000_RXD_ERR_SEQ | \ + E1000_RXD_ERR_CXE | \ + E1000_RXD_ERR_RXE) + +/* Same mask, but for extended and packet split descriptors */ +#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \ + E1000_RXDEXT_STATERR_CE | \ + E1000_RXDEXT_STATERR_SE | \ + E1000_RXDEXT_STATERR_SEQ | \ + E1000_RXDEXT_STATERR_CXE | \ + E1000_RXDEXT_STATERR_RXE) + +#define E1000_MRQC_RSS_FIELD_MASK 0xFFFF0000 +#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX 0x00040000 +#define E1000_MRQC_RSS_FIELD_IPV6 0x00100000 +#define E1000_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 + +#define E1000_RXDPS_HDRSTAT_HDRSP 0x00008000 + +/* Management Control */ +#define E1000_MANC_SMBUS_EN 0x00000001 /* SMBus Enabled - RO */ +#define E1000_MANC_ASF_EN 0x00000002 /* ASF Enabled - RO */ +#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */ +#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */ +#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */ +/* Enable MAC address filtering */ +#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 +/* Enable MNG packets to host memory */ +#define E1000_MANC_EN_MNG2HOST 0x00200000 + +#define E1000_MANC2H_PORT_623 0x00000020 /* Port 0x26f */ +#define E1000_MANC2H_PORT_664 0x00000040 /* Port 0x298 */ +#define E1000_MDEF_PORT_623 0x00000800 /* Port 0x26f */ +#define E1000_MDEF_PORT_664 0x00000400 /* Port 0x298 */ + +/* Receive Control */ +#define E1000_RCTL_EN 0x00000002 /* enable */ +#define E1000_RCTL_SBP 0x00000004 /* store bad packet */ +#define E1000_RCTL_UPE 0x00000008 /* unicast promiscuous enable */ +#define E1000_RCTL_MPE 0x00000010 /* multicast promiscuous enab */ +#define E1000_RCTL_LPE 0x00000020 /* long packet enable */ +#define E1000_RCTL_LBM_NO 0x00000000 /* no loopback mode */ +#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */ +#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */ +#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */ +#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */ +#define E1000_RCTL_RDMTS_HEX 0x00010000 +#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */ +#define E1000_RCTL_MO_3 0x00003000 /* multicast offset 15:4 */ +#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */ +#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */ +#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */ +#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */ +#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */ +/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */ +#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */ +#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */ +#define 
E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */ +#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */ +#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */ +#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */ +#define E1000_RCTL_DPF 0x00400000 /* Discard Pause Frames */ +#define E1000_RCTL_PMCF 0x00800000 /* pass MAC control frames */ +#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */ +#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */ + +/* Use byte values for the following shift parameters + * Usage: + * psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) & + * E1000_PSRCTL_BSIZE0_MASK) | + * ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) & + * E1000_PSRCTL_BSIZE1_MASK) | + * ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) & + * E1000_PSRCTL_BSIZE2_MASK) | + * ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) |; + * E1000_PSRCTL_BSIZE3_MASK)) + * where value0 = [128..16256], default=256 + * value1 = [1024..64512], default=4096 + * value2 = [0..64512], default=4096 + * value3 = [0..64512], default=0 + */ + +#define E1000_PSRCTL_BSIZE0_MASK 0x0000007F +#define E1000_PSRCTL_BSIZE1_MASK 0x00003F00 +#define E1000_PSRCTL_BSIZE2_MASK 0x003F0000 +#define E1000_PSRCTL_BSIZE3_MASK 0x3F000000 + +#define E1000_PSRCTL_BSIZE0_SHIFT 7 /* Shift _right_ 7 */ +#define E1000_PSRCTL_BSIZE1_SHIFT 2 /* Shift _right_ 2 */ +#define E1000_PSRCTL_BSIZE2_SHIFT 6 /* Shift _left_ 6 */ +#define E1000_PSRCTL_BSIZE3_SHIFT 14 /* Shift _left_ 14 */ + +/* SWFW_SYNC Definitions */ +#define E1000_SWFW_EEP_SM 0x1 +#define E1000_SWFW_PHY0_SM 0x2 +#define E1000_SWFW_PHY1_SM 0x4 +#define E1000_SWFW_CSR_SM 0x8 + +/* Device Control */ +#define E1000_CTRL_FD 0x00000001 /* Full duplex.0=half; 1=full */ +#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */ +#define E1000_CTRL_LRST 0x00000008 /* Link reset. 
0=normal,1=reset */ +#define E1000_CTRL_ASDE 0x00000020 /* Auto-speed detect enable */ +#define E1000_CTRL_SLU 0x00000040 /* Set link up (Force Link) */ +#define E1000_CTRL_ILOS 0x00000080 /* Invert Loss-Of Signal */ +#define E1000_CTRL_SPD_SEL 0x00000300 /* Speed Select Mask */ +#define E1000_CTRL_SPD_10 0x00000000 /* Force 10Mb */ +#define E1000_CTRL_SPD_100 0x00000100 /* Force 100Mb */ +#define E1000_CTRL_SPD_1000 0x00000200 /* Force 1Gb */ +#define E1000_CTRL_FRCSPD 0x00000800 /* Force Speed */ +#define E1000_CTRL_FRCDPX 0x00001000 /* Force Duplex */ +#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */ +#define E1000_CTRL_LANPHYPC_VALUE 0x00020000 /* SW value of LANPHYPC */ +#define E1000_CTRL_MEHE 0x00080000 /* Memory Error Handling Enable */ +#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ +#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ +#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ +#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */ +#define E1000_CTRL_SWDPIO0 0x00400000 /* SWDPIN 0 Input or output */ +#define E1000_CTRL_RST 0x04000000 /* Global reset */ +#define E1000_CTRL_RFCE 0x08000000 /* Receive Flow Control enable */ +#define E1000_CTRL_TFCE 0x10000000 /* Transmit flow control enable */ +#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */ +#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */ + +#define E1000_PCS_LCTL_FORCE_FCTRL 0x80 + +#define E1000_PCS_LSTS_AN_COMPLETE 0x10000 + +/* Device Status */ +#define E1000_STATUS_FD 0x00000001 /* Full duplex.0=half,1=full */ +#define E1000_STATUS_LU 0x00000002 /* Link up.0=no,1=link */ +#define E1000_STATUS_FUNC_MASK 0x0000000C /* PCI Function Mask */ +#define E1000_STATUS_FUNC_SHIFT 2 +#define E1000_STATUS_FUNC_1 0x00000004 /* Function 1 */ +#define E1000_STATUS_TXOFF 0x00000010 /* transmission paused */ +#define E1000_STATUS_SPEED_MASK 0x000000C0 +#define E1000_STATUS_SPEED_10 0x00000000 /* Speed 10Mb/s */ +#define E1000_STATUS_SPEED_100 0x00000040 /* Speed 100Mb/s */ +#define E1000_STATUS_SPEED_1000 0x00000080 /* Speed 1000Mb/s */ +#define E1000_STATUS_LAN_INIT_DONE 0x00000200 /* Lan Init Completion by NVM */ +#define E1000_STATUS_PHYRA 0x00000400 /* PHY Reset Asserted */ +#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Master Req status */ + +#define HALF_DUPLEX 1 +#define FULL_DUPLEX 2 + +#define ADVERTISE_10_HALF 0x0001 +#define ADVERTISE_10_FULL 0x0002 +#define ADVERTISE_100_HALF 0x0004 +#define ADVERTISE_100_FULL 0x0008 +#define ADVERTISE_1000_HALF 0x0010 /* Not used, just FYI */ +#define ADVERTISE_1000_FULL 0x0020 + +/* 1000/H is not supported, nor spec-compliant. 
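+ * ADVERTISE_1000_HALF is therefore excluded from every advertising mask
+ * defined below.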
*/ +#define E1000_ALL_SPEED_DUPLEX ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL | ADVERTISE_1000_FULL) +#define E1000_ALL_NOT_GIG ( \ + ADVERTISE_10_HALF | ADVERTISE_10_FULL | ADVERTISE_100_HALF | \ + ADVERTISE_100_FULL) +#define E1000_ALL_100_SPEED (ADVERTISE_100_HALF | ADVERTISE_100_FULL) +#define E1000_ALL_10_SPEED (ADVERTISE_10_HALF | ADVERTISE_10_FULL) +#define E1000_ALL_HALF_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_100_HALF) + +#define AUTONEG_ADVERTISE_SPEED_DEFAULT E1000_ALL_SPEED_DUPLEX + +/* LED Control */ +#define E1000_PHY_LED0_MODE_MASK 0x00000007 +#define E1000_PHY_LED0_IVRT 0x00000008 +#define E1000_PHY_LED0_MASK 0x0000001F + +#define E1000_LEDCTL_LED0_MODE_MASK 0x0000000F +#define E1000_LEDCTL_LED0_MODE_SHIFT 0 +#define E1000_LEDCTL_LED0_IVRT 0x00000040 +#define E1000_LEDCTL_LED0_BLINK 0x00000080 + +#define E1000_LEDCTL_MODE_LINK_UP 0x2 +#define E1000_LEDCTL_MODE_LED_ON 0xE +#define E1000_LEDCTL_MODE_LED_OFF 0xF + +/* Transmit Descriptor bit definitions */ +#define E1000_TXD_DTYP_D 0x00100000 /* Data Descriptor */ +#define E1000_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +#define E1000_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +#define E1000_TXD_CMD_EOP 0x01000000 /* End of Packet */ +#define E1000_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +#define E1000_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +#define E1000_TXD_CMD_RS 0x08000000 /* Report Status */ +#define E1000_TXD_CMD_RPS 0x10000000 /* Report Packet Sent */ +#define E1000_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +#define E1000_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +#define E1000_TXD_CMD_IDE 0x80000000 /* Enable Tidv register */ +#define E1000_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +#define E1000_TXD_STAT_EC 0x00000002 /* Excess Collisions */ +#define E1000_TXD_STAT_LC 0x00000004 /* Late Collisions */ +#define E1000_TXD_STAT_TU 0x00000008 /* Transmit underrun */ +#define E1000_TXD_CMD_TCP 0x01000000 /* TCP packet */ +#define E1000_TXD_CMD_IP 0x02000000 /* IP packet */ +#define E1000_TXD_CMD_TSE 0x04000000 /* TCP Seg enable */ +#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */ +#define E1000_TXD_EXTCMD_TSTAMP 0x00000010 /* IEEE1588 Timestamp packet */ + +/* Transmit Control */ +#define E1000_TCTL_EN 0x00000002 /* enable Tx */ +#define E1000_TCTL_PSP 0x00000008 /* pad short packets */ +#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */ +#define E1000_TCTL_COLD 0x003ff000 /* collision distance */ +#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */ +#define E1000_TCTL_MULR 0x10000000 /* Multiple request support */ + +/* SerDes Control */ +#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400 +#define E1000_SCTL_ENABLE_SERDES_LOOPBACK 0x0410 + +/* Receive Checksum Control */ +#define E1000_RXCSUM_TUOFL 0x00000200 /* TCP / UDP checksum offload */ +#define E1000_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +#define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + +/* Header split receive */ +#define E1000_RFCTL_NFSW_DIS 0x00000040 +#define E1000_RFCTL_NFSR_DIS 0x00000080 +#define E1000_RFCTL_ACK_DIS 0x00001000 +#define E1000_RFCTL_EXTEN 0x00008000 +#define E1000_RFCTL_IPV6_EX_DIS 0x00010000 +#define E1000_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + +/* Collision related configuration parameters */ +#define E1000_COLLISION_THRESHOLD 15 +#define E1000_CT_SHIFT 4 +#define E1000_COLLISION_DISTANCE 63 +#define E1000_COLD_SHIFT 12 + +/* Default values for the transmit IPG register */ 
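+/* (Roughly: IPGT is the back-to-back transmit gap; IPGR1 and IPGR2 time
+ * the gap after a receive, used when deferring in half-duplex.)
+ */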
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8 + +#define E1000_TIPG_IPGT_MASK 0x000003FF + +#define DEFAULT_82543_TIPG_IPGR1 8 +#define E1000_TIPG_IPGR1_SHIFT 10 + +#define DEFAULT_82543_TIPG_IPGR2 6 +#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7 +#define E1000_TIPG_IPGR2_SHIFT 20 + +#define MAX_JUMBO_FRAME_SIZE 0x3F00 +#define E1000_TX_PTR_GAP 0x1F + +/* Extended Configuration Control and Size */ +#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020 +#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE 0x00000001 +#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE 0x00000008 +#define E1000_EXTCNF_CTRL_SWFLAG 0x00000020 +#define E1000_EXTCNF_CTRL_GATE_PHY_CFG 0x00000080 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK 0x00FF0000 +#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT 16 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK 0x0FFF0000 +#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT 16 + +#define E1000_PHY_CTRL_D0A_LPLU 0x00000002 +#define E1000_PHY_CTRL_NOND0A_LPLU 0x00000004 +#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008 +#define E1000_PHY_CTRL_GBE_DISABLE 0x00000040 + +#define E1000_KABGTXD_BGSQLBIAS 0x00050000 + +/* Low Power IDLE Control */ +#define E1000_LPIC_LPIET_SHIFT 24 /* Low Power Idle Entry Time */ + +/* PBA constants */ +#define E1000_PBA_8K 0x0008 /* 8KB */ +#define E1000_PBA_16K 0x0010 /* 16KB */ + +#define E1000_PBA_RXA_MASK 0xFFFF + +#define E1000_PBS_16K E1000_PBA_16K + +/* Uncorrectable/correctable ECC Error counts and enable bits */ +#define E1000_PBECCSTS_CORR_ERR_CNT_MASK 0x000000FF +#define E1000_PBECCSTS_UNCORR_ERR_CNT_MASK 0x0000FF00 +#define E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT 8 +#define E1000_PBECCSTS_ECC_ENABLE 0x00010000 + +#define IFS_MAX 80 +#define IFS_MIN 40 +#define IFS_RATIO 4 +#define IFS_STEP 10 +#define MIN_NUM_XMITS 1000 + +/* SW Semaphore Register */ +#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +#define E1000_SWSM_DRV_LOAD 0x00000008 /* Driver Loaded Bit */ + +#define E1000_SWSM2_LOCK 0x00000002 /* Secondary driver semaphore bit */ + +/* Interrupt Cause Read */ +#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */ +#define E1000_ICR_LSC 0x00000004 /* Link Status Change */ +#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */ +#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */ +#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */ +#define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */ +/* If this bit asserted, the driver should claim the interrupt */ +#define E1000_ICR_INT_ASSERTED 0x80000000 +#define E1000_ICR_RXQ0 0x00100000 /* Rx Queue 0 Interrupt */ +#define E1000_ICR_RXQ1 0x00200000 /* Rx Queue 1 Interrupt */ +#define E1000_ICR_TXQ0 0x00400000 /* Tx Queue 0 Interrupt */ +#define E1000_ICR_TXQ1 0x00800000 /* Tx Queue 1 Interrupt */ +#define E1000_ICR_OTHER 0x01000000 /* Other Interrupts */ + +/* PBA ECC Register */ +#define E1000_PBA_ECC_COUNTER_MASK 0xFFF00000 /* ECC counter mask */ +#define E1000_PBA_ECC_COUNTER_SHIFT 20 /* ECC counter shift value */ +#define E1000_PBA_ECC_CORR_EN 0x00000001 /* ECC correction enable */ +#define E1000_PBA_ECC_STAT_CLR 0x00000002 /* Clear ECC error counter */ +#define E1000_PBA_ECC_INT_EN 0x00000004 /* Enable ICR bit 5 for ECC */ + +/* This defines the bits that are set in the Interrupt Mask + * Set/Read Register. 
Each bit is documented below: + * o RXT0 = Receiver Timer Interrupt (ring 0) + * o TXDW = Transmit Descriptor Written Back + * o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0) + * o RXSEQ = Receive Sequence Error + * o LSC = Link Status Change + */ +#define IMS_ENABLE_MASK ( \ + E1000_IMS_RXT0 | \ + E1000_IMS_TXDW | \ + E1000_IMS_RXDMT0 | \ + E1000_IMS_RXSEQ | \ + E1000_IMS_LSC) + +/* Interrupt Mask Set */ +#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ +#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ +#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */ +#define E1000_IMS_ECCER E1000_ICR_ECCER /* Uncorrectable ECC Error */ +#define E1000_IMS_RXQ0 E1000_ICR_RXQ0 /* Rx Queue 0 Interrupt */ +#define E1000_IMS_RXQ1 E1000_ICR_RXQ1 /* Rx Queue 1 Interrupt */ +#define E1000_IMS_TXQ0 E1000_ICR_TXQ0 /* Tx Queue 0 Interrupt */ +#define E1000_IMS_TXQ1 E1000_ICR_TXQ1 /* Tx Queue 1 Interrupt */ +#define E1000_IMS_OTHER E1000_ICR_OTHER /* Other Interrupts */ + +/* Interrupt Cause Set */ +#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */ +#define E1000_ICS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */ +#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */ + +/* Transmit Descriptor Control */ +#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */ +#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */ +#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */ +#define E1000_TXDCTL_GRAN 0x01000000 /* TXDCTL Granularity */ +#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */ +#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */ +/* Enable the counting of desc. still to be processed. */ +#define E1000_TXDCTL_COUNT_DESC 0x00400000 + +/* Flow Control Constants */ +#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001 +#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100 +#define FLOW_CONTROL_TYPE 0x8808 + +/* 802.1q VLAN Packet Size */ +#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */ + +/* Receive Address + * Number of high/low register pairs in the RAR. The RAR (Receive Address + * Registers) holds the directed and multicast addresses that we monitor. + * Technically, we have 16 spots. However, we reserve one of these spots + * (RAR[15]) for our directed address used by controllers with + * manageability enabled, allowing us room for 15 multicast addresses. 
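+ * Hence E1000_RAR_ENTRIES below is 15 rather than 16.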
+ */ +#define E1000_RAR_ENTRIES 15 +#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */ +#define E1000_RAL_MAC_ADDR_LEN 4 +#define E1000_RAH_MAC_ADDR_LEN 2 + +/* Error Codes */ +#define E1000_ERR_NVM 1 +#define E1000_ERR_PHY 2 +#define E1000_ERR_CONFIG 3 +#define E1000_ERR_PARAM 4 +#define E1000_ERR_MAC_INIT 5 +#define E1000_ERR_PHY_TYPE 6 +#define E1000_ERR_RESET 9 +#define E1000_ERR_MASTER_REQUESTS_PENDING 10 +#define E1000_ERR_HOST_INTERFACE_COMMAND 11 +#define E1000_BLK_PHY_RESET 12 +#define E1000_ERR_SWFW_SYNC 13 +#define E1000_NOT_IMPLEMENTED 14 +#define E1000_ERR_INVALID_ARGUMENT 16 +#define E1000_ERR_NO_SPACE 17 +#define E1000_ERR_NVM_PBA_SECTION 18 + +/* Loop limit on how long we wait for auto-negotiation to complete */ +#define FIBER_LINK_UP_LIMIT 50 +#define COPPER_LINK_UP_LIMIT 10 +#define PHY_AUTO_NEG_LIMIT 45 +#define PHY_FORCE_LIMIT 20 +/* Number of 100 microseconds we wait for PCI Express master disable */ +#define MASTER_DISABLE_TIMEOUT 800 +/* Number of milliseconds we wait for PHY configuration done after MAC reset */ +#define PHY_CFG_TIMEOUT 100 +/* Number of 2 milliseconds we wait for acquiring MDIO ownership. */ +#define MDIO_OWNERSHIP_TIMEOUT 10 +/* Number of milliseconds for NVM auto read done after MAC reset. */ +#define AUTO_READ_DONE_TIMEOUT 10 + +/* Flow Control */ +#define E1000_FCRTH_RTH 0x0000FFF8 /* Mask Bits[15:3] for RTH */ +#define E1000_FCRTL_RTL 0x0000FFF8 /* Mask Bits[15:3] for RTL */ +#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ + +/* Transmit Configuration Word */ +#define E1000_TXCW_FD 0x00000020 /* TXCW full duplex */ +#define E1000_TXCW_PAUSE 0x00000080 /* TXCW sym pause request */ +#define E1000_TXCW_ASM_DIR 0x00000100 /* TXCW astm pause direction */ +#define E1000_TXCW_PAUSE_MASK 0x00000180 /* TXCW pause request mask */ +#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ + +/* Receive Configuration Word */ +#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */ +#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ +#define E1000_RXCW_C 0x20000000 /* Receive config */ +#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ + +#define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ +#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ + +#define E1000_TSYNCRXCTL_VALID 0x00000001 /* Rx timestamp valid */ +#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* Rx type mask */ +#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00 +#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02 +#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 +#define E1000_TSYNCRXCTL_TYPE_ALL 0x08 +#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A +#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable Rx timestamping */ +#define E1000_TSYNCRXCTL_SYSCFI 0x00000020 /* Sys clock frequency */ + +#define E1000_RXMTRL_PTP_V1_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE 0x00010000 + +#define E1000_RXMTRL_PTP_V2_SYNC_MESSAGE 0x00000000 +#define E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE 0x01000000 + +#define E1000_TIMINCA_INCPERIOD_SHIFT 24 +#define E1000_TIMINCA_INCVALUE_MASK 0x00FFFFFF + +/* PCI Express Control */ +#define E1000_GCR_RXD_NO_SNOOP 0x00000001 +#define E1000_GCR_RXDSCW_NO_SNOOP 0x00000002 +#define E1000_GCR_RXDSCR_NO_SNOOP 0x00000004 +#define E1000_GCR_TXD_NO_SNOOP 0x00000008 +#define E1000_GCR_TXDSCW_NO_SNOOP 0x00000010 +#define E1000_GCR_TXDSCR_NO_SNOOP 0x00000020 + +#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP | \ + E1000_GCR_RXDSCW_NO_SNOOP | \ + E1000_GCR_RXDSCR_NO_SNOOP | \ + 
E1000_GCR_TXD_NO_SNOOP | \ + E1000_GCR_TXDSCW_NO_SNOOP | \ + E1000_GCR_TXDSCR_NO_SNOOP) + +/* NVM Control */ +#define E1000_EECD_SK 0x00000001 /* NVM Clock */ +#define E1000_EECD_CS 0x00000002 /* NVM Chip Select */ +#define E1000_EECD_DI 0x00000004 /* NVM Data In */ +#define E1000_EECD_DO 0x00000008 /* NVM Data Out */ +#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */ +#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */ +#define E1000_EECD_PRES 0x00000100 /* NVM Present */ +#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */ +/* NVM Addressing bits based on type (0-small, 1-large) */ +#define E1000_EECD_ADDR_BITS 0x00000400 +#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */ +#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */ +#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */ +#define E1000_EECD_SIZE_EX_SHIFT 11 +#define E1000_EECD_FLUPD 0x00080000 /* Update FLASH */ +#define E1000_EECD_AUPDEN 0x00100000 /* Enable Autonomous FLASH update */ +#define E1000_EECD_SEC1VAL 0x00400000 /* Sector One Valid */ +#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES) + +#define E1000_NVM_RW_REG_DATA 16 /* Offset to data in NVM r/w regs */ +#define E1000_NVM_RW_REG_DONE 2 /* Offset to READ/WRITE done bit */ +#define E1000_NVM_RW_REG_START 1 /* Start operation */ +#define E1000_NVM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +#define E1000_NVM_POLL_WRITE 1 /* Flag for polling write complete */ +#define E1000_NVM_POLL_READ 0 /* Flag for polling read complete */ +#define E1000_FLASH_UPDATES 2000 + +/* NVM Word Offsets */ +#define NVM_COMPAT 0x0003 +#define NVM_ID_LED_SETTINGS 0x0004 +#define NVM_FUTURE_INIT_WORD1 0x0019 +#define NVM_COMPAT_VALID_CSUM 0x0001 +#define NVM_FUTURE_INIT_WORD1_VALID_CSUM 0x0040 + +#define NVM_INIT_CONTROL2_REG 0x000F +#define NVM_INIT_CONTROL3_PORT_B 0x0014 +#define NVM_INIT_3GIO_3 0x001A +#define NVM_INIT_CONTROL3_PORT_A 0x0024 +#define NVM_CFG 0x0012 +#define NVM_ALT_MAC_ADDR_PTR 0x0037 +#define NVM_CHECKSUM_REG 0x003F + +#define E1000_NVM_CFG_DONE_PORT_0 0x40000 /* MNG config cycle done */ +#define E1000_NVM_CFG_DONE_PORT_1 0x80000 /* ...for second port */ + +/* Mask bits for fields in Word 0x0f of the NVM */ +#define NVM_WORD0F_PAUSE_MASK 0x3000 +#define NVM_WORD0F_PAUSE 0x1000 +#define NVM_WORD0F_ASM_DIR 0x2000 + +/* Mask bits for fields in Word 0x1a of the NVM */ +#define NVM_WORD1A_ASPM_MASK 0x000C + +/* Mask bits for fields in Word 0x03 of the EEPROM */ +#define NVM_COMPAT_LOM 0x0800 + +/* length of string needed to store PBA number */ +#define E1000_PBANUM_LENGTH 11 + +/* For checksumming, the sum of all words in the NVM should equal 0xBABA. 
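+ * The checksum word at NVM_CHECKSUM_REG (0x3F) is set so that the 16-bit
+ * sum of words 0x00 through 0x3F equals NVM_SUM.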
*/ +#define NVM_SUM 0xBABA + +/* PBA (printed board assembly) number words */ +#define NVM_PBA_OFFSET_0 8 +#define NVM_PBA_OFFSET_1 9 +#define NVM_PBA_PTR_GUARD 0xFAFA +#define NVM_WORD_SIZE_BASE_SHIFT 6 + +/* NVM Commands - SPI */ +#define NVM_MAX_RETRY_SPI 5000 /* Max wait of 5ms, for RDY signal */ +#define NVM_READ_OPCODE_SPI 0x03 /* NVM read opcode */ +#define NVM_WRITE_OPCODE_SPI 0x02 /* NVM write opcode */ +#define NVM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = address bit-8 */ +#define NVM_WREN_OPCODE_SPI 0x06 /* NVM set Write Enable latch */ +#define NVM_RDSR_OPCODE_SPI 0x05 /* NVM read Status register */ + +/* SPI NVM Status Register */ +#define NVM_STATUS_RDY_SPI 0x01 + +/* Word definitions for ID LED Settings */ +#define ID_LED_RESERVED_0000 0x0000 +#define ID_LED_RESERVED_FFFF 0xFFFF +#define ID_LED_DEFAULT ((ID_LED_OFF1_ON2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_DEF1_DEF2 << 4) | \ + (ID_LED_DEF1_DEF2)) +#define ID_LED_DEF1_DEF2 0x1 +#define ID_LED_DEF1_ON2 0x2 +#define ID_LED_DEF1_OFF2 0x3 +#define ID_LED_ON1_DEF2 0x4 +#define ID_LED_ON1_ON2 0x5 +#define ID_LED_ON1_OFF2 0x6 +#define ID_LED_OFF1_DEF2 0x7 +#define ID_LED_OFF1_ON2 0x8 +#define ID_LED_OFF1_OFF2 0x9 + +#define IGP_ACTIVITY_LED_MASK 0xFFFFF0FF +#define IGP_ACTIVITY_LED_ENABLE 0x0300 +#define IGP_LED3_MODE 0x07000000 + +/* PCI/PCI-X/PCI-EX Config space */ +#define PCI_HEADER_TYPE_REGISTER 0x0E +#define PCIE_LINK_STATUS 0x12 + +#define PCI_HEADER_TYPE_MULTIFUNC 0x80 +#define PCIE_LINK_WIDTH_MASK 0x3F0 +#define PCIE_LINK_WIDTH_SHIFT 4 + +#define PHY_REVISION_MASK 0xFFFFFFF0 +#define MAX_PHY_REG_ADDRESS 0x1F /* 5 bit address bus (0-0x1F) */ +#define MAX_PHY_MULTI_PAGE_REG 0xF + +/* Bit definitions for valid PHY IDs. + * I = Integrated + * E = External + */ +#define M88E1000_E_PHY_ID 0x01410C50 +#define M88E1000_I_PHY_ID 0x01410C30 +#define M88E1011_I_PHY_ID 0x01410C20 +#define IGP01E1000_I_PHY_ID 0x02A80380 +#define M88E1111_I_PHY_ID 0x01410CC0 +#define GG82563_E_PHY_ID 0x01410CA0 +#define IGP03E1000_E_PHY_ID 0x02A80390 +#define IFE_E_PHY_ID 0x02A80330 +#define IFE_PLUS_E_PHY_ID 0x02A80320 +#define IFE_C_E_PHY_ID 0x02A80310 +#define BME1000_E_PHY_ID 0x01410CB0 +#define BME1000_E_PHY_ID_R2 0x01410CB1 +#define I82577_E_PHY_ID 0x01540050 +#define I82578_E_PHY_ID 0x004DD040 +#define I82579_E_PHY_ID 0x01540090 +#define I217_E_PHY_ID 0x015400A0 + +/* M88E1000 Specific Registers */ +#define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ +#define M88E1000_PHY_SPEC_STATUS 0x11 /* PHY Specific Status Register */ +#define M88E1000_EXT_PHY_SPEC_CTRL 0x14 /* Extended PHY Specific Control */ + +#define M88E1000_PHY_PAGE_SELECT 0x1D /* Reg 29 for page number setting */ +#define M88E1000_PHY_GEN_CONTROL 0x1E /* Its meaning depends on reg 29 */ + +/* M88E1000 PHY Specific Control Register */ +#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */ +#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */ + /* Manual MDI configuration */ +#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */ +/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */ +#define M88E1000_PSCR_AUTO_X_1000T 0x0040 +/* Auto crossover enabled all speeds */ +#define M88E1000_PSCR_AUTO_X_MODE 0x0060 +#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */ + +/* M88E1000 PHY Specific Status Register */ +#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */ +#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */ +#define 
M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */ +/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */ +#define M88E1000_PSSR_CABLE_LENGTH 0x0380 +#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */ +#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */ + +#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7 + +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the master + */ +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00 +#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000 +/* Number of times we will attempt to autonegotiate before downshifting if we + * are the slave + */ +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300 +#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100 +#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */ + +/* M88EC018 Rev 2 specific DownShift settings */ +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00 +#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800 + +#define I82578_EPSCR_DOWNSHIFT_ENABLE 0x0020 +#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK 0x001C + +/* BME1000 PHY Specific Control Register */ +#define BME1000_PSCR_ENABLE_DOWNSHIFT 0x0800 /* 1 = enable downshift */ + +/* Bits... + * 15-5: page + * 4-0: register offset + */ +#define GG82563_PAGE_SHIFT 5 +#define GG82563_REG(page, reg) \ + (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS)) +#define GG82563_MIN_ALT_REG 30 + +/* GG82563 Specific Registers */ +#define GG82563_PHY_SPEC_CTRL \ + GG82563_REG(0, 16) /* PHY Specific Control */ +#define GG82563_PHY_PAGE_SELECT \ + GG82563_REG(0, 22) /* Page Select */ +#define GG82563_PHY_SPEC_CTRL_2 \ + GG82563_REG(0, 26) /* PHY Specific Control 2 */ +#define GG82563_PHY_PAGE_SELECT_ALT \ + GG82563_REG(0, 29) /* Alternate Page Select */ + +#define GG82563_PHY_MAC_SPEC_CTRL \ + GG82563_REG(2, 21) /* MAC Specific Control Register */ + +#define GG82563_PHY_DSP_DISTANCE \ + GG82563_REG(5, 26) /* DSP Distance */ + +/* Page 193 - Port Control Registers */ +#define GG82563_PHY_KMRN_MODE_CTRL \ + GG82563_REG(193, 16) /* Kumeran Mode Control */ +#define GG82563_PHY_PWR_MGMT_CTRL \ + GG82563_REG(193, 20) /* Power Management Control */ + +/* Page 194 - KMRN Registers */ +#define GG82563_PHY_INBAND_CTRL \ + GG82563_REG(194, 18) /* Inband Control */ + +/* MDI Control */ +#define E1000_MDIC_REG_MASK 0x001F0000 +#define E1000_MDIC_REG_SHIFT 16 +#define E1000_MDIC_PHY_SHIFT 21 +#define E1000_MDIC_OP_WRITE 0x04000000 +#define E1000_MDIC_OP_READ 0x08000000 +#define E1000_MDIC_READY 0x10000000 +#define E1000_MDIC_ERROR 0x40000000 + +/* SerDes Control */ +#define E1000_GEN_POLL_TIMEOUT 640 + +#endif /* _E1000_DEFINES_H_ */ diff --git a/addons/e1000e/src/4.4.180/e1000.h b/addons/e1000e/src/4.4.180/e1000.h new file mode 100644 index 00000000..0b748d19 --- /dev/null +++ b/addons/e1000e/src/4.4.180/e1000.h @@ -0,0 +1,601 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". 
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/ptp_classify.h>
+#include <linux/mii.h>
+#include <linux/mdio.h>
+#include <linux/pm_qos.h>
+
+#include "hw.h"
+
+struct e1000_info;
+
+#define e_dbg(format, arg...) \
+	netdev_dbg(hw->adapter->netdev, format, ## arg)
+#define e_err(format, arg...) \
+	netdev_err(adapter->netdev, format, ## arg)
+#define e_info(format, arg...) \
+	netdev_info(adapter->netdev, format, ## arg)
+#define e_warn(format, arg...) \
+	netdev_warn(adapter->netdev, format, ## arg)
+#define e_notice(format, arg...) \
+	netdev_notice(adapter->netdev, format, ## arg)
+
+/* Interrupt modes, as used by the IntMode parameter */
+#define E1000E_INT_MODE_LEGACY 0
+#define E1000E_INT_MODE_MSI 1
+#define E1000E_INT_MODE_MSIX 2
+
+/* Tx/Rx descriptor defines */
+#define E1000_DEFAULT_TXD 256
+#define E1000_MAX_TXD 4096
+#define E1000_MIN_TXD 64
+
+#define E1000_DEFAULT_RXD 256
+#define E1000_MAX_RXD 4096
+#define E1000_MIN_RXD 64
+
+#define E1000_MIN_ITR_USECS 10 /* 100000 irq/sec */
+#define E1000_MAX_ITR_USECS 10000 /* 100 irq/sec */
+
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES 0
+#define E1000_EEPROM_APME 0x0400
+
+#define E1000_MNG_VLAN_NONE (-1)
+
+#define DEFAULT_JUMBO 9234
+
+/* Time to wait before putting the device into D3 if there's no link (in ms). */
+#define LINK_TIMEOUT 100
+
+/* Count for polling __E1000_RESET condition every 10-20msec.
+ * Experimentation has shown the reset can take approximately 210msec.
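+ * At E1000_CHECK_RESET_COUNT (25) polls of 10-20 msec each, the wait allows
+ * for at least 250 msec, comfortably above that observed worst case.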
+ */ +#define E1000_CHECK_RESET_COUNT 25 + +#define DEFAULT_RDTR 0 +#define DEFAULT_RADV 8 +#define BURST_RDTR 0x20 +#define BURST_RADV 0x20 +#define PCICFG_DESC_RING_STATUS 0xe4 +#define FLUSH_DESC_REQUIRED 0x100 + +/* in the case of WTHRESH, it appears at least the 82571/2 hardware + * writes back 4 descriptors when WTHRESH=5, and 3 descriptors when + * WTHRESH=4, so a setting of 5 gives the most efficient bus + * utilization but to avoid possible Tx stalls, set it to 1 + */ +#define E1000_TXDCTL_DMA_BURST_ENABLE \ + (E1000_TXDCTL_GRAN | /* set descriptor granularity */ \ + E1000_TXDCTL_COUNT_DESC | \ + (1 << 16) | /* wthresh must be +1 more than desired */\ + (1 << 8) | /* hthresh */ \ + 0x1f) /* pthresh */ + +#define E1000_RXDCTL_DMA_BURST_ENABLE \ + (0x01000000 | /* set descriptor granularity */ \ + (4 << 16) | /* set writeback threshold */ \ + (4 << 8) | /* set prefetch threshold */ \ + 0x20) /* set hthresh */ + +#define E1000_TIDV_FPD (1 << 31) +#define E1000_RDTR_FPD (1 << 31) + +enum e1000_boards { + board_82571, + board_82572, + board_82573, + board_82574, + board_82583, + board_80003es2lan, + board_ich8lan, + board_ich9lan, + board_ich10lan, + board_pchlan, + board_pch2lan, + board_pch_lpt, + board_pch_spt +}; + +struct e1000_ps_page { + struct page *page; + u64 dma; /* must be u64 - written to hw */ +}; + +/* wrappers around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer + */ +struct e1000_buffer { + dma_addr_t dma; + struct sk_buff *skb; + union { + /* Tx */ + struct { + unsigned long time_stamp; + u16 length; + u16 next_to_watch; + unsigned int segs; + unsigned int bytecount; + u16 mapped_as_page; + }; + /* Rx */ + struct { + /* arrays of page information for packet split */ + struct e1000_ps_page *ps_pages; + struct page *page; + }; + }; +}; + +struct e1000_ring { + struct e1000_adapter *adapter; /* back pointer to adapter */ + void *desc; /* pointer to ring memory */ + dma_addr_t dma; /* phys address of ring */ + unsigned int size; /* length of ring in bytes */ + unsigned int count; /* number of desc. 
in ring */ + + u16 next_to_use; + u16 next_to_clean; + + void __iomem *head; + void __iomem *tail; + + /* array of buffer information structs */ + struct e1000_buffer *buffer_info; + + char name[IFNAMSIZ + 5]; + u32 ims_val; + u32 itr_val; + void __iomem *itr_register; + int set_itr; + + struct sk_buff *rx_skb_top; +}; + +/* PHY register snapshot values */ +struct e1000_phy_regs { + u16 bmcr; /* basic mode control register */ + u16 bmsr; /* basic mode status register */ + u16 advertise; /* auto-negotiation advertisement */ + u16 lpa; /* link partner ability register */ + u16 expansion; /* auto-negotiation expansion reg */ + u16 ctrl1000; /* 1000BASE-T control register */ + u16 stat1000; /* 1000BASE-T status register */ + u16 estatus; /* extended status register */ +}; + +/* board specific private data structure */ +struct e1000_adapter { + struct timer_list watchdog_timer; + struct timer_list phy_info_timer; + struct timer_list blink_timer; + + struct work_struct reset_task; + struct work_struct watchdog_task; + + const struct e1000_info *ei; + + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; + u32 bd_number; + u32 rx_buffer_len; + u16 mng_vlan_id; + u16 link_speed; + u16 link_duplex; + u16 eeprom_vers; + + /* track device up/down/testing state */ + unsigned long state; + + /* Interrupt Throttle Rate */ + u32 itr; + u32 itr_setting; + u16 tx_itr; + u16 rx_itr; + + /* Tx - one ring per active queue */ + struct e1000_ring *tx_ring ____cacheline_aligned_in_smp; + u32 tx_fifo_limit; + + struct napi_struct napi; + + unsigned int uncorr_errors; /* uncorrectable ECC errors */ + unsigned int corr_errors; /* correctable ECC errors */ + unsigned int restart_queue; + u32 txd_cmd; + + bool detect_tx_hung; + bool tx_hang_recheck; + u8 tx_timeout_factor; + + u32 tx_int_delay; + u32 tx_abs_int_delay; + + unsigned int total_tx_bytes; + unsigned int total_tx_packets; + unsigned int total_rx_bytes; + unsigned int total_rx_packets; + + /* Tx stats */ + u64 tpt_old; + u64 colc_old; + u32 gotc; + u64 gotc_old; + u32 tx_timeout_count; + u32 tx_fifo_head; + u32 tx_head_addr; + u32 tx_fifo_size; + u32 tx_dma_failed; + u32 tx_hwtstamp_timeouts; + + /* Rx */ + bool (*clean_rx)(struct e1000_ring *ring, int *work_done, + int work_to_do) ____cacheline_aligned_in_smp; + void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count, + gfp_t gfp); + struct e1000_ring *rx_ring; + + u32 rx_int_delay; + u32 rx_abs_int_delay; + + /* Rx stats */ + u64 hw_csum_err; + u64 hw_csum_good; + u64 rx_hdr_split; + u32 gorc; + u64 gorc_old; + u32 alloc_rx_buff_failed; + u32 rx_dma_failed; + u32 rx_hwtstamp_cleared; + + unsigned int rx_ps_pages; + u16 rx_ps_bsize0; + u32 max_frame_size; + u32 min_frame_size; + + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; + + /* structs defined in e1000_hw.h */ + struct e1000_hw hw; + + spinlock_t stats64_lock; /* protects statistics counters */ + struct e1000_hw_stats stats; + struct e1000_phy_info phy_info; + struct e1000_phy_stats phy_stats; + + /* Snapshot of PHY registers */ + struct e1000_phy_regs phy_regs; + + struct e1000_ring test_tx_ring; + struct e1000_ring test_rx_ring; + u32 test_icr; + + u32 msg_enable; + unsigned int num_vectors; + struct msix_entry *msix_entries; + int int_mode; + u32 eiac_mask; + + u32 eeprom_wol; + u32 wol; + u32 pba; + u32 max_hw_frame_size; + + bool fc_autoneg; + + unsigned int flags; + unsigned int flags2; + struct work_struct downshift_task; + struct work_struct update_phy_task; + struct work_struct print_hang_task; + + 
int phy_hang_count;
+
+	u16 tx_ring_count;
+	u16 rx_ring_count;
+
+	struct hwtstamp_config hwtstamp_config;
+	struct delayed_work systim_overflow_work;
+	struct sk_buff *tx_hwtstamp_skb;
+	unsigned long tx_hwtstamp_start;
+	struct work_struct tx_hwtstamp_work;
+	spinlock_t systim_lock;	/* protects SYSTIML/H registers */
+	struct cyclecounter cc;
+	struct timecounter tc;
+	struct ptp_clock *ptp_clock;
+	struct ptp_clock_info ptp_clock_info;
+	struct pm_qos_request pm_qos_req;
+
+	u16 eee_advert;
+};
+
+struct e1000_info {
+	enum e1000_mac_type mac;
+	unsigned int flags;
+	unsigned int flags2;
+	u32 pba;
+	u32 max_hw_frame_size;
+	s32 (*get_variants)(struct e1000_adapter *);
+	const struct e1000_mac_operations *mac_ops;
+	const struct e1000_phy_operations *phy_ops;
+	const struct e1000_nvm_operations *nvm_ops;
+};
+
+s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
+
+/* The system time is maintained by a 64-bit counter comprised of the 32-bit
+ * SYSTIMH and SYSTIML registers. How the counter increments (and therefore
+ * its resolution) is based on the contents of the TIMINCA register - it
+ * increments every incperiod (bits 31:24) clock ticks by incvalue (bits 23:0).
+ * For the best accuracy, the incperiod should be as small as possible. The
+ * incvalue is scaled by a factor as large as possible (while still fitting
+ * in bits 23:0) so that relatively small clock corrections can be made.
+ *
+ * As a result, a shift of INCVALUE_SHIFT_n is used to fit a value of
+ * INCVALUE_n into the TIMINCA register allowing 32+8+(24-INCVALUE_SHIFT_n)
+ * bits to count whole nanoseconds, leaving the rest for fractional nanoseconds.
+ */
+#define INCVALUE_96MHz 125
+#define INCVALUE_SHIFT_96MHz 17
+#define INCPERIOD_SHIFT_96MHz 2
+#define INCPERIOD_96MHz (12 >> INCPERIOD_SHIFT_96MHz)
+
+#define INCVALUE_25MHz 40
+#define INCVALUE_SHIFT_25MHz 18
+#define INCPERIOD_25MHz 1
+
+#define INCVALUE_24MHz 125
+#define INCVALUE_SHIFT_24MHz 14
+#define INCPERIOD_24MHz 3
+
+/* Another drawback of scaling the incvalue by a large factor is that the
+ * 64-bit SYSTIM register overflows more quickly. This is dealt with
+ * by simply reading the clock before it overflows.
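+ * As a worked check of the 96 MHz case: 125 << 17 = 16384000 still fits in
+ * the 24-bit incvalue field (2^24 = 16777216), and 32+8+(24-17) = 47 bits
+ * are left for whole nanoseconds, matching the table below.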
+ * + * Clock ns bits Overflows after + * ~~~~~~ ~~~~~~~ ~~~~~~~~~~~~~~~ + * 96MHz 47-bit 2^(47-INCPERIOD_SHIFT_96MHz) / 10^9 / 3600 = 9.77 hrs + * 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours + */ +#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4) +#define E1000_MAX_82574_SYSTIM_REREADS 50 +#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL) + +/* hardware capability, feature, and workaround flags */ +#define FLAG_HAS_AMT (1 << 0) +#define FLAG_HAS_FLASH (1 << 1) +#define FLAG_HAS_HW_VLAN_FILTER (1 << 2) +#define FLAG_HAS_WOL (1 << 3) +/* reserved bit4 */ +#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5) +#define FLAG_HAS_SWSM_ON_LOAD (1 << 6) +#define FLAG_HAS_JUMBO_FRAMES (1 << 7) +#define FLAG_READ_ONLY_NVM (1 << 8) +#define FLAG_IS_ICH (1 << 9) +#define FLAG_HAS_MSIX (1 << 10) +#define FLAG_HAS_SMART_POWER_DOWN (1 << 11) +#define FLAG_IS_QUAD_PORT_A (1 << 12) +#define FLAG_IS_QUAD_PORT (1 << 13) +#define FLAG_HAS_HW_TIMESTAMP (1 << 14) +#define FLAG_APME_IN_WUC (1 << 15) +#define FLAG_APME_IN_CTRL3 (1 << 16) +#define FLAG_APME_CHECK_PORT_B (1 << 17) +#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18) +#define FLAG_NO_WAKE_UCAST (1 << 19) +#define FLAG_MNG_PT_ENABLED (1 << 20) +#define FLAG_RESET_OVERWRITES_LAA (1 << 21) +#define FLAG_TARC_SPEED_MODE_BIT (1 << 22) +#define FLAG_TARC_SET_BIT_ZERO (1 << 23) +#define FLAG_RX_NEEDS_RESTART (1 << 24) +#define FLAG_LSC_GIG_SPEED_DROP (1 << 25) +#define FLAG_SMART_POWER_DOWN (1 << 26) +#define FLAG_MSI_ENABLED (1 << 27) +/* reserved (1 << 28) */ +#define FLAG_TSO_FORCE (1 << 29) +#define FLAG_RESTART_NOW (1 << 30) +#define FLAG_MSI_TEST_FAILED (1 << 31) + +#define FLAG2_CRC_STRIPPING (1 << 0) +#define FLAG2_HAS_PHY_WAKEUP (1 << 1) +#define FLAG2_IS_DISCARDING (1 << 2) +#define FLAG2_DISABLE_ASPM_L1 (1 << 3) +#define FLAG2_HAS_PHY_STATS (1 << 4) +#define FLAG2_HAS_EEE (1 << 5) +#define FLAG2_DMA_BURST (1 << 6) +#define FLAG2_DISABLE_ASPM_L0S (1 << 7) +#define FLAG2_DISABLE_AIM (1 << 8) +#define FLAG2_CHECK_PHY_HANG (1 << 9) +#define FLAG2_NO_DISABLE_RX (1 << 10) +#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11) +#define FLAG2_DFLT_CRC_STRIPPING (1 << 12) +#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13) + +#define E1000_RX_DESC_PS(R, i) \ + (&(((union e1000_rx_desc_packet_split *)((R).desc))[i])) +#define E1000_RX_DESC_EXT(R, i) \ + (&(((union e1000_rx_desc_extended *)((R).desc))[i])) +#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) +#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) +#define E1000_CONTEXT_DESC(R, i) E1000_GET_DESC(R, i, e1000_context_desc) + +enum e1000_state_t { + __E1000_TESTING, + __E1000_RESETTING, + __E1000_ACCESS_SHARED_RESOURCE, + __E1000_DOWN +}; + +enum latency_range { + lowest_latency = 0, + low_latency = 1, + bulk_latency = 2, + latency_invalid = 255 +}; + +extern char e1000e_driver_name[]; +extern const char e1000e_driver_version[]; + +void e1000e_check_options(struct e1000_adapter *adapter); +void e1000e_set_ethtool_ops(struct net_device *netdev); + +int e1000e_up(struct e1000_adapter *adapter); +void e1000e_down(struct e1000_adapter *adapter, bool reset); +void e1000e_reinit_locked(struct e1000_adapter *adapter); +void e1000e_reset(struct e1000_adapter *adapter); +void e1000e_power_up_phy(struct e1000_adapter *adapter); +int e1000e_setup_rx_resources(struct e1000_ring *ring); +int e1000e_setup_tx_resources(struct e1000_ring *ring); +void e1000e_free_rx_resources(struct e1000_ring *ring); +void e1000e_free_tx_resources(struct e1000_ring *ring); +struct rtnl_link_stats64 *e1000e_get_stats64(struct 
net_device *netdev, + struct rtnl_link_stats64 *stats); +void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); +void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); +void e1000e_get_hw_control(struct e1000_adapter *adapter); +void e1000e_release_hw_control(struct e1000_adapter *adapter); +void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr); + +extern unsigned int copybreak; + +extern const struct e1000_info e1000_82571_info; +extern const struct e1000_info e1000_82572_info; +extern const struct e1000_info e1000_82573_info; +extern const struct e1000_info e1000_82574_info; +extern const struct e1000_info e1000_82583_info; +extern const struct e1000_info e1000_ich8_info; +extern const struct e1000_info e1000_ich9_info; +extern const struct e1000_info e1000_ich10_info; +extern const struct e1000_info e1000_pch_info; +extern const struct e1000_info e1000_pch2_info; +extern const struct e1000_info e1000_pch_lpt_info; +extern const struct e1000_info e1000_pch_spt_info; +extern const struct e1000_info e1000_es2_info; + +void e1000e_ptp_init(struct e1000_adapter *adapter); +void e1000e_ptp_remove(struct e1000_adapter *adapter); + +static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) +{ + return hw->phy.ops.reset(hw); +} + +static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_reg(hw, offset, data); +} + +static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return hw->phy.ops.read_reg_locked(hw, offset, data); +} + +static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_reg(hw, offset, data); +} + +static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return hw->phy.ops.write_reg_locked(hw, offset, data); +} + +void e1000e_reload_nvm_generic(struct e1000_hw *hw); + +static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw) +{ + if (hw->mac.ops.read_mac_addr) + return hw->mac.ops.read_mac_addr(hw); + + return e1000_read_mac_addr_generic(hw); +} + +static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.validate(hw); +} + +static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw) +{ + return hw->nvm.ops.update(hw); +} + +static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + return hw->nvm.ops.read(hw, offset, words, data); +} + +static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + return hw->nvm.ops.write(hw, offset, words, data); +} + +static inline s32 e1000_get_phy_info(struct e1000_hw *hw) +{ + return hw->phy.ops.get_info(hw); +} + +static inline u32 __er32(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->hw_addr + reg); +} + +#define er32(reg) __er32(hw, E1000_##reg) + +s32 __ew32_prepare(struct e1000_hw *hw); +void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val); + +#define ew32(reg, val) __ew32(hw, E1000_##reg, (val)) + +#define e1e_flush() er32(STATUS) + +#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \ + (__ew32((a), (reg + ((offset) << 2)), (value))) + +#define E1000_READ_REG_ARRAY(a, reg, offset) \ + (readl((a)->hw_addr + reg + ((offset) << 2))) + +#endif /* _E1000_H_ */ diff --git a/addons/e1000e/src/4.4.180/ethtool.c b/addons/e1000e/src/4.4.180/ethtool.c new file mode 100644 index 00000000..6cab1f30 --- /dev/null +++ b/addons/e1000e/src/4.4.180/ethtool.c @@ -0,0 +1,2341 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel 
Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS <linux.nics@intel.com>
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* ethtool support for e1000 */
+
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/pm_runtime.h>
+
+#include "e1000.h"
+
+enum { NETDEV_STATS, E1000_STATS };
+
+struct e1000_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int type;
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define E1000_STAT(str, m) { \
+	.stat_string = str, \
+	.type = E1000_STATS, \
+	.sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
+	.stat_offset = offsetof(struct e1000_adapter, m) }
+#define E1000_NETDEV_STAT(str, m) { \
+	.stat_string = str, \
+	.type = NETDEV_STATS, \
+	.sizeof_stat = sizeof(((struct rtnl_link_stats64 *)0)->m), \
+	.stat_offset = offsetof(struct rtnl_link_stats64, m) }
+
+static const struct e1000_stats e1000_gstrings_stats[] = {
+	E1000_STAT("rx_packets", stats.gprc),
+	E1000_STAT("tx_packets", stats.gptc),
+	E1000_STAT("rx_bytes", stats.gorc),
+	E1000_STAT("tx_bytes", stats.gotc),
+	E1000_STAT("rx_broadcast", stats.bprc),
+	E1000_STAT("tx_broadcast", stats.bptc),
+	E1000_STAT("rx_multicast", stats.mprc),
+	E1000_STAT("tx_multicast", stats.mptc),
+	E1000_NETDEV_STAT("rx_errors", rx_errors),
+	E1000_NETDEV_STAT("tx_errors", tx_errors),
+	E1000_NETDEV_STAT("tx_dropped", tx_dropped),
+	E1000_STAT("multicast", stats.mprc),
+	E1000_STAT("collisions", stats.colc),
+	E1000_NETDEV_STAT("rx_length_errors", rx_length_errors),
+	E1000_NETDEV_STAT("rx_over_errors", rx_over_errors),
+	E1000_STAT("rx_crc_errors", stats.crcerrs),
+	E1000_NETDEV_STAT("rx_frame_errors", rx_frame_errors),
+	E1000_STAT("rx_no_buffer_count", stats.rnbc),
+	E1000_STAT("rx_missed_errors", stats.mpc),
+	E1000_STAT("tx_aborted_errors", stats.ecol),
+	E1000_STAT("tx_carrier_errors", stats.tncrs),
+	E1000_NETDEV_STAT("tx_fifo_errors", tx_fifo_errors),
+	E1000_NETDEV_STAT("tx_heartbeat_errors", tx_heartbeat_errors),
+	E1000_STAT("tx_window_errors", stats.latecol),
+	E1000_STAT("tx_abort_late_coll", stats.latecol),
+	E1000_STAT("tx_deferred_ok", stats.dc),
+	E1000_STAT("tx_single_coll_ok", stats.scc),
+	E1000_STAT("tx_multi_coll_ok", stats.mcc),
+	E1000_STAT("tx_timeout_count", tx_timeout_count),
+	E1000_STAT("tx_restart_queue", restart_queue),
+	E1000_STAT("rx_long_length_errors", stats.roc),
+	E1000_STAT("rx_short_length_errors", stats.ruc),
+	E1000_STAT("rx_align_errors", stats.algnerrc),
+	E1000_STAT("tx_tcp_seg_good", stats.tsctc),
+	E1000_STAT("tx_tcp_seg_failed", stats.tsctfc),
+	E1000_STAT("rx_flow_control_xon", stats.xonrxc),
+	E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
+	E1000_STAT("tx_flow_control_xon", stats.xontxc),
+	E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
+	E1000_STAT("rx_csum_offload_good", hw_csum_good),
+	E1000_STAT("rx_csum_offload_errors", hw_csum_err),
+	E1000_STAT("rx_header_split", rx_hdr_split),
+	E1000_STAT("alloc_rx_buff_failed",
alloc_rx_buff_failed), + E1000_STAT("tx_smbus", stats.mgptc), + E1000_STAT("rx_smbus", stats.mgprc), + E1000_STAT("dropped_smbus", stats.mgpdc), + E1000_STAT("rx_dma_failed", rx_dma_failed), + E1000_STAT("tx_dma_failed", tx_dma_failed), + E1000_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), + E1000_STAT("uncorr_ecc_errors", uncorr_errors), + E1000_STAT("corr_ecc_errors", corr_errors), + E1000_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), +}; + +#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) +#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN) +static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test (offline)", "Loopback test (offline)", + "Link test (on/offline)" +}; + +#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test) + +static int e1000_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 speed; + + if (hw->phy.media_type == e1000_media_type_copper) { + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + if (hw->phy.type == e1000_phy_ife) + ecmd->supported &= ~SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_TP; + + if (hw->mac.autoneg == 1) { + ecmd->advertising |= ADVERTISED_Autoneg; + /* the e1000 autoneg seems to match ethtool nicely */ + ecmd->advertising |= hw->phy.autoneg_advertised; + } + + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy.addr; + ecmd->transceiver = XCVR_INTERNAL; + + } else { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | + ADVERTISED_Autoneg); + + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + } + + speed = SPEED_UNKNOWN; + ecmd->duplex = DUPLEX_UNKNOWN; + + if (netif_running(netdev)) { + if (netif_carrier_ok(netdev)) { + speed = adapter->link_speed; + ecmd->duplex = adapter->link_duplex - 1; + } + } else if (!pm_runtime_suspended(netdev->dev.parent)) { + u32 status = er32(STATUS); + + if (status & E1000_STATUS_LU) { + if (status & E1000_STATUS_SPEED_1000) + speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + speed = SPEED_100; + else + speed = SPEED_10; + + if (status & E1000_STATUS_FD) + ecmd->duplex = DUPLEX_FULL; + else + ecmd->duplex = DUPLEX_HALF; + } + } + + ethtool_cmd_speed_set(ecmd, speed); + ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) || + hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE; + + /* MDI-X => 2; MDI =>1; Invalid =>0 */ + if ((hw->phy.media_type == e1000_media_type_copper) && + netif_carrier_ok(netdev)) + ecmd->eth_tp_mdix = hw->phy.is_mdix ? 
ETH_TP_MDI_X : ETH_TP_MDI; + else + ecmd->eth_tp_mdix = ETH_TP_MDI_INVALID; + + if (hw->phy.mdix == AUTO_ALL_MODES) + ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO; + else + ecmd->eth_tp_mdix_ctrl = hw->phy.mdix; + + return 0; +} + +static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + + mac->autoneg = 0; + + /* Make sure dplx is at most 1 bit and lsb of speed is not set + * for the switch() below to work + */ + if ((spd & 1) || (dplx & ~1)) + goto err_inval; + + /* Fiber NICs only allow 1000 gbps Full duplex */ + if ((adapter->hw.phy.media_type == e1000_media_type_fiber) && + (spd != SPEED_1000) && (dplx != DUPLEX_FULL)) { + goto err_inval; + } + + switch (spd + dplx) { + case SPEED_10 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_10_HALF; + break; + case SPEED_10 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_10_FULL; + break; + case SPEED_100 + DUPLEX_HALF: + mac->forced_speed_duplex = ADVERTISE_100_HALF; + break; + case SPEED_100 + DUPLEX_FULL: + mac->forced_speed_duplex = ADVERTISE_100_FULL; + break; + case SPEED_1000 + DUPLEX_FULL: + mac->autoneg = 1; + adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; + break; + case SPEED_1000 + DUPLEX_HALF: /* not supported */ + default: + goto err_inval; + } + + /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */ + adapter->hw.phy.mdix = AUTO_ALL_MODES; + + return 0; + +err_inval: + e_err("Unsupported Speed/Duplex configuration\n"); + return -EINVAL; +} + +static int e1000_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int ret_val = 0; + + pm_runtime_get_sync(netdev->dev.parent); + + /* When SoL/IDER sessions are active, autoneg/speed/duplex + * cannot be changed + */ + if (hw->phy.ops.check_reset_block && + hw->phy.ops.check_reset_block(hw)) { + e_err("Cannot change link characteristics when SoL/IDER is active.\n"); + ret_val = -EINVAL; + goto out; + } + + /* MDI setting is only allowed when autoneg enabled because + * some hardware doesn't allow MDI setting when speed or + * duplex is forced. 
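+ * The check below therefore rejects any eth_tp_mdix_ctrl value other than
+ * ETH_TP_MDI_AUTO with -EINVAL unless autoneg is also being enabled.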
+ */ + if (ecmd->eth_tp_mdix_ctrl) { + if (hw->phy.media_type != e1000_media_type_copper) { + ret_val = -EOPNOTSUPP; + goto out; + } + + if ((ecmd->eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) && + (ecmd->autoneg != AUTONEG_ENABLE)) { + e_err("forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n"); + ret_val = -EINVAL; + goto out; + } + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (ecmd->autoneg == AUTONEG_ENABLE) { + hw->mac.autoneg = 1; + if (hw->phy.media_type == e1000_media_type_fiber) + hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE | ADVERTISED_Autoneg; + else + hw->phy.autoneg_advertised = ecmd->advertising | + ADVERTISED_TP | ADVERTISED_Autoneg; + ecmd->advertising = hw->phy.autoneg_advertised; + if (adapter->fc_autoneg) + hw->fc.requested_mode = e1000_fc_default; + } else { + u32 speed = ethtool_cmd_speed(ecmd); + /* calling this overrides forced MDI setting */ + if (e1000_set_spd_dplx(adapter, speed, ecmd->duplex)) { + ret_val = -EINVAL; + goto out; + } + } + + /* MDI-X => 2; MDI => 1; Auto => 3 */ + if (ecmd->eth_tp_mdix_ctrl) { + /* fix up the value for auto (3 => 0) as zero is mapped + * internally to auto + */ + if (ecmd->eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO) + hw->phy.mdix = AUTO_ALL_MODES; + else + hw->phy.mdix = ecmd->eth_tp_mdix_ctrl; + } + + /* reset the link */ + if (netif_running(adapter->netdev)) { + e1000e_down(adapter, true); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + +out: + pm_runtime_put_sync(netdev->dev.parent); + clear_bit(__E1000_RESETTING, &adapter->state); + return ret_val; +} + +static void e1000_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + pause->autoneg = + (adapter->fc_autoneg ? 
AUTONEG_ENABLE : AUTONEG_DISABLE); + + if (hw->fc.current_mode == e1000_fc_rx_pause) { + pause->rx_pause = 1; + } else if (hw->fc.current_mode == e1000_fc_tx_pause) { + pause->tx_pause = 1; + } else if (hw->fc.current_mode == e1000_fc_full) { + pause->rx_pause = 1; + pause->tx_pause = 1; + } +} + +static int e1000_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + int retval = 0; + + adapter->fc_autoneg = pause->autoneg; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + pm_runtime_get_sync(netdev->dev.parent); + + if (adapter->fc_autoneg == AUTONEG_ENABLE) { + hw->fc.requested_mode = e1000_fc_default; + if (netif_running(adapter->netdev)) { + e1000e_down(adapter, true); + e1000e_up(adapter); + } else { + e1000e_reset(adapter); + } + } else { + if (pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_full; + else if (pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_rx_pause; + else if (!pause->rx_pause && pause->tx_pause) + hw->fc.requested_mode = e1000_fc_tx_pause; + else if (!pause->rx_pause && !pause->tx_pause) + hw->fc.requested_mode = e1000_fc_none; + + hw->fc.current_mode = hw->fc.requested_mode; + + if (hw->phy.media_type == e1000_media_type_fiber) { + retval = hw->mac.ops.setup_link(hw); + /* implicit goto out */ + } else { + retval = e1000e_force_mac_fc(hw); + if (retval) + goto out; + e1000e_set_fc_watermarks(hw); + } + } + +out: + pm_runtime_put_sync(netdev->dev.parent); + clear_bit(__E1000_RESETTING, &adapter->state); + return retval; +} + +static u32 e1000_get_msglevel(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->msg_enable; +} + +static void e1000_set_msglevel(struct net_device *netdev, u32 data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + adapter->msg_enable = data; +} + +static int e1000_get_regs_len(struct net_device __always_unused *netdev) +{ +#define E1000_REGS_LEN 32 /* overestimate */ + return E1000_REGS_LEN * sizeof(u32); +} + +static void e1000_get_regs(struct net_device *netdev, + struct ethtool_regs *regs, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 *regs_buff = p; + u16 phy_data; + + pm_runtime_get_sync(netdev->dev.parent); + + memset(p, 0, E1000_REGS_LEN * sizeof(u32)); + + regs->version = (1 << 24) | (adapter->pdev->revision << 16) | + adapter->pdev->device; + + regs_buff[0] = er32(CTRL); + regs_buff[1] = er32(STATUS); + + regs_buff[2] = er32(RCTL); + regs_buff[3] = er32(RDLEN(0)); + regs_buff[4] = er32(RDH(0)); + regs_buff[5] = er32(RDT(0)); + regs_buff[6] = er32(RDTR); + + regs_buff[7] = er32(TCTL); + regs_buff[8] = er32(TDLEN(0)); + regs_buff[9] = er32(TDH(0)); + regs_buff[10] = er32(TDT(0)); + regs_buff[11] = er32(TIDV); + + regs_buff[12] = adapter->hw.phy.type; /* PHY type (IGP=1, M88=0) */ + + /* ethtool doesn't use anything past this point, so all this + * code is likely legacy junk for apps that may or may not exist + */ + if (hw->phy.type == e1000_phy_m88) { + e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + regs_buff[13] = (u32)phy_data; /* cable length */ + regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + 
regs_buff[17] = (u32)phy_data; /* extended 10bt distance */ + regs_buff[18] = regs_buff[13]; /* cable polarity */ + regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */ + regs_buff[20] = regs_buff[17]; /* polarity correction */ + /* phy receive errors */ + regs_buff[22] = adapter->phy_stats.receive_errors; + regs_buff[23] = regs_buff[13]; /* mdix mode */ + } + regs_buff[21] = 0; /* was idle_errors */ + e1e_rphy(hw, MII_STAT1000, &phy_data); + regs_buff[24] = (u32)phy_data; /* phy local receiver status */ + regs_buff[25] = regs_buff[24]; /* phy remote receiver status */ + + pm_runtime_put_sync(netdev->dev.parent); +} + +static int e1000_get_eeprom_len(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + return adapter->hw.nvm.word_size * 2; +} + +static int e1000_get_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EINVAL; + + eeprom->magic = adapter->pdev->vendor | (adapter->pdev->device << 16); + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + + eeprom_buff = kmalloc(sizeof(u16) * (last_word - first_word + 1), + GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + pm_runtime_get_sync(netdev->dev.parent); + + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + ret_val = e1000_read_nvm(hw, first_word, + last_word - first_word + 1, + eeprom_buff); + } else { + for (i = 0; i < last_word - first_word + 1; i++) { + ret_val = e1000_read_nvm(hw, first_word + i, 1, + &eeprom_buff[i]); + if (ret_val) + break; + } + } + + pm_runtime_put_sync(netdev->dev.parent); + + if (ret_val) { + /* a read error occurred, throw away the result */ + memset(eeprom_buff, 0xff, sizeof(u16) * + (last_word - first_word + 1)); + } else { + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + } + + memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len); + kfree(eeprom_buff); + + return ret_val; +} + +static int e1000_set_eeprom(struct net_device *netdev, + struct ethtool_eeprom *eeprom, u8 *bytes) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 *eeprom_buff; + void *ptr; + int max_len; + int first_word; + int last_word; + int ret_val = 0; + u16 i; + + if (eeprom->len == 0) + return -EOPNOTSUPP; + + if (eeprom->magic != + (adapter->pdev->vendor | (adapter->pdev->device << 16))) + return -EFAULT; + + if (adapter->flags & FLAG_READ_ONLY_NVM) + return -EINVAL; + + max_len = hw->nvm.word_size * 2; + + first_word = eeprom->offset >> 1; + last_word = (eeprom->offset + eeprom->len - 1) >> 1; + eeprom_buff = kmalloc(max_len, GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + ptr = (void *)eeprom_buff; + + pm_runtime_get_sync(netdev->dev.parent); + + if (eeprom->offset & 1) { + /* need read/modify/write of first changed EEPROM word */ + /* only the second byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, first_word, 1, &eeprom_buff[0]); + ptr++; + } + if (((eeprom->offset + eeprom->len) & 1) && (!ret_val)) + /* need read/modify/write of last changed EEPROM word */ + /* only the first byte of the word is being modified */ + ret_val = e1000_read_nvm(hw, last_word, 1, + &eeprom_buff[last_word - first_word]); + + if (ret_val) + 
goto out; + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < last_word - first_word + 1; i++) + le16_to_cpus(&eeprom_buff[i]); + + memcpy(ptr, bytes, eeprom->len); + + for (i = 0; i < last_word - first_word + 1; i++) + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = e1000_write_nvm(hw, first_word, + last_word - first_word + 1, eeprom_buff); + + if (ret_val) + goto out; + + /* Update the checksum over the first part of the EEPROM if needed + * and flush shadow RAM for applicable controllers + */ + if ((first_word <= NVM_CHECKSUM_REG) || + (hw->mac.type == e1000_82583) || + (hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82573)) + ret_val = e1000e_update_nvm_checksum(hw); + +out: + pm_runtime_put_sync(netdev->dev.parent); + kfree(eeprom_buff); + return ret_val; +} + +static void e1000_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver)); + strlcpy(drvinfo->version, e1000e_driver_version, + sizeof(drvinfo->version)); + + /* EEPROM image version # is reported as firmware version # for + * PCI-E controllers + */ + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d-%d", + (adapter->eeprom_vers & 0xF000) >> 12, + (adapter->eeprom_vers & 0x0FF0) >> 4, + (adapter->eeprom_vers & 0x000F)); + + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), + sizeof(drvinfo->bus_info)); +} + +static void e1000_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + ring->rx_max_pending = E1000_MAX_RXD; + ring->tx_max_pending = E1000_MAX_TXD; + ring->rx_pending = adapter->rx_ring_count; + ring->tx_pending = adapter->tx_ring_count; +} + +static int e1000_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *temp_tx = NULL, *temp_rx = NULL; + int err = 0, size = sizeof(struct e1000_ring); + bool set_tx = false, set_rx = false; + u16 new_rx_count, new_tx_count; + + if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) + return -EINVAL; + + new_rx_count = clamp_t(u32, ring->rx_pending, E1000_MIN_RXD, + E1000_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); + + new_tx_count = clamp_t(u32, ring->tx_pending, E1000_MIN_TXD, + E1000_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); + + if ((new_tx_count == adapter->tx_ring_count) && + (new_rx_count == adapter->rx_ring_count)) + /* nothing to do */ + return 0; + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + + if (!netif_running(adapter->netdev)) { + /* Set counts now and allocate resources during open() */ + adapter->tx_ring->count = new_tx_count; + adapter->rx_ring->count = new_rx_count; + adapter->tx_ring_count = new_tx_count; + adapter->rx_ring_count = new_rx_count; + goto clear_reset; + } + + set_tx = (new_tx_count != adapter->tx_ring_count); + set_rx = (new_rx_count != adapter->rx_ring_count); + + /* Allocate temporary storage for ring updates */ + if (set_tx) { + temp_tx = vmalloc(size); + if (!temp_tx) { + err = -ENOMEM; + goto free_temp; + } + } + if (set_rx) { + temp_rx = vmalloc(size); + if (!temp_rx) { + err = -ENOMEM; + goto free_temp; + } + } + + pm_runtime_get_sync(netdev->dev.parent); + + e1000e_down(adapter, true); + + /* We can't just free everything and then setup 
again, because the + * ISRs in MSI-X mode get passed pointers to the Tx and Rx ring + * structs. First, attempt to allocate new resources... + */ + if (set_tx) { + memcpy(temp_tx, adapter->tx_ring, size); + temp_tx->count = new_tx_count; + err = e1000e_setup_tx_resources(temp_tx); + if (err) + goto err_setup; + } + if (set_rx) { + memcpy(temp_rx, adapter->rx_ring, size); + temp_rx->count = new_rx_count; + err = e1000e_setup_rx_resources(temp_rx); + if (err) + goto err_setup_rx; + } + + /* ...then free the old resources and copy back any new ring data */ + if (set_tx) { + e1000e_free_tx_resources(adapter->tx_ring); + memcpy(adapter->tx_ring, temp_tx, size); + adapter->tx_ring_count = new_tx_count; + } + if (set_rx) { + e1000e_free_rx_resources(adapter->rx_ring); + memcpy(adapter->rx_ring, temp_rx, size); + adapter->rx_ring_count = new_rx_count; + } + +err_setup_rx: + if (err && set_tx) + e1000e_free_tx_resources(temp_tx); +err_setup: + e1000e_up(adapter); + pm_runtime_put_sync(netdev->dev.parent); +free_temp: + vfree(temp_tx); + vfree(temp_rx); +clear_reset: + clear_bit(__E1000_RESETTING, &adapter->state); + return err; +} + +static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, + int reg, int offset, u32 mask, u32 write) +{ + u32 pat, val; + static const u32 test[] = { + 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF + }; + for (pat = 0; pat < ARRAY_SIZE(test); pat++) { + E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, + (test[pat] & write)); + val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset); + if (val != (test[pat] & write & mask)) { + e_err("pattern test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n", + reg + (offset << 2), val, + (test[pat] & write & mask)); + *data = reg; + return true; + } + } + return false; +} + +static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, + int reg, u32 mask, u32 write) +{ + u32 val; + + __ew32(&adapter->hw, reg, write & mask); + val = __er32(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { + e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; + return true; + } + return false; +} + +#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \ + do { \ + if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \ + return 1; \ + } while (0) +#define REG_PATTERN_TEST(reg, mask, write) \ + REG_PATTERN_TEST_ARRAY(reg, 0, mask, write) + +#define REG_SET_AND_CHECK(reg, mask, write) \ + do { \ + if (reg_set_and_check(adapter, data, reg, mask, write)) \ + return 1; \ + } while (0) + +static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &adapter->hw.mac; + u32 value; + u32 before; + u32 after; + u32 i; + u32 toggle; + u32 mask; + u32 wlock_mac = 0; + + /* The status register is Read Only, so a write should fail. + * Some bits that get toggled are ignored. There are several bits + * on newer hardware that are r/w. 
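+ * The per-MAC toggle masks chosen below keep those r/w and ignored bits out
+ * of the comparison.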
+ */ + switch (mac->type) { + case e1000_82571: + case e1000_82572: + case e1000_80003es2lan: + toggle = 0x7FFFF3FF; + break; + default: + toggle = 0x7FFFF033; + break; + } + + before = er32(STATUS); + value = (er32(STATUS) & toggle); + ew32(STATUS, toggle); + after = er32(STATUS) & toggle; + if (value != after) { + e_err("failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); + *data = 1; + return 1; + } + /* restore previous status */ + ew32(STATUS, before); + + if (!(adapter->flags & FLAG_IS_ICH)) { + REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF); + } + + REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF); + REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8); + REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF); + REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF); + + REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000); + + before = ((adapter->flags & FLAG_IS_ICH) ? 0x06C3B33E : 0x06DFB3FE); + REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB); + REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000); + + REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); + if (!(adapter->flags & FLAG_IS_ICH)) + REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF); + REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF); + REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF); + mask = 0x8003FFFF; + switch (mac->type) { + case e1000_ich10lan: + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + mask |= (1 << 18); + break; + default: + break; + } + + if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) + wlock_mac = (er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK) >> + E1000_FWSM_WLOCK_MAC_SHIFT; + + for (i = 0; i < mac->rar_entry_count; i++) { + if ((mac->type == e1000_pch_lpt) || + (mac->type == e1000_pch_spt)) { + /* Cannot test write-protected SHRAL[n] registers */ + if ((wlock_mac == 1) || (wlock_mac && (i > wlock_mac))) + continue; + + /* SHRAH[9] different than the others */ + if (i == 10) + mask |= (1 << 30); + else + mask &= ~(1 << 30); + } + if (mac->type == e1000_pch2lan) { + /* SHRAH[0,1,2] different than previous */ + if (i == 1) + mask &= 0xFFF4FFFF; + /* SHRAH[3] different than SHRAH[0,1,2] */ + if (i == 4) + mask |= (1 << 30); + /* RAR[1-6] owned by management engine - skipping */ + if (i > 0) + i += 6; + } + + REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1), mask, + 0xFFFFFFFF); + /* reset index to actual value */ + if ((mac->type == e1000_pch2lan) && (i > 6)) + i -= 6; + } + + for (i = 0; i < mac->mta_reg_count; i++) + REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF); + + *data = 0; + + return 0; +} + +static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data) +{ + u16 temp; + u16 checksum = 0; + u16 i; + + *data = 0; + /* Read and add up the contents of the EEPROM */ + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) { + *data = 1; + return *data; + } + 
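+		/* 16-bit wraparound sum; a valid image sums to NVM_SUM (0xBABA) */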
checksum += temp; + } + + /* If Checksum is not Correct return error else test passed */ + if ((checksum != (u16)NVM_SUM) && !(*data)) + *data = 2; + + return *data; +} + +static irqreturn_t e1000_test_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = (struct net_device *)data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + adapter->test_icr |= er32(ICR); + + return IRQ_HANDLED; +} + +static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 mask; + u32 shared_int = 1; + u32 irq = adapter->pdev->irq; + int i; + int ret_val = 0; + int int_mode = E1000E_INT_MODE_LEGACY; + + *data = 0; + + /* NOTE: we don't test MSI/MSI-X interrupts here, yet */ + if (adapter->int_mode == E1000E_INT_MODE_MSIX) { + int_mode = adapter->int_mode; + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e1000e_set_interrupt_capability(adapter); + } + /* Hook up test interrupt handler just for this test */ + if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name, + netdev)) { + shared_int = 0; + } else if (request_irq(irq, e1000_test_intr, IRQF_SHARED, netdev->name, + netdev)) { + *data = 1; + ret_val = -1; + goto out; + } + e_info("testing %s interrupt\n", (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + e1e_flush(); + usleep_range(10000, 20000); + + /* Test each interrupt */ + for (i = 0; i < 10; i++) { + /* Interrupt to test */ + mask = 1 << i; + + if (adapter->flags & FLAG_IS_ICH) { + switch (mask) { + case E1000_ICR_RXSEQ: + continue; + case 0x00000100: + if (adapter->hw.mac.type == e1000_ich8lan || + adapter->hw.mac.type == e1000_ich9lan) + continue; + break; + default: + break; + } + } + + if (!shared_int) { + /* Disable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMC, mask); + ew32(ICS, mask); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->test_icr & mask) { + *data = 3; + break; + } + } + + /* Enable the interrupt to be reported in + * the cause register and then force the same + * interrupt and see if one gets posted. If + * an interrupt was not posted to the bus, the + * test failed. + */ + adapter->test_icr = 0; + ew32(IMS, mask); + ew32(ICS, mask); + e1e_flush(); + usleep_range(10000, 20000); + + if (!(adapter->test_icr & mask)) { + *data = 4; + break; + } + + if (!shared_int) { + /* Disable the other interrupts to be reported in + * the cause register and then force the other + * interrupts and see if any get posted. If + * an interrupt was posted to the bus, the + * test failed. 
+ */ + adapter->test_icr = 0; + ew32(IMC, ~mask & 0x00007FFF); + ew32(ICS, ~mask & 0x00007FFF); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->test_icr) { + *data = 5; + break; + } + } + } + + /* Disable all the interrupts */ + ew32(IMC, 0xFFFFFFFF); + e1e_flush(); + usleep_range(10000, 20000); + + /* Unhook test interrupt handler */ + free_irq(irq, netdev); + +out: + if (int_mode == E1000E_INT_MODE_MSIX) { + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = int_mode; + e1000e_set_interrupt_capability(adapter); + } + + return ret_val; +} + +static void e1000_free_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info; + int i; + + if (tx_ring->desc && tx_ring->buffer_info) { + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + if (buffer_info->skb) + dev_kfree_skb(buffer_info->skb); + } + } + + if (rx_ring->desc && rx_ring->buffer_info) { + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + + if (buffer_info->dma) + dma_unmap_single(&pdev->dev, + buffer_info->dma, + 2048, DMA_FROM_DEVICE); + if (buffer_info->skb) + dev_kfree_skb(buffer_info->skb); + } + } + + if (tx_ring->desc) { + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; + } + if (rx_ring->desc) { + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; + } + + kfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + kfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; +} + +static int e1000_setup_desc_rings(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + int i; + int ret_val; + + /* Setup Tx descriptor ring and Tx buffers */ + + if (!tx_ring->count) + tx_ring->count = E1000_DEFAULT_TXD; + + tx_ring->buffer_info = kcalloc(tx_ring->count, + sizeof(struct e1000_buffer), GFP_KERNEL); + if (!tx_ring->buffer_info) { + ret_val = 1; + goto err_nomem; + } + + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, + &tx_ring->dma, GFP_KERNEL); + if (!tx_ring->desc) { + ret_val = 2; + goto err_nomem; + } + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + ew32(TDBAL(0), ((u64)tx_ring->dma & 0x00000000FFFFFFFF)); + ew32(TDBAH(0), ((u64)tx_ring->dma >> 32)); + ew32(TDLEN(0), tx_ring->count * sizeof(struct e1000_tx_desc)); + ew32(TDH(0), 0); + ew32(TDT(0), 0); + ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR | + E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT | + E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); + + for (i = 0; i < tx_ring->count; i++) { + struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); + struct sk_buff *skb; + unsigned int skb_size = 1024; + + skb = alloc_skb(skb_size, GFP_KERNEL); + if (!skb) { + ret_val = 3; + goto err_nomem; + } + skb_put(skb, skb_size); + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].length = skb->len; + tx_ring->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, 
skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, + tx_ring->buffer_info[i].dma)) { + ret_val = 4; + goto err_nomem; + } + tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); + tx_desc->lower.data = cpu_to_le32(skb->len); + tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | + E1000_TXD_CMD_IFCS | + E1000_TXD_CMD_RS); + tx_desc->upper.data = 0; + } + + /* Setup Rx descriptor ring and Rx buffers */ + + if (!rx_ring->count) + rx_ring->count = E1000_DEFAULT_RXD; + + rx_ring->buffer_info = kcalloc(rx_ring->count, + sizeof(struct e1000_buffer), GFP_KERNEL); + if (!rx_ring->buffer_info) { + ret_val = 5; + goto err_nomem; + } + + rx_ring->size = rx_ring->count * sizeof(union e1000_rx_desc_extended); + rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, + &rx_ring->dma, GFP_KERNEL); + if (!rx_ring->desc) { + ret_val = 6; + goto err_nomem; + } + rx_ring->next_to_use = 0; + rx_ring->next_to_clean = 0; + + rctl = er32(RCTL); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); + ew32(RDBAL(0), ((u64)rx_ring->dma & 0xFFFFFFFF)); + ew32(RDBAH(0), ((u64)rx_ring->dma >> 32)); + ew32(RDLEN(0), rx_ring->size); + ew32(RDH(0), 0); + ew32(RDT(0), 0); + rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 | + E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE | + E1000_RCTL_SBP | E1000_RCTL_SECRC | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + ew32(RCTL, rctl); + + for (i = 0; i < rx_ring->count; i++) { + union e1000_rx_desc_extended *rx_desc; + struct sk_buff *skb; + + skb = alloc_skb(2048 + NET_IP_ALIGN, GFP_KERNEL); + if (!skb) { + ret_val = 7; + goto err_nomem; + } + skb_reserve(skb, NET_IP_ALIGN); + rx_ring->buffer_info[i].skb = skb; + rx_ring->buffer_info[i].dma = + dma_map_single(&pdev->dev, skb->data, 2048, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + rx_ring->buffer_info[i].dma)) { + ret_val = 8; + goto err_nomem; + } + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + rx_desc->read.buffer_addr = + cpu_to_le64(rx_ring->buffer_info[i].dma); + memset(skb->data, 0x00, skb->len); + } + + return 0; + +err_nomem: + e1000_free_desc_rings(adapter); + return ret_val; +} + +static void e1000_phy_disable_receiver(struct e1000_adapter *adapter) +{ + /* Write out to PHY registers 29 and 30 to disable the Receiver. */ + e1e_wphy(&adapter->hw, 29, 0x001F); + e1e_wphy(&adapter->hw, 30, 0x8FFC); + e1e_wphy(&adapter->hw, 29, 0x001A); + e1e_wphy(&adapter->hw, 30, 0x8FF0); +} + +static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_reg = 0; + u16 phy_reg = 0; + s32 ret_val = 0; + + hw->mac.autoneg = 0; + + if (hw->phy.type == e1000_phy_ife) { + /* force 100, set loopback */ + e1e_wphy(hw, MII_BMCR, 0x6100); + + /* Now set up the MAC to the same speed/duplex as the PHY. 
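+ * (The BMCR value 0x6100 written above is loopback | 100 Mb/s |
+ * full duplex, so the MAC is forced to 100/FD to match the PHY.)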
*/ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_100 |/* Force Speed to 100 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + ew32(CTRL, ctrl_reg); + e1e_flush(); + usleep_range(500, 1000); + + return 0; + } + + /* Specific PHY configuration for loopback */ + switch (hw->phy.type) { + case e1000_phy_m88: + /* Auto-MDI/MDIX Off */ + e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808); + /* reset to update Auto-MDI/MDIX */ + e1e_wphy(hw, MII_BMCR, 0x9140); + /* autoneg off */ + e1e_wphy(hw, MII_BMCR, 0x8140); + break; + case e1000_phy_gg82563: + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC); + break; + case e1000_phy_bm: + /* Set Default MAC Interface speed to 1GB */ + e1e_rphy(hw, PHY_REG(2, 21), &phy_reg); + phy_reg &= ~0x0007; + phy_reg |= 0x006; + e1e_wphy(hw, PHY_REG(2, 21), phy_reg); + /* Assert SW reset for above settings to take effect */ + hw->phy.ops.commit(hw); + usleep_range(1000, 2000); + /* Force Full Duplex */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x000C); + /* Set Link Up (in force link) */ + e1e_rphy(hw, PHY_REG(776, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(776, 16), phy_reg | 0x0040); + /* Force Link */ + e1e_rphy(hw, PHY_REG(769, 16), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 16), phy_reg | 0x0040); + /* Set Early Link Enable */ + e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); + e1e_wphy(hw, PHY_REG(769, 20), phy_reg | 0x0400); + break; + case e1000_phy_82577: + case e1000_phy_82578: + /* Workaround: K1 must be disabled for stable 1Gbps operation */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_err("Cannot setup 1Gbps loopback.\n"); + return ret_val; + } + e1000_configure_k1_ich8lan(hw, false); + hw->phy.ops.release(hw); + break; + case e1000_phy_82579: + /* Disable PHY energy detect power down */ + e1e_rphy(hw, PHY_REG(0, 21), &phy_reg); + e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3)); + /* Disable full chip energy detect */ + e1e_rphy(hw, PHY_REG(776, 18), &phy_reg); + e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1); + /* Enable loopback on the PHY */ + e1e_wphy(hw, I82577_PHY_LBK_CTRL, 0x8001); + break; + default: + break; + } + + /* force 1000, set loopback */ + e1e_wphy(hw, MII_BMCR, 0x4140); + msleep(250); + + /* Now set up the MAC to the same speed/duplex as the PHY. */ + ctrl_reg = er32(CTRL); + ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */ + ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */ + E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */ + E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */ + E1000_CTRL_FD); /* Force Duplex to FULL */ + + if (adapter->flags & FLAG_IS_ICH) + ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */ + + if (hw->phy.media_type == e1000_media_type_copper && + hw->phy.type == e1000_phy_m88) { + ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */ + } else { + /* Set the ILOS bit on the fiber Nic if half duplex link is + * detected. + */ + if ((er32(STATUS) & E1000_STATUS_FD) == 0) + ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU); + } + + ew32(CTRL, ctrl_reg); + + /* Disable the receiver on the PHY so when a cable is plugged in, the + * PHY does not begin to autoneg when a cable is reconnected to the NIC. 
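+ * (e1000_phy_disable_receiver() below writes what appear to be
+ * M88 vendor test registers 29/30, hence the e1000_phy_m88 check.)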
+ */ + if (hw->phy.type == e1000_phy_m88) + e1000_phy_disable_receiver(adapter); + + usleep_range(500, 1000); + + return 0; +} + +static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + int link; + + /* special requirements for 82571/82572 fiber adapters */ + + /* jump through hoops to make sure link is up because serdes + * link is hardwired up + */ + ctrl |= E1000_CTRL_SLU; + ew32(CTRL, ctrl); + + /* disable autoneg */ + ctrl = er32(TXCW); + ctrl &= ~(1 << 31); + ew32(TXCW, ctrl); + + link = (er32(STATUS) & E1000_STATUS_LU); + + if (!link) { + /* set invert loss of signal */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_ILOS; + ew32(CTRL, ctrl); + } + + /* special write to serdes control register to enable SerDes analog + * loopback + */ + ew32(SCTL, E1000_SCTL_ENABLE_SERDES_LOOPBACK); + e1e_flush(); + usleep_range(10000, 20000); + + return 0; +} + +/* only call this for fiber/serdes connections to es2lan */ +static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrlext = er32(CTRL_EXT); + u32 ctrl = er32(CTRL); + + /* save CTRL_EXT to restore later, reuse an empty variable (unused + * on mac_type 80003es2lan) + */ + adapter->tx_fifo_head = ctrlext; + + /* clear the serdes mode bits, putting the device into mac loopback */ + ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES; + ew32(CTRL_EXT, ctrlext); + + /* force speed to 1000/FD, link up */ + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | + E1000_CTRL_SPD_1000 | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* set mac loopback */ + ctrl = er32(RCTL); + ctrl |= E1000_RCTL_LBM_MAC; + ew32(RCTL, ctrl); + + /* set testing mode parameters (no need to reset later) */ +#define KMRNCTRLSTA_OPMODE (0x1F << 16) +#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582 + ew32(KMRNCTRLSTA, + (KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII)); + + return 0; +} + +static int e1000_setup_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl, fext_nvm11, tarc0; + + if (hw->mac.type == e1000_pch_spt) { + fext_nvm11 = er32(FEXTNVM11); + fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; + ew32(FEXTNVM11, fext_nvm11); + tarc0 = er32(TARC(0)); + /* clear bits 28 & 29 (control of MULR concurrent requests) */ + tarc0 &= 0xcfffffff; + /* set bit 29 (value of MULR requests is now 2) */ + tarc0 |= 0x20000000; + ew32(TARC(0), tarc0); + } + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + switch (hw->mac.type) { + case e1000_80003es2lan: + return e1000_set_es2lan_mac_loopback(adapter); + case e1000_82571: + case e1000_82572: + return e1000_set_82571_fiber_loopback(adapter); + default: + rctl = er32(RCTL); + rctl |= E1000_RCTL_LBM_TCVR; + ew32(RCTL, rctl); + return 0; + } + } else if (hw->phy.media_type == e1000_media_type_copper) { + return e1000_integrated_phy_loopback(adapter); + } + + return 7; +} + +static void e1000_loopback_cleanup(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl, fext_nvm11, tarc0; + u16 phy_reg; + + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); + ew32(RCTL, rctl); + + switch (hw->mac.type) { + case e1000_pch_spt: + fext_nvm11 = er32(FEXTNVM11); + fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX; + ew32(FEXTNVM11, fext_nvm11); + tarc0 = er32(TARC(0)); + /* clear bits 28 & 
29 (control of MULR concurrent requests) */ + /* set bit 29 (value of MULR requests is now 0) */ + tarc0 &= 0xcfffffff; + ew32(TARC(0), tarc0); + /* fall through */ + case e1000_80003es2lan: + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + /* restore CTRL_EXT, stealing space from tx_fifo_head */ + ew32(CTRL_EXT, adapter->tx_fifo_head); + adapter->tx_fifo_head = 0; + } + /* fall through */ + case e1000_82571: + case e1000_82572: + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) { + ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK); + e1e_flush(); + usleep_range(10000, 20000); + break; + } + /* Fall Through */ + default: + hw->mac.autoneg = 1; + if (hw->phy.type == e1000_phy_gg82563) + e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x180); + e1e_rphy(hw, MII_BMCR, &phy_reg); + if (phy_reg & BMCR_LOOPBACK) { + phy_reg &= ~BMCR_LOOPBACK; + e1e_wphy(hw, MII_BMCR, phy_reg); + if (hw->phy.ops.commit) + hw->phy.ops.commit(hw); + } + break; + } +} + +static void e1000_create_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + memset(skb->data, 0xFF, frame_size); + frame_size &= ~1; + memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); + memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); + memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); +} + +static int e1000_check_lbtest_frame(struct sk_buff *skb, + unsigned int frame_size) +{ + frame_size &= ~1; + if (*(skb->data + 3) == 0xFF) + if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && + (*(skb->data + frame_size / 2 + 12) == 0xAF)) + return 0; + return 13; +} + +static int e1000_run_loopback_test(struct e1000_adapter *adapter) +{ + struct e1000_ring *tx_ring = &adapter->test_tx_ring; + struct e1000_ring *rx_ring = &adapter->test_rx_ring; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_buffer *buffer_info; + int i, j, k, l; + int lc; + int good_cnt; + int ret_val = 0; + unsigned long time; + + ew32(RDT(0), rx_ring->count - 1); + + /* Calculate the loop count based on the largest descriptor ring + * The idea is to wrap the largest ring a number of times using 64 + * send/receive pairs during each loop + */ + + if (rx_ring->count <= tx_ring->count) + lc = ((tx_ring->count / 64) * 2) + 1; + else + lc = ((rx_ring->count / 64) * 2) + 1; + + k = 0; + l = 0; + /* loop count loop */ + for (j = 0; j <= lc; j++) { + /* send the packets */ + for (i = 0; i < 64; i++) { + buffer_info = &tx_ring->buffer_info[k]; + + e1000_create_lbtest_frame(buffer_info->skb, 1024); + dma_sync_single_for_device(&pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + k++; + if (k == tx_ring->count) + k = 0; + } + ew32(TDT(0), k); + e1e_flush(); + msleep(200); + time = jiffies; /* set the start time for the receive */ + good_cnt = 0; + /* receive the sent packets */ + do { + buffer_info = &rx_ring->buffer_info[l]; + + dma_sync_single_for_cpu(&pdev->dev, + buffer_info->dma, 2048, + DMA_FROM_DEVICE); + + ret_val = e1000_check_lbtest_frame(buffer_info->skb, + 1024); + if (!ret_val) + good_cnt++; + l++; + if (l == rx_ring->count) + l = 0; + /* time + 20 msecs (200 msecs on 2.4) is more than + * enough time to complete the receives, if it's + * exceeded, break and error off + */ + } while ((good_cnt < 64) && !time_after(jiffies, time + 20)); + if (good_cnt != 64) { + ret_val = 13; /* ret_val is the same as mis-compare */ + break; + } + if (time_after(jiffies, time + 20)) { + ret_val = 
14; /* error code for time out error */ + break; + } + } + return ret_val; +} + +static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + /* PHY loopback cannot be performed if SoL/IDER sessions are active */ + if (hw->phy.ops.check_reset_block && + hw->phy.ops.check_reset_block(hw)) { + e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); + *data = 0; + goto out; + } + + *data = e1000_setup_desc_rings(adapter); + if (*data) + goto out; + + *data = e1000_setup_loopback_test(adapter); + if (*data) + goto err_loopback; + + *data = e1000_run_loopback_test(adapter); + e1000_loopback_cleanup(adapter); + +err_loopback: + e1000_free_desc_rings(adapter); +out: + return *data; +} + +static int e1000_link_test(struct e1000_adapter *adapter, u64 *data) +{ + struct e1000_hw *hw = &adapter->hw; + + *data = 0; + if (hw->phy.media_type == e1000_media_type_internal_serdes) { + int i = 0; + + hw->mac.serdes_has_link = false; + + /* On some blade server designs, link establishment + * could take as long as 2-3 minutes + */ + do { + hw->mac.ops.check_for_link(hw); + if (hw->mac.serdes_has_link) + return *data; + msleep(20); + } while (i++ < 3750); + + *data = 1; + } else { + hw->mac.ops.check_for_link(hw); + if (hw->mac.autoneg) + /* On some Phy/switch combinations, link establishment + * can take a few seconds more than expected. + */ + msleep_interruptible(5000); + + if (!(er32(STATUS) & E1000_STATUS_LU)) + *data = 1; + } + return *data; +} + +static int e1000e_get_sset_count(struct net_device __always_unused *netdev, + int sset) +{ + switch (sset) { + case ETH_SS_TEST: + return E1000_TEST_LEN; + case ETH_SS_STATS: + return E1000_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} + +static void e1000_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + u16 autoneg_advertised; + u8 forced_speed_duplex; + u8 autoneg; + bool if_running = netif_running(netdev); + + pm_runtime_get_sync(netdev->dev.parent); + + set_bit(__E1000_TESTING, &adapter->state); + + if (!if_running) { + /* Get control of and reset hardware */ + if (adapter->flags & FLAG_HAS_AMT) + e1000e_get_hw_control(adapter); + + e1000e_power_up_phy(adapter); + + adapter->hw.phy.autoneg_wait_to_complete = 1; + e1000e_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = 0; + } + + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { + /* Offline tests */ + + /* save speed, duplex, autoneg settings */ + autoneg_advertised = adapter->hw.phy.autoneg_advertised; + forced_speed_duplex = adapter->hw.mac.forced_speed_duplex; + autoneg = adapter->hw.mac.autoneg; + + e_info("offline testing starting\n"); + + if (if_running) + /* indicate we're in test mode */ + dev_close(netdev); + + if (e1000_reg_test(adapter, &data[0])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_eeprom_test(adapter, &data[1])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_intr_test(adapter, &data[2])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + e1000e_reset(adapter); + if (e1000_loopback_test(adapter, &data[3])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* force this routine to wait until autoneg complete/timeout */ + adapter->hw.phy.autoneg_wait_to_complete = 1; + e1000e_reset(adapter); + adapter->hw.phy.autoneg_wait_to_complete = 0; + + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + /* restore speed, duplex, autoneg 
settings */ + adapter->hw.phy.autoneg_advertised = autoneg_advertised; + adapter->hw.mac.forced_speed_duplex = forced_speed_duplex; + adapter->hw.mac.autoneg = autoneg; + e1000e_reset(adapter); + + clear_bit(__E1000_TESTING, &adapter->state); + if (if_running) + dev_open(netdev); + } else { + /* Online tests */ + + e_info("online testing starting\n"); + + /* register, eeprom, intr and loopback tests not run online */ + data[0] = 0; + data[1] = 0; + data[2] = 0; + data[3] = 0; + + if (e1000_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + + clear_bit(__E1000_TESTING, &adapter->state); + } + + if (!if_running) { + e1000e_reset(adapter); + + if (adapter->flags & FLAG_HAS_AMT) + e1000e_release_hw_control(adapter); + } + + msleep_interruptible(4 * 1000); + + pm_runtime_put_sync(netdev->dev.parent); +} + +static void e1000_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + wol->supported = 0; + wol->wolopts = 0; + + if (!(adapter->flags & FLAG_HAS_WOL) || + !device_can_wakeup(&adapter->pdev->dev)) + return; + + wol->supported = WAKE_UCAST | WAKE_MCAST | + WAKE_BCAST | WAKE_MAGIC | WAKE_PHY; + + /* apply any specific unsupported masks here */ + if (adapter->flags & FLAG_NO_WAKE_UCAST) { + wol->supported &= ~WAKE_UCAST; + + if (adapter->wol & E1000_WUFC_EX) + e_err("Interface does not support directed (unicast) frame wake-up packets\n"); + } + + if (adapter->wol & E1000_WUFC_EX) + wol->wolopts |= WAKE_UCAST; + if (adapter->wol & E1000_WUFC_MC) + wol->wolopts |= WAKE_MCAST; + if (adapter->wol & E1000_WUFC_BC) + wol->wolopts |= WAKE_BCAST; + if (adapter->wol & E1000_WUFC_MAG) + wol->wolopts |= WAKE_MAGIC; + if (adapter->wol & E1000_WUFC_LNKC) + wol->wolopts |= WAKE_PHY; +} + +static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!(adapter->flags & FLAG_HAS_WOL) || + !device_can_wakeup(&adapter->pdev->dev) || + (wol->wolopts & ~(WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | + WAKE_MAGIC | WAKE_PHY))) + return -EOPNOTSUPP; + + /* these settings will always override what we currently have */ + adapter->wol = 0; + + if (wol->wolopts & WAKE_UCAST) + adapter->wol |= E1000_WUFC_EX; + if (wol->wolopts & WAKE_MCAST) + adapter->wol |= E1000_WUFC_MC; + if (wol->wolopts & WAKE_BCAST) + adapter->wol |= E1000_WUFC_BC; + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= E1000_WUFC_MAG; + if (wol->wolopts & WAKE_PHY) + adapter->wol |= E1000_WUFC_LNKC; + + device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); + + return 0; +} + +static int e1000_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + switch (state) { + case ETHTOOL_ID_ACTIVE: + pm_runtime_get_sync(netdev->dev.parent); + + if (!hw->mac.ops.blink_led) + return 2; /* cycle on/off twice per second */ + + hw->mac.ops.blink_led(hw); + break; + + case ETHTOOL_ID_INACTIVE: + if (hw->phy.type == e1000_phy_ife) + e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0); + hw->mac.ops.led_off(hw); + hw->mac.ops.cleanup_led(hw); + pm_runtime_put_sync(netdev->dev.parent); + break; + + case ETHTOOL_ID_ON: + hw->mac.ops.led_on(hw); + break; + + case ETHTOOL_ID_OFF: + hw->mac.ops.led_off(hw); + break; + } + + return 0; +} + +static int e1000_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + 
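+ /* itr_setting values 0-4 encode moderation modes rather than
+ * rates (0 = off, 1 and 3 = dynamic, 4 = simplified); anything
+ * larger is an interrupts/second rate, reported to ethtool below
+ * as its microsecond equivalent.
+ */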
+ if (adapter->itr_setting <= 4) + ec->rx_coalesce_usecs = adapter->itr_setting; + else + ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting; + + return 0; +} + +static int e1000_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) || + ((ec->rx_coalesce_usecs > 4) && + (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) || + (ec->rx_coalesce_usecs == 2)) + return -EINVAL; + + if (ec->rx_coalesce_usecs == 4) { + adapter->itr_setting = 4; + adapter->itr = adapter->itr_setting; + } else if (ec->rx_coalesce_usecs <= 3) { + adapter->itr = 20000; + adapter->itr_setting = ec->rx_coalesce_usecs; + } else { + adapter->itr = (1000000 / ec->rx_coalesce_usecs); + adapter->itr_setting = adapter->itr & ~3; + } + + pm_runtime_get_sync(netdev->dev.parent); + + if (adapter->itr_setting != 0) + e1000e_write_itr(adapter, adapter->itr); + else + e1000e_write_itr(adapter, 0); + + pm_runtime_put_sync(netdev->dev.parent); + + return 0; +} + +static int e1000_nway_reset(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (!netif_running(netdev)) + return -EAGAIN; + + if (!adapter->hw.mac.autoneg) + return -EINVAL; + + pm_runtime_get_sync(netdev->dev.parent); + e1000e_reinit_locked(adapter); + pm_runtime_put_sync(netdev->dev.parent); + + return 0; +} + +static void e1000_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats __always_unused *stats, + u64 *data) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct rtnl_link_stats64 net_stats; + int i; + char *p = NULL; + + pm_runtime_get_sync(netdev->dev.parent); + + e1000e_get_stats64(netdev, &net_stats); + + pm_runtime_put_sync(netdev->dev.parent); + + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + switch (e1000_gstrings_stats[i].type) { + case NETDEV_STATS: + p = (char *)&net_stats + + e1000_gstrings_stats[i].stat_offset; + break; + case E1000_STATS: + p = (char *)adapter + + e1000_gstrings_stats[i].stat_offset; + break; + default: + data[i] = 0; + continue; + } + + data[i] = (e1000_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? 
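+ /* stat_offset is a byte offset into net_stats or the adapter
+ * struct; 32-bit counters are widened to u64 for ethtool */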
*(u64 *)p : *(u32 *)p; + } +} + +static void e1000_get_strings(struct net_device __always_unused *netdev, + u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_TEST: + memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test)); + break; + case ETH_SS_STATS: + for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { + memcpy(p, e1000_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + break; + } +} + +static int e1000_get_rxnfc(struct net_device *netdev, + struct ethtool_rxnfc *info, + u32 __always_unused *rule_locs) +{ + info->data = 0; + + switch (info->cmd) { + case ETHTOOL_GRXFH: { + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 mrqc; + + pm_runtime_get_sync(netdev->dev.parent); + mrqc = er32(MRQC); + pm_runtime_put_sync(netdev->dev.parent); + + if (!(mrqc & E1000_MRQC_RSS_FIELD_MASK)) + return 0; + + switch (info->flow_type) { + case TCP_V4_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4_TCP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case IPV4_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV4) + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + case TCP_V6_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6_TCP) + info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; + /* fall through */ + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case IPV6_FLOW: + if (mrqc & E1000_MRQC_RSS_FIELD_IPV6) + info->data |= RXH_IP_SRC | RXH_IP_DST; + break; + default: + break; + } + return 0; + } + default: + return -EOPNOTSUPP; + } +} + +static int e1000e_get_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 cap_addr, lpa_addr, pcs_stat_addr, phy_data; + u32 ret_val; + + if (!(adapter->flags2 & FLAG2_HAS_EEE)) + return -EOPNOTSUPP; + + switch (hw->phy.type) { + case e1000_phy_82579: + cap_addr = I82579_EEE_CAPABILITY; + lpa_addr = I82579_EEE_LP_ABILITY; + pcs_stat_addr = I82579_EEE_PCS_STATUS; + break; + case e1000_phy_i217: + cap_addr = I217_EEE_CAPABILITY; + lpa_addr = I217_EEE_LP_ABILITY; + pcs_stat_addr = I217_EEE_PCS_STATUS; + break; + default: + return -EOPNOTSUPP; + } + + pm_runtime_get_sync(netdev->dev.parent); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + pm_runtime_put_sync(netdev->dev.parent); + return -EBUSY; + } + + /* EEE Capability */ + ret_val = e1000_read_emi_reg_locked(hw, cap_addr, &phy_data); + if (ret_val) + goto release; + edata->supported = mmd_eee_cap_to_ethtool_sup_t(phy_data); + + /* EEE Advertised */ + edata->advertised = mmd_eee_adv_to_ethtool_adv_t(adapter->eee_advert); + + /* EEE Link Partner Advertised */ + ret_val = e1000_read_emi_reg_locked(hw, lpa_addr, &phy_data); + if (ret_val) + goto release; + edata->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(phy_data); + + /* EEE PCS Status */ + ret_val = e1000_read_emi_reg_locked(hw, pcs_stat_addr, &phy_data); + if (ret_val) + goto release; + if (hw->phy.type == e1000_phy_82579) + phy_data <<= 8; + + /* Result of the EEE auto negotiation - there is no register that + * has the status of the EEE negotiation so do a best-guess based + * on whether Tx or Rx LPI indications have been received. 
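+ * (An LPI indication can only have been received if both link
+ * partners advertised EEE, so either bit implies EEE is active.)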
+ */ + if (phy_data & (E1000_EEE_TX_LPI_RCVD | E1000_EEE_RX_LPI_RCVD)) + edata->eee_active = true; + + edata->eee_enabled = !hw->dev_spec.ich8lan.eee_disable; + edata->tx_lpi_enabled = true; + edata->tx_lpi_timer = er32(LPIC) >> E1000_LPIC_LPIET_SHIFT; + +release: + hw->phy.ops.release(hw); + if (ret_val) + ret_val = -ENODATA; + + pm_runtime_put_sync(netdev->dev.parent); + + return ret_val; +} + +static int e1000e_set_eee(struct net_device *netdev, struct ethtool_eee *edata) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct ethtool_eee eee_curr; + s32 ret_val; + + ret_val = e1000e_get_eee(netdev, &eee_curr); + if (ret_val) + return ret_val; + + if (eee_curr.tx_lpi_enabled != edata->tx_lpi_enabled) { + e_err("Setting EEE tx-lpi is not supported\n"); + return -EINVAL; + } + + if (eee_curr.tx_lpi_timer != edata->tx_lpi_timer) { + e_err("Setting EEE Tx LPI timer is not supported\n"); + return -EINVAL; + } + + if (edata->advertised & ~(ADVERTISE_100_FULL | ADVERTISE_1000_FULL)) { + e_err("EEE advertisement supports only 100TX and/or 1000T full-duplex\n"); + return -EINVAL; + } + + adapter->eee_advert = ethtool_adv_to_mmd_eee_adv_t(edata->advertised); + + hw->dev_spec.ich8lan.eee_disable = !edata->eee_enabled; + + pm_runtime_get_sync(netdev->dev.parent); + + /* reset the link */ + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + + pm_runtime_put_sync(netdev->dev.parent); + + return 0; +} + +static int e1000e_get_ts_info(struct net_device *netdev, + struct ethtool_ts_info *info) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + ethtool_op_get_ts_info(netdev, info); + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return 0; + + info->so_timestamping |= (SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE); + + info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); + + info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) | + (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_ALL)); + + if (adapter->ptp_clock) + info->phc_index = ptp_clock_index(adapter->ptp_clock); + + return 0; +} + +static const struct ethtool_ops e1000_ethtool_ops = { + .get_settings = e1000_get_settings, + .set_settings = e1000_set_settings, + .get_drvinfo = e1000_get_drvinfo, + .get_regs_len = e1000_get_regs_len, + .get_regs = e1000_get_regs, + .get_wol = e1000_get_wol, + .set_wol = e1000_set_wol, + .get_msglevel = e1000_get_msglevel, + .set_msglevel = e1000_set_msglevel, + .nway_reset = e1000_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = e1000_get_eeprom_len, + .get_eeprom = e1000_get_eeprom, + .set_eeprom = e1000_set_eeprom, + .get_ringparam = e1000_get_ringparam, + .set_ringparam = e1000_set_ringparam, + .get_pauseparam = e1000_get_pauseparam, + .set_pauseparam = e1000_set_pauseparam, + .self_test = e1000_diag_test, + .get_strings = e1000_get_strings, + .set_phys_id = e1000_set_phys_id, + .get_ethtool_stats = e1000_get_ethtool_stats, + .get_sset_count = e1000e_get_sset_count, + .get_coalesce = e1000_get_coalesce, + .set_coalesce = e1000_set_coalesce, + .get_rxnfc = e1000_get_rxnfc, + 
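+ /* the EEE hooks return -EOPNOTSUPP on parts without
+ * FLAG2_HAS_EEE; get_ts_info falls back to software timestamping
+ * when FLAG_HAS_HW_TIMESTAMP is clear */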
.get_ts_info = e1000e_get_ts_info, + .get_eee = e1000e_get_eee, + .set_eee = e1000e_set_eee, +}; + +void e1000e_set_ethtool_ops(struct net_device *netdev) +{ + netdev->ethtool_ops = &e1000_ethtool_ops; +} diff --git a/addons/e1000e/src/4.4.180/hw.h b/addons/e1000e/src/4.4.180/hw.h new file mode 100644 index 00000000..c9da4654 --- /dev/null +++ b/addons/e1000e/src/4.4.180/hw.h @@ -0,0 +1,698 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000_HW_H_ +#define _E1000_HW_H_ + +#include "regs.h" +#include "defines.h" + +struct e1000_hw; + +#define E1000_DEV_ID_82571EB_COPPER 0x105E +#define E1000_DEV_ID_82571EB_FIBER 0x105F +#define E1000_DEV_ID_82571EB_SERDES 0x1060 +#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4 +#define E1000_DEV_ID_82571PT_QUAD_COPPER 0x10D5 +#define E1000_DEV_ID_82571EB_QUAD_FIBER 0x10A5 +#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP 0x10BC +#define E1000_DEV_ID_82571EB_SERDES_DUAL 0x10D9 +#define E1000_DEV_ID_82571EB_SERDES_QUAD 0x10DA +#define E1000_DEV_ID_82572EI_COPPER 0x107D +#define E1000_DEV_ID_82572EI_FIBER 0x107E +#define E1000_DEV_ID_82572EI_SERDES 0x107F +#define E1000_DEV_ID_82572EI 0x10B9 +#define E1000_DEV_ID_82573E 0x108B +#define E1000_DEV_ID_82573E_IAMT 0x108C +#define E1000_DEV_ID_82573L 0x109A +#define E1000_DEV_ID_82574L 0x10D3 +#define E1000_DEV_ID_82574LA 0x10F6 +#define E1000_DEV_ID_82583V 0x150C +#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT 0x1096 +#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT 0x1098 +#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT 0x10BA +#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT 0x10BB +#define E1000_DEV_ID_ICH8_82567V_3 0x1501 +#define E1000_DEV_ID_ICH8_IGP_M_AMT 0x1049 +#define E1000_DEV_ID_ICH8_IGP_AMT 0x104A +#define E1000_DEV_ID_ICH8_IGP_C 0x104B +#define E1000_DEV_ID_ICH8_IFE 0x104C +#define E1000_DEV_ID_ICH8_IFE_GT 0x10C4 +#define E1000_DEV_ID_ICH8_IFE_G 0x10C5 +#define E1000_DEV_ID_ICH8_IGP_M 0x104D +#define E1000_DEV_ID_ICH9_IGP_AMT 0x10BD +#define E1000_DEV_ID_ICH9_BM 0x10E5 +#define E1000_DEV_ID_ICH9_IGP_M_AMT 0x10F5 +#define E1000_DEV_ID_ICH9_IGP_M 0x10BF +#define E1000_DEV_ID_ICH9_IGP_M_V 0x10CB +#define E1000_DEV_ID_ICH9_IGP_C 0x294C +#define E1000_DEV_ID_ICH9_IFE 0x10C0 +#define E1000_DEV_ID_ICH9_IFE_GT 0x10C3 +#define E1000_DEV_ID_ICH9_IFE_G 0x10C2 +#define E1000_DEV_ID_ICH10_R_BM_LM 0x10CC +#define E1000_DEV_ID_ICH10_R_BM_LF 0x10CD +#define E1000_DEV_ID_ICH10_R_BM_V 0x10CE +#define E1000_DEV_ID_ICH10_D_BM_LM 0x10DE +#define E1000_DEV_ID_ICH10_D_BM_LF 0x10DF +#define E1000_DEV_ID_ICH10_D_BM_V 0x1525 +#define E1000_DEV_ID_PCH_M_HV_LM 0x10EA +#define E1000_DEV_ID_PCH_M_HV_LC 0x10EB +#define E1000_DEV_ID_PCH_D_HV_DM 0x10EF +#define E1000_DEV_ID_PCH_D_HV_DC 0x10F0 +#define E1000_DEV_ID_PCH2_LV_LM 0x1502 +#define E1000_DEV_ID_PCH2_LV_V 0x1503 +#define 
E1000_DEV_ID_PCH_LPT_I217_LM 0x153A +#define E1000_DEV_ID_PCH_LPT_I217_V 0x153B +#define E1000_DEV_ID_PCH_LPTLP_I218_LM 0x155A +#define E1000_DEV_ID_PCH_LPTLP_I218_V 0x1559 +#define E1000_DEV_ID_PCH_I218_LM2 0x15A0 +#define E1000_DEV_ID_PCH_I218_V2 0x15A1 +#define E1000_DEV_ID_PCH_I218_LM3 0x15A2 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_I218_V3 0x15A3 /* Wildcat Point PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM 0x156F /* SPT PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V 0x1570 /* SPT PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* SPT-H PCH */ +#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* SPT-H PCH */ + +#define E1000_REVISION_4 4 + +#define E1000_FUNC_1 1 + +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0 +#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3 + +enum e1000_mac_type { + e1000_82571, + e1000_82572, + e1000_82573, + e1000_82574, + e1000_82583, + e1000_80003es2lan, + e1000_ich8lan, + e1000_ich9lan, + e1000_ich10lan, + e1000_pchlan, + e1000_pch2lan, + e1000_pch_lpt, + e1000_pch_spt, +}; + +enum e1000_media_type { + e1000_media_type_unknown = 0, + e1000_media_type_copper = 1, + e1000_media_type_fiber = 2, + e1000_media_type_internal_serdes = 3, + e1000_num_media_types +}; + +enum e1000_nvm_type { + e1000_nvm_unknown = 0, + e1000_nvm_none, + e1000_nvm_eeprom_spi, + e1000_nvm_flash_hw, + e1000_nvm_flash_sw +}; + +enum e1000_nvm_override { + e1000_nvm_override_none = 0, + e1000_nvm_override_spi_small, + e1000_nvm_override_spi_large +}; + +enum e1000_phy_type { + e1000_phy_unknown = 0, + e1000_phy_none, + e1000_phy_m88, + e1000_phy_igp, + e1000_phy_igp_2, + e1000_phy_gg82563, + e1000_phy_igp_3, + e1000_phy_ife, + e1000_phy_bm, + e1000_phy_82578, + e1000_phy_82577, + e1000_phy_82579, + e1000_phy_i217, +}; + +enum e1000_bus_width { + e1000_bus_width_unknown = 0, + e1000_bus_width_pcie_x1, + e1000_bus_width_pcie_x2, + e1000_bus_width_pcie_x4 = 4, + e1000_bus_width_pcie_x8 = 8, + e1000_bus_width_32, + e1000_bus_width_64, + e1000_bus_width_reserved +}; + +enum e1000_1000t_rx_status { + e1000_1000t_rx_status_not_ok = 0, + e1000_1000t_rx_status_ok, + e1000_1000t_rx_status_undefined = 0xFF +}; + +enum e1000_rev_polarity { + e1000_rev_polarity_normal = 0, + e1000_rev_polarity_reversed, + e1000_rev_polarity_undefined = 0xFF +}; + +enum e1000_fc_mode { + e1000_fc_none = 0, + e1000_fc_rx_pause, + e1000_fc_tx_pause, + e1000_fc_full, + e1000_fc_default = 0xFF +}; + +enum e1000_ms_type { + e1000_ms_hw_default = 0, + e1000_ms_force_master, + e1000_ms_force_slave, + e1000_ms_auto +}; + +enum e1000_smart_speed { + e1000_smart_speed_default = 0, + e1000_smart_speed_on, + e1000_smart_speed_off +}; + +enum e1000_serdes_link_state { + e1000_serdes_link_down = 0, + e1000_serdes_link_autoneg_progress, + e1000_serdes_link_autoneg_complete, + e1000_serdes_link_forced_up +}; + +/* Receive Descriptor - Extended */ +union e1000_rx_desc_extended { + struct { + __le64 buffer_addr; + __le64 reserved; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length; + __le16 vlan; /* VLAN tag */ + } upper; + } wb; /* writeback */ +}; + +#define MAX_PS_BUFFERS 4 + +/* Number of packet split data buffers (not including the header buffer) */ +#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1) + +/* Receive Descriptor - Packet Split */ +union e1000_rx_desc_packet_split { + struct { + /* one 
buffer for protocol header(s), three data buffers */ + __le64 buffer_addr[MAX_PS_BUFFERS]; + } read; + struct { + struct { + __le32 mrq; /* Multiple Rx Queues */ + union { + __le32 rss; /* RSS Hash */ + struct { + __le16 ip_id; /* IP id */ + __le16 csum; /* Packet Checksum */ + } csum_ip; + } hi_dword; + } lower; + struct { + __le32 status_error; /* ext status/error */ + __le16 length0; /* length of buffer 0 */ + __le16 vlan; /* VLAN tag */ + } middle; + struct { + __le16 header_status; + /* length of buffers 1-3 */ + __le16 length[PS_PAGE_BUFFERS]; + } upper; + __le64 reserved; + } wb; /* writeback */ +}; + +/* Transmit Descriptor */ +struct e1000_tx_desc { + __le64 buffer_addr; /* Address of the descriptor's data buffer */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 cso; /* Checksum offset */ + u8 cmd; /* Descriptor control */ + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 css; /* Checksum start */ + __le16 special; + } fields; + } upper; +}; + +/* Offload Context Descriptor */ +struct e1000_context_desc { + union { + __le32 ip_config; + struct { + u8 ipcss; /* IP checksum start */ + u8 ipcso; /* IP checksum offset */ + __le16 ipcse; /* IP checksum end */ + } ip_fields; + } lower_setup; + union { + __le32 tcp_config; + struct { + u8 tucss; /* TCP checksum start */ + u8 tucso; /* TCP checksum offset */ + __le16 tucse; /* TCP checksum end */ + } tcp_fields; + } upper_setup; + __le32 cmd_and_length; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 hdr_len; /* Header length */ + __le16 mss; /* Maximum segment size */ + } fields; + } tcp_seg_setup; +}; + +/* Offload data descriptor */ +struct e1000_data_desc { + __le64 buffer_addr; /* Address of the descriptor's buffer address */ + union { + __le32 data; + struct { + __le16 length; /* Data buffer length */ + u8 typ_len_ext; + u8 cmd; + } flags; + } lower; + union { + __le32 data; + struct { + u8 status; /* Descriptor status */ + u8 popts; /* Packet Options */ + __le16 special; + } fields; + } upper; +}; + +/* Statistics counters collected by the MAC */ +struct e1000_hw_stats { + u64 crcerrs; + u64 algnerrc; + u64 symerrs; + u64 rxerrc; + u64 mpc; + u64 scc; + u64 ecol; + u64 mcc; + u64 latecol; + u64 colc; + u64 dc; + u64 tncrs; + u64 sec; + u64 cexterr; + u64 rlec; + u64 xonrxc; + u64 xontxc; + u64 xoffrxc; + u64 xofftxc; + u64 fcruc; + u64 prc64; + u64 prc127; + u64 prc255; + u64 prc511; + u64 prc1023; + u64 prc1522; + u64 gprc; + u64 bprc; + u64 mprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 rnbc; + u64 ruc; + u64 rfc; + u64 roc; + u64 rjc; + u64 mgprc; + u64 mgpdc; + u64 mgptc; + u64 tor; + u64 tot; + u64 tpr; + u64 tpt; + u64 ptc64; + u64 ptc127; + u64 ptc255; + u64 ptc511; + u64 ptc1023; + u64 ptc1522; + u64 mptc; + u64 bptc; + u64 tsctc; + u64 tsctfc; + u64 iac; + u64 icrxptc; + u64 icrxatc; + u64 ictxptc; + u64 ictxatc; + u64 ictxqec; + u64 ictxqmtc; + u64 icrxdmtc; + u64 icrxoc; +}; + +struct e1000_phy_stats { + u32 idle_errors; + u32 receive_errors; +}; + +struct e1000_host_mng_dhcp_cookie { + u32 signature; + u8 status; + u8 reserved0; + u16 vlan_id; + u32 reserved1; + u16 reserved2; + u8 reserved3; + u8 checksum; +}; + +/* Host Interface "Rev 1" */ +struct e1000_host_command_header { + u8 command_id; + u8 command_length; + u8 command_options; + u8 checksum; +}; + +#define E1000_HI_MAX_DATA_LENGTH 252 +struct e1000_host_command_info { + struct e1000_host_command_header command_header; + u8 
command_data[E1000_HI_MAX_DATA_LENGTH]; +}; + +/* Host Interface "Rev 2" */ +struct e1000_host_mng_command_header { + u8 command_id; + u8 checksum; + u16 reserved1; + u16 reserved2; + u16 command_length; +}; + +#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 +struct e1000_host_mng_command_info { + struct e1000_host_mng_command_header command_header; + u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; +}; + +#include "mac.h" +#include "phy.h" +#include "nvm.h" +#include "manage.h" + +/* Function pointers for the MAC. */ +struct e1000_mac_operations { + s32 (*id_led_init)(struct e1000_hw *); + s32 (*blink_led)(struct e1000_hw *); + bool (*check_mng_mode)(struct e1000_hw *); + s32 (*check_for_link)(struct e1000_hw *); + s32 (*cleanup_led)(struct e1000_hw *); + void (*clear_hw_cntrs)(struct e1000_hw *); + void (*clear_vfta)(struct e1000_hw *); + s32 (*get_bus_info)(struct e1000_hw *); + void (*set_lan_id)(struct e1000_hw *); + s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *); + s32 (*led_on)(struct e1000_hw *); + s32 (*led_off)(struct e1000_hw *); + void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32); + s32 (*reset_hw)(struct e1000_hw *); + s32 (*init_hw)(struct e1000_hw *); + s32 (*setup_link)(struct e1000_hw *); + s32 (*setup_physical_interface)(struct e1000_hw *); + s32 (*setup_led)(struct e1000_hw *); + void (*write_vfta)(struct e1000_hw *, u32, u32); + void (*config_collision_dist)(struct e1000_hw *); + int (*rar_set)(struct e1000_hw *, u8 *, u32); + s32 (*read_mac_addr)(struct e1000_hw *); + u32 (*rar_get_count)(struct e1000_hw *); +}; + +/* When to use various PHY register access functions: + * + * Func Caller + * Function Does Does When to use + * ~~~~~~~~~~~~ ~~~~~ ~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * X_reg L,P,A n/a for simple PHY reg accesses + * X_reg_locked P,A L for multiple accesses of different regs + * on different pages + * X_reg_page A L,P for multiple accesses of different regs + * on the same page + * + * Where X=[read|write], L=locking, P=sets page, A=register access + * + */ +struct e1000_phy_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*cfg_on_link_up)(struct e1000_hw *); + s32 (*check_polarity)(struct e1000_hw *); + s32 (*check_reset_block)(struct e1000_hw *); + s32 (*commit)(struct e1000_hw *); + s32 (*force_speed_duplex)(struct e1000_hw *); + s32 (*get_cfg_done)(struct e1000_hw *hw); + s32 (*get_cable_length)(struct e1000_hw *); + s32 (*get_info)(struct e1000_hw *); + s32 (*set_page)(struct e1000_hw *, u16); + s32 (*read_reg)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_locked)(struct e1000_hw *, u32, u16 *); + s32 (*read_reg_page)(struct e1000_hw *, u32, u16 *); + void (*release)(struct e1000_hw *); + s32 (*reset)(struct e1000_hw *); + s32 (*set_d0_lplu_state)(struct e1000_hw *, bool); + s32 (*set_d3_lplu_state)(struct e1000_hw *, bool); + s32 (*write_reg)(struct e1000_hw *, u32, u16); + s32 (*write_reg_locked)(struct e1000_hw *, u32, u16); + s32 (*write_reg_page)(struct e1000_hw *, u32, u16); + void (*power_up)(struct e1000_hw *); + void (*power_down)(struct e1000_hw *); +}; + +/* Function pointers for the NVM. 
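+ * read() and write() take (hw, word offset, word count, data) and
+ * move 16-bit NVM words; acquire()/release() bracket the access.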
*/ +struct e1000_nvm_operations { + s32 (*acquire)(struct e1000_hw *); + s32 (*read)(struct e1000_hw *, u16, u16, u16 *); + void (*release)(struct e1000_hw *); + void (*reload)(struct e1000_hw *); + s32 (*update)(struct e1000_hw *); + s32 (*valid_led_default)(struct e1000_hw *, u16 *); + s32 (*validate)(struct e1000_hw *); + s32 (*write)(struct e1000_hw *, u16, u16, u16 *); +}; + +struct e1000_mac_info { + struct e1000_mac_operations ops; + u8 addr[ETH_ALEN]; + u8 perm_addr[ETH_ALEN]; + + enum e1000_mac_type type; + + u32 collision_delta; + u32 ledctl_default; + u32 ledctl_mode1; + u32 ledctl_mode2; + u32 mc_filter_type; + u32 tx_packet_delta; + u32 txcw; + + u16 current_ifs_val; + u16 ifs_max_val; + u16 ifs_min_val; + u16 ifs_ratio; + u16 ifs_step_size; + u16 mta_reg_count; + + /* Maximum size of the MTA register table in all supported adapters */ +#define MAX_MTA_REG 128 + u32 mta_shadow[MAX_MTA_REG]; + u16 rar_entry_count; + + u8 forced_speed_duplex; + + bool adaptive_ifs; + bool has_fwsm; + bool arc_subsystem_valid; + bool autoneg; + bool autoneg_failed; + bool get_link_status; + bool in_ifs_mode; + bool serdes_has_link; + bool tx_pkt_filtering; + enum e1000_serdes_link_state serdes_link_state; +}; + +struct e1000_phy_info { + struct e1000_phy_operations ops; + + enum e1000_phy_type type; + + enum e1000_1000t_rx_status local_rx; + enum e1000_1000t_rx_status remote_rx; + enum e1000_ms_type ms_type; + enum e1000_ms_type original_ms_type; + enum e1000_rev_polarity cable_polarity; + enum e1000_smart_speed smart_speed; + + u32 addr; + u32 id; + u32 reset_delay_us; /* in usec */ + u32 revision; + + enum e1000_media_type media_type; + + u16 autoneg_advertised; + u16 autoneg_mask; + u16 cable_length; + u16 max_cable_length; + u16 min_cable_length; + + u8 mdix; + + bool disable_polarity_correction; + bool is_mdix; + bool polarity_correction; + bool speed_downgraded; + bool autoneg_wait_to_complete; +}; + +struct e1000_nvm_info { + struct e1000_nvm_operations ops; + + enum e1000_nvm_type type; + enum e1000_nvm_override override; + + u32 flash_bank_size; + u32 flash_base_addr; + + u16 word_size; + u16 delay_usec; + u16 address_bits; + u16 opcode_bits; + u16 page_size; +}; + +struct e1000_bus_info { + enum e1000_bus_width width; + + u16 func; +}; + +struct e1000_fc_info { + u32 high_water; /* Flow control high-water mark */ + u32 low_water; /* Flow control low-water mark */ + u16 pause_time; /* Flow control pause timer */ + u16 refresh_time; /* Flow control refresh timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ + enum e1000_fc_mode current_mode; /* FC mode in effect */ + enum e1000_fc_mode requested_mode; /* FC mode requested by caller */ +}; + +struct e1000_dev_spec_82571 { + bool laa_is_present; + u32 smb_counter; +}; + +struct e1000_dev_spec_80003es2lan { + bool mdic_wa_enable; +}; + +struct e1000_shadow_ram { + u16 value; + bool modified; +}; + +#define E1000_ICH8_SHADOW_RAM_WORDS 2048 + +/* I218 PHY Ultra Low Power (ULP) states */ +enum e1000_ulp_state { + e1000_ulp_state_unknown, + e1000_ulp_state_off, + e1000_ulp_state_on, +}; + +struct e1000_dev_spec_ich8lan { + bool kmrn_lock_loss_workaround_enabled; + struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS]; + bool nvm_k1_enabled; + bool eee_disable; + u16 eee_lp_ability; + enum e1000_ulp_state ulp_state; +}; + +struct e1000_hw { + struct e1000_adapter *adapter; + + void __iomem *hw_addr; + void __iomem *flash_address; + + struct e1000_mac_info mac; + struct e1000_fc_info fc; + 
struct e1000_phy_info phy; + struct e1000_nvm_info nvm; + struct e1000_bus_info bus; + struct e1000_host_mng_dhcp_cookie mng_cookie; + + union { + struct e1000_dev_spec_82571 e82571; + struct e1000_dev_spec_80003es2lan e80003es2lan; + struct e1000_dev_spec_ich8lan ich8lan; + } dev_spec; +}; + +#include "82571.h" +#include "80003es2lan.h" +#include "ich8lan.h" + +#endif diff --git a/addons/e1000e/src/4.4.180/ich8lan.c b/addons/e1000e/src/4.4.180/ich8lan.c new file mode 100644 index 00000000..485b9cc5 --- /dev/null +++ b/addons/e1000e/src/4.4.180/ich8lan.c @@ -0,0 +1,5873 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* 82562G 10/100 Network Connection + * 82562G-2 10/100 Network Connection + * 82562GT 10/100 Network Connection + * 82562GT-2 10/100 Network Connection + * 82562V 10/100 Network Connection + * 82562V-2 10/100 Network Connection + * 82566DC-2 Gigabit Network Connection + * 82566DC Gigabit Network Connection + * 82566DM-2 Gigabit Network Connection + * 82566DM Gigabit Network Connection + * 82566MC Gigabit Network Connection + * 82566MM Gigabit Network Connection + * 82567LM Gigabit Network Connection + * 82567LF Gigabit Network Connection + * 82567V Gigabit Network Connection + * 82567LM-2 Gigabit Network Connection + * 82567LF-2 Gigabit Network Connection + * 82567V-2 Gigabit Network Connection + * 82567LF-3 Gigabit Network Connection + * 82567LM-3 Gigabit Network Connection + * 82567LM-4 Gigabit Network Connection + * 82577LM Gigabit Network Connection + * 82577LC Gigabit Network Connection + * 82578DM Gigabit Network Connection + * 82578DC Gigabit Network Connection + * 82579LM Gigabit Network Connection + * 82579V Gigabit Network Connection + * Ethernet Connection I217-LM + * Ethernet Connection I217-V + * Ethernet Connection I218-V + * Ethernet Connection I218-LM + * Ethernet Connection (2) I218-LM + * Ethernet Connection (2) I218-V + * Ethernet Connection (3) I218-LM + * Ethernet Connection (3) I218-V + */ + +#include "e1000.h" + +/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */ +/* Offset 04h HSFSTS */ +union ich8_hws_flash_status { + struct ich8_hsfsts { + u16 flcdone:1; /* bit 0 Flash Cycle Done */ + u16 flcerr:1; /* bit 1 Flash Cycle Error */ + u16 dael:1; /* bit 2 Direct Access error Log */ + u16 berasesz:2; /* bit 4:3 Sector Erase Size */ + u16 flcinprog:1; /* bit 5 flash cycle in Progress */ + u16 reserved1:2; /* bit 13:6 Reserved */ + u16 reserved2:6; /* bit 13:6 Reserved */ + u16 fldesvalid:1; /* bit 14 Flash Descriptor Valid */ + u16 flockdn:1; /* bit 15 Flash Config Lock-Down */ + } hsf_status; + u16 regval; +}; + +/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */ +/* Offset 06h FLCTL */ +union ich8_hws_flash_ctrl { + struct ich8_hsflctl { + u16 flcgo:1; /* 
0 Flash Cycle Go */ + u16 flcycle:2; /* 2:1 Flash Cycle */ + u16 reserved:5; /* 7:3 Reserved */ + u16 fldbcount:2; /* 9:8 Flash Data Byte Count */ + u16 flockdn:6; /* 15:10 Reserved */ + } hsf_ctrl; + u16 regval; +}; + +/* ICH Flash Region Access Permissions */ +union ich8_hws_flash_regacc { + struct ich8_flracc { + u32 grra:8; /* 0:7 GbE region Read Access */ + u32 grwa:8; /* 8:15 GbE region Write Access */ + u32 gmrag:8; /* 23:16 GbE Master Read Access Grant */ + u32 gmwag:8; /* 31:24 GbE Master Write Access Grant */ + } hsf_flregacc; + u16 regval; +}; + +/* ICH Flash Protected Region */ +union ich8_flash_protected_range { + struct ich8_pr { + u32 base:13; /* 0:12 Protected Range Base */ + u32 reserved1:2; /* 13:14 Reserved */ + u32 rpe:1; /* 15 Read Protection Enable */ + u32 limit:13; /* 16:28 Protected Range Limit */ + u32 reserved2:2; /* 29:30 Reserved */ + u32 wpe:1; /* 31 Write Protection Enable */ + } range; + u32 regval; +}; + +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw); +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw); +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank); +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte); +static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 *data); +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data); +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data); +static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data); +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 *data); +static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, + u32 offset, u32 data); +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword); +static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw); +static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw); +static s32 e1000_led_on_ich8lan(struct e1000_hw *hw); +static s32 e1000_led_off_ich8lan(struct e1000_hw *hw); +static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw); +static s32 e1000_setup_led_pchlan(struct e1000_hw *hw); +static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw); +static s32 e1000_led_on_pchlan(struct e1000_hw *hw); +static s32 e1000_led_off_pchlan(struct e1000_hw *hw); +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active); +static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw); +static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw); +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link); +static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw); +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw); +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw); +static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index); +static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index); +static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw); +static s32 e1000_k1_workaround_lv(struct e1000_hw *hw); +static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate); +static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force); +static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw); +static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state); + +static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg) +{ + return 
readw(hw->flash_address + reg); +} + +static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg) +{ + return readl(hw->flash_address + reg); +} + +static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val) +{ + writew(val, hw->flash_address + reg); +} + +static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + writel(val, hw->flash_address + reg); +} + +#define er16flash(reg) __er16flash(hw, (reg)) +#define er32flash(reg) __er32flash(hw, (reg)) +#define ew16flash(reg, val) __ew16flash(hw, (reg), (val)) +#define ew32flash(reg, val) __ew32flash(hw, (reg), (val)) + +/** + * e1000_phy_is_accessible_pchlan - Check if able to access PHY registers + * @hw: pointer to the HW structure + * + * Test access to the PHY registers by reading the PHY ID registers. If + * the PHY ID is already known (e.g. resume path) compare it with known ID, + * otherwise assume the read PHY ID is correct if it is valid. + * + * Assumes the sw/fw/hw semaphore is already acquired. + **/ +static bool e1000_phy_is_accessible_pchlan(struct e1000_hw *hw) +{ + u16 phy_reg = 0; + u32 phy_id = 0; + s32 ret_val = 0; + u16 retry_count; + u32 mac_reg = 0; + + for (retry_count = 0; retry_count < 2; retry_count++) { + ret_val = e1e_rphy_locked(hw, MII_PHYSID1, &phy_reg); + if (ret_val || (phy_reg == 0xFFFF)) + continue; + phy_id = (u32)(phy_reg << 16); + + ret_val = e1e_rphy_locked(hw, MII_PHYSID2, &phy_reg); + if (ret_val || (phy_reg == 0xFFFF)) { + phy_id = 0; + continue; + } + phy_id |= (u32)(phy_reg & PHY_REVISION_MASK); + break; + } + + if (hw->phy.id) { + if (hw->phy.id == phy_id) + goto out; + } else if (phy_id) { + hw->phy.id = phy_id; + hw->phy.revision = (u32)(phy_reg & ~PHY_REVISION_MASK); + goto out; + } + + /* In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again. + */ + if (hw->mac.type < e1000_pch_lpt) { + hw->phy.ops.release(hw); + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (!ret_val) + ret_val = e1000e_get_phy_id(hw); + hw->phy.ops.acquire(hw); + } + + if (ret_val) + return false; +out: + if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { + /* Only unforce SMBus if ME is not active */ + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + /* Unforce SMBus mode in PHY */ + e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + } + } + + return true; +} + +/** + * e1000_toggle_lanphypc_pch_lpt - toggle the LANPHYPC pin value + * @hw: pointer to the HW structure + * + * Toggling the LANPHYPC pin value fully power-cycles the PHY and is + * used to reset the PHY to a quiescent state when necessary. 
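+ *
+ * The toggle asserts LANPHYPC_OVERRIDE with LANPHYPC_VALUE clear
+ * and then releases the override; pch_lpt and newer parts poll
+ * CTRL_EXT.LPCD for completion before the final settle delay.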
+ **/ +static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw) +{ + u32 mac_reg; + + /* Set Phy Config Counter to 50msec */ + mac_reg = er32(FEXTNVM3); + mac_reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + mac_reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + ew32(FEXTNVM3, mac_reg); + + /* Toggle LANPHYPC Value bit */ + mac_reg = er32(CTRL); + mac_reg |= E1000_CTRL_LANPHYPC_OVERRIDE; + mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE; + ew32(CTRL, mac_reg); + e1e_flush(); + usleep_range(10, 20); + mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE; + ew32(CTRL, mac_reg); + e1e_flush(); + + if (hw->mac.type < e1000_pch_lpt) { + msleep(50); + } else { + u16 count = 20; + + do { + usleep_range(5000, 10000); + } while (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LPCD) && count--); + + msleep(30); + } +} + +/** + * e1000_init_phy_workarounds_pchlan - PHY initialization workarounds + * @hw: pointer to the HW structure + * + * Workarounds/flow necessary for PHY initialization during driver load + * and resume paths. + **/ +static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) +{ + struct e1000_adapter *adapter = hw->adapter; + u32 mac_reg, fwsm = er32(FWSM); + s32 ret_val; + + /* Gate automatic PHY configuration by hardware on managed and + * non-managed 82579 and newer adapters. + */ + e1000_gate_hw_phy_config_ich8lan(hw, true); + + /* It is not possible to be certain of the current state of ULP + * so forcibly disable it. + */ + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown; + e1000_disable_ulp_lpt_lp(hw, true); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to initialize PHY flow\n"); + goto out; + } + + /* The MAC-PHY interconnect may be in SMBus mode. If the PHY is + * inaccessible and resetting the PHY is not blocked, toggle the + * LANPHYPC Value bit to force the interconnect to PCIe mode. + */ + switch (hw->mac.type) { + case e1000_pch_lpt: + case e1000_pch_spt: + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* Before toggling LANPHYPC, see if PHY is accessible by + * forcing MAC to SMBus mode first. + */ + mac_reg = er32(CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + /* Wait 50 milliseconds for MAC to finish any retries + * that it might be trying to perform from previous + * attempts to acknowledge any phy read requests. + */ + msleep(50); + + /* fall-through */ + case e1000_pch2lan: + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* fall-through */ + case e1000_pchlan: + if ((hw->mac.type == e1000_pchlan) && + (fwsm & E1000_ICH_FWSM_FW_VALID)) + break; + + if (hw->phy.ops.check_reset_block(hw)) { + e_dbg("Required LANPHYPC toggle blocked by ME\n"); + ret_val = -E1000_ERR_PHY; + break; + } + + /* Toggle LANPHYPC Value bit */ + e1000_toggle_lanphypc_pch_lpt(hw); + if (hw->mac.type >= e1000_pch_lpt) { + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + /* Toggling LANPHYPC brings the PHY out of SMBus mode + * so ensure that the MAC is also out of SMBus mode + */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + if (e1000_phy_is_accessible_pchlan(hw)) + break; + + ret_val = -E1000_ERR_PHY; + } + break; + default: + break; + } + + hw->phy.ops.release(hw); + if (!ret_val) { + + /* Check to see if able to reset PHY. Print error if not */ + if (hw->phy.ops.check_reset_block(hw)) { + e_err("Reset blocked by ME\n"); + goto out; + } + + /* Reset the PHY before any access to it. 
Doing so ensures + * that the PHY is in a known good state before we read/write + * PHY registers. The generic reset is sufficient here, + * because we haven't determined the PHY type yet. + */ + ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + goto out; + + /* On a successful reset, possibly need to wait for the PHY + * to quiesce to an accessible state before returning control + * to the calling function. If the PHY does not quiesce, then + * return E1000E_BLK_PHY_RESET, as this is the condition that + * the PHY is in. + */ + ret_val = hw->phy.ops.check_reset_block(hw); + if (ret_val) + e_err("ME blocked access to PHY after reset\n"); + } + +out: + /* Ungate automatic PHY configuration on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(fwsm & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + return ret_val; +} + +/** + * e1000_init_phy_params_pchlan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. + **/ +static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.set_page = e1000_set_page_igp; + phy->ops.read_reg = e1000_read_phy_reg_hv; + phy->ops.read_reg_locked = e1000_read_phy_reg_hv_locked; + phy->ops.read_reg_page = e1000_read_phy_reg_page_hv; + phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan; + phy->ops.write_reg = e1000_write_phy_reg_hv; + phy->ops.write_reg_locked = e1000_write_phy_reg_hv_locked; + phy->ops.write_reg_page = e1000_write_phy_reg_page_hv; + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + + phy->id = e1000_phy_unknown; + + ret_val = e1000_init_phy_workarounds_pchlan(hw); + if (ret_val) + return ret_val; + + if (phy->id == e1000_phy_unknown) + switch (hw->mac.type) { + default: + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK)) + break; + /* fall-through */ + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + /* In case the PHY needs to be in mdio slow mode, + * set slow mode and try to get the PHY id again.
+ */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + break; + } + phy->type = e1000e_get_phy_type_from_id(phy->id); + + switch (phy->type) { + case e1000_phy_82577: + case e1000_phy_82579: + case e1000_phy_i217: + phy->ops.check_polarity = e1000_check_polarity_82577; + phy->ops.force_speed_duplex = + e1000_phy_force_speed_duplex_82577; + phy->ops.get_cable_length = e1000_get_cable_length_82577; + phy->ops.get_info = e1000_get_phy_info_82577; + phy->ops.commit = e1000e_phy_sw_reset; + break; + case e1000_phy_82578: + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; + phy->ops.get_cable_length = e1000e_get_cable_length_m88; + phy->ops.get_info = e1000e_get_phy_info_m88; + break; + default: + ret_val = -E1000_ERR_PHY; + break; + } + + return ret_val; +} + +/** + * e1000_init_phy_params_ich8lan - Initialize PHY function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific PHY parameters and function pointers. + **/ +static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 i = 0; + + phy->addr = 1; + phy->reset_delay_us = 100; + + phy->ops.power_up = e1000_power_up_phy_copper; + phy->ops.power_down = e1000_power_down_phy_copper_ich8lan; + + /* We may need to do this twice - once for IGP and if that fails, + * we'll set BM func pointers and try again + */ + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + phy->ops.write_reg = e1000e_write_phy_reg_bm; + phy->ops.read_reg = e1000e_read_phy_reg_bm; + ret_val = e1000e_determine_phy_address(hw); + if (ret_val) { + e_dbg("Cannot determine PHY addr. Erroring out\n"); + return ret_val; + } + } + + phy->id = 0; + while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) && + (i++ < 100)) { + usleep_range(1000, 2000); + ret_val = e1000e_get_phy_id(hw); + if (ret_val) + return ret_val; + } + + /* Verify phy id */ + switch (phy->id) { + case IGP03E1000_E_PHY_ID: + phy->type = e1000_phy_igp_3; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked; + phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked; + phy->ops.get_info = e1000e_get_phy_info_igp; + phy->ops.check_polarity = e1000_check_polarity_igp; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy->type = e1000_phy_ife; + phy->autoneg_mask = E1000_ALL_NOT_GIG; + phy->ops.get_info = e1000_get_phy_info_ife; + phy->ops.check_polarity = e1000_check_polarity_ife; + phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife; + break; + case BME1000_E_PHY_ID: + phy->type = e1000_phy_bm; + phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT; + phy->ops.read_reg = e1000e_read_phy_reg_bm; + phy->ops.write_reg = e1000e_write_phy_reg_bm; + phy->ops.commit = e1000e_phy_sw_reset; + phy->ops.get_info = e1000e_get_phy_info_m88; + phy->ops.check_polarity = e1000_check_polarity_m88; + phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88; + break; + default: + return -E1000_ERR_PHY; + } + + return 0; +} + +/** + * e1000_init_nvm_params_ich8lan - Initialize NVM function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific NVM parameters and function + * pointers. 
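+ * As a worked example of the SPT sizing below (symbolic, since the + * multiplier is a driver constant): a 5-bit STRAP field value of 3 selects + * (3 + 1) * NVM_SIZE_MULTIPLIER bytes in total, which is split into two + * equal banks and then converted from bytes to 16-bit words for + * flash_bank_size.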
+ **/ +static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 gfpreg, sector_base_addr, sector_end_addr; + u16 i; + u32 nvm_size; + + nvm->type = e1000_nvm_flash_sw; + + if (hw->mac.type == e1000_pch_spt) { + /* in SPT, gfpreg doesn't exist. NVM size is taken from the + * STRAP register. This is because in SPT the GbE Flash region + * is no longer accessed through the flash registers. Instead, + * the mechanism has changed, and the Flash region access + * registers are now implemented in GbE memory space. + */ + nvm->flash_base_addr = 0; + nvm_size = (((er32(STRAP) >> 1) & 0x1F) + 1) + * NVM_SIZE_MULTIPLIER; + nvm->flash_bank_size = nvm_size / 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + /* Set the base address for flash register access */ + hw->flash_address = hw->hw_addr + E1000_FLASH_BASE_ADDR; + } else { + /* Can't read flash registers if register set isn't mapped. */ + if (!hw->flash_address) { + e_dbg("ERROR: Flash registers not mapped\n"); + return -E1000_ERR_CONFIG; + } + + gfpreg = er32flash(ICH_FLASH_GFPREG); + + /* sector_X_addr is a "sector"-aligned address (4096 bytes) + * Add 1 to sector_end_addr since this sector is included in + * the overall size. + */ + sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK; + sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1; + + /* flash_base_addr is byte-aligned */ + nvm->flash_base_addr = sector_base_addr + << FLASH_SECTOR_ADDR_SHIFT; + + /* find total size of the NVM, then cut in half since the total + * size represents two separate NVM banks. + */ + nvm->flash_bank_size = ((sector_end_addr - sector_base_addr) + << FLASH_SECTOR_ADDR_SHIFT); + nvm->flash_bank_size /= 2; + /* Adjust to word count */ + nvm->flash_bank_size /= sizeof(u16); + } + + nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS; + + /* Clear shadow ram */ + for (i = 0; i < nvm->word_size; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + + return 0; +} + +/** + * e1000_init_mac_params_ich8lan - Initialize MAC function pointers + * @hw: pointer to the HW structure + * + * Initialize family-specific MAC parameters and function + * pointers. 
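+ * Note that rar_entry_count is adjusted per family below: ICH8 exposes one + * entry fewer than E1000_ICH_RAR_ENTRIES, 82579 uses E1000_PCH2_RAR_ENTRIES, + * and LPT/SPT use E1000_PCH_LPT_RAR_ENTRIES together with the SHRA-aware + * rar_set/rar_get_count callbacks.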
+ **/ +static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + /* Set media type function pointer */ + hw->phy.media_type = e1000_media_type_copper; + + /* Set mta register count */ + mac->mta_reg_count = 32; + /* Set rar entry count */ + mac->rar_entry_count = E1000_ICH_RAR_ENTRIES; + if (mac->type == e1000_ich8lan) + mac->rar_entry_count--; + /* FWSM register */ + mac->has_fwsm = true; + /* ARC subsystem not supported */ + mac->arc_subsystem_valid = false; + /* Adaptive IFS supported */ + mac->adaptive_ifs = true; + + /* LED and other operations */ + switch (mac->type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan; + /* ID LED init */ + mac->ops.id_led_init = e1000e_id_led_init_generic; + /* blink LED */ + mac->ops.blink_led = e1000e_blink_led_generic; + /* setup LED */ + mac->ops.setup_led = e1000e_setup_led_generic; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_ich8lan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_ich8lan; + mac->ops.led_off = e1000_led_off_ich8lan; + break; + case e1000_pch2lan: + mac->rar_entry_count = E1000_PCH2_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch2lan; + /* fall-through */ + case e1000_pch_lpt: + case e1000_pch_spt: + case e1000_pchlan: + /* check management mode */ + mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; + /* ID LED init */ + mac->ops.id_led_init = e1000_id_led_init_pchlan; + /* setup LED */ + mac->ops.setup_led = e1000_setup_led_pchlan; + /* cleanup LED */ + mac->ops.cleanup_led = e1000_cleanup_led_pchlan; + /* turn on/off LED */ + mac->ops.led_on = e1000_led_on_pchlan; + mac->ops.led_off = e1000_led_off_pchlan; + break; + default: + break; + } + + if ((mac->type == e1000_pch_lpt) || (mac->type == e1000_pch_spt)) { + mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES; + mac->ops.rar_set = e1000_rar_set_pch_lpt; + mac->ops.setup_physical_interface = + e1000_setup_copper_link_pch_lpt; + mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt; + } + + /* Enable PCS Lock-loss workaround for ICH8 */ + if (mac->type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true); + + return 0; +} + +/** + * __e1000_access_emi_reg_locked - Read/write EMI register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: pointer to value to read/write from/to the EMI address + * @read: boolean flag to indicate read or write + * + * This helper function assumes the SW/FW/HW Semaphore is already acquired. + **/ +static s32 __e1000_access_emi_reg_locked(struct e1000_hw *hw, u16 address, + u16 *data, bool read) +{ + s32 ret_val; + + ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR, address); + if (ret_val) + return ret_val; + + if (read) + ret_val = e1e_rphy_locked(hw, I82579_EMI_DATA, data); + else + ret_val = e1e_wphy_locked(hw, I82579_EMI_DATA, *data); + + return ret_val; +} + +/** + * e1000_read_emi_reg_locked - Read Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be read from the EMI address + * + * Assumes the SW/FW/HW Semaphore is already acquired. 
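+ * Typical usage with the semaphore held (as done for EEE further down): + * + * ret_val = e1000_read_emi_reg_locked(hw, I82579_EEE_LP_ABILITY, + * &dev_spec->eee_lp_ability);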
+ **/ +s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data) +{ + return __e1000_access_emi_reg_locked(hw, addr, data, true); +} + +/** + * e1000_write_emi_reg_locked - Write Extended Management Interface register + * @hw: pointer to the HW structure + * @addr: EMI address to program + * @data: value to be written to the EMI address + * + * Assumes the SW/FW/HW Semaphore is already acquired. + **/ +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data) +{ + return __e1000_access_emi_reg_locked(hw, addr, &data, false); +} + +/** + * e1000_set_eee_pchlan - Enable/disable EEE support + * @hw: pointer to the HW structure + * + * Enable/disable EEE based on setting in dev_spec structure, the duplex of + * the link and the EEE capabilities of the link partner. The LPI Control + * register bits will remain set only if/when link is up. + * + * EEE LPI must not be asserted earlier than one second after link is up. + * On 82579, EEE LPI should not be enabled until such time otherwise there + * can be link issues with some switches. Other devices can have EEE LPI + * enabled immediately upon link up since they have a timer in hardware which + * prevents LPI from being asserted too early. + **/ +s32 e1000_set_eee_pchlan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + s32 ret_val; + u16 lpa, pcs_status, adv, adv_addr, lpi_ctrl, data; + + switch (hw->phy.type) { + case e1000_phy_82579: + lpa = I82579_EEE_LP_ABILITY; + pcs_status = I82579_EEE_PCS_STATUS; + adv_addr = I82579_EEE_ADVERTISEMENT; + break; + case e1000_phy_i217: + lpa = I217_EEE_LP_ABILITY; + pcs_status = I217_EEE_PCS_STATUS; + adv_addr = I217_EEE_ADVERTISEMENT; + break; + default: + return 0; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy_locked(hw, I82579_LPI_CTRL, &lpi_ctrl); + if (ret_val) + goto release; + + /* Clear bits that enable EEE in various speeds */ + lpi_ctrl &= ~I82579_LPI_CTRL_ENABLE_MASK; + + /* Enable EEE if not disabled by user */ + if (!dev_spec->eee_disable) { + /* Save off link partner's EEE ability */ + ret_val = e1000_read_emi_reg_locked(hw, lpa, + &dev_spec->eee_lp_ability); + if (ret_val) + goto release; + + /* Read EEE advertisement */ + ret_val = e1000_read_emi_reg_locked(hw, adv_addr, &adv); + if (ret_val) + goto release; + + /* Enable EEE only for speeds in which the link partner is + * EEE capable and for which we advertise EEE. + */ + if (adv & dev_spec->eee_lp_ability & I82579_EEE_1000_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; + + if (adv & dev_spec->eee_lp_ability & I82579_EEE_100_SUPPORTED) { + e1e_rphy_locked(hw, MII_LPA, &data); + if (data & LPA_100FULL) + lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; + else + /* EEE is not supported in 100Half, so ignore + * partner's EEE in 100 ability if full-duplex + * is not advertised. 
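+ * In other words, a partner advertising only 100BASE-TX half duplex is + * treated as having no usable 100 Mbps EEE ability.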
+ */ + dev_spec->eee_lp_ability &= + ~I82579_EEE_100_SUPPORTED; + } + } + + if (hw->phy.type == e1000_phy_82579) { + ret_val = e1000_read_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, + &data); + if (ret_val) + goto release; + + data &= ~I82579_LPI_100_PLL_SHUT; + ret_val = e1000_write_emi_reg_locked(hw, I82579_LPI_PLL_SHUT, + data); + } + + /* R/Clr IEEE MMD 3.1 bits 11:10 - Tx/Rx LPI Received */ + ret_val = e1000_read_emi_reg_locked(hw, pcs_status, &data); + if (ret_val) + goto release; + + ret_val = e1e_wphy_locked(hw, I82579_LPI_CTRL, lpi_ctrl); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_k1_workaround_lpt_lp - K1 workaround on Lynxpoint-LP + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * When K1 is enabled for 1Gbps, the MAC can miss 2 DMA completion indications + * preventing further DMA write requests. Workaround the issue by disabling + * the de-assertion of the clock request when in 1Gbps mode. + * Also, set appropriate Tx re-transmission timeouts for 10 and 100Half link + * speeds in order to avoid Tx hangs. + **/ +static s32 e1000_k1_workaround_lpt_lp(struct e1000_hw *hw, bool link) +{ + u32 fextnvm6 = er32(FEXTNVM6); + u32 status = er32(STATUS); + s32 ret_val = 0; + u16 reg; + + if (link && (status & E1000_STATUS_SPEED_1000)) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = + e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + &reg); + if (ret_val) + goto release; + + ret_val = + e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + reg & + ~E1000_KMRNCTRLSTA_K1_ENABLE); + if (ret_val) + goto release; + + usleep_range(10, 20); + + ew32(FEXTNVM6, fextnvm6 | E1000_FEXTNVM6_REQ_PLL_CLK); + + ret_val = + e1000e_write_kmrn_reg_locked(hw, + E1000_KMRNCTRLSTA_K1_CONFIG, + reg); +release: + hw->phy.ops.release(hw); + } else { + /* clear FEXTNVM6 bit 8 on link down or 10/100 */ + fextnvm6 &= ~E1000_FEXTNVM6_REQ_PLL_CLK; + + if ((hw->phy.revision > 5) || !link || + ((status & E1000_STATUS_SPEED_100) && + (status & E1000_STATUS_FD))) + goto update_fextnvm6; + + ret_val = e1e_rphy(hw, I217_INBAND_CTRL, &reg); + if (ret_val) + return ret_val; + + /* Clear link status transmit timeout */ + reg &= ~I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK; + + if (status & E1000_STATUS_SPEED_100) { + /* Set inband Tx timeout to 5x10us for 100Half */ + reg |= 5 << I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Do not extend the K1 entry latency for 100Half */ + fextnvm6 &= ~E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } else { + /* Set inband Tx timeout to 50x10us for 10Full/Half */ + reg |= 50 << + I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT; + + /* Extend the K1 entry latency for 10 Mbps */ + fextnvm6 |= E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION; + } + + ret_val = e1e_wphy(hw, I217_INBAND_CTRL, reg); + if (ret_val) + return ret_val; + +update_fextnvm6: + ew32(FEXTNVM6, fextnvm6); + } + + return ret_val; +} + +/** + * e1000_platform_pm_pch_lpt - Set platform power management values + * @hw: pointer to the HW structure + * @link: bool indicating link status + * + * Set the Latency Tolerance Reporting (LTR) values for the "PCIe-like" + * GbE MAC in the Lynx Point PCH based on Rx buffer size and link speed + * when link is up (which must not exceed the maximum latency supported + * by the platform), otherwise specify there is no LTR requirement.
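+ * As a worked example of the scale/value encoding computed below: a + * tolerance of 50,000 ns does not fit scale 1 (DIV_ROUND_UP(50000, 2^5) = + * 1563, above the 10-bit limit of 1023), so the loop steps on to scale 2, + * where the value becomes 49 and the encoded latency rounds up to + * 49 * 2^10 = 50176 ns.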
+ * Unlike true-PCIe devices which set the LTR maximum snoop/no-snoop + * latencies in the LTR Extended Capability Structure in the PCIe Extended + * Capability register set, on this device LTR is set by writing the + * equivalent snoop/no-snoop latencies in the LTRV register in the MAC and + * set the SEND bit to send an Intel On-chip System Fabric sideband (IOSF-SB) + * message to the PMC. + **/ +static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) +{ + u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | + link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; + u16 lat_enc = 0; /* latency encoded */ + + if (link) { + u16 speed, duplex, scale = 0; + u16 max_snoop, max_nosnoop; + u16 max_ltr_enc; /* max LTR latency encoded */ + u64 value; + u32 rxa; + + if (!hw->adapter->max_frame_size) { + e_dbg("max_frame_size not set.\n"); + return -E1000_ERR_CONFIG; + } + + hw->mac.ops.get_link_up_info(hw, &speed, &duplex); + if (!speed) { + e_dbg("Speed not set.\n"); + return -E1000_ERR_CONFIG; + } + + /* Rx Packet Buffer Allocation size (KB) */ + rxa = er32(PBA) & E1000_PBA_RXA_MASK; + + /* Determine the maximum latency tolerated by the device. + * + * Per the PCIe spec, the tolerated latencies are encoded as + * a 3-bit encoded scale (only 0-5 are valid) multiplied by + * a 10-bit value (0-1023) to provide a range from 1 ns to + * 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns, + * 1=2^5ns, 2=2^10ns,...5=2^25ns. + */ + rxa *= 512; + value = (rxa > hw->adapter->max_frame_size) ? + (rxa - hw->adapter->max_frame_size) * (16000 / speed) : + 0; + + while (value > PCI_LTR_VALUE_MASK) { + scale++; + value = DIV_ROUND_UP(value, (1 << 5)); + } + if (scale > E1000_LTRV_SCALE_MAX) { + e_dbg("Invalid LTR latency scale %d\n", scale); + return -E1000_ERR_CONFIG; + } + lat_enc = (u16)((scale << PCI_LTR_SCALE_SHIFT) | value); + + /* Determine the maximum latency tolerated by the platform */ + pci_read_config_word(hw->adapter->pdev, E1000_PCI_LTR_CAP_LPT, + &max_snoop); + pci_read_config_word(hw->adapter->pdev, + E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); + max_ltr_enc = max_t(u16, max_snoop, max_nosnoop); + + if (lat_enc > max_ltr_enc) + lat_enc = max_ltr_enc; + } + + /* Set Snoop and No-Snoop latencies the same */ + reg |= lat_enc | (lat_enc << E1000_LTRV_NOSNOOP_SHIFT); + ew32(LTRV, reg); + + return 0; +} + +/** + * e1000_enable_ulp_lpt_lp - configure Ultra Low Power mode for LynxPoint-LP + * @hw: pointer to the HW structure + * @to_sx: boolean indicating a system power state transition to Sx + * + * When link is down, configure ULP mode to significantly reduce the power + * to the PHY. If on a Manageability Engine (ME) enabled system, tell the + * ME firmware to start the ULP configuration. If not on an ME enabled + * system, configure the ULP mode by software. 
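+ * On ME-enabled systems this reduces to setting E1000_H2ME_ULP together + * with E1000_H2ME_ENFORCE_SETTINGS in H2ME; in the software flow the PHY + * and MAC are first forced into SMBus mode and the PHY then commits the + * change by starting its auto configuration via I218_ULP_CONFIG1_START.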
+ */ +s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx) +{ + u32 mac_reg; + s32 ret_val = 0; + u16 phy_reg; + u16 oem_reg = 0; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) || + (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_on)) + return 0; + + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + /* Request ME configure ULP mode in the PHY */ + mac_reg = er32(H2ME); + mac_reg |= E1000_H2ME_ULP | E1000_H2ME_ENFORCE_SETTINGS; + ew32(H2ME, mac_reg); + + goto out; + } + + if (!to_sx) { + int i = 0; + + /* Poll up to 5 seconds for Cable Disconnected indication */ + while (!(er32(FEXT) & E1000_FEXT_PHY_CABLE_DISCONNECTED)) { + /* Bail if link is re-acquired */ + if (er32(STATUS) & E1000_STATUS_LU) + return -E1000_ERR_PHY; + + if (i++ == 100) + break; + + msleep(50); + } + e_dbg("CABLE_DISCONNECTED %s set after %dmsec\n", + (er32(FEXT) & + E1000_FEXT_PHY_CABLE_DISCONNECTED) ? "" : "not", i * 50); + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + /* Force SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Force SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + /* Si workaround for ULP entry flow on i217/rev6 h/w. Enable + * LPLU and disable Gig speed when entering ULP + */ + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, + &oem_reg); + if (ret_val) + goto release; + + phy_reg = oem_reg; + phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; + + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, + phy_reg); + + if (ret_val) + goto release; + } + + /* Set Inband ULP Exit, Reset to SMBus mode and + * Disable SMBus Release on PERST# in PHY + */ + ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); + if (ret_val) + goto release; + phy_reg |= (I218_ULP_CONFIG1_RESET_TO_SMBUS | + I218_ULP_CONFIG1_DISABLE_SMB_PERST); + if (to_sx) { + if (er32(WUFC) & E1000_WUFC_LNKC) + phy_reg |= I218_ULP_CONFIG1_WOL_HOST; + else + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; + + phy_reg |= I218_ULP_CONFIG1_STICKY_ULP; + phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT; + } else { + phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT; + phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP; + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; + } + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Set Disable SMBus Release on PERST# in MAC */ + mac_reg = er32(FEXTNVM7); + mac_reg |= E1000_FEXTNVM7_DISABLE_SMB_PERST; + ew32(FEXTNVM7, mac_reg); + + /* Commit ULP changes in PHY by starting auto ULP configuration */ + phy_reg |= I218_ULP_CONFIG1_START; + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) && + to_sx && (er32(STATUS) & E1000_STATUS_LU)) { + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, + oem_reg); + if (ret_val) + goto release; + } + +release: + hw->phy.ops.release(hw); +out: + if (ret_val) + e_dbg("Error in ULP enable flow: %d\n", ret_val); + else + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_on; + + return ret_val; +} + +/** + * 
e1000_disable_ulp_lpt_lp - unconfigure Ultra Low Power mode for LynxPoint-LP + * @hw: pointer to the HW structure + * @force: boolean indicating whether or not to force disabling ULP + * + * Un-configure ULP mode when link is up, the system is transitioned from + * Sx or the driver is unloaded. If on a Manageability Engine (ME) enabled + * system, poll for an indication from ME that ULP has been un-configured. + * If not on an ME enabled system, un-configure the ULP mode by software. + * + * During nominal operation, this function is called when link is acquired + * to disable ULP mode (force=false); otherwise, for example when unloading + * the driver or during Sx->S0 transitions, this is called with force=true + * to forcibly disable ULP. + */ +static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) +{ + s32 ret_val = 0; + u32 mac_reg; + u16 phy_reg; + int i = 0; + + if ((hw->mac.type < e1000_pch_lpt) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_V) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM2) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V2) || + (hw->dev_spec.ich8lan.ulp_state == e1000_ulp_state_off)) + return 0; + + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + if (force) { + /* Request ME un-configure ULP mode in the PHY */ + mac_reg = er32(H2ME); + mac_reg &= ~E1000_H2ME_ULP; + mac_reg |= E1000_H2ME_ENFORCE_SETTINGS; + ew32(H2ME, mac_reg); + } + + /* Poll up to 100msec for ME to clear ULP_CFG_DONE */ + while (er32(FWSM) & E1000_FWSM_ULP_CFG_DONE) { + if (i++ == 10) { + ret_val = -E1000_ERR_PHY; + goto out; + } + + usleep_range(10000, 20000); + } + e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10); + + if (force) { + mac_reg = er32(H2ME); + mac_reg &= ~E1000_H2ME_ENFORCE_SETTINGS; + ew32(H2ME, mac_reg); + } else { + /* Clear H2ME.ULP after ME ULP configuration */ + mac_reg = er32(H2ME); + mac_reg &= ~E1000_H2ME_ULP; + ew32(H2ME, mac_reg); + } + + goto out; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto out; + + if (force) + /* Toggle LANPHYPC Value bit */ + e1000_toggle_lanphypc_pch_lpt(hw); + + /* Unforce SMBus mode in PHY */ + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); + if (ret_val) { + /* The MAC might be in PCIe mode, so temporarily force to + * SMBus mode in order to access the PHY. + */ + mac_reg = er32(CTRL_EXT); + mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + msleep(50); + + ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, + &phy_reg); + if (ret_val) + goto release; + } + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1000_write_phy_reg_hv_locked(hw, CV_SMB_CTRL, phy_reg); + + /* Unforce SMBus mode in MAC */ + mac_reg = er32(CTRL_EXT); + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_reg); + + /* When ULP mode was previously entered, K1 was disabled by the + * hardware. Re-Enable K1 in the PHY when exiting ULP. 
+ */ + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_PM_CTRL, &phy_reg); + if (ret_val) + goto release; + phy_reg |= HV_PM_CTRL_K1_ENABLE; + e1000_write_phy_reg_hv_locked(hw, HV_PM_CTRL, phy_reg); + + /* Clear ULP enabled configuration */ + ret_val = e1000_read_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, &phy_reg); + if (ret_val) + goto release; + phy_reg &= ~(I218_ULP_CONFIG1_IND | + I218_ULP_CONFIG1_STICKY_ULP | + I218_ULP_CONFIG1_RESET_TO_SMBUS | + I218_ULP_CONFIG1_WOL_HOST | + I218_ULP_CONFIG1_INBAND_EXIT | + I218_ULP_CONFIG1_DISABLE_SMB_PERST); + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Commit ULP changes by starting auto ULP configuration */ + phy_reg |= I218_ULP_CONFIG1_START; + e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); + + /* Clear Disable SMBus Release on PERST# in MAC */ + mac_reg = er32(FEXTNVM7); + mac_reg &= ~E1000_FEXTNVM7_DISABLE_SMB_PERST; + ew32(FEXTNVM7, mac_reg); + +release: + hw->phy.ops.release(hw); + if (force) { + e1000_phy_hw_reset(hw); + msleep(50); + } +out: + if (ret_val) + e_dbg("Error in ULP disable flow: %d\n", ret_val); + else + hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_off; + + return ret_val; +} + +/** + * e1000_check_for_copper_link_ich8lan - Check for link (Copper) + * @hw: pointer to the HW structure + * + * Checks to see if the link status of the hardware has changed. If a + * change in link status has been detected, then we read the PHY registers + * to get the current speed/duplex if link exists. + * + * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link + * up). + **/ +static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val, tipg_reg = 0; + u16 emi_addr, emi_val = 0; + bool link; + u16 phy_reg; + + /* We only want to go out to the PHY registers to see if Auto-Neg + * has completed and/or if our link status has changed. The + * get_link_status flag is set upon receiving a Link Status + * Change or Rx Sequence Error interrupt. + */ + if (!mac->get_link_status) + return 1; + + /* First we want to see if the MII Status Register reports + * link. If so, then we want to get the current speed/duplex + * of the PHY. + */ + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pchlan) { + ret_val = e1000_k1_gig_workaround_hv(hw, link); + if (ret_val) + return ret_val; + } + + /* When connected at 10Mbps half-duplex, some parts are excessively + * aggressive resulting in many collisions. To avoid this, increase + * the IPG and reduce Rx latency in the PHY.
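+ * Concretely, at 10 Mbps half-duplex the TIPG IPGT field is raised to 0xFF + * and the PHY Rx-config EMI value is written as 0; the other cases below + * restore the defaults (0x08, or 0xC on SPT at full-duplex below 1000 Mbps) + * with an EMI value of 1.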
+ */ + if (((hw->mac.type == e1000_pch2lan) || + (hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) && link) { + u16 speed, duplex; + + e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex); + tipg_reg = er32(TIPG); + tipg_reg &= ~E1000_TIPG_IPGT_MASK; + + if (duplex == HALF_DUPLEX && speed == SPEED_10) { + tipg_reg |= 0xFF; + /* Reduce Rx latency in analog PHY */ + emi_val = 0; + } else if (hw->mac.type == e1000_pch_spt && + duplex == FULL_DUPLEX && speed != SPEED_1000) { + tipg_reg |= 0xC; + emi_val = 1; + } else { + + /* Roll back the default values */ + tipg_reg |= 0x08; + emi_val = 1; + } + + ew32(TIPG, tipg_reg); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pch2lan) + emi_addr = I82579_RX_CONFIG; + else + emi_addr = I217_RX_CONFIG; + ret_val = e1000_write_emi_reg_locked(hw, emi_addr, emi_val); + + hw->phy.ops.release(hw); + + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pch_spt) { + u16 data; + u16 ptr_gap; + + if (speed == SPEED_1000) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy_locked(hw, + PHY_REG(776, 20), + &data); + if (ret_val) { + hw->phy.ops.release(hw); + return ret_val; + } + + ptr_gap = (data & (0x3FF << 2)) >> 2; + if (ptr_gap < 0x18) { + data &= ~(0x3FF << 2); + data |= (0x18 << 2); + ret_val = + e1e_wphy_locked(hw, + PHY_REG(776, 20), + data); + } + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + } + } + } + + /* I217 Packet Loss issue: + * ensure that FEXTNVM4 Beacon Duration is set correctly + * on power up. + * Set the Beacon Duration for I217 to 8 usec + */ + if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { + u32 mac_reg; + + mac_reg = er32(FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; + ew32(FEXTNVM4, mac_reg); + } + + /* Work-around I218 hang issue */ + if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { + ret_val = e1000_k1_workaround_lpt_lp(hw, link); + if (ret_val) + return ret_val; + } + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { + /* Set platform power management values for + * Latency Tolerance Reporting (LTR) + */ + ret_val = e1000_platform_pm_pch_lpt(hw, link); + if (ret_val) + return ret_val; + } + + /* Clear link partner's EEE ability */ + hw->dev_spec.ich8lan.eee_lp_ability = 0; + + /* FEXTNVM6 K1-off workaround */ + if (hw->mac.type == e1000_pch_spt) { + u32 pcieanacfg = er32(PCIEANACFG); + u32 fextnvm6 = er32(FEXTNVM6); + + if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) + fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE; + else + fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE; + + ew32(FEXTNVM6, fextnvm6); + } + + if (!link) + return 0; /* No link detected */ + + mac->get_link_status = false; + + switch (hw->mac.type) { + case e1000_pch2lan: + ret_val = e1000_k1_workaround_lv(hw); + if (ret_val) + return ret_val; + /* fall-thru */ + case e1000_pchlan: + if (hw->phy.type == e1000_phy_82578) { + ret_val = e1000_link_stall_workaround_hv(hw); + if (ret_val) + return ret_val; + } + + /* Workaround for PCHx parts in half-duplex: + * Set the number of preambles removed from the packet + * when it is passed from the PHY to the MAC to prevent + * the MAC from misinterpreting the packet type. 
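+ * Half-duplex operation is detected via the STATUS.FD bit below; when it + * is clear, the preamble-removal count in HV_KMRN_FIFO_CTRLSTA is set to 1.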
+ */ + e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg); + phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK; + + if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD) + phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT); + + e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg); + break; + default: + break; + } + + /* Check if there was DownShift, must be checked + * immediately after link-up + */ + e1000e_check_downshift(hw); + + /* Enable/Disable EEE after link up */ + if (hw->phy.type > e1000_phy_82579) { + ret_val = e1000_set_eee_pchlan(hw); + if (ret_val) + return ret_val; + } + + /* If we are forcing speed/duplex, then we simply return since + * we have already determined whether we have link or not. + */ + if (!mac->autoneg) + return 1; + + /* Auto-Neg is enabled. Auto Speed Detection takes care + * of MAC speed/duplex configuration. So we only need to + * configure Collision Distance in the MAC. + */ + mac->ops.config_collision_dist(hw); + + /* Configure Flow Control now that Auto-Neg has completed. + * First, we need to restore the desired flow control + * settings because we may have had to re-autoneg with a + * different link partner. + */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + return 1; +} + +static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 rc; + + rc = e1000_init_mac_params_ich8lan(hw); + if (rc) + return rc; + + rc = e1000_init_nvm_params_ich8lan(hw); + if (rc) + return rc; + + switch (hw->mac.type) { + case e1000_ich8lan: + case e1000_ich9lan: + case e1000_ich10lan: + rc = e1000_init_phy_params_ich8lan(hw); + break; + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + rc = e1000_init_phy_params_pchlan(hw); + break; + default: + break; + } + if (rc) + return rc; + + /* Disable Jumbo Frame support on parts with Intel 10/100 PHY or + * on parts with MACsec enabled in NVM (reflected in CTRL_EXT). + */ + if ((adapter->hw.phy.type == e1000_phy_ife) || + ((adapter->hw.mac.type >= e1000_pch2lan) && + (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) { + adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES; + adapter->max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; + + hw->mac.ops.blink_led = NULL; + } + + if ((adapter->hw.mac.type == e1000_ich8lan) && + (adapter->hw.phy.type != e1000_phy_ife)) + adapter->flags |= FLAG_LSC_GIG_SPEED_DROP; + + /* Enable workaround for 82579 w/ ME enabled */ + if ((adapter->hw.mac.type == e1000_pch2lan) && + (er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA; + + return 0; +} + +static DEFINE_MUTEX(nvm_mutex); + +/** + * e1000_acquire_nvm_ich8lan - Acquire NVM mutex + * @hw: pointer to the HW structure + * + * Acquires the mutex for performing NVM operations. + **/ +static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw __always_unused *hw) +{ + mutex_lock(&nvm_mutex); + + return 0; +} + +/** + * e1000_release_nvm_ich8lan - Release NVM mutex + * @hw: pointer to the HW structure + * + * Releases the mutex used while performing NVM operations. + **/ +static void e1000_release_nvm_ich8lan(struct e1000_hw __always_unused *hw) +{ + mutex_unlock(&nvm_mutex); +} + +/** + * e1000_acquire_swflag_ich8lan - Acquire software control flag + * @hw: pointer to the HW structure + * + * Acquires the software control flag for performing PHY and select + * MAC CSR accesses. 
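+ * The acquire below is two-phased: first wait (up to PHY_CFG_TIMEOUT msec) + * for EXTCNF_CTRL.SWFLAG to read as clear, then set it and poll (up to + * SW_FLAG_TIMEOUT msec) until the bit reads back set, since firmware or + * hardware may still own the flag.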
+ **/ +static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT; + s32 ret_val = 0; + + if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE, + &hw->adapter->state)) { + e_dbg("contention for Phy access\n"); + return -E1000_ERR_PHY; + } + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)) + break; + + mdelay(1); + timeout--; + } + + if (!timeout) { + e_dbg("SW has already locked the resource.\n"); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + + timeout = SW_FLAG_TIMEOUT; + + extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + + while (timeout) { + extcnf_ctrl = er32(EXTCNF_CTRL); + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) + break; + + mdelay(1); + timeout--; + } + + if (!timeout) { + e_dbg("Failed to acquire the semaphore, FW or HW has it: FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n", + er32(FWSM), extcnf_ctrl); + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + ret_val = -E1000_ERR_CONFIG; + goto out; + } + +out: + if (ret_val) + clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); + + return ret_val; +} + +/** + * e1000_release_swflag_ich8lan - Release software control flag + * @hw: pointer to the HW structure + * + * Releases the software control flag for performing PHY and select + * MAC CSR accesses. + **/ +static void e1000_release_swflag_ich8lan(struct e1000_hw *hw) +{ + u32 extcnf_ctrl; + + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) { + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG; + ew32(EXTCNF_CTRL, extcnf_ctrl); + } else { + e_dbg("Semaphore unexpectedly released by sw/fw/hw\n"); + } + + clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); +} + +/** + * e1000_check_mng_mode_ich8lan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has any manageability enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_check_mng_mode_pchlan - Checks management mode + * @hw: pointer to the HW structure + * + * This checks if the adapter has iAMT enabled. + * This is a function pointer entry point only called by read/write + * routines for the PHY and NVM parts. + **/ +static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw) +{ + u32 fwsm; + + fwsm = er32(FWSM); + return (fwsm & E1000_ICH_FWSM_FW_VALID) && + (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)); +} + +/** + * e1000_rar_set_pch2lan - Set receive address register + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address array register at index to the address passed + * in by addr. For 82579, RAR[0] is the base address register that is to + * contain the MAC address but RAR[1-6] are reserved for manageability (ME). + * Use SHRA[0-3] in place of those reserved for ME. 
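+ * The resulting index mapping below is: index 0 -> RAR[0], and index N + * (1 <= N < rar_entry_count) -> SHRA[N - 1], with each SHRA write verified + * by reading the registers back.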
+ **/ +static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | + ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); + return 0; + } + + /* RAR[1-6] are owned by manageability. Skip those and program the + * next address into the SHRA register array. + */ + if (index < (u32)(hw->mac.rar_entry_count)) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + if (ret_val) + goto out; + + ew32(SHRAL(index - 1), rar_low); + e1e_flush(); + ew32(SHRAH(index - 1), rar_high); + e1e_flush(); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((er32(SHRAL(index - 1)) == rar_low) && + (er32(SHRAH(index - 1)) == rar_high)) + return 0; + + e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n", + (index - 1), er32(FWSM)); + } + +out: + e_dbg("Failed to write receive address at index %d\n", index); + return -E1000_ERR_CONFIG; +} + +/** + * e1000_rar_get_count_pch_lpt - Get the number of available SHRA + * @hw: pointer to the HW structure + * + * Get the number of available receive registers that the Host can + * program. SHRA[0-10] are the shared receive address registers + * that are shared between the Host and manageability engine (ME). + * ME can reserve any number of addresses and the host needs to be + * able to tell how many available registers it has access to. + **/ +static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw) +{ + u32 wlock_mac; + u32 num_entries; + + wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; + wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; + + switch (wlock_mac) { + case 0: + /* All SHRA[0..10] and RAR[0] available */ + num_entries = hw->mac.rar_entry_count; + break; + case 1: + /* Only RAR[0] available */ + num_entries = 1; + break; + default: + /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */ + num_entries = wlock_mac + 1; + break; + } + + return num_entries; +} + +/** + * e1000_rar_set_pch_lpt - Set receive address registers + * @hw: pointer to the HW structure + * @addr: pointer to the receive address + * @index: receive address array register + * + * Sets the receive address register array at index to the address passed + * in by addr. For LPT, RAR[0] is the base address register that is to + * contain the MAC address. SHRA[0-10] are the shared receive address + * registers that are shared between the Host and manageability engine (ME). 
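+ * The FWSM WLOCK_MAC field gates which SHRA registers the host may write: + * 0 leaves all of them writable, 1 locks every SHRA (only RAR[0] remains), + * and a value N permits SHRA[0..N-1]; e1000_rar_get_count_pch_lpt() above + * reports the same convention.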
+ **/ +static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + u32 wlock_mac; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + if (index == 0) { + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); + return 0; + } + + /* The manageability engine (ME) can lock certain SHRAR registers that + * it is using - those registers are unavailable for use. + */ + if (index < hw->mac.rar_entry_count) { + wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK; + wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT; + + /* Check if all SHRAR registers are locked */ + if (wlock_mac == 1) + goto out; + + if ((wlock_mac == 0) || (index <= wlock_mac)) { + s32 ret_val; + + ret_val = e1000_acquire_swflag_ich8lan(hw); + + if (ret_val) + goto out; + + ew32(SHRAL_PCH_LPT(index - 1), rar_low); + e1e_flush(); + ew32(SHRAH_PCH_LPT(index - 1), rar_high); + e1e_flush(); + + e1000_release_swflag_ich8lan(hw); + + /* verify the register updates */ + if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) && + (er32(SHRAH_PCH_LPT(index - 1)) == rar_high)) + return 0; + } + } + +out: + e_dbg("Failed to write receive address at index %d\n", index); + return -E1000_ERR_CONFIG; +} + +/** + * e1000_check_reset_block_ich8lan - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Checks if firmware is blocking the reset of the PHY. + * This is a function pointer entry point only called by + * reset routines. + **/ +static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) +{ + bool blocked = false; + int i = 0; + + while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && + (i++ < 10)) + usleep_range(10000, 20000); + return blocked ? E1000_BLK_PHY_RESET : 0; +} + +/** + * e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states + * @hw: pointer to the HW structure + * + * Assumes semaphore already acquired. + * + **/ +static s32 e1000_write_smbus_addr(struct e1000_hw *hw) +{ + u16 phy_data; + u32 strap = er32(STRAP); + u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >> + E1000_STRAP_SMT_FREQ_SHIFT; + s32 ret_val; + + strap &= E1000_STRAP_SMBUS_ADDRESS_MASK; + + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~HV_SMB_ADDR_MASK; + phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT); + phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID; + + if (hw->phy.type == e1000_phy_i217) { + /* Restore SMBus frequency */ + if (freq--) { + phy_data &= ~HV_SMB_ADDR_FREQ_MASK; + phy_data |= (freq & (1 << 0)) << + HV_SMB_ADDR_FREQ_LOW_SHIFT; + phy_data |= (freq & (1 << 1)) << + (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1); + } else { + e_dbg("Unsupported SMB frequency in PHY\n"); + } + } + + return e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data); +} + +/** + * e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * + * SW should configure the LCD from the NVM extended configuration region + * as a workaround for certain parts. 
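+ * The extended configuration region parsed below is a sequence of + * (data, address) word pairs read out of the NVM; entries whose address is + * IGP01E1000_PHY_PAGE_SELECT only update the PHY page applied to the + * subsequent writes.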
+ **/ +static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask; + s32 ret_val = 0; + u16 word_addr, reg_data, reg_addr, phy_page = 0; + + /* Initialize the PHY from the NVM on ICH platforms. This + * is needed due to an issue where the NVM configuration is + * not properly autoloaded after power transitions. + * Therefore, after each PHY reset, we will load the + * configuration data out of the NVM manually. + */ + switch (hw->mac.type) { + case e1000_ich8lan: + if (phy->type != e1000_phy_igp_3) + return ret_val; + + if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) || + (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) { + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG; + break; + } + /* Fall-thru */ + case e1000_pchlan: + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; + break; + default: + return ret_val; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + data = er32(FEXTNVM); + if (!(data & sw_cfg_mask)) + goto release; + + /* Make sure HW does not configure LCD from PHY + * extended configuration before SW configuration + */ + data = er32(EXTCNF_CTRL); + if ((hw->mac.type < e1000_pch2lan) && + (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)) + goto release; + + cnf_size = er32(EXTCNF_SIZE); + cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK; + cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT; + if (!cnf_size) + goto release; + + cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK; + cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT; + + if (((hw->mac.type == e1000_pchlan) && + !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) || + (hw->mac.type > e1000_pchlan)) { + /* HW configures the SMBus address and LEDs when the + * OEM and LCD Write Enable bits are set in the NVM. + * When both NVM bits are cleared, SW will configure + * them instead. + */ + ret_val = e1000_write_smbus_addr(hw); + if (ret_val) + goto release; + + data = er32(LEDCTL); + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG, + (u16)data); + if (ret_val) + goto release; + } + + /* Configure LCD from extended configuration region. */ + + /* cnf_base_addr is in DWORD */ + word_addr = (u16)(cnf_base_addr << 1); + + for (i = 0; i < cnf_size; i++) { + ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1, &reg_data); + if (ret_val) + goto release; + + ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1), + 1, &reg_addr); + if (ret_val) + goto release; + + /* Save off the PHY page for future writes. */ + if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) { + phy_page = reg_data; + continue; + } + + reg_addr &= PHY_REG_MASK; + reg_addr |= phy_page; + + ret_val = e1e_wphy_locked(hw, (u32)reg_addr, reg_data); + if (ret_val) + goto release; + } + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_k1_gig_workaround_hv - K1 Si workaround + * @hw: pointer to the HW structure + * @link: link up bool flag + * + * If K1 is enabled for 1Gbps, the MAC might stall when transitioning + * from a lower speed. This workaround disables K1 whenever link is at 1Gig. + * If link is down, the function will restore the default K1 setting located + * in the NVM.
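+ * Link speed is resolved from BM_CS_STATUS (82578) or HV_M_STATUS (82577) + * below; a resolved 1Gbps link clears k1_enable before the state is + * committed through e1000_configure_k1_ich8lan().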
+ **/ +static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link) +{ + s32 ret_val = 0; + u16 status_reg = 0; + bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled; + + if (hw->mac.type != e1000_pchlan) + return 0; + + /* Wrap the whole flow with the sw flag */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Disable K1 when link is 1Gbps, otherwise use the NVM setting */ + if (link) { + if (hw->phy.type == e1000_phy_82578) { + ret_val = e1e_rphy_locked(hw, BM_CS_STATUS, + &status_reg); + if (ret_val) + goto release; + + status_reg &= (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); + + if (status_reg == (BM_CS_STATUS_LINK_UP | + BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + k1_enable = false; + } + + if (hw->phy.type == e1000_phy_82577) { + ret_val = e1e_rphy_locked(hw, HV_M_STATUS, &status_reg); + if (ret_val) + goto release; + + status_reg &= (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_MASK); + + if (status_reg == (HV_M_STATUS_LINK_UP | + HV_M_STATUS_AUTONEG_COMPLETE | + HV_M_STATUS_SPEED_1000)) + k1_enable = false; + } + + /* Link stall fix for link up */ + ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x0100); + if (ret_val) + goto release; + + } else { + /* Link stall fix for link down */ + ret_val = e1e_wphy_locked(hw, PHY_REG(770, 19), 0x4100); + if (ret_val) + goto release; + } + + ret_val = e1000_configure_k1_ich8lan(hw, k1_enable); + +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_configure_k1_ich8lan - Configure K1 power state + * @hw: pointer to the HW structure + * @enable: K1 state to configure + * + * Configure the K1 power state based on the provided parameter. + * Assumes semaphore already acquired. + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + **/ +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable) +{ + s32 ret_val; + u32 ctrl_reg = 0; + u32 ctrl_ext = 0; + u32 reg = 0; + u16 kmrn_reg = 0; + + ret_val = e1000e_read_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + &kmrn_reg); + if (ret_val) + return ret_val; + + if (k1_enable) + kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE; + else + kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE; + + ret_val = e1000e_write_kmrn_reg_locked(hw, E1000_KMRNCTRLSTA_K1_CONFIG, + kmrn_reg); + if (ret_val) + return ret_val; + + usleep_range(20, 40); + ctrl_ext = er32(CTRL_EXT); + ctrl_reg = er32(CTRL); + + reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + reg |= E1000_CTRL_FRCSPD; + ew32(CTRL, reg); + + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS); + e1e_flush(); + usleep_range(20, 40); + ew32(CTRL, ctrl_reg); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + usleep_range(20, 40); + + return 0; +} + +/** + * e1000_oem_bits_config_ich8lan - SW-based LCD Configuration + * @hw: pointer to the HW structure + * @d0_state: boolean if entering d0 or d3 device state + * + * SW will configure Gbe Disable and LPLU based on the NVM. The four bits are + * collectively called OEM bits. The OEM Write Enable bit and SW Config bit + * in NVM determines whether HW should configure LPLU and Gbe Disable. 
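+ * In D0 only PHY_CTRL.GBE_DISABLE and D0A_LPLU are mirrored into the OEM + * bits; outside D0 the NOND0A variants are considered as well, and + * HV_OEM_BITS_RESTART_AN is set to latch the new values whenever a PHY + * reset is not blocked.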
+ **/ +static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state) +{ + s32 ret_val = 0; + u32 mac_reg; + u16 oem_reg; + + if (hw->mac.type < e1000_pchlan) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + if (hw->mac.type == e1000_pchlan) { + mac_reg = er32(EXTCNF_CTRL); + if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE) + goto release; + } + + mac_reg = er32(FEXTNVM); + if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M)) + goto release; + + mac_reg = er32(PHY_CTRL); + + ret_val = e1e_rphy_locked(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + goto release; + + oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU); + + if (d0_state) { + if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & E1000_PHY_CTRL_D0A_LPLU) + oem_reg |= HV_OEM_BITS_LPLU; + } else { + if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE | + E1000_PHY_CTRL_NOND0A_GBE_DISABLE)) + oem_reg |= HV_OEM_BITS_GBE_DIS; + + if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU | + E1000_PHY_CTRL_NOND0A_LPLU)) + oem_reg |= HV_OEM_BITS_LPLU; + } + + /* Set Restart auto-neg to activate the bits */ + if ((d0_state || (hw->mac.type != e1000_pchlan)) && + !hw->phy.ops.check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + + ret_val = e1e_wphy_locked(hw, HV_OEM_BITS, oem_reg); + +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode + * @hw: pointer to the HW structure + **/ +static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data); + if (ret_val) + return ret_val; + + data |= HV_KMRN_MDIO_SLOW; + + ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data); + + return ret_val; +} + +/** + * e1000_hv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. + **/ +static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 phy_data; + + if (hw->mac.type != e1000_pchlan) + return 0; + + /* Set MDIO slow mode before any other MDIO access */ + if (hw->phy.type == e1000_phy_82577) { + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + } + + if (((hw->phy.type == e1000_phy_82577) && + ((hw->phy.revision == 1) || (hw->phy.revision == 2))) || + ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) { + /* Disable generation of early preamble */ + ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431); + if (ret_val) + return ret_val; + + /* Preamble tuning for SSC */ + ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204); + if (ret_val) + return ret_val; + } + + if (hw->phy.type == e1000_phy_82578) { + /* Return registers to default by doing a soft reset then + * writing 0x3140 to the control register. + */ + if (hw->phy.revision < 2) { + e1000e_phy_sw_reset(hw); + ret_val = e1e_wphy(hw, MII_BMCR, 0x3140); + } + } + + /* Select page 0 */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + hw->phy.addr = 1; + ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0); + hw->phy.ops.release(hw); + if (ret_val) + return ret_val; + + /* Configure the K1 Si workaround during phy reset assuming there is + * link so that it disables K1 if link is in 1Gbps. 
+ */ + ret_val = e1000_k1_gig_workaround_hv(hw, true); + if (ret_val) + return ret_val; + + /* Workaround for link disconnects on a busy hub in half duplex */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = e1e_rphy_locked(hw, BM_PORT_GEN_CFG, &phy_data); + if (ret_val) + goto release; + ret_val = e1e_wphy_locked(hw, BM_PORT_GEN_CFG, phy_data & 0x00FF); + if (ret_val) + goto release; + + /* set MSE higher to enable link to stay up when noise is high */ + ret_val = e1000_write_emi_reg_locked(hw, I82577_MSE_THRESHOLD, 0x0034); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY + * @hw: pointer to the HW structure + **/ +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) +{ + u32 mac_reg; + u16 i, phy_reg = 0; + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) + goto release; + + /* Copy both RAL/H (rar_entry_count) and SHRAL/H to PHY */ + for (i = 0; i < (hw->mac.rar_entry_count); i++) { + mac_reg = er32(RAL(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_L(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_M(i), + (u16)((mac_reg >> 16) & 0xFFFF)); + + mac_reg = er32(RAH(i)); + hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), + (u16)((mac_reg & E1000_RAH_AV) + >> 16)); + } + + e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + +release: + hw->phy.ops.release(hw); +} + +/** + * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation + * with 82579 PHY + * @hw: pointer to the HW structure + * @enable: flag to enable/disable workaround when enabling/disabling jumbos + **/ +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable) +{ + s32 ret_val = 0; + u16 phy_reg, data; + u32 mac_reg; + u16 i; + + if (hw->mac.type < e1000_pch2lan) + return 0; + + /* disable Rx path while enabling/disabling workaround */ + e1e_rphy(hw, PHY_REG(769, 20), &phy_reg); + ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14)); + if (ret_val) + return ret_val; + + if (enable) { + /* Write Rx addresses (rar_entry_count for RAL/H, and + * SHRAL/H) and initial CRC values to the MAC + */ + for (i = 0; i < hw->mac.rar_entry_count; i++) { + u8 mac_addr[ETH_ALEN] = { 0 }; + u32 addr_high, addr_low; + + addr_high = er32(RAH(i)); + if (!(addr_high & E1000_RAH_AV)) + continue; + addr_low = er32(RAL(i)); + mac_addr[0] = (addr_low & 0xFF); + mac_addr[1] = ((addr_low >> 8) & 0xFF); + mac_addr[2] = ((addr_low >> 16) & 0xFF); + mac_addr[3] = ((addr_low >> 24) & 0xFF); + mac_addr[4] = (addr_high & 0xFF); + mac_addr[5] = ((addr_high >> 8) & 0xFF); + + ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr)); + } + + /* Write Rx addresses to the PHY */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + /* Enable jumbo frame workaround in the MAC */ + mac_reg = er32(FFLT_DBG); + mac_reg &= ~(1 << 14); + mac_reg |= (7 << 15); + ew32(FFLT_DBG, mac_reg); + + mac_reg = er32(RCTL); + mac_reg |= E1000_RCTL_SECRC; + ew32(RCTL, mac_reg); + + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + return ret_val; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data | (1 << 0)); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + return ret_val; + 
data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + return ret_val; + + /* Enable jumbo frame workaround in the PHY */ + e1e_rphy(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + data |= (0x37 << 5); + ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); + if (ret_val) + return ret_val; + e1e_rphy(hw, PHY_REG(769, 16), &data); + data &= ~(1 << 13); + ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); + if (ret_val) + return ret_val; + e1e_rphy(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (E1000_TX_PTR_GAP << 2); + ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); + if (ret_val) + return ret_val; + ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100); + if (ret_val) + return ret_val; + e1e_rphy(hw, HV_PM_CTRL, &data); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10)); + if (ret_val) + return ret_val; + } else { + /* Write MAC register values back to h/w defaults */ + mac_reg = er32(FFLT_DBG); + mac_reg &= ~(0xF << 14); + ew32(FFLT_DBG, mac_reg); + + mac_reg = er32(RCTL); + mac_reg &= ~E1000_RCTL_SECRC; + ew32(RCTL, mac_reg); + + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + &data); + if (ret_val) + return ret_val; + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_CTRL_OFFSET, + data & ~(1 << 0)); + if (ret_val) + return ret_val; + ret_val = e1000e_read_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + &data); + if (ret_val) + return ret_val; + data &= ~(0xF << 8); + data |= (0xB << 8); + ret_val = e1000e_write_kmrn_reg(hw, + E1000_KMRNCTRLSTA_HD_CTRL, + data); + if (ret_val) + return ret_val; + + /* Write PHY register values back to h/w defaults */ + e1e_rphy(hw, PHY_REG(769, 23), &data); + data &= ~(0x7F << 5); + ret_val = e1e_wphy(hw, PHY_REG(769, 23), data); + if (ret_val) + return ret_val; + e1e_rphy(hw, PHY_REG(769, 16), &data); + data |= (1 << 13); + ret_val = e1e_wphy(hw, PHY_REG(769, 16), data); + if (ret_val) + return ret_val; + e1e_rphy(hw, PHY_REG(776, 20), &data); + data &= ~(0x3FF << 2); + data |= (0x8 << 2); + ret_val = e1e_wphy(hw, PHY_REG(776, 20), data); + if (ret_val) + return ret_val; + ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00); + if (ret_val) + return ret_val; + e1e_rphy(hw, HV_PM_CTRL, &data); + ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10)); + if (ret_val) + return ret_val; + } + + /* re-enable Rx path after enabling/disabling workaround */ + return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14)); +} + +/** + * e1000_lv_phy_workarounds_ich8lan - A series of Phy workarounds to be + * done after every PHY reset. 
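+ * Applies only to 82579 (pch2lan): MDIO is first dropped into slow mode, + * then the MSE threshold and the MSE link-down count are programmed + * through the EMI registers.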
+ **/ +static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + if (hw->mac.type != e1000_pch2lan) + return 0; + + /* Set MDIO slow mode before any other MDIO access */ + ret_val = e1000_set_mdio_slow_mode_hv(hw); + if (ret_val) + return ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + /* set MSE higher to enable link to stay up when noise is high */ + ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_THRESHOLD, 0x0034); + if (ret_val) + goto release; + /* drop link after 5 times MSE threshold was reached */ + ret_val = e1000_write_emi_reg_locked(hw, I82579_MSE_LINK_DOWN, 0x0005); +release: + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_k1_workaround_lv - K1 Si workaround + * @hw: pointer to the HW structure + * + * Workaround to set the K1 beacon duration for 82579 parts in 10Mbps. + * Disable K1 in 1000Mbps and 100Mbps. + **/ +static s32 e1000_k1_workaround_lv(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 status_reg = 0; + + if (hw->mac.type != e1000_pch2lan) + return 0; + + /* Set K1 beacon duration based on 10Mbps speed */ + ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg); + if (ret_val) + return ret_val; + + if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) + == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) { + if (status_reg & + (HV_M_STATUS_SPEED_1000 | HV_M_STATUS_SPEED_100)) { + u16 pm_phy_reg; + + /* LV 1G/100 packet drop issue workaround */ + ret_val = e1e_rphy(hw, HV_PM_CTRL, &pm_phy_reg); + if (ret_val) + return ret_val; + pm_phy_reg &= ~HV_PM_CTRL_K1_ENABLE; + ret_val = e1e_wphy(hw, HV_PM_CTRL, pm_phy_reg); + if (ret_val) + return ret_val; + } else { + u32 mac_reg; + + mac_reg = er32(FEXTNVM4); + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC; + ew32(FEXTNVM4, mac_reg); + } + } + + return ret_val; +} + +/** + * e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware + * @hw: pointer to the HW structure + * @gate: boolean set to true to gate, false to ungate + * + * Gate/ungate the automatic PHY configuration via hardware; perform + * the configuration via software instead. + **/ +static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate) +{ + u32 extcnf_ctrl; + + if (hw->mac.type < e1000_pch2lan) + return; + + extcnf_ctrl = er32(EXTCNF_CTRL); + + if (gate) + extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; + else + extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG; + + ew32(EXTCNF_CTRL, extcnf_ctrl); +} + +/** + * e1000_lan_init_done_ich8lan - Check for PHY config completion + * @hw: pointer to the HW structure + * + * Check the appropriate indication the MAC has finished configuring the + * PHY after a software reset. + **/ +static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw) +{ + u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT; + + /* Wait for basic configuration to complete before proceeding */ + do { + data = er32(STATUS); + data &= E1000_STATUS_LAN_INIT_DONE; + usleep_range(100, 200); + } while ((!data) && --loop); + + /* If basic configuration is incomplete before the above loop + * count reaches 0, loading the configuration from NVM will + * leave the PHY in a bad state possibly resulting in no link.
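+ * (The loop above polls STATUS.LAN_INIT_DONE up to + * E1000_ICH8_LAN_INIT_TIMEOUT times, sleeping 100-200 usec per poll.)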
+ */ + if (loop == 0) + e_dbg("LAN_INIT_DONE not set, increase timeout\n"); + + /* Clear the Init Done bit for the next init event */ + data = er32(STATUS); + data &= ~E1000_STATUS_LAN_INIT_DONE; + ew32(STATUS, data); +} + +/** + * e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset + * @hw: pointer to the HW structure + **/ +static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 reg; + + if (hw->phy.ops.check_reset_block(hw)) + return 0; + + /* Allow time for h/w to get to quiescent state after reset */ + usleep_range(10000, 20000); + + /* Perform any necessary post-reset workarounds */ + switch (hw->mac.type) { + case e1000_pchlan: + ret_val = e1000_hv_phy_workarounds_ich8lan(hw); + if (ret_val) + return ret_val; + break; + case e1000_pch2lan: + ret_val = e1000_lv_phy_workarounds_ich8lan(hw); + if (ret_val) + return ret_val; + break; + default: + break; + } + + /* Clear the host wakeup bit after lcd reset */ + if (hw->mac.type >= e1000_pchlan) { + e1e_rphy(hw, BM_PORT_GEN_CFG, ®); + reg &= ~BM_WUC_HOST_WU_BIT; + e1e_wphy(hw, BM_PORT_GEN_CFG, reg); + } + + /* Configure the LCD with the extended configuration region in NVM */ + ret_val = e1000_sw_lcd_config_ich8lan(hw); + if (ret_val) + return ret_val; + + /* Configure the LCD with the OEM bits in NVM */ + ret_val = e1000_oem_bits_config_ich8lan(hw, true); + + if (hw->mac.type == e1000_pch2lan) { + /* Ungate automatic PHY configuration on non-managed 82579 */ + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + usleep_range(10000, 20000); + e1000_gate_hw_phy_config_ich8lan(hw, false); + } + + /* Set EEE LPI Update Timer to 200usec */ + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + ret_val = e1000_write_emi_reg_locked(hw, + I82579_LPI_UPDATE_TIMER, + 0x1387); + hw->phy.ops.release(hw); + } + + return ret_val; +} + +/** + * e1000_phy_hw_reset_ich8lan - Performs a PHY reset + * @hw: pointer to the HW structure + * + * Resets the PHY + * This is a function pointer entry point called by drivers + * or other shared routines. + **/ +static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + + /* Gate automatic PHY configuration by hardware on non-managed 82579 */ + if ((hw->mac.type == e1000_pch2lan) && + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + + ret_val = e1000e_phy_hw_reset_generic(hw); + if (ret_val) + return ret_val; + + return e1000_post_phy_reset_ich8lan(hw); +} + +/** + * e1000_set_lplu_state_pchlan - Set Low Power Link Up state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU state according to the active flag. For PCH, if OEM write + * bit are disabled in the NVM, writing the LPLU bits in the MAC will not set + * the phy speed. This function will manually set the LPLU bit and restart + * auto-neg as hw would do. D3 and D0 LPLU will call the same function + * since it configures the same bit. 
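+ * Only HV_OEM_BITS is touched here: the LPLU bit is set or cleared to + * match @active, and RESTART_AN is OR'ed in unless a PHY reset block is + * currently asserted.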
+ **/ +static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active) +{ + s32 ret_val; + u16 oem_reg; + + ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg); + if (ret_val) + return ret_val; + + if (active) + oem_reg |= HV_OEM_BITS_LPLU; + else + oem_reg &= ~HV_OEM_BITS_LPLU; + + if (!hw->phy.ops.check_reset_block(hw)) + oem_reg |= HV_OEM_BITS_RESTART_AN; + + return e1e_wphy(hw, HV_OEM_BITS, oem_reg); +} + +/** + * e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D0 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = 0; + u16 data; + + if (phy->type == e1000_phy_ife) + return 0; + + phy_ctrl = er32(PHY_CTRL); + + if (active) { + phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + if (ret_val) + return ret_val; + } else { + phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } + + return 0; +} + +/** + * e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state + * @hw: pointer to the HW structure + * @active: true to enable LPLU, false to disable + * + * Sets the LPLU D3 state according to the active flag. When + * activating LPLU this function also disables smart speed + * and vice versa. LPLU will not be activated unless the + * device autonegotiation advertisement meets standards of + * either 10 or 10/100 or 10/100/1000 at all duplexes. + * This is a function pointer entry point only called by + * PHY setup routines. 
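+ * Unlike the D0 variant above, D3 LPLU is only engaged when the + * advertised speeds (all, non-gig, or 10Mb-only) permit the link to + * come back up at the reduced rate.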
+ **/ +static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 phy_ctrl; + s32 ret_val = 0; + u16 data; + + phy_ctrl = er32(PHY_CTRL); + + if (!active) { + phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU; + ew32(PHY_CTRL, phy_ctrl); + + if (phy->type != e1000_phy_igp_3) + return 0; + + /* Call gig speed drop workaround on LPLU before accessing + * any PHY registers + */ + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + } + + return ret_val; +} + +/** + * e1000_valid_nvm_bank_detect_ich8lan - finds out the valid bank 0 or 1 + * @hw: pointer to the HW structure + * @bank: pointer to the variable that returns the active bank + * + * Reads signature byte from the NVM using the flash access registers. + * Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank. 
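+ * On non-SPT parts the byte examined is the high byte of word 0x13, + * i.e. byte offset E1000_ICH_NVM_SIG_WORD * 2 + 1, and checking bank 1 + * adds another flash_bank_size * sizeof(u16) bytes; SPT instead derives + * the bank from the CTRL_EXT register.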
+ **/ +static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) +{ + u32 eecd; + struct e1000_nvm_info *nvm = &hw->nvm; + u32 bank1_offset = nvm->flash_bank_size * sizeof(u16); + u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1; + u8 sig_byte = 0; + s32 ret_val; + + switch (hw->mac.type) { + /* In SPT, read from the CTRL_EXT reg instead of + * accessing the sector valid bits from the nvm + */ + case e1000_pch_spt: + *bank = er32(CTRL_EXT) + & E1000_CTRL_EXT_NVMVS; + if ((*bank == 0) || (*bank == 1)) { + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } else { + *bank = *bank - 2; + return 0; + } + break; + case e1000_ich8lan: + case e1000_ich9lan: + eecd = er32(EECD); + if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) == + E1000_EECD_SEC1VAL_VALID_MASK) { + if (eecd & E1000_EECD_SEC1VAL) + *bank = 1; + else + *bank = 0; + + return 0; + } + e_dbg("Unable to determine valid NVM bank via EEC - reading flash signature\n"); + /* fall-thru */ + default: + /* set bank to 0 in case flash read fails */ + *bank = 0; + + /* Check bank 0 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 0; + return 0; + } + + /* Check bank 1 */ + ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset + + bank1_offset, + &sig_byte); + if (ret_val) + return ret_val; + if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) == + E1000_ICH_NVM_SIG_VALUE) { + *bank = 1; + return 0; + } + + e_dbg("ERROR: No valid NVM bank present\n"); + return -E1000_ERR_NVM; + } +} + +/** + * e1000_read_nvm_spt - NVM access for SPT + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words. + * @data: pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM + **/ +static s32 e1000_read_nvm_spt(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = 0; + u32 bank = 0; + u32 dword = 0; + u16 offset_to_read; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? 
nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = 0; + + for (i = 0; i < words; i += 2) { + if (words - i == 1) { + if (dev_spec->shadow_ram[offset + i].modified) { + data[i] = + dev_spec->shadow_ram[offset + i].value; + } else { + offset_to_read = act_offset + i - + ((act_offset + i) % 2); + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + if ((act_offset + i) % 2 == 0) + data[i] = (u16)(dword & 0xFFFF); + else + data[i] = (u16)((dword >> 16) & 0xFFFF); + } + } else { + offset_to_read = act_offset + i; + if (!(dev_spec->shadow_ram[offset + i].modified) || + !(dev_spec->shadow_ram[offset + i + 1].modified)) { + ret_val = + e1000_read_flash_dword_ich8lan(hw, + offset_to_read, + &dword); + if (ret_val) + break; + } + if (dev_spec->shadow_ram[offset + i].modified) + data[i] = + dev_spec->shadow_ram[offset + i].value; + else + data[i] = (u16)(dword & 0xFFFF); + if (dev_spec->shadow_ram[offset + i].modified) + data[i + 1] = + dev_spec->shadow_ram[offset + i + 1].value; + else + data[i + 1] = (u16)(dword >> 16 & 0xFFFF); + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + e_dbg("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_read_nvm_ich8lan - Read word(s) from the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to read. + * @words: Size of data to read in words + * @data: Pointer to the word(s) to read at offset. + * + * Reads a word(s) from the NVM using the flash access registers. + **/ +static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 act_offset; + s32 ret_val = 0; + u32 bank = 0; + u16 i, word; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + ret_val = -E1000_ERR_NVM; + goto out; + } + + nvm->ops.acquire(hw); + + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + act_offset = (bank) ? nvm->flash_bank_size : 0; + act_offset += offset; + + ret_val = 0; + for (i = 0; i < words; i++) { + if (dev_spec->shadow_ram[offset + i].modified) { + data[i] = dev_spec->shadow_ram[offset + i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, + act_offset + i, + &word); + if (ret_val) + break; + data[i] = word; + } + } + + nvm->ops.release(hw); + +out: + if (ret_val) + e_dbg("NVM read error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_flash_cycle_init_ich8lan - Initialize flash + * @hw: pointer to the HW structure + * + * This function does initial flash setup so that a new read/write/erase cycle + * can be started. + **/ +static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw) +{ + union ich8_hws_flash_status hsfsts; + s32 ret_val = -E1000_ERR_NVM; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* Check if the flash descriptor is valid */ + if (!hsfsts.hsf_status.fldesvalid) { + e_dbg("Flash descriptor invalid. 
SW Sequencing must be used.\n"); + return -E1000_ERR_NVM; + } + + /* Clear FCERR and DAEL in hw status by writing 1 */ + hsfsts.hsf_status.flcerr = 1; + hsfsts.hsf_status.dael = 1; + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + + /* Either we should have a hardware SPI cycle in progress + * bit to check against, in order to start a new cycle or + * FDONE bit should be changed in the hardware so that it + * is 1 after hardware reset, which can then be used as an + * indication whether a cycle is in progress or has been + * completed. + */ + + if (!hsfsts.hsf_status.flcinprog) { + /* There is no cycle running at present, + * so we can start a cycle. + * Begin by setting Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + ret_val = 0; + } else { + s32 i; + + /* Otherwise poll for sometime so the current + * cycle has a chance to end before giving up. + */ + for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) { + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (!hsfsts.hsf_status.flcinprog) { + ret_val = 0; + break; + } + udelay(1); + } + if (!ret_val) { + /* Successful in waiting for previous cycle to timeout, + * now set the Flash Cycle Done. + */ + hsfsts.hsf_status.flcdone = 1; + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, + hsfsts.regval & 0xFFFF); + else + ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); + } else { + e_dbg("Flash controller busy, cannot get access\n"); + } + } + + return ret_val; +} + +/** + * e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase) + * @hw: pointer to the HW structure + * @timeout: maximum time to wait for completion + * + * This function starts a flash cycle and waits for its completion. + **/ +static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout) +{ + union ich8_hws_flash_ctrl hsflctl; + union ich8_hws_flash_status hsfsts; + u32 i = 0; + + /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + hsflctl.hsf_ctrl.flcgo = 1; + + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* wait till FDONE bit is set to 1 */ + do { + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcdone) + break; + udelay(1); + } while (i++ < timeout); + + if (hsfsts.hsf_status.flcdone && !hsfsts.hsf_status.flcerr) + return 0; + + return -E1000_ERR_NVM; +} + +/** + * e1000_read_flash_dword_ich8lan - Read dword from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash dword at offset into data. Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_dword_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + /* Must convert word offset into bytes. */ + offset <<= 1; + return e1000_read_flash_data32_ich8lan(hw, offset, data); +} + +/** + * e1000_read_flash_word_ich8lan - Read word from flash + * @hw: pointer to the HW structure + * @offset: offset to data location + * @data: pointer to the location for storing the data + * + * Reads the flash word at offset into data. 
Offset is converted + * to bytes before read. + **/ +static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset, + u16 *data) +{ + /* Must convert offset into bytes. */ + offset <<= 1; + + return e1000_read_flash_data_ich8lan(hw, offset, 2, data); +} + +/** + * e1000_read_flash_byte_ich8lan - Read byte from flash + * @hw: pointer to the HW structure + * @offset: The offset of the byte to read. + * @data: Pointer to a byte to store the value read. + * + * Reads a single byte from the NVM using the flash access registers. + **/ +static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 *data) +{ + s32 ret_val; + u16 word = 0; + + /* In SPT, only 32 bits access is supported, + * so this function should not be called. + */ + if (hw->mac.type == e1000_pch_spt) + return -E1000_ERR_NVM; + else + ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word); + + if (ret_val) + return ret_val; + + *data = (u8)word; + + return 0; +} + +/** + * e1000_read_flash_data_ich8lan - Read byte or word from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte or word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: Pointer to the word to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. + **/ +static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (!ret_val) { + flash_data = er32flash(ICH_FLASH_FDATA0); + if (size == 1) + *data = (u8)(flash_data & 0x000000FF); + else if (size == 2) + *data = (u16)(flash_data & 0x0000FFFF); + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) { + /* Repeat for some time before giving up. */ + continue; + } else if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_read_flash_data32_ich8lan - Read dword from NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the dword to read. + * @data: Pointer to the dword to store the value read. + * + * Reads a byte or word from the NVM using the flash access registers. 
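+ * (Despite the wording above, this helper always transfers a full 32-bit + * dword and is only valid on SPT, where flash access is dword-sized.)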
+ **/ + +static s32 e1000_read_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 *data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val = -E1000_ERR_NVM; + u8 count = 0; + + if (offset > ICH_FLASH_LINEAR_ADDR_MASK || + hw->mac.type != e1000_pch_spt) + return -E1000_ERR_NVM; + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + /* In SPT, This register is in Lan memory space, not flash. + * Therefore, only 32 bit access is supported + */ + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ; + /* In SPT, This register is in Lan memory space, not flash. + * Therefore, only 32 bit access is supported + */ + ew32flash(ICH_FLASH_HSFSTS, (u32)hsflctl.regval << 16); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_READ_COMMAND_TIMEOUT); + + /* Check if FCERR is set to 1, if set to 1, clear it + * and try the whole sequence a few more times, else + * read in (shift in) the Flash Data0, the order is + * least significant byte first msb to lsb + */ + if (!ret_val) { + *data = er32flash(ICH_FLASH_FDATA0); + break; + } else { + /* If we've gotten here, then things are probably + * completely hosed, but if the error condition is + * detected, it won't hurt to give it another try... + * ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) { + /* Repeat for some time before giving up. */ + continue; + } else if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_nvm_ich8lan - Write word(s) to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the word(s) to write. + * @words: Size of data to write in words + * @data: Pointer to the word(s) to write at offset. + * + * Writes a byte or word to the NVM using the flash access registers. + **/ +static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words, + u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 i; + + if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + nvm->ops.acquire(hw); + + for (i = 0; i < words; i++) { + dev_spec->shadow_ram[offset + i].modified = true; + dev_spec->shadow_ram[offset + i].value = data[i]; + } + + nvm->ops.release(hw); + + return 0; +} + +/** + * e1000_update_nvm_checksum_spt - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. 
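+ * On SPT the commit loop below walks the shadow ram two words at a time + * and writes each pair as a single dword via + * e1000_retry_write_flash_dword_ich8lan().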
+ **/ +static s32 e1000_update_nvm_checksum_spt(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u32 dword = 0; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i += 2) { + /* Determine whether to write the value stored + * in the other NVM bank or a modified value stored + * in the shadow RAM + */ + ret_val = e1000_read_flash_dword_ich8lan(hw, + i + old_bank_offset, + &dword); + + if (dev_spec->shadow_ram[i].modified) { + dword &= 0xffff0000; + dword |= (dev_spec->shadow_ram[i].value & 0xffff); + } + if (dev_spec->shadow_ram[i + 1].modified) { + dword &= 0x0000ffff; + dword |= ((dev_spec->shadow_ram[i + 1].value & 0xffff) + << 16); + } + if (ret_val) + break; + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD - 1) + dword |= E1000_ICH_NVM_SIG_MASK << 16; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + usleep_range(100, 200); + + /* Write the data to the new bank. Offset in words */ + act_offset = i + new_bank_offset; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, + dword); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. + */ + if (ret_val) { + /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ + e_dbg("Flash commit failed.\n"); + goto release; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + + /*offset in words but we read dword */ + --act_offset; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0xBFFFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. 
We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + + /* offset in words but we read dword */ + act_offset = old_bank_offset + E1000_ICH_NVM_SIG_WORD - 1; + ret_val = e1000_read_flash_dword_ich8lan(hw, act_offset, &dword); + + if (ret_val) + goto release; + + dword &= 0x00FFFFFF; + ret_val = e1000_retry_write_flash_dword_ich8lan(hw, act_offset, dword); + + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. */ + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + nvm->ops.reload(hw); + usleep_range(10000, 20000); + } + +out: + if (ret_val) + e_dbg("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM + * @hw: pointer to the HW structure + * + * The NVM checksum is updated by calling the generic update_nvm_checksum, + * which writes the checksum to the shadow ram. The changes in the shadow + * ram are then committed to the EEPROM by processing each bank at a time + * checking for the modified bit and writing only the pending changes. + * After a successful commit, the shadow ram is cleared and is ready for + * future writes. + **/ +static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u32 i, act_offset, new_bank_offset, old_bank_offset, bank; + s32 ret_val; + u16 data = 0; + + ret_val = e1000e_update_nvm_checksum_generic(hw); + if (ret_val) + goto out; + + if (nvm->type != e1000_nvm_flash_sw) + goto out; + + nvm->ops.acquire(hw); + + /* We're writing to the opposite bank so if we're on bank 1, + * write to bank 0 etc. We also need to erase the segment that + * is going to be written + */ + ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank); + if (ret_val) { + e_dbg("Could not detect valid bank, assuming bank 0\n"); + bank = 0; + } + + if (bank == 0) { + new_bank_offset = nvm->flash_bank_size; + old_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 1); + if (ret_val) + goto release; + } else { + old_bank_offset = nvm->flash_bank_size; + new_bank_offset = 0; + ret_val = e1000_erase_flash_bank_ich8lan(hw, 0); + if (ret_val) + goto release; + } + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + if (dev_spec->shadow_ram[i].modified) { + data = dev_spec->shadow_ram[i].value; + } else { + ret_val = e1000_read_flash_word_ich8lan(hw, i + + old_bank_offset, + &data); + if (ret_val) + break; + } + + /* If the word is 0x13, then make sure the signature bits + * (15:14) are 11b until the commit has completed. + * This will allow us to write 10b which indicates the + * signature is valid. We want to do this after the write + * has completed so that we don't mark the segment valid + * while the write is still in progress + */ + if (i == E1000_ICH_NVM_SIG_WORD) + data |= E1000_ICH_NVM_SIG_MASK; + + /* Convert offset to bytes. */ + act_offset = (i + new_bank_offset) << 1; + + usleep_range(100, 200); + /* Write the bytes to the new bank. 
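+ * Each 16-bit shadow word is committed as two byte writes (low byte + * first, then high byte), each guarded by its own retry loop.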
*/ + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset, + (u8)data); + if (ret_val) + break; + + usleep_range(100, 200); + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset + 1, + (u8)(data >> 8)); + if (ret_val) + break; + } + + /* Don't bother writing the segment valid bits if sector + * programming failed. + */ + if (ret_val) { + /* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */ + e_dbg("Flash commit failed.\n"); + goto release; + } + + /* Finally validate the new segment by setting bit 15:14 + * to 10b in word 0x13 , this can be done without an + * erase as well since these bits are 11 to start with + * and we need to change bit 14 to 0b + */ + act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD; + ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data); + if (ret_val) + goto release; + + data &= 0xBFFF; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, + act_offset * 2 + 1, + (u8)(data >> 8)); + if (ret_val) + goto release; + + /* And invalidate the previously valid segment by setting + * its signature word (0x13) high_byte to 0b. This can be + * done without an erase because flash erase sets all bits + * to 1's. We can write 1's to 0's without an erase + */ + act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1; + ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0); + if (ret_val) + goto release; + + /* Great! Everything worked, we can now clear the cached entries. */ + for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) { + dev_spec->shadow_ram[i].modified = false; + dev_spec->shadow_ram[i].value = 0xFFFF; + } + +release: + nvm->ops.release(hw); + + /* Reload the EEPROM, or else modifications will not appear + * until after the next adapter reset. + */ + if (!ret_val) { + nvm->ops.reload(hw); + usleep_range(10000, 20000); + } + +out: + if (ret_val) + e_dbg("NVM update error: %d\n", ret_val); + + return ret_val; +} + +/** + * e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Check to see if checksum needs to be fixed by reading bit 6 in word 0x19. + * If the bit is 0, that the EEPROM had been modified, but the checksum was not + * calculated, in which case we need to calculate the checksum and set bit 6. + **/ +static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + u16 data; + u16 word; + u16 valid_csum_mask; + + /* Read NVM and check Invalid Image CSUM bit. If this bit is 0, + * the checksum needs to be fixed. This bit is an indication that + * the NVM was prepared by OEM software and did not calculate + * the checksum...a likely scenario. 
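+ * Which word/mask pair carries the valid-checksum flag depends on the + * MAC: NVM_COMPAT/NVM_COMPAT_VALID_CSUM on pch_lpt and pch_spt, + * NVM_FUTURE_INIT_WORD1 with its VALID_CSUM bit for everything else.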
+ */ + switch (hw->mac.type) { + case e1000_pch_lpt: + case e1000_pch_spt: + word = NVM_COMPAT; + valid_csum_mask = NVM_COMPAT_VALID_CSUM; + break; + default: + word = NVM_FUTURE_INIT_WORD1; + valid_csum_mask = NVM_FUTURE_INIT_WORD1_VALID_CSUM; + break; + } + + ret_val = e1000_read_nvm(hw, word, 1, &data); + if (ret_val) + return ret_val; + + if (!(data & valid_csum_mask)) { + data |= valid_csum_mask; + ret_val = e1000_write_nvm(hw, word, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; + } + + return e1000e_validate_nvm_checksum_generic(hw); +} + +/** + * e1000e_write_protect_nvm_ich8lan - Make the NVM read-only + * @hw: pointer to the HW structure + * + * To prevent malicious write/erase of the NVM, set it to be read-only + * so that the hardware ignores all write/erase cycles of the NVM via + * the flash control registers. The shadow-ram copy of the NVM will + * still be updated, however any updates to this copy will not stick + * across driver reloads. + **/ +void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_flash_protected_range pr0; + union ich8_hws_flash_status hsfsts; + u32 gfpreg; + + nvm->ops.acquire(hw); + + gfpreg = er32flash(ICH_FLASH_GFPREG); + + /* Write-protect GbE Sector of NVM */ + pr0.regval = er32flash(ICH_FLASH_PR0); + pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK; + pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK); + pr0.range.wpe = true; + ew32flash(ICH_FLASH_PR0, pr0.regval); + + /* Lock down a subset of GbE Flash Control Registers, e.g. + * PR0 to prevent the write-protection from being lifted. + * Once FLOCKDN is set, the registers protected by it cannot + * be written until FLOCKDN is cleared by a hardware reset. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + hsfsts.hsf_status.flockdn = true; + ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval); + + nvm->ops.release(hw); +} + +/** + * e1000_write_flash_data_ich8lan - Writes bytes to the NVM + * @hw: pointer to the HW structure + * @offset: The offset (in bytes) of the byte/word to read. + * @size: Size of data to read, 1=byte 2=word + * @data: The byte(s) to write to the NVM. + * + * Writes one/two bytes to the NVM using the flash access registers. + **/ +static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset, + u8 size, u16 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + u32 flash_data = 0; + s32 ret_val; + u8 count = 0; + + if (hw->mac.type == e1000_pch_spt) { + if (size != 4 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } else { + if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } + + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + /* In SPT, This register is in Lan memory space, not + * flash. Therefore, only 32 bit access is supported + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + + /* 0b/1b corresponds to 1 or 2 byte size, respectively. */ + hsflctl.hsf_ctrl.fldbcount = size - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + /* In SPT, This register is in Lan memory space, + * not flash. 
Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + if (size == 1) + flash_data = (u32)data & 0x00FF; + else + flash_data = (u32)data; + + ew32flash(ICH_FLASH_FDATA0, flash_data); + + /* check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + if (!ret_val) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) + /* Repeat for some time before giving up. */ + continue; + if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** +* e1000_write_flash_data32_ich8lan - Writes 4 bytes to the NVM +* @hw: pointer to the HW structure +* @offset: The offset (in bytes) of the dwords to read. +* @data: The 4 bytes to write to the NVM. +* +* Writes one/two/four bytes to the NVM using the flash access registers. +**/ +static s32 e1000_write_flash_data32_ich8lan(struct e1000_hw *hw, u32 offset, + u32 data) +{ + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + s32 ret_val; + u8 count = 0; + + if (hw->mac.type == e1000_pch_spt) { + if (offset > ICH_FLASH_LINEAR_ADDR_MASK) + return -E1000_ERR_NVM; + } + flash_linear_addr = ((ICH_FLASH_LINEAR_ADDR_MASK & offset) + + hw->nvm.flash_base_addr); + do { + udelay(1); + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + break; + + /* In SPT, This register is in Lan memory space, not + * flash. Therefore, only 32 bit access is supported + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = er32flash(ICH_FLASH_HSFSTS) + >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + + hsflctl.hsf_ctrl.fldbcount = sizeof(u32) - 1; + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE; + + /* In SPT, This register is in Lan memory space, + * not flash. Therefore, only 32 bit access is + * supported + */ + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ew32flash(ICH_FLASH_FDATA0, data); + + /* check if FCERR is set to 1 , if set to 1, clear it + * and try the whole sequence a few more times else done + */ + ret_val = + e1000_flash_cycle_ich8lan(hw, + ICH_FLASH_WRITE_COMMAND_TIMEOUT); + + if (!ret_val) + break; + + /* If we're here, then things are most likely + * completely hosed, but if the error condition + * is detected, it won't hurt to give it another + * try...ICH_FLASH_CYCLE_REPEAT_COUNT times. + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + if (hsfsts.hsf_status.flcerr) + /* Repeat for some time before giving up. 
*/ + continue; + if (!hsfsts.hsf_status.flcdone) { + e_dbg("Timeout error - flash cycle did not complete.\n"); + break; + } + } while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT); + + return ret_val; +} + +/** + * e1000_write_flash_byte_ich8lan - Write a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The index of the byte to read. + * @data: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + **/ +static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset, + u8 data) +{ + u16 word = (u16)data; + + return e1000_write_flash_data_ich8lan(hw, offset, 1, word); +} + +/** +* e1000_retry_write_flash_dword_ich8lan - Writes a dword to NVM +* @hw: pointer to the HW structure +* @offset: The offset of the word to write. +* @dword: The dword to write to the NVM. +* +* Writes a single dword to the NVM using the flash access registers. +* Goes through a retry algorithm before giving up. +**/ +static s32 e1000_retry_write_flash_dword_ich8lan(struct e1000_hw *hw, + u32 offset, u32 dword) +{ + s32 ret_val; + u16 program_retries; + + /* Must convert word offset into bytes. */ + offset <<= 1; + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + + if (!ret_val) + return ret_val; + for (program_retries = 0; program_retries < 100; program_retries++) { + e_dbg("Retrying Byte %8.8X at offset %u\n", dword, offset); + usleep_range(100, 200); + ret_val = e1000_write_flash_data32_ich8lan(hw, offset, dword); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM + * @hw: pointer to the HW structure + * @offset: The offset of the byte to write. + * @byte: The byte to write to the NVM. + * + * Writes a single byte to the NVM using the flash access registers. + * Goes through a retry algorithm before giving up. + **/ +static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw, + u32 offset, u8 byte) +{ + s32 ret_val; + u16 program_retries; + + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + return ret_val; + + for (program_retries = 0; program_retries < 100; program_retries++) { + e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset); + usleep_range(100, 200); + ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte); + if (!ret_val) + break; + } + if (program_retries == 100) + return -E1000_ERR_NVM; + + return 0; +} + +/** + * e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM + * @hw: pointer to the HW structure + * @bank: 0 for first bank, 1 for second bank, etc. + * + * Erases the bank specified. Each bank is a 4k block. Banks are 0 based. + * bank N is 4096 * N + flash_reg_addr. + **/ +static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + union ich8_hws_flash_status hsfsts; + union ich8_hws_flash_ctrl hsflctl; + u32 flash_linear_addr; + /* bank size is in 16bit words - adjust to bytes */ + u32 flash_bank_size = nvm->flash_bank_size * 2; + s32 ret_val; + s32 count = 0; + s32 j, iteration, sector_size; + + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + + /* Determine HW Sector size: Read BERASE bits of hw flash status + * register + * 00: The Hw sector is 256 bytes, hence we need to erase 16 + * consecutive sectors. The start index for the nth Hw sector + * can be calculated as = bank * 4096 + n * 256 + * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector. 
+ * The start index for the nth Hw sector can be calculated + * as = bank * 4096 + * 10: The Hw sector is 8K bytes, nth sector = bank * 8192 + * (ich9 only, otherwise error condition) + * 11: The Hw sector is 64K bytes, nth sector = bank * 65536 + */ + switch (hsfsts.hsf_status.berasesz) { + case 0: + /* Hw sector size 256 */ + sector_size = ICH_FLASH_SEG_SIZE_256; + iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256; + break; + case 1: + sector_size = ICH_FLASH_SEG_SIZE_4K; + iteration = 1; + break; + case 2: + sector_size = ICH_FLASH_SEG_SIZE_8K; + iteration = 1; + break; + case 3: + sector_size = ICH_FLASH_SEG_SIZE_64K; + iteration = 1; + break; + default: + return -E1000_ERR_NVM; + } + + /* Start with the base address, then add the sector offset. */ + flash_linear_addr = hw->nvm.flash_base_addr; + flash_linear_addr += (bank) ? flash_bank_size : 0; + + for (j = 0; j < iteration; j++) { + do { + u32 timeout = ICH_FLASH_ERASE_COMMAND_TIMEOUT; + + /* Steps */ + ret_val = e1000_flash_cycle_init_ich8lan(hw); + if (ret_val) + return ret_val; + + /* Write a value 11 (block Erase) in Flash + * Cycle field in hw flash control + */ + if (hw->mac.type == e1000_pch_spt) + hsflctl.regval = + er32flash(ICH_FLASH_HSFSTS) >> 16; + else + hsflctl.regval = er16flash(ICH_FLASH_HSFCTL); + + hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE; + if (hw->mac.type == e1000_pch_spt) + ew32flash(ICH_FLASH_HSFSTS, + hsflctl.regval << 16); + else + ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval); + + /* Write the last 24 bits of an index within the + * block into Flash Linear address field in Flash + * Address. + */ + flash_linear_addr += (j * sector_size); + ew32flash(ICH_FLASH_FADDR, flash_linear_addr); + + ret_val = e1000_flash_cycle_ich8lan(hw, timeout); + if (!ret_val) + break; + + /* Check if FCERR is set to 1. If 1, + * clear it and try the whole sequence + * a few more times else Done + */ + hsfsts.regval = er16flash(ICH_FLASH_HSFSTS); + if (hsfsts.hsf_status.flcerr) + /* repeat for some time before giving up */ + continue; + else if (!hsfsts.hsf_status.flcdone) + return ret_val; + } while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT); + } + + return 0; +} + +/** + * e1000_valid_led_default_ich8lan - Set the default LED settings + * @hw: pointer to the HW structure + * @data: Pointer to the LED settings + * + * Reads the LED default settings from the NVM to data. If the NVM LED + * settings is all 0's or F's, set the LED default to a valid LED default + * setting. + **/ +static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT_ICH8LAN; + + return 0; +} + +/** + * e1000_id_led_init_pchlan - store LED configurations + * @hw: pointer to the HW structure + * + * PCH does not control LEDs via the LEDCTL register, rather it uses + * the PHY LED configuration register. + * + * PCH also does not have an "always on" or "always off" mode which + * complicates the ID feature. Instead of using the "on" mode to indicate + * in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init_generic()), + * use "link_up" mode. The LEDs will still ID on request if there is no + * link based on logic in e1000_led_[on|off]_pchlan(). 
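+ * Each of the four LEDs is described by a 4-bit field in the NVM word + * (data >> (i * 4)), while the PHY LED configuration packs five bits + * per LED, hence the separate i * 5 shift in the loop below.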
+ **/ +static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP; + const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT; + u16 data, i, temp, shift; + + /* Get default ID LED modes */ + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = er32(LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK; + shift = (i * 5); + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode1 |= (ledctl_on << shift); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode1 |= (ledctl_off << shift); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode2 |= (ledctl_on << shift); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift); + mac->ledctl_mode2 |= (ledctl_off << shift); + break; + default: + /* Do nothing */ + break; + } + } + + return 0; +} + +/** + * e1000_get_bus_info_ich8lan - Get/Set the bus type and width + * @hw: pointer to the HW structure + * + * ICH8 uses the PCI Express bus, but does not contain a PCI Express Capability + * register, so the bus width is hard-coded. + **/ +static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + s32 ret_val; + + ret_val = e1000e_get_bus_info_pcie(hw); + + /* ICH devices are "PCI Express"-ish. They have + * a configuration space, but do not contain + * PCI Express Capability registers, so bus width + * must be hardcoded. + */ + if (bus->width == e1000_bus_width_unknown) + bus->width = e1000_bus_width_pcie_x1; + + return ret_val; +} + +/** + * e1000_reset_hw_ich8lan - Reset the hardware + * @hw: pointer to the HW structure + * + * Does a full reset of the hardware which includes a reset of the PHY and + * MAC. + **/ +static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; + u16 kum_cfg; + u32 ctrl, reg; + s32 ret_val; + + /* Prevent the PCI-E bus from sticking if there is no TLP connection + * on the last TLP read/write transaction when MAC is reset. + */ + ret_val = e1000e_disable_pcie_master(hw); + if (ret_val) + e_dbg("PCI-E Master disable polling has failed.\n"); + + e_dbg("Masking off all interrupts\n"); + ew32(IMC, 0xffffffff); + + /* Disable the Transmit and Receive units. Then delay to allow + * any pending transactions to complete before we hit the MAC + * with the global reset. + */ + ew32(RCTL, 0); + ew32(TCTL, E1000_TCTL_PSP); + e1e_flush(); + + usleep_range(10000, 20000); + + /* Workaround for ICH8 bit corruption issue in FIFO memory */ + if (hw->mac.type == e1000_ich8lan) { + /* Set Tx and Rx buffer allocation to 8k apiece. */ + ew32(PBA, E1000_PBA_8K); + /* Set Packet Buffer Size to 16k.
*/ + ew32(PBS, E1000_PBS_16K); + } + + if (hw->mac.type == e1000_pchlan) { + /* Save the NVM K1 bit setting */ + ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &kum_cfg); + if (ret_val) + return ret_val; + + if (kum_cfg & E1000_NVM_K1_ENABLE) + dev_spec->nvm_k1_enabled = true; + else + dev_spec->nvm_k1_enabled = false; + } + + ctrl = er32(CTRL); + + if (!hw->phy.ops.check_reset_block(hw)) { + /* Full-chip reset requires MAC and PHY reset at the same + * time to make sure the interface between MAC and the + * external PHY is reset. + */ + ctrl |= E1000_CTRL_PHY_RST; + + /* Gate automatic PHY configuration by hardware on + * non-managed 82579 + */ + if ((hw->mac.type == e1000_pch2lan) && + !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) + e1000_gate_hw_phy_config_ich8lan(hw, true); + } + ret_val = e1000_acquire_swflag_ich8lan(hw); + e_dbg("Issuing a global reset to ich8lan\n"); + ew32(CTRL, (ctrl | E1000_CTRL_RST)); + /* cannot issue a flush here because it hangs the hardware */ + msleep(20); + + /* Set Phy Config Counter to 50msec */ + if (hw->mac.type == e1000_pch2lan) { + reg = er32(FEXTNVM3); + reg &= ~E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK; + reg |= E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC; + ew32(FEXTNVM3, reg); + } + + if (!ret_val) + clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state); + + if (ctrl & E1000_CTRL_PHY_RST) { + ret_val = hw->phy.ops.get_cfg_done(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_post_phy_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + + /* For PCH, this write will make sure that any noise + * will be detected as a CRC error and be dropped rather than show up + * as a bad packet to the DMA engine. + */ + if (hw->mac.type == e1000_pchlan) + ew32(CRC_OFFSET, 0x65656565); + + ew32(IMC, 0xffffffff); + er32(ICR); + + reg = er32(KABGTXD); + reg |= E1000_KABGTXD_BGSQLBIAS; + ew32(KABGTXD, reg); + + return 0; +} + +/** + * e1000_init_hw_ich8lan - Initialize the hardware + * @hw: pointer to the HW structure + * + * Prepares the hardware for transmit and receive by doing the following: + * - initialize hardware bits + * - initialize LED identification + * - setup receive address registers + * - setup flow control + * - setup transmit descriptors + * - clear statistics + **/ +static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl_ext, txdctl, snoop; + s32 ret_val; + u16 i; + + e1000_initialize_hw_bits_ich8lan(hw); + + /* Initialize identification LED */ + ret_val = mac->ops.id_led_init(hw); + /* An error is not fatal and we should not stop init due to this */ + if (ret_val) + e_dbg("Error initializing identification LED\n"); + + /* Setup the receive address. */ + e1000e_init_rx_addrs(hw, mac->rar_entry_count); + + /* Zero out the Multicast HASH table */ + e_dbg("Zeroing the MTA\n"); + for (i = 0; i < mac->mta_reg_count; i++) + E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0); + + /* The 82578 Rx buffer will stall if wakeup is enabled in host and + * the ME. Disable wakeup by clearing the host wakeup bit. + * Reset the phy after disabling host wakeup to reset the Rx buffer. 
+ */ + if (hw->phy.type == e1000_phy_82578) { + e1e_rphy(hw, BM_PORT_GEN_CFG, &i); + i &= ~BM_WUC_HOST_WU_BIT; + e1e_wphy(hw, BM_PORT_GEN_CFG, i); + ret_val = e1000_phy_hw_reset_ich8lan(hw); + if (ret_val) + return ret_val; + } + + /* Setup link and flow control */ + ret_val = mac->ops.setup_link(hw); + + /* Set the transmit descriptor write-back policy for both queues */ + txdctl = er32(TXDCTL(0)); + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); + ew32(TXDCTL(0), txdctl); + txdctl = er32(TXDCTL(1)); + txdctl = ((txdctl & ~E1000_TXDCTL_WTHRESH) | + E1000_TXDCTL_FULL_TX_DESC_WB); + txdctl = ((txdctl & ~E1000_TXDCTL_PTHRESH) | + E1000_TXDCTL_MAX_TX_DESC_PREFETCH); + ew32(TXDCTL(1), txdctl); + + /* ICH8 has opposite polarity of no_snoop bits. + * By default, we should use snoop behavior. + */ + if (mac->type == e1000_ich8lan) + snoop = PCIE_ICH8_SNOOP_ALL; + else + snoop = (u32)~(PCIE_NO_SNOOP_ALL); + e1000e_set_pcie_no_snoop(hw, snoop); + + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_RO_DIS; + ew32(CTRL_EXT, ctrl_ext); + + /* Clear all of the statistics registers (clear on read). It is + * important that we do this after we have tried to establish link + * because the symbol error count will increment wildly if there + * is no link. + */ + e1000_clear_hw_cntrs_ich8lan(hw); + + return ret_val; +} + +/** + * e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits + * @hw: pointer to the HW structure + * + * Sets/Clears required hardware bits necessary for correctly setting up the + * hardware for transmit and receive. + **/ +static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw) +{ + u32 reg; + + /* Extended Device Control */ + reg = er32(CTRL_EXT); + reg |= (1 << 22); + /* Enable PHY low-power state when MAC is at D3 w/o WoL */ + if (hw->mac.type >= e1000_pchlan) + reg |= E1000_CTRL_EXT_PHYPDEN; + ew32(CTRL_EXT, reg); + + /* Transmit Descriptor Control 0 */ + reg = er32(TXDCTL(0)); + reg |= (1 << 22); + ew32(TXDCTL(0), reg); + + /* Transmit Descriptor Control 1 */ + reg = er32(TXDCTL(1)); + reg |= (1 << 22); + ew32(TXDCTL(1), reg); + + /* Transmit Arbitration Control 0 */ + reg = er32(TARC(0)); + if (hw->mac.type == e1000_ich8lan) + reg |= (1 << 28) | (1 << 29); + reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27); + ew32(TARC(0), reg); + + /* Transmit Arbitration Control 1 */ + reg = er32(TARC(1)); + if (er32(TCTL) & E1000_TCTL_MULR) + reg &= ~(1 << 28); + else + reg |= (1 << 28); + reg |= (1 << 24) | (1 << 26) | (1 << 30); + ew32(TARC(1), reg); + + /* Device Status */ + if (hw->mac.type == e1000_ich8lan) { + reg = er32(STATUS); + reg &= ~(1 << 31); + ew32(STATUS, reg); + } + + /* work-around descriptor data corruption issue during nfs v2 udp + * traffic, just disable the nfs filtering capability + */ + reg = er32(RFCTL); + reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS); + + /* Disable IPv6 extension header parsing because some malformed + * IPv6 headers can hang the Rx. 
+ */ + if (hw->mac.type == e1000_ich8lan) + reg |= (E1000_RFCTL_IPV6_EX_DIS | E1000_RFCTL_NEW_IPV6_EXT_DIS); + ew32(RFCTL, reg); + + /* Enable ECC on Lynxpoint */ + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { + reg = er32(PBECCSTS); + reg |= E1000_PBECCSTS_ECC_ENABLE; + ew32(PBECCSTS, reg); + + reg = er32(CTRL); + reg |= E1000_CTRL_MEHE; + ew32(CTRL, reg); + } +} + +/** + * e1000_setup_link_ich8lan - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. + **/ +static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val; + + if (hw->phy.ops.check_reset_block(hw)) + return 0; + + /* ICH parts do not have a word in the NVM to determine + * the default flow control setting, so we explicitly + * set it to full. + */ + if (hw->fc.requested_mode == e1000_fc_default) { + /* Workaround h/w hang when Tx flow control enabled */ + if (hw->mac.type == e1000_pchlan) + hw->fc.requested_mode = e1000_fc_rx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + } + + /* Save off the requested flow control mode for use later. Depending + * on the link partner's capabilities, we may or may not use this mode. + */ + hw->fc.current_mode = hw->fc.requested_mode; + + e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode); + + /* Continue to configure the copper link. */ + ret_val = hw->mac.ops.setup_physical_interface(hw); + if (ret_val) + return ret_val; + + ew32(FCTTV, hw->fc.pause_time); + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || + (hw->phy.type == e1000_phy_82577)) { + ew32(FCRTV_PCH, hw->fc.refresh_time); + + ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27), + hw->fc.pause_time); + if (ret_val) + return ret_val; + } + + return e1000e_set_fc_watermarks(hw); +} + +/** + * e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface + * @hw: pointer to the HW structure + * + * Configures the kumeran interface to the PHY to wait the appropriate time + * when polling the PHY, then call the generic setup_copper_link to finish + * configuring the copper link. + **/ +static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw) +{ + u32 ctrl; + s32 ret_val; + u16 reg_data; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SLU; + ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ew32(CTRL, ctrl); + + /* Set the mac to wait the maximum time between each iteration + * and increase the max iterations when polling the phy; + * this fixes erroneous timeouts at 10Mbps. 
+	 */
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
+	if (ret_val)
+		return ret_val;
+	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+				       &reg_data);
+	if (ret_val)
+		return ret_val;
+	reg_data |= 0x3F;
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+					reg_data);
+	if (ret_val)
+		return ret_val;
+
+	switch (hw->phy.type) {
+	case e1000_phy_igp_3:
+		ret_val = e1000e_copper_link_setup_igp(hw);
+		if (ret_val)
+			return ret_val;
+		break;
+	case e1000_phy_bm:
+	case e1000_phy_82578:
+		ret_val = e1000e_copper_link_setup_m88(hw);
+		if (ret_val)
+			return ret_val;
+		break;
+	case e1000_phy_82577:
+	case e1000_phy_82579:
+		ret_val = e1000_copper_link_setup_82577(hw);
+		if (ret_val)
+			return ret_val;
+		break;
+	case e1000_phy_ife:
+		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		reg_data &= ~IFE_PMC_AUTO_MDIX;
+
+		switch (hw->phy.mdix) {
+		case 1:
+			reg_data &= ~IFE_PMC_FORCE_MDIX;
+			break;
+		case 2:
+			reg_data |= IFE_PMC_FORCE_MDIX;
+			break;
+		case 0:
+		default:
+			reg_data |= IFE_PMC_AUTO_MDIX;
+			break;
+		}
+		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
+		if (ret_val)
+			return ret_val;
+		break;
+	default:
+		break;
+	}
+
+	return e1000e_setup_copper_link(hw);
+}
+
+/**
+ * e1000_setup_copper_link_pch_lpt - Configure MAC/PHY interface
+ * @hw: pointer to the HW structure
+ *
+ * Calls the PHY specific link setup function and then calls the
+ * generic setup_copper_link to finish configuring the link for
+ * Lynxpoint PCH devices
+ **/
+static s32 e1000_setup_copper_link_pch_lpt(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ew32(CTRL, ctrl);
+
+	ret_val = e1000_copper_link_setup_82577(hw);
+	if (ret_val)
+		return ret_val;
+
+	return e1000e_setup_copper_link(hw);
+}
+
+/**
+ * e1000_get_link_up_info_ich8lan - Get current link speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to store current link speed
+ * @duplex: pointer to store the current link duplex
+ *
+ * Calls the generic get_speed_and_duplex to retrieve the current link
+ * information and then calls the Kumeran lock loss workaround for links at
+ * gigabit speeds.
+ **/
+static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
+					  u16 *duplex)
+{
+	s32 ret_val;
+
+	ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
+	if (ret_val)
+		return ret_val;
+
+	if ((hw->mac.type == e1000_ich8lan) &&
+	    (hw->phy.type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
+		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
+ * @hw: pointer to the HW structure
+ *
+ * Work-around for 82566 Kumeran PCS lock loss:
+ * On link status change (i.e. PCI reset, speed change) and link is up and
+ * speed is gigabit-
+ * 0) if workaround is optionally disabled do nothing
+ * 1) wait 1ms for Kumeran link to come up
+ * 2) check Kumeran Diagnostic register PCS lock loss bit
+ * 3) if not set the link is locked (all is good), otherwise...
+ * 4) reset the PHY
+ * 5) repeat up to 10 times
+ * Note: this is only called for IGP3 copper when speed is 1gb.
+ **/
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u32 phy_ctrl;
+	s32 ret_val;
+	u16 i, data;
+	bool link;
+
+	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
+		return 0;
+
+	/* Make sure link is up before proceeding. If not just return.
+	 * Attempting this while link is negotiating fouled up link
+	 * stability
+	 */
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (!link)
+		return 0;
+
+	for (i = 0; i < 10; i++) {
+		/* read once to clear */
+		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			return ret_val;
+		/* and again to get new status */
+		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			return ret_val;
+
+		/* check for PCS lock */
+		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+			return 0;
+
+		/* Issue PHY reset */
+		e1000_phy_hw_reset(hw);
+		mdelay(5);
+	}
+	/* Disable GigE link negotiation */
+	phy_ctrl = er32(PHY_CTRL);
+	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
+		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+	ew32(PHY_CTRL, phy_ctrl);
+
+	/* Call gig speed drop workaround on Gig disable before accessing
+	 * any PHY registers
+	 */
+	e1000e_gig_downshift_workaround_ich8lan(hw);
+
+	/* unable to acquire PCS lock */
+	return -E1000_ERR_PHY;
+}
+
+/**
+ * e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
+ * @hw: pointer to the HW structure
+ * @state: boolean value used to set the current Kumeran workaround state
+ *
+ * If ICH8, set the current Kumeran workaround state (enabled - true
+ * /disabled - false).
+ **/
+void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+						  bool state)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+
+	if (hw->mac.type != e1000_ich8lan) {
+		e_dbg("Workaround applies to ICH8 only.\n");
+		return;
+	}
+
+	dev_spec->kmrn_lock_loss_workaround_enabled = state;
+}
+
+/**
+ * e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ * @hw: pointer to the HW structure
+ *
+ * Workaround for 82566 power-down on D3 entry:
+ * 1) disable gigabit link
+ * 2) write VR power-down enable
+ * 3) read it back
+ * Continue if successful, else issue LCD reset and repeat
+ **/
+void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
+{
+	u32 reg;
+	u16 data;
+	u8 retry = 0;
+
+	if (hw->phy.type != e1000_phy_igp_3)
+		return;
+
+	/* Try the workaround twice (if needed) */
+	do {
+		/* Disable link */
+		reg = er32(PHY_CTRL);
+		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
+			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+		ew32(PHY_CTRL, reg);
+
+		/* Call gig speed drop workaround on Gig disable before
+		 * accessing any PHY registers
+		 */
+		if (hw->mac.type == e1000_ich8lan)
+			e1000e_gig_downshift_workaround_ich8lan(hw);
+
+		/* Write VR power-down enable */
+		e1e_rphy(hw, IGP3_VR_CTRL, &data);
+		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
+
+		/* Read it back and test */
+		e1e_rphy(hw, IGP3_VR_CTRL, &data);
+		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
+			break;
+
+		/* Issue PHY reset and repeat at most one more time */
+		reg = er32(CTRL);
+		ew32(CTRL, reg | E1000_CTRL_PHY_RST);
+		retry++;
+	} while (retry);
+}
+
+/**
+ * e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
+ * @hw: pointer to the HW structure
+ *
+ * Steps to take when dropping from 1Gb/s (eg. link cable removal (LSC),
+ * LPLU, Gig disable, MDIC PHY reset):
+ * 1) Set Kumeran Near-end loopback
+ * 2) Clear Kumeran Near-end loopback
+ * Should only be called for ICH8[m] devices with any 1G Phy.
+ **/
+void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 reg_data;
+
+	if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
+		return;
+
+	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+				       &reg_data);
+	if (ret_val)
+		return;
+	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+					reg_data);
+	if (ret_val)
+		return;
+	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
+	e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET, reg_data);
+}
+
+/**
+ * e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
+ * @hw: pointer to the HW structure
+ *
+ * During S0 to Sx transition, it is possible the link remains at gig
+ * instead of negotiating to a lower speed. Before going to Sx, set
+ * 'Gig Disable' to force link speed negotiation to a lower speed based on
+ * the LPLU setting in the NVM or custom setting. For PCH and newer parts,
+ * the OEM bits PHY register (LED, GbE disable and LPLU configurations) also
+ * needs to be written.
+ * Parts that support (and are linked to a partner which support) EEE in
+ * 100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
+ * than 10Mbps w/o EEE.
+ **/
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u32 phy_ctrl;
+	s32 ret_val;
+
+	phy_ctrl = er32(PHY_CTRL);
+	phy_ctrl |= E1000_PHY_CTRL_GBE_DISABLE;
+
+	if (hw->phy.type == e1000_phy_i217) {
+		u16 phy_reg, device_id = hw->adapter->pdev->device;
+
+		if ((device_id == E1000_DEV_ID_PCH_LPTLP_I218_LM) ||
+		    (device_id == E1000_DEV_ID_PCH_LPTLP_I218_V) ||
+		    (device_id == E1000_DEV_ID_PCH_I218_LM3) ||
+		    (device_id == E1000_DEV_ID_PCH_I218_V3) ||
+		    (hw->mac.type == e1000_pch_spt)) {
+			u32 fextnvm6 = er32(FEXTNVM6);
+
+			ew32(FEXTNVM6, fextnvm6 & ~E1000_FEXTNVM6_REQ_PLL_CLK);
+		}
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+
+		if (!dev_spec->eee_disable) {
+			u16 eee_advert;
+
+			ret_val =
+			    e1000_read_emi_reg_locked(hw,
+						      I217_EEE_ADVERTISEMENT,
+						      &eee_advert);
+			if (ret_val)
+				goto release;
+
+			/* Disable LPLU if both link partners support 100BaseT
+			 * EEE and 100Full is advertised on both ends of the
+			 * link, and enable Auto Enable LPI since there will
+			 * be no driver to enable LPI while in Sx.
+			 */
+			if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
+			    (dev_spec->eee_lp_ability &
+			     I82579_EEE_100_SUPPORTED) &&
+			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
+				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
+					      E1000_PHY_CTRL_NOND0A_LPLU);
+
+				/* Set Auto Enable LPI after link up */
+				e1e_rphy_locked(hw,
+						I217_LPI_GPIO_CTRL, &phy_reg);
+				phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+				e1e_wphy_locked(hw,
+						I217_LPI_GPIO_CTRL, phy_reg);
+			}
+		}
+
+		/* For i217 Intel Rapid Start Technology support,
+		 * when the system is going into Sx and no manageability engine
+		 * is present, the driver must configure proxy to reset only on
+		 * power good. LPI (Low Power Idle) state must also reset only
+		 * on power good, as well as the MTA (Multicast table array).
+		 * The SMBus release must also be disabled on LCD reset.
+		 */
+		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+			/* Enable proxy to reset only on power good.
*/ + e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg); + phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE; + e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg); + + /* Set bit enable LPI (EEE) to reset only on + * power good. + */ + e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); + phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET; + e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); + + /* Disable the SMB release on LCD reset. */ + e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); + phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE; + e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); + } + + /* Enable MTA to reset for Intel Rapid Start Technology + * Support + */ + e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); + phy_reg |= I217_CGFREG_ENABLE_MTA_RESET; + e1e_wphy_locked(hw, I217_CGFREG, phy_reg); + +release: + hw->phy.ops.release(hw); + } +out: + ew32(PHY_CTRL, phy_ctrl); + + if (hw->mac.type == e1000_ich8lan) + e1000e_gig_downshift_workaround_ich8lan(hw); + + if (hw->mac.type >= e1000_pchlan) { + e1000_oem_bits_config_ich8lan(hw, false); + + /* Reset PHY to activate OEM bits on 82577/8 */ + if (hw->mac.type == e1000_pchlan) + e1000e_phy_hw_reset_generic(hw); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + e1000_write_smbus_addr(hw); + hw->phy.ops.release(hw); + } +} + +/** + * e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0 + * @hw: pointer to the HW structure + * + * During Sx to S0 transitions on non-managed devices or managed devices + * on which PHY resets are not blocked, if the PHY registers cannot be + * accessed properly by the s/w toggle the LANPHYPC value to power cycle + * the PHY. + * On i217, setup Intel Rapid Start Technology. + **/ +void e1000_resume_workarounds_pchlan(struct e1000_hw *hw) +{ + s32 ret_val; + + if (hw->mac.type < e1000_pch2lan) + return; + + ret_val = e1000_init_phy_workarounds_pchlan(hw); + if (ret_val) { + e_dbg("Failed to init PHY flow ret_val=%d\n", ret_val); + return; + } + + /* For i217 Intel Rapid Start Technology support when the system + * is transitioning from Sx and no manageability engine is present + * configure SMBus to restore on reset, disable proxy, and enable + * the reset on MTA (Multicast table array). + */ + if (hw->phy.type == e1000_phy_i217) { + u16 phy_reg; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + e_dbg("Failed to setup iRST\n"); + return; + } + + /* Clear Auto Enable LPI after link up */ + e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg); + phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI; + e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg); + + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { + /* Restore clear on SMB if no manageability engine + * is present + */ + ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); + if (ret_val) + goto release; + phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE; + e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); + + /* Disable Proxy */ + e1e_wphy_locked(hw, I217_PROXY_CTRL, 0); + } + /* Enable reset on MTA */ + ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); + if (ret_val) + goto release; + phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET; + e1e_wphy_locked(hw, I217_CGFREG, phy_reg); +release: + if (ret_val) + e_dbg("Error %d in resume workarounds\n", ret_val); + hw->phy.ops.release(hw); + } +} + +/** + * e1000_cleanup_led_ich8lan - Restore the default LED operation + * @hw: pointer to the HW structure + * + * Return the LED back to the default configuration. 
+ **/
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
+{
+	if (hw->phy.type == e1000_phy_ife)
+		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+
+	ew32(LEDCTL, hw->mac.ledctl_default);
+	return 0;
+}
+
+/**
+ * e1000_led_on_ich8lan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
+{
+	if (hw->phy.type == e1000_phy_ife)
+		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+
+	ew32(LEDCTL, hw->mac.ledctl_mode2);
+	return 0;
+}
+
+/**
+ * e1000_led_off_ich8lan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
+{
+	if (hw->phy.type == e1000_phy_ife)
+		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+				(IFE_PSCL_PROBE_MODE |
+				 IFE_PSCL_PROBE_LEDS_OFF));
+
+	ew32(LEDCTL, hw->mac.ledctl_mode1);
+	return 0;
+}
+
+/**
+ * e1000_setup_led_pchlan - Configures SW controllable LED
+ * @hw: pointer to the HW structure
+ *
+ * This prepares the SW controllable LED for use.
+ **/
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
+{
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
+}
+
+/**
+ * e1000_cleanup_led_pchlan - Restore the default LED operation
+ * @hw: pointer to the HW structure
+ *
+ * Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
+{
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
+}
+
+/**
+ * e1000_led_on_pchlan - Turn LEDs on
+ * @hw: pointer to the HW structure
+ *
+ * Turn on the LEDs.
+ **/
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
+{
+	u16 data = (u16)hw->mac.ledctl_mode2;
+	u32 i, led;
+
+	/* If no link, then turn LED on by setting the invert bit
+	 * for each LED whose mode is "link_up" in ledctl_mode2.
+	 */
+	if (!(er32(STATUS) & E1000_STATUS_LU)) {
+		for (i = 0; i < 3; i++) {
+			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+			if ((led & E1000_PHY_LED0_MODE_MASK) !=
+			    E1000_LEDCTL_MODE_LINK_UP)
+				continue;
+			if (led & E1000_PHY_LED0_IVRT)
+				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+			else
+				data |= (E1000_PHY_LED0_IVRT << (i * 5));
+		}
+	}
+
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ * e1000_led_off_pchlan - Turn LEDs off
+ * @hw: pointer to the HW structure
+ *
+ * Turn off the LEDs.
+ **/
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
+{
+	u16 data = (u16)hw->mac.ledctl_mode1;
+	u32 i, led;
+
+	/* If no link, then turn LED off by clearing the invert bit
+	 * for each LED whose mode is "link_up" in ledctl_mode1.
+	 */
+	if (!(er32(STATUS) & E1000_STATUS_LU)) {
+		for (i = 0; i < 3; i++) {
+			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+			if ((led & E1000_PHY_LED0_MODE_MASK) !=
+			    E1000_LEDCTL_MODE_LINK_UP)
+				continue;
+			if (led & E1000_PHY_LED0_IVRT)
+				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+			else
+				data |= (E1000_PHY_LED0_IVRT << (i * 5));
+		}
+	}
+
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ * e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
+ * @hw: pointer to the HW structure
+ *
+ * Read appropriate register for the config done bit for completion status
+ * and configure the PHY through s/w for EEPROM-less parts.
+ *
+ * NOTE: some silicon which is EEPROM-less will fail trying to read the
+ * config done bit, so we only log an error and continue. If we were
+ * to return with an error, EEPROM-less silicon would not be able to be
+ * reset or change link.
+ **/ +static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u32 bank = 0; + u32 status; + + e1000e_get_cfg_done_generic(hw); + + /* Wait for indication from h/w that it has completed basic config */ + if (hw->mac.type >= e1000_ich10lan) { + e1000_lan_init_done_ich8lan(hw); + } else { + ret_val = e1000e_get_auto_rd_done(hw); + if (ret_val) { + /* When auto config read does not complete, do not + * return with an error. This can happen in situations + * where there is no eeprom and prevents getting link. + */ + e_dbg("Auto Read Done did not complete\n"); + ret_val = 0; + } + } + + /* Clear PHY Reset Asserted bit */ + status = er32(STATUS); + if (status & E1000_STATUS_PHYRA) + ew32(STATUS, status & ~E1000_STATUS_PHYRA); + else + e_dbg("PHY Reset Asserted not set - needs delay\n"); + + /* If EEPROM is not marked present, init the IGP 3 PHY manually */ + if (hw->mac.type <= e1000_ich9lan) { + if (!(er32(EECD) & E1000_EECD_PRES) && + (hw->phy.type == e1000_phy_igp_3)) { + e1000e_phy_init_script_igp3(hw); + } + } else { + if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) { + /* Maybe we should do a basic PHY config */ + e_dbg("EEPROM not present\n"); + ret_val = -E1000_ERR_CONFIG; + } + } + + return ret_val; +} + +/** + * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, remove the link. + **/ +static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw) +{ + /* If the management interface is not enabled, then power down */ + if (!(hw->mac.ops.check_mng_mode(hw) || + hw->phy.ops.check_reset_block(hw))) + e1000_power_down_phy_copper(hw); +} + +/** + * e1000_clear_hw_cntrs_ich8lan - Clear statistical counters + * @hw: pointer to the HW structure + * + * Clears hardware counters specific to the silicon family and calls + * clear_hw_cntrs_generic to clear all general purpose counters. 
+ **/ +static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw) +{ + u16 phy_data; + s32 ret_val; + + e1000e_clear_hw_cntrs_base(hw); + + er32(ALGNERRC); + er32(RXERRC); + er32(TNCRS); + er32(CEXTERR); + er32(TSCTC); + er32(TSCTFC); + + er32(MGTPRC); + er32(MGTPDC); + er32(MGTPTC); + + er32(IAC); + er32(ICRXOC); + + /* Clear PHY statistics registers */ + if ((hw->phy.type == e1000_phy_82578) || + (hw->phy.type == e1000_phy_82579) || + (hw->phy.type == e1000_phy_i217) || + (hw->phy.type == e1000_phy_82577)) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + ret_val = hw->phy.ops.set_page(hw, + HV_STATS_PAGE << IGP_PAGE_SHIFT); + if (ret_val) + goto release; + hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); + hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); +release: + hw->phy.ops.release(hw); + } +} + +static const struct e1000_mac_operations ich8_mac_ops = { + /* check_mng_mode dependent on mac type */ + .check_for_link = e1000_check_for_copper_link_ich8lan, + /* cleanup_led dependent on mac type */ + .clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan, + .get_bus_info = e1000_get_bus_info_ich8lan, + .set_lan_id = e1000_set_lan_id_single_port, + .get_link_up_info = e1000_get_link_up_info_ich8lan, + /* led_on dependent on mac type */ + /* led_off dependent on mac type */ + .update_mc_addr_list = e1000e_update_mc_addr_list_generic, + .reset_hw = e1000_reset_hw_ich8lan, + .init_hw = e1000_init_hw_ich8lan, + .setup_link = e1000_setup_link_ich8lan, + .setup_physical_interface = e1000_setup_copper_link_ich8lan, + /* id_led_init dependent on mac type */ + .config_collision_dist = e1000e_config_collision_dist_generic, + .rar_set = e1000e_rar_set_generic, + .rar_get_count = e1000e_rar_get_count_generic, +}; + +static const struct e1000_phy_operations ich8_phy_ops = { + .acquire = e1000_acquire_swflag_ich8lan, + .check_reset_block = e1000_check_reset_block_ich8lan, + .commit = NULL, + .get_cfg_done = e1000_get_cfg_done_ich8lan, + .get_cable_length = e1000e_get_cable_length_igp_2, + .read_reg = e1000e_read_phy_reg_igp, + .release = e1000_release_swflag_ich8lan, + .reset = e1000_phy_hw_reset_ich8lan, + .set_d0_lplu_state = e1000_set_d0_lplu_state_ich8lan, + .set_d3_lplu_state = e1000_set_d3_lplu_state_ich8lan, + .write_reg = e1000e_write_phy_reg_igp, +}; + +static const struct e1000_nvm_operations ich8_nvm_ops = { + .acquire = e1000_acquire_nvm_ich8lan, + .read = e1000_read_nvm_ich8lan, + .release = e1000_release_nvm_ich8lan, + .reload = e1000e_reload_nvm_generic, + .update = e1000_update_nvm_checksum_ich8lan, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate = e1000_validate_nvm_checksum_ich8lan, + .write = e1000_write_nvm_ich8lan, +}; + +static const struct e1000_nvm_operations spt_nvm_ops = { + .acquire = e1000_acquire_nvm_ich8lan, + .release = 
e1000_release_nvm_ich8lan, + .read = e1000_read_nvm_spt, + .update = e1000_update_nvm_checksum_spt, + .reload = e1000e_reload_nvm_generic, + .valid_led_default = e1000_valid_led_default_ich8lan, + .validate = e1000_validate_nvm_checksum_ich8lan, + .write = e1000_write_nvm_ich8lan, +}; + +const struct e1000_info e1000_ich8_info = { + .mac = e1000_ich8lan, + .flags = FLAG_HAS_WOL + | FLAG_IS_ICH + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 8, + .max_hw_frame_size = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_ich9_info = { + .mac = e1000_ich9lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 18, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_ich10_info = { + .mac = e1000_ich10lan, + .flags = FLAG_HAS_JUMBO_FRAMES + | FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_APME_IN_WUC, + .pba = 18, + .max_hw_frame_size = DEFAULT_JUMBO, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch_info = { + .mac = e1000_pchlan, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_DISABLE_FC_PAUSE_TIME /* errata */ + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS, + .pba = 26, + .max_hw_frame_size = 4096, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch2_info = { + .mac = e1000_pch2lan, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9022, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch_lpt_info = { + .mac = e1000_pch_lpt, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9022, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &ich8_nvm_ops, +}; + +const struct e1000_info e1000_pch_spt_info = { + .mac = e1000_pch_spt, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9022, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &spt_nvm_ops, +}; diff --git a/addons/e1000e/src/4.4.180/ich8lan.h b/addons/e1000e/src/4.4.180/ich8lan.h new file mode 100644 index 00000000..34c551e3 --- 
/dev/null +++ b/addons/e1000e/src/4.4.180/ich8lan.h @@ -0,0 +1,310 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_ICH8LAN_H_ +#define _E1000E_ICH8LAN_H_ + +#define ICH_FLASH_GFPREG 0x0000 +#define ICH_FLASH_HSFSTS 0x0004 +#define ICH_FLASH_HSFCTL 0x0006 +#define ICH_FLASH_FADDR 0x0008 +#define ICH_FLASH_FDATA0 0x0010 +#define ICH_FLASH_PR0 0x0074 + +/* Requires up to 10 seconds when MNG might be accessing part. */ +#define ICH_FLASH_READ_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_WRITE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_ERASE_COMMAND_TIMEOUT 10000000 +#define ICH_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF +#define ICH_FLASH_CYCLE_REPEAT_COUNT 10 + +#define ICH_CYCLE_READ 0 +#define ICH_CYCLE_WRITE 2 +#define ICH_CYCLE_ERASE 3 + +#define FLASH_GFPREG_BASE_MASK 0x1FFF +#define FLASH_SECTOR_ADDR_SHIFT 12 + +#define ICH_FLASH_SEG_SIZE_256 256 +#define ICH_FLASH_SEG_SIZE_4K 4096 +#define ICH_FLASH_SEG_SIZE_8K 8192 +#define ICH_FLASH_SEG_SIZE_64K 65536 + +#define E1000_ICH_FWSM_RSPCIPHY 0x00000040 /* Reset PHY on PCI Reset */ +/* FW established a valid mode */ +#define E1000_ICH_FWSM_FW_VALID 0x00008000 +#define E1000_ICH_FWSM_PCIM2PCI 0x01000000 /* ME PCIm-to-PCI active */ +#define E1000_ICH_FWSM_PCIM2PCI_COUNT 2000 + +#define E1000_ICH_MNG_IAMT_MODE 0x2 + +#define E1000_FWSM_WLOCK_MAC_MASK 0x0380 +#define E1000_FWSM_WLOCK_MAC_SHIFT 7 +#define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */ + +/* Shared Receive Address Registers */ +#define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) +#define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8)) + +#define E1000_H2ME 0x05B50 /* Host to ME */ +#define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */ +#define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */ + +#define ID_LED_DEFAULT_ICH8LAN ((ID_LED_DEF1_DEF2 << 12) | \ + (ID_LED_OFF1_OFF2 << 8) | \ + (ID_LED_OFF1_ON2 << 4) | \ + (ID_LED_DEF1_DEF2)) + +#define E1000_ICH_NVM_SIG_WORD 0x13 +#define E1000_ICH_NVM_SIG_MASK 0xC000 +#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0 +#define E1000_ICH_NVM_SIG_VALUE 0x80 + +#define E1000_ICH8_LAN_INIT_TIMEOUT 1500 + +/* FEXT register bit definition */ +#define E1000_FEXT_PHY_CABLE_DISCONNECTED 0x00000004 + +#define E1000_FEXTNVM_SW_CONFIG 1 +#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* different on ICH8M */ + +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK 0x0C000000 +#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC 0x08000000 + +#define E1000_FEXTNVM4_BEACON_DURATION_MASK 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_8USEC 0x7 +#define E1000_FEXTNVM4_BEACON_DURATION_16USEC 0x3 + +#define E1000_FEXTNVM6_REQ_PLL_CLK 0x00000100 +#define E1000_FEXTNVM6_ENABLE_K1_ENTRY_CONDITION 0x00000200 +#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000 +/* bit for disabling packet buffer read 
*/ +#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000 +#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004 +#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020 +#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800 +#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000 +#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200 +#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000 + +/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */ +#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000 + +#define K1_ENTRY_LATENCY 0 +#define K1_MIN_TIME 1 +#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */ +#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */ +#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */ +#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29) +#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL + +#define E1000_ICH_RAR_ENTRIES 7 +#define E1000_PCH2_RAR_ENTRIES 5 /* RAR[0], SHRA[0-3] */ +#define E1000_PCH_LPT_RAR_ENTRIES 12 /* RAR[0], SHRA[0-10] */ + +#define PHY_PAGE_SHIFT 5 +#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \ + ((reg) & MAX_PHY_REG_ADDRESS)) +#define IGP3_KMRN_DIAG PHY_REG(770, 19) /* KMRN Diagnostic */ +#define IGP3_VR_CTRL PHY_REG(776, 18) /* Voltage Regulator Control */ + +#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 +#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300 +#define IGP3_VR_CTRL_MODE_SHUTDOWN 0x0200 + +/* PHY Wakeup Registers and defines */ +#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17) +#define BM_RCTL PHY_REG(BM_WUC_PAGE, 0) +#define BM_WUC PHY_REG(BM_WUC_PAGE, 1) +#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2) +#define BM_WUS PHY_REG(BM_WUC_PAGE, 3) +#define BM_RAR_L(_i) (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2))) +#define BM_RAR_M(_i) (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2))) +#define BM_RAR_H(_i) (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2))) +#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2))) +#define BM_MTA(_i) (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1))) + +#define BM_RCTL_UPE 0x0001 /* Unicast Promiscuous Mode */ +#define BM_RCTL_MPE 0x0002 /* Multicast Promiscuous Mode */ +#define BM_RCTL_MO_SHIFT 3 /* Multicast Offset Shift */ +#define BM_RCTL_MO_MASK (3 << 3) /* Multicast Offset Mask */ +#define BM_RCTL_BAM 0x0020 /* Broadcast Accept Mode */ +#define BM_RCTL_PMCF 0x0040 /* Pass MAC Control Frames */ +#define BM_RCTL_RFCE 0x0080 /* Rx Flow Control Enable */ + +#define HV_LED_CONFIG PHY_REG(768, 30) /* LED Configuration */ +#define HV_MUX_DATA_CTRL PHY_REG(776, 16) +#define HV_MUX_DATA_CTRL_GEN_TO_MAC 0x0400 +#define HV_MUX_DATA_CTRL_FORCE_SPEED 0x0004 +#define HV_STATS_PAGE 778 +/* Half-duplex collision counts */ +#define HV_SCC_UPPER PHY_REG(HV_STATS_PAGE, 16) /* Single Collision */ +#define HV_SCC_LOWER PHY_REG(HV_STATS_PAGE, 17) +#define HV_ECOL_UPPER PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. 
*/ +#define HV_ECOL_LOWER PHY_REG(HV_STATS_PAGE, 19) +#define HV_MCC_UPPER PHY_REG(HV_STATS_PAGE, 20) /* Multiple Collision */ +#define HV_MCC_LOWER PHY_REG(HV_STATS_PAGE, 21) +#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision */ +#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24) +#define HV_COLC_UPPER PHY_REG(HV_STATS_PAGE, 25) /* Collision */ +#define HV_COLC_LOWER PHY_REG(HV_STATS_PAGE, 26) +#define HV_DC_UPPER PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */ +#define HV_DC_LOWER PHY_REG(HV_STATS_PAGE, 28) +#define HV_TNCRS_UPPER PHY_REG(HV_STATS_PAGE, 29) /* Tx with no CRS */ +#define HV_TNCRS_LOWER PHY_REG(HV_STATS_PAGE, 30) + +#define E1000_FCRTV_PCH 0x05F40 /* PCH Flow Control Refresh Timer Value */ + +#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */ +#define E1000_NVM_K1_ENABLE 0x1 /* NVM Enable K1 bit */ + +/* SMBus Control Phy Register */ +#define CV_SMB_CTRL PHY_REG(769, 23) +#define CV_SMB_CTRL_FORCE_SMBUS 0x0001 + +/* I218 Ultra Low Power Configuration 1 Register */ +#define I218_ULP_CONFIG1 PHY_REG(779, 16) +#define I218_ULP_CONFIG1_START 0x0001 /* Start auto ULP config */ +#define I218_ULP_CONFIG1_IND 0x0004 /* Pwr up from ULP indication */ +#define I218_ULP_CONFIG1_STICKY_ULP 0x0010 /* Set sticky ULP mode */ +#define I218_ULP_CONFIG1_INBAND_EXIT 0x0020 /* Inband on ULP exit */ +#define I218_ULP_CONFIG1_WOL_HOST 0x0040 /* WoL Host on ULP exit */ +#define I218_ULP_CONFIG1_RESET_TO_SMBUS 0x0100 /* Reset to SMBus mode */ +#define I218_ULP_CONFIG1_DISABLE_SMB_PERST 0x1000 /* Disable on PERST# */ + +/* SMBus Address Phy Register */ +#define HV_SMB_ADDR PHY_REG(768, 26) +#define HV_SMB_ADDR_MASK 0x007F +#define HV_SMB_ADDR_PEC_EN 0x0200 +#define HV_SMB_ADDR_VALID 0x0080 +#define HV_SMB_ADDR_FREQ_MASK 0x1100 +#define HV_SMB_ADDR_FREQ_LOW_SHIFT 8 +#define HV_SMB_ADDR_FREQ_HIGH_SHIFT 12 + +/* Strapping Option Register - RO */ +#define E1000_STRAP 0x0000C +#define E1000_STRAP_SMBUS_ADDRESS_MASK 0x00FE0000 +#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17 +#define E1000_STRAP_SMT_FREQ_MASK 0x00003000 +#define E1000_STRAP_SMT_FREQ_SHIFT 12 + +/* OEM Bits Phy Register */ +#define HV_OEM_BITS PHY_REG(768, 25) +#define HV_OEM_BITS_LPLU 0x0004 /* Low Power Link Up */ +#define HV_OEM_BITS_GBE_DIS 0x0040 /* Gigabit Disable */ +#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */ + +/* KMRN Mode Control */ +#define HV_KMRN_MODE_CTRL PHY_REG(769, 16) +#define HV_KMRN_MDIO_SLOW 0x0400 + +/* KMRN FIFO Control and Status */ +#define HV_KMRN_FIFO_CTRLSTA PHY_REG(770, 16) +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK 0x7000 +#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT 12 + +/* PHY Power Management Control */ +#define HV_PM_CTRL PHY_REG(770, 17) +#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100 +#define HV_PM_CTRL_K1_ENABLE 0x4000 + +#define SW_FLAG_TIMEOUT 1000 /* SW Semaphore flag timeout in ms */ + +/* Inband Control */ +#define I217_INBAND_CTRL PHY_REG(770, 18) +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK 0x3F00 +#define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT 8 + +/* Low Power Idle GPIO Control */ +#define I217_LPI_GPIO_CTRL PHY_REG(772, 18) +#define I217_LPI_GPIO_CTRL_AUTO_EN_LPI 0x0800 + +/* PHY Low Power Idle Control */ +#define I82579_LPI_CTRL PHY_REG(772, 20) +#define I82579_LPI_CTRL_100_ENABLE 0x2000 +#define I82579_LPI_CTRL_1000_ENABLE 0x4000 +#define I82579_LPI_CTRL_ENABLE_MASK 0x6000 +#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT 0x80 + +/* Extended Management Interface (EMI) Registers */ +#define I82579_EMI_ADDR 0x10 +#define I82579_EMI_DATA 
0x11 +#define I82579_LPI_UPDATE_TIMER 0x4805 /* in 40ns units + 40 ns base value */ +#define I82579_MSE_THRESHOLD 0x084F /* 82579 Mean Square Error Threshold */ +#define I82577_MSE_THRESHOLD 0x0887 /* 82577 Mean Square Error Threshold */ +#define I82579_MSE_LINK_DOWN 0x2411 /* MSE count before dropping link */ +#define I82579_RX_CONFIG 0x3412 /* Receive configuration */ +#define I82579_LPI_PLL_SHUT 0x4412 /* LPI PLL Shut Enable */ +#define I82579_EEE_PCS_STATUS 0x182E /* IEEE MMD Register 3.1 >> 8 */ +#define I82579_EEE_CAPABILITY 0x0410 /* IEEE MMD Register 3.20 */ +#define I82579_EEE_ADVERTISEMENT 0x040E /* IEEE MMD Register 7.60 */ +#define I82579_EEE_LP_ABILITY 0x040F /* IEEE MMD Register 7.61 */ +#define I82579_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE */ +#define I82579_EEE_1000_SUPPORTED (1 << 2) /* 1000BaseTx EEE */ +#define I82579_LPI_100_PLL_SHUT (1 << 2) /* 100M LPI PLL Shut Enabled */ +#define I217_EEE_PCS_STATUS 0x9401 /* IEEE MMD Register 3.1 */ +#define I217_EEE_CAPABILITY 0x8000 /* IEEE MMD Register 3.20 */ +#define I217_EEE_ADVERTISEMENT 0x8001 /* IEEE MMD Register 7.60 */ +#define I217_EEE_LP_ABILITY 0x8002 /* IEEE MMD Register 7.61 */ +#define I217_RX_CONFIG 0xB20C /* Receive configuration */ + +#define E1000_EEE_RX_LPI_RCVD 0x0400 /* Tx LP idle received */ +#define E1000_EEE_TX_LPI_RCVD 0x0800 /* Rx LP idle received */ + +/* Intel Rapid Start Technology Support */ +#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70) +#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 +#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) +#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000 +#define I217_CGFREG PHY_REG(772, 29) +#define I217_CGFREG_ENABLE_MTA_RESET 0x0002 +#define I217_MEMPWR PHY_REG(772, 26) +#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 + +/* Receive Address Initial CRC Calculation */ +#define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) + +/* Latency Tolerance Reporting */ +#define E1000_LTRV 0x000F8 +#define E1000_LTRV_SCALE_MAX 5 +#define E1000_LTRV_SCALE_FACTOR 5 +#define E1000_LTRV_REQ_SHIFT 15 +#define E1000_LTRV_NOSNOOP_SHIFT 16 +#define E1000_LTRV_SEND (1 << 30) + +/* Proprietary Latency Tolerance Reporting PCI Capability */ +#define E1000_PCI_LTR_CAP_LPT 0xA8 + +void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw); +void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw, + bool state); +void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw); +void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw); +void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw); +void e1000_resume_workarounds_pchlan(struct e1000_hw *hw); +s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable); +void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw); +s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable); +s32 e1000_read_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 *data); +s32 e1000_write_emi_reg_locked(struct e1000_hw *hw, u16 addr, u16 data); +s32 e1000_set_eee_pchlan(struct e1000_hw *hw); +s32 e1000_enable_ulp_lpt_lp(struct e1000_hw *hw, bool to_sx); +#endif /* _E1000E_ICH8LAN_H_ */ diff --git a/addons/e1000e/src/4.4.180/mac.c b/addons/e1000e/src/4.4.180/mac.c new file mode 100644 index 00000000..fe133f33 --- /dev/null +++ b/addons/e1000e/src/4.4.180/mac.c @@ -0,0 +1,1805 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "e1000.h" + +/** + * e1000e_get_bus_info_pcie - Get PCIe bus information + * @hw: pointer to the HW structure + * + * Determines and stores the system bus information for a particular + * network interface. The following bus information is determined and stored: + * bus speed, bus width, type (PCIe), and PCIe function. + **/ +s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + struct e1000_bus_info *bus = &hw->bus; + struct e1000_adapter *adapter = hw->adapter; + u16 pcie_link_status, cap_offset; + + cap_offset = adapter->pdev->pcie_cap; + if (!cap_offset) { + bus->width = e1000_bus_width_unknown; + } else { + pci_read_config_word(adapter->pdev, + cap_offset + PCIE_LINK_STATUS, + &pcie_link_status); + bus->width = (enum e1000_bus_width)((pcie_link_status & + PCIE_LINK_WIDTH_MASK) >> + PCIE_LINK_WIDTH_SHIFT); + } + + mac->ops.set_lan_id(hw); + + return 0; +} + +/** + * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * + * @hw: pointer to the HW structure + * + * Determines the LAN function id by reading memory-mapped registers + * and swaps the port value if requested. + **/ +void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + u32 reg; + + /* The status register reports the correct function number + * for the device regardless of function swap state. + */ + reg = er32(STATUS); + bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT; +} + +/** + * e1000_set_lan_id_single_port - Set LAN id for a single port device + * @hw: pointer to the HW structure + * + * Sets the LAN function id to zero for a single port device. + **/ +void e1000_set_lan_id_single_port(struct e1000_hw *hw) +{ + struct e1000_bus_info *bus = &hw->bus; + + bus->func = 0; +} + +/** + * e1000_clear_vfta_generic - Clear VLAN filter table + * @hw: pointer to the HW structure + * + * Clears the register array which contains the VLAN filter table by + * setting all the values to 0. + **/ +void e1000_clear_vfta_generic(struct e1000_hw *hw) +{ + u32 offset; + + for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) { + E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0); + e1e_flush(); + } +} + +/** + * e1000_write_vfta_generic - Write value to VLAN filter table + * @hw: pointer to the HW structure + * @offset: register offset in VLAN filter table + * @value: register value written to VLAN filter table + * + * Writes value at the given offset in the register array which stores + * the VLAN filter table. 
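+ *
+ * A minimal usage sketch (assuming the usual e1000 VFTA layout of
+ * E1000_VLAN_FILTER_TBL_SIZE 32-bit registers, one bit per VLAN id):
+ * VLAN id "vid" lives at offset vid >> 5, bit vid & 0x1F, so enabling
+ * VLAN 100 would look like
+ *
+ *   e1000_write_vfta_generic(hw, 100 >> 5, 1 << (100 & 0x1F));
+ *
+ * i.e. offset 3, bit 4. A real caller would read-modify-write so that
+ * other VLANs mapped to the same register are preserved.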
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+	e1e_flush();
+}
+
+/**
+ * e1000e_init_rx_addrs - Initialize receive addresses
+ * @hw: pointer to the HW structure
+ * @rar_count: receive address registers
+ *
+ * Setup the receive address registers by setting the base receive address
+ * register to the device's MAC address and clearing all the other receive
+ * address registers to 0.
+ **/
+void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
+{
+	u32 i;
+	u8 mac_addr[ETH_ALEN] = { 0 };
+
+	/* Setup the receive address */
+	e_dbg("Programming MAC Address into RAR[0]\n");
+
+	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+	/* Zero out the other (rar_entry_count - 1) receive addresses */
+	e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
+	for (i = 1; i < rar_count; i++)
+		hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the NVM for an alternate MAC address. An alternate MAC address
+ * can be set up by pre-boot software and must be treated like a permanent
+ * address and must override the actual permanent MAC address. If an
+ * alternate MAC address is found it is programmed into RAR0, replacing
+ * the permanent address that was installed into RAR0 by the Si on reset.
+ * This function will return SUCCESS unless it encounters an error while
+ * reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+	u32 i;
+	s32 ret_val;
+	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+	u8 alt_mac_addr[ETH_ALEN];
+
+	ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
+	if (ret_val)
+		return ret_val;
+
+	/* not supported on 82573 */
+	if (hw->mac.type == e1000_82573)
+		return 0;
+
+	ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+				 &nvm_alt_mac_addr_offset);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		return ret_val;
+	}
+
+	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+	    (nvm_alt_mac_addr_offset == 0x0000))
+		/* There is no Alternate MAC Address */
+		return 0;
+
+	if (hw->bus.func == E1000_FUNC_1)
+		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+	for (i = 0; i < ETH_ALEN; i += 2) {
+		offset = nvm_alt_mac_addr_offset + (i >> 1);
+		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+		if (ret_val) {
+			e_dbg("NVM Read Error\n");
+			return ret_val;
+		}
+
+		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+	}
+
+	/* if multicast bit is set, the alternate address will not be used */
+	if (is_multicast_ether_addr(alt_mac_addr)) {
+		e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+		return 0;
+	}
+
+	/* We have a valid alternate MAC address, and we want to treat it the
+	 * same as the normal permanent MAC address stored by the HW into the
+	 * RAR. Do this by mapping this address into RAR0.
+	 */
+	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+
+	return 0;
+}
+
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw)
+{
+	return hw->mac.rar_entry_count;
+}
+
+/**
+ * e1000e_rar_set_generic - Set receive address register
+ * @hw: pointer to the HW structure
+ * @addr: pointer to the receive address
+ * @index: receive address array register
+ *
+ * Sets the receive address array register at index to the address passed
+ * in by addr.
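+ *
+ * A worked example of the little-endian packing done below: for the
+ * (arbitrary) address 00:1B:21:0A:0B:0C, i.e. addr[0] = 0x00 through
+ * addr[5] = 0x0C,
+ *
+ *   rar_low  = 0x00 | (0x1B << 8) | (0x21 << 16) | (0x0A << 24)
+ *            = 0x0A211B00
+ *   rar_high = 0x0B | (0x0C << 8) = 0x00000C0B, plus E1000_RAH_AV
+ *
+ * Any non-zero address gets the Address Valid (AV) bit set.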
+ **/ +int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index) +{ + u32 rar_low, rar_high; + + /* HW expects these in little endian so we reverse the byte order + * from network order (big endian) to little endian + */ + rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) | + ((u32)addr[2] << 16) | ((u32)addr[3] << 24)); + + rar_high = ((u32)addr[4] | ((u32)addr[5] << 8)); + + /* If MAC address zero, no need to set the AV bit */ + if (rar_low || rar_high) + rar_high |= E1000_RAH_AV; + + /* Some bridges will combine consecutive 32-bit writes into + * a single burst write, which will malfunction on some parts. + * The flushes avoid this. + */ + ew32(RAL(index), rar_low); + e1e_flush(); + ew32(RAH(index), rar_high); + e1e_flush(); + + return 0; +} + +/** + * e1000_hash_mc_addr - Generate a multicast hash value + * @hw: pointer to the HW structure + * @mc_addr: pointer to a multicast address + * + * Generates a multicast address hash value which is used to determine + * the multicast filter table array address and new table value. + **/ +static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) +{ + u32 hash_value, hash_mask; + u8 bit_shift = 0; + + /* Register count multiplied by bits per register */ + hash_mask = (hw->mac.mta_reg_count * 32) - 1; + + /* For a mc_filter_type of 0, bit_shift is the number of left-shifts + * where 0xFF would still fall within the hash mask. + */ + while (hash_mask >> bit_shift != 0xFF) + bit_shift++; + + /* The portion of the address that is used for the hash table + * is determined by the mc_filter_type setting. + * The algorithm is such that there is a total of 8 bits of shifting. + * The bit_shift for a mc_filter_type of 0 represents the number of + * left-shifts where the MSB of mc_addr[5] would still fall within + * the hash_mask. Case 0 does this exactly. Since there are a total + * of 8 bits of shifting, then mc_addr[4] will shift right the + * remaining number of bits. Thus 8 - bit_shift. The rest of the + * cases are a variation of this algorithm...essentially raising the + * number of bits to shift mc_addr[5] left, while still keeping the + * 8-bit shifting total. + * + * For example, given the following Destination MAC Address and an + * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask), + * we can see that the bit_shift for case 0 is 4. These are the hash + * values resulting from each mc_filter_type... + * [0] [1] [2] [3] [4] [5] + * 01 AA 00 12 34 56 + * LSB MSB + * + * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563 + * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6 + * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163 + * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634 + */ + switch (hw->mac.mc_filter_type) { + default: + case 0: + break; + case 1: + bit_shift += 1; + break; + case 2: + bit_shift += 2; + break; + case 3: + bit_shift += 4; + break; + } + + hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) | + (((u16)mc_addr[5]) << bit_shift))); + + return hash_value; +} + +/** + * e1000e_update_mc_addr_list_generic - Update Multicast addresses + * @hw: pointer to the HW structure + * @mc_addr_list: array of multicast addresses to program + * @mc_addr_count: number of multicast addresses to program + * + * Updates entire Multicast Table Array. + * The caller must have a packed mc_addr_list of multicast addresses. 
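+ *
+ * Continuing the worked example from e1000_hash_mc_addr() above: with
+ * 128 MTA registers (hash_mask = 0xFFF, bit_shift = 4) and
+ * mc_filter_type 0, the address 01:AA:00:12:34:56 yields
+ *
+ *   hash_value = ((0x34 >> 4) | ((u16)0x56 << 4)) & 0xFFF = 0x563
+ *   hash_reg   = (0x563 >> 5) & (128 - 1) = 43
+ *   hash_bit   = 0x563 & 0x1F = 3
+ *
+ * so the loop below sets bit 3 of mta_shadow[43] for that address.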
+ **/
+void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+                                        u8 *mc_addr_list, u32 mc_addr_count)
+{
+        u32 hash_value, hash_bit, hash_reg;
+        int i;
+
+        /* clear mta_shadow */
+        memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+        /* update mta_shadow from mc_addr_list */
+        for (i = 0; (u32)i < mc_addr_count; i++) {
+                hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
+
+                hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+                hash_bit = hash_value & 0x1F;
+
+                hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+                mc_addr_list += (ETH_ALEN);
+        }
+
+        /* replace the entire MTA table */
+        for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+                E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+        e1e_flush();
+}
+
+/**
+ * e1000e_clear_hw_cntrs_base - Clear base hardware counters
+ * @hw: pointer to the HW structure
+ *
+ * Clears the base hardware counters by reading the counter registers.
+ **/
+void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
+{
+        er32(CRCERRS);
+        er32(SYMERRS);
+        er32(MPC);
+        er32(SCC);
+        er32(ECOL);
+        er32(MCC);
+        er32(LATECOL);
+        er32(COLC);
+        er32(DC);
+        er32(SEC);
+        er32(RLEC);
+        er32(XONRXC);
+        er32(XONTXC);
+        er32(XOFFRXC);
+        er32(XOFFTXC);
+        er32(FCRUC);
+        er32(GPRC);
+        er32(BPRC);
+        er32(MPRC);
+        er32(GPTC);
+        er32(GORCL);
+        er32(GORCH);
+        er32(GOTCL);
+        er32(GOTCH);
+        er32(RNBC);
+        er32(RUC);
+        er32(RFC);
+        er32(ROC);
+        er32(RJC);
+        er32(TORL);
+        er32(TORH);
+        er32(TOTL);
+        er32(TOTH);
+        er32(TPR);
+        er32(TPT);
+        er32(MPTC);
+        er32(BPTC);
+}
+
+/**
+ * e1000e_check_for_copper_link - Check for link (Copper)
+ * @hw: pointer to the HW structure
+ *
+ * Checks to see if the link status of the hardware has changed.  If a
+ * change in link status has been detected, then we read the PHY registers
+ * to get the current speed/duplex if link exists.
+ *
+ * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
+ * up).
+ **/
+s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
+{
+        struct e1000_mac_info *mac = &hw->mac;
+        s32 ret_val;
+        bool link;
+
+        /* We only want to go out to the PHY registers to see if Auto-Neg
+         * has completed and/or if our link status has changed.  The
+         * get_link_status flag is set upon receiving a Link Status
+         * Change or Rx Sequence Error interrupt.
+         */
+        if (!mac->get_link_status)
+                return 1;
+
+        /* First we want to see if the MII Status Register reports
+         * link.  If so, then we want to get the current speed/duplex
+         * of the PHY.
+         */
+        ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+        if (ret_val)
+                return ret_val;
+
+        if (!link)
+                return 0;       /* No link detected */
+
+        mac->get_link_status = false;
+
+        /* Check if there was DownShift, must be checked
+         * immediately after link-up
+         */
+        e1000e_check_downshift(hw);
+
+        /* If we are forcing speed/duplex, then we simply return since
+         * we have already determined whether we have link or not.
+         */
+        if (!mac->autoneg)
+                return 1;
+
+        /* Auto-Neg is enabled.  Auto Speed Detection takes care
+         * of MAC speed/duplex configuration.  So we only need to
+         * configure Collision Distance in the MAC.
+         */
+        mac->ops.config_collision_dist(hw);
+
+        /* Configure Flow Control now that Auto-Neg has completed.
+         * First, we need to restore the desired flow control
+         * settings because we may have had to re-autoneg with a
+         * different link partner.
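+         *
+         * For example, if e1000_fc_full was requested but the new link
+         * partner advertises no PAUSE bits at all, the resolution in
+         * e1000e_config_fc_after_link_up() drops fc.current_mode to
+         * e1000_fc_none for this link, per the IEEE 802.3 pause table.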
+ */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + + return 1; +} + +/** + * e1000e_check_for_fiber_link - Check for link (Fiber) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_fiber_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), the cable is plugged in (we have signal), + * and our link partner is not trying to auto-negotiate with us (we + * are receiving idles or data), we need to force link up. We also + * need to give auto-negotiation time to complete, in case the cable + * was just plugged in. The autoneg_failed flag does this. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if ((ctrl & E1000_CTRL_SWDPIN1) && !(status & E1000_STATUS_LU) && + !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return 0; + } + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. */ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } + + return 0; +} + +/** + * e1000e_check_for_serdes_link - Check for link (Serdes) + * @hw: pointer to the HW structure + * + * Checks for link up on the hardware. If link is not up and we have + * a signal, then we need to force link up. + **/ +s32 e1000e_check_for_serdes_link(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 rxcw; + u32 ctrl; + u32 status; + s32 ret_val; + + ctrl = er32(CTRL); + status = er32(STATUS); + rxcw = er32(RXCW); + + /* If we don't have link (auto-negotiation failed or link partner + * cannot auto-negotiate), and our link partner is not trying to + * auto-negotiate with us (we are receiving idles or data), + * we need to force link up. We also need to give auto-negotiation + * time to complete. + */ + /* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */ + if (!(status & E1000_STATUS_LU) && !(rxcw & E1000_RXCW_C)) { + if (!mac->autoneg_failed) { + mac->autoneg_failed = true; + return 0; + } + e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n"); + + /* Disable auto-negotiation in the TXCW register */ + ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE)); + + /* Force link-up and also force full-duplex. */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD); + ew32(CTRL, ctrl); + + /* Configure Flow Control after forcing link up. 
*/ + ret_val = e1000e_config_fc_after_link_up(hw); + if (ret_val) { + e_dbg("Error configuring flow control\n"); + return ret_val; + } + } else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) { + /* If we are forcing link and we are receiving /C/ ordered + * sets, re-enable auto-negotiation in the TXCW register + * and disable forced link in the Device Control register + * in an attempt to auto-negotiate with our link partner. + */ + e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n"); + ew32(TXCW, mac->txcw); + ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); + + mac->serdes_has_link = true; + } else if (!(E1000_TXCW_ANE & er32(TXCW))) { + /* If we force link for non-auto-negotiation switch, check + * link status based on MAC synchronization for internal + * serdes media type. + */ + /* SYNCH bit and IV bit are sticky. */ + usleep_range(10, 20); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + e_dbg("SERDES: Link up - forced.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - force failed.\n"); + } + } + + if (E1000_TXCW_ANE & er32(TXCW)) { + status = er32(STATUS); + if (status & E1000_STATUS_LU) { + /* SYNCH bit and IV bit are sticky, so reread rxcw. */ + usleep_range(10, 20); + rxcw = er32(RXCW); + if (rxcw & E1000_RXCW_SYNCH) { + if (!(rxcw & E1000_RXCW_IV)) { + mac->serdes_has_link = true; + e_dbg("SERDES: Link up - autoneg completed successfully.\n"); + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - invalid codewords detected in autoneg.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - no sync.\n"); + } + } else { + mac->serdes_has_link = false; + e_dbg("SERDES: Link down - autoneg failed\n"); + } + } + + return 0; +} + +/** + * e1000_set_default_fc_generic - Set flow control default values + * @hw: pointer to the HW structure + * + * Read the EEPROM for the default values for flow control and store the + * values. + **/ +static s32 e1000_set_default_fc_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 nvm_data; + + /* Read and store word 0x0F of the EEPROM. This word contains bits + * that determine the hardware's default PAUSE (flow control) mode, + * a bit that determines whether the HW defaults to enabling or + * disabling auto-negotiation, and the direction of the + * SW defined pins. If there is no SW over-ride of the flow + * control setting, then the variable hw->fc will + * be initialized based on a value in the EEPROM. + */ + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data); + + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (!(nvm_data & NVM_WORD0F_PAUSE_MASK)) + hw->fc.requested_mode = e1000_fc_none; + else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR) + hw->fc.requested_mode = e1000_fc_tx_pause; + else + hw->fc.requested_mode = e1000_fc_full; + + return 0; +} + +/** + * e1000e_setup_link_generic - Setup flow control and link settings + * @hw: pointer to the HW structure + * + * Determines which flow control settings to use, then configures flow + * control. Calls the appropriate media-specific link configuration + * function. Assuming the adapter has a valid link partner, a valid link + * should be established. Assumes the hardware has previously been reset + * and the transmitter and receiver are not enabled. 
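+ *
+ * As a summary of the EEPROM default resolved by
+ * e1000_set_default_fc_generic() above (word 0x0F, NVM_INIT_CONTROL2_REG)
+ * when requested_mode is e1000_fc_default:
+ *
+ *   (nvm_data & NVM_WORD0F_PAUSE_MASK) == 0                  -> e1000_fc_none
+ *   (nvm_data & NVM_WORD0F_PAUSE_MASK) == NVM_WORD0F_ASM_DIR -> e1000_fc_tx_pause
+ *   any other PAUSE bits set                                 -> e1000_fc_full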
+ **/
+s32 e1000e_setup_link_generic(struct e1000_hw *hw)
+{
+        s32 ret_val;
+
+        /* In the case of the phy reset being blocked, we already have a link.
+         * We do not need to set it up again.
+         */
+        if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
+                return 0;
+
+        /* If requested flow control is set to default, set flow control
+         * based on the EEPROM flow control settings.
+         */
+        if (hw->fc.requested_mode == e1000_fc_default) {
+                ret_val = e1000_set_default_fc_generic(hw);
+                if (ret_val)
+                        return ret_val;
+        }
+
+        /* Save off the requested flow control mode for use later.  Depending
+         * on the link partner's capabilities, we may or may not use this mode.
+         */
+        hw->fc.current_mode = hw->fc.requested_mode;
+
+        e_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
+
+        /* Call the necessary media_type subroutine to configure the link. */
+        ret_val = hw->mac.ops.setup_physical_interface(hw);
+        if (ret_val)
+                return ret_val;
+
+        /* Initialize the flow control address, type, and PAUSE timer
+         * registers to their default values.  This is done even if flow
+         * control is disabled, because it does not hurt anything to
+         * initialize these registers.
+         */
+        e_dbg("Initializing the Flow Control address, type and timer regs\n");
+        ew32(FCT, FLOW_CONTROL_TYPE);
+        ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+        ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+        ew32(FCTTV, hw->fc.pause_time);
+
+        return e1000e_set_fc_watermarks(hw);
+}
+
+/**
+ * e1000_commit_fc_settings_generic - Configure flow control
+ * @hw: pointer to the HW structure
+ *
+ * Write the flow control settings to the Transmit Config Word Register (TXCW)
+ * based on the flow control settings in e1000_mac_info.
+ **/
+static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+        struct e1000_mac_info *mac = &hw->mac;
+        u32 txcw;
+
+        /* Check for a software override of the flow control settings, and
+         * setup the device accordingly.  If auto-negotiation is enabled, then
+         * software will have to set the "PAUSE" bits to the correct value in
+         * the Transmit Config Word Register (TXCW) and re-start auto-
+         * negotiation.  However, if auto-negotiation is disabled, then
+         * software will have to manually configure the two flow control enable
+         * bits in the CTRL register.
+         *
+         * The possible values of the "fc" parameter are:
+         *      0:  Flow control is completely disabled
+         *      1:  Rx flow control is enabled (we can receive pause frames,
+         *          but not send pause frames).
+         *      2:  Tx flow control is enabled (we can send pause frames but we
+         *          do not support receiving pause frames).
+         *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+         */
+        switch (hw->fc.current_mode) {
+        case e1000_fc_none:
+                /* Flow control completely disabled by a software over-ride. */
+                txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+                break;
+        case e1000_fc_rx_pause:
+                /* Rx Flow control is enabled and Tx Flow control is disabled
+                 * by a software over-ride. Since there really isn't a way to
+                 * advertise that we are capable of Rx Pause ONLY, we will
+                 * advertise that we support both symmetric and asymmetric Rx
+                 * PAUSE.  Later, we will disable the adapter's ability to send
+                 * PAUSE frames.
+                 */
+                txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+                break;
+        case e1000_fc_tx_pause:
+                /* Tx Flow control is enabled, and Rx Flow control is disabled,
+                 * by a software over-ride.
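+                 * Concretely, only the ASM_DIR bit is advertised in TXCW
+                 * below; setting the symmetric PAUSE bit as well would claim
+                 * Rx pause support that this mode does not provide.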
+                 */
+                txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+                break;
+        case e1000_fc_full:
+                /* Flow control (both Rx and Tx) is enabled by a software
+                 * over-ride.
+                 */
+                txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+                break;
+        default:
+                e_dbg("Flow control param set incorrectly\n");
+                return -E1000_ERR_CONFIG;
+        }
+
+        ew32(TXCW, txcw);
+        mac->txcw = txcw;
+
+        return 0;
+}
+
+/**
+ * e1000_poll_fiber_serdes_link_generic - Poll for link up
+ * @hw: pointer to the HW structure
+ *
+ * Polls for link up by reading the status register, if link fails to come
+ * up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+        struct e1000_mac_info *mac = &hw->mac;
+        u32 i, status;
+        s32 ret_val;
+
+        /* If we have a signal (the cable is plugged in, or assumed true for
+         * serdes media) then poll for a "Link-Up" indication in the Device
+         * Status Register.  Time-out if a link isn't seen in 500 milliseconds
+         * (Auto-negotiation should complete in less than 500 milliseconds
+         * even if the other end is doing it in SW).
+         */
+        for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+                usleep_range(10000, 20000);
+                status = er32(STATUS);
+                if (status & E1000_STATUS_LU)
+                        break;
+        }
+        if (i == FIBER_LINK_UP_LIMIT) {
+                e_dbg("Never got a valid link from auto-neg!!!\n");
+                mac->autoneg_failed = true;
+                /* AutoNeg failed to achieve a link, so we'll call
+                 * mac->check_for_link. This routine will force the
+                 * link up if we detect a signal. This will allow us to
+                 * communicate with non-autonegotiating link partners.
+                 */
+                ret_val = mac->ops.check_for_link(hw);
+                if (ret_val) {
+                        e_dbg("Error while checking for link\n");
+                        return ret_val;
+                }
+                mac->autoneg_failed = false;
+        } else {
+                mac->autoneg_failed = false;
+                e_dbg("Valid Link Found\n");
+        }
+
+        return 0;
+}
+
+/**
+ * e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
+ * @hw: pointer to the HW structure
+ *
+ * Configures collision distance and flow control for fiber and serdes
+ * links.  Upon successful setup, poll for link.
+ **/
+s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+        u32 ctrl;
+        s32 ret_val;
+
+        ctrl = er32(CTRL);
+
+        /* Take the link out of reset */
+        ctrl &= ~E1000_CTRL_LRST;
+
+        hw->mac.ops.config_collision_dist(hw);
+
+        ret_val = e1000_commit_fc_settings_generic(hw);
+        if (ret_val)
+                return ret_val;
+
+        /* Since auto-negotiation is enabled, take the link out of reset (the
+         * link will be in reset, because we previously reset the chip). This
+         * will restart auto-negotiation.  If auto-negotiation is successful
+         * then the link-up status bit will be set and the flow control enable
+         * bits (RFCE and TFCE) will be set according to their negotiated value.
+         */
+        e_dbg("Auto-negotiation enabled\n");
+
+        ew32(CTRL, ctrl);
+        e1e_flush();
+        usleep_range(1000, 2000);
+
+        /* For these adapters, the SW definable pin 1 is set when the optics
+         * detect a signal.  If we have a signal, then poll for a "Link-Up"
+         * indication.
+         */
+        if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+            (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
+                ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+        } else {
+                e_dbg("No signal detected\n");
+        }
+
+        return ret_val;
+}
+
+/**
+ * e1000e_config_collision_dist_generic - Configure collision distance
+ * @hw: pointer to the HW structure
+ *
+ * Configures the collision distance to the default value and is used
+ * during link setup.
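+ *
+ * A hedged sketch of the register update performed below, assuming the
+ * mainline constant values E1000_COLLISION_DISTANCE = 63 and
+ * E1000_COLD_SHIFT = 12 (they may differ in this tree):
+ *
+ *   tctl = (tctl & ~E1000_TCTL_COLD) | (63 << 12);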
+ **/
+void e1000e_config_collision_dist_generic(struct e1000_hw *hw)
+{
+        u32 tctl;
+
+        tctl = er32(TCTL);
+
+        tctl &= ~E1000_TCTL_COLD;
+        tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+        ew32(TCTL, tctl);
+        e1e_flush();
+}
+
+/**
+ * e1000e_set_fc_watermarks - Set flow control high/low watermarks
+ * @hw: pointer to the HW structure
+ *
+ * Sets the flow control high/low threshold (watermark) registers.  If
+ * flow control XON frame transmission is enabled, then set XON frame
+ * transmission as well.
+ **/
+s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
+{
+        u32 fcrtl = 0, fcrth = 0;
+
+        /* Set the flow control receive threshold registers.  Normally,
+         * these registers will be set to a default threshold that may be
+         * adjusted later by the driver's runtime code.  However, if the
+         * ability to transmit pause frames is not enabled, then these
+         * registers will be set to 0.
+         */
+        if (hw->fc.current_mode & e1000_fc_tx_pause) {
+                /* We need to set up the Receive Threshold high and low water
+                 * marks as well as (optionally) enabling the transmission of
+                 * XON frames.
+                 */
+                fcrtl = hw->fc.low_water;
+                if (hw->fc.send_xon)
+                        fcrtl |= E1000_FCRTL_XONE;
+
+                fcrth = hw->fc.high_water;
+        }
+        ew32(FCRTL, fcrtl);
+        ew32(FCRTH, fcrth);
+
+        return 0;
+}
+
+/**
+ * e1000e_force_mac_fc - Force the MAC's flow control settings
+ * @hw: pointer to the HW structure
+ *
+ * Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
+ * device control register to reflect the adapter settings.  TFCE and RFCE
+ * need to be explicitly set by software when a copper PHY is used because
+ * autonegotiation is managed by the PHY rather than the MAC.  Software must
+ * also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000e_force_mac_fc(struct e1000_hw *hw)
+{
+        u32 ctrl;
+
+        ctrl = er32(CTRL);
+
+        /* Because we didn't get link via the internal auto-negotiation
+         * mechanism (we either forced link or we got link via PHY
+         * auto-neg), we have to manually enable/disable transmit and
+         * receive flow control.
+         *
+         * The "Case" statement below enables/disables flow control
+         * according to the "hw->fc.current_mode" parameter.
+         *
+         * The possible values of the "fc" parameter are:
+         *      0:  Flow control is completely disabled
+         *      1:  Rx flow control is enabled (we can receive pause
+         *          frames but not send pause frames).
+         *      2:  Tx flow control is enabled (we can send pause frames
+         *          but we do not receive pause frames).
+         *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+         *  other:  No other values should be possible at this point.
+         */
+        e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+        switch (hw->fc.current_mode) {
+        case e1000_fc_none:
+                ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+                break;
+        case e1000_fc_rx_pause:
+                ctrl &= (~E1000_CTRL_TFCE);
+                ctrl |= E1000_CTRL_RFCE;
+                break;
+        case e1000_fc_tx_pause:
+                ctrl &= (~E1000_CTRL_RFCE);
+                ctrl |= E1000_CTRL_TFCE;
+                break;
+        case e1000_fc_full:
+                ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+                break;
+        default:
+                e_dbg("Flow control param set incorrectly\n");
+                return -E1000_ERR_CONFIG;
+        }
+
+        ew32(CTRL, ctrl);
+
+        return 0;
+}
+
+/**
+ * e1000e_config_fc_after_link_up - Configures flow control after link
+ * @hw: pointer to the HW structure
+ *
+ * Checks the status of auto-negotiation after link up to ensure that the
+ * speed and duplex were not forced.  If the link needed to be forced, then
+ * flow control needs to be forced also.
If auto-negotiation is enabled + * and did not fail, then we configure flow control based on our link + * partner. + **/ +s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val = 0; + u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg; + u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg; + u16 speed, duplex; + + /* Check for the case where we have fiber media and auto-neg failed + * so we had to force link. In this case, we need to force the + * configuration of the MAC to match the "fc" parameter. + */ + if (mac->autoneg_failed) { + if (hw->phy.media_type == e1000_media_type_fiber || + hw->phy.media_type == e1000_media_type_internal_serdes) + ret_val = e1000e_force_mac_fc(hw); + } else { + if (hw->phy.media_type == e1000_media_type_copper) + ret_val = e1000e_force_mac_fc(hw); + } + + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + + /* Check for the case where we have copper media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) { + /* Read the MII Status Register and check to see if AutoNeg + * has completed. We read this twice because this reg has + * some "sticky" (latched) bits. + */ + ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg); + if (ret_val) + return ret_val; + ret_val = e1e_rphy(hw, MII_BMSR, &mii_status_reg); + if (ret_val) + return ret_val; + + if (!(mii_status_reg & BMSR_ANEGCOMPLETE)) { + e_dbg("Copper PHY and Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (Address 4) and the Auto_Negotiation Base + * Page Ability Register (Address 5) to determine how + * flow control was negotiated. + */ + ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_nway_adv_reg); + if (ret_val) + return ret_val; + ret_val = e1e_rphy(hw, MII_LPA, &mii_nway_lp_ability_reg); + if (ret_val) + return ret_val; + + /* Two bits in the Auto Negotiation Advertisement Register + * (Address 4) and two bits in the Auto Negotiation Base + * Page Ability Register (Address 5) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. 
+ * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | E1000_fc_full + * + */ + if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && + (mii_nway_lp_ability_reg & LPA_PAUSE_CAP)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && + (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) && + (mii_nway_lp_ability_reg & LPA_PAUSE_CAP) && + (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) { + hw->fc.current_mode = e1000_fc_tx_pause; + e_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((mii_nway_adv_reg & ADVERTISE_PAUSE_CAP) && + (mii_nway_adv_reg & ADVERTISE_PAUSE_ASYM) && + !(mii_nway_lp_ability_reg & LPA_PAUSE_CAP) && + (mii_nway_lp_ability_reg & LPA_PAUSE_ASYM)) { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + e_dbg("Flow Control = NONE.\n"); + } + + /* Now we need to do one last check... If we auto- + * negotiated to HALF DUPLEX, flow control should not be + * enabled per IEEE 802.3 spec. + */ + ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex); + if (ret_val) { + e_dbg("Error getting link speed and duplex\n"); + return ret_val; + } + + if (duplex == HALF_DUPLEX) + hw->fc.current_mode = e1000_fc_none; + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. + */ + ret_val = e1000e_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + /* Check for the case where we have SerDes media and auto-neg is + * enabled. In this case, we need to check and see if Auto-Neg + * has completed, and if so, how the PHY and link partner has + * flow control configured. + */ + if ((hw->phy.media_type == e1000_media_type_internal_serdes) && + mac->autoneg) { + /* Read the PCS_LSTS and check to see if AutoNeg + * has completed. + */ + pcs_status_reg = er32(PCS_LSTAT); + + if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) { + e_dbg("PCS Auto Neg has not completed.\n"); + return ret_val; + } + + /* The AutoNeg process has completed, so we now need to + * read both the Auto Negotiation Advertisement + * Register (PCS_ANADV) and the Auto_Negotiation Base + * Page Ability Register (PCS_LPAB) to determine how + * flow control was negotiated. 
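+         * For example, if PCS_ANADV carries both PAUSE and ASM_DIR while
+         * PCS_LPAB carries ASM_DIR only, the resolution below yields
+         * e1000_fc_rx_pause: the partner may send PAUSE frames to us, but
+         * we must not send them.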
+ */ + pcs_adv_reg = er32(PCS_ANADV); + pcs_lp_ability_reg = er32(PCS_LPAB); + + /* Two bits in the Auto Negotiation Advertisement Register + * (PCS_ANADV) and two bits in the Auto Negotiation Base + * Page Ability Register (PCS_LPAB) determine flow control + * for both the PHY and the link partner. The following + * table, taken out of the IEEE 802.3ab/D6.0 dated March 25, + * 1999, describes these PAUSE resolution bits and how flow + * control is determined based upon these settings. + * NOTE: DC = Don't Care + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution + *-------|---------|-------|---------|-------------------- + * 0 | 0 | DC | DC | e1000_fc_none + * 0 | 1 | 0 | DC | e1000_fc_none + * 0 | 1 | 1 | 0 | e1000_fc_none + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + * 1 | 0 | 0 | DC | e1000_fc_none + * 1 | DC | 1 | DC | e1000_fc_full + * 1 | 1 | 0 | 0 | e1000_fc_none + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + * + * Are both PAUSE bits set to 1? If so, this implies + * Symmetric Flow Control is enabled at both ends. The + * ASM_DIR bits are irrelevant per the spec. + * + * For Symmetric Flow Control: + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | DC | 1 | DC | e1000_fc_full + * + */ + if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) { + /* Now we need to check if the user selected Rx ONLY + * of pause frames. In this case, we had to advertise + * FULL flow control because we could not advertise Rx + * ONLY. Hence, we must now check to see if we need to + * turn OFF the TRANSMISSION of PAUSE frames. + */ + if (hw->fc.requested_mode == e1000_fc_full) { + hw->fc.current_mode = e1000_fc_full; + e_dbg("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } + } + /* For receiving PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 0 | 1 | 1 | 1 | e1000_fc_tx_pause + */ + else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + (pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_tx_pause; + e_dbg("Flow Control = Tx PAUSE frames only.\n"); + } + /* For transmitting PAUSE frames ONLY. + * + * LOCAL DEVICE | LINK PARTNER + * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result + *-------|---------|-------|---------|-------------------- + * 1 | 1 | 0 | 1 | e1000_fc_rx_pause + */ + else if ((pcs_adv_reg & E1000_TXCW_PAUSE) && + (pcs_adv_reg & E1000_TXCW_ASM_DIR) && + !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) && + (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) { + hw->fc.current_mode = e1000_fc_rx_pause; + e_dbg("Flow Control = Rx PAUSE frames only.\n"); + } else { + /* Per the IEEE spec, at this point flow control + * should be disabled. + */ + hw->fc.current_mode = e1000_fc_none; + e_dbg("Flow Control = NONE.\n"); + } + + /* Now we call a subroutine to actually force the MAC + * controller to use the correct flow control settings. 
+ */ + pcs_ctrl_reg = er32(PCS_LCTL); + pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL; + ew32(PCS_LCTL, pcs_ctrl_reg); + + ret_val = e1000e_force_mac_fc(hw); + if (ret_val) { + e_dbg("Error forcing flow control settings\n"); + return ret_val; + } + } + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Read the status register for the current speed/duplex and store the current + * speed and duplex for copper connections. + **/ +s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex) +{ + u32 status; + + status = er32(STATUS); + if (status & E1000_STATUS_SPEED_1000) + *speed = SPEED_1000; + else if (status & E1000_STATUS_SPEED_100) + *speed = SPEED_100; + else + *speed = SPEED_10; + + if (status & E1000_STATUS_FD) + *duplex = FULL_DUPLEX; + else + *duplex = HALF_DUPLEX; + + e_dbg("%u Mbps, %s Duplex\n", + *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10, + *duplex == FULL_DUPLEX ? "Full" : "Half"); + + return 0; +} + +/** + * e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex + * @hw: pointer to the HW structure + * @speed: stores the current speed + * @duplex: stores the current duplex + * + * Sets the speed and duplex to gigabit full duplex (the only possible option) + * for fiber/serdes links. + **/ +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw __always_unused + *hw, u16 *speed, u16 *duplex) +{ + *speed = SPEED_1000; + *duplex = FULL_DUPLEX; + + return 0; +} + +/** + * e1000e_get_hw_semaphore - Acquire hardware semaphore + * @hw: pointer to the HW structure + * + * Acquire the HW semaphore to access the PHY or NVM + **/ +s32 e1000e_get_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + s32 timeout = hw->nvm.word_size + 1; + s32 i = 0; + + /* Get the SW semaphore */ + while (i < timeout) { + swsm = er32(SWSM); + if (!(swsm & E1000_SWSM_SMBI)) + break; + + usleep_range(50, 100); + i++; + } + + if (i == timeout) { + e_dbg("Driver can't access device - SMBI bit is set.\n"); + return -E1000_ERR_NVM; + } + + /* Get the FW semaphore. */ + for (i = 0; i < timeout; i++) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_SWESMBI); + + /* Semaphore acquired if bit latched */ + if (er32(SWSM) & E1000_SWSM_SWESMBI) + break; + + usleep_range(50, 100); + } + + if (i == timeout) { + /* Release semaphores */ + e1000e_put_hw_semaphore(hw); + e_dbg("Driver can't access the NVM\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_put_hw_semaphore - Release hardware semaphore + * @hw: pointer to the HW structure + * + * Release hardware semaphore used to access the PHY or NVM + **/ +void e1000e_put_hw_semaphore(struct e1000_hw *hw) +{ + u32 swsm; + + swsm = er32(SWSM); + swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); + ew32(SWSM, swsm); +} + +/** + * e1000e_get_auto_rd_done - Check for auto read completion + * @hw: pointer to the HW structure + * + * Check EEPROM for Auto Read done bit. 
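+ *
+ * A hedged usage sketch (reset entry points vary by MAC type): callers
+ * typically invoke this right after a device reset, before trusting any
+ * NVM-derived register state:
+ *
+ *   ret_val = e1000e_get_auto_rd_done(hw);
+ *   if (ret_val)
+ *           return ret_val;   (auto read timed out; registers may be stale)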
+ **/ +s32 e1000e_get_auto_rd_done(struct e1000_hw *hw) +{ + s32 i = 0; + + while (i < AUTO_READ_DONE_TIMEOUT) { + if (er32(EECD) & E1000_EECD_AUTO_RD) + break; + usleep_range(1000, 2000); + i++; + } + + if (i == AUTO_READ_DONE_TIMEOUT) { + e_dbg("Auto read by HW from NVM has not completed.\n"); + return -E1000_ERR_RESET; + } + + return 0; +} + +/** + * e1000e_valid_led_default - Verify a valid default LED config + * @hw: pointer to the HW structure + * @data: pointer to the NVM (EEPROM) + * + * Read the EEPROM for the current default LED configuration. If the + * LED configuration is not valid, set to a valid LED configuration. + **/ +s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data) +{ + s32 ret_val; + + ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) + *data = ID_LED_DEFAULT; + + return 0; +} + +/** + * e1000e_id_led_init_generic - + * @hw: pointer to the HW structure + * + **/ +s32 e1000e_id_led_init_generic(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + s32 ret_val; + const u32 ledctl_mask = 0x000000FF; + const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON; + const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF; + u16 data, i, temp; + const u16 led_mask = 0x0F; + + ret_val = hw->nvm.ops.valid_led_default(hw, &data); + if (ret_val) + return ret_val; + + mac->ledctl_default = er32(LEDCTL); + mac->ledctl_mode1 = mac->ledctl_default; + mac->ledctl_mode2 = mac->ledctl_default; + + for (i = 0; i < 4; i++) { + temp = (data >> (i << 2)) & led_mask; + switch (temp) { + case ID_LED_ON1_DEF2: + case ID_LED_ON1_ON2: + case ID_LED_ON1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_on << (i << 3); + break; + case ID_LED_OFF1_DEF2: + case ID_LED_OFF1_ON2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode1 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + switch (temp) { + case ID_LED_DEF1_ON2: + case ID_LED_ON1_ON2: + case ID_LED_OFF1_ON2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_on << (i << 3); + break; + case ID_LED_DEF1_OFF2: + case ID_LED_ON1_OFF2: + case ID_LED_OFF1_OFF2: + mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3)); + mac->ledctl_mode2 |= ledctl_off << (i << 3); + break; + default: + /* Do nothing */ + break; + } + } + + return 0; +} + +/** + * e1000e_setup_led_generic - Configures SW controllable LED + * @hw: pointer to the HW structure + * + * This prepares the SW controllable LED for use and saves the current state + * of the LED so it can be later restored. 
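+ *
+ * A hedged sketch of the usual identify-LED sequence built on this helper
+ * (the loop bound and delay are arbitrary for the example; declarations
+ * omitted):
+ *
+ *   hw->mac.ops.setup_led(hw);
+ *   for (i = 0; i < 10; i++) {
+ *           hw->mac.ops.led_on(hw);
+ *           msleep(500);
+ *           hw->mac.ops.led_off(hw);
+ *           msleep(500);
+ *   }
+ *   hw->mac.ops.cleanup_led(hw);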
+ **/ +s32 e1000e_setup_led_generic(struct e1000_hw *hw) +{ + u32 ledctl; + + if (hw->mac.ops.setup_led != e1000e_setup_led_generic) + return -E1000_ERR_CONFIG; + + if (hw->phy.media_type == e1000_media_type_fiber) { + ledctl = er32(LEDCTL); + hw->mac.ledctl_default = ledctl; + /* Turn off LED0 */ + ledctl &= ~(E1000_LEDCTL_LED0_IVRT | E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_LED0_MODE_MASK); + ledctl |= (E1000_LEDCTL_MODE_LED_OFF << + E1000_LEDCTL_LED0_MODE_SHIFT); + ew32(LEDCTL, ledctl); + } else if (hw->phy.media_type == e1000_media_type_copper) { + ew32(LEDCTL, hw->mac.ledctl_mode1); + } + + return 0; +} + +/** + * e1000e_cleanup_led_generic - Set LED config to default operation + * @hw: pointer to the HW structure + * + * Remove the current LED configuration and set the LED configuration + * to the default value, saved from the EEPROM. + **/ +s32 e1000e_cleanup_led_generic(struct e1000_hw *hw) +{ + ew32(LEDCTL, hw->mac.ledctl_default); + return 0; +} + +/** + * e1000e_blink_led_generic - Blink LED + * @hw: pointer to the HW structure + * + * Blink the LEDs which are set to be on. + **/ +s32 e1000e_blink_led_generic(struct e1000_hw *hw) +{ + u32 ledctl_blink = 0; + u32 i; + + if (hw->phy.media_type == e1000_media_type_fiber) { + /* always blink LED0 for PCI-E fiber */ + ledctl_blink = E1000_LEDCTL_LED0_BLINK | + (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT); + } else { + /* Set the blink bit for each LED that's "on" (0x0E) + * (or "off" if inverted) in ledctl_mode2. The blink + * logic in hardware only works when mode is set to "on" + * so it must be changed accordingly when the mode is + * "off" and inverted. + */ + ledctl_blink = hw->mac.ledctl_mode2; + for (i = 0; i < 32; i += 8) { + u32 mode = (hw->mac.ledctl_mode2 >> i) & + E1000_LEDCTL_LED0_MODE_MASK; + u32 led_default = hw->mac.ledctl_default >> i; + + if ((!(led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_ON)) || + ((led_default & E1000_LEDCTL_LED0_IVRT) && + (mode == E1000_LEDCTL_MODE_LED_OFF))) { + ledctl_blink &= + ~(E1000_LEDCTL_LED0_MODE_MASK << i); + ledctl_blink |= (E1000_LEDCTL_LED0_BLINK | + E1000_LEDCTL_MODE_LED_ON) << i; + } + } + } + + ew32(LEDCTL, ledctl_blink); + + return 0; +} + +/** + * e1000e_led_on_generic - Turn LED on + * @hw: pointer to the HW structure + * + * Turn LED on. + **/ +s32 e1000e_led_on_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode2); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_led_off_generic - Turn LED off + * @hw: pointer to the HW structure + * + * Turn LED off. + **/ +s32 e1000e_led_off_generic(struct e1000_hw *hw) +{ + u32 ctrl; + + switch (hw->phy.media_type) { + case e1000_media_type_fiber: + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_SWDPIN0; + ctrl |= E1000_CTRL_SWDPIO0; + ew32(CTRL, ctrl); + break; + case e1000_media_type_copper: + ew32(LEDCTL, hw->mac.ledctl_mode1); + break; + default: + break; + } + + return 0; +} + +/** + * e1000e_set_pcie_no_snoop - Set PCI-express capabilities + * @hw: pointer to the HW structure + * @no_snoop: bitmap of snoop events + * + * Set the PCI-express register to snoop for events enabled in 'no_snoop'. 
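+ *
+ * A minimal usage sketch, disabling snooping for every event covered by
+ * the PCIE_NO_SNOOP_ALL mask used in the body below (whether a given MAC
+ * type wants this is hardware-specific):
+ *
+ *   e1000e_set_pcie_no_snoop(hw, PCIE_NO_SNOOP_ALL);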
+ **/ +void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop) +{ + u32 gcr; + + if (no_snoop) { + gcr = er32(GCR); + gcr &= ~(PCIE_NO_SNOOP_ALL); + gcr |= no_snoop; + ew32(GCR, gcr); + } +} + +/** + * e1000e_disable_pcie_master - Disables PCI-express master access + * @hw: pointer to the HW structure + * + * Returns 0 if successful, else returns -10 + * (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused + * the master requests to be disabled. + * + * Disables PCI-Express master access and verifies there are no pending + * requests. + **/ +s32 e1000e_disable_pcie_master(struct e1000_hw *hw) +{ + u32 ctrl; + s32 timeout = MASTER_DISABLE_TIMEOUT; + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_GIO_MASTER_DISABLE; + ew32(CTRL, ctrl); + + while (timeout) { + if (!(er32(STATUS) & E1000_STATUS_GIO_MASTER_ENABLE)) + break; + usleep_range(100, 200); + timeout--; + } + + if (!timeout) { + e_dbg("Master requests are pending.\n"); + return -E1000_ERR_MASTER_REQUESTS_PENDING; + } + + return 0; +} + +/** + * e1000e_reset_adaptive - Reset Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Reset the Adaptive Interframe Spacing throttle to default values. + **/ +void e1000e_reset_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + if (!mac->adaptive_ifs) { + e_dbg("Not in Adaptive IFS mode!\n"); + return; + } + + mac->current_ifs_val = 0; + mac->ifs_min_val = IFS_MIN; + mac->ifs_max_val = IFS_MAX; + mac->ifs_step_size = IFS_STEP; + mac->ifs_ratio = IFS_RATIO; + + mac->in_ifs_mode = false; + ew32(AIT, 0); +} + +/** + * e1000e_update_adaptive - Update Adaptive Interframe Spacing + * @hw: pointer to the HW structure + * + * Update the Adaptive Interframe Spacing Throttle value based on the + * time between transmitted packets and time between collisions. + **/ +void e1000e_update_adaptive(struct e1000_hw *hw) +{ + struct e1000_mac_info *mac = &hw->mac; + + if (!mac->adaptive_ifs) { + e_dbg("Not in Adaptive IFS mode!\n"); + return; + } + + if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { + if (mac->tx_packet_delta > MIN_NUM_XMITS) { + mac->in_ifs_mode = true; + if (mac->current_ifs_val < mac->ifs_max_val) { + if (!mac->current_ifs_val) + mac->current_ifs_val = mac->ifs_min_val; + else + mac->current_ifs_val += + mac->ifs_step_size; + ew32(AIT, mac->current_ifs_val); + } + } + } else { + if (mac->in_ifs_mode && + (mac->tx_packet_delta <= MIN_NUM_XMITS)) { + mac->current_ifs_val = 0; + mac->in_ifs_mode = false; + ew32(AIT, 0); + } + } +} diff --git a/addons/e1000e/src/4.4.180/mac.h b/addons/e1000e/src/4.4.180/mac.h new file mode 100644 index 00000000..8284618a --- /dev/null +++ b/addons/e1000e/src/4.4.180/mac.h @@ -0,0 +1,68 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_MAC_H_ +#define _E1000E_MAC_H_ + +s32 e1000e_blink_led_generic(struct e1000_hw *hw); +s32 e1000e_check_for_copper_link(struct e1000_hw *hw); +s32 e1000e_check_for_fiber_link(struct e1000_hw *hw); +s32 e1000e_check_for_serdes_link(struct e1000_hw *hw); +s32 e1000e_cleanup_led_generic(struct e1000_hw *hw); +s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw); +s32 e1000e_disable_pcie_master(struct e1000_hw *hw); +s32 e1000e_force_mac_fc(struct e1000_hw *hw); +s32 e1000e_get_auto_rd_done(struct e1000_hw *hw); +s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw); +void e1000_set_lan_id_single_port(struct e1000_hw *hw); +s32 e1000e_get_hw_semaphore(struct e1000_hw *hw); +s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, + u16 *duplex); +s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, + u16 *speed, u16 *duplex); +s32 e1000e_id_led_init_generic(struct e1000_hw *hw); +s32 e1000e_led_on_generic(struct e1000_hw *hw); +s32 e1000e_led_off_generic(struct e1000_hw *hw); +void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw, + u8 *mc_addr_list, u32 mc_addr_count); +s32 e1000e_set_fc_watermarks(struct e1000_hw *hw); +s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw); +s32 e1000e_setup_led_generic(struct e1000_hw *hw); +s32 e1000e_setup_link_generic(struct e1000_hw *hw); +s32 e1000e_validate_mdi_setting_generic(struct e1000_hw *hw); +s32 e1000e_validate_mdi_setting_crossover_generic(struct e1000_hw *hw); + +void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw); +void e1000_clear_vfta_generic(struct e1000_hw *hw); +void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count); +void e1000e_put_hw_semaphore(struct e1000_hw *hw); +s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw); +void e1000e_reset_adaptive(struct e1000_hw *hw); +void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop); +void e1000e_update_adaptive(struct e1000_hw *hw); +void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value); + +void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw); +u32 e1000e_rar_get_count_generic(struct e1000_hw *hw); +int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index); +void e1000e_config_collision_dist_generic(struct e1000_hw *hw); + +#endif diff --git a/addons/e1000e/src/4.4.180/manage.c b/addons/e1000e/src/4.4.180/manage.c new file mode 100644 index 00000000..cc9b3bef --- /dev/null +++ b/addons/e1000e/src/4.4.180/manage.c @@ -0,0 +1,347 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "e1000.h" + +/** + * e1000_calculate_checksum - Calculate checksum for buffer + * @buffer: pointer to EEPROM + * @length: size of EEPROM to calculate a checksum for + * + * Calculates the checksum for some buffer on a specified length. The + * checksum calculated is returned. + **/ +static u8 e1000_calculate_checksum(u8 *buffer, u32 length) +{ + u32 i; + u8 sum = 0; + + if (!buffer) + return 0; + + for (i = 0; i < length; i++) + sum += buffer[i]; + + return (u8)(0 - sum); +} + +/** + * e1000_mng_enable_host_if - Checks host interface is enabled + * @hw: pointer to the HW structure + * + * Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND + * + * This function checks whether the HOST IF is enabled for command operation + * and also checks whether the previous command is completed. It busy waits + * in case of previous command is not completed. + **/ +static s32 e1000_mng_enable_host_if(struct e1000_hw *hw) +{ + u32 hicr; + u8 i; + + if (!hw->mac.arc_subsystem_valid) { + e_dbg("ARC subsystem not valid.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + /* Check that the host interface is enabled. */ + hicr = er32(HICR); + if (!(hicr & E1000_HICR_EN)) { + e_dbg("E1000_HOST_EN bit disabled.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + /* check the previous command is completed */ + for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) { + hicr = er32(HICR); + if (!(hicr & E1000_HICR_C)) + break; + mdelay(1); + } + + if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { + e_dbg("Previous command timeout failed.\n"); + return -E1000_ERR_HOST_INTERFACE_COMMAND; + } + + return 0; +} + +/** + * e1000e_check_mng_mode_generic - Generic check management mode + * @hw: pointer to the HW structure + * + * Reads the firmware semaphore register and returns true (>0) if + * manageability is enabled, else false (0). + **/ +bool e1000e_check_mng_mode_generic(struct e1000_hw *hw) +{ + u32 fwsm = er32(FWSM); + + return (fwsm & E1000_FWSM_MODE_MASK) == + (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT); +} + +/** + * e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx + * @hw: pointer to the HW structure + * + * Enables packet filtering on transmit packets if manageability is enabled + * and host interface is enabled. + **/ +bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw) +{ + struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie; + u32 *buffer = (u32 *)&hw->mng_cookie; + u32 offset; + s32 ret_val, hdr_csum, csum; + u8 i, len; + + hw->mac.tx_pkt_filtering = true; + + /* No manageability, no filtering */ + if (!hw->mac.ops.check_mng_mode(hw)) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* If we can't read from the host interface for whatever + * reason, disable filtering. + */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) { + hw->mac.tx_pkt_filtering = false; + return hw->mac.tx_pkt_filtering; + } + + /* Read in the header. Length and offset are in dwords. */ + len = E1000_MNG_DHCP_COOKIE_LENGTH >> 2; + offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2; + for (i = 0; i < len; i++) + *(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, + offset + i); + hdr_csum = hdr->checksum; + hdr->checksum = 0; + csum = e1000_calculate_checksum((u8 *)hdr, + E1000_MNG_DHCP_COOKIE_LENGTH); + /* If either the checksums or signature don't match, then + * the cookie area isn't considered valid, in which case we + * take the safe route of assuming Tx filtering is enabled. 
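+         * As a worked example of the two's-complement check used here:
+         * for bytes 0x10, 0x20, 0x30 the sum is 0x60, so
+         * e1000_calculate_checksum() returns (u8)(0 - 0x60) = 0xA0, and
+         * summing the data plus the stored checksum gives 0x100, i.e.
+         * 0 modulo 256.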
+ */ + if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { + hw->mac.tx_pkt_filtering = true; + return hw->mac.tx_pkt_filtering; + } + + /* Cookie area is valid, make the final check for filtering. */ + if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) + hw->mac.tx_pkt_filtering = false; + + return hw->mac.tx_pkt_filtering; +} + +/** + * e1000_mng_write_cmd_header - Writes manageability command header + * @hw: pointer to the HW structure + * @hdr: pointer to the host interface command header + * + * Writes the command header after does the checksum calculation. + **/ +static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw, + struct e1000_host_mng_command_header *hdr) +{ + u16 i, length = sizeof(struct e1000_host_mng_command_header); + + /* Write the whole command header structure with new checksum. */ + + hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length); + + length >>= 2; + /* Write the relevant command block into the ram area. */ + for (i = 0; i < length; i++) { + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i, *((u32 *)hdr + i)); + e1e_flush(); + } + + return 0; +} + +/** + * e1000_mng_host_if_write - Write to the manageability host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface buffer + * @length: size of the buffer + * @offset: location in the buffer to write to + * @sum: sum of the data (not checksum) + * + * This function writes the buffer content at the offset given on the host if. + * It also does alignment considerations to do the writes in most efficient + * way. Also fills up the sum of the buffer in *buffer parameter. + **/ +static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, + u16 length, u16 offset, u8 *sum) +{ + u8 *tmp; + u8 *bufptr = buffer; + u32 data = 0; + u16 remaining, i, j, prev_bytes; + + /* sum = only sum of the data and it is not checksum */ + + if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) + return -E1000_ERR_PARAM; + + tmp = (u8 *)&data; + prev_bytes = offset & 0x3; + offset >>= 2; + + if (prev_bytes) { + data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset); + for (j = prev_bytes; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data); + length -= j - prev_bytes; + offset++; + } + + remaining = length & 0x3; + length -= remaining; + + /* Calculate length in DWORDs */ + length >>= 2; + + /* The device driver writes the relevant command block into the + * ram area. + */ + for (i = 0; i < length; i++) { + for (j = 0; j < sizeof(u32); j++) { + *(tmp + j) = *bufptr++; + *sum += *(tmp + j); + } + + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + if (remaining) { + for (j = 0; j < sizeof(u32); j++) { + if (j < remaining) + *(tmp + j) = *bufptr++; + else + *(tmp + j) = 0; + + *sum += *(tmp + j); + } + E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data); + } + + return 0; +} + +/** + * e1000e_mng_write_dhcp_info - Writes DHCP info to host interface + * @hw: pointer to the HW structure + * @buffer: pointer to the host interface + * @length: size of the buffer + * + * Writes the DHCP information to the host interface. 
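+ *
+ * A hedged usage sketch (the names come from the networking stack and the
+ * exact caller in this driver may differ): the transmit path hands over
+ * the bytes of an outgoing DHCP frame so the management firmware can see
+ * it:
+ *
+ *   e1000e_mng_write_dhcp_info(hw, skb->data, skb->len);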
+ **/ +s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length) +{ + struct e1000_host_mng_command_header hdr; + s32 ret_val; + u32 hicr; + + hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD; + hdr.command_length = length; + hdr.reserved1 = 0; + hdr.reserved2 = 0; + hdr.checksum = 0; + + /* Enable the host interface */ + ret_val = e1000_mng_enable_host_if(hw); + if (ret_val) + return ret_val; + + /* Populate the host interface with the contents of "buffer". */ + ret_val = e1000_mng_host_if_write(hw, buffer, length, + sizeof(hdr), &(hdr.checksum)); + if (ret_val) + return ret_val; + + /* Write the manageability command header */ + ret_val = e1000_mng_write_cmd_header(hw, &hdr); + if (ret_val) + return ret_val; + + /* Tell the ARC a new command is pending. */ + hicr = er32(HICR); + ew32(HICR, hicr | E1000_HICR_C); + + return 0; +} + +/** + * e1000e_enable_mng_pass_thru - Check if management passthrough is needed + * @hw: pointer to the HW structure + * + * Verifies the hardware needs to leave interface enabled so that frames can + * be directed to and from the management interface. + **/ +bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw) +{ + u32 manc; + u32 fwsm, factps; + + manc = er32(MANC); + + if (!(manc & E1000_MANC_RCV_TCO_EN)) + return false; + + if (hw->mac.has_fwsm) { + fwsm = er32(FWSM); + factps = er32(FACTPS); + + if (!(factps & E1000_FACTPS_MNGCG) && + ((fwsm & E1000_FWSM_MODE_MASK) == + (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) + return true; + } else if ((hw->mac.type == e1000_82574) || + (hw->mac.type == e1000_82583)) { + u16 data; + s32 ret_val; + + factps = er32(FACTPS); + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data); + if (ret_val) + return false; + + if (!(factps & E1000_FACTPS_MNGCG) && + ((data & E1000_NVM_INIT_CTRL2_MNGM) == + (e1000_mng_mode_pt << 13))) + return true; + } else if ((manc & E1000_MANC_SMBUS_EN) && + !(manc & E1000_MANC_ASF_EN)) { + return true; + } + + return false; +} diff --git a/addons/e1000e/src/4.4.180/manage.h b/addons/e1000e/src/4.4.180/manage.h new file mode 100644 index 00000000..0b9ea595 --- /dev/null +++ b/addons/e1000e/src/4.4.180/manage.h @@ -0,0 +1,65 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_MANAGE_H_ +#define _E1000E_MANAGE_H_ + +bool e1000e_check_mng_mode_generic(struct e1000_hw *hw); +bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw); +s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length); +bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw); + +enum e1000_mng_mode { + e1000_mng_mode_none = 0, + e1000_mng_mode_asf, + e1000_mng_mode_pt, + e1000_mng_mode_ipmi, + e1000_mng_mode_host_if_only +}; + +#define E1000_FACTPS_MNGCG 0x20000000 + +#define E1000_FWSM_MODE_MASK 0xE +#define E1000_FWSM_MODE_SHIFT 1 + +#define E1000_MNG_IAMT_MODE 0x3 +#define E1000_MNG_DHCP_COOKIE_LENGTH 0x10 +#define E1000_MNG_DHCP_COOKIE_OFFSET 0x6F0 +#define E1000_MNG_DHCP_COMMAND_TIMEOUT 10 +#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64 +#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1 +#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN 0x2 + +#define E1000_VFTA_ENTRY_SHIFT 5 +#define E1000_VFTA_ENTRY_MASK 0x7F +#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F + +#define E1000_HICR_EN 0x01 /* Enable bit - RO */ +/* Driver sets this bit when done to put command in RAM */ +#define E1000_HICR_C 0x02 +#define E1000_HICR_SV 0x04 /* Status Validity */ +#define E1000_HICR_FW_RESET_ENABLE 0x40 +#define E1000_HICR_FW_RESET 0x80 + +/* Intel(R) Active Management Technology signature */ +#define E1000_IAMT_SIGNATURE 0x544D4149 + +#endif diff --git a/addons/e1000e/src/4.4.180/netdev.c b/addons/e1000e/src/4.4.180/netdev.c new file mode 100644 index 00000000..6b1cacd8 --- /dev/null +++ b/addons/e1000e/src/4.4.180/netdev.c @@ -0,0 +1,7554 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/types.h> +#include <linux/init.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> +#include <linux/pagemap.h> +#include <linux/delay.h> +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/tcp.h> +#include <linux/ipv6.h> +#include <linux/slab.h> +#include <net/checksum.h> +#include <net/ip6_checksum.h> +#include <linux/ethtool.h> +#include <linux/if_vlan.h> +#include <linux/cpu.h> +#include <linux/smp.h> +#include <linux/pm_qos.h> +#include <linux/pm_runtime.h> +#include <linux/aer.h> +#include <linux/prefetch.h> + +#include "e1000.h" + +#define DRV_EXTRAVERSION "-k" + +#define DRV_VERSION "3.2.6" DRV_EXTRAVERSION +char e1000e_driver_name[] = "e1000e"; +const char e1000e_driver_version[] = DRV_VERSION; + +#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +static int debug = -1; +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static const struct e1000_info *e1000_info_tbl[] = { + [board_82571] = &e1000_82571_info, + [board_82572] = &e1000_82572_info, + [board_82573] = &e1000_82573_info, + [board_82574] = &e1000_82574_info, + [board_82583] = &e1000_82583_info, + [board_80003es2lan] = &e1000_es2_info, + [board_ich8lan] = &e1000_ich8_info, + [board_ich9lan] = &e1000_ich9_info, + [board_ich10lan] = &e1000_ich10_info, + [board_pchlan] = &e1000_pch_info, + [board_pch2lan] = &e1000_pch2_info, + [board_pch_lpt] = &e1000_pch_lpt_info, + [board_pch_spt] = &e1000_pch_spt_info, +}; + +struct e1000_reg_info { + u32 ofs; + char *name; +}; + +static const struct e1000_reg_info e1000_reg_info_tbl[] = { + /* General Registers */ + {E1000_CTRL, "CTRL"}, + {E1000_STATUS, "STATUS"}, + {E1000_CTRL_EXT, "CTRL_EXT"}, + + /* Interrupt Registers */ + {E1000_ICR, "ICR"}, + + /* Rx Registers */ + {E1000_RCTL, "RCTL"}, + {E1000_RDLEN(0), "RDLEN"}, + {E1000_RDH(0), "RDH"}, + {E1000_RDT(0), "RDT"}, + {E1000_RDTR, "RDTR"}, + {E1000_RXDCTL(0), "RXDCTL"}, + {E1000_ERT, "ERT"}, + {E1000_RDBAL(0), "RDBAL"}, + {E1000_RDBAH(0), "RDBAH"}, + {E1000_RDFH, "RDFH"}, + {E1000_RDFT, "RDFT"}, + {E1000_RDFHS, "RDFHS"}, + {E1000_RDFTS, "RDFTS"}, + {E1000_RDFPC, "RDFPC"}, + + /* Tx Registers */ + {E1000_TCTL, "TCTL"}, + {E1000_TDBAL(0), "TDBAL"}, + {E1000_TDBAH(0), "TDBAH"}, + {E1000_TDLEN(0), "TDLEN"}, + {E1000_TDH(0), "TDH"}, + {E1000_TDT(0), "TDT"}, + {E1000_TIDV, "TIDV"}, + {E1000_TXDCTL(0), "TXDCTL"}, + {E1000_TADV, "TADV"}, + {E1000_TARC(0), "TARC"}, + {E1000_TDFH, "TDFH"}, + {E1000_TDFT, "TDFT"}, + {E1000_TDFHS, "TDFHS"}, + {E1000_TDFTS, "TDFTS"}, + {E1000_TDFPC, "TDFPC"}, + + /* List Terminator */ + {0, NULL} +}; + +/** + * __ew32_prepare - prepare to write to MAC CSR register on certain parts + * @hw: pointer to the HW structure + * + * When updating the MAC CSR registers, the Manageability Engine (ME) could + * be accessing the registers at the same time. Normally, this is handled in + * h/w by an arbiter but on some parts there is a bug that acknowledges Host + * accesses later than it should which could result in the register having + * an incorrect value. Work around this by checking the FWSM register which + * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set + * and try again a number of times.
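+ *
+ * A zero return means the wait expired with ME still holding the bus;
+ * the tail-write workarounds below use the result roughly like this
+ * (pattern from e1000e_update_rdt_wa(), shown for illustration only):
+ *
+ *   ret_val = __ew32_prepare(hw);
+ *   writel(i, rx_ring->tail);
+ *   if (!ret_val && (i != readl(rx_ring->tail)))
+ *           schedule_work(&adapter->reset_task);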
+ **/ +s32 __ew32_prepare(struct e1000_hw *hw) +{ + s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT; + + while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i) + udelay(50); + + return i; +} + +void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val) +{ + if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + __ew32_prepare(hw); + + writel(val, hw->hw_addr + reg); +} + +/** + * e1000_regdump - register printout routine + * @hw: pointer to the HW structure + * @reginfo: pointer to the register info table + **/ +static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) +{ + int n = 0; + char rname[16]; + u32 regs[8]; + + switch (reginfo->ofs) { + case E1000_RXDCTL(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_RXDCTL(n)); + break; + case E1000_TXDCTL(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_TXDCTL(n)); + break; + case E1000_TARC(0): + for (n = 0; n < 2; n++) + regs[n] = __er32(hw, E1000_TARC(n)); + break; + default: + pr_info("%-15s %08x\n", + reginfo->name, __er32(hw, reginfo->ofs)); + return; + } + + snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]"); + pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); +} + +static void e1000e_dump_ps_pages(struct e1000_adapter *adapter, + struct e1000_buffer *bi) +{ + int i; + struct e1000_ps_page *ps_page; + + for (i = 0; i < adapter->rx_ps_pages; i++) { + ps_page = &bi->ps_pages[i]; + + if (ps_page->page) { + pr_info("packet dump for ps_page %d:\n", i); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, page_address(ps_page->page), + PAGE_SIZE, true); + } + } +} + +/** + * e1000e_dump - Print registers, Tx-ring and Rx-ring + * @adapter: board private structure + **/ +static void e1000e_dump(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_reg_info *reginfo; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc; + struct my_u0 { + __le64 a; + __le64 b; + } *u0; + struct e1000_buffer *buffer_info; + struct e1000_ring *rx_ring = adapter->rx_ring; + union e1000_rx_desc_packet_split *rx_desc_ps; + union e1000_rx_desc_extended *rx_desc; + struct my_u1 { + __le64 a; + __le64 b; + __le64 c; + __le64 d; + } *u1; + u32 staterr; + int i = 0; + + if (!netif_msg_hw(adapter)) + return; + + /* Print netdevice Info */ + if (netdev) { + dev_info(&adapter->pdev->dev, "Net device Info\n"); + pr_info("Device Name state trans_start last_rx\n"); + pr_info("%-15s %016lX %016lX %016lX\n", netdev->name, + netdev->state, netdev->trans_start, netdev->last_rx); + } + + /* Print Registers */ + dev_info(&adapter->pdev->dev, "Register Dump\n"); + pr_info(" Register Name Value\n"); + for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl; + reginfo->name; reginfo++) { + e1000_regdump(hw, reginfo); + } + + /* Print Tx Ring Summary */ + if (!netdev || !netif_running(netdev)) + return; + + dev_info(&adapter->pdev->dev, "Tx Ring Summary\n"); + pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n"); + buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean]; + pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n", + 0, tx_ring->next_to_use, tx_ring->next_to_clean, + (unsigned long long)buffer_info->dma, + buffer_info->length, + buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp); + + /* Print Tx Ring */ + if (!netif_msg_tx_done(adapter)) + goto rx_ring_summary; + + dev_info(&adapter->pdev->dev, "Tx Ring Dump\n"); + + /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 
(Extended) + * + * Legacy Transmit Descriptor + * +--------------------------------------------------------------+ + * 0 | Buffer Address [63:0] (Reserved on Write Back) | + * +--------------------------------------------------------------+ + * 8 | Special | CSS | Status | CMD | CSO | Length | + * +--------------------------------------------------------------+ + * 63 48 47 36 35 32 31 24 23 16 15 0 + * + * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload + * 63 48 47 40 39 32 31 16 15 8 7 0 + * +----------------------------------------------------------------+ + * 0 | TUCSE | TUCS0 | TUCSS | IPCSE | IPCS0 | IPCSS | + * +----------------------------------------------------------------+ + * 8 | MSS | HDRLEN | RSV | STA | TUCMD | DTYP | PAYLEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + * + * Extended Data Descriptor (DTYP=0x1) + * +----------------------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +----------------------------------------------------------------+ + * 8 | VLAN tag | POPTS | Rsvd | Status | Command | DTYP | DTALEN | + * +----------------------------------------------------------------+ + * 63 48 47 40 39 36 35 32 31 24 23 20 19 0 + */ + pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n"); + pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n"); + pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n"); + for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { + const char *next_desc; + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + u0 = (struct my_u0 *)tx_desc; + if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean) + next_desc = " NTC/U"; + else if (i == tx_ring->next_to_use) + next_desc = " NTU"; + else if (i == tx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n", + (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' : + ((le64_to_cpu(u0->b) & (1 << 20)) ? 
'd' : 'c')), + i, + (unsigned long long)le64_to_cpu(u0->a), + (unsigned long long)le64_to_cpu(u0->b), + (unsigned long long)buffer_info->dma, + buffer_info->length, buffer_info->next_to_watch, + (unsigned long long)buffer_info->time_stamp, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && buffer_info->skb) + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + buffer_info->skb->len, true); + } + + /* Print Rx Ring Summary */ +rx_ring_summary: + dev_info(&adapter->pdev->dev, "Rx Ring Summary\n"); + pr_info("Queue [NTU] [NTC]\n"); + pr_info(" %5d %5X %5X\n", + 0, rx_ring->next_to_use, rx_ring->next_to_clean); + + /* Print Rx Ring */ + if (!netif_msg_rx_status(adapter)) + return; + + dev_info(&adapter->pdev->dev, "Rx Ring Dump\n"); + switch (adapter->rx_ps_pages) { + case 1: + case 2: + case 3: + /* [Extended] Packet Split Receive Descriptor Format + * + * +-----------------------------------------------------+ + * 0 | Buffer Address 0 [63:0] | + * +-----------------------------------------------------+ + * 8 | Buffer Address 1 [63:0] | + * +-----------------------------------------------------+ + * 16 | Buffer Address 2 [63:0] | + * +-----------------------------------------------------+ + * 24 | Buffer Address 3 [63:0] | + * +-----------------------------------------------------+ + */ + pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n"); + /* [Extended] Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 13 12 8 7 4 3 0 + * +------------------------------------------------------+ + * 0 | Packet | IP | Rsvd | MRQ | Rsvd | MRQ RSS | + * | Checksum | Ident | | Queue | | Type | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n"); + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + buffer_info = &rx_ring->buffer_info[i]; + rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i); + u1 = (struct my_u1 *)rx_desc_ps; + staterr = + le32_to_cpu(rx_desc_ps->wb.middle.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n", + "RWB", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + buffer_info->skb, next_desc); + } else { + pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n", + "R ", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)le64_to_cpu(u1->c), + (unsigned long long)le64_to_cpu(u1->d), + (unsigned long long)buffer_info->dma, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter)) + e1000e_dump_ps_pages(adapter, + buffer_info); + } + } + break; + default: + case 0: + /* Extended Receive Descriptor (Read) Format + * + * +-----------------------------------------------------+ + * 0 | Buffer Address [63:0] | + * +-----------------------------------------------------+ + * 8 | Reserved | + * 
+-----------------------------------------------------+ + */ + pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n"); + /* Extended Receive Descriptor (Write-Back) Format + * + * 63 48 47 32 31 24 23 4 3 0 + * +------------------------------------------------------+ + * | RSS Hash | | | | + * 0 +-------------------+ Rsvd | Reserved | MRQ RSS | + * | Packet | IP | | | Type | + * | Checksum | Ident | | | | + * +------------------------------------------------------+ + * 8 | VLAN Tag | Length | Extended Error | Extended Status | + * +------------------------------------------------------+ + * 63 48 47 32 31 20 19 0 + */ + pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n"); + + for (i = 0; i < rx_ring->count; i++) { + const char *next_desc; + + buffer_info = &rx_ring->buffer_info[i]; + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + u1 = (struct my_u1 *)rx_desc; + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + + if (i == rx_ring->next_to_use) + next_desc = " NTU"; + else if (i == rx_ring->next_to_clean) + next_desc = " NTC"; + else + next_desc = ""; + + if (staterr & E1000_RXD_STAT_DD) { + /* Descriptor Done */ + pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n", + "RWB", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + buffer_info->skb, next_desc); + } else { + pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n", + "R ", i, + (unsigned long long)le64_to_cpu(u1->a), + (unsigned long long)le64_to_cpu(u1->b), + (unsigned long long)buffer_info->dma, + buffer_info->skb, next_desc); + + if (netif_msg_pktdata(adapter) && + buffer_info->skb) + print_hex_dump(KERN_INFO, "", + DUMP_PREFIX_ADDRESS, 16, + 1, + buffer_info->skb->data, + adapter->rx_buffer_len, + true); + } + } + } +} + +/** + * e1000_desc_unused - calculate if we have unused descriptors + **/ +static int e1000_desc_unused(struct e1000_ring *ring) +{ + if (ring->next_to_clean > ring->next_to_use) + return ring->next_to_clean - ring->next_to_use - 1; + + return ring->count + ring->next_to_clean - ring->next_to_use - 1; +} + +/** + * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp + * @adapter: board private structure + * @hwtstamps: time stamp structure to update + * @systim: unsigned 64bit system time value. + * + * Convert the system time value stored in the RX/TXSTMP registers into a + * hwtstamp which can be used by the upper level time stamping functions. + * + * The 'systim_lock' spinlock is used to protect the consistency of the + * system time value. This is needed because reading the 64 bit time + * value involves reading two 32 bit registers. The first read latches the + * value. 
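+ *
+ * For illustration, e1000e_rx_hwtstamp() below performs exactly this
+ * two-register read, low word first, before converting:
+ *
+ *   rxstmp = (u64)er32(RXSTMPL);
+ *   rxstmp |= (u64)er32(RXSTMPH) << 32;
+ *   e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);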
+ **/ +static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter, + struct skb_shared_hwtstamps *hwtstamps, + u64 systim) +{ + u64 ns; + unsigned long flags; + + spin_lock_irqsave(&adapter->systim_lock, flags); + ns = timecounter_cyc2time(&adapter->tc, systim); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +/** + * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp + * @adapter: board private structure + * @status: descriptor extended error and status field + * @skb: particular skb to include time stamp + * + * If the time stamp is valid, convert it into the timecounter ns value + * and store that result into the shhwtstamps structure which is passed + * up the network stack. + **/ +static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status, + struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u64 rxstmp; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) || + !(status & E1000_RXDEXT_STATERR_TST) || + !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) + return; + + /* The Rx time stamp registers contain the time stamp. No other + * received packet will be time stamped until the Rx time stamp + * registers are read. Because only one packet can be time stamped + * at a time, the register values must belong to this packet and + * therefore none of the other additional attributes need to be + * compared. + */ + rxstmp = (u64)er32(RXSTMPL); + rxstmp |= (u64)er32(RXSTMPH) << 32; + e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp); + + adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP; +} + +/** + * e1000_receive_skb - helper function to handle Rx indications + * @adapter: board private structure + * @staterr: descriptor extended error and status field as written by hardware + * @vlan: descriptor vlan field as written by hardware (no le/be conversion) + * @skb: pointer to sk_buff to be indicated to stack + **/ +static void e1000_receive_skb(struct e1000_adapter *adapter, + struct net_device *netdev, struct sk_buff *skb, + u32 staterr, __le16 vlan) +{ + u16 tag = le16_to_cpu(vlan); + + e1000e_rx_hwtstamp(adapter, staterr, skb); + + skb->protocol = eth_type_trans(skb, netdev); + + if (staterr & E1000_RXD_STAT_VP) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag); + + napi_gro_receive(&adapter->napi, skb); +} + +/** + * e1000_rx_checksum - Receive Checksum Offload + * @adapter: board private structure + * @status_err: receive descriptor status and error fields + * @csum: receive descriptor csum field + * @sk_buff: socket buffer with received data + **/ +static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, + struct sk_buff *skb) +{ + u16 status = (u16)status_err; + u8 errors = (u8)(status_err >> 24); + + skb_checksum_none_assert(skb); + + /* Rx checksum disabled */ + if (!(adapter->netdev->features & NETIF_F_RXCSUM)) + return; + + /* Ignore Checksum bit is set */ + if (status & E1000_RXD_STAT_IXSM) + return; + + /* TCP/UDP checksum error bit or IP checksum error bit is set */ + if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) { + /* let the stack verify checksum errors */ + adapter->hw_csum_err++; + return; + } + + /* TCP/UDP Checksum has not been calculated */ + if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))) + return; + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + adapter->hw_csum_good++; +} + +static void e1000e_update_rdt_wa(struct 
e1000_ring *rx_ring, unsigned int i) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + s32 ret_val = __ew32_prepare(hw); + + writel(i, rx_ring->tail); + + if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) { + u32 rctl = er32(RCTL); + + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e_err("ME firmware caused invalid RDT - resetting\n"); + schedule_work(&adapter->reset_task); + } +} + +static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + s32 ret_val = __ew32_prepare(hw); + + writel(i, tx_ring->tail); + + if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) { + u32 tctl = er32(TCTL); + + ew32(TCTL, tctl & ~E1000_TCTL_EN); + e_err("ME firmware caused invalid TDT - resetting\n"); + schedule_work(&adapter->reset_task); + } +} + +/** + * e1000_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: Rx descriptor ring + **/ +static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring, + int cleaned_count, gfp_t gfp) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_extended *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = adapter->rx_buffer_len; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto map_skb; + } + + skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); + if (!skb) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; +map_skb: + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + dev_err(&pdev->dev, "Rx DMA map failed\n"); + adapter->rx_dma_failed++; + break; + } + + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
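+ *
+ * The wmb() that follows keeps the descriptor stores (the buffer_addr
+ * write above) ordered before the tail write, so the NIC never fetches
+ * a descriptor whose address field is still sitting in a write buffer.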
+ */ + wmb(); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, i); + else + writel(i, rx_ring->tail); + } + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + rx_ring->next_to_use = i; +} + +/** + * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split + * @rx_ring: Rx descriptor ring + **/ +static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring, + int cleaned_count, gfp_t gfp) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_packet_split *rx_desc; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &buffer_info->ps_pages[j]; + if (j >= adapter->rx_ps_pages) { + /* all unused desc entries get hw null ptr */ + rx_desc->read.buffer_addr[j + 1] = + ~cpu_to_le64(0); + continue; + } + if (!ps_page->page) { + ps_page->page = alloc_page(gfp); + if (!ps_page->page) { + adapter->alloc_rx_buff_failed++; + goto no_buffers; + } + ps_page->dma = dma_map_page(&pdev->dev, + ps_page->page, + 0, PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + ps_page->dma)) { + dev_err(&adapter->pdev->dev, + "Rx DMA page map failed\n"); + adapter->rx_dma_failed++; + goto no_buffers; + } + } + /* Refresh the desc even if buffer_addrs + * didn't change because each write-back + * erases this info. + */ + rx_desc->read.buffer_addr[j + 1] = + cpu_to_le64(ps_page->dma); + } + + skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0, + gfp); + + if (!skb) { + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; + buffer_info->dma = dma_map_single(&pdev->dev, skb->data, + adapter->rx_ps_bsize0, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + dev_err(&pdev->dev, "Rx DMA map failed\n"); + adapter->rx_dma_failed++; + /* cleanup skb */ + dev_kfree_skb_any(skb); + buffer_info->skb = NULL; + break; + } + + rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma); + + if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) { + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
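+ *
+ * Note the tail is advanced by (i << 1) in this packet-split path:
+ * packet-split descriptors are twice the size of the legacy ones, so
+ * the hardware tail moves two units per descriptor.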
+ */ + wmb(); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, i << 1); + else + writel(i << 1, rx_ring->tail); + } + + i++; + if (i == rx_ring->count) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + +no_buffers: + rx_ring->next_to_use = i; +} + +/** + * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers + * @rx_ring: Rx descriptor ring + * @cleaned_count: number of buffers to allocate this pass + **/ + +static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring, + int cleaned_count, gfp_t gfp) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_extended *rx_desc; + struct e1000_buffer *buffer_info; + struct sk_buff *skb; + unsigned int i; + unsigned int bufsz = 256 - 16; /* for skb_reserve */ + + i = rx_ring->next_to_use; + buffer_info = &rx_ring->buffer_info[i]; + + while (cleaned_count--) { + skb = buffer_info->skb; + if (skb) { + skb_trim(skb, 0); + goto check_page; + } + + skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp); + if (unlikely(!skb)) { + /* Better luck next round */ + adapter->alloc_rx_buff_failed++; + break; + } + + buffer_info->skb = skb; +check_page: + /* allocate a new page if necessary */ + if (!buffer_info->page) { + buffer_info->page = alloc_page(gfp); + if (unlikely(!buffer_info->page)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + if (!buffer_info->dma) { + buffer_info->dma = dma_map_page(&pdev->dev, + buffer_info->page, 0, + PAGE_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + adapter->alloc_rx_buff_failed++; + break; + } + } + + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); + + if (unlikely(++i == rx_ring->count)) + i = 0; + buffer_info = &rx_ring->buffer_info[i]; + } + + if (likely(rx_ring->next_to_use != i)) { + rx_ring->next_to_use = i; + if (unlikely(i-- == 0)) + i = (rx_ring->count - 1); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
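+ *
+ * The wrap-around decrement above means the tail write that follows
+ * points at the last initialized descriptor rather than at
+ * next_to_use itself.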
+ */ + wmb(); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, i); + else + writel(i, rx_ring->tail); + } +} + +static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss, + struct sk_buff *skb) +{ + if (netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3); +} + +/** + * e1000_clean_rx_irq - Send received data up the network stack + * @rx_ring: Rx descriptor ring + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, + int work_to_do) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_hw *hw = &adapter->hw; + union e1000_rx_desc_extended *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length, staterr; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + skb = buffer_info->skb; + buffer_info->skb = NULL; + + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->wb.upper.length); + + /* !EOP means multiple descriptors were used to store a single + * packet, if that's the case we need to toss it. 
In fact, we + * need to toss every packet with the EOP bit clear and the + * next frame that _does_ have the EOP bit set, as it is by + * definition only a frame fragment + */ + if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) + adapter->flags2 |= FLAG2_IS_DISCARDING; + + if (adapter->flags2 & FLAG2_IS_DISCARDING) { + /* All receives must fit into a single buffer */ + e_dbg("Receive packet consumed multiple buffers\n"); + /* recycle */ + buffer_info->skb = skb; + if (staterr & E1000_RXD_STAT_EOP) + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + goto next_desc; + } + + if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + /* recycle */ + buffer_info->skb = skb; + goto next_desc; + } + + /* adjust length to remove Ethernet CRC */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { + /* If configured to store CRC, don't subtract FCS, + * but keep the FCS bytes out of the total_rx_bytes + * counter + */ + if (netdev->features & NETIF_F_RXFCS) + total_rx_bytes -= 4; + else + length -= 4; + } + + total_rx_bytes += length; + total_rx_packets++; + + /* code added for copybreak, this should improve + * performance for small packets with large amounts + * of reassembly being done in the stack + */ + if (length < copybreak) { + struct sk_buff *new_skb = + napi_alloc_skb(&adapter->napi, length); + if (new_skb) { + skb_copy_to_linear_data_offset(new_skb, + -NET_IP_ALIGN, + (skb->data - + NET_IP_ALIGN), + (length + + NET_IP_ALIGN)); + /* save the skb in buffer_info as good */ + buffer_info->skb = skb; + skb = new_skb; + } + /* else just continue with the old one */ + } + /* end copybreak code */ + skb_put(skb, length); + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, staterr, skb); + + e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); + + e1000_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.upper.vlan); + +next_desc: + rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(rx_ring, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +static void e1000_put_txbuf(struct e1000_ring *tx_ring, + struct e1000_buffer *buffer_info) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + + if (buffer_info->dma) { + if (buffer_info->mapped_as_page) + dma_unmap_page(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + else + dma_unmap_single(&adapter->pdev->dev, buffer_info->dma, + buffer_info->length, DMA_TO_DEVICE); + buffer_info->dma = 0; + } + if (buffer_info->skb) { + dev_kfree_skb_any(buffer_info->skb); + buffer_info->skb = NULL; + } + buffer_info->time_stamp = 0; +} + +static void e1000_print_hw_hang(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + print_hang_task); + struct net_device *netdev = adapter->netdev; + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int i = tx_ring->next_to_clean; + unsigned int eop = tx_ring->buffer_info[i].next_to_watch; + struct e1000_tx_desc 
*eop_desc = E1000_TX_DESC(*tx_ring, eop); + struct e1000_hw *hw = &adapter->hw; + u16 phy_status, phy_1000t_status, phy_ext_status; + u16 pci_status; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) { + /* May be block on write-back, flush and detect again + * flush pending descriptor writebacks to memory + */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + /* execute the writes immediately */ + e1e_flush(); + /* Due to rare timing issues, write to TIDV again to ensure + * the write is successful + */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + /* execute the writes immediately */ + e1e_flush(); + adapter->tx_hang_recheck = true; + return; + } + adapter->tx_hang_recheck = false; + + if (er32(TDH(0)) == er32(TDT(0))) { + e_dbg("false hang detected, ignoring\n"); + return; + } + + /* Real hang detected */ + netif_stop_queue(netdev); + + e1e_rphy(hw, MII_BMSR, &phy_status); + e1e_rphy(hw, MII_STAT1000, &phy_1000t_status); + e1e_rphy(hw, MII_ESTATUS, &phy_ext_status); + + pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status); + + /* detected Hardware unit hang */ + e_err("Detected Hardware Unit Hang:\n" + " TDH <%x>\n" + " TDT <%x>\n" + " next_to_use <%x>\n" + " next_to_clean <%x>\n" + "buffer_info[next_to_clean]:\n" + " time_stamp <%lx>\n" + " next_to_watch <%x>\n" + " jiffies <%lx>\n" + " next_to_watch.status <%x>\n" + "MAC Status <%x>\n" + "PHY Status <%x>\n" + "PHY 1000BASE-T Status <%x>\n" + "PHY Extended Status <%x>\n" + "PCI Status <%x>\n", + readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use, + tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp, + eop, jiffies, eop_desc->upper.fields.status, er32(STATUS), + phy_status, phy_1000t_status, phy_ext_status, pci_status); + + e1000e_dump(adapter); + + /* Suggest workaround for known h/w issue */ + if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE)) + e_err("Try turning off Tx pause (flow control) via ethtool\n"); +} + +/** + * e1000e_tx_hwtstamp_work - check for Tx time stamp + * @work: pointer to work struct + * + * This work function polls the TSYNCTXCTL valid bit to determine when a + * timestamp has been taken for the current stored skb. The timestamp must + * be for this skb because only one such packet is allowed in the queue. + */ +static void e1000e_tx_hwtstamp_work(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, + tx_hwtstamp_work); + struct e1000_hw *hw = &adapter->hw; + + if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) { + struct sk_buff *skb = adapter->tx_hwtstamp_skb; + struct skb_shared_hwtstamps shhwtstamps; + u64 txstmp; + + txstmp = er32(TXSTMPL); + txstmp |= (u64)er32(TXSTMPH) << 32; + + e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp); + + /* Clear the global tx_hwtstamp_skb pointer and force writes + * prior to notifying the stack of a Tx timestamp. 
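+ *
+ * If no valid stamp shows up within tx_timeout_factor * HZ jiffies of
+ * tx_hwtstamp_start, the time_after() branch below gives up, frees the
+ * skb and bumps tx_hwtstamp_timeouts instead of rescheduling forever.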
+ */ + adapter->tx_hwtstamp_skb = NULL; + wmb(); /* force write prior to skb_tstamp_tx */ + + skb_tstamp_tx(skb, &shhwtstamps); + dev_kfree_skb_any(skb); + } else if (time_after(jiffies, adapter->tx_hwtstamp_start + + adapter->tx_timeout_factor * HZ)) { + dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + adapter->tx_hwtstamp_skb = NULL; + adapter->tx_hwtstamp_timeouts++; + e_warn("clearing Tx timestamp hang\n"); + } else { + /* reschedule to check later */ + schedule_work(&adapter->tx_hwtstamp_work); + } +} + +/** + * e1000_clean_tx_irq - Reclaim resources after transmit completes + * @tx_ring: Tx descriptor ring + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct e1000_tx_desc *tx_desc, *eop_desc; + struct e1000_buffer *buffer_info; + unsigned int i, eop; + unsigned int count = 0; + unsigned int total_tx_bytes = 0, total_tx_packets = 0; + unsigned int bytes_compl = 0, pkts_compl = 0; + + i = tx_ring->next_to_clean; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + + while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) && + (count < tx_ring->count)) { + bool cleaned = false; + + dma_rmb(); /* read buffer_info after eop_desc */ + for (; !cleaned; count++) { + tx_desc = E1000_TX_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + cleaned = (i == eop); + + if (cleaned) { + total_tx_packets += buffer_info->segs; + total_tx_bytes += buffer_info->bytecount; + if (buffer_info->skb) { + bytes_compl += buffer_info->skb->len; + pkts_compl++; + } + } + + e1000_put_txbuf(tx_ring, buffer_info); + tx_desc->upper.data = 0; + + i++; + if (i == tx_ring->count) + i = 0; + } + + if (i == tx_ring->next_to_use) + break; + eop = tx_ring->buffer_info[i].next_to_watch; + eop_desc = E1000_TX_DESC(*tx_ring, eop); + } + + tx_ring->next_to_clean = i; + + netdev_completed_queue(netdev, pkts_compl, bytes_compl); + +#define TX_WAKE_THRESHOLD 32 + if (count && netif_carrier_ok(netdev) && + e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
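+ *
+ * The smp_mb() below pairs with the stop-queue check in the transmit
+ * path: the queue is only woken once the updated next_to_clean is
+ * visible to other CPUs, closing the stop/wake race.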
+ */ + smp_mb(); + + if (netif_queue_stopped(netdev) && + !(test_bit(__E1000_DOWN, &adapter->state))) { + netif_wake_queue(netdev); + ++adapter->restart_queue; + } + } + + if (adapter->detect_tx_hung) { + /* Detect a transmit hang in hardware, this serializes the + * check with the clearing of time_stamp and movement of i + */ + adapter->detect_tx_hung = false; + if (tx_ring->buffer_info[i].time_stamp && + time_after(jiffies, tx_ring->buffer_info[i].time_stamp + + (adapter->tx_timeout_factor * HZ)) && + !(er32(STATUS) & E1000_STATUS_TXOFF)) + schedule_work(&adapter->print_hang_task); + else + adapter->tx_hang_recheck = false; + } + adapter->total_tx_bytes += total_tx_bytes; + adapter->total_tx_packets += total_tx_packets; + return count < tx_ring->count; +} + +/** + * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split + * @rx_ring: Rx descriptor ring + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done, + int work_to_do) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_hw *hw = &adapter->hw; + union e1000_rx_desc_packet_split *rx_desc, *next_rxd; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info, *next_buffer; + struct e1000_ps_page *ps_page; + struct sk_buff *skb; + unsigned int i, j; + u32 length, staterr; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_PS(*rx_ring, i); + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + if (*work_done >= work_to_do) + break; + (*work_done)++; + skb = buffer_info->skb; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + /* in the packet split case this is header only */ + prefetch(skb->data - NET_IP_ALIGN); + + i++; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_PS(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_bsize0, DMA_FROM_DEVICE); + buffer_info->dma = 0; + + /* see !EOP comment in other Rx routine */ + if (!(staterr & E1000_RXD_STAT_EOP)) + adapter->flags2 |= FLAG2_IS_DISCARDING; + + if (adapter->flags2 & FLAG2_IS_DISCARDING) { + e_dbg("Packet Split buffers didn't pick up the full packet\n"); + dev_kfree_skb_irq(skb); + if (staterr & E1000_RXD_STAT_EOP) + adapter->flags2 &= ~FLAG2_IS_DISCARDING; + goto next_desc; + } + + if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL))) { + dev_kfree_skb_irq(skb); + goto next_desc; + } + + length = le16_to_cpu(rx_desc->wb.middle.length0); + + if (!length) { + e_dbg("Last part of the packet spanning multiple descriptors\n"); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + /* Good Receive */ + skb_put(skb, length); + + { + /* this looks ugly, but it seems compiler issues make + * it more efficient than reusing j + */ + int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]); + + /* page alloc/put takes too long and effects small + * packet throughput, so unsplit small packets and + * save the alloc/put only valid in softirq (napi) + * context to call kmap_* + */ + if (l1 && (l1 <= copybreak) && + ((length + l1) <= 
adapter->rx_ps_bsize0)) { + u8 *vaddr; + + ps_page = &buffer_info->ps_pages[0]; + + /* there is no documentation about how to call + * kmap_atomic, so we can't hold the mapping + * very long + */ + dma_sync_single_for_cpu(&pdev->dev, + ps_page->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + vaddr = kmap_atomic(ps_page->page); + memcpy(skb_tail_pointer(skb), vaddr, l1); + kunmap_atomic(vaddr); + dma_sync_single_for_device(&pdev->dev, + ps_page->dma, + PAGE_SIZE, + DMA_FROM_DEVICE); + + /* remove the CRC */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { + if (!(netdev->features & NETIF_F_RXFCS)) + l1 -= 4; + } + + skb_put(skb, l1); + goto copydone; + } /* if */ + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + length = le16_to_cpu(rx_desc->wb.upper.length[j]); + if (!length) + break; + + ps_page = &buffer_info->ps_pages[j]; + dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + ps_page->dma = 0; + skb_fill_page_desc(skb, j, ps_page->page, 0, length); + ps_page->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; + } + + /* strip the ethernet crc, problem is we're using pages now so + * this whole operation can get a little cpu intensive + */ + if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) { + if (!(netdev->features & NETIF_F_RXFCS)) + pskb_trim(skb, skb->len - 4); + } + +copydone: + total_rx_bytes += skb->len; + total_rx_packets++; + + e1000_rx_checksum(adapter, staterr, skb); + + e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); + + if (rx_desc->wb.upper.header_status & + cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)) + adapter->rx_hdr_split++; + + e1000_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.middle.vlan); + +next_desc: + rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF); + buffer_info->skb = NULL; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= E1000_RX_BUFFER_WRITE) { + adapter->alloc_rx_buf(rx_ring, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.middle.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_consume_page - helper function + **/ +static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb, + u16 length) +{ + bi->page = NULL; + skb->len += length; + skb->data_len += length; + skb->truesize += PAGE_SIZE; +} + +/** + * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy + * @adapter: board private structure + * + * the return value indicates whether actual cleaning was done, there + * is no guarantee that everything was cleaned + **/ +static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, + int work_to_do) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct net_device *netdev = adapter->netdev; + struct pci_dev *pdev = adapter->pdev; + union e1000_rx_desc_extended *rx_desc, *next_rxd; + struct e1000_buffer *buffer_info, *next_buffer; + u32 length, staterr; + unsigned int i; + int cleaned_count = 0; + bool cleaned = false; + unsigned int total_rx_bytes = 0, total_rx_packets = 0; + struct skb_shared_info *shinfo; + + i = rx_ring->next_to_clean; + rx_desc = E1000_RX_DESC_EXT(*rx_ring, i); + staterr = 
le32_to_cpu(rx_desc->wb.upper.status_error); + buffer_info = &rx_ring->buffer_info[i]; + + while (staterr & E1000_RXD_STAT_DD) { + struct sk_buff *skb; + + if (*work_done >= work_to_do) + break; + (*work_done)++; + dma_rmb(); /* read descriptor and rx_buffer_info after status DD */ + + skb = buffer_info->skb; + buffer_info->skb = NULL; + + ++i; + if (i == rx_ring->count) + i = 0; + next_rxd = E1000_RX_DESC_EXT(*rx_ring, i); + prefetch(next_rxd); + + next_buffer = &rx_ring->buffer_info[i]; + + cleaned = true; + cleaned_count++; + dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + + length = le16_to_cpu(rx_desc->wb.upper.length); + + /* errors is only valid for DD + EOP descriptors */ + if (unlikely((staterr & E1000_RXD_STAT_EOP) && + ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) && + !(netdev->features & NETIF_F_RXALL)))) { + /* recycle both page and skb */ + buffer_info->skb = skb; + /* an error means any chain goes out the window too */ + if (rx_ring->rx_skb_top) + dev_kfree_skb_irq(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + goto next_desc; + } +#define rxtop (rx_ring->rx_skb_top) + if (!(staterr & E1000_RXD_STAT_EOP)) { + /* this descriptor is only the beginning (or middle) */ + if (!rxtop) { + /* this is the beginning of a chain */ + rxtop = skb; + skb_fill_page_desc(rxtop, 0, buffer_info->page, + 0, length); + } else { + /* this is the middle of a chain */ + shinfo = skb_shinfo(rxtop); + skb_fill_page_desc(rxtop, shinfo->nr_frags, + buffer_info->page, 0, + length); + /* re-use the skb, only consumed the page */ + buffer_info->skb = skb; + } + e1000_consume_page(buffer_info, rxtop, length); + goto next_desc; + } else { + if (rxtop) { + /* end of the chain */ + shinfo = skb_shinfo(rxtop); + skb_fill_page_desc(rxtop, shinfo->nr_frags, + buffer_info->page, 0, + length); + /* re-use the current skb, we only consumed the + * page + */ + buffer_info->skb = skb; + skb = rxtop; + rxtop = NULL; + e1000_consume_page(buffer_info, skb, length); + } else { + /* no chain, got EOP, this buf is the packet + * copybreak to save the put_page/alloc_page + */ + if (length <= copybreak && + skb_tailroom(skb) >= length) { + u8 *vaddr; + vaddr = kmap_atomic(buffer_info->page); + memcpy(skb_tail_pointer(skb), vaddr, + length); + kunmap_atomic(vaddr); + /* re-use the page, so don't erase + * buffer_info->page + */ + skb_put(skb, length); + } else { + skb_fill_page_desc(skb, 0, + buffer_info->page, 0, + length); + e1000_consume_page(buffer_info, skb, + length); + } + } + } + + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, staterr, skb); + + e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); + + /* probably a little skewed due to removing CRC */ + total_rx_bytes += skb->len; + total_rx_packets++; + + /* eth type trans needs skb->data to point to something */ + if (!pskb_may_pull(skb, ETH_HLEN)) { + e_err("pskb_may_pull failed.\n"); + dev_kfree_skb_irq(skb); + goto next_desc; + } + + e1000_receive_skb(adapter, netdev, skb, staterr, + rx_desc->wb.upper.vlan); + +next_desc: + rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF); + + /* return some buffers to hardware, one at a time is too slow */ + if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) { + adapter->alloc_rx_buf(rx_ring, cleaned_count, + GFP_ATOMIC); + cleaned_count = 0; + } + + /* use prefetched values */ + rx_desc = next_rxd; + buffer_info = next_buffer; + + staterr = le32_to_cpu(rx_desc->wb.upper.status_error); + } + rx_ring->next_to_clean = i; + + cleaned_count = 
e1000_desc_unused(rx_ring); + if (cleaned_count) + adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC); + + adapter->total_rx_bytes += total_rx_bytes; + adapter->total_rx_packets += total_rx_packets; + return cleaned; +} + +/** + * e1000_clean_rx_ring - Free Rx Buffers per Queue + * @rx_ring: Rx descriptor ring + **/ +static void e1000_clean_rx_ring(struct e1000_ring *rx_ring) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_buffer *buffer_info; + struct e1000_ps_page *ps_page; + struct pci_dev *pdev = adapter->pdev; + unsigned int i, j; + + /* Free all the Rx ring sk_buffs */ + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + if (buffer_info->dma) { + if (adapter->clean_rx == e1000_clean_rx_irq) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_buffer_len, + DMA_FROM_DEVICE); + else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq) + dma_unmap_page(&pdev->dev, buffer_info->dma, + PAGE_SIZE, DMA_FROM_DEVICE); + else if (adapter->clean_rx == e1000_clean_rx_irq_ps) + dma_unmap_single(&pdev->dev, buffer_info->dma, + adapter->rx_ps_bsize0, + DMA_FROM_DEVICE); + buffer_info->dma = 0; + } + + if (buffer_info->page) { + put_page(buffer_info->page); + buffer_info->page = NULL; + } + + if (buffer_info->skb) { + dev_kfree_skb(buffer_info->skb); + buffer_info->skb = NULL; + } + + for (j = 0; j < PS_PAGE_BUFFERS; j++) { + ps_page = &buffer_info->ps_pages[j]; + if (!ps_page->page) + break; + dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE, + DMA_FROM_DEVICE); + ps_page->dma = 0; + put_page(ps_page->page); + ps_page->page = NULL; + } + } + + /* there also may be some cached data from a chained receive */ + if (rx_ring->rx_skb_top) { + dev_kfree_skb(rx_ring->rx_skb_top); + rx_ring->rx_skb_top = NULL; + } + + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + adapter->flags2 &= ~FLAG2_IS_DISCARDING; +} + +static void e1000e_downshift_workaround(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + downshift_task); + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); +} + +/** + * e1000_intr_msi - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + /* read ICR disables interrupts using IAM */ + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = true; + /* ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers + */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + schedule_work(&adapter->downshift_task); + + /* 80003ES2LAN workaround-- For packet buffer work-around on + * link down event; disable receives here in the ISR and reset + * adapter in watchdog + */ + if (netif_carrier_ok(netdev) && + adapter->flags & FLAG_RX_NEEDS_RESTART) { + /* disable receives */ + u32 rctl = er32(RCTL); + + ew32(RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= FLAG_RESTART_NOW; + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + /* Reset on 
uncorrectable ECC error */ + if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt))) { + u32 pbeccsts = er32(PBECCSTS); + + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); + + /* return immediately since reset is imminent */ + return IRQ_HANDLED; + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + + return IRQ_HANDLED; +} + +/** + * e1000_intr - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 rctl, icr = er32(ICR); + + if (!icr || test_bit(__E1000_DOWN, &adapter->state)) + return IRQ_NONE; /* Not our interrupt */ + + /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is + * not set, then the adapter didn't send an interrupt + */ + if (!(icr & E1000_ICR_INT_ASSERTED)) + return IRQ_NONE; + + /* Interrupt Auto-Mask...upon reading ICR, + * interrupts are masked. No need for the + * IMC write + */ + + if (icr & E1000_ICR_LSC) { + hw->mac.get_link_status = true; + /* ICH8 workaround-- Call gig speed drop workaround on cable + * disconnect (LSC) before accessing any PHY registers + */ + if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) && + (!(er32(STATUS) & E1000_STATUS_LU))) + schedule_work(&adapter->downshift_task); + + /* 80003ES2LAN workaround-- + * For packet buffer work-around on link down event; + * disable receives here in the ISR and + * reset adapter in watchdog + */ + if (netif_carrier_ok(netdev) && + (adapter->flags & FLAG_RX_NEEDS_RESTART)) { + /* disable receives */ + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + adapter->flags |= FLAG_RESTART_NOW; + } + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + + /* Reset on uncorrectable ECC error */ + if ((icr & E1000_ICR_ECCER) && ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt))) { + u32 pbeccsts = er32(PBECCSTS); + + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + + /* Do the reset outside of interrupt context */ + schedule_work(&adapter->reset_task); + + /* return immediately since reset is imminent */ + return IRQ_HANDLED; + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_msix_other(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + if (!(icr & E1000_ICR_INT_ASSERTED)) { + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, E1000_IMS_OTHER); + return IRQ_NONE; + } + + if 
(icr & adapter->eiac_mask) + ew32(ICS, (icr & adapter->eiac_mask)); + + if (icr & E1000_ICR_OTHER) { + if (!(icr & E1000_ICR_LSC)) + goto no_link_interrupt; + hw->mac.get_link_status = true; + /* guard against interrupt when we're going down */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, jiffies + 1); + } + +no_link_interrupt: + if (!test_bit(__E1000_DOWN, &adapter->state)) + ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER); + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + + adapter->total_tx_bytes = 0; + adapter->total_tx_packets = 0; + + if (!e1000_clean_tx_irq(tx_ring)) + /* Ring was not completely cleaned, so fire another interrupt */ + ew32(ICS, tx_ring->ims_val); + + return IRQ_HANDLED; +} + +static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *rx_ring = adapter->rx_ring; + + /* Write the ITR value calculated at the end of the + * previous interrupt. + */ + if (rx_ring->set_itr) { + u32 itr = rx_ring->itr_val ? + 1000000000 / (rx_ring->itr_val * 256) : 0; + + writel(itr, rx_ring->itr_register); + rx_ring->set_itr = 0; + } + + if (napi_schedule_prep(&adapter->napi)) { + adapter->total_rx_bytes = 0; + adapter->total_rx_packets = 0; + __napi_schedule(&adapter->napi); + } + return IRQ_HANDLED; +} + +/** + * e1000_configure_msix - Configure MSI-X hardware + * + * e1000_configure_msix sets up the hardware to properly + * generate MSI-X interrupts. + **/ +static void e1000_configure_msix(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + struct e1000_ring *tx_ring = adapter->tx_ring; + int vector = 0; + u32 ctrl_ext, ivar = 0; + + adapter->eiac_mask = 0; + + /* Workaround issue with spurious interrupts on 82574 in MSI-X mode */ + if (hw->mac.type == e1000_82574) { + u32 rfctl = er32(RFCTL); + + rfctl |= E1000_RFCTL_ACK_DIS; + ew32(RFCTL, rfctl); + } + + /* Configure Rx vector */ + rx_ring->ims_val = E1000_IMS_RXQ0; + adapter->eiac_mask |= rx_ring->ims_val; + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + rx_ring->itr_register); + else + writel(1, rx_ring->itr_register); + ivar = E1000_IVAR_INT_ALLOC_VALID | vector; + + /* Configure Tx vector */ + tx_ring->ims_val = E1000_IMS_TXQ0; + vector++; + if (tx_ring->itr_val) + writel(1000000000 / (tx_ring->itr_val * 256), + tx_ring->itr_register); + else + writel(1, tx_ring->itr_register); + adapter->eiac_mask |= tx_ring->ims_val; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8); + + /* set vector for Other Causes, e.g. 
link changes */ + vector++; + ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16); + if (rx_ring->itr_val) + writel(1000000000 / (rx_ring->itr_val * 256), + hw->hw_addr + E1000_EITR_82574(vector)); + else + writel(1, hw->hw_addr + E1000_EITR_82574(vector)); + + /* Cause Tx interrupts on every write back */ + ivar |= (1 << 31); + + ew32(IVAR, ivar); + + /* enable MSI-X PBA support */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_PBA_CLR; + + /* Auto-Mask Other interrupts upon ICR read */ + ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER); + ctrl_ext |= E1000_CTRL_EXT_EIAME; + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); +} + +void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter) +{ + if (adapter->msix_entries) { + pci_disable_msix(adapter->pdev); + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; + } else if (adapter->flags & FLAG_MSI_ENABLED) { + pci_disable_msi(adapter->pdev); + adapter->flags &= ~FLAG_MSI_ENABLED; + } +} + +/** + * e1000e_set_interrupt_capability - set MSI or MSI-X if supported + * + * Attempt to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +void e1000e_set_interrupt_capability(struct e1000_adapter *adapter) +{ + int err; + int i; + + switch (adapter->int_mode) { + case E1000E_INT_MODE_MSIX: + if (adapter->flags & FLAG_HAS_MSIX) { + adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */ + adapter->msix_entries = kcalloc(adapter->num_vectors, + sizeof(struct + msix_entry), + GFP_KERNEL); + if (adapter->msix_entries) { + struct e1000_adapter *a = adapter; + + for (i = 0; i < adapter->num_vectors; i++) + adapter->msix_entries[i].entry = i; + + err = pci_enable_msix_range(a->pdev, + a->msix_entries, + a->num_vectors, + a->num_vectors); + if (err > 0) + return; + } + /* MSI-X failed, so fall through and try MSI */ + e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n"); + e1000e_reset_interrupt_capability(adapter); + } + adapter->int_mode = E1000E_INT_MODE_MSI; + /* Fall through */ + case E1000E_INT_MODE_MSI: + if (!pci_enable_msi(adapter->pdev)) { + adapter->flags |= FLAG_MSI_ENABLED; + } else { + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n"); + } + /* Fall through */ + case E1000E_INT_MODE_LEGACY: + /* Don't do anything; this is the system default */ + break; + } + + /* store the number of vectors being used */ + adapter->num_vectors = 1; +} + +/** + * e1000_request_msix - Initialize MSI-X interrupts + * + * e1000_request_msix allocates MSI-X vectors and requests interrupts from the + * kernel. 
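+ * The three vectors are requested in the order they were mapped in + * e1000_configure_msix: Rx (vector 0), Tx (vector 1) and link/Other + * causes (vector 2).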
+ **/ +static int e1000_request_msix(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err = 0, vector = 0; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + snprintf(adapter->rx_ring->name, + sizeof(adapter->rx_ring->name) - 1, + "%.14s-rx-0", netdev->name); + else + memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[vector].vector, + e1000_intr_msix_rx, 0, adapter->rx_ring->name, + netdev); + if (err) + return err; + adapter->rx_ring->itr_register = adapter->hw.hw_addr + + E1000_EITR_82574(vector); + adapter->rx_ring->itr_val = adapter->itr; + vector++; + + if (strlen(netdev->name) < (IFNAMSIZ - 5)) + snprintf(adapter->tx_ring->name, + sizeof(adapter->tx_ring->name) - 1, + "%.14s-tx-0", netdev->name); + else + memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ); + err = request_irq(adapter->msix_entries[vector].vector, + e1000_intr_msix_tx, 0, adapter->tx_ring->name, + netdev); + if (err) + return err; + adapter->tx_ring->itr_register = adapter->hw.hw_addr + + E1000_EITR_82574(vector); + adapter->tx_ring->itr_val = adapter->itr; + vector++; + + err = request_irq(adapter->msix_entries[vector].vector, + e1000_msix_other, 0, netdev->name, netdev); + if (err) + return err; + + e1000_configure_msix(adapter); + + return 0; +} + +/** + * e1000_request_irq - initialize interrupts + * + * Attempts to configure interrupts using the best available + * capabilities of the hardware and kernel. + **/ +static int e1000_request_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + int err; + + if (adapter->msix_entries) { + err = e1000_request_msix(adapter); + if (!err) + return err; + /* fall back to MSI */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_MSI; + e1000e_set_interrupt_capability(adapter); + } + if (adapter->flags & FLAG_MSI_ENABLED) { + err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0, + netdev->name, netdev); + if (!err) + return err; + + /* fall back to legacy interrupt */ + e1000e_reset_interrupt_capability(adapter); + adapter->int_mode = E1000E_INT_MODE_LEGACY; + } + + err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED, + netdev->name, netdev); + if (err) + e_err("Unable to allocate interrupt, Error: %d\n", err); + + return err; +} + +static void e1000_free_irq(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + if (adapter->msix_entries) { + int vector = 0; + + free_irq(adapter->msix_entries[vector].vector, netdev); + vector++; + + free_irq(adapter->msix_entries[vector].vector, netdev); + vector++; + + /* Other Causes interrupt vector */ + free_irq(adapter->msix_entries[vector].vector, netdev); + return; + } + + free_irq(adapter->pdev->irq, netdev); +} + +/** + * e1000_irq_disable - Mask off interrupt generation on the NIC + **/ +static void e1000_irq_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + ew32(IMC, ~0); + if (adapter->msix_entries) + ew32(EIAC_82574, 0); + e1e_flush(); + + if (adapter->msix_entries) { + int i; + + for (i = 0; i < adapter->num_vectors; i++) + synchronize_irq(adapter->msix_entries[i].vector); + } else { + synchronize_irq(adapter->pdev->irq); + } +} + +/** + * e1000_irq_enable - Enable default interrupt generation settings + **/ +static void e1000_irq_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (adapter->msix_entries) { + ew32(EIAC_82574, adapter->eiac_mask & 
E1000_EIAC_MASK_82574); + ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC); + } else if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { + ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER); + } else { + ew32(IMS, IMS_ENABLE_MASK); + } + e1e_flush(); +} + +/** + * e1000e_get_hw_control - get control of the h/w from f/w + * @adapter: address of board private structure + * + * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that + * the driver is loaded. For AMT version (only with 82573) + * of the f/w this means that the network i/f is open. + **/ +void e1000e_get_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware know the driver has taken over */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); + } +} + +/** + * e1000e_release_hw_control - release control of the h/w to f/w + * @adapter: address of board private structure + * + * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. + * For ASF and Pass Through versions of f/w this means that the + * driver is no longer loaded. For AMT version (only with 82573) + * of the f/w this means that the network i/f is closed. + * + **/ +void e1000e_release_hw_control(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl_ext; + u32 swsm; + + /* Let firmware take over control of h/w */ + if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) { + swsm = er32(SWSM); + ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD); + } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) { + ctrl_ext = er32(CTRL_EXT); + ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); + } +} + +/** + * e1000_alloc_ring_dma - allocate memory for a ring structure + **/ +static int e1000_alloc_ring_dma(struct e1000_adapter *adapter, + struct e1000_ring *ring) +{ + struct pci_dev *pdev = adapter->pdev; + + ring->desc = dma_zalloc_coherent(&pdev->dev, ring->size, &ring->dma, + GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + return 0; +} + +/** + * e1000e_setup_tx_resources - allocate Tx resources (Descriptors) + * @tx_ring: Tx descriptor ring + * + * Return 0 on success, negative on failure + **/ +int e1000e_setup_tx_resources(struct e1000_ring *tx_ring) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + int err = -ENOMEM, size; + + size = sizeof(struct e1000_buffer) * tx_ring->count; + tx_ring->buffer_info = vzalloc(size); + if (!tx_ring->buffer_info) + goto err; + + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, tx_ring); + if (err) + goto err; + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; + + return 0; +err: + vfree(tx_ring->buffer_info); + e_err("Unable to allocate memory for the transmit descriptor ring\n"); + return err; +} + +/** + * e1000e_setup_rx_resources - allocate Rx resources (Descriptors) + * @rx_ring: Rx descriptor ring + * + * Returns 0 on success, negative on failure + **/ +int e1000e_setup_rx_resources(struct e1000_ring *rx_ring) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct e1000_buffer *buffer_info; + int i, size, desc_len, err = -ENOMEM; + + size = sizeof(struct e1000_buffer) * rx_ring->count; +
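/* buffer_info holds one entry per descriptor; allocate it with vzalloc + * since the table can be large for big rings + */ +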
rx_ring->buffer_info = vzalloc(size); + if (!rx_ring->buffer_info) + goto err; + + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS, + sizeof(struct e1000_ps_page), + GFP_KERNEL); + if (!buffer_info->ps_pages) + goto err_pages; + } + + desc_len = sizeof(union e1000_rx_desc_packet_split); + + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * desc_len; + rx_ring->size = ALIGN(rx_ring->size, 4096); + + err = e1000_alloc_ring_dma(adapter, rx_ring); + if (err) + goto err_pages; + + rx_ring->next_to_clean = 0; + rx_ring->next_to_use = 0; + rx_ring->rx_skb_top = NULL; + + return 0; + +err_pages: + for (i = 0; i < rx_ring->count; i++) { + buffer_info = &rx_ring->buffer_info[i]; + kfree(buffer_info->ps_pages); + } +err: + vfree(rx_ring->buffer_info); + e_err("Unable to allocate memory for the receive descriptor ring\n"); + return err; +} + +/** + * e1000_clean_tx_ring - Free Tx Buffers + * @tx_ring: Tx descriptor ring + **/ +static void e1000_clean_tx_ring(struct e1000_ring *tx_ring) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_buffer *buffer_info; + unsigned long size; + unsigned int i; + + for (i = 0; i < tx_ring->count; i++) { + buffer_info = &tx_ring->buffer_info[i]; + e1000_put_txbuf(tx_ring, buffer_info); + } + + netdev_reset_queue(adapter->netdev); + size = sizeof(struct e1000_buffer) * tx_ring->count; + memset(tx_ring->buffer_info, 0, size); + + memset(tx_ring->desc, 0, tx_ring->size); + + tx_ring->next_to_use = 0; + tx_ring->next_to_clean = 0; +} + +/** + * e1000e_free_tx_resources - Free Tx Resources per Queue + * @tx_ring: Tx descriptor ring + * + * Free all transmit software resources + **/ +void e1000e_free_tx_resources(struct e1000_ring *tx_ring) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct pci_dev *pdev = adapter->pdev; + + e1000_clean_tx_ring(tx_ring); + + vfree(tx_ring->buffer_info); + tx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, + tx_ring->dma); + tx_ring->desc = NULL; +} + +/** + * e1000e_free_rx_resources - Free Rx Resources + * @rx_ring: Rx descriptor ring + * + * Free all receive software resources + **/ +void e1000e_free_rx_resources(struct e1000_ring *rx_ring) +{ + struct e1000_adapter *adapter = rx_ring->adapter; + struct pci_dev *pdev = adapter->pdev; + int i; + + e1000_clean_rx_ring(rx_ring); + + for (i = 0; i < rx_ring->count; i++) + kfree(rx_ring->buffer_info[i].ps_pages); + + vfree(rx_ring->buffer_info); + rx_ring->buffer_info = NULL; + + dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, + rx_ring->dma); + rx_ring->desc = NULL; +} + +/** + * e1000_update_itr - update the dynamic ITR value based on statistics + * @adapter: pointer to adapter + * @itr_setting: current adapter->itr + * @packets: the number of packets during this measurement interval + * @bytes: the number of bytes during this measurement interval + * + * Stores a new ITR value based on packets and byte + * counts during the last interrupt. The advantage of per interrupt + * computation is faster updates and more accurate ITR for the current + * traffic pattern. Constants in this function were computed + * based on theoretical maximum wire speed and thresholds were set based + * on testing data as well as attempting to minimize response time + * while increasing bulk throughput. This functionality is controlled + * by the InterruptThrottleRate module parameter. 
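+ * + * For example, in the low_latency class an interval of 50 packets and + * 75000 bytes (1500 bytes/packet, above the 1200 threshold) moves the + * setting to bulk_latency.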
+ **/ +static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes) +{ + unsigned int retval = itr_setting; + + if (packets == 0) + return itr_setting; + + switch (itr_setting) { + case lowest_latency: + /* handle TSO and jumbo frames */ + if (bytes / packets > 8000) + retval = bulk_latency; + else if ((packets < 5) && (bytes > 512)) + retval = low_latency; + break; + case low_latency: /* 50 usec aka 20000 ints/s */ + if (bytes > 10000) { + /* this if handles the TSO accounting */ + if (bytes / packets > 8000) + retval = bulk_latency; + else if ((packets < 10) || ((bytes / packets) > 1200)) + retval = bulk_latency; + else if ((packets > 35)) + retval = lowest_latency; + } else if (bytes / packets > 2000) { + retval = bulk_latency; + } else if (packets <= 2 && bytes < 512) { + retval = lowest_latency; + } + break; + case bulk_latency: /* 250 usec aka 4000 ints/s */ + if (bytes > 25000) { + if (packets > 35) + retval = low_latency; + } else if (bytes < 6000) { + retval = low_latency; + } + break; + } + + return retval; +} + +static void e1000_set_itr(struct e1000_adapter *adapter) +{ + u16 current_itr; + u32 new_itr = adapter->itr; + + /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ + if (adapter->link_speed != SPEED_1000) { + current_itr = 0; + new_itr = 4000; + goto set_itr_now; + } + + if (adapter->flags2 & FLAG2_DISABLE_AIM) { + new_itr = 0; + goto set_itr_now; + } + + adapter->tx_itr = e1000_update_itr(adapter->tx_itr, + adapter->total_tx_packets, + adapter->total_tx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency) + adapter->tx_itr = low_latency; + + adapter->rx_itr = e1000_update_itr(adapter->rx_itr, + adapter->total_rx_packets, + adapter->total_rx_bytes); + /* conservative mode (itr 3) eliminates the lowest_latency setting */ + if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency) + adapter->rx_itr = low_latency; + + current_itr = max(adapter->rx_itr, adapter->tx_itr); + + /* counts and packets in update_itr are dependent on these numbers */ + switch (current_itr) { + case lowest_latency: + new_itr = 70000; + break; + case low_latency: + new_itr = 20000; /* aka hwitr = ~200 */ + break; + case bulk_latency: + new_itr = 4000; + break; + default: + break; + } + +set_itr_now: + if (new_itr != adapter->itr) { + /* this attempts to bias the interrupt rate towards Bulk + * by adding intermediate steps when interrupt rate is + * increasing + */ + new_itr = new_itr > adapter->itr ? + min(adapter->itr + (new_itr >> 2), new_itr) : new_itr; + adapter->itr = new_itr; + adapter->rx_ring->itr_val = new_itr; + if (adapter->msix_entries) + adapter->rx_ring->set_itr = 1; + else + e1000e_write_itr(adapter, new_itr); + } +} + +/** + * e1000e_write_itr - write the ITR value to the appropriate registers + * @adapter: address of board private structure + * @itr: new ITR value to program + * + * e1000e_write_itr determines if the adapter is in MSI-X mode + * and, if so, writes the EITR registers with the ITR value. + * Otherwise, it writes the ITR value into the ITR register. + **/ +void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr) +{ + struct e1000_hw *hw = &adapter->hw; + u32 new_itr = itr ? 
1000000000 / (itr * 256) : 0; + + if (adapter->msix_entries) { + int vector; + + for (vector = 0; vector < adapter->num_vectors; vector++) + writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector)); + } else { + ew32(ITR, new_itr); + } +} + +/** + * e1000_alloc_queues - Allocate memory for all rings + * @adapter: board private structure to initialize + **/ +static int e1000_alloc_queues(struct e1000_adapter *adapter) +{ + int size = sizeof(struct e1000_ring); + + adapter->tx_ring = kzalloc(size, GFP_KERNEL); + if (!adapter->tx_ring) + goto err; + adapter->tx_ring->count = adapter->tx_ring_count; + adapter->tx_ring->adapter = adapter; + + adapter->rx_ring = kzalloc(size, GFP_KERNEL); + if (!adapter->rx_ring) + goto err; + adapter->rx_ring->count = adapter->rx_ring_count; + adapter->rx_ring->adapter = adapter; + + return 0; +err: + e_err("Unable to allocate memory for queues\n"); + kfree(adapter->rx_ring); + kfree(adapter->tx_ring); + return -ENOMEM; +} + +/** + * e1000e_poll - NAPI Rx polling callback + * @napi: struct associated with this polling callback + * @weight: number of packets driver is allowed to process this poll + **/ +static int e1000e_poll(struct napi_struct *napi, int weight) +{ + struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, + napi); + struct e1000_hw *hw = &adapter->hw; + struct net_device *poll_dev = adapter->netdev; + int tx_cleaned = 1, work_done = 0; + + adapter = netdev_priv(poll_dev); + + if (!adapter->msix_entries || + (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) + tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring); + + adapter->clean_rx(adapter->rx_ring, &work_done, weight); + + if (!tx_cleaned) + work_done = weight; + + /* If weight not fully consumed, exit the polling mode */ + if (work_done < weight) { + if (adapter->itr_setting & 3) + e1000_set_itr(adapter); + napi_complete_done(napi, work_done); + if (!test_bit(__E1000_DOWN, &adapter->state)) { + if (adapter->msix_entries) + ew32(IMS, adapter->rx_ring->ims_val); + else + e1000_irq_enable(adapter); + } + } + + return work_done; +} + +static int e1000_vlan_rx_add_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + /* don't update vlan cookie if already programmed */ + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) + return 0; + + /* add VID to filter table */ + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta |= (1 << (vid & 0x1F)); + hw->mac.ops.write_vfta(hw, index, vfta); + } + + set_bit(vid, adapter->active_vlans); + + return 0; +} + +static int e1000_vlan_rx_kill_vid(struct net_device *netdev, + __always_unused __be16 proto, u16 vid) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 vfta, index; + + if ((adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && + (vid == adapter->mng_vlan_id)) { + /* release control to f/w */ + e1000e_release_hw_control(adapter); + return 0; + } + + /* remove VID from filter table */ + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + index = (vid >> 5) & 0x7F; + vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index); + vfta &= ~(1 << (vid & 0x1F)); + hw->mac.ops.write_vfta(hw, index, vfta); + } + + clear_bit(vid, adapter->active_vlans); + + return 0; +} + +/** + * e1000e_vlan_filter_disable 
- helper to disable hw VLAN filtering + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* disable VLAN receive filtering */ + rctl = er32(RCTL); + rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN); + ew32(RCTL, rctl); + + if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) { + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + } + } +} + +/** + * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) { + /* enable VLAN receive filtering */ + rctl = er32(RCTL); + rctl |= E1000_RCTL_VFE; + rctl &= ~E1000_RCTL_CFIEN; + ew32(RCTL, rctl); + } +} + +/** + * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + /* disable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl &= ~E1000_CTRL_VME; + ew32(CTRL, ctrl); +} + +/** + * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping + * @adapter: board private structure to initialize + **/ +static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl; + + /* enable VLAN tag insert/strip */ + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_VME; + ew32(CTRL, ctrl); +} + +static void e1000_update_mng_vlan(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + u16 vid = adapter->hw.mng_cookie.vlan_id; + u16 old_vid = adapter->mng_vlan_id; + + if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) { + e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid); + adapter->mng_vlan_id = vid; + } + + if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid)) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid); +} + +static void e1000_restore_vlan(struct e1000_adapter *adapter) +{ + u16 vid; + + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); + + for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) + e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); +} + +static void e1000_init_manageability_pt(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 manc, manc2h, mdef, i, j; + + if (!(adapter->flags & FLAG_MNG_PT_ENABLED)) + return; + + manc = er32(MANC); + + /* enable receiving management packets to the host. this will probably + * generate destination unreachable messages from the host OS, but + * the packets will be handled on SMBUS + */ + manc |= E1000_MANC_EN_MNG2HOST; + manc2h = er32(MANC2H); + + switch (hw->mac.type) { + default: + manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664); + break; + case e1000_82574: + case e1000_82583: + /* Check if IPMI pass-through decision filter already exists; + * if so, enable it. 
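+ * (ports 623 and 664 are the RMCP and secure RMCP ports used by IPMI)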
+ */ + for (i = 0, j = 0; i < 8; i++) { + mdef = er32(MDEF(i)); + + /* Ignore filters with anything other than IPMI ports */ + if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) + continue; + + /* Enable this decision filter in MANC2H */ + if (mdef) + manc2h |= (1 << i); + + j |= mdef; + } + + if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664)) + break; + + /* Create new decision filter in an empty filter */ + for (i = 0, j = 0; i < 8; i++) + if (er32(MDEF(i)) == 0) { + ew32(MDEF(i), (E1000_MDEF_PORT_623 | + E1000_MDEF_PORT_664)); + manc2h |= (1 << 1); + j++; + break; + } + + if (!j) + e_warn("Unable to create IPMI pass-through filter\n"); + break; + } + + ew32(MANC2H, manc2h); + ew32(MANC, manc); +} + +/** + * e1000_configure_tx - Configure Transmit Unit after Reset + * @adapter: board private structure + * + * Configure the Tx unit of the MAC after a reset. + **/ +static void e1000_configure_tx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + u64 tdba; + u32 tdlen, tctl, tarc; + + /* Setup the HW Tx Head and Tail descriptor pointers */ + tdba = tx_ring->dma; + tdlen = tx_ring->count * sizeof(struct e1000_tx_desc); + ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32))); + ew32(TDBAH(0), (tdba >> 32)); + ew32(TDLEN(0), tdlen); + ew32(TDH(0), 0); + ew32(TDT(0), 0); + tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0); + tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0); + + writel(0, tx_ring->head); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_tdt_wa(tx_ring, 0); + else + writel(0, tx_ring->tail); + + /* Set the Tx Interrupt Delay register */ + ew32(TIDV, adapter->tx_int_delay); + /* Tx irq moderation */ + ew32(TADV, adapter->tx_abs_int_delay); + + if (adapter->flags2 & FLAG2_DMA_BURST) { + u32 txdctl = er32(TXDCTL(0)); + + txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH | + E1000_TXDCTL_WTHRESH); + /* set up some performance related parameters to encourage the + * hardware to use the bus more efficiently in bursts, depends + * on the tx_int_delay to be enabled, + * wthresh = 1 ==> burst write is disabled to avoid Tx stalls + * hthresh = 1 ==> prefetch when one or more available + * pthresh = 0x1f ==> prefetch if internal cache 31 or less + * BEWARE: this seems to work but should be considered first if + * there are Tx hangs or other Tx related bugs + */ + txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE; + ew32(TXDCTL(0), txdctl); + } + /* erratum work around: set txdctl the same for both queues */ + ew32(TXDCTL(1), er32(TXDCTL(0))); + + /* Program the Transmit Control Register */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_CT; + tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC | + (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT); + + if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) { + tarc = er32(TARC(0)); + /* set the speed mode bit, we'll clear it if we're not at + * gigabit link later + */ +#define SPEED_MODE_BIT (1 << 21) + tarc |= SPEED_MODE_BIT; + ew32(TARC(0), tarc); + } + + /* errata: program both queues to unweighted RR */ + if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) { + tarc = er32(TARC(0)); + tarc |= 1; + ew32(TARC(0), tarc); + tarc = er32(TARC(1)); + tarc |= 1; + ew32(TARC(1), tarc); + } + + /* Setup Transmit Descriptor Settings for eop descriptor */ + adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS; + + /* only set IDE if we are delaying interrupts using the timers */ + if (adapter->tx_int_delay) + adapter->txd_cmd |= E1000_TXD_CMD_IDE; + + /* enable Report Status bit */ + 
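/* RS makes the hardware write back descriptor status (DD) so that + * e1000_clean_tx_irq can reclaim completed buffers + */ +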
adapter->txd_cmd |= E1000_TXD_CMD_RS; + + ew32(TCTL, tctl); + + hw->mac.ops.config_collision_dist(hw); + + /* SPT Si errata workaround to avoid data corruption */ + if (hw->mac.type == e1000_pch_spt) { + u32 reg_val; + + reg_val = er32(IOSFPC); + reg_val |= E1000_RCTL_RDMTS_HEX; + ew32(IOSFPC, reg_val); + + reg_val = er32(TARC(0)); + reg_val |= E1000_TARC0_CB_MULTIQ_3_REQ; + ew32(TARC(0), reg_val); + } +} + +/** + * e1000_setup_rctl - configure the receive control registers + * @adapter: Board private structure + **/ +#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ + (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) +static void e1000_setup_rctl(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 rctl, rfctl; + u32 pages = 0; + + /* Workaround Si errata on PCHx - configure jumbo frame flow. + * If jumbo frames not set, program related MAC/PHY registers + * to h/w defaults + */ + if (hw->mac.type >= e1000_pch2lan) { + s32 ret_val; + + if (adapter->netdev->mtu > ETH_DATA_LEN) + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); + else + ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); + + if (ret_val) + e_dbg("failed to enable|disable jumbo frame workaround mode\n"); + } + + /* Program MC offset vector base */ + rctl = er32(RCTL); + rctl &= ~(3 << E1000_RCTL_MO_SHIFT); + rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | + E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | + (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); + + /* Do not Store bad packets */ + rctl &= ~E1000_RCTL_SBP; + + /* Enable Long Packet receive */ + if (adapter->netdev->mtu <= ETH_DATA_LEN) + rctl &= ~E1000_RCTL_LPE; + else + rctl |= E1000_RCTL_LPE; + + /* Some systems expect that the CRC is included in SMBUS traffic. The + * hardware strips the CRC before sending to both SMBUS (BMC) and to + * host memory when this is enabled + */ + if (adapter->flags2 & FLAG2_CRC_STRIPPING) + rctl |= E1000_RCTL_SECRC; + + /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */ + if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) { + u16 phy_data; + + e1e_rphy(hw, PHY_REG(770, 26), &phy_data); + phy_data &= 0xfff8; + phy_data |= (1 << 2); + e1e_wphy(hw, PHY_REG(770, 26), phy_data); + + e1e_rphy(hw, 22, &phy_data); + phy_data &= 0x0fff; + phy_data |= (1 << 14); + e1e_wphy(hw, 0x10, 0x2823); + e1e_wphy(hw, 0x11, 0x0003); + e1e_wphy(hw, 22, phy_data); + } + + /* Setup buffer sizes */ + rctl &= ~E1000_RCTL_SZ_4096; + rctl |= E1000_RCTL_BSEX; + switch (adapter->rx_buffer_len) { + case 2048: + default: + rctl |= E1000_RCTL_SZ_2048; + rctl &= ~E1000_RCTL_BSEX; + break; + case 4096: + rctl |= E1000_RCTL_SZ_4096; + break; + case 8192: + rctl |= E1000_RCTL_SZ_8192; + break; + case 16384: + rctl |= E1000_RCTL_SZ_16384; + break; + } + + /* Enable Extended Status in all Receive Descriptors */ + rfctl = er32(RFCTL); + rfctl |= E1000_RFCTL_EXTEN; + ew32(RFCTL, rfctl); + + /* 82571 and greater support packet-split where the protocol + * header is placed in skb->data and the packet data is + * placed in pages hanging off of skb_shinfo(skb)->nr_frags. + * In the case of a non-split, skb->data is linearly filled, + * followed by the page buffers. Therefore, skb->data is + * sized to hold the largest protocol header. + * + * allocations using alloc_page take too long for regular MTU + * so only enable packet split for jumbo frames + * + * Using pages when the page size is greater than 16k wastes + * a lot of memory, since we allocate 3 pages at all times + * per packet. 
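+ * + * For example, with 4K pages a 9000-byte jumbo MTU gives + * PAGE_USE_COUNT(9000) = 3, so packet split is enabled with three + * ps pages per packet.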
+ */ + pages = PAGE_USE_COUNT(adapter->netdev->mtu); + if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE)) + adapter->rx_ps_pages = pages; + else + adapter->rx_ps_pages = 0; + + if (adapter->rx_ps_pages) { + u32 psrctl = 0; + + /* Enable Packet split descriptors */ + rctl |= E1000_RCTL_DTYP_PS; + + psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT; + + switch (adapter->rx_ps_pages) { + case 3: + psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT; + /* fall-through */ + case 2: + psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT; + /* fall-through */ + case 1: + psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT; + break; + } + + ew32(PSRCTL, psrctl); + } + + /* This is useful for sniffing bad packets. */ + if (adapter->netdev->features & NETIF_F_RXALL) { + /* UPE and MPE will be handled by normal PROMISC logic + * in e1000e_set_rx_mode + */ + rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ + E1000_RCTL_BAM | /* RX All Bcast Pkts */ + E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ + + rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */ + E1000_RCTL_DPF | /* Allow filtered pause */ + E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */ + /* Do not mess with E1000_CTRL_VME, it affects transmit as well, + * and that breaks VLANs. + */ + } + + ew32(RCTL, rctl); + /* just started the receive unit, no need to restart */ + adapter->flags &= ~FLAG_RESTART_NOW; +} + +/** + * e1000_configure_rx - Configure Receive Unit after Reset + * @adapter: board private structure + * + * Configure the Rx unit of the MAC after a reset. + **/ +static void e1000_configure_rx(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *rx_ring = adapter->rx_ring; + u64 rdba; + u32 rdlen, rctl, rxcsum, ctrl_ext; + + if (adapter->rx_ps_pages) { + /* this is a 32 byte descriptor */ + rdlen = rx_ring->count * + sizeof(union e1000_rx_desc_packet_split); + adapter->clean_rx = e1000_clean_rx_irq_ps; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps; + } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) { + rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); + adapter->clean_rx = e1000_clean_jumbo_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers; + } else { + rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended); + adapter->clean_rx = e1000_clean_rx_irq; + adapter->alloc_rx_buf = e1000_alloc_rx_buffers; + } + + /* disable receives while setting up the descriptors */ + rctl = er32(RCTL); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e1e_flush(); + usleep_range(10000, 20000); + + if (adapter->flags2 & FLAG2_DMA_BURST) { + /* set the writeback threshold (only takes effect if the RDTR + * is set). 
set GRAN=1 and write back up to 0x4 worth, and + * enable prefetching of 0x20 Rx descriptors + * granularity = 01 + * wthresh = 04, + * hthresh = 04, + * pthresh = 0x20 + */ + ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE); + ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE); + + /* override the delay timers for enabling bursting, only if + * the value was not set by the user via module options + */ + if (adapter->rx_int_delay == DEFAULT_RDTR) + adapter->rx_int_delay = BURST_RDTR; + if (adapter->rx_abs_int_delay == DEFAULT_RADV) + adapter->rx_abs_int_delay = BURST_RADV; + } + + /* set the Receive Delay Timer Register */ + ew32(RDTR, adapter->rx_int_delay); + + /* irq moderation */ + ew32(RADV, adapter->rx_abs_int_delay); + if ((adapter->itr_setting != 0) && (adapter->itr != 0)) + e1000e_write_itr(adapter, adapter->itr); + + ctrl_ext = er32(CTRL_EXT); + /* Auto-Mask interrupts upon ICR access */ + ctrl_ext |= E1000_CTRL_EXT_IAME; + ew32(IAM, 0xffffffff); + ew32(CTRL_EXT, ctrl_ext); + e1e_flush(); + + /* Setup the HW Rx Head and Tail Descriptor Pointers and + * the Base and Length of the Rx Descriptor Ring + */ + rdba = rx_ring->dma; + ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32))); + ew32(RDBAH(0), (rdba >> 32)); + ew32(RDLEN(0), rdlen); + ew32(RDH(0), 0); + ew32(RDT(0), 0); + rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0); + rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0); + + writel(0, rx_ring->head); + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_rdt_wa(rx_ring, 0); + else + writel(0, rx_ring->tail); + + /* Enable Receive Checksum Offload for TCP and UDP */ + rxcsum = er32(RXCSUM); + if (adapter->netdev->features & NETIF_F_RXCSUM) + rxcsum |= E1000_RXCSUM_TUOFL; + else + rxcsum &= ~E1000_RXCSUM_TUOFL; + ew32(RXCSUM, rxcsum); + + /* With jumbo frames, excessive C-state transition latencies result + * in dropped transactions. + */ + if (adapter->netdev->mtu > ETH_DATA_LEN) { + u32 lat = + ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 - + adapter->max_frame_size) * 8 / 1000; + + if (adapter->flags & FLAG_IS_ICH) { + u32 rxdctl = er32(RXDCTL(0)); + + ew32(RXDCTL(0), rxdctl | 0x3); + } + + pm_qos_update_request(&adapter->pm_qos_req, lat); + } else { + pm_qos_update_request(&adapter->pm_qos_req, + PM_QOS_DEFAULT_VALUE); + } + + /* Enable Receives */ + ew32(RCTL, rctl); +} + +/** + * e1000e_write_mc_addr_list - write multicast addresses to MTA + * @netdev: network interface device structure + * + * Writes multicast address list to the MTA hash table. + * Returns: -ENOMEM on failure + * 0 on no addresses written + * X on writing X addresses to MTA + */ +static int e1000e_write_mc_addr_list(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct netdev_hw_addr *ha; + u8 *mta_list; + int i; + + if (netdev_mc_empty(netdev)) { + /* nothing to program, so clear mc list */ + hw->mac.ops.update_mc_addr_list(hw, NULL, 0); + return 0; + } + + mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC); + if (!mta_list) + return -ENOMEM; + + /* update_mc_addr_list expects a packed array of only addresses. */ + i = 0; + netdev_for_each_mc_addr(ha, netdev) + memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN); + + hw->mac.ops.update_mc_addr_list(hw, mta_list, i); + kfree(mta_list); + + return netdev_mc_count(netdev); +} + +/** + * e1000e_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure + * + * Writes unicast address list to the RAR table. 
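+ * One RAR entry is reserved for the current hardware address and, when + * FLAG_RESET_OVERWRITES_LAA is set, one more for the LAA workaround.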
+ * Returns: -ENOMEM on failure/insufficient address space + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +static int e1000e_write_uc_addr_list(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + unsigned int rar_entries; + int count = 0; + + rar_entries = hw->mac.ops.rar_get_count(hw); + + /* save a rar entry for our hardware address */ + rar_entries--; + + /* save a rar entry for the LAA workaround */ + if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) + rar_entries--; + + /* return ENOMEM indicating insufficient memory for addresses */ + if (netdev_uc_count(netdev) > rar_entries) + return -ENOMEM; + + if (!netdev_uc_empty(netdev) && rar_entries) { + struct netdev_hw_addr *ha; + + /* write the addresses in reverse order to avoid write + * combining + */ + netdev_for_each_uc_addr(ha, netdev) { + int rval; + + if (!rar_entries) + break; + rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--); + if (rval < 0) + return -ENOMEM; + count++; + } + } + + /* zero out the remaining RAR entries not used above */ + for (; rar_entries > 0; rar_entries--) { + ew32(RAH(rar_entries), 0); + ew32(RAL(rar_entries), 0); + } + e1e_flush(); + + return count; +} + +/** + * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure + * + * The ndo_set_rx_mode entry point is called whenever the unicast or multicast + * address list or the network interface flags are updated. This routine is + * responsible for configuring the hardware for proper unicast, multicast, + * promiscuous mode, and all-multi behavior. + **/ +static void e1000e_set_rx_mode(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 rctl; + + if (pm_runtime_suspended(netdev->dev.parent)) + return; + + /* Check for Promiscuous and All Multicast modes */ + rctl = er32(RCTL); + + /* clear the affected bits */ + rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE); + + if (netdev->flags & IFF_PROMISC) { + rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); + /* Do not hardware filter VLANs in promisc mode */ + e1000e_vlan_filter_disable(adapter); + } else { + int count; + + if (netdev->flags & IFF_ALLMULTI) { + rctl |= E1000_RCTL_MPE; + } else { + /* Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ + count = e1000e_write_mc_addr_list(netdev); + if (count < 0) + rctl |= E1000_RCTL_MPE; + } + e1000e_vlan_filter_enable(adapter); + /* Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ + count = e1000e_write_uc_addr_list(netdev); + if (count < 0) + rctl |= E1000_RCTL_UPE; + } + + ew32(RCTL, rctl); + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) + e1000e_vlan_strip_enable(adapter); + else + e1000e_vlan_strip_disable(adapter); +} + +static void e1000e_setup_rss_hash(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 mrqc, rxcsum; + u32 rss_key[10]; + int i; + + netdev_rss_key_fill(rss_key, sizeof(rss_key)); + for (i = 0; i < 10; i++) + ew32(RSSRK(i), rss_key[i]); + + /* Direct all traffic to queue 0 */ + for (i = 0; i < 32; i++) + ew32(RETA(i), 0); + + /* Disable raw packet checksumming so that RSS hash is placed in + * descriptor on writeback. 
+ */ + rxcsum = er32(RXCSUM); + rxcsum |= E1000_RXCSUM_PCSD; + + ew32(RXCSUM, rxcsum); + + mrqc = (E1000_MRQC_RSS_FIELD_IPV4 | + E1000_MRQC_RSS_FIELD_IPV4_TCP | + E1000_MRQC_RSS_FIELD_IPV6 | + E1000_MRQC_RSS_FIELD_IPV6_TCP | + E1000_MRQC_RSS_FIELD_IPV6_TCP_EX); + + ew32(MRQC, mrqc); +} + +/** + * e1000e_get_base_timinca - get default SYSTIM time increment attributes + * @adapter: board private structure + * @timinca: pointer to returned time increment attributes + * + * Get attributes for incrementing the System Time Register SYSTIML/H at + * the default base frequency, and set the cyclecounter shift value. + **/ +s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) +{ + struct e1000_hw *hw = &adapter->hw; + u32 incvalue, incperiod, shift; + + /* Make sure clock is enabled on I217/I218/I219 before checking + * the frequency + */ + if (((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) && + !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) && + !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) { + u32 fextnvm7 = er32(FEXTNVM7); + + if (!(fextnvm7 & (1 << 0))) { + ew32(FEXTNVM7, fextnvm7 | (1 << 0)); + e1e_flush(); + } + } + + switch (hw->mac.type) { + case e1000_pch2lan: + /* Stable 96MHz frequency */ + incperiod = INCPERIOD_96MHz; + incvalue = INCVALUE_96MHz; + shift = INCVALUE_SHIFT_96MHz; + adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + break; + case e1000_pch_lpt: + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { + /* Stable 96MHz frequency */ + incperiod = INCPERIOD_96MHz; + incvalue = INCVALUE_96MHz; + shift = INCVALUE_SHIFT_96MHz; + adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz; + } else { + /* Stable 25MHz frequency */ + incperiod = INCPERIOD_25MHz; + incvalue = INCVALUE_25MHz; + shift = INCVALUE_SHIFT_25MHz; + adapter->cc.shift = shift; + } + break; + case e1000_pch_spt: + if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { + /* Stable 24MHz frequency */ + incperiod = INCPERIOD_24MHz; + incvalue = INCVALUE_24MHz; + shift = INCVALUE_SHIFT_24MHz; + adapter->cc.shift = shift; + break; + } + return -EINVAL; + case e1000_82574: + case e1000_82583: + /* Stable 25MHz frequency */ + incperiod = INCPERIOD_25MHz; + incvalue = INCVALUE_25MHz; + shift = INCVALUE_SHIFT_25MHz; + adapter->cc.shift = shift; + break; + default: + return -EINVAL; + } + + *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) | + ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK)); + + return 0; +} + +/** + * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable + * @adapter: board private structure + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware filters. + * Not all combinations are supported, in particular event type has to be + * specified. Matching the kind of event packet is not supported, with the + * exception of "all V2 events regardless of level 2 or 4". 
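+ * + * Filters that the hardware cannot match exactly are widened to the + * nearest supported superset instead of being rejected.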
+ **/ +static int e1000e_config_hwtstamp(struct e1000_adapter *adapter, + struct hwtstamp_config *config) +{ + struct e1000_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED; + u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED; + u32 rxmtrl = 0; + u16 rxudp = 0; + bool is_l4 = false; + bool is_l2 = false; + u32 regval; + s32 ret_val; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return -EINVAL; + + /* flags reserved for future extensions - must be zero */ + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tsync_tx_ctl = 0; + break; + case HWTSTAMP_TX_ON: + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1; + rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + /* Also time stamps V2 L2 Path Delay Request/Response */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; + is_l2 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + /* Also time stamps V2 L2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; + is_l2 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + /* Hardware cannot filter just V2 L4 Sync messages; + * fall-through to V2 (both L2 and L4) Sync. + */ + case HWTSTAMP_FILTER_PTP_V2_SYNC: + /* Also time stamps V2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + /* Hardware cannot filter just V2 L4 Delay Request messages; + * fall-through to V2 (both L2 and L4) Delay Request. + */ + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* Also time stamps V2 Path Delay Request/Response. */ + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2; + rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + /* Hardware cannot filter just V2 L4 or L2 Event messages; + * fall-through to all V2 (both L2 and L4) Events. + */ + case HWTSTAMP_FILTER_PTP_V2_EVENT: + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; + is_l2 = true; + is_l4 = true; + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: + /* For V1, the hardware can only filter Sync messages or + * Delay Request messages but not both so fall-through to + * time stamp all packets. 
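+ * The rx_filter reported back to user space is likewise widened to + * HWTSTAMP_FILTER_ALL below.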
+ */ + case HWTSTAMP_FILTER_ALL: + is_l2 = true; + is_l4 = true; + tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL; + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + return -ERANGE; + } + + adapter->hwtstamp_config = *config; + + /* enable/disable Tx h/w time stamping */ + regval = er32(TSYNCTXCTL); + regval &= ~E1000_TSYNCTXCTL_ENABLED; + regval |= tsync_tx_ctl; + ew32(TSYNCTXCTL, regval); + if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) != + (regval & E1000_TSYNCTXCTL_ENABLED)) { + e_err("Timesync Tx Control register not set as expected\n"); + return -EAGAIN; + } + + /* enable/disable Rx h/w time stamping */ + regval = er32(TSYNCRXCTL); + regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK); + regval |= tsync_rx_ctl; + ew32(TSYNCRXCTL, regval); + if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED | + E1000_TSYNCRXCTL_TYPE_MASK)) != + (regval & (E1000_TSYNCRXCTL_ENABLED | + E1000_TSYNCRXCTL_TYPE_MASK))) { + e_err("Timesync Rx Control register not set as expected\n"); + return -EAGAIN; + } + + /* L2: define ethertype filter for time stamped packets */ + if (is_l2) + rxmtrl |= ETH_P_1588; + + /* define which PTP packets get time stamped */ + ew32(RXMTRL, rxmtrl); + + /* Filter by destination port */ + if (is_l4) { + rxudp = PTP_EV_PORT; + cpu_to_be16s(&rxudp); + } + ew32(RXUDP, rxudp); + + e1e_flush(); + + /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */ + er32(RXSTMPH); + er32(TXSTMPH); + + /* Get and set the System Time Register SYSTIM base frequency */ + ret_val = e1000e_get_base_timinca(adapter, ®val); + if (ret_val) + return ret_val; + ew32(TIMINCA, regval); + + /* reset the ns time counter */ + timecounter_init(&adapter->tc, &adapter->cc, + ktime_to_ns(ktime_get_real())); + + return 0; +} + +/** + * e1000_configure - configure the hardware for Rx and Tx + * @adapter: private board structure + **/ +static void e1000_configure(struct e1000_adapter *adapter) +{ + struct e1000_ring *rx_ring = adapter->rx_ring; + + e1000e_set_rx_mode(adapter->netdev); + + e1000_restore_vlan(adapter); + e1000_init_manageability_pt(adapter); + + e1000_configure_tx(adapter); + + if (adapter->netdev->features & NETIF_F_RXHASH) + e1000e_setup_rss_hash(adapter); + e1000_setup_rctl(adapter); + e1000_configure_rx(adapter); + adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL); +} + +/** + * e1000e_power_up_phy - restore link in case the phy was powered down + * @adapter: address of board private structure + * + * The phy may be powered down to save power and turn off link when the + * driver is unloaded and wake on lan is not enabled (among others) + * *** this routine MUST be followed by a call to e1000e_reset *** + **/ +void e1000e_power_up_phy(struct e1000_adapter *adapter) +{ + if (adapter->hw.phy.ops.power_up) + adapter->hw.phy.ops.power_up(&adapter->hw); + + adapter->hw.mac.ops.setup_link(&adapter->hw); +} + +/** + * e1000_power_down_phy - Power down the PHY + * + * Power down the PHY so no link is implied when interface is down. + * The PHY cannot be powered down if management or WoL is active. + */ +static void e1000_power_down_phy(struct e1000_adapter *adapter) +{ + if (adapter->hw.phy.ops.power_down) + adapter->hw.phy.ops.power_down(&adapter->hw); +} + +/** + * e1000_flush_tx_ring - remove all descriptors from the tx_ring + * + * We want to clear all pending descriptors from the TX ring. + * zeroing happens when the HW reads the regs. We assign the ring itself as + * the data of the next descriptor. 
We don't care about the data; we are about + * to reset the HW. + */ +static void e1000_flush_tx_ring(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_tx_desc *tx_desc = NULL; + u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS; + u16 size = 512; + + tctl = er32(TCTL); + ew32(TCTL, tctl | E1000_TCTL_EN); + tdt = er32(TDT(0)); + BUG_ON(tdt != tx_ring->next_to_use); + tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use); + tx_desc->buffer_addr = tx_ring->dma; + + tx_desc->lower.data = cpu_to_le32(txd_lower | size); + tx_desc->upper.data = 0; + /* flush descriptors to memory before notifying the HW */ + wmb(); + tx_ring->next_to_use++; + if (tx_ring->next_to_use == tx_ring->count) + tx_ring->next_to_use = 0; + ew32(TDT(0), tx_ring->next_to_use); + mmiowb(); + usleep_range(200, 250); +} + +/** + * e1000_flush_rx_ring - remove all descriptors from the rx_ring + * + * Mark all descriptors in the RX ring as consumed and disable the rx ring + */ +static void e1000_flush_rx_ring(struct e1000_adapter *adapter) +{ + u32 rctl, rxdctl; + struct e1000_hw *hw = &adapter->hw; + + rctl = er32(RCTL); + ew32(RCTL, rctl & ~E1000_RCTL_EN); + e1e_flush(); + usleep_range(100, 150); + + rxdctl = er32(RXDCTL(0)); + /* zero the lower 14 bits (prefetch and host thresholds) */ + rxdctl &= 0xffffc000; + + /* update thresholds: prefetch threshold to 31, host threshold to 1 + * and make sure the granularity is "descriptors" and not "cache lines" + */ + rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC); + + ew32(RXDCTL(0), rxdctl); + /* momentarily enable the RX ring for the changes to take effect */ + ew32(RCTL, rctl | E1000_RCTL_EN); + e1e_flush(); + usleep_range(100, 150); + ew32(RCTL, rctl & ~E1000_RCTL_EN); +} + +/** + * e1000_flush_desc_rings - remove all descriptors from the descriptor rings + * + * In i219, the descriptor rings must be emptied before resetting the HW + * or before changing the device state to D3 during runtime (runtime PM). + * + * Failure to do this will cause the HW to enter a unit hang state which can + * only be released by a PCI reset on the device. + * + */ + +static void e1000_flush_desc_rings(struct e1000_adapter *adapter) +{ + u16 hang_state; + u32 fext_nvm11, tdlen; + struct e1000_hw *hw = &adapter->hw; + + /* First, disable MULR fix in FEXTNVM11 */ + fext_nvm11 = er32(FEXTNVM11); + fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX; + ew32(FEXTNVM11, fext_nvm11); + /* do nothing if we're not in faulty state, or if the queue is empty */ + tdlen = er32(TDLEN(0)); + pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, + &hang_state); + if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen) + return; + e1000_flush_tx_ring(adapter); + /* recheck, maybe the fault is caused by the rx ring */ + pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS, + &hang_state); + if (hang_state & FLUSH_DESC_REQUIRED) + e1000_flush_rx_ring(adapter); +} + +/** + * e1000e_reset - bring the hardware into a known good state + * + * This function boots the hardware and enables some settings that + * require a configuration cycle of the hardware - those cannot be + * set/changed during runtime. After reset the device needs to be + * properly configured for Rx, Tx etc. 
+ */ +void e1000e_reset(struct e1000_adapter *adapter) +{ + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_fc_info *fc = &adapter->hw.fc; + struct e1000_hw *hw = &adapter->hw; + u32 tx_space, min_tx_space, min_rx_space; + u32 pba = adapter->pba; + u16 hwm; + + /* reset Packet Buffer Allocation to default */ + ew32(PBA, pba); + + if (adapter->max_frame_size > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) { + /* To maintain wire speed transmits, the Tx FIFO should be + * large enough to accommodate two full transmit packets, + * rounded up to the next 1KB and expressed in KB. Likewise, + * the Rx FIFO should be large enough to accommodate at least + * one full receive packet and is similarly rounded up and + * expressed in KB. + */ + pba = er32(PBA); + /* upper 16 bits have the Tx packet buffer allocation size in KB */ + tx_space = pba >> 16; + /* lower 16 bits have the Rx packet buffer allocation size in KB */ + pba &= 0xffff; + /* the Tx FIFO also stores 16 bytes of information about the Tx packet, + * but don't include the Ethernet FCS because hardware appends it + */ + min_tx_space = (adapter->max_frame_size + + sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2; + min_tx_space = ALIGN(min_tx_space, 1024); + min_tx_space >>= 10; + /* software strips receive CRC, so leave room for it */ + min_rx_space = adapter->max_frame_size; + min_rx_space = ALIGN(min_rx_space, 1024); + min_rx_space >>= 10; + + /* If current Tx allocation is less than the min Tx FIFO size, + * and the min Tx FIFO size is less than the current Rx FIFO + * allocation, take space away from current Rx allocation + */ + if ((tx_space < min_tx_space) && + ((min_tx_space - tx_space) < pba)) { + pba -= min_tx_space - tx_space; + + /* if short on Rx space, Rx wins and must trump Tx + * adjustment + */ + if (pba < min_rx_space) + pba = min_rx_space; + } + + ew32(PBA, pba); + } + + /* flow control settings + * + * The high water mark must be low enough to fit one full frame + * (or the size used for early receive) above it in the Rx FIFO. + * Set it to the lower of: + * - 90% of the Rx FIFO size, and + * - the full Rx FIFO size minus one full frame + */ + if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME) + fc->pause_time = 0xFFFF; + else + fc->pause_time = E1000_FC_PAUSE_TIME; + fc->send_xon = true; + fc->current_mode = fc->requested_mode; + + switch (hw->mac.type) { + case e1000_ich9lan: + case e1000_ich10lan: + if (adapter->netdev->mtu > ETH_DATA_LEN) { + pba = 14; + ew32(PBA, pba); + fc->high_water = 0x2800; + fc->low_water = fc->high_water - 8; + break; + } + /* fall-through */ + default: + hwm = min(((pba << 10) * 9 / 10), + ((pba << 10) - adapter->max_frame_size)); + + fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */ + fc->low_water = fc->high_water - 8; + break; + case e1000_pchlan: + /* Workaround PCH LOM adapter hangs with certain network + * loads. If hangs persist, try disabling Tx flow control. 
+ */ + if (adapter->netdev->mtu > ETH_DATA_LEN) { + fc->high_water = 0x3500; + fc->low_water = 0x1500; + } else { + fc->high_water = 0x5000; + fc->low_water = 0x3000; + } + fc->refresh_time = 0x1000; + break; + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + fc->refresh_time = 0x0400; + + if (adapter->netdev->mtu <= ETH_DATA_LEN) { + fc->high_water = 0x05C20; + fc->low_water = 0x05048; + fc->pause_time = 0x0650; + break; + } + + pba = 14; + ew32(PBA, pba); + fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH; + fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL; + break; + } + + /* Alignment of Tx data is on an arbitrary byte boundary with the + * maximum size per Tx descriptor limited only to the transmit + * allocation of the packet buffer minus 96 bytes with an upper + * limit of 24KB due to receive synchronization limitations. + */ + adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, + 24 << 10); + + /* Disable Adaptive Interrupt Moderation if 2 full packets cannot + * fit in receive buffer. + */ + if (adapter->itr_setting & 0x3) { + if ((adapter->max_frame_size * 2) > (pba << 10)) { + if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) { + dev_info(&adapter->pdev->dev, + "Interrupt Throttle Rate off\n"); + adapter->flags2 |= FLAG2_DISABLE_AIM; + e1000e_write_itr(adapter, 0); + } + } else if (adapter->flags2 & FLAG2_DISABLE_AIM) { + dev_info(&adapter->pdev->dev, + "Interrupt Throttle Rate on\n"); + adapter->flags2 &= ~FLAG2_DISABLE_AIM; + adapter->itr = 20000; + e1000e_write_itr(adapter, adapter->itr); + } + } + + if (hw->mac.type == e1000_pch_spt) + e1000_flush_desc_rings(adapter); + /* Allow time for pending master requests to run */ + mac->ops.reset_hw(hw); + + /* For parts with AMT enabled, let the firmware know + * that the network interface is in control + */ + if (adapter->flags & FLAG_HAS_AMT) + e1000e_get_hw_control(adapter); + + ew32(WUC, 0); + + if (mac->ops.init_hw(hw)) + e_err("Hardware Error\n"); + + e1000_update_mng_vlan(adapter); + + /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ + ew32(VET, ETH_P_8021Q); + + e1000e_reset_adaptive(hw); + + /* initialize systim and reset the ns time counter */ + e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config); + + /* Set EEE advertisement as appropriate */ + if (adapter->flags2 & FLAG2_HAS_EEE) { + s32 ret_val; + u16 adv_addr; + + switch (hw->phy.type) { + case e1000_phy_82579: + adv_addr = I82579_EEE_ADVERTISEMENT; + break; + case e1000_phy_i217: + adv_addr = I217_EEE_ADVERTISEMENT; + break; + default: + dev_err(&adapter->pdev->dev, + "Invalid PHY type setting EEE advertisement\n"); + return; + } + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) { + dev_err(&adapter->pdev->dev, + "EEE advertisement - unable to acquire PHY\n"); + return; + } + + e1000_write_emi_reg_locked(hw, adv_addr, + hw->dev_spec.ich8lan.eee_disable ? 
+ 0 : adapter->eee_advert); + + hw->phy.ops.release(hw); + } + + if (!netif_running(adapter->netdev) && + !test_bit(__E1000_TESTING, &adapter->state)) + e1000_power_down_phy(adapter); + + e1000_get_phy_info(hw); + + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && + !(adapter->flags & FLAG_SMART_POWER_DOWN)) { + u16 phy_data = 0; + /* speed up time to link by disabling smart power down, ignore + * the return value of this function because there is nothing + * different we would do if it failed + */ + e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data); + phy_data &= ~IGP02E1000_PM_SPD; + e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data); + } + if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) { + u32 reg; + + /* Fextnvm7 @ 0xe4[2] = 1 */ + reg = er32(FEXTNVM7); + reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE; + ew32(FEXTNVM7, reg); + /* Fextnvm9 @ 0x5bb4[13:12] = 11 */ + reg = er32(FEXTNVM9); + reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS | + E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS; + ew32(FEXTNVM9, reg); + } + +} + +int e1000e_up(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* hardware has been reset, we need to reload some things */ + e1000_configure(adapter); + + clear_bit(__E1000_DOWN, &adapter->state); + + if (adapter->msix_entries) + e1000_configure_msix(adapter); + e1000_irq_enable(adapter); + + netif_start_queue(adapter->netdev); + + /* fire a link change interrupt to start the watchdog */ + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); + else + ew32(ICS, E1000_ICS_LSC); + + return 0; +} + +static void e1000e_flush_descriptors(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + if (!(adapter->flags2 & FLAG2_DMA_BURST)) + return; + + /* flush pending descriptor writebacks to memory */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); + + /* execute the writes immediately */ + e1e_flush(); + + /* due to rare timing issues, write to TIDV/RDTR again to ensure the + * write is successful + */ + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); + ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); + + /* execute the writes immediately */ + e1e_flush(); +} + +static void e1000e_update_stats(struct e1000_adapter *adapter); + +/** + * e1000e_down - quiesce the device and optionally reset the hardware + * @adapter: board private structure + * @reset: boolean flag to reset the hardware or not + */ +void e1000e_down(struct e1000_adapter *adapter, bool reset) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + u32 tctl, rctl; + + /* signal that we're down so the interrupt handler does not + * reschedule our watchdog timer + */ + set_bit(__E1000_DOWN, &adapter->state); + + netif_carrier_off(netdev); + + /* disable receives in the hardware */ + rctl = er32(RCTL); + if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX)) + ew32(RCTL, rctl & ~E1000_RCTL_EN); + /* flush and sleep below */ + + netif_stop_queue(netdev); + + /* disable transmits in the hardware */ + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); + + /* flush both disables and wait for them to finish */ + e1e_flush(); + usleep_range(10000, 20000); + + e1000_irq_disable(adapter); + + napi_synchronize(&adapter->napi); + + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + spin_unlock(&adapter->stats64_lock); + + e1000e_flush_descriptors(adapter); + + 
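+ /* Editorial note, not part of the original source: callers are
+ * expected to bracket e1000e_down()/e1000e_up() with the
+ * __E1000_RESETTING bit, exactly as e1000e_reinit_locked() below
+ * does:
+ *
+ *   while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+ *           usleep_range(1000, 2000);
+ *   e1000e_down(adapter, true);
+ *   e1000e_up(adapter);
+ *   clear_bit(__E1000_RESETTING, &adapter->state);
+ */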
adapter->link_speed = 0; + adapter->link_duplex = 0; + + /* Disable Si errata workaround on PCHx for jumbo frame flow */ + if ((hw->mac.type >= e1000_pch2lan) && + (adapter->netdev->mtu > ETH_DATA_LEN) && + e1000_lv_jumbo_workaround_ich8lan(hw, false)) + e_dbg("failed to disable jumbo frame workaround mode\n"); + + if (!pci_channel_offline(adapter->pdev)) { + if (reset) + e1000e_reset(adapter); + else if (hw->mac.type == e1000_pch_spt) + e1000_flush_desc_rings(adapter); + } + e1000_clean_tx_ring(adapter->tx_ring); + e1000_clean_rx_ring(adapter->rx_ring); +} + +void e1000e_reinit_locked(struct e1000_adapter *adapter) +{ + might_sleep(); + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + e1000e_down(adapter, true); + e1000e_up(adapter); + clear_bit(__E1000_RESETTING, &adapter->state); +} + +/** + * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) + * @cc: cyclecounter structure + **/ +static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc) +{ + struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, + cc); + struct e1000_hw *hw = &adapter->hw; + u32 systimel_1, systimel_2, systimeh; + cycle_t systim, systim_next; + /* SYSTIMH latching upon SYSTIML read does not work well. + * This means that if SYSTIML overflows after we read it but before + * we read SYSTIMH, the value of SYSTIMH has been incremented and we + * will experience a huge non linear increment in the systime value + * to fix that we test for overflow and if true, we re-read systime. + */ + systimel_1 = er32(SYSTIML); + systimeh = er32(SYSTIMH); + systimel_2 = er32(SYSTIML); + /* Check for overflow. If there was no overflow, use the values */ + if (systimel_1 < systimel_2) { + systim = (cycle_t)systimel_1; + systim |= (cycle_t)systimeh << 32; + } else { + /* There was an overflow, read again SYSTIMH, and use + * systimel_2 + */ + systimeh = er32(SYSTIMH); + systim = (cycle_t)systimel_2; + systim |= (cycle_t)systimeh << 32; + } + + if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) { + u64 incvalue, time_delta, rem, temp; + int i; + + /* errata for 82574/82583 possible bad bits read from SYSTIMH/L + * check to see that the time is incrementing at a reasonable + * rate and is a multiple of incvalue + */ + incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; + for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { + /* latch SYSTIMH on read of SYSTIML */ + systim_next = (cycle_t)er32(SYSTIML); + systim_next |= (cycle_t)er32(SYSTIMH) << 32; + + time_delta = systim_next - systim; + temp = time_delta; + rem = do_div(temp, incvalue); + + systim = systim_next; + + if ((time_delta < E1000_82574_SYSTIM_EPSILON) && + (rem == 0)) + break; + } + } + return systim; +} + +/** + * e1000_sw_init - Initialize general software structures (struct e1000_adapter) + * @adapter: board private structure to initialize + * + * e1000_sw_init initializes the Adapter private data structure. + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). 
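+ *
+ * Editorial note on e1000e_cyclecounter_read() above, not part of the
+ * original source: the double SYSTIML read guards against the low half
+ * wrapping between the two MMIO reads, e.g.
+ *
+ *   systimel_1 = 0xFFFFFFFE   (first read)
+ *   SYSTIML wraps to 0, SYSTIMH increments
+ *   systimeh read
+ *   systimel_2 = 0x00000001   (second read)
+ *
+ * here systimel_1 >= systimel_2, so the function re-reads SYSTIMH and
+ * pairs it with systimel_2 rather than pairing the pre-wrap low half
+ * with the post-wrap high half.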
+ **/ +static int e1000_sw_init(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + + adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; + adapter->rx_ps_bsize0 = 128; + adapter->max_frame_size = netdev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; + adapter->tx_ring_count = E1000_DEFAULT_TXD; + adapter->rx_ring_count = E1000_DEFAULT_RXD; + + spin_lock_init(&adapter->stats64_lock); + + e1000e_set_interrupt_capability(adapter); + + if (e1000_alloc_queues(adapter)) + return -ENOMEM; + + /* Setup hardware time stamping cyclecounter */ + if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { + adapter->cc.read = e1000e_cyclecounter_read; + adapter->cc.mask = CYCLECOUNTER_MASK(64); + adapter->cc.mult = 1; + /* cc.shift set in e1000e_get_base_timinca() */ + + spin_lock_init(&adapter->systim_lock); + INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work); + } + + /* Explicitly disable IRQ since the NIC can be in any state. */ + e1000_irq_disable(adapter); + + set_bit(__E1000_DOWN, &adapter->state); + return 0; +} + +/** + * e1000_intr_msi_test - Interrupt Handler + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 icr = er32(ICR); + + e_dbg("icr is %08X\n", icr); + if (icr & E1000_ICR_RXSEQ) { + adapter->flags &= ~FLAG_MSI_TEST_FAILED; + /* Force memory writes to complete before acknowledging the + * interrupt is handled. + */ + wmb(); + } + + return IRQ_HANDLED; +} + +/** + * e1000_test_msi_interrupt - Returns 0 for successful test + * @adapter: board private struct + * + * code flow taken from tg3.c + **/ +static int e1000_test_msi_interrupt(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + int err; + + /* poll_enable hasn't been called yet, so don't need disable */ + /* clear any pending events */ + er32(ICR); + + /* free the real vector and request a test handler */ + e1000_free_irq(adapter); + e1000e_reset_interrupt_capability(adapter); + + /* Assume that the test fails; if it succeeds, the test + * MSI irq handler will unset this flag + */ + adapter->flags |= FLAG_MSI_TEST_FAILED; + + err = pci_enable_msi(adapter->pdev); + if (err) + goto msi_test_failed; + + err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0, + netdev->name, netdev); + if (err) { + pci_disable_msi(adapter->pdev); + goto msi_test_failed; + } + + /* Force memory writes to complete before enabling and firing an + * interrupt.
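+ *
+ * Editorial summary, not part of the original source: the self-test
+ * assumes failure (FLAG_MSI_TEST_FAILED was set above), fires a benign
+ * Rx-sequence-error interrupt through ICS below, sleeps 100 ms, and
+ * relies on e1000_intr_msi_test() to clear the flag if the MSI was
+ * actually delivered; if the flag survives, the driver falls back to
+ * legacy INTx interrupts (E1000E_INT_MODE_LEGACY).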
+ */ + wmb(); + + e1000_irq_enable(adapter); + + /* fire an unusual interrupt on the test handler */ + ew32(ICS, E1000_ICS_RXSEQ); + e1e_flush(); + msleep(100); + + e1000_irq_disable(adapter); + + rmb(); /* read flags after interrupt has been fired */ + + if (adapter->flags & FLAG_MSI_TEST_FAILED) { + adapter->int_mode = E1000E_INT_MODE_LEGACY; + e_info("MSI interrupt test failed, using legacy interrupt.\n"); + } else { + e_dbg("MSI interrupt test succeeded!\n"); + } + + free_irq(adapter->pdev->irq, netdev); + pci_disable_msi(adapter->pdev); + +msi_test_failed: + e1000e_set_interrupt_capability(adapter); + return e1000_request_irq(adapter); +} + +/** + * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored + * @adapter: board private struct + * + * code flow taken from tg3.c, called with e1000 interrupts disabled. + **/ +static int e1000_test_msi(struct e1000_adapter *adapter) +{ + int err; + u16 pci_cmd; + + if (!(adapter->flags & FLAG_MSI_ENABLED)) + return 0; + + /* disable SERR in case the MSI write causes a master abort */ + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + if (pci_cmd & PCI_COMMAND_SERR) + pci_write_config_word(adapter->pdev, PCI_COMMAND, + pci_cmd & ~PCI_COMMAND_SERR); + + err = e1000_test_msi_interrupt(adapter); + + /* re-enable SERR */ + if (pci_cmd & PCI_COMMAND_SERR) { + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd); + pci_cmd |= PCI_COMMAND_SERR; + pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd); + } + + return err; +} + +/** + * e1000_open - Called when a network interface is made active + * @netdev: network interface device structure + * + * Returns 0 on success, negative value on failure + * + * The open entry point is called when a network interface is made + * active by the system (IFF_UP). At this point all resources needed + * for transmit and receive operations are allocated, the interrupt + * handler is registered with the OS, the watchdog timer is started, + * and the stack is notified that the interface is ready. + **/ +static int e1000_open(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + int err; + + /* disallow open during test */ + if (test_bit(__E1000_TESTING, &adapter->state)) + return -EBUSY; + + pm_runtime_get_sync(&pdev->dev); + + netif_carrier_off(netdev); + + /* allocate transmit descriptors */ + err = e1000e_setup_tx_resources(adapter->tx_ring); + if (err) + goto err_setup_tx; + + /* allocate receive descriptors */ + err = e1000e_setup_rx_resources(adapter->rx_ring); + if (err) + goto err_setup_rx; + + /* If AMT is enabled, let the firmware know that the network + * interface is now open and reset the part to a known state. + */ + if (adapter->flags & FLAG_HAS_AMT) { + e1000e_get_hw_control(adapter); + e1000e_reset(adapter); + } + + e1000e_power_up_phy(adapter); + + adapter->mng_vlan_id = E1000_MNG_VLAN_NONE; + if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) + e1000_update_mng_vlan(adapter); + + /* DMA latency requirement to workaround jumbo issue */ + pm_qos_add_request(&adapter->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, + PM_QOS_DEFAULT_VALUE); + + /* before we allocate an interrupt, we must be ready to handle it. + * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt + * as soon as we call pci_request_irq, so we have to setup our + * clean_rx handler before we do so. 
+ */ + e1000_configure(adapter); + + err = e1000_request_irq(adapter); + if (err) + goto err_req_irq; + + /* Work around PCIe errata with MSI interrupts causing some chipsets to + * ignore e1000e MSI messages, which means we need to test our MSI + * interrupt now + */ + if (adapter->int_mode != E1000E_INT_MODE_LEGACY) { + err = e1000_test_msi(adapter); + if (err) { + e_err("Interrupt allocation failed\n"); + goto err_req_irq; + } + } + + /* From here on the code is the same as e1000e_up() */ + clear_bit(__E1000_DOWN, &adapter->state); + + napi_enable(&adapter->napi); + + e1000_irq_enable(adapter); + + adapter->tx_hang_recheck = false; + netif_start_queue(netdev); + + hw->mac.get_link_status = true; + pm_runtime_put(&pdev->dev); + + /* fire a link status change interrupt to start the watchdog */ + if (adapter->msix_entries) + ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER); + else + ew32(ICS, E1000_ICS_LSC); + + return 0; + +err_req_irq: + pm_qos_remove_request(&adapter->pm_qos_req); + e1000e_release_hw_control(adapter); + e1000_power_down_phy(adapter); + e1000e_free_rx_resources(adapter->rx_ring); +err_setup_rx: + e1000e_free_tx_resources(adapter->tx_ring); +err_setup_tx: + e1000e_reset(adapter); + pm_runtime_put_sync(&pdev->dev); + + return err; +} + +/** + * e1000_close - Disables a network interface + * @netdev: network interface device structure + * + * Returns 0, this is not allowed to fail + * + * The close entry point is called when an interface is de-activated + * by the OS. The hardware is still under the drivers control, but + * needs to be disabled. A global MAC reset is issued to stop the + * hardware, and all transmit and receive resources are freed. + **/ +static int e1000_close(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct pci_dev *pdev = adapter->pdev; + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->state) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + + pm_runtime_get_sync(&pdev->dev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) { + e1000e_down(adapter, true); + e1000_free_irq(adapter); + + /* Link status message must follow this format */ + pr_info("%s NIC Link is Down\n", adapter->netdev->name); + } + + napi_disable(&adapter->napi); + + e1000e_free_tx_resources(adapter->tx_ring); + e1000e_free_rx_resources(adapter->rx_ring); + + /* kill manageability vlan ID if supported, but not if a vlan with + * the same ID is registered on the host OS (let 8021q kill it) + */ + if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) + e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), + adapter->mng_vlan_id); + + /* If AMT is enabled, let the firmware know that the network + * interface is now closed + */ + if ((adapter->flags & FLAG_HAS_AMT) && + !test_bit(__E1000_TESTING, &adapter->state)) + e1000e_release_hw_control(adapter); + + pm_qos_remove_request(&adapter->pm_qos_req); + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +/** + * e1000_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure + * @p: pointer to an address structure + * + * Returns 0 on success, negative on failure + **/ +static int e1000_set_mac(struct net_device *netdev, void *p) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(netdev->dev_addr, 
addr->sa_data, netdev->addr_len); + memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); + + hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0); + + if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) { + /* activate the work around */ + e1000e_set_laa_state_82571(&adapter->hw, 1); + + /* Hold a copy of the LAA in RAR[14]. This is done so that + * between the time RAR[0] gets clobbered and the time it + * gets fixed (in e1000_watchdog), the actual LAA is in one + * of the RARs and no incoming packets directed to this port + * are dropped. Eventually the LAA will be in RAR[0] and + * RAR[14]. + */ + hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, + adapter->hw.mac.rar_entry_count - 1); + } + + return 0; +} + +/** + * e1000e_update_phy_task - work thread to update phy + * @work: pointer to our work struct + * + * this worker thread exists because reading the phy requires acquiring + * a semaphore that we may msleep waiting for, and we can't msleep in a + * timer. + **/ +static void e1000e_update_phy_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + update_phy_task); + struct e1000_hw *hw = &adapter->hw; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + e1000_get_phy_info(hw); + + /* Enable EEE on 82579 after link up */ + if (hw->phy.type >= e1000_phy_82579) + e1000_set_eee_pchlan(hw); +} + +/** + * e1000_update_phy_info - timer call-back to update PHY info + * @data: pointer to adapter cast into an unsigned long + * + * Need to wait a few seconds after link up to get diagnostic information from + * the phy + **/ +static void e1000_update_phy_info(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *)data; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + schedule_work(&adapter->update_phy_task); +} + +/** + * e1000e_update_phy_stats - Update the PHY statistics counters + * @adapter: board private structure + * + * Read/clear the upper 16-bit PHY registers and read/accumulate lower + **/ +static void e1000e_update_phy_stats(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + s32 ret_val; + u16 phy_data; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return; + + /* A page set is expensive so check if already on desired page. + * If not, set to the page with the PHY status registers.
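+ *
+ * Editorial note, not part of the original source: each statistics
+ * counter below is split across an UPPER/LOWER register pair; the
+ * upper half is read only to clear it, the lower half is accumulated,
+ * and only the LOWER read's return value is checked, e.g.:
+ *
+ *   hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+ *   ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
+ *   if (!ret_val)
+ *           adapter->stats.scc += phy_data;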
+ */ + hw->phy.addr = 1; + ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, + &phy_data); + if (ret_val) + goto release; + if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) { + ret_val = hw->phy.ops.set_page(hw, + HV_STATS_PAGE << IGP_PAGE_SHIFT); + if (ret_val) + goto release; + } + + /* Single Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.scc += phy_data; + + /* Excessive Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data); + if (!ret_val) + adapter->stats.ecol += phy_data; + + /* Multiple Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.mcc += phy_data; + + /* Late Collision Count */ + hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data); + if (!ret_val) + adapter->stats.latecol += phy_data; + + /* Collision Count - also used for adaptive IFS */ + hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data); + if (!ret_val) + hw->mac.collision_delta = phy_data; + + /* Defer Count */ + hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data); + if (!ret_val) + adapter->stats.dc += phy_data; + + /* Transmit with no CRS */ + hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data); + ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data); + if (!ret_val) + adapter->stats.tncrs += phy_data; + +release: + hw->phy.ops.release(hw); +} + +/** + * e1000e_update_stats - Update the board statistics counters + * @adapter: board private structure + **/ +static void e1000e_update_stats(struct e1000_adapter *adapter) +{ + struct net_device *netdev = adapter->netdev; + struct e1000_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; + + /* Prevent stats update while adapter is being reset, or if the pci + * connection is down. 
+ */ + if (adapter->link_speed == 0) + return; + if (pci_channel_offline(pdev)) + return; + + adapter->stats.crcerrs += er32(CRCERRS); + adapter->stats.gprc += er32(GPRC); + adapter->stats.gorc += er32(GORCL); + er32(GORCH); /* Clear gorc */ + adapter->stats.bprc += er32(BPRC); + adapter->stats.mprc += er32(MPRC); + adapter->stats.roc += er32(ROC); + + adapter->stats.mpc += er32(MPC); + + /* Half-duplex statistics */ + if (adapter->link_duplex == HALF_DUPLEX) { + if (adapter->flags2 & FLAG2_HAS_PHY_STATS) { + e1000e_update_phy_stats(adapter); + } else { + adapter->stats.scc += er32(SCC); + adapter->stats.ecol += er32(ECOL); + adapter->stats.mcc += er32(MCC); + adapter->stats.latecol += er32(LATECOL); + adapter->stats.dc += er32(DC); + + hw->mac.collision_delta = er32(COLC); + + if ((hw->mac.type != e1000_82574) && + (hw->mac.type != e1000_82583)) + adapter->stats.tncrs += er32(TNCRS); + } + adapter->stats.colc += hw->mac.collision_delta; + } + + adapter->stats.xonrxc += er32(XONRXC); + adapter->stats.xontxc += er32(XONTXC); + adapter->stats.xoffrxc += er32(XOFFRXC); + adapter->stats.xofftxc += er32(XOFFTXC); + adapter->stats.gptc += er32(GPTC); + adapter->stats.gotc += er32(GOTCL); + er32(GOTCH); /* Clear gotc */ + adapter->stats.rnbc += er32(RNBC); + adapter->stats.ruc += er32(RUC); + + adapter->stats.mptc += er32(MPTC); + adapter->stats.bptc += er32(BPTC); + + /* used for adaptive IFS */ + + hw->mac.tx_packet_delta = er32(TPT); + adapter->stats.tpt += hw->mac.tx_packet_delta; + + adapter->stats.algnerrc += er32(ALGNERRC); + adapter->stats.rxerrc += er32(RXERRC); + adapter->stats.cexterr += er32(CEXTERR); + adapter->stats.tsctc += er32(TSCTC); + adapter->stats.tsctfc += er32(TSCTFC); + + /* Fill out the OS statistics structure */ + netdev->stats.multicast = adapter->stats.mprc; + netdev->stats.collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + netdev->stats.rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; + netdev->stats.rx_length_errors = adapter->stats.ruc + + adapter->stats.roc; + netdev->stats.rx_crc_errors = adapter->stats.crcerrs; + netdev->stats.rx_frame_errors = adapter->stats.algnerrc; + netdev->stats.rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol; + netdev->stats.tx_aborted_errors = adapter->stats.ecol; + netdev->stats.tx_window_errors = adapter->stats.latecol; + netdev->stats.tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + /* Management Stats */ + adapter->stats.mgptc += er32(MGTPTC); + adapter->stats.mgprc += er32(MGTPRC); + adapter->stats.mgpdc += er32(MGTPDC); + + /* Correctable ECC Errors */ + if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { + u32 pbeccsts = er32(PBECCSTS); + + adapter->corr_errors += + pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK; + adapter->uncorr_errors += + (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >> + E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT; + } +} + +/** + * e1000_phy_read_status - Update the PHY register status snapshot + * @adapter: board private structure + **/ +static void e1000_phy_read_status(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct e1000_phy_regs *phy = &adapter->phy_regs; + + if 
(!pm_runtime_suspended((&adapter->pdev->dev)->parent) && + (er32(STATUS) & E1000_STATUS_LU) && + (adapter->hw.phy.media_type == e1000_media_type_copper)) { + int ret_val; + + ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr); + ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr); + ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise); + ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa); + ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion); + ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000); + ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000); + ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus); + if (ret_val) + e_warn("Error reading PHY register\n"); + } else { + /* Do not read PHY registers if link is not up + * Set values to typical power-on defaults + */ + phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX); + phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL | + BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE | + BMSR_ERCAP); + phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP | + ADVERTISE_ALL | ADVERTISE_CSMA); + phy->lpa = 0; + phy->expansion = EXPANSION_ENABLENPAGE; + phy->ctrl1000 = ADVERTISE_1000FULL; + phy->stat1000 = 0; + phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF); + } +} + +static void e1000_print_link_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + u32 ctrl = er32(CTRL); + + /* Link status message must follow this format for user tools */ + pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n", + adapter->netdev->name, adapter->link_speed, + adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half", + (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" : + (ctrl & E1000_CTRL_RFCE) ? "Rx" : + (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None"); +} + +static bool e1000e_has_link(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + bool link_active = false; + s32 ret_val = 0; + + /* get_link_status is set on LSC (link status) interrupt or + * Rx sequence error interrupt. 
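+ *
+ * Editorial summary, not part of the original source: per media type,
+ * copper trusts mac.get_link_status (refreshed by check_for_link),
+ * fiber reads the link-up (LU) bit straight out of STATUS, and
+ * internal serdes uses mac.serdes_has_link as maintained by
+ * check_for_link.
+ *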
get_link_status will stay + * false until the check_for_link establishes link + * for copper adapters ONLY + */ + switch (hw->phy.media_type) { + case e1000_media_type_copper: + if (hw->mac.get_link_status) { + ret_val = hw->mac.ops.check_for_link(hw); + link_active = ret_val > 0; + } else { + link_active = true; + } + break; + case e1000_media_type_fiber: + ret_val = hw->mac.ops.check_for_link(hw); + link_active = !!(er32(STATUS) & E1000_STATUS_LU); + break; + case e1000_media_type_internal_serdes: + ret_val = hw->mac.ops.check_for_link(hw); + link_active = adapter->hw.mac.serdes_has_link; + break; + default: + case e1000_media_type_unknown: + break; + } + + if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) && + (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) { + /* See e1000_kmrn_lock_loss_workaround_ich8lan() */ + e_info("Gigabit has been disabled, downgrading speed\n"); + } + + return link_active; +} + +static void e1000e_enable_receives(struct e1000_adapter *adapter) +{ + /* make sure the receive unit is started */ + if ((adapter->flags & FLAG_RX_NEEDS_RESTART) && + (adapter->flags & FLAG_RESTART_NOW)) { + struct e1000_hw *hw = &adapter->hw; + u32 rctl = er32(RCTL); + + ew32(RCTL, rctl | E1000_RCTL_EN); + adapter->flags &= ~FLAG_RESTART_NOW; + } +} + +static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + /* With 82574 controllers, PHY needs to be checked periodically + * for hung state and reset, if two calls return true + */ + if (e1000_check_phy_82574(hw)) + adapter->phy_hang_count++; + else + adapter->phy_hang_count = 0; + + if (adapter->phy_hang_count > 1) { + adapter->phy_hang_count = 0; + e_dbg("PHY appears hung - resetting\n"); + schedule_work(&adapter->reset_task); + } +} + +/** + * e1000_watchdog - Timer Call-back + * @data: pointer to adapter cast into an unsigned long + **/ +static void e1000_watchdog(unsigned long data) +{ + struct e1000_adapter *adapter = (struct e1000_adapter *)data; + + /* Do the rest outside of interrupt context */ + schedule_work(&adapter->watchdog_task); + + /* TODO: make this use queue_delayed_work() */ +} + +static void e1000_watchdog_task(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, + struct e1000_adapter, + watchdog_task); + struct net_device *netdev = adapter->netdev; + struct e1000_mac_info *mac = &adapter->hw.mac; + struct e1000_phy_info *phy = &adapter->hw.phy; + struct e1000_ring *tx_ring = adapter->tx_ring; + struct e1000_hw *hw = &adapter->hw; + u32 link, tctl; + + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + link = e1000e_has_link(adapter); + if ((netif_carrier_ok(netdev)) && link) { + /* Cancel scheduled suspend requests. */ + pm_runtime_resume(netdev->dev.parent); + + e1000e_enable_receives(adapter); + goto link_up; + } + + if ((e1000e_enable_tx_pkt_filtering(hw)) && + (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)) + e1000_update_mng_vlan(adapter); + + if (link) { + if (!netif_carrier_ok(netdev)) { + bool txb2b = true; + + /* Cancel scheduled suspend requests. 
*/ + pm_runtime_resume(netdev->dev.parent); + + /* update snapshot of PHY registers on LSC */ + e1000_phy_read_status(adapter); + mac->ops.get_link_up_info(&adapter->hw, + &adapter->link_speed, + &adapter->link_duplex); + e1000_print_link_info(adapter); + + /* check if SmartSpeed worked */ + e1000e_check_downshift(hw); + if (phy->speed_downgraded) + netdev_warn(netdev, + "Link Speed was downgraded by SmartSpeed\n"); + + /* On supported PHYs, check for duplex mismatch only + * if link has autonegotiated at 10/100 half + */ + if ((hw->phy.type == e1000_phy_igp_3 || + hw->phy.type == e1000_phy_bm) && + hw->mac.autoneg && + (adapter->link_speed == SPEED_10 || + adapter->link_speed == SPEED_100) && + (adapter->link_duplex == HALF_DUPLEX)) { + u16 autoneg_exp; + + e1e_rphy(hw, MII_EXPANSION, &autoneg_exp); + + if (!(autoneg_exp & EXPANSION_NWAY)) + e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n"); + } + + /* adjust timeout factor according to speed/duplex */ + adapter->tx_timeout_factor = 1; + switch (adapter->link_speed) { + case SPEED_10: + txb2b = false; + adapter->tx_timeout_factor = 16; + break; + case SPEED_100: + txb2b = false; + adapter->tx_timeout_factor = 10; + break; + } + + /* workaround: re-program speed mode bit after + * link-up event + */ + if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) && + !txb2b) { + u32 tarc0; + + tarc0 = er32(TARC(0)); + tarc0 &= ~SPEED_MODE_BIT; + ew32(TARC(0), tarc0); + } + + /* disable TSO for pcie and 10/100 speeds, to avoid + * some hardware issues + */ + if (!(adapter->flags & FLAG_TSO_FORCE)) { + switch (adapter->link_speed) { + case SPEED_10: + case SPEED_100: + e_info("10/100 speed: disabling TSO\n"); + netdev->features &= ~NETIF_F_TSO; + netdev->features &= ~NETIF_F_TSO6; + break; + case SPEED_1000: + netdev->features |= NETIF_F_TSO; + netdev->features |= NETIF_F_TSO6; + break; + default: + /* oops */ + break; + } + } + + /* enable transmits in the hardware, need to do this + * after setting TARC(0) + */ + tctl = er32(TCTL); + tctl |= E1000_TCTL_EN; + ew32(TCTL, tctl); + + /* Perform any post-link-up configuration before + * reporting link up. + */ + if (phy->ops.cfg_on_link_up) + phy->ops.cfg_on_link_up(hw); + + netif_carrier_on(netdev); + + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + } + } else { + if (netif_carrier_ok(netdev)) { + adapter->link_speed = 0; + adapter->link_duplex = 0; + /* Link status message must follow this format */ + pr_info("%s NIC Link is Down\n", adapter->netdev->name); + netif_carrier_off(netdev); + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->phy_info_timer, + round_jiffies(jiffies + 2 * HZ)); + + /* 8000ES2LAN requires a Rx packet buffer work-around + * on link down event; reset the controller to flush + * the Rx packet buffer. 
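+ *
+ * Editorial note, not part of the original source: the
+ * tx_timeout_factor values chosen above simply scale the hang-detection
+ * window with line rate; at 10 Mbps a descriptor may legitimately wait
+ * ~16x longer than at gigabit before the cleanup path should consider
+ * the ring hung.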
+ */ + if (adapter->flags & FLAG_RX_NEEDS_RESTART) + adapter->flags |= FLAG_RESTART_NOW; + else + pm_schedule_suspend(netdev->dev.parent, + LINK_TIMEOUT); + } + } + +link_up: + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + + mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; + adapter->tpt_old = adapter->stats.tpt; + mac->collision_delta = adapter->stats.colc - adapter->colc_old; + adapter->colc_old = adapter->stats.colc; + + adapter->gorc = adapter->stats.gorc - adapter->gorc_old; + adapter->gorc_old = adapter->stats.gorc; + adapter->gotc = adapter->stats.gotc - adapter->gotc_old; + adapter->gotc_old = adapter->stats.gotc; + spin_unlock(&adapter->stats64_lock); + + /* If the link is lost the controller stops DMA, but + * if there is queued Tx work it cannot be done. So + * reset the controller to flush the Tx packet buffers. + */ + if (!netif_carrier_ok(netdev) && + (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) + adapter->flags |= FLAG_RESTART_NOW; + + /* If reset is necessary, do it outside of interrupt context. */ + if (adapter->flags & FLAG_RESTART_NOW) { + schedule_work(&adapter->reset_task); + /* return immediately since reset is imminent */ + return; + } + + e1000e_update_adaptive(&adapter->hw); + + /* Simple mode for Interrupt Throttle Rate (ITR) */ + if (adapter->itr_setting == 4) { + /* Symmetric Tx/Rx gets a reduced ITR=2000; + * Total asymmetrical Tx or Rx gets ITR=8000; + * everyone else is between 2000-8000. + */ + u32 goc = (adapter->gotc + adapter->gorc) / 10000; + u32 dif = (adapter->gotc > adapter->gorc ? + adapter->gotc - adapter->gorc : + adapter->gorc - adapter->gotc) / 10000; + u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000; + + e1000e_write_itr(adapter, itr); + } + + /* Cause software interrupt to ensure Rx ring is cleaned */ + if (adapter->msix_entries) + ew32(ICS, adapter->rx_ring->ims_val); + else + ew32(ICS, E1000_ICS_RXDMT0); + + /* flush pending descriptors to memory before detecting Tx hang */ + e1000e_flush_descriptors(adapter); + + /* Force detection of hung controller every watchdog period */ + adapter->detect_tx_hung = true; + + /* With 82571 controllers, LAA may be overwritten due to controller + * reset from the other port. 
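+ *
+ * Editorial worked example for the simple ITR mode above
+ * (itr_setting == 4), not part of the original source: if an interval
+ * moved gotc = 60 MB and gorc = 20 MB, then goc = 8000 and dif = 4000
+ * (both in 10KB units), giving
+ *
+ *   itr = 4000 * 6000 / 8000 + 2000 = 5000
+ *
+ * so moderately asymmetric traffic lands between the symmetric floor
+ * of 2000 and the fully asymmetric ceiling of 8000 interrupts/sec.
+ *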
Set the appropriate LAA in RAR[0] + */ + if (e1000e_get_laa_state_82571(hw)) + hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0); + + if (adapter->flags2 & FLAG2_CHECK_PHY_HANG) + e1000e_check_82574_phy_workaround(adapter); + + /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */ + if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) { + if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) && + (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) { + er32(RXSTMPH); + adapter->rx_hwtstamp_cleared++; + } else { + adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP; + } + } + + /* Reset the timer */ + if (!test_bit(__E1000_DOWN, &adapter->state)) + mod_timer(&adapter->watchdog_timer, + round_jiffies(jiffies + 2 * HZ)); +} + +#define E1000_TX_FLAGS_CSUM 0x00000001 +#define E1000_TX_FLAGS_VLAN 0x00000002 +#define E1000_TX_FLAGS_TSO 0x00000004 +#define E1000_TX_FLAGS_IPV4 0x00000008 +#define E1000_TX_FLAGS_NO_FCS 0x00000010 +#define E1000_TX_FLAGS_HWTSTAMP 0x00000020 +#define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 +#define E1000_TX_FLAGS_VLAN_SHIFT 16 + +static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u32 cmd_length = 0; + u16 ipcse = 0, mss; + u8 ipcss, ipcso, tucss, tucso, hdr_len; + int err; + + if (!skb_is_gso(skb)) + return 0; + + err = skb_cow_head(skb, 0); + if (err < 0) + return err; + + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + mss = skb_shinfo(skb)->gso_size; + if (protocol == htons(ETH_P_IP)) { + struct iphdr *iph = ip_hdr(skb); + iph->tot_len = 0; + iph->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, + 0, IPPROTO_TCP, 0); + cmd_length = E1000_TXD_CMD_IP; + ipcse = skb_transport_offset(skb) - 1; + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, + 0, IPPROTO_TCP, 0); + ipcse = 0; + } + ipcss = skb_network_offset(skb); + ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data; + tucss = skb_transport_offset(skb); + tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data; + + cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE | + E1000_TXD_CMD_TCP | (skb->len - (hdr_len))); + + i = tx_ring->next_to_use; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + buffer_info = &tx_ring->buffer_info[i]; + + context_desc->lower_setup.ip_fields.ipcss = ipcss; + context_desc->lower_setup.ip_fields.ipcso = ipcso; + context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse); + context_desc->upper_setup.tcp_fields.tucss = tucss; + context_desc->upper_setup.tcp_fields.tucso = tucso; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss); + context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; + context_desc->cmd_and_length = cpu_to_le32(cmd_length); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return 1; +} + +static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb, + __be16 protocol) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_context_desc *context_desc; + struct e1000_buffer *buffer_info; + unsigned int i; + u8 css; + u32 cmd_len = E1000_TXD_CMD_DEXT; + + if (skb->ip_summed != CHECKSUM_PARTIAL) + return false; + + switch (protocol) { + case cpu_to_be16(ETH_P_IP): + if (ip_hdr(skb)->protocol == 
IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + case cpu_to_be16(ETH_P_IPV6): + /* XXX not handling all IPV6 headers */ + if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) + cmd_len |= E1000_TXD_CMD_TCP; + break; + default: + if (unlikely(net_ratelimit())) + e_warn("checksum_partial proto=%x!\n", + be16_to_cpu(protocol)); + break; + } + + css = skb_checksum_start_offset(skb); + + i = tx_ring->next_to_use; + buffer_info = &tx_ring->buffer_info[i]; + context_desc = E1000_CONTEXT_DESC(*tx_ring, i); + + context_desc->lower_setup.ip_config = 0; + context_desc->upper_setup.tcp_fields.tucss = css; + context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset; + context_desc->upper_setup.tcp_fields.tucse = 0; + context_desc->tcp_seg_setup.data = 0; + context_desc->cmd_and_length = cpu_to_le32(cmd_len); + + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + + i++; + if (i == tx_ring->count) + i = 0; + tx_ring->next_to_use = i; + + return true; +} + +static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, + unsigned int first, unsigned int max_per_txd, + unsigned int nr_frags) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct pci_dev *pdev = adapter->pdev; + struct e1000_buffer *buffer_info; + unsigned int len = skb_headlen(skb); + unsigned int offset = 0, size, count = 0, i; + unsigned int f, bytecount, segs; + + i = tx_ring->next_to_use; + + while (len) { + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = dma_map_single(&pdev->dev, + skb->data + offset, + size, DMA_TO_DEVICE); + buffer_info->mapped_as_page = false; + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + + len -= size; + offset += size; + count++; + + if (len) { + i++; + if (i == tx_ring->count) + i = 0; + } + } + + for (f = 0; f < nr_frags; f++) { + const struct skb_frag_struct *frag; + + frag = &skb_shinfo(skb)->frags[f]; + len = skb_frag_size(frag); + offset = 0; + + while (len) { + i++; + if (i == tx_ring->count) + i = 0; + + buffer_info = &tx_ring->buffer_info[i]; + size = min(len, max_per_txd); + + buffer_info->length = size; + buffer_info->time_stamp = jiffies; + buffer_info->next_to_watch = i; + buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag, + offset, size, + DMA_TO_DEVICE); + buffer_info->mapped_as_page = true; + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) + goto dma_error; + + len -= size; + offset += size; + count++; + } + } + + segs = skb_shinfo(skb)->gso_segs ? 
: 1; + /* multiply data chunks by size of headers */ + bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len; + + tx_ring->buffer_info[i].skb = skb; + tx_ring->buffer_info[i].segs = segs; + tx_ring->buffer_info[i].bytecount = bytecount; + tx_ring->buffer_info[first].next_to_watch = i; + + return count; + +dma_error: + dev_err(&pdev->dev, "Tx DMA map failed\n"); + buffer_info->dma = 0; + if (count) + count--; + + while (count--) { + if (i == 0) + i += tx_ring->count; + i--; + buffer_info = &tx_ring->buffer_info[i]; + e1000_put_txbuf(tx_ring, buffer_info); + } + + return 0; +} + +static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + struct e1000_tx_desc *tx_desc = NULL; + struct e1000_buffer *buffer_info; + u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS; + unsigned int i; + + if (tx_flags & E1000_TX_FLAGS_TSO) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D | + E1000_TXD_CMD_TSE; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + + if (tx_flags & E1000_TX_FLAGS_IPV4) + txd_upper |= E1000_TXD_POPTS_IXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_CSUM) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_POPTS_TXSM << 8; + } + + if (tx_flags & E1000_TX_FLAGS_VLAN) { + txd_lower |= E1000_TXD_CMD_VLE; + txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK); + } + + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + txd_lower &= ~(E1000_TXD_CMD_IFCS); + + if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) { + txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D; + txd_upper |= E1000_TXD_EXTCMD_TSTAMP; + } + + i = tx_ring->next_to_use; + + do { + buffer_info = &tx_ring->buffer_info[i]; + tx_desc = E1000_TX_DESC(*tx_ring, i); + tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); + tx_desc->lower.data = cpu_to_le32(txd_lower | + buffer_info->length); + tx_desc->upper.data = cpu_to_le32(txd_upper); + + i++; + if (i == tx_ring->count) + i = 0; + } while (--count > 0); + + tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); + + /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */ + if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS)) + tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS)); + + /* Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only + * applicable for weak-ordered memory model archs, + * such as IA-64). 
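+ *
+ * Editorial note, not part of the original source: the required
+ * ordering is
+ *
+ *   descriptor field writes -> wmb() -> tail (TDT) write
+ *
+ * so the NIC, which starts fetching as soon as it sees the new tail,
+ * never reads a descriptor whose buffer address or length is still
+ * sitting in a CPU store buffer.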
+ */ + wmb(); + + tx_ring->next_to_use = i; +} + +#define MINIMUM_DHCP_PACKET_SIZE 282 +static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter, + struct sk_buff *skb) +{ + struct e1000_hw *hw = &adapter->hw; + u16 length, offset; + + if (skb_vlan_tag_present(skb) && + !((skb_vlan_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) && + (adapter->hw.mng_cookie.status & + E1000_MNG_DHCP_COOKIE_STATUS_VLAN))) + return 0; + + if (skb->len <= MINIMUM_DHCP_PACKET_SIZE) + return 0; + + if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP)) + return 0; + + { + const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14); + struct udphdr *udp; + + if (ip->protocol != IPPROTO_UDP) + return 0; + + udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2)); + if (ntohs(udp->dest) != 67) + return 0; + + offset = (u8 *)udp + 8 - skb->data; + length = skb->len - offset; + return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length); + } + + return 0; +} + +static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) +{ + struct e1000_adapter *adapter = tx_ring->adapter; + + netif_stop_queue(adapter->netdev); + /* Herbert's original patch had: + * smp_mb__after_netif_stop_queue(); + * but since that doesn't exist yet, just open code it. + */ + smp_mb(); + + /* We need to check again in a case another CPU has just + * made room available. + */ + if (e1000_desc_unused(tx_ring) < size) + return -EBUSY; + + /* A reprieve! */ + netif_start_queue(adapter->netdev); + ++adapter->restart_queue; + return 0; +} + +static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) +{ + BUG_ON(size > tx_ring->count); + + if (e1000_desc_unused(tx_ring) >= size) + return 0; + return __e1000_maybe_stop_tx(tx_ring, size); +} + +static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, + struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_ring *tx_ring = adapter->tx_ring; + unsigned int first; + unsigned int tx_flags = 0; + unsigned int len = skb_headlen(skb); + unsigned int nr_frags; + unsigned int mss; + int count = 0; + int tso; + unsigned int f; + __be16 protocol = vlan_get_protocol(skb); + + if (test_bit(__E1000_DOWN, &adapter->state)) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* The minimum packet size with TCTL.PSP set is 17 bytes so + * pad skb in order to meet this minimum size requirement + */ + if (skb_put_padto(skb, 17)) + return NETDEV_TX_OK; + + mss = skb_shinfo(skb)->gso_size; + if (mss) { + u8 hdr_len; + + /* TSO Workaround for 82571/2/3 Controllers -- if skb->data + * points to just header, pull a few bytes of payload from + * frags into skb->data + */ + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); + /* we do this workaround for ES2LAN, but it is un-necessary, + * avoiding it could save a lot of cycles + */ + if (skb->data_len && (hdr_len == len)) { + unsigned int pull_size; + + pull_size = min_t(unsigned int, 4, skb->data_len); + if (!__pskb_pull_tail(skb, pull_size)) { + e_err("__pskb_pull_tail failed.\n"); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + len = skb_headlen(skb); + } + } + + /* reserve a descriptor for the offload context */ + if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL)) + count++; + count++; + + count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); + + nr_frags = skb_shinfo(skb)->nr_frags; + for (f = 0; f < nr_frags; f++) + count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), + 
adapter->tx_fifo_limit); + + if (adapter->hw.mac.tx_pkt_filtering) + e1000_transfer_dhcp_info(adapter, skb); + + /* need: count + 2 desc gap to keep tail from touching + * head; otherwise try next time + */ + if (e1000_maybe_stop_tx(tx_ring, count + 2)) + return NETDEV_TX_BUSY; + + if (skb_vlan_tag_present(skb)) { + tx_flags |= E1000_TX_FLAGS_VLAN; + tx_flags |= (skb_vlan_tag_get(skb) << + E1000_TX_FLAGS_VLAN_SHIFT); + } + + first = tx_ring->next_to_use; + + tso = e1000_tso(tx_ring, skb, protocol); + if (tso < 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + if (tso) + tx_flags |= E1000_TX_FLAGS_TSO; + else if (e1000_tx_csum(tx_ring, skb, protocol)) + tx_flags |= E1000_TX_FLAGS_CSUM; + + /* Old method was to assume IPv4 packet by default if TSO was enabled. + * 82571 hardware supports TSO capabilities for IPv6 as well... + * no longer assume, we must. + */ + if (protocol == htons(ETH_P_IP)) + tx_flags |= E1000_TX_FLAGS_IPV4; + + if (unlikely(skb->no_fcs)) + tx_flags |= E1000_TX_FLAGS_NO_FCS; + + /* if count is 0 then mapping error has occurred */ + count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, + nr_frags); + if (count) { + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && + (adapter->flags & FLAG_HAS_HW_TIMESTAMP) && + !adapter->tx_hwtstamp_skb) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + tx_flags |= E1000_TX_FLAGS_HWTSTAMP; + adapter->tx_hwtstamp_skb = skb_get(skb); + adapter->tx_hwtstamp_start = jiffies; + schedule_work(&adapter->tx_hwtstamp_work); + } else { + skb_tx_timestamp(skb); + } + + netdev_sent_queue(netdev, skb->len); + e1000_tx_queue(tx_ring, tx_flags, count); + /* Make sure there is space in the ring for the next send. */ + e1000_maybe_stop_tx(tx_ring, + (MAX_SKB_FRAGS * + DIV_ROUND_UP(PAGE_SIZE, + adapter->tx_fifo_limit) + 2)); + + if (!skb->xmit_more || + netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) { + if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA) + e1000e_update_tdt_wa(tx_ring, + tx_ring->next_to_use); + else + writel(tx_ring->next_to_use, tx_ring->tail); + + /* we need this if more than one processor can write + * to our tail at a time; it synchronizes IO on + * IA64/Altix systems + */ + mmiowb(); + } + } else { + dev_kfree_skb_any(skb); + tx_ring->buffer_info[first].time_stamp = 0; + tx_ring->next_to_use = first; + } + + return NETDEV_TX_OK; +} + +/** + * e1000_tx_timeout - Respond to a Tx Hang + * @netdev: network interface device structure + **/ +static void e1000_tx_timeout(struct net_device *netdev) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + /* Do the reset outside of interrupt context */ + adapter->tx_timeout_count++; + schedule_work(&adapter->reset_task); +} + +static void e1000_reset_task(struct work_struct *work) +{ + struct e1000_adapter *adapter; + adapter = container_of(work, struct e1000_adapter, reset_task); + + /* don't run the task if already down */ + if (test_bit(__E1000_DOWN, &adapter->state)) + return; + + if (!(adapter->flags & FLAG_RESTART_NOW)) { + e1000e_dump(adapter); + e_err("Reset adapter unexpectedly\n"); + } + e1000e_reinit_locked(adapter); +} + +/** + * e1000e_get_stats64 - Get System Network Statistics + * @netdev: network interface device structure + * @stats: rtnl_link_stats64 pointer + * + * Returns the address of the device statistics structure.
+ **/ +struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev, + struct rtnl_link_stats64 *stats) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + memset(stats, 0, sizeof(struct rtnl_link_stats64)); + spin_lock(&adapter->stats64_lock); + e1000e_update_stats(adapter); + /* Fill out the OS statistics structure */ + stats->rx_bytes = adapter->stats.gorc; + stats->rx_packets = adapter->stats.gprc; + stats->tx_bytes = adapter->stats.gotc; + stats->tx_packets = adapter->stats.gptc; + stats->multicast = adapter->stats.mprc; + stats->collisions = adapter->stats.colc; + + /* Rx Errors */ + + /* RLEC on some newer hardware can be incorrect so build + * our own version based on RUC and ROC + */ + stats->rx_errors = adapter->stats.rxerrc + + adapter->stats.crcerrs + adapter->stats.algnerrc + + adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr; + stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc; + stats->rx_crc_errors = adapter->stats.crcerrs; + stats->rx_frame_errors = adapter->stats.algnerrc; + stats->rx_missed_errors = adapter->stats.mpc; + + /* Tx Errors */ + stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol; + stats->tx_aborted_errors = adapter->stats.ecol; + stats->tx_window_errors = adapter->stats.latecol; + stats->tx_carrier_errors = adapter->stats.tncrs; + + /* Tx Dropped needs to be maintained elsewhere */ + + spin_unlock(&adapter->stats64_lock); + return stats; +} + +/** + * e1000_change_mtu - Change the Maximum Transfer Unit + * @netdev: network interface device structure + * @new_mtu: new value for maximum frame size + * + * Returns 0 on success, negative on failure + **/ +static int e1000_change_mtu(struct net_device *netdev, int new_mtu) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + int max_frame = new_mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; + + /* Jumbo frame support */ + if ((max_frame > (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) && + !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { + e_err("Jumbo Frames not supported.\n"); + return -EINVAL; + } + + /* Supported frame sizes */ + if ((new_mtu < (VLAN_ETH_ZLEN + ETH_FCS_LEN)) || + (max_frame > adapter->max_hw_frame_size)) { + e_err("Unsupported MTU setting\n"); + return -EINVAL; + } + + /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ + if ((adapter->hw.mac.type >= e1000_pch2lan) && + !(adapter->flags2 & FLAG2_CRC_STRIPPING) && + (new_mtu > ETH_DATA_LEN)) { + e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n"); + return -EINVAL; + } + + while (test_and_set_bit(__E1000_RESETTING, &adapter->state)) + usleep_range(1000, 2000); + /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */ + adapter->max_frame_size = max_frame; + e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu); + netdev->mtu = new_mtu; + + pm_runtime_get_sync(netdev->dev.parent); + + if (netif_running(netdev)) + e1000e_down(adapter, true); + + /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN + * means we reserve 2 more, this pushes us to allocate from the next + * larger slab size. + * i.e. 
RXBUFFER_2048 --> size-4096 slab + * However with the new *_jumbo_rx* routines, jumbo receives will use + * fragmented skbs + */ + + if (max_frame <= 2048) + adapter->rx_buffer_len = 2048; + else + adapter->rx_buffer_len = 4096; + + /* adjust allocation if LPE protects us, and we aren't using SBP */ + if (max_frame <= (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)) + adapter->rx_buffer_len = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN; + + if (netif_running(netdev)) + e1000e_up(adapter); + else + e1000e_reset(adapter); + + pm_runtime_put_sync(netdev->dev.parent); + + clear_bit(__E1000_RESETTING, &adapter->state); + + return 0; +} + +static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, + int cmd) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct mii_ioctl_data *data = if_mii(ifr); + + if (adapter->hw.phy.media_type != e1000_media_type_copper) + return -EOPNOTSUPP; + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = adapter->hw.phy.addr; + break; + case SIOCGMIIREG: + e1000_phy_read_status(adapter); + + switch (data->reg_num & 0x1F) { + case MII_BMCR: + data->val_out = adapter->phy_regs.bmcr; + break; + case MII_BMSR: + data->val_out = adapter->phy_regs.bmsr; + break; + case MII_PHYSID1: + data->val_out = (adapter->hw.phy.id >> 16); + break; + case MII_PHYSID2: + data->val_out = (adapter->hw.phy.id & 0xFFFF); + break; + case MII_ADVERTISE: + data->val_out = adapter->phy_regs.advertise; + break; + case MII_LPA: + data->val_out = adapter->phy_regs.lpa; + break; + case MII_EXPANSION: + data->val_out = adapter->phy_regs.expansion; + break; + case MII_CTRL1000: + data->val_out = adapter->phy_regs.ctrl1000; + break; + case MII_STAT1000: + data->val_out = adapter->phy_regs.stat1000; + break; + case MII_ESTATUS: + data->val_out = adapter->phy_regs.estatus; + break; + default: + return -EIO; + } + break; + case SIOCSMIIREG: + default: + return -EOPNOTSUPP; + } + return 0; +} + +/** + * e1000e_hwtstamp_ioctl - control hardware time stamping + * @netdev: network interface device structure + * @ifreq: interface request + * + * Outgoing time stamping can be enabled and disabled. Play nice and + * disable it when requested, although it shouldn't cause any overhead + * when no packet needs it. At most one packet in the queue may be + * marked for time stamping, otherwise it would be impossible to tell + * for sure to which packet the hardware time stamp belongs. + * + * Incoming time stamping has to be configured via the hardware filters. + * Not all combinations are supported, in particular event type has to be + * specified. Matching the kind of event packet is not supported, with the + * exception of "all V2 events regardless of level 2 or 4". 
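+ *
+ * Editorial usage sketch, not part of the original source; the ioctl
+ * and the filter constants are standard kernel UAPI, while the fd and
+ * interface name are hypothetical:
+ *
+ *   struct hwtstamp_config cfg = { 0 };
+ *   struct ifreq ifr = { 0 };
+ *
+ *   cfg.tx_type = HWTSTAMP_TX_ON;
+ *   cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ *   snprintf(ifr.ifr_name, IFNAMSIZ, "eth0");
+ *   ifr.ifr_data = (void *)&cfg;
+ *   if (ioctl(fd, SIOCSHWTSTAMP, &ifr) == 0)
+ *           printf("granted rx_filter: %d\n", cfg.rx_filter);
+ *
+ * on return the driver may have widened cfg.rx_filter (e.g. to
+ * HWTSTAMP_FILTER_SOME, as below) to reflect what the hardware will
+ * actually time stamp.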
+ **/ +static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct hwtstamp_config config; + int ret_val; + + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) + return -EFAULT; + + ret_val = e1000e_config_hwtstamp(adapter, &config); + if (ret_val) + return ret_val; + + switch (config.rx_filter) { + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_SYNC: + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: + /* With V2 type filters which specify a Sync or Delay Request, + * Path Delay Request/Response messages are also time stamped + * by hardware so notify the caller the requested packets plus + * some others are time stamped. + */ + config.rx_filter = HWTSTAMP_FILTER_SOME; + break; + default: + break; + } + + return copy_to_user(ifr->ifr_data, &config, + sizeof(config)) ? -EFAULT : 0; +} + +static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + + return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, + sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0; +} + +static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return e1000_mii_ioctl(netdev, ifr, cmd); + case SIOCSHWTSTAMP: + return e1000e_hwtstamp_set(netdev, ifr); + case SIOCGHWTSTAMP: + return e1000e_hwtstamp_get(netdev, ifr); + default: + return -EOPNOTSUPP; + } +} + +static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc) +{ + struct e1000_hw *hw = &adapter->hw; + u32 i, mac_reg, wuc; + u16 phy_reg, wuc_enable; + int retval; + + /* copy MAC RARs to PHY RARs */ + e1000_copy_rx_addrs_to_phy_ich8lan(hw); + + retval = hw->phy.ops.acquire(hw); + if (retval) { + e_err("Could not acquire PHY\n"); + return retval; + } + + /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */ + retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable); + if (retval) + goto release; + + /* copy MAC MTA to PHY MTA - only needed for pchlan */ + for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) { + mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i); + hw->phy.ops.write_reg_page(hw, BM_MTA(i), + (u16)(mac_reg & 0xFFFF)); + hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1, + (u16)((mac_reg >> 16) & 0xFFFF)); + } + + /* configure PHY Rx Control register */ + hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg); + mac_reg = er32(RCTL); + if (mac_reg & E1000_RCTL_UPE) + phy_reg |= BM_RCTL_UPE; + if (mac_reg & E1000_RCTL_MPE) + phy_reg |= BM_RCTL_MPE; + phy_reg &= ~(BM_RCTL_MO_MASK); + if (mac_reg & E1000_RCTL_MO_3) + phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT) + << BM_RCTL_MO_SHIFT); + if (mac_reg & E1000_RCTL_BAM) + phy_reg |= BM_RCTL_BAM; + if (mac_reg & E1000_RCTL_PMCF) + phy_reg |= BM_RCTL_PMCF; + mac_reg = er32(CTRL); + if (mac_reg & E1000_CTRL_RFCE) + phy_reg |= BM_RCTL_RFCE; + hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg); + + wuc = E1000_WUC_PME_EN; + if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC)) + wuc |= E1000_WUC_APME; + + /* enable PHY wakeup in MAC register */ + ew32(WUFC, wufc); + ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME | + E1000_WUC_PME_STATUS | wuc)); + + /* configure and enable PHY wakeup in PHY registers */ + hw->phy.ops.write_reg_page(&adapter->hw, 
BM_WUFC, wufc); + hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc); + + /* activate PHY wakeup */ + wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT; + retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable); + if (retval) + e_err("Could not set PHY Host Wakeup bit\n"); +release: + hw->phy.ops.release(hw); + + return retval; +} + +static void e1000e_flush_lpic(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ret_val; + + pm_runtime_get_sync(netdev->dev.parent); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + goto fl_out; + + pr_info("EEE TX LPI TIMER: %08X\n", + er32(LPIC) >> E1000_LPIC_LPIET_SHIFT); + + hw->phy.ops.release(hw); + +fl_out: + pm_runtime_put_sync(netdev->dev.parent); +} + +static int e1000e_pm_freeze(struct device *dev) +{ + struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); + struct e1000_adapter *adapter = netdev_priv(netdev); + + netif_device_detach(netdev); + + if (netif_running(netdev)) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->state) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + + /* Quiesce the device without resetting the hardware */ + e1000e_down(adapter, false); + e1000_free_irq(adapter); + } + e1000e_reset_interrupt_capability(adapter); + + /* Allow time for pending master requests to run */ + e1000e_disable_pcie_master(&adapter->hw); + + return 0; +} + +static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u32 ctrl, ctrl_ext, rctl, status; + /* Runtime suspend should only enable wakeup for link changes */ + u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; + int retval = 0; + + status = er32(STATUS); + if (status & E1000_STATUS_LU) + wufc &= ~E1000_WUFC_LNKC; + + if (wufc) { + e1000_setup_rctl(adapter); + e1000e_set_rx_mode(netdev); + + /* turn on all-multi mode if wake on multicast is enabled */ + if (wufc & E1000_WUFC_MC) { + rctl = er32(RCTL); + rctl |= E1000_RCTL_MPE; + ew32(RCTL, rctl); + } + + ctrl = er32(CTRL); + ctrl |= E1000_CTRL_ADVD3WUC; + if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP)) + ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT; + ew32(CTRL, ctrl); + + if (adapter->hw.phy.media_type == e1000_media_type_fiber || + adapter->hw.phy.media_type == + e1000_media_type_internal_serdes) { + /* keep the laser running in D3 */ + ctrl_ext = er32(CTRL_EXT); + ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA; + ew32(CTRL_EXT, ctrl_ext); + } + + if (!runtime) + e1000e_power_up_phy(adapter); + + if (adapter->flags & FLAG_IS_ICH) + e1000_suspend_workarounds_ich8lan(&adapter->hw); + + if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { + /* enable wakeup by the PHY */ + retval = e1000_init_phy_wakeup(adapter, wufc); + if (retval) + return retval; + } else { + /* enable wakeup by the MAC */ + ew32(WUFC, wufc); + ew32(WUC, E1000_WUC_PME_EN); + } + } else { + ew32(WUC, 0); + ew32(WUFC, 0); + + e1000_power_down_phy(adapter); + } + + if (adapter->hw.phy.type == e1000_phy_igp_3) { + e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); + } else if ((hw->mac.type == e1000_pch_lpt) || + (hw->mac.type == e1000_pch_spt)) { + if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + /* ULP does not support wake from unicast, multicast + * or broadcast. 
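+	 * ULP (Ultra Low Power) is therefore only entered when none of
+	 * the EX/MC/BC wake filters are armed in wufc; a plain link-change
+	 * wake (E1000_WUFC_LNKC) does not prevent it.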
+ */ + retval = e1000_enable_ulp_lpt_lp(hw, !runtime); + + if (retval) + return retval; + } + + /* Ensure that the appropriate bits are set in LPI_CTRL + * for EEE in Sx + */ + if ((hw->phy.type >= e1000_phy_i217) && + adapter->eee_advert && hw->dev_spec.ich8lan.eee_lp_ability) { + u16 lpi_ctrl = 0; + + retval = hw->phy.ops.acquire(hw); + if (!retval) { + retval = e1e_rphy_locked(hw, I82579_LPI_CTRL, + &lpi_ctrl); + if (!retval) { + if (adapter->eee_advert & + hw->dev_spec.ich8lan.eee_lp_ability & + I82579_EEE_100_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_100_ENABLE; + if (adapter->eee_advert & + hw->dev_spec.ich8lan.eee_lp_ability & + I82579_EEE_1000_SUPPORTED) + lpi_ctrl |= I82579_LPI_CTRL_1000_ENABLE; + + retval = e1e_wphy_locked(hw, I82579_LPI_CTRL, + lpi_ctrl); + } + } + hw->phy.ops.release(hw); + } + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. + */ + e1000e_release_hw_control(adapter); + + pci_clear_master(pdev); + + /* The pci-e switch on some quad port adapters will report a + * correctable error when the MAC transitions from D0 to D3. To + * prevent this we need to mask off the correctable errors on the + * downstream port of the pci-e switch. + * + * We don't have the associated upstream bridge while assigning + * the PCI device into guest. For example, the KVM on power is + * one of the cases. + */ + if (adapter->flags & FLAG_IS_QUAD_PORT) { + struct pci_dev *us_dev = pdev->bus->self; + u16 devctl; + + if (!us_dev) + return 0; + + pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl); + pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, + (devctl & ~PCI_EXP_DEVCTL_CERE)); + + pci_save_state(pdev); + pci_prepare_to_sleep(pdev); + + pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl); + } + + return 0; +} + +/** + * __e1000e_disable_aspm - Disable ASPM states + * @pdev: pointer to PCI device struct + * @state: bit-mask of ASPM states to disable + * @locked: indication if this context holds pci_bus_sem locked. + * + * Some devices *must* have certain ASPM states disabled per hardware errata. + **/ +static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked) +{ + struct pci_dev *parent = pdev->bus->self; + u16 aspm_dis_mask = 0; + u16 pdev_aspmc, parent_aspmc; + + switch (state) { + case PCIE_LINK_STATE_L0S: + case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1: + aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S; + /* fall-through - can't have L1 without L0s */ + case PCIE_LINK_STATE_L1: + aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1; + break; + default: + return; + } + + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); + pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; + + if (parent) { + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, + &parent_aspmc); + parent_aspmc &= PCI_EXP_LNKCTL_ASPMC; + } + + /* Nothing to do if the ASPM states to be disabled already are */ + if (!(pdev_aspmc & aspm_dis_mask) && + (!parent || !(parent_aspmc & aspm_dis_mask))) + return; + + dev_info(&pdev->dev, "Disabling ASPM %s %s\n", + (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ? + "L0s" : "", + (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ? + "L1" : ""); + +#ifdef CONFIG_PCIEASPM + if (locked) + pci_disable_link_state_locked(pdev, state); + else + pci_disable_link_state(pdev, state); + + /* Double-check ASPM control. If not disabled by the above, the + * BIOS is preventing that from happening (or CONFIG_PCIEASPM is + * not enabled); override by writing PCI config space directly. 
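+	 * (For orientation: PCI_EXP_LNKCTL_ASPMC is the two-bit ASPM
+	 * Control field of Link Control, with L0s in bit 0 and L1 in
+	 * bit 1, which is why aspm_dis_mask can be compared against the
+	 * masked register value directly.)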
+ */ + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc); + pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC; + + if (!(aspm_dis_mask & pdev_aspmc)) + return; +#endif + + /* Both device and parent should have the same ASPM setting. + * Disable ASPM in downstream component first and then upstream. + */ + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask); + + if (parent) + pcie_capability_clear_word(parent, PCI_EXP_LNKCTL, + aspm_dis_mask); +} + +/** + * e1000e_disable_aspm - Disable ASPM states. + * @pdev: pointer to PCI device struct + * @state: bit-mask of ASPM states to disable + * + * This function acquires the pci_bus_sem! + * Some devices *must* have certain ASPM states disabled per hardware errata. + **/ +static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) +{ + __e1000e_disable_aspm(pdev, state, 0); +} + +/** + * e1000e_disable_aspm_locked Disable ASPM states. + * @pdev: pointer to PCI device struct + * @state: bit-mask of ASPM states to disable + * + * This function must be called with pci_bus_sem acquired! + * Some devices *must* have certain ASPM states disabled per hardware errata. + **/ +static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state) +{ + __e1000e_disable_aspm(pdev, state, 1); +} + +#ifdef CONFIG_PM +static int __e1000_resume(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + u16 aspm_disable_flag = 0; + + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + pci_set_master(pdev); + + if (hw->mac.type >= e1000_pch2lan) + e1000_resume_workarounds_pchlan(&adapter->hw); + + e1000e_power_up_phy(adapter); + + /* report the system wakeup cause from S3/S4 */ + if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) { + u16 phy_data; + + e1e_rphy(&adapter->hw, BM_WUS, &phy_data); + if (phy_data) { + e_info("PHY Wakeup cause - %s\n", + phy_data & E1000_WUS_EX ? "Unicast Packet" : + phy_data & E1000_WUS_MC ? "Multicast Packet" : + phy_data & E1000_WUS_BC ? "Broadcast Packet" : + phy_data & E1000_WUS_MAG ? "Magic Packet" : + phy_data & E1000_WUS_LNKC ? + "Link Status Change" : "other"); + } + e1e_wphy(&adapter->hw, BM_WUS, ~0); + } else { + u32 wus = er32(WUS); + + if (wus) { + e_info("MAC Wakeup cause - %s\n", + wus & E1000_WUS_EX ? "Unicast Packet" : + wus & E1000_WUS_MC ? "Multicast Packet" : + wus & E1000_WUS_BC ? "Broadcast Packet" : + wus & E1000_WUS_MAG ? "Magic Packet" : + wus & E1000_WUS_LNKC ? "Link Status Change" : + "other"); + } + ew32(WUS, ~0); + } + + e1000e_reset(adapter); + + e1000_init_manageability_pt(adapter); + + /* If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. 
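+	 * (DRV_LOAD is the load-bit handshake with the management
+	 * firmware; e1000e_get_hw_control() sets it through SWSM or
+	 * CTRL_EXT depending on the part.)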
+ */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int e1000e_pm_thaw(struct device *dev) +{ + struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); + struct e1000_adapter *adapter = netdev_priv(netdev); + + e1000e_set_interrupt_capability(adapter); + if (netif_running(netdev)) { + u32 err = e1000_request_irq(adapter); + + if (err) + return err; + + e1000e_up(adapter); + } + + netif_device_attach(netdev); + + return 0; +} + +static int e1000e_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int rc; + + e1000e_flush_lpic(pdev); + + e1000e_pm_freeze(dev); + + rc = __e1000_shutdown(pdev, false); + if (rc) + e1000e_pm_thaw(dev); + + return rc; +} + +static int e1000e_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + int rc; + + rc = __e1000_resume(pdev); + if (rc) + return rc; + + return e1000e_pm_thaw(dev); +} +#endif /* CONFIG_PM_SLEEP */ + +static int e1000e_pm_runtime_idle(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + u16 eee_lp; + + eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability; + + if (!e1000e_has_link(adapter)) { + adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp; + pm_schedule_suspend(dev, 5 * MSEC_PER_SEC); + } + + return -EBUSY; +} + +static int e1000e_pm_runtime_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + int rc; + + rc = __e1000_resume(pdev); + if (rc) + return rc; + + if (netdev->flags & IFF_UP) + rc = e1000e_up(adapter); + + return rc; +} + +static int e1000e_pm_runtime_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (netdev->flags & IFF_UP) { + int count = E1000_CHECK_RESET_COUNT; + + while (test_bit(__E1000_RESETTING, &adapter->state) && count--) + usleep_range(10000, 20000); + + WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); + + /* Down the device without resetting the hardware */ + e1000e_down(adapter, false); + } + + if (__e1000_shutdown(pdev, true)) { + e1000e_pm_runtime_resume(dev); + return -EBUSY; + } + + return 0; +} +#endif /* CONFIG_PM */ + +static void e1000_shutdown(struct pci_dev *pdev) +{ + e1000e_flush_lpic(pdev); + + e1000e_pm_freeze(&pdev->dev); + + __e1000_shutdown(pdev, false); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER + +static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data) +{ + struct net_device *netdev = data; + struct e1000_adapter *adapter = netdev_priv(netdev); + + if (adapter->msix_entries) { + int vector, msix_irq; + + vector = 0; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_intr_msix_rx(msix_irq, netdev); + enable_irq(msix_irq); + + vector++; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_intr_msix_tx(msix_irq, netdev); + enable_irq(msix_irq); + + vector++; + msix_irq = adapter->msix_entries[vector].vector; + disable_irq(msix_irq); + e1000_msix_other(msix_irq, netdev); + enable_irq(msix_irq); + } + + return IRQ_HANDLED; +} + +/** + * e1000_netpoll + * @netdev: network interface device structure + * + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable 
interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void e1000_netpoll(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	switch (adapter->int_mode) {
+	case E1000E_INT_MODE_MSIX:
+		e1000_intr_msix(adapter->pdev->irq, netdev);
+		break;
+	case E1000E_INT_MODE_MSI:
+		disable_irq(adapter->pdev->irq);
+		e1000_intr_msi(adapter->pdev->irq, netdev);
+		enable_irq(adapter->pdev->irq);
+		break;
+	default:	/* E1000E_INT_MODE_LEGACY */
+		disable_irq(adapter->pdev->irq);
+		e1000_intr(adapter->pdev->irq, netdev);
+		enable_irq(adapter->pdev->irq);
+		break;
+	}
+}
+#endif
+
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (netif_running(netdev))
+		e1000e_down(adapter, true);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000e_pm_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 aspm_disable_flag = 0;
+	int err;
+	pci_ers_result_t result;
+
+	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
+		aspm_disable_flag = PCIE_LINK_STATE_L0S;
+	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+		aspm_disable_flag |= PCIE_LINK_STATE_L1;
+	if (aspm_disable_flag)
+		e1000e_disable_aspm_locked(pdev, aspm_disable_flag);
+
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pdev->state_saved = true;
+		pci_restore_state(pdev);
+		pci_set_master(pdev);
+
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
+
+		e1000e_reset(adapter);
+		ew32(WUS, ~0);
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	return result;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * it's OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000e_pm_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	e1000_init_manageability_pt(adapter);
+
+	if (netif_running(netdev)) {
+		if (e1000e_up(adapter)) {
+			dev_err(&pdev->dev,
+				"can't bring device back up after reset\n");
+			return;
+		}
+	}
+
+	netif_device_attach(netdev);
+
+	/* If the controller has AMT, do not set DRV_LOAD until the interface
+	 * is up. For all other cases, let the f/w know that the h/w is now
+	 * under the control of the driver.
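+	 *
+	 * (This .resume hook is also the final step of the PCI error
+	 * recovery sequence registered in e1000_err_handler further
+	 * below: .error_detected -> .slot_reset -> .resume.)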
+ */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); +} + +static void e1000_print_device_info(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + struct net_device *netdev = adapter->netdev; + u32 ret_val; + u8 pba_str[E1000_PBANUM_LENGTH]; + + /* print bus type/speed/width info */ + e_info("(PCI Express:2.5GT/s:%s) %pM\n", + /* bus width */ + ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : + "Width x1"), + /* MAC address */ + netdev->dev_addr); + e_info("Intel(R) PRO/%s Network Connection\n", + (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); + ret_val = e1000_read_pba_string_generic(hw, pba_str, + E1000_PBANUM_LENGTH); + if (ret_val) + strlcpy((char *)pba_str, "Unknown", sizeof(pba_str)); + e_info("MAC: %d, PHY: %d, PBA No: %s\n", + hw->mac.type, hw->phy.type, pba_str); +} + +static void e1000_eeprom_checks(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int ret_val; + u16 buf = 0; + + if (hw->mac.type != e1000_82573) + return; + + ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf); + le16_to_cpus(&buf); + if (!ret_val && (!(buf & (1 << 0)))) { + /* Deep Smart Power Down (DSPD) */ + dev_warn(&adapter->pdev->dev, + "Warning: detected DSPD enabled in EEPROM\n"); + } +} + +static netdev_features_t e1000_fix_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + struct e1000_hw *hw = &adapter->hw; + + /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */ + if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN)) + features &= ~NETIF_F_RXFCS; + + return features; +} + +static int e1000_set_features(struct net_device *netdev, + netdev_features_t features) +{ + struct e1000_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed = features ^ netdev->features; + + if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) + adapter->flags |= FLAG_TSO_FORCE; + + if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS | + NETIF_F_RXALL))) + return 0; + + if (changed & NETIF_F_RXFCS) { + if (features & NETIF_F_RXFCS) { + adapter->flags2 &= ~FLAG2_CRC_STRIPPING; + } else { + /* We need to take it back to defaults, which might mean + * stripping is still disabled at the adapter level. 
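+	 * (Example: on a part whose default keeps the CRC,
+	 * FLAG2_DFLT_CRC_STRIPPING is clear, so switching NETIF_F_RXFCS
+	 * back off leaves stripping disabled instead of force-enabling it.)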
+ */ + if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING) + adapter->flags2 |= FLAG2_CRC_STRIPPING; + else + adapter->flags2 &= ~FLAG2_CRC_STRIPPING; + } + } + + netdev->features = features; + + if (netif_running(netdev)) + e1000e_reinit_locked(adapter); + else + e1000e_reset(adapter); + + return 0; +} + +static const struct net_device_ops e1000e_netdev_ops = { + .ndo_open = e1000_open, + .ndo_stop = e1000_close, + .ndo_start_xmit = e1000_xmit_frame, + .ndo_get_stats64 = e1000e_get_stats64, + .ndo_set_rx_mode = e1000e_set_rx_mode, + .ndo_set_mac_address = e1000_set_mac, + .ndo_change_mtu = e1000_change_mtu, + .ndo_do_ioctl = e1000_ioctl, + .ndo_tx_timeout = e1000_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + + .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = e1000_netpoll, +#endif + .ndo_set_features = e1000_set_features, + .ndo_fix_features = e1000_fix_features, + .ndo_features_check = passthru_features_check, +}; + +/** + * e1000_probe - Device Initialization Routine + * @pdev: PCI device information struct + * @ent: entry in e1000_pci_tbl + * + * Returns 0 on success, negative on failure + * + * e1000_probe initializes an adapter identified by a pci_dev structure. + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *netdev; + struct e1000_adapter *adapter; + struct e1000_hw *hw; + const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; + resource_size_t mmio_start, mmio_len; + resource_size_t flash_start, flash_len; + static int cards_found; + u16 aspm_disable_flag = 0; + int bars, i, err, pci_using_dac; + u16 eeprom_data = 0; + u16 eeprom_apme_mask = E1000_EEPROM_APME; + s32 rval = 0; + + if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S) + aspm_disable_flag = PCIE_LINK_STATE_L0S; + if (ei->flags2 & FLAG2_DISABLE_ASPM_L1) + aspm_disable_flag |= PCIE_LINK_STATE_L1; + if (aspm_disable_flag) + e1000e_disable_aspm(pdev, aspm_disable_flag); + + err = pci_enable_device_mem(pdev); + if (err) + return err; + + pci_using_dac = 0; + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (!err) { + pci_using_dac = 1; + } else { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(&pdev->dev, + "No usable DMA configuration, aborting\n"); + goto err_dma; + } + } + + bars = pci_select_bars(pdev, IORESOURCE_MEM); + err = pci_request_selected_regions_exclusive(pdev, bars, + e1000e_driver_name); + if (err) + goto err_pci_reg; + + /* AER (Advanced Error Reporting) hooks */ + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + /* PCI config space info */ + err = pci_save_state(pdev); + if (err) + goto err_alloc_etherdev; + + err = -ENOMEM; + netdev = alloc_etherdev(sizeof(struct e1000_adapter)); + if (!netdev) + goto err_alloc_etherdev; + + SET_NETDEV_DEV(netdev, &pdev->dev); + + netdev->irq = pdev->irq; + + pci_set_drvdata(pdev, netdev); + adapter = netdev_priv(netdev); + hw = &adapter->hw; + adapter->netdev = netdev; + adapter->pdev = pdev; + adapter->ei = ei; + adapter->pba = ei->pba; + adapter->flags = ei->flags; + adapter->flags2 = ei->flags2; + adapter->hw.adapter = adapter; + adapter->hw.mac.type = ei->mac; + adapter->max_hw_frame_size = ei->max_hw_frame_size; + adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + + mmio_start = pci_resource_start(pdev, 
0); + mmio_len = pci_resource_len(pdev, 0); + + err = -EIO; + adapter->hw.hw_addr = ioremap(mmio_start, mmio_len); + if (!adapter->hw.hw_addr) + goto err_ioremap; + + if ((adapter->flags & FLAG_HAS_FLASH) && + (pci_resource_flags(pdev, 1) & IORESOURCE_MEM) && + (hw->mac.type < e1000_pch_spt)) { + flash_start = pci_resource_start(pdev, 1); + flash_len = pci_resource_len(pdev, 1); + adapter->hw.flash_address = ioremap(flash_start, flash_len); + if (!adapter->hw.flash_address) + goto err_flashmap; + } + + /* Set default EEE advertisement */ + if (adapter->flags2 & FLAG2_HAS_EEE) + adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T; + + /* construct the net_device struct */ + netdev->netdev_ops = &e1000e_netdev_ops; + e1000e_set_ethtool_ops(netdev); + netdev->watchdog_timeo = 5 * HZ; + netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64); + strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + + netdev->mem_start = mmio_start; + netdev->mem_end = mmio_start + mmio_len; + + adapter->bd_number = cards_found++; + + e1000e_check_options(adapter); + + /* setup adapter struct */ + err = e1000_sw_init(adapter); + if (err) + goto err_sw_init; + + memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); + memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops)); + memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); + + err = ei->get_variants(adapter); + if (err) + goto err_hw_init; + + if ((adapter->flags & FLAG_IS_ICH) && + (adapter->flags & FLAG_READ_ONLY_NVM) && + (hw->mac.type < e1000_pch_spt)) + e1000e_write_protect_nvm_ich8lan(&adapter->hw); + + hw->mac.ops.get_bus_info(&adapter->hw); + + adapter->hw.phy.autoneg_wait_to_complete = 0; + + /* Copper options */ + if (adapter->hw.phy.media_type == e1000_media_type_copper) { + adapter->hw.phy.mdix = AUTO_ALL_MODES; + adapter->hw.phy.disable_polarity_correction = 0; + adapter->hw.phy.ms_type = e1000_ms_hw_default; + } + + if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw)) + dev_info(&pdev->dev, + "PHY reset is blocked due to SOL/IDER session.\n"); + + /* Set initial default active device features */ + netdev->features = (NETIF_F_SG | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | + NETIF_F_RXCSUM | + NETIF_F_HW_CSUM); + + /* Set user-changeable features (subset of all device features) */ + netdev->hw_features = netdev->features; + netdev->hw_features |= NETIF_F_RXFCS; + netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->hw_features |= NETIF_F_RXALL; + + if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->vlan_features |= (NETIF_F_SG | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_HW_CSUM); + + netdev->priv_flags |= IFF_UNICAST_FLT; + + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; + netdev->vlan_features |= NETIF_F_HIGHDMA; + } + + if (e1000e_enable_mng_pass_thru(&adapter->hw)) + adapter->flags |= FLAG_MNG_PT_ENABLED; + + /* before reading the NVM, reset the controller to + * put the device in a known good starting state + */ + adapter->hw.mac.ops.reset_hw(&adapter->hw); + + /* systems with ASPM and others may see the checksum fail on the first + * attempt. 
Let's give it a few tries + */ + for (i = 0;; i++) { + if (e1000_validate_nvm_checksum(&adapter->hw) >= 0) + break; + if (i == 2) { + dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n"); + err = -EIO; + goto err_eeprom; + } + } + + e1000_eeprom_checks(adapter); + + /* copy the MAC address */ + if (e1000e_read_mac_addr(&adapter->hw)) + dev_err(&pdev->dev, + "NVM Read Error while reading MAC address\n"); + + memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len); + + if (!is_valid_ether_addr(netdev->dev_addr)) { + dev_err(&pdev->dev, "Invalid MAC Address: %pM\n", + netdev->dev_addr); + err = -EIO; + goto err_eeprom; + } + + init_timer(&adapter->watchdog_timer); + adapter->watchdog_timer.function = e1000_watchdog; + adapter->watchdog_timer.data = (unsigned long)adapter; + + init_timer(&adapter->phy_info_timer); + adapter->phy_info_timer.function = e1000_update_phy_info; + adapter->phy_info_timer.data = (unsigned long)adapter; + + INIT_WORK(&adapter->reset_task, e1000_reset_task); + INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task); + INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); + INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); + INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); + + /* Initialize link parameters. User can change them with ethtool */ + adapter->hw.mac.autoneg = 1; + adapter->fc_autoneg = true; + adapter->hw.fc.requested_mode = e1000_fc_default; + adapter->hw.fc.current_mode = e1000_fc_default; + adapter->hw.phy.autoneg_advertised = 0x2f; + + /* Initial Wake on LAN setting - If APM wake is enabled in + * the EEPROM, enable the ACPI Magic Packet filter + */ + if (adapter->flags & FLAG_APME_IN_WUC) { + /* APME bit in EEPROM is mapped to WUC.APME */ + eeprom_data = er32(WUC); + eeprom_apme_mask = E1000_WUC_APME; + if ((hw->mac.type > e1000_ich10lan) && + (eeprom_data & E1000_WUC_PHY_WAKE)) + adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; + } else if (adapter->flags & FLAG_APME_IN_CTRL3) { + if (adapter->flags & FLAG_APME_CHECK_PORT_B && + (adapter->hw.bus.func == 1)) + rval = e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_B, + 1, &eeprom_data); + else + rval = e1000_read_nvm(&adapter->hw, + NVM_INIT_CONTROL3_PORT_A, + 1, &eeprom_data); + } + + /* fetch WoL from EEPROM */ + if (rval) + e_dbg("NVM read error getting WoL initial values: %d\n", rval); + else if (eeprom_data & eeprom_apme_mask) + adapter->eeprom_wol |= E1000_WUFC_MAG; + + /* now that we have the eeprom settings, apply the special cases + * where the eeprom may be wrong or the board simply won't support + * wake on lan on a particular port + */ + if (!(adapter->flags & FLAG_HAS_WOL)) + adapter->eeprom_wol = 0; + + /* initialize the wol settings based on the eeprom settings */ + adapter->wol = adapter->eeprom_wol; + + /* make sure adapter isn't asleep if manageability is enabled */ + if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) || + (hw->mac.ops.check_mng_mode(hw))) + device_wakeup_enable(&pdev->dev); + + /* save off EEPROM version number */ + rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers); + + if (rval) { + e_dbg("NVM read error getting EEPROM version: %d\n", rval); + adapter->eeprom_vers = 0; + } + + /* reset the hardware with the new settings */ + e1000e_reset(adapter); + + /* If the controller has AMT, do not set DRV_LOAD until the interface + * is up. For all other cases, let the f/w know that the h/w is now + * under the control of the driver. 
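+	 * (Same handshake as at the end of __e1000_resume() above; for
+	 * AMT parts the bit is taken later, in e1000_open(), once the
+	 * interface actually comes up.)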
+ */ + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_get_hw_control(adapter); + + strlcpy(netdev->name, "eth%d", sizeof(netdev->name)); + err = register_netdev(netdev); + if (err) + goto err_register; + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); + + /* init PTP hardware clock */ + e1000e_ptp_init(adapter); + + e1000_print_device_info(adapter); + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + return 0; + +err_register: + if (!(adapter->flags & FLAG_HAS_AMT)) + e1000e_release_hw_control(adapter); +err_eeprom: + if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw)) + e1000_phy_hw_reset(&adapter->hw); +err_hw_init: + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); +err_sw_init: + if ((adapter->hw.flash_address) && (hw->mac.type < e1000_pch_spt)) + iounmap(adapter->hw.flash_address); + e1000e_reset_interrupt_capability(adapter); +err_flashmap: + iounmap(adapter->hw.hw_addr); +err_ioremap: + free_netdev(netdev); +err_alloc_etherdev: + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); +err_pci_reg: +err_dma: + pci_disable_device(pdev); + return err; +} + +/** + * e1000_remove - Device Removal Routine + * @pdev: PCI device information struct + * + * e1000_remove is called by the PCI subsystem to alert the driver + * that it should release a PCI device. The could be caused by a + * Hot-Plug event, or because the driver is going to be removed from + * memory. + **/ +static void e1000_remove(struct pci_dev *pdev) +{ + struct net_device *netdev = pci_get_drvdata(pdev); + struct e1000_adapter *adapter = netdev_priv(netdev); + bool down = test_bit(__E1000_DOWN, &adapter->state); + + e1000e_ptp_remove(adapter); + + /* The timers may be rescheduled, so explicitly disable them + * from being rescheduled. + */ + if (!down) + set_bit(__E1000_DOWN, &adapter->state); + del_timer_sync(&adapter->watchdog_timer); + del_timer_sync(&adapter->phy_info_timer); + + cancel_work_sync(&adapter->reset_task); + cancel_work_sync(&adapter->watchdog_task); + cancel_work_sync(&adapter->downshift_task); + cancel_work_sync(&adapter->update_phy_task); + cancel_work_sync(&adapter->print_hang_task); + + if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) { + cancel_work_sync(&adapter->tx_hwtstamp_work); + if (adapter->tx_hwtstamp_skb) { + dev_kfree_skb_any(adapter->tx_hwtstamp_skb); + adapter->tx_hwtstamp_skb = NULL; + } + } + + /* Don't lie to e1000_close() down the road. */ + if (!down) + clear_bit(__E1000_DOWN, &adapter->state); + unregister_netdev(netdev); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + /* Release control of h/w to f/w. If f/w is AMT enabled, this + * would have already happened in close and is redundant. 
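+	 * (This is the counterpart of the e1000e_get_hw_control() calls
+	 * made during probe and resume: clearing DRV_LOAD hands the NIC
+	 * back to the management firmware.)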
+ */ + e1000e_release_hw_control(adapter); + + e1000e_reset_interrupt_capability(adapter); + kfree(adapter->tx_ring); + kfree(adapter->rx_ring); + + iounmap(adapter->hw.hw_addr); + if ((adapter->hw.flash_address) && + (adapter->hw.mac.type < e1000_pch_spt)) + iounmap(adapter->hw.flash_address); + pci_release_selected_regions(pdev, + pci_select_bars(pdev, IORESOURCE_MEM)); + + free_netdev(netdev); + + /* AER disable */ + pci_disable_pcie_error_reporting(pdev); + + pci_disable_device(pdev); +} + +/* PCI Error Recovery (ERS) */ +static const struct pci_error_handlers e1000_err_handler = { + .error_detected = e1000_io_error_detected, + .slot_reset = e1000_io_slot_reset, + .resume = e1000_io_resume, +}; + +static const struct pci_device_id e1000_pci_tbl[] = { + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), + board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT), + board_80003es2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT), + board_80003es2lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, 
E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan }, + + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_LM2), board_pch_spt }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_SPT_I219_V2), board_pch_spt }, + + { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ +}; +MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); + +static const struct dev_pm_ops e1000_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = e1000e_pm_suspend, + .resume = e1000e_pm_resume, + .freeze = e1000e_pm_freeze, + .thaw = e1000e_pm_thaw, + .poweroff = e1000e_pm_suspend, + .restore = e1000e_pm_resume, +#endif + SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume, + e1000e_pm_runtime_idle) +}; + +/* PCI Device API Driver */ +static struct pci_driver e1000_driver = { + .name = e1000e_driver_name, + .id_table = e1000_pci_tbl, + .probe = e1000_probe, + .remove = e1000_remove, + .driver = { + .pm = &e1000_pm_ops, + }, + .shutdown = e1000_shutdown, + .err_handler = &e1000_err_handler +}; + +/** + * e1000_init_module - Driver Registration Routine + * + * e1000_init_module is the first routine called when the driver is + * loaded. All it does is register with the PCI subsystem. + **/ +static int __init e1000_init_module(void) +{ + int ret; + + pr_info("Intel(R) PRO/1000 Network Driver - %s\n", + e1000e_driver_version); + pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n"); + ret = pci_register_driver(&e1000_driver); + + return ret; +} +module_init(e1000_init_module); + +/** + * e1000_exit_module - Driver Exit Cleanup Routine + * + * e1000_exit_module is called just before the driver is removed + * from memory. 
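+ *
+ * pci_unregister_driver() in turn invokes e1000_remove() for every
+ * still-bound device before the module text is released.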
+ **/ +static void __exit e1000_exit_module(void) +{ + pci_unregister_driver(&e1000_driver); +} +module_exit(e1000_exit_module); + +MODULE_AUTHOR("Intel Corporation, "); +MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +/* netdev.c */ diff --git a/addons/e1000e/src/4.4.180/nvm.c b/addons/e1000e/src/4.4.180/nvm.c new file mode 100644 index 00000000..49f205c0 --- /dev/null +++ b/addons/e1000e/src/4.4.180/nvm.c @@ -0,0 +1,633 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "e1000.h" + +/** + * e1000_raise_eec_clk - Raise EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Enable/Raise the EEPROM clock bit. + **/ +static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd | E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_lower_eec_clk - Lower EEPROM clock + * @hw: pointer to the HW structure + * @eecd: pointer to the EEPROM + * + * Clear/Lower the EEPROM clock bit. + **/ +static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd) +{ + *eecd = *eecd & ~E1000_EECD_SK; + ew32(EECD, *eecd); + e1e_flush(); + udelay(hw->nvm.delay_usec); +} + +/** + * e1000_shift_out_eec_bits - Shift data bits our to the EEPROM + * @hw: pointer to the HW structure + * @data: data to send to the EEPROM + * @count: number of bits to shift out + * + * We need to shift 'count' bits out to the EEPROM. So, the value in the + * "data" parameter will be shifted out to the EEPROM one bit at a time. + * In order to do this, "data" must be broken down into bits. + **/ +static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u32 mask; + + mask = 0x01 << (count - 1); + if (nvm->type == e1000_nvm_eeprom_spi) + eecd |= E1000_EECD_DO; + + do { + eecd &= ~E1000_EECD_DI; + + if (data & mask) + eecd |= E1000_EECD_DI; + + ew32(EECD, eecd); + e1e_flush(); + + udelay(nvm->delay_usec); + + e1000_raise_eec_clk(hw, &eecd); + e1000_lower_eec_clk(hw, &eecd); + + mask >>= 1; + } while (mask); + + eecd &= ~E1000_EECD_DI; + ew32(EECD, eecd); +} + +/** + * e1000_shift_in_eec_bits - Shift data bits in from the EEPROM + * @hw: pointer to the HW structure + * @count: number of bits to shift in + * + * In order to read a register from the EEPROM, we need to shift 'count' bits + * in from the EEPROM. Bits are "shifted in" by raising the clock input to + * the EEPROM (setting the SK bit), and then reading the value of the data out + * "DO" bit. During this "shifting in" process the data in "DI" bit should + * always be clear. 
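+ *
+ * (In effect, each SK rising edge performs data = (data << 1) | DO,
+ * so the first bit clocked in ends up as the most significant bit of
+ * the returned value.)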
+ **/ +static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count) +{ + u32 eecd; + u32 i; + u16 data; + + eecd = er32(EECD); + eecd &= ~(E1000_EECD_DO | E1000_EECD_DI); + data = 0; + + for (i = 0; i < count; i++) { + data <<= 1; + e1000_raise_eec_clk(hw, &eecd); + + eecd = er32(EECD); + + eecd &= ~E1000_EECD_DI; + if (eecd & E1000_EECD_DO) + data |= 1; + + e1000_lower_eec_clk(hw, &eecd); + } + + return data; +} + +/** + * e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion + * @hw: pointer to the HW structure + * @ee_reg: EEPROM flag for polling + * + * Polls the EEPROM status bit for either read or write completion based + * upon the value of 'ee_reg'. + **/ +s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg) +{ + u32 attempts = 100000; + u32 i, reg = 0; + + for (i = 0; i < attempts; i++) { + if (ee_reg == E1000_NVM_POLL_READ) + reg = er32(EERD); + else + reg = er32(EEWR); + + if (reg & E1000_NVM_RW_REG_DONE) + return 0; + + udelay(5); + } + + return -E1000_ERR_NVM; +} + +/** + * e1000e_acquire_nvm - Generic request for access to EEPROM + * @hw: pointer to the HW structure + * + * Set the EEPROM access request bit and wait for EEPROM access grant bit. + * Return successful if access grant bit set, else clear the request for + * EEPROM access and return -E1000_ERR_NVM (-1). + **/ +s32 e1000e_acquire_nvm(struct e1000_hw *hw) +{ + u32 eecd = er32(EECD); + s32 timeout = E1000_NVM_GRANT_ATTEMPTS; + + ew32(EECD, eecd | E1000_EECD_REQ); + eecd = er32(EECD); + + while (timeout) { + if (eecd & E1000_EECD_GNT) + break; + udelay(5); + eecd = er32(EECD); + timeout--; + } + + if (!timeout) { + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); + e_dbg("Could not acquire NVM grant\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000_standby_nvm - Return EEPROM to standby state + * @hw: pointer to the HW structure + * + * Return the EEPROM to a standby state. + **/ +static void e1000_standby_nvm(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + + if (nvm->type == e1000_nvm_eeprom_spi) { + /* Toggle CS to flush commands */ + eecd |= E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + eecd &= ~E1000_EECD_CS; + ew32(EECD, eecd); + e1e_flush(); + udelay(nvm->delay_usec); + } +} + +/** + * e1000_stop_nvm - Terminate EEPROM command + * @hw: pointer to the HW structure + * + * Terminates the current command by inverting the EEPROM's chip select pin. + **/ +static void e1000_stop_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + eecd = er32(EECD); + if (hw->nvm.type == e1000_nvm_eeprom_spi) { + /* Pull CS high */ + eecd |= E1000_EECD_CS; + e1000_lower_eec_clk(hw, &eecd); + } +} + +/** + * e1000e_release_nvm - Release exclusive access to EEPROM + * @hw: pointer to the HW structure + * + * Stop any current commands to the EEPROM and clear the EEPROM request bit. + **/ +void e1000e_release_nvm(struct e1000_hw *hw) +{ + u32 eecd; + + e1000_stop_nvm(hw); + + eecd = er32(EECD); + eecd &= ~E1000_EECD_REQ; + ew32(EECD, eecd); +} + +/** + * e1000_ready_nvm_eeprom - Prepares EEPROM for read/write + * @hw: pointer to the HW structure + * + * Setups the EEPROM for reading and writing. 
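+ * For SPI parts this means polling the status register: the RDSR
+ * opcode is shifted out and bit 0 (NVM_STATUS_RDY_SPI) is read back
+ * until the part reports itself no longer busy.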
+ **/ +static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 eecd = er32(EECD); + u8 spi_stat_reg; + + if (nvm->type == e1000_nvm_eeprom_spi) { + u16 timeout = NVM_MAX_RETRY_SPI; + + /* Clear SK and CS */ + eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); + ew32(EECD, eecd); + e1e_flush(); + udelay(1); + + /* Read "Status Register" repeatedly until the LSB is cleared. + * The EEPROM will signal that the command has been completed + * by clearing bit 0 of the internal status register. If it's + * not cleared within 'timeout', then error out. + */ + while (timeout) { + e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI, + hw->nvm.opcode_bits); + spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8); + if (!(spi_stat_reg & NVM_STATUS_RDY_SPI)) + break; + + udelay(5); + e1000_standby_nvm(hw); + timeout--; + } + + if (!timeout) { + e_dbg("SPI NVM Status error\n"); + return -E1000_ERR_NVM; + } + } + + return 0; +} + +/** + * e1000e_read_nvm_eerd - Reads EEPROM using EERD register + * @hw: pointer to the HW structure + * @offset: offset of word in the EEPROM to read + * @words: number of words to read + * @data: word read from the EEPROM + * + * Reads a 16 bit word from the EEPROM using the EERD register. + **/ +s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + u32 i, eerd = 0; + s32 ret_val = 0; + + /* A check for invalid values: offset too large, too many words, + * too many words for the offset, and not enough words. + */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + for (i = 0; i < words; i++) { + eerd = ((offset + i) << E1000_NVM_RW_ADDR_SHIFT) + + E1000_NVM_RW_REG_START; + + ew32(EERD, eerd); + ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ); + if (ret_val) { + e_dbg("NVM read error: %d\n", ret_val); + break; + } + + data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA); + } + + return ret_val; +} + +/** + * e1000e_write_nvm_spi - Write to EEPROM using SPI + * @hw: pointer to the HW structure + * @offset: offset within the EEPROM to be written to + * @words: number of words to write + * @data: 16 bit word(s) to be written to the EEPROM + * + * Writes data to EEPROM at offset using SPI interface. + * + * If e1000e_update_nvm_checksum is not called after this function , the + * EEPROM will most likely contain an invalid checksum. + **/ +s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data) +{ + struct e1000_nvm_info *nvm = &hw->nvm; + s32 ret_val = -E1000_ERR_NVM; + u16 widx = 0; + + /* A check for invalid values: offset too large, too many words, + * and not enough words. 
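+ * (This is the same bounds guard as in e1000e_read_nvm_eerd() above:
+ * both offset and offset + words must lie within nvm->word_size.)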
+ */ + if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) || + (words == 0)) { + e_dbg("nvm parameter(s) out of bounds\n"); + return -E1000_ERR_NVM; + } + + while (widx < words) { + u8 write_opcode = NVM_WRITE_OPCODE_SPI; + + ret_val = nvm->ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000_ready_nvm_eeprom(hw); + if (ret_val) { + nvm->ops.release(hw); + return ret_val; + } + + e1000_standby_nvm(hw); + + /* Send the WRITE ENABLE command (8 bit opcode) */ + e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI, + nvm->opcode_bits); + + e1000_standby_nvm(hw); + + /* Some SPI eeproms use the 8th address bit embedded in the + * opcode + */ + if ((nvm->address_bits == 8) && (offset >= 128)) + write_opcode |= NVM_A8_OPCODE_SPI; + + /* Send the Write command (8-bit opcode + addr) */ + e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits); + e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2), + nvm->address_bits); + + /* Loop to allow for up to whole page write of eeprom */ + while (widx < words) { + u16 word_out = data[widx]; + + word_out = (word_out >> 8) | (word_out << 8); + e1000_shift_out_eec_bits(hw, word_out, 16); + widx++; + + if ((((offset + widx) * 2) % nvm->page_size) == 0) { + e1000_standby_nvm(hw); + break; + } + } + usleep_range(10000, 20000); + nvm->ops.release(hw); + } + + return ret_val; +} + +/** + * e1000_read_pba_string_generic - Read device part number + * @hw: pointer to the HW structure + * @pba_num: pointer to device part number + * @pba_num_size: size of part number buffer + * + * Reads the product board assembly (PBA) number from the EEPROM and stores + * the value in pba_num. + **/ +s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num, + u32 pba_num_size) +{ + s32 ret_val; + u16 nvm_data; + u16 pba_ptr; + u16 offset; + u16 length; + + if (pba_num == NULL) { + e_dbg("PBA string buffer was null\n"); + return -E1000_ERR_INVALID_ARGUMENT; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + /* if nvm_data is not ptr guard the PBA must be in legacy format which + * means pba_ptr is actually our second data word for the PBA number + * and we can decode it into an ascii string + */ + if (nvm_data != NVM_PBA_PTR_GUARD) { + e_dbg("NVM PBA number is not stored as string\n"); + + /* make sure callers buffer is big enough to store the PBA */ + if (pba_num_size < E1000_PBANUM_LENGTH) { + e_dbg("PBA string buffer too small\n"); + return E1000_ERR_NO_SPACE; + } + + /* extract hex string from data and pba_ptr */ + pba_num[0] = (nvm_data >> 12) & 0xF; + pba_num[1] = (nvm_data >> 8) & 0xF; + pba_num[2] = (nvm_data >> 4) & 0xF; + pba_num[3] = nvm_data & 0xF; + pba_num[4] = (pba_ptr >> 12) & 0xF; + pba_num[5] = (pba_ptr >> 8) & 0xF; + pba_num[6] = '-'; + pba_num[7] = 0; + pba_num[8] = (pba_ptr >> 4) & 0xF; + pba_num[9] = pba_ptr & 0xF; + + /* put a null character on the end of our string */ + pba_num[10] = '\0'; + + /* switch all the data but the '-' to hex char */ + for (offset = 0; offset < 10; offset++) { + if (pba_num[offset] < 0xA) + pba_num[offset] += '0'; + else if (pba_num[offset] < 0x10) + pba_num[offset] += 'A' - 0xA; + } + + return 0; + } + + ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || length == 0) { + 
e_dbg("NVM PBA number section invalid length\n"); + return -E1000_ERR_NVM_PBA_SECTION; + } + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { + e_dbg("PBA string buffer too small\n"); + return -E1000_ERR_NO_SPACE; + } + + /* trim pba length from start of string */ + pba_ptr++; + length--; + + for (offset = 0; offset < length; offset++) { + ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(nvm_data >> 8); + pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF); + } + pba_num[offset * 2] = '\0'; + + return 0; +} + +/** + * e1000_read_mac_addr_generic - Read device MAC address + * @hw: pointer to the HW structure + * + * Reads the device MAC address from the EEPROM and stores the value. + * Since devices with two ports use the same EEPROM, we increment the + * last bit in the MAC address for the second port. + **/ +s32 e1000_read_mac_addr_generic(struct e1000_hw *hw) +{ + u32 rar_high; + u32 rar_low; + u16 i; + + rar_high = er32(RAH(0)); + rar_low = er32(RAL(0)); + + for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i] = (u8)(rar_low >> (i * 8)); + + for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++) + hw->mac.perm_addr[i + 4] = (u8)(rar_high >> (i * 8)); + + for (i = 0; i < ETH_ALEN; i++) + hw->mac.addr[i] = hw->mac.perm_addr[i]; + + return 0; +} + +/** + * e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum + * @hw: pointer to the HW structure + * + * Calculates the EEPROM checksum by reading/adding each word of the EEPROM + * and then verifies that the sum of the EEPROM is equal to 0xBABA. + **/ +s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error\n"); + return ret_val; + } + checksum += nvm_data; + } + + if (checksum != (u16)NVM_SUM) { + e_dbg("NVM Checksum Invalid\n"); + return -E1000_ERR_NVM; + } + + return 0; +} + +/** + * e1000e_update_nvm_checksum_generic - Update EEPROM checksum + * @hw: pointer to the HW structure + * + * Updates the EEPROM checksum by reading/adding each word of the EEPROM + * up to the checksum. Then calculates the EEPROM checksum and writes the + * value to the EEPROM. + **/ +s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw) +{ + s32 ret_val; + u16 checksum = 0; + u16 i, nvm_data; + + for (i = 0; i < NVM_CHECKSUM_REG; i++) { + ret_val = e1000_read_nvm(hw, i, 1, &nvm_data); + if (ret_val) { + e_dbg("NVM Read Error while updating checksum.\n"); + return ret_val; + } + checksum += nvm_data; + } + checksum = (u16)NVM_SUM - checksum; + ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum); + if (ret_val) + e_dbg("NVM Write Error while updating checksum.\n"); + + return ret_val; +} + +/** + * e1000e_reload_nvm_generic - Reloads EEPROM + * @hw: pointer to the HW structure + * + * Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the + * extended control register. 
+ **/
+void e1000e_reload_nvm_generic(struct e1000_hw *hw)
+{
+	u32 ctrl_ext;
+
+	usleep_range(10, 20);
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+	ew32(CTRL_EXT, ctrl_ext);
+	e1e_flush();
+}
diff --git a/addons/e1000e/src/4.4.180/nvm.h b/addons/e1000e/src/4.4.180/nvm.h
new file mode 100644
index 00000000..5d46967e
--- /dev/null
+++ b/addons/e1000e/src/4.4.180/nvm.h
@@ -0,0 +1,40 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS
+ * e1000-devel Mailing List
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000E_NVM_H_
+#define _E1000E_NVM_H_
+
+s32 e1000e_acquire_nvm(struct e1000_hw *hw);
+
+s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+				  u32 pba_num_size);
+s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
+s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000e_release_nvm(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE 0xDB00
+
+#endif
diff --git a/addons/e1000e/src/4.4.180/param.c b/addons/e1000e/src/4.4.180/param.c
new file mode 100644
index 00000000..6d8c39ab
--- /dev/null
+++ b/addons/e1000e/src/4.4.180/param.c
@@ -0,0 +1,533 @@
+/* Intel PRO/1000 Linux driver
+ * Copyright(c) 1999 - 2015 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * Linux NICS
+ * e1000-devel Mailing List
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "e1000.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
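+ *
+ * (Usage sketch, with hypothetical values: every option below is a
+ * per-adapter array, so "modprobe e1000e InterruptThrottleRate=3,0"
+ * applies 3 to the first port and 0 to the second; slots left unset
+ * stay OPTION_UNSET and fall back to the option's default.)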
+ */ +#define E1000_MAX_NIC 32 + +#define OPTION_UNSET -1 +#define OPTION_DISABLED 0 +#define OPTION_ENABLED 1 + +#define COPYBREAK_DEFAULT 256 +unsigned int copybreak = COPYBREAK_DEFAULT; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +/* All parameters are treated the same, as an integer array of values. + * This macro just reduces the need to repeat the same declaration code + * over and over (plus this helps to avoid typo bugs). + */ +#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET } +#define E1000_PARAM(X, desc) \ + static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \ + static unsigned int num_##X; \ + module_param_array_named(X, X, int, &num_##X, 0); \ + MODULE_PARM_DESC(X, desc); + +/* Transmit Interrupt Delay in units of 1.024 microseconds + * Tx interrupt delay needs to typically be set to something non-zero + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay"); +#define DEFAULT_TIDV 8 +#define MAX_TXDELAY 0xFFFF +#define MIN_TXDELAY 0 + +/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay"); +#define DEFAULT_TADV 32 +#define MAX_TXABSDELAY 0xFFFF +#define MIN_TXABSDELAY 0 + +/* Receive Interrupt Delay in units of 1.024 microseconds + * hardware will likely hang if you set this to anything but zero. + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxIntDelay, "Receive Interrupt Delay"); +#define MAX_RXDELAY 0xFFFF +#define MIN_RXDELAY 0 + +/* Receive Absolute Interrupt Delay in units of 1.024 microseconds + * + * Valid Range: 0-65535 + */ +E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay"); +#define MAX_RXABSDELAY 0xFFFF +#define MIN_RXABSDELAY 0 + +/* Interrupt Throttle Rate (interrupts/sec) + * + * Valid Range: 100-100000 or one of: 0=off, 1=dynamic, 3=dynamic conservative + */ +E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate"); +#define DEFAULT_ITR 3 +#define MAX_ITR 100000 +#define MIN_ITR 100 + +/* IntMode (Interrupt Mode) + * + * Valid Range: varies depending on kernel configuration & hardware support + * + * legacy=0, MSI=1, MSI-X=2 + * + * When MSI/MSI-X support is enabled in kernel- + * Default Value: 2 (MSI-X) when supported by hardware, 1 (MSI) otherwise + * When MSI/MSI-X support is not enabled in kernel- + * Default Value: 0 (legacy) + * + * When a mode is specified that is not allowed/supported, it will be + * demoted to the most advanced interrupt mode available. 
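
For readers unfamiliar with the pattern, each E1000_PARAM() invocation above expands to a per-board value array plus a count of how many entries the user actually supplied, roughly as follows (shown for TxIntDelay):

static int TxIntDelay[E1000_MAX_NIC + 1] =
	{ [0 ... E1000_MAX_NIC] = OPTION_UNSET };
static unsigned int num_TxIntDelay;
module_param_array_named(TxIntDelay, TxIntDelay, int, &num_TxIntDelay, 0);
MODULE_PARM_DESC(TxIntDelay, "Transmit Interrupt Delay");

So a load such as `modprobe e1000e TxIntDelay=8,16` assigns 8 to board 0 and 16 to board 1; boards beyond the supplied list keep OPTION_UNSET and are later given the option's default by e1000e_check_options().
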
+ */ +E1000_PARAM(IntMode, "Interrupt Mode"); +#define MAX_INTMODE 2 +#define MIN_INTMODE 0 + +/* Enable Smart Power Down of the PHY + * + * Valid Range: 0, 1 + * + * Default Value: 0 (disabled) + */ +E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down"); + +/* Enable Kumeran Lock Loss workaround + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround"); + +/* Write Protect NVM + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(WriteProtectNVM, + "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]"); + +/* Enable CRC Stripping + * + * Valid Range: 0, 1 + * + * Default Value: 1 (enabled) + */ +E1000_PARAM(CrcStripping, + "Enable CRC Stripping, disable if your BMC needs the CRC"); + +struct e1000_option { + enum { enable_option, range_option, list_option } type; + const char *name; + const char *err; + int def; + union { + /* range_option info */ + struct { + int min; + int max; + } r; + /* list_option info */ + struct { + int nr; + struct e1000_opt_list { + int i; + char *str; + } *p; + } l; + } arg; +}; + +static int e1000_validate_option(unsigned int *value, + const struct e1000_option *opt, + struct e1000_adapter *adapter) +{ + if (*value == OPTION_UNSET) { + *value = opt->def; + return 0; + } + + switch (opt->type) { + case enable_option: + switch (*value) { + case OPTION_ENABLED: + dev_info(&adapter->pdev->dev, "%s Enabled\n", + opt->name); + return 0; + case OPTION_DISABLED: + dev_info(&adapter->pdev->dev, "%s Disabled\n", + opt->name); + return 0; + } + break; + case range_option: + if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { + dev_info(&adapter->pdev->dev, "%s set to %i\n", + opt->name, *value); + return 0; + } + break; + case list_option: { + int i; + struct e1000_opt_list *ent; + + for (i = 0; i < opt->arg.l.nr; i++) { + ent = &opt->arg.l.p[i]; + if (*value == ent->i) { + if (ent->str[0] != '\0') + dev_info(&adapter->pdev->dev, "%s\n", + ent->str); + return 0; + } + } + } + break; + default: + BUG(); + } + + dev_info(&adapter->pdev->dev, "Invalid %s value specified (%i) %s\n", + opt->name, *value, opt->err); + *value = opt->def; + return -1; +} + +/** + * e1000e_check_options - Range Checking for Command Line Parameters + * @adapter: board private structure + * + * This routine checks all command line parameters for valid user + * input. If an invalid value is given, or if no user specified + * value exists, a default value is used. The final value is stored + * in a variable in the adapter structure. 
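
To make the behavior of e1000_validate_option() concrete: if a user loads the module with, say, TxIntDelay=70000, the value is not OPTION_UNSET, fails the 0..0xFFFF range check, and the driver logs roughly "Invalid Transmit Interrupt Delay value specified (70000) using default of 8" (the format string above combined with opt->name and opt->err), after which the option is reset to DEFAULT_TIDV.
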
+ **/ +void e1000e_check_options(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + int bd = adapter->bd_number; + + if (bd >= E1000_MAX_NIC) { + dev_notice(&adapter->pdev->dev, + "Warning: no configuration for board #%i\n", bd); + dev_notice(&adapter->pdev->dev, + "Using defaults for all values\n"); + } + + /* Transmit Interrupt Delay */ + { + static const struct e1000_option opt = { + .type = range_option, + .name = "Transmit Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TIDV), + .def = DEFAULT_TIDV, + .arg = { .r = { .min = MIN_TXDELAY, + .max = MAX_TXDELAY } } + }; + + if (num_TxIntDelay > bd) { + adapter->tx_int_delay = TxIntDelay[bd]; + e1000_validate_option(&adapter->tx_int_delay, &opt, + adapter); + } else { + adapter->tx_int_delay = opt.def; + } + } + /* Transmit Absolute Interrupt Delay */ + { + static const struct e1000_option opt = { + .type = range_option, + .name = "Transmit Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_TADV), + .def = DEFAULT_TADV, + .arg = { .r = { .min = MIN_TXABSDELAY, + .max = MAX_TXABSDELAY } } + }; + + if (num_TxAbsIntDelay > bd) { + adapter->tx_abs_int_delay = TxAbsIntDelay[bd]; + e1000_validate_option(&adapter->tx_abs_int_delay, &opt, + adapter); + } else { + adapter->tx_abs_int_delay = opt.def; + } + } + /* Receive Interrupt Delay */ + { + static struct e1000_option opt = { + .type = range_option, + .name = "Receive Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RDTR), + .def = DEFAULT_RDTR, + .arg = { .r = { .min = MIN_RXDELAY, + .max = MAX_RXDELAY } } + }; + + if (num_RxIntDelay > bd) { + adapter->rx_int_delay = RxIntDelay[bd]; + e1000_validate_option(&adapter->rx_int_delay, &opt, + adapter); + } else { + adapter->rx_int_delay = opt.def; + } + } + /* Receive Absolute Interrupt Delay */ + { + static const struct e1000_option opt = { + .type = range_option, + .name = "Receive Absolute Interrupt Delay", + .err = "using default of " + __MODULE_STRING(DEFAULT_RADV), + .def = DEFAULT_RADV, + .arg = { .r = { .min = MIN_RXABSDELAY, + .max = MAX_RXABSDELAY } } + }; + + if (num_RxAbsIntDelay > bd) { + adapter->rx_abs_int_delay = RxAbsIntDelay[bd]; + e1000_validate_option(&adapter->rx_abs_int_delay, &opt, + adapter); + } else { + adapter->rx_abs_int_delay = opt.def; + } + } + /* Interrupt Throttling Rate */ + { + static const struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Throttling Rate (ints/sec)", + .err = "using default of " + __MODULE_STRING(DEFAULT_ITR), + .def = DEFAULT_ITR, + .arg = { .r = { .min = MIN_ITR, + .max = MAX_ITR } } + }; + + if (num_InterruptThrottleRate > bd) { + adapter->itr = InterruptThrottleRate[bd]; + + /* Make sure a message is printed for non-special + * values. 
And in case of an invalid option, display + * warning, use default and go through itr/itr_setting + * adjustment logic below + */ + if ((adapter->itr > 4) && + e1000_validate_option(&adapter->itr, &opt, adapter)) + adapter->itr = opt.def; + } else { + /* If no option specified, use default value and go + * through the logic below to adjust itr/itr_setting + */ + adapter->itr = opt.def; + + /* Make sure a message is printed for non-special + * default values + */ + if (adapter->itr > 4) + dev_info(&adapter->pdev->dev, + "%s set to default %d\n", opt.name, + adapter->itr); + } + + adapter->itr_setting = adapter->itr; + switch (adapter->itr) { + case 0: + dev_info(&adapter->pdev->dev, "%s turned off\n", + opt.name); + break; + case 1: + dev_info(&adapter->pdev->dev, + "%s set to dynamic mode\n", opt.name); + adapter->itr = 20000; + break; + case 2: + dev_info(&adapter->pdev->dev, + "%s Invalid mode - setting default\n", + opt.name); + adapter->itr_setting = opt.def; + /* fall-through */ + case 3: + dev_info(&adapter->pdev->dev, + "%s set to dynamic conservative mode\n", + opt.name); + adapter->itr = 20000; + break; + case 4: + dev_info(&adapter->pdev->dev, + "%s set to simplified (2000-8000 ints) mode\n", + opt.name); + break; + default: + /* Save the setting, because the dynamic bits + * change itr. + * + * Clear the lower two bits because + * they are used as control. + */ + adapter->itr_setting &= ~3; + break; + } + } + /* Interrupt Mode */ + { + static struct e1000_option opt = { + .type = range_option, + .name = "Interrupt Mode", +#ifndef CONFIG_PCI_MSI + .err = "defaulting to 0 (legacy)", + .def = E1000E_INT_MODE_LEGACY, + .arg = { .r = { .min = 0, + .max = 0 } } +#endif + }; + +#ifdef CONFIG_PCI_MSI + if (adapter->flags & FLAG_HAS_MSIX) { + opt.err = kstrdup("defaulting to 2 (MSI-X)", + GFP_KERNEL); + opt.def = E1000E_INT_MODE_MSIX; + opt.arg.r.max = E1000E_INT_MODE_MSIX; + } else { + opt.err = kstrdup("defaulting to 1 (MSI)", GFP_KERNEL); + opt.def = E1000E_INT_MODE_MSI; + opt.arg.r.max = E1000E_INT_MODE_MSI; + } + + if (!opt.err) { + dev_err(&adapter->pdev->dev, + "Failed to allocate memory\n"); + return; + } +#endif + + if (num_IntMode > bd) { + unsigned int int_mode = IntMode[bd]; + + e1000_validate_option(&int_mode, &opt, adapter); + adapter->int_mode = int_mode; + } else { + adapter->int_mode = opt.def; + } + +#ifdef CONFIG_PCI_MSI + kfree(opt.err); +#endif + } + /* Smart Power Down */ + { + static const struct e1000_option opt = { + .type = enable_option, + .name = "PHY Smart Power Down", + .err = "defaulting to Disabled", + .def = OPTION_DISABLED + }; + + if (num_SmartPowerDownEnable > bd) { + unsigned int spd = SmartPowerDownEnable[bd]; + + e1000_validate_option(&spd, &opt, adapter); + if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd) + adapter->flags |= FLAG_SMART_POWER_DOWN; + } + } + /* CRC Stripping */ + { + static const struct e1000_option opt = { + .type = enable_option, + .name = "CRC Stripping", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (num_CrcStripping > bd) { + unsigned int crc_stripping = CrcStripping[bd]; + + e1000_validate_option(&crc_stripping, &opt, adapter); + if (crc_stripping == OPTION_ENABLED) { + adapter->flags2 |= FLAG2_CRC_STRIPPING; + adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; + } + } else { + adapter->flags2 |= FLAG2_CRC_STRIPPING; + adapter->flags2 |= FLAG2_DFLT_CRC_STRIPPING; + } + } + /* Kumeran Lock Loss Workaround */ + { + static const struct e1000_option opt = { + .type = enable_option, + .name = "Kumeran Lock 
Loss Workaround", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + bool enabled = opt.def; + + if (num_KumeranLockLoss > bd) { + unsigned int kmrn_lock_loss = KumeranLockLoss[bd]; + + e1000_validate_option(&kmrn_lock_loss, &opt, adapter); + enabled = kmrn_lock_loss; + } + + if (hw->mac.type == e1000_ich8lan) + e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, + enabled); + } + /* Write-protect NVM */ + { + static const struct e1000_option opt = { + .type = enable_option, + .name = "Write-protect NVM", + .err = "defaulting to Enabled", + .def = OPTION_ENABLED + }; + + if (adapter->flags & FLAG_IS_ICH) { + if (num_WriteProtectNVM > bd) { + unsigned int write_protect_nvm = + WriteProtectNVM[bd]; + e1000_validate_option(&write_protect_nvm, &opt, + adapter); + if (write_protect_nvm) + adapter->flags |= FLAG_READ_ONLY_NVM; + } else { + if (opt.def) + adapter->flags |= FLAG_READ_ONLY_NVM; + } + } + } +} diff --git a/addons/e1000e/src/4.4.180/phy.c b/addons/e1000e/src/4.4.180/phy.c new file mode 100644 index 00000000..8e674a09 --- /dev/null +++ b/addons/e1000e/src/4.4.180/phy.c @@ -0,0 +1,3238 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#include "e1000.h" + +static s32 e1000_wait_autoneg(struct e1000_hw *hw); +static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read, bool page_set); +static u32 e1000_get_phy_addr_for_hv_page(u32 page); +static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, + u16 *data, bool read); + +/* Cable length tables */ +static const u16 e1000_m88_cable_length_table[] = { + 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED +}; + +#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_m88_cable_length_table) + +static const u16 e1000_igp_2_cable_length_table[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, + 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, + 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, + 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, + 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, + 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, + 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, + 124 +}; + +#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ + ARRAY_SIZE(e1000_igp_2_cable_length_table) + +/** + * e1000e_check_reset_block_generic - Check if PHY reset is blocked + * @hw: pointer to the HW structure + * + * Read the PHY management control register and check whether a PHY reset + * is blocked. If a reset is not blocked return 0, otherwise + * return E1000_BLK_PHY_RESET (12). 
+ **/ +s32 e1000e_check_reset_block_generic(struct e1000_hw *hw) +{ + u32 manc; + + manc = er32(MANC); + + return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0; +} + +/** + * e1000e_get_phy_id - Retrieve the PHY ID and revision + * @hw: pointer to the HW structure + * + * Reads the PHY registers and stores the PHY ID and possibly the PHY + * revision in the hardware structure. + **/ +s32 e1000e_get_phy_id(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val = 0; + u16 phy_id; + u16 retry_count = 0; + + if (!phy->ops.read_reg) + return 0; + + while (retry_count < 2) { + ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id); + if (ret_val) + return ret_val; + + phy->id = (u32)(phy_id << 16); + usleep_range(20, 40); + ret_val = e1e_rphy(hw, MII_PHYSID2, &phy_id); + if (ret_val) + return ret_val; + + phy->id |= (u32)(phy_id & PHY_REVISION_MASK); + phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK); + + if (phy->id != 0 && phy->id != PHY_REVISION_MASK) + return 0; + + retry_count++; + } + + return 0; +} + +/** + * e1000e_phy_reset_dsp - Reset PHY DSP + * @hw: pointer to the HW structure + * + * Reset the digital signal processor. + **/ +s32 e1000e_phy_reset_dsp(struct e1000_hw *hw) +{ + s32 ret_val; + + ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); + if (ret_val) + return ret_val; + + return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0); +} + +/** + * e1000e_read_phy_reg_mdic - Read MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the MDI control register in the PHY at offset and stores the + * information read to data. + **/ +s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = ((offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_READ)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Read did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + e_dbg("MDI Read offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + *data = (u16)mdic; + + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == e1000_pch2lan) + udelay(100); + + return 0; +} + +/** + * e1000e_write_phy_reg_mdic - Write MDI control register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write to register at offset + * + * Writes data to MDI control register in the PHY at offset. 
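
A sketch of how the MDIC command word used by these read/write helpers is assembled. The bit positions shown are the conventional e1000 ones from the driver's headers; treat the exact values as illustrative:

/* MDIC layout, as used by e1000e_read/write_phy_reg_mdic():
 *   bits [15:0]  data (read result / value to write)
 *   bits [20:16] PHY register offset  (E1000_MDIC_REG_SHIFT = 16)
 *   bits [25:21] PHY address          (E1000_MDIC_PHY_SHIFT = 21)
 *   bits [27:26] opcode: 01 = write, 10 = read
 *   bit  28      ready (set by hardware when the transaction completes)
 *   bit  30      error
 */
static inline u32 mdic_read_cmd(u32 phy_addr, u32 reg)
{
	return (reg << 16) | (phy_addr << 21) | (2u << 26); /* OP_READ */
}
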
+ **/ +s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data) +{ + struct e1000_phy_info *phy = &hw->phy; + u32 i, mdic = 0; + + if (offset > MAX_PHY_REG_ADDRESS) { + e_dbg("PHY Address %d is out of range\n", offset); + return -E1000_ERR_PARAM; + } + + /* Set up Op-code, Phy Address, and register offset in the MDI + * Control register. The MAC will take care of interfacing with the + * PHY to retrieve the desired data. + */ + mdic = (((u32)data) | + (offset << E1000_MDIC_REG_SHIFT) | + (phy->addr << E1000_MDIC_PHY_SHIFT) | + (E1000_MDIC_OP_WRITE)); + + ew32(MDIC, mdic); + + /* Poll the ready bit to see if the MDI read completed + * Increasing the time out as testing showed failures with + * the lower time out + */ + for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) { + udelay(50); + mdic = er32(MDIC); + if (mdic & E1000_MDIC_READY) + break; + } + if (!(mdic & E1000_MDIC_READY)) { + e_dbg("MDI Write did not complete\n"); + return -E1000_ERR_PHY; + } + if (mdic & E1000_MDIC_ERROR) { + e_dbg("MDI Error\n"); + return -E1000_ERR_PHY; + } + if (((mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT) != offset) { + e_dbg("MDI Write offset error - requested %d, returned %d\n", + offset, + (mdic & E1000_MDIC_REG_MASK) >> E1000_MDIC_REG_SHIFT); + return -E1000_ERR_PHY; + } + + /* Allow some time after each MDIC transaction to avoid + * reading duplicate data in the next MDIC transaction. + */ + if (hw->mac.type == e1000_pch2lan) + udelay(100); + + return 0; +} + +/** + * e1000e_read_phy_reg_m88 - Read m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000e_write_phy_reg_m88 - Write m88 PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_set_page_igp - Set page as on IGP-like PHY(s) + * @hw: pointer to the HW structure + * @page: page to set (shifted left when necessary) + * + * Sets PHY page required for PHY register access. Assumes semaphore is + * already acquired. Note, this function sets phy.addr to 1 so the caller + * must set it appropriately (if necessary) after this function returns. 
+ **/ +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page) +{ + e_dbg("Setting page 0x%x\n", page); + + hw->phy.addr = 1; + + return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page); +} + +/** + * __e1000e_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + s32 ret_val = 0; + + if (!locked) { + if (!hw->phy.ops.acquire) + return 0; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000e_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000e_read_phy_reg_mdic(hw, + MAX_PHY_REG_ADDRESS & offset, + data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000e_read_phy_reg_igp - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores the + * retrieved information in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000e_read_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000e_read_phy_reg_igp_locked - Read igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000e_read_phy_reg_igp(hw, offset, data, true); +} + +/** + * e1000e_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + s32 ret_val = 0; + + if (!locked) { + if (!hw->phy.ops.acquire) + return 0; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + if (offset > MAX_PHY_MULTI_PAGE_REG) + ret_val = e1000e_write_phy_reg_mdic(hw, + IGP01E1000_PHY_PAGE_SELECT, + (u16)offset); + if (!ret_val) + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & + offset, data); + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000e_write_phy_reg_igp - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
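
Worth spelling out the paging convention the IGP helpers rely on: an offset larger than MAX_PHY_MULTI_PAGE_REG carries the page in its upper bits, so the helper first writes the whole offset to IGP01E1000_PHY_PAGE_SELECT and then issues the MDIC access with only the low bits (MAX_PHY_REG_ADDRESS masks them out). A hypothetical paged read, with the offset value purely illustrative:

/* Offset 0x11C8 exceeds MAX_PHY_MULTI_PAGE_REG, so the helper writes
 * 0x11C8 to the page-select register, then reads MDIC register
 * 0x11C8 & MAX_PHY_REG_ADDRESS == 0x08 on that page.
 */
ret_val = e1000e_read_phy_reg_igp(hw, 0x11C8, &data);
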
+ **/ +s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000e_write_phy_reg_igp(hw, offset, data, false); +} + +/** + * e1000e_write_phy_reg_igp_locked - Write igp PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. + * Assumes semaphore already acquired. + **/ +s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000e_write_phy_reg_igp(hw, offset, data, true); +} + +/** + * __e1000_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then reads the PHY register at offset + * using the kumeran interface. The information retrieved is stored in data. + * Release any acquired semaphores before exiting. + **/ +static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked) +{ + u32 kmrnctrlsta; + + if (!locked) { + s32 ret_val = 0; + + if (!hw->phy.ops.acquire) + return 0; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + kmrnctrlsta = er32(KMRNCTRLSTA); + *data = (u16)kmrnctrlsta; + + if (!locked) + hw->phy.ops.release(hw); + + return 0; +} + +/** + * e1000e_read_kmrn_reg - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset using the + * kumeran interface. The information retrieved is stored in data. + * Release the acquired semaphore before exiting. + **/ +s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000e_read_kmrn_reg_locked - Read kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset using the kumeran interface. The + * information retrieved is stored in data. + * Assumes semaphore already acquired. + **/ +s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_kmrn_reg(hw, offset, data, true); +} + +/** + * __e1000_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary. Then write the data to PHY register + * at the offset using the kumeran interface. Release any acquired semaphores + * before exiting. 
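
For orientation: the Kumeran helpers drive the same kind of transaction through the KMRNCTRLSTA register rather than MDIC. The target offset is shifted into its field by E1000_KMRNCTRLSTA_OFFSET_SHIFT and masked with E1000_KMRNCTRLSTA_OFFSET; a read additionally sets E1000_KMRNCTRLSTA_REN and, after the 2 microsecond settle delay, returns its result in the low 16 bits of the register, while a write simply carries its payload in those low bits.
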
+ **/ +static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data, + bool locked) +{ + u32 kmrnctrlsta; + + if (!locked) { + s32 ret_val = 0; + + if (!hw->phy.ops.acquire) + return 0; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) & + E1000_KMRNCTRLSTA_OFFSET) | data; + ew32(KMRNCTRLSTA, kmrnctrlsta); + e1e_flush(); + + udelay(2); + + if (!locked) + hw->phy.ops.release(hw); + + return 0; +} + +/** + * e1000e_write_kmrn_reg - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to the PHY register at the offset + * using the kumeran interface. Release the acquired semaphore before exiting. + **/ +s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, false); +} + +/** + * e1000e_write_kmrn_reg_locked - Write kumeran register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Write the data to PHY register at the offset using the kumeran interface. + * Assumes semaphore already acquired. + **/ +s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_kmrn_reg(hw, offset, data, true); +} + +/** + * e1000_set_master_slave_mode - Setup PHY for Master/slave mode + * @hw: pointer to the HW structure + * + * Sets up Master/slave mode + **/ +static s32 e1000_set_master_slave_mode(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Resolve Master/Slave mode */ + ret_val = e1e_rphy(hw, MII_CTRL1000, &phy_data); + if (ret_val) + return ret_val; + + /* load defaults for future use */ + hw->phy.original_ms_type = (phy_data & CTL1000_ENABLE_MASTER) ? + ((phy_data & CTL1000_AS_MASTER) ? + e1000_ms_force_master : e1000_ms_force_slave) : e1000_ms_auto; + + switch (hw->phy.ms_type) { + case e1000_ms_force_master: + phy_data |= (CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER); + break; + case e1000_ms_force_slave: + phy_data |= CTL1000_ENABLE_MASTER; + phy_data &= ~(CTL1000_AS_MASTER); + break; + case e1000_ms_auto: + phy_data &= ~CTL1000_ENABLE_MASTER; + /* fall-through */ + default: + break; + } + + return e1e_wphy(hw, MII_CTRL1000, phy_data); +} + +/** + * e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link + * @hw: pointer to the HW structure + * + * Sets up Carrier-sense on Transmit and downshift values. + **/ +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_data; + + /* Enable CRS on Tx. This must be set for half-duplex operation. 
*/ + ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= I82577_CFG_ASSERT_CRS_ON_TX; + + /* Enable downshift */ + phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; + + ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data); + if (ret_val) + return ret_val; + + /* Set MDI/MDIX mode */ + ret_val = e1e_rphy(hw, I82577_PHY_CTRL_2, &phy_data); + if (ret_val) + return ret_val; + phy_data &= ~I82577_PHY_CTRL2_MDIX_CFG_MASK; + /* Options: + * 0 - Auto (default) + * 1 - MDI mode + * 2 - MDI-X mode + */ + switch (hw->phy.mdix) { + case 1: + break; + case 2: + phy_data |= I82577_PHY_CTRL2_MANUAL_MDIX; + break; + case 0: + default: + phy_data |= I82577_PHY_CTRL2_AUTO_MDI_MDIX; + break; + } + ret_val = e1e_wphy(hw, I82577_PHY_CTRL_2, phy_data); + if (ret_val) + return ret_val; + + return e1000_set_master_slave_mode(hw); +} + +/** + * e1000e_copper_link_setup_m88 - Setup m88 PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up MDI/MDI-X and polarity for m88 PHY's. If necessary, transmit clock + * and downshift values are set also. + **/ +s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + + /* Enable CRS on Tx. This must be set for half-duplex operation. */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* For BM PHY this bit is downshift enable */ + if (phy->type != e1000_phy_bm) + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + + /* Options: + * MDI/MDI-X = 0 (default) + * 0 - Auto for all speeds + * 1 - MDI mode + * 2 - MDI-X mode + * 3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes) + */ + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + + switch (phy->mdix) { + case 1: + phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE; + break; + case 2: + phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE; + break; + case 3: + phy_data |= M88E1000_PSCR_AUTO_X_1000T; + break; + case 0: + default: + phy_data |= M88E1000_PSCR_AUTO_X_MODE; + break; + } + + /* Options: + * disable_polarity_correction = 0 (default) + * Automatic Correction for Reversed Cable Polarity + * 0 - Disabled + * 1 - Enabled + */ + phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL; + if (phy->disable_polarity_correction) + phy_data |= M88E1000_PSCR_POLARITY_REVERSAL; + + /* Enable downshift on BM (disabled by default) */ + if (phy->type == e1000_phy_bm) { + /* For 82574/82583, first disable then enable downshift */ + if (phy->id == BME1000_E_PHY_ID_R2) { + phy_data &= ~BME1000_PSCR_ENABLE_DOWNSHIFT; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, + phy_data); + if (ret_val) + return ret_val; + /* Commit the changes. */ + ret_val = phy->ops.commit(hw); + if (ret_val) { + e_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + + phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT; + } + + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + if ((phy->type == e1000_phy_m88) && + (phy->revision < E1000_REVISION_4) && + (phy->id != BME1000_E_PHY_ID_R2)) { + /* Force TX_CLK in the Extended PHY Specific Control Register + * to 25MHz clock. + */ + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_EPSCR_TX_CLK_25; + + if ((phy->revision == 2) && (phy->id == M88E1111_I_PHY_ID)) { + /* 82573L PHY - set the downshift counter to 5x. 
*/ + phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK; + phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X; + } else { + /* Configure Master and Slave downshift values */ + phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK); + phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X | + M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X); + } + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) { + /* Set PHY page 0, register 29 to 0x0003 */ + ret_val = e1e_wphy(hw, 29, 0x0003); + if (ret_val) + return ret_val; + + /* Set PHY page 0, register 30 to 0x0000 */ + ret_val = e1e_wphy(hw, 30, 0x0000); + if (ret_val) + return ret_val; + } + + /* Commit the changes. */ + if (phy->ops.commit) { + ret_val = phy->ops.commit(hw); + if (ret_val) { + e_dbg("Error committing the PHY changes\n"); + return ret_val; + } + } + + if (phy->type == e1000_phy_82578) { + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* 82578 PHY - set the downshift count to 1x. */ + phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; + phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * e1000e_copper_link_setup_igp - Setup igp PHY's for copper link + * @hw: pointer to the HW structure + * + * Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for + * igp PHY's. + **/ +s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1000_phy_hw_reset(hw); + if (ret_val) { + e_dbg("Error resetting the PHY.\n"); + return ret_val; + } + + /* Wait 100ms for MAC to configure PHY from NVM settings, to avoid + * timeout issues when LFS is enabled. + */ + msleep(100); + + /* disable lplu d0 during driver init */ + if (hw->phy.ops.set_d0_lplu_state) { + ret_val = hw->phy.ops.set_d0_lplu_state(hw, false); + if (ret_val) { + e_dbg("Error Disabling LPLU D0\n"); + return ret_val; + } + } + /* Configure mdi-mdix settings */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCR_AUTO_MDIX; + + switch (phy->mdix) { + case 1: + data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 2: + data |= IGP01E1000_PSCR_FORCE_MDI_MDIX; + break; + case 0: + default: + data |= IGP01E1000_PSCR_AUTO_MDIX; + break; + } + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data); + if (ret_val) + return ret_val; + + /* set auto-master slave resolution settings */ + if (hw->mac.autoneg) { + /* when autonegotiation advertisement is only 1000Mbps then we + * should disable SmartSpeed and enable Auto MasterSlave + * resolution as hardware default. 
+ */ + if (phy->autoneg_advertised == ADVERTISE_1000_FULL) { + /* Disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + + /* Set auto Master/Slave resolution process */ + ret_val = e1e_rphy(hw, MII_CTRL1000, &data); + if (ret_val) + return ret_val; + + data &= ~CTL1000_ENABLE_MASTER; + ret_val = e1e_wphy(hw, MII_CTRL1000, data); + if (ret_val) + return ret_val; + } + + ret_val = e1000_set_master_slave_mode(hw); + } + + return ret_val; +} + +/** + * e1000_phy_setup_autoneg - Configure PHY for auto-negotiation + * @hw: pointer to the HW structure + * + * Reads the MII auto-neg advertisement register and/or the 1000T control + * register and if the PHY is already setup for auto-negotiation, then + * return successful. Otherwise, setup advertisement and flow control to + * the appropriate values for the wanted auto-negotiation. + **/ +static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 mii_autoneg_adv_reg; + u16 mii_1000t_ctrl_reg = 0; + + phy->autoneg_advertised &= phy->autoneg_mask; + + /* Read the MII Auto-Neg Advertisement Register (Address 4). */ + ret_val = e1e_rphy(hw, MII_ADVERTISE, &mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) { + /* Read the MII 1000Base-T Control Register (Address 9). */ + ret_val = e1e_rphy(hw, MII_CTRL1000, &mii_1000t_ctrl_reg); + if (ret_val) + return ret_val; + } + + /* Need to parse both autoneg_advertised and fc and set up + * the appropriate PHY registers. First we will parse for + * autoneg_advertised software override. Since we can advertise + * a plethora of combinations, we need to check each bit + * individually. + */ + + /* First we clear all the 10/100 mb speed bits in the Auto-Neg + * Advertisement Register (Address 4) and the 1000 mb speed bits in + * the 1000Base-T Control Register (Address 9). + */ + mii_autoneg_adv_reg &= ~(ADVERTISE_100FULL | + ADVERTISE_100HALF | + ADVERTISE_10FULL | ADVERTISE_10HALF); + mii_1000t_ctrl_reg &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); + + e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised); + + /* Do we want to advertise 10 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_HALF) { + e_dbg("Advertise 10mb Half duplex\n"); + mii_autoneg_adv_reg |= ADVERTISE_10HALF; + } + + /* Do we want to advertise 10 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_10_FULL) { + e_dbg("Advertise 10mb Full duplex\n"); + mii_autoneg_adv_reg |= ADVERTISE_10FULL; + } + + /* Do we want to advertise 100 Mb Half Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_HALF) { + e_dbg("Advertise 100mb Half duplex\n"); + mii_autoneg_adv_reg |= ADVERTISE_100HALF; + } + + /* Do we want to advertise 100 Mb Full Duplex? */ + if (phy->autoneg_advertised & ADVERTISE_100_FULL) { + e_dbg("Advertise 100mb Full duplex\n"); + mii_autoneg_adv_reg |= ADVERTISE_100FULL; + } + + /* We do not allow the Phy to advertise 1000 Mb Half Duplex */ + if (phy->autoneg_advertised & ADVERTISE_1000_HALF) + e_dbg("Advertise 1000mb Half duplex request denied!\n"); + + /* Do we want to advertise 1000 Mb Full Duplex? 
*/ + if (phy->autoneg_advertised & ADVERTISE_1000_FULL) { + e_dbg("Advertise 1000mb Full duplex\n"); + mii_1000t_ctrl_reg |= ADVERTISE_1000FULL; + } + + /* Check for a software override of the flow control settings, and + * setup the PHY advertisement registers accordingly. If + * auto-negotiation is enabled, then software will have to set the + * "PAUSE" bits to the correct value in the Auto-Negotiation + * Advertisement Register (MII_ADVERTISE) and re-start auto- + * negotiation. + * + * The possible values of the "fc" parameter are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames + * but we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: No software override. The flow control configuration + * in the EEPROM is used. + */ + switch (hw->fc.current_mode) { + case e1000_fc_none: + /* Flow control (Rx & Tx) is completely disabled by a + * software over-ride. + */ + mii_autoneg_adv_reg &= + ~(ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + break; + case e1000_fc_rx_pause: + /* Rx Flow control is enabled, and Tx Flow control is + * disabled, by a software over-ride. + * + * Since there really isn't a way to advertise that we are + * capable of Rx Pause ONLY, we will advertise that we + * support both symmetric and asymmetric Rx PAUSE. Later + * (in e1000e_config_fc_after_link_up) we will disable the + * hw's ability to send PAUSE frames. + */ + mii_autoneg_adv_reg |= + (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + break; + case e1000_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled, by a software over-ride. + */ + mii_autoneg_adv_reg |= ADVERTISE_PAUSE_ASYM; + mii_autoneg_adv_reg &= ~ADVERTISE_PAUSE_CAP; + break; + case e1000_fc_full: + /* Flow control (both Rx and Tx) is enabled by a software + * over-ride. + */ + mii_autoneg_adv_reg |= + (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP); + break; + default: + e_dbg("Flow control param set incorrectly\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_wphy(hw, MII_ADVERTISE, mii_autoneg_adv_reg); + if (ret_val) + return ret_val; + + e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); + + if (phy->autoneg_mask & ADVERTISE_1000_FULL) + ret_val = e1e_wphy(hw, MII_CTRL1000, mii_1000t_ctrl_reg); + + return ret_val; +} + +/** + * e1000_copper_link_autoneg - Setup/Enable autoneg for copper link + * @hw: pointer to the HW structure + * + * Performs initial bounds checking on autoneg advertisement parameter, then + * configure to advertise the full capability. Setup the PHY to autoneg + * and restart the negotiation process between the link partner. If + * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. + **/ +static s32 e1000_copper_link_autoneg(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_ctrl; + + /* Perform some bounds checking on the autoneg advertisement + * parameter. + */ + phy->autoneg_advertised &= phy->autoneg_mask; + + /* If autoneg_advertised is zero, we assume it was not defaulted + * by the calling code so we set to advertise full capability. 
+ */ + if (!phy->autoneg_advertised) + phy->autoneg_advertised = phy->autoneg_mask; + + e_dbg("Reconfiguring auto-neg advertisement params\n"); + ret_val = e1000_phy_setup_autoneg(hw); + if (ret_val) { + e_dbg("Error Setting up Auto-Negotiation\n"); + return ret_val; + } + e_dbg("Restarting Auto-Neg\n"); + + /* Restart auto-negotiation by setting the Auto Neg Enable bit and + * the Auto Neg Restart bit in the PHY control register. + */ + ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= (BMCR_ANENABLE | BMCR_ANRESTART); + ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl); + if (ret_val) + return ret_val; + + /* Does the user want to wait for Auto-Neg to complete here, or + * check at a later time (for example, callback routine). + */ + if (phy->autoneg_wait_to_complete) { + ret_val = e1000_wait_autoneg(hw); + if (ret_val) { + e_dbg("Error while waiting for autoneg to complete\n"); + return ret_val; + } + } + + hw->mac.get_link_status = true; + + return ret_val; +} + +/** + * e1000e_setup_copper_link - Configure copper link settings + * @hw: pointer to the HW structure + * + * Calls the appropriate function to configure the link for auto-neg or forced + * speed and duplex. Then we check for link, once link is established calls + * to configure collision distance and flow control are called. If link is + * not established, we return -E1000_ERR_PHY (-2). + **/ +s32 e1000e_setup_copper_link(struct e1000_hw *hw) +{ + s32 ret_val; + bool link; + + if (hw->mac.autoneg) { + /* Setup autoneg and flow control advertisement and perform + * autonegotiation. + */ + ret_val = e1000_copper_link_autoneg(hw); + if (ret_val) + return ret_val; + } else { + /* PHY will be set to 10H, 10F, 100H or 100F + * depending on user settings. + */ + e_dbg("Forcing Speed and Duplex\n"); + ret_val = hw->phy.ops.force_speed_duplex(hw); + if (ret_val) { + e_dbg("Error Forcing Speed and Duplex\n"); + return ret_val; + } + } + + /* Check link status. Wait up to 100 microseconds for link to become + * valid. + */ + ret_val = e1000e_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT, 10, + &link); + if (ret_val) + return ret_val; + + if (link) { + e_dbg("Valid link established!!!\n"); + hw->mac.ops.config_collision_dist(hw); + ret_val = e1000e_config_fc_after_link_up(hw); + } else { + e_dbg("Unable to establish link!!!\n"); + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Waits for link and returns + * successful if link up is successful, else -E1000_ERR_PHY (-2). + **/ +s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); + if (ret_val) + return ret_val; + + /* Clear Auto-Crossover to force MDI manually. IGP requires MDI + * forced whenever speed and duplex are forced. 
+ */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX; + phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX; + + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("IGP PSCR: %X\n", phy_data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on IGP phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + e_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + } + + return ret_val; +} + +/** + * e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. Clears the + * auto-crossover to force MDI manually. Resets the PHY to commit the + * changes. If time expires while waiting for link up, we reset the DSP. + * After reset, TX_CLK and CRS on Tx must be set. Return successful upon + * successful completion, else return corresponding error code. + **/ +s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI + * forced whenever speed and duplex are forced. + */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data &= ~M88E1000_PSCR_AUTO_X_MODE; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + e_dbg("M88E1000 PSCR: %X\n", phy_data); + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); + if (ret_val) + return ret_val; + + /* Reset the phy to commit changes. */ + if (hw->phy.ops.commit) { + ret_val = hw->phy.ops.commit(hw); + if (ret_val) + return ret_val; + } + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) { + if (hw->phy.type != e1000_phy_m88) { + e_dbg("Link taking longer than expected.\n"); + } else { + /* We didn't get link. + * Reset the DSP and cross our fingers. + */ + ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, + 0x001d); + if (ret_val) + return ret_val; + ret_val = e1000e_phy_reset_dsp(hw); + if (ret_val) + return ret_val; + } + } + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + if (hw->phy.type != e1000_phy_m88) + return 0; + + ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + /* Resetting the phy means we need to re-force TX_CLK in the + * Extended PHY Specific Control Register to 25MHz clock from + * the reset value of 2.5MHz. + */ + phy_data |= M88E1000_EPSCR_TX_CLK_25; + ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data); + if (ret_val) + return ret_val; + + /* In addition, we must re-enable CRS on Tx for both half and full + * duplex. 
+ */ + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX; + ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data); + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex + * @hw: pointer to the HW structure + * + * Forces the speed and duplex settings of the PHY. + * This is a function pointer entry point only called by + * PHY setup routines. + **/ +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1e_rphy(hw, MII_BMCR, &data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &data); + + ret_val = e1e_wphy(hw, MII_BMCR, data); + if (ret_val) + return ret_val; + + /* Disable MDI-X support for 10/100 */ + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + data &= ~IFE_PMC_AUTO_MDIX; + data &= ~IFE_PMC_FORCE_MDIX; + + ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data); + if (ret_val) + return ret_val; + + e_dbg("IFE PMC: %X\n", data); + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on IFE phy.\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + e_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + } + + return 0; +} + +/** + * e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex + * @hw: pointer to the HW structure + * @phy_ctrl: pointer to current value of MII_BMCR + * + * Forces speed and duplex on the PHY by doing the following: disable flow + * control, force speed/duplex on the MAC, disable auto speed detection, + * disable auto-negotiation, configure duplex, configure speed, configure + * the collision distance, write configuration to CTRL register. The + * caller must write to the MII_BMCR register for these settings to + * take affect. + **/ +void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl) +{ + struct e1000_mac_info *mac = &hw->mac; + u32 ctrl; + + /* Turn off flow control when forcing speed/duplex */ + hw->fc.current_mode = e1000_fc_none; + + /* Force speed/duplex on the mac */ + ctrl = er32(CTRL); + ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); + ctrl &= ~E1000_CTRL_SPD_SEL; + + /* Disable Auto Speed Detection */ + ctrl &= ~E1000_CTRL_ASDE; + + /* Disable autoneg on the phy */ + *phy_ctrl &= ~BMCR_ANENABLE; + + /* Forcing Full or Half Duplex? */ + if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) { + ctrl &= ~E1000_CTRL_FD; + *phy_ctrl &= ~BMCR_FULLDPLX; + e_dbg("Half Duplex\n"); + } else { + ctrl |= E1000_CTRL_FD; + *phy_ctrl |= BMCR_FULLDPLX; + e_dbg("Full Duplex\n"); + } + + /* Forcing 10mb or 100mb? 
*/ + if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) { + ctrl |= E1000_CTRL_SPD_100; + *phy_ctrl |= BMCR_SPEED100; + *phy_ctrl &= ~BMCR_SPEED1000; + e_dbg("Forcing 100mb\n"); + } else { + ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100); + *phy_ctrl &= ~(BMCR_SPEED1000 | BMCR_SPEED100); + e_dbg("Forcing 10mb\n"); + } + + hw->mac.ops.config_collision_dist(hw); + + ew32(CTRL, ctrl); +} + +/** + * e1000e_set_d3_lplu_state - Sets low power link up state for D3 + * @hw: pointer to the HW structure + * @active: boolean used to enable/disable lplu + * + * Success returns 0, Failure returns 1 + * + * The low power link up (lplu) state is set to the power management level D3 + * and SmartSpeed is disabled when active is true, else clear lplu for D3 + * and enable Smartspeed. LPLU and Smartspeed are mutually exclusive. LPLU + * is used during Dx states where the power conservation is most important. + * During driver activity, SmartSpeed should be enabled so performance is + * maintained. + **/ +s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data); + if (ret_val) + return ret_val; + + if (!active) { + data &= ~IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + /* LPLU and SmartSpeed are mutually exclusive. LPLU is used + * during Dx states where the power conservation is most + * important. During driver activity we should enable + * SmartSpeed, so performance is maintained. + */ + if (phy->smart_speed == e1000_smart_speed_on) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data |= IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } else if (phy->smart_speed == e1000_smart_speed_off) { + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, + &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, + data); + if (ret_val) + return ret_val; + } + } else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) || + (phy->autoneg_advertised == E1000_ALL_NOT_GIG) || + (phy->autoneg_advertised == E1000_ALL_10_SPEED)) { + data |= IGP02E1000_PM_D3_LPLU; + ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data); + if (ret_val) + return ret_val; + + /* When LPLU is enabled, we should disable SmartSpeed */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data); + if (ret_val) + return ret_val; + + data &= ~IGP01E1000_PSCFR_SMART_SPEED; + ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data); + } + + return ret_val; +} + +/** + * e1000e_check_downshift - Checks whether a downshift in speed occurred + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns 1 + * + * A downshift is detected by querying the PHY link health. 
+ **/ +s32 e1000e_check_downshift(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + switch (phy->type) { + case e1000_phy_m88: + case e1000_phy_gg82563: + case e1000_phy_bm: + case e1000_phy_82578: + offset = M88E1000_PHY_SPEC_STATUS; + mask = M88E1000_PSSR_DOWNSHIFT; + break; + case e1000_phy_igp_2: + case e1000_phy_igp_3: + offset = IGP01E1000_PHY_LINK_HEALTH; + mask = IGP01E1000_PLHR_SS_DOWNGRADE; + break; + default: + /* speed downshift not supported */ + phy->speed_downgraded = false; + return 0; + } + + ret_val = e1e_rphy(hw, offset, &phy_data); + + if (!ret_val) + phy->speed_downgraded = !!(phy_data & mask); + + return ret_val; +} + +/** + * e1000_check_polarity_m88 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. + **/ +s32 e1000_check_polarity_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data); + + if (!ret_val) + phy->cable_polarity = ((data & M88E1000_PSSR_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_check_polarity_igp - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY port status register, and the + * current speed (since there is no polarity at 100Mbps). + **/ +s32 e1000_check_polarity_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data, offset, mask; + + /* Polarity is determined based on the speed of + * our connection. + */ + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + offset = IGP01E1000_PHY_PCS_INIT_REG; + mask = IGP01E1000_PHY_POLARITY_MASK; + } else { + /* This really only applies to 10Mbps since + * there is no polarity for 100Mbps (always 0). + */ + offset = IGP01E1000_PHY_PORT_STATUS; + mask = IGP01E1000_PSSR_POLARITY_REVERSED; + } + + ret_val = e1e_rphy(hw, offset, &data); + + if (!ret_val) + phy->cable_polarity = ((data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_check_polarity_ife - Check cable polarity for IFE PHY + * @hw: pointer to the HW structure + * + * Polarity is determined on the polarity reversal feature being enabled. + **/ +s32 e1000_check_polarity_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, offset, mask; + + /* Polarity is determined based on the reversal feature being enabled. + */ + if (phy->polarity_correction) { + offset = IFE_PHY_EXTENDED_STATUS_CONTROL; + mask = IFE_PESC_POLARITY_REVERSED; + } else { + offset = IFE_PHY_SPECIAL_CONTROL; + mask = IFE_PSC_FORCE_POLARITY; + } + + ret_val = e1e_rphy(hw, offset, &phy_data); + + if (!ret_val) + phy->cable_polarity = ((phy_data & mask) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_wait_autoneg - Wait for auto-neg completion + * @hw: pointer to the HW structure + * + * Waits for auto-negotiation to complete or for the auto-negotiation time + * limit to expire, which ever happens first. 
+ **/ +static s32 e1000_wait_autoneg(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 i, phy_status; + + /* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */ + for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) { + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); + if (ret_val) + break; + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); + if (ret_val) + break; + if (phy_status & BMSR_ANEGCOMPLETE) + break; + msleep(100); + } + + /* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation + * has completed. + */ + return ret_val; +} + +/** + * e1000e_phy_has_link_generic - Polls PHY for link + * @hw: pointer to the HW structure + * @iterations: number of times to poll for link + * @usec_interval: delay between polling attempts + * @success: pointer to whether polling was successful or not + * + * Polls the PHY status register for link, 'iterations' number of times. + **/ +s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success) +{ + s32 ret_val = 0; + u16 i, phy_status; + + *success = false; + for (i = 0; i < iterations; i++) { + /* Some PHYs require the MII_BMSR register to be read + * twice due to the link bit being sticky. No harm doing + * it across the board. + */ + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); + if (ret_val) { + /* If the first read fails, another entity may have + * ownership of the resources, wait and try again to + * see if they have relinquished the resources yet. + */ + if (usec_interval >= 1000) + msleep(usec_interval / 1000); + else + udelay(usec_interval); + } + ret_val = e1e_rphy(hw, MII_BMSR, &phy_status); + if (ret_val) + break; + if (phy_status & BMSR_LSTATUS) { + *success = true; + break; + } + if (usec_interval >= 1000) + msleep(usec_interval / 1000); + else + udelay(usec_interval); + } + + return ret_val; +} + +/** + * e1000e_get_cable_length_m88 - Determine cable length for m88 PHY + * @hw: pointer to the HW structure + * + * Reads the PHY specific status register to retrieve the cable length + * information. The cable length is determined by averaging the minimum and + * maximum values to get the "average" cable length. The m88 PHY has four + * possible cable length values, which are: + * Register Value Cable Length + * 0 < 50 meters + * 1 50 - 80 meters + * 2 80 - 110 meters + * 3 110 - 140 meters + * 4 > 140 meters + **/ +s32 e1000e_get_cable_length_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, index; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + index = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >> + M88E1000_PSSR_CABLE_LENGTH_SHIFT); + + if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) + return -E1000_ERR_PHY; + + phy->min_cable_length = e1000_m88_cable_length_table[index]; + phy->max_cable_length = e1000_m88_cable_length_table[index + 1]; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return 0; +} + +/** + * e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY + * @hw: pointer to the HW structure + * + * The automatic gain control (agc) normalizes the amplitude of the + * received signal, adjusting for the attenuation produced by the + * cable. By reading the AGC registers, which represent the + * combination of coarse and fine gain value, the value can be put + * into a lookup table to obtain the approximate cable length + * for each channel. 
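+ *
+ * Worked extraction (illustrative, using the constants from phy.h in
+ * this addon): with IGP02E1000_AGC_LENGTH_SHIFT = 9 and
+ * IGP02E1000_AGC_LENGTH_MASK = 0x7F, a raw AGC reading of 0x2600
+ * decodes as
+ *
+ *   cur_agc_index = (0x2600 >> 9) & 0x7F = 19
+ *
+ * which indexes e1000_igp_2_cable_length_table[] below.
+ *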
+ **/ +s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, i, agc_value = 0; + u16 cur_agc_index, max_agc_index = 0; + u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; + static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = { + IGP02E1000_PHY_AGC_A, + IGP02E1000_PHY_AGC_B, + IGP02E1000_PHY_AGC_C, + IGP02E1000_PHY_AGC_D + }; + + /* Read the AGC registers for all channels */ + for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { + ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data); + if (ret_val) + return ret_val; + + /* Getting bits 15:9, which represent the combination of + * coarse and fine gain values. The result is a number + * that can be put into the lookup table to obtain the + * approximate cable length. + */ + cur_agc_index = ((phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) & + IGP02E1000_AGC_LENGTH_MASK); + + /* Array index bound check. */ + if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) || + (cur_agc_index == 0)) + return -E1000_ERR_PHY; + + /* Remove min & max AGC values from calculation. */ + if (e1000_igp_2_cable_length_table[min_agc_index] > + e1000_igp_2_cable_length_table[cur_agc_index]) + min_agc_index = cur_agc_index; + if (e1000_igp_2_cable_length_table[max_agc_index] < + e1000_igp_2_cable_length_table[cur_agc_index]) + max_agc_index = cur_agc_index; + + agc_value += e1000_igp_2_cable_length_table[cur_agc_index]; + } + + agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] + + e1000_igp_2_cable_length_table[max_agc_index]); + agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2); + + /* Calculate cable length with the error range of +/- 10 meters. */ + phy->min_cable_length = (((agc_value - IGP02E1000_AGC_RANGE) > 0) ? + (agc_value - IGP02E1000_AGC_RANGE) : 0); + phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE; + + phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; + + return 0; +} + +/** + * e1000e_get_phy_info_m88 - Retrieve PHY information + * @hw: pointer to the HW structure + * + * Valid for only copper links. Read the PHY status register (sticky read) + * to verify that link is up. Read the PHY special control register to + * determine the polarity and 10base-T extended distance. Read the PHY + * special status register to determine MDI/MDIx and current speed. If + * speed is 1000, then determine cable length, local and remote receiver. 
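+ *
+ * Caller sketch (illustrative; the .get_info member name follows the
+ * upstream e1000e phy ops table and is an assumption here):
+ *
+ *   ret_val = hw->phy.ops.get_info(hw);
+ *   if (!ret_val)
+ *       e_dbg("MDI-X %d, cable %d m\n", hw->phy.is_mdix,
+ *             hw->phy.cable_length);
+ *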
+ **/ +s32 e1000e_get_phy_info_m88(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + if (phy->media_type != e1000_media_type_copper) { + e_dbg("Phy info is only valid for copper media\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data); + if (ret_val) + return ret_val; + + phy->polarity_correction = !!(phy_data & + M88E1000_PSCR_POLARITY_REVERSAL); + + ret_val = e1000_check_polarity_m88(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(phy_data & M88E1000_PSSR_MDIX); + + if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, MII_STAT1000, &phy_data); + if (ret_val) + return ret_val; + + phy->local_rx = (phy_data & LPA_1000LOCALRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (phy_data & LPA_1000REMRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } else { + /* Set values to "undefined" */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000e_get_phy_info_igp - Retrieve igp PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 e1000e_get_phy_info_igp(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_igp(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IGP01E1000_PSSR_MDIX); + + if ((data & IGP01E1000_PSSR_SPEED_MASK) == + IGP01E1000_PSSR_SPEED_1000MBPS) { + ret_val = phy->ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, MII_STAT1000, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & LPA_1000LOCALRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & LPA_1000REMRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return ret_val; +} + +/** + * e1000_get_phy_info_ife - Retrieves various IFE PHY states + * @hw: pointer to the HW structure + * + * Populates "phy" structure with various feature states. 
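+ *
+ * Consumer note (illustrative): IFE is a 10/100 PHY, so after a
+ * successful call only the polarity and MDI-X fields carry meaning;
+ * the gigabit diagnostics keep their "undefined" markers:
+ *
+ *   if (hw->phy.cable_length == E1000_CABLE_LENGTH_UNDEFINED)
+ *       e_dbg("no cable length diagnostics on this PHY\n");
+ *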
+ **/ +s32 e1000_get_phy_info_ife(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data); + if (ret_val) + return ret_val; + phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE); + + if (phy->polarity_correction) { + ret_val = e1000_check_polarity_ife(hw); + if (ret_val) + return ret_val; + } else { + /* Polarity is forced */ + phy->cable_polarity = ((data & IFE_PSC_FORCE_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + } + + ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & IFE_PMC_MDIX_STATUS); + + /* The following parameters are undefined for 10/100 operation. */ + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + + return 0; +} + +/** + * e1000e_phy_sw_reset - PHY software reset + * @hw: pointer to the HW structure + * + * Does a software reset of the PHY by reading the PHY control register and + * setting/write the control register reset bit to the PHY. + **/ +s32 e1000e_phy_sw_reset(struct e1000_hw *hw) +{ + s32 ret_val; + u16 phy_ctrl; + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_ctrl); + if (ret_val) + return ret_val; + + phy_ctrl |= BMCR_RESET; + ret_val = e1e_wphy(hw, MII_BMCR, phy_ctrl); + if (ret_val) + return ret_val; + + udelay(1); + + return ret_val; +} + +/** + * e1000e_phy_hw_reset_generic - PHY hardware reset + * @hw: pointer to the HW structure + * + * Verify the reset block is not blocking us from resetting. Acquire + * semaphore (if necessary) and read/set/write the device control reset + * bit in the PHY. Wait the appropriate delay time for the device to + * reset and release the semaphore (if necessary). + **/ +s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u32 ctrl; + + if (phy->ops.check_reset_block) { + ret_val = phy->ops.check_reset_block(hw); + if (ret_val) + return 0; + } + + ret_val = phy->ops.acquire(hw); + if (ret_val) + return ret_val; + + ctrl = er32(CTRL); + ew32(CTRL, ctrl | E1000_CTRL_PHY_RST); + e1e_flush(); + + udelay(phy->reset_delay_us); + + ew32(CTRL, ctrl); + e1e_flush(); + + usleep_range(150, 300); + + phy->ops.release(hw); + + return phy->ops.get_cfg_done(hw); +} + +/** + * e1000e_get_cfg_done_generic - Generic configuration done + * @hw: pointer to the HW structure + * + * Generic function to wait 10 milli-seconds for configuration to complete + * and return success. + **/ +s32 e1000e_get_cfg_done_generic(struct e1000_hw __always_unused *hw) +{ + mdelay(10); + + return 0; +} + +/** + * e1000e_phy_init_script_igp3 - Inits the IGP3 PHY + * @hw: pointer to the HW structure + * + * Initializes a Intel Gigabit PHY3 when an EEPROM is not present. 
+ **/ +s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw) +{ + e_dbg("Running IGP 3 PHY init script\n"); + + /* PHY init IGP 3 */ + /* Enable rise/fall, 10-mode work in class-A */ + e1e_wphy(hw, 0x2F5B, 0x9018); + /* Remove all caps from Replica path filter */ + e1e_wphy(hw, 0x2F52, 0x0000); + /* Bias trimming for ADC, AFE and Driver (Default) */ + e1e_wphy(hw, 0x2FB1, 0x8B24); + /* Increase Hybrid poly bias */ + e1e_wphy(hw, 0x2FB2, 0xF8F0); + /* Add 4% to Tx amplitude in Gig mode */ + e1e_wphy(hw, 0x2010, 0x10B0); + /* Disable trimming (TTT) */ + e1e_wphy(hw, 0x2011, 0x0000); + /* Poly DC correction to 94.6% + 2% for all channels */ + e1e_wphy(hw, 0x20DD, 0x249A); + /* ABS DC correction to 95.9% */ + e1e_wphy(hw, 0x20DE, 0x00D3); + /* BG temp curve trim */ + e1e_wphy(hw, 0x28B4, 0x04CE); + /* Increasing ADC OPAMP stage 1 currents to max */ + e1e_wphy(hw, 0x2F70, 0x29E4); + /* Force 1000 ( required for enabling PHY regs configuration) */ + e1e_wphy(hw, 0x0000, 0x0140); + /* Set upd_freq to 6 */ + e1e_wphy(hw, 0x1F30, 0x1606); + /* Disable NPDFE */ + e1e_wphy(hw, 0x1F31, 0xB814); + /* Disable adaptive fixed FFE (Default) */ + e1e_wphy(hw, 0x1F35, 0x002A); + /* Enable FFE hysteresis */ + e1e_wphy(hw, 0x1F3E, 0x0067); + /* Fixed FFE for short cable lengths */ + e1e_wphy(hw, 0x1F54, 0x0065); + /* Fixed FFE for medium cable lengths */ + e1e_wphy(hw, 0x1F55, 0x002A); + /* Fixed FFE for long cable lengths */ + e1e_wphy(hw, 0x1F56, 0x002A); + /* Enable Adaptive Clip Threshold */ + e1e_wphy(hw, 0x1F72, 0x3FB0); + /* AHT reset limit to 1 */ + e1e_wphy(hw, 0x1F76, 0xC0FF); + /* Set AHT master delay to 127 msec */ + e1e_wphy(hw, 0x1F77, 0x1DEC); + /* Set scan bits for AHT */ + e1e_wphy(hw, 0x1F78, 0xF9EF); + /* Set AHT Preset bits */ + e1e_wphy(hw, 0x1F79, 0x0210); + /* Change integ_factor of channel A to 3 */ + e1e_wphy(hw, 0x1895, 0x0003); + /* Change prop_factor of channels BCD to 8 */ + e1e_wphy(hw, 0x1796, 0x0008); + /* Change cg_icount + enable integbp for channels BCD */ + e1e_wphy(hw, 0x1798, 0xD008); + /* Change cg_icount + enable integbp + change prop_factor_master + * to 8 for channel A + */ + e1e_wphy(hw, 0x1898, 0xD918); + /* Disable AHT in Slave mode on channel A */ + e1e_wphy(hw, 0x187A, 0x0800); + /* Enable LPLU and disable AN to 1000 in non-D0a states, + * Enable SPD+B2B + */ + e1e_wphy(hw, 0x0019, 0x008D); + /* Enable restart AN on an1000_dis change */ + e1e_wphy(hw, 0x001B, 0x2080); + /* Enable wh_fifo read clock in 10/100 modes */ + e1e_wphy(hw, 0x0014, 0x0045); + /* Restart AN, Speed selection is 1000 */ + e1e_wphy(hw, 0x0000, 0x1340); + + return 0; +} + +/** + * e1000e_get_phy_type_from_id - Get PHY type from id + * @phy_id: phy_id read from the phy + * + * Returns the phy type from the id. 
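+ *
+ * Usage sketch (illustrative; mirrors how e1000e_determine_phy_address()
+ * below consumes the result):
+ *
+ *   if (e1000e_get_phy_type_from_id(hw->phy.id) == e1000_phy_unknown)
+ *       return -E1000_ERR_PHY_TYPE;
+ *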
+ **/ +enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id) +{ + enum e1000_phy_type phy_type = e1000_phy_unknown; + + switch (phy_id) { + case M88E1000_I_PHY_ID: + case M88E1000_E_PHY_ID: + case M88E1111_I_PHY_ID: + case M88E1011_I_PHY_ID: + phy_type = e1000_phy_m88; + break; + case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */ + phy_type = e1000_phy_igp_2; + break; + case GG82563_E_PHY_ID: + phy_type = e1000_phy_gg82563; + break; + case IGP03E1000_E_PHY_ID: + phy_type = e1000_phy_igp_3; + break; + case IFE_E_PHY_ID: + case IFE_PLUS_E_PHY_ID: + case IFE_C_E_PHY_ID: + phy_type = e1000_phy_ife; + break; + case BME1000_E_PHY_ID: + case BME1000_E_PHY_ID_R2: + phy_type = e1000_phy_bm; + break; + case I82578_E_PHY_ID: + phy_type = e1000_phy_82578; + break; + case I82577_E_PHY_ID: + phy_type = e1000_phy_82577; + break; + case I82579_E_PHY_ID: + phy_type = e1000_phy_82579; + break; + case I217_E_PHY_ID: + phy_type = e1000_phy_i217; + break; + default: + phy_type = e1000_phy_unknown; + break; + } + return phy_type; +} + +/** + * e1000e_determine_phy_address - Determines PHY address. + * @hw: pointer to the HW structure + * + * This uses a trial and error method to loop through possible PHY + * addresses. It tests each by reading the PHY ID registers and + * checking for a match. + **/ +s32 e1000e_determine_phy_address(struct e1000_hw *hw) +{ + u32 phy_addr = 0; + u32 i; + enum e1000_phy_type phy_type = e1000_phy_unknown; + + hw->phy.id = phy_type; + + for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) { + hw->phy.addr = phy_addr; + i = 0; + + do { + e1000e_get_phy_id(hw); + phy_type = e1000e_get_phy_type_from_id(hw->phy.id); + + /* If phy_type is valid, break - we found our + * PHY address + */ + if (phy_type != e1000_phy_unknown) + return 0; + + usleep_range(1000, 2000); + i++; + } while (i < 10); + } + + return -E1000_ERR_PHY_TYPE; +} + +/** + * e1000_get_phy_addr_for_bm_page - Retrieve PHY page address + * @page: page to access + * + * Returns the phy address for the page requested. + **/ +static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg) +{ + u32 phy_addr = 2; + + if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31)) + phy_addr = 1; + + return phy_addr; +} + +/** + * e1000e_write_phy_reg_bm - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. + **/ +s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto release; + } + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. 
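+		 *
+		 * Worked example (illustrative): for phy address 1, page 3
+		 * is written as 3 << IGP_PAGE_SHIFT (5), i.e. 0x60, to
+		 * IGP01E1000_PHY_PAGE_SELECT; for addresses 2 and 3 the raw
+		 * page number goes to BM_PHY_PAGE_SELECT (register 22).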
+ */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto release; + } + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000e_read_phy_reg_bm - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u32 page = offset >> IGP_PAGE_SHIFT; + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto release; + } + + hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset); + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + u32 page_shift, page_select; + + /* Page select is register 31 for phy address 1 and 22 for + * phy address 2 and 3. Page select is shifted only for + * phy address 1. + */ + if (hw->phy.addr == 1) { + page_shift = IGP_PAGE_SHIFT; + page_select = IGP01E1000_PHY_PAGE_SELECT; + } else { + page_shift = 0; + page_select = BM_PHY_PAGE_SELECT; + } + + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, page_select, + (page << page_shift)); + if (ret_val) + goto release; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000e_read_phy_reg_bm2 - Read BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and storing the retrieved information in data. Release any acquired + * semaphores before exiting. + **/ +s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, false); + goto release; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto release; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000e_write_phy_reg_bm2 - Write BM PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
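+ *
+ * Offset encoding sketch (illustrative): the page lives in the bits
+ * above IGP_PAGE_SHIFT, so page 2, register 6 is passed as
+ *
+ *   offset = (2 << IGP_PAGE_SHIFT) | 6;   i.e. 0x46
+ *
+ * and the function recovers page = offset >> IGP_PAGE_SHIFT below.
+ *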
+ **/ +s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data) +{ + s32 ret_val; + u16 page = (u16)(offset >> IGP_PAGE_SHIFT); + + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, false); + goto release; + } + + hw->phy.addr = 1; + + if (offset > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT, + page); + + if (ret_val) + goto release; + } + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset, + data); + +release: + hw->phy.ops.release(hw); + return ret_val; +} + +/** + * e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers + * @hw: pointer to the HW structure + * @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG + * + * Assumes semaphore already acquired and phy_reg points to a valid memory + * address to store contents of the BM_WUC_ENABLE_REG register. + **/ +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + u16 temp; + + /* All page select, port ctrl and wakeup registers use phy address 1 */ + hw->phy.addr = 1; + + /* Select Port Control Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + e_dbg("Could not set Port Control page\n"); + return ret_val; + } + + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg); + if (ret_val) { + e_dbg("Could not read PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + return ret_val; + } + + /* Enable both PHY wakeup mode and Wakeup register page writes. + * Prevent a power state change by disabling ME and Host PHY wakeup. + */ + temp = *phy_reg; + temp |= BM_WUC_ENABLE_BIT; + temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT); + + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp); + if (ret_val) { + e_dbg("Could not write PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + return ret_val; + } + + /* Select Host Wakeup Registers page - caller now able to write + * registers on the Wakeup registers page + */ + return e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT)); +} + +/** + * e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs + * @hw: pointer to the HW structure + * @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG + * + * Restore BM_WUC_ENABLE_REG to its original value. + * + * Assumes semaphore already acquired and *phy_reg is the contents of the + * BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by + * caller. 
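+ *
+ * Pairing sketch (illustrative; semaphore handling elided): wakeup-page
+ * accesses are bracketed by the enable/disable pair:
+ *
+ *   u16 saved;
+ *   if (!e1000_enable_phy_wakeup_reg_access_bm(hw, &saved)) {
+ *           ... read or write registers on BM_WUC_PAGE ...
+ *           e1000_disable_phy_wakeup_reg_access_bm(hw, &saved);
+ *   }
+ *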
+ **/ +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg) +{ + s32 ret_val; + + /* Select Port Control Registers page */ + ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT)); + if (ret_val) { + e_dbg("Could not set Port Control page\n"); + return ret_val; + } + + /* Restore 769.17 to its original value */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg); + if (ret_val) + e_dbg("Could not restore PHY register %d.%d\n", + BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG); + + return ret_val; +} + +/** + * e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to read or write + * @read: determines if operation is read or write + * @page_set: BM_WUC_PAGE already set and access enabled + * + * Read the PHY register at offset and store the retrieved information in + * data, or write data to PHY register at offset. Note the procedure to + * access the PHY wakeup registers is different than reading the other PHY + * registers. It works as such: + * 1) Set 769.17.2 (page 769, register 17, bit 2) = 1 + * 2) Set page to 800 for host (801 if we were manageability) + * 3) Write the address using the address opcode (0x11) + * 4) Read or write the data using the data opcode (0x12) + * 5) Restore 769.17.2 to its original value + * + * Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and + * step 5 is done by e1000_disable_phy_wakeup_reg_access_bm(). + * + * Assumes semaphore is already acquired. When page_set==true, assumes + * the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack + * is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_bm()). 
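+ *
+ * Offset sketch (illustrative; BM_PHY_REG() comes from phy.h in this
+ * addon): page and register are folded into a single offset, e.g.
+ * page 800, register 1:
+ *
+ *   u16 data;
+ *   u32 offset = BM_PHY_REG(BM_WUC_PAGE, 1);
+ *   e1000_access_phy_wakeup_reg_bm(hw, offset, &data, true, false);
+ *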
+ **/ +static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset, + u16 *data, bool read, bool page_set) +{ + s32 ret_val; + u16 reg = BM_PHY_REG_NUM(offset); + u16 page = BM_PHY_REG_PAGE(offset); + u16 phy_reg = 0; + + /* Gig must be disabled for MDIO accesses to Host Wakeup reg page */ + if ((hw->mac.type == e1000_pchlan) && + (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE))) + e_dbg("Attempting to access page %d while gig enabled.\n", + page); + + if (!page_set) { + /* Enable access to PHY wakeup registers */ + ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg); + if (ret_val) { + e_dbg("Could not enable PHY wakeup reg access\n"); + return ret_val; + } + } + + e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg); + + /* Write the Wakeup register page offset value using opcode 0x11 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg); + if (ret_val) { + e_dbg("Could not write address opcode to page %d\n", page); + return ret_val; + } + + if (read) { + /* Read the Wakeup register page value using opcode 0x12 */ + ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + data); + } else { + /* Write the Wakeup register page value using opcode 0x12 */ + ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE, + *data); + } + + if (ret_val) { + e_dbg("Could not access PHY reg %d.%d\n", page, reg); + return ret_val; + } + + if (!page_set) + ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); + + return ret_val; +} + +/** + * e1000_power_up_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, restore the link to previous + * settings. + **/ +void e1000_power_up_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + e1e_rphy(hw, MII_BMCR, &mii_reg); + mii_reg &= ~BMCR_PDOWN; + e1e_wphy(hw, MII_BMCR, mii_reg); +} + +/** + * e1000_power_down_phy_copper - Restore copper link in case of PHY power down + * @hw: pointer to the HW structure + * + * In the case of a PHY power down to save power, or to turn off link during a + * driver unload, or wake on lan is not enabled, restore the link to previous + * settings. + **/ +void e1000_power_down_phy_copper(struct e1000_hw *hw) +{ + u16 mii_reg = 0; + + /* The PHY will retain its settings across a power down/up cycle */ + e1e_rphy(hw, MII_BMCR, &mii_reg); + mii_reg |= BMCR_PDOWN; + e1e_wphy(hw, MII_BMCR, mii_reg); + usleep_range(1000, 2000); +} + +/** + * __e1000_read_phy_reg_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then reads the PHY register at offset + * and stores the retrieved information in data. Release any acquired + * semaphore before exiting. 
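+ *
+ * Wrapper map (summarizing the three entry points defined below):
+ *
+ *   e1000_read_phy_reg_hv(hw, off, &d)          locked=false page_set=false
+ *   e1000_read_phy_reg_hv_locked(hw, off, &d)   locked=true  page_set=false
+ *   e1000_read_phy_reg_page_hv(hw, off, &d)     locked=true  page_set=true
+ *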
+ **/ +static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data, + bool locked, bool page_set) +{ + s32 ret_val; + u16 page = BM_PHY_REG_PAGE(offset); + u16 reg = BM_PHY_REG_NUM(offset); + u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data, + true, page_set); + goto out; + } + + if (page > 0 && page < HV_INTC_FC_PAGE_START) { + ret_val = e1000_access_phy_debug_regs_hv(hw, offset, + data, true); + goto out; + } + + if (!page_set) { + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + if (reg > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_set_page_igp(hw, + (page << IGP_PAGE_SHIFT)); + + hw->phy.addr = phy_addr; + + if (ret_val) + goto out; + } + } + + e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page, + page << IGP_PAGE_SHIFT, reg); + + ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data); +out: + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_read_phy_reg_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Acquires semaphore then reads the PHY register at offset and stores + * the retrieved information in data. Release the acquired semaphore + * before exiting. + **/ +s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, false, false); +} + +/** + * e1000_read_phy_reg_hv_locked - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to be read + * @data: pointer to the read data + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired. + **/ +s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, true, false); +} + +/** + * e1000_read_phy_reg_page_hv - Read HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Reads the PHY register at offset and stores the retrieved information + * in data. Assumes semaphore already acquired and page already set. + **/ +s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data) +{ + return __e1000_read_phy_reg_hv(hw, offset, data, true, true); +} + +/** + * __e1000_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * @locked: semaphore has already been acquired or not + * + * Acquires semaphore, if necessary, then writes the data to PHY register + * at the offset. Release any acquired semaphores before exiting. 
+ **/ +static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data, + bool locked, bool page_set) +{ + s32 ret_val; + u16 page = BM_PHY_REG_PAGE(offset); + u16 reg = BM_PHY_REG_NUM(offset); + u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page); + + if (!locked) { + ret_val = hw->phy.ops.acquire(hw); + if (ret_val) + return ret_val; + } + + /* Page 800 works differently than the rest so it has its own func */ + if (page == BM_WUC_PAGE) { + ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data, + false, page_set); + goto out; + } + + if (page > 0 && page < HV_INTC_FC_PAGE_START) { + ret_val = e1000_access_phy_debug_regs_hv(hw, offset, + &data, false); + goto out; + } + + if (!page_set) { + if (page == HV_INTC_FC_PAGE_START) + page = 0; + + /* Workaround MDIO accesses being disabled after entering IEEE + * Power Down (when bit 11 of the PHY Control register is set) + */ + if ((hw->phy.type == e1000_phy_82578) && + (hw->phy.revision >= 1) && + (hw->phy.addr == 2) && + !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) { + u16 data2 = 0x7EFF; + + ret_val = e1000_access_phy_debug_regs_hv(hw, + (1 << 6) | 0x3, + &data2, false); + if (ret_val) + goto out; + } + + if (reg > MAX_PHY_MULTI_PAGE_REG) { + /* Page is shifted left, PHY expects (page x 32) */ + ret_val = e1000_set_page_igp(hw, + (page << IGP_PAGE_SHIFT)); + + hw->phy.addr = phy_addr; + + if (ret_val) + goto out; + } + } + + e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page, + page << IGP_PAGE_SHIFT, reg); + + ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, + data); + +out: + if (!locked) + hw->phy.ops.release(hw); + + return ret_val; +} + +/** + * e1000_write_phy_reg_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Acquires semaphore then writes the data to PHY register at the offset. + * Release the acquired semaphores before exiting. + **/ +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, false, false); +} + +/** + * e1000_write_phy_reg_hv_locked - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired. + **/ +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, true, false); +} + +/** + * e1000_write_phy_reg_page_hv - Write HV PHY register + * @hw: pointer to the HW structure + * @offset: register offset to write to + * @data: data to write at register offset + * + * Writes the data to PHY register at the offset. Assumes semaphore + * already acquired and page already set. 
+ **/ +s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data) +{ + return __e1000_write_phy_reg_hv(hw, offset, data, true, true); +} + +/** + * e1000_get_phy_addr_for_hv_page - Get PHY address based on page + * @page: page to be accessed + **/ +static u32 e1000_get_phy_addr_for_hv_page(u32 page) +{ + u32 phy_addr = 2; + + if (page >= HV_INTC_FC_PAGE_START) + phy_addr = 1; + + return phy_addr; +} + +/** + * e1000_access_phy_debug_regs_hv - Read HV PHY vendor specific high registers + * @hw: pointer to the HW structure + * @offset: register offset to be read or written + * @data: pointer to the data to be read or written + * @read: determines if operation is read or write + * + * Reads the PHY register at offset and stores the retreived information + * in data. Assumes semaphore already acquired. Note that the procedure + * to access these regs uses the address port and data port to read/write. + * These accesses done with PHY address 2 and without using pages. + **/ +static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset, + u16 *data, bool read) +{ + s32 ret_val; + u32 addr_reg; + u32 data_reg; + + /* This takes care of the difference with desktop vs mobile phy */ + addr_reg = ((hw->phy.type == e1000_phy_82578) ? + I82578_ADDR_REG : I82577_ADDR_REG); + data_reg = addr_reg + 1; + + /* All operations in this function are phy address 2 */ + hw->phy.addr = 2; + + /* masking with 0x3F to remove the page from offset */ + ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F); + if (ret_val) { + e_dbg("Could not write the Address Offset port register\n"); + return ret_val; + } + + /* Read or write the data value next */ + if (read) + ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data); + else + ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data); + + if (ret_val) + e_dbg("Could not access the Data port register\n"); + + return ret_val; +} + +/** + * e1000_link_stall_workaround_hv - Si workaround + * @hw: pointer to the HW structure + * + * This function works around a Si bug where the link partner can get + * a link up indication before the PHY does. If small packets are sent + * by the link partner they can be placed in the packet buffer without + * being properly accounted for by the PHY and will stall preventing + * further packets from being received. The workaround is to clear the + * packet buffer after the PHY detects link up. + **/ +s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw) +{ + s32 ret_val = 0; + u16 data; + + if (hw->phy.type != e1000_phy_82578) + return 0; + + /* Do not apply workaround if in PHY loopback bit 14 set */ + e1e_rphy(hw, MII_BMCR, &data); + if (data & BMCR_LOOPBACK) + return 0; + + /* check if link is up and at 1Gbps */ + ret_val = e1e_rphy(hw, BM_CS_STATUS, &data); + if (ret_val) + return ret_val; + + data &= (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_MASK); + + if (data != (BM_CS_STATUS_LINK_UP | BM_CS_STATUS_RESOLVED | + BM_CS_STATUS_SPEED_1000)) + return 0; + + msleep(200); + + /* flush the packets in the fifo buffer */ + ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, + (HV_MUX_DATA_CTRL_GEN_TO_MAC | + HV_MUX_DATA_CTRL_FORCE_SPEED)); + if (ret_val) + return ret_val; + + return e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC); +} + +/** + * e1000_check_polarity_82577 - Checks the polarity. + * @hw: pointer to the HW structure + * + * Success returns 0, Failure returns -E1000_ERR_PHY (-2) + * + * Polarity is determined based on the PHY specific status register. 
+ **/ +s32 e1000_check_polarity_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + + ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); + + if (!ret_val) + phy->cable_polarity = ((data & I82577_PHY_STATUS2_REV_POLARITY) + ? e1000_rev_polarity_reversed + : e1000_rev_polarity_normal); + + return ret_val; +} + +/** + * e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY + * @hw: pointer to the HW structure + * + * Calls the PHY setup function to force speed and duplex. + **/ +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data; + bool link; + + ret_val = e1e_rphy(hw, MII_BMCR, &phy_data); + if (ret_val) + return ret_val; + + e1000e_phy_force_speed_duplex_setup(hw, &phy_data); + + ret_val = e1e_wphy(hw, MII_BMCR, phy_data); + if (ret_val) + return ret_val; + + udelay(1); + + if (phy->autoneg_wait_to_complete) { + e_dbg("Waiting for forced speed/duplex link on 82577 phy\n"); + + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + if (ret_val) + return ret_val; + + if (!link) + e_dbg("Link taking longer than expected.\n"); + + /* Try once more */ + ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT, + 100000, &link); + } + + return ret_val; +} + +/** + * e1000_get_phy_info_82577 - Retrieve I82577 PHY information + * @hw: pointer to the HW structure + * + * Read PHY status to determine if link is up. If link is up, then + * set/determine 10base-T extended distance and polarity correction. Read + * PHY port status to determine MDI/MDIx and speed. Based on the speed, + * determine on the cable length, local and remote receiver. + **/ +s32 e1000_get_phy_info_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 data; + bool link; + + ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link); + if (ret_val) + return ret_val; + + if (!link) { + e_dbg("Phy info is only valid if link is up\n"); + return -E1000_ERR_CONFIG; + } + + phy->polarity_correction = true; + + ret_val = e1000_check_polarity_82577(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data); + if (ret_val) + return ret_val; + + phy->is_mdix = !!(data & I82577_PHY_STATUS2_MDIX); + + if ((data & I82577_PHY_STATUS2_SPEED_MASK) == + I82577_PHY_STATUS2_SPEED_1000MBPS) { + ret_val = hw->phy.ops.get_cable_length(hw); + if (ret_val) + return ret_val; + + ret_val = e1e_rphy(hw, MII_STAT1000, &data); + if (ret_val) + return ret_val; + + phy->local_rx = (data & LPA_1000LOCALRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + + phy->remote_rx = (data & LPA_1000REMRXOK) + ? e1000_1000t_rx_status_ok : e1000_1000t_rx_status_not_ok; + } else { + phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED; + phy->local_rx = e1000_1000t_rx_status_undefined; + phy->remote_rx = e1000_1000t_rx_status_undefined; + } + + return 0; +} + +/** + * e1000_get_cable_length_82577 - Determine cable length for 82577 PHY + * @hw: pointer to the HW structure + * + * Reads the diagnostic status register and verifies result is valid before + * placing it in the phy_cable_length field. 
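+ *
+ * Worked extraction (illustrative; masks from phy.h): with
+ * I82577_DSTATUS_CABLE_LENGTH = 0x03FC and a shift of 2, a diagnostic
+ * status reading of 0x0190 decodes to
+ *
+ *   length = (0x0190 & 0x03FC) >> 2 = 100 meters
+ *
+ * while the all-ones field value 0xFF (E1000_CABLE_LENGTH_UNDEFINED)
+ * is rejected with -E1000_ERR_PHY.
+ *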
+ **/ +s32 e1000_get_cable_length_82577(struct e1000_hw *hw) +{ + struct e1000_phy_info *phy = &hw->phy; + s32 ret_val; + u16 phy_data, length; + + ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data); + if (ret_val) + return ret_val; + + length = ((phy_data & I82577_DSTATUS_CABLE_LENGTH) >> + I82577_DSTATUS_CABLE_LENGTH_SHIFT); + + if (length == E1000_CABLE_LENGTH_UNDEFINED) + return -E1000_ERR_PHY; + + phy->cable_length = length; + + return 0; +} diff --git a/addons/e1000e/src/4.4.180/phy.h b/addons/e1000e/src/4.4.180/phy.h new file mode 100644 index 00000000..55bfe473 --- /dev/null +++ b/addons/e1000e/src/4.4.180/phy.h @@ -0,0 +1,236 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_PHY_H_ +#define _E1000E_PHY_H_ + +s32 e1000e_check_downshift(struct e1000_hw *hw); +s32 e1000_check_polarity_m88(struct e1000_hw *hw); +s32 e1000_check_polarity_igp(struct e1000_hw *hw); +s32 e1000_check_polarity_ife(struct e1000_hw *hw); +s32 e1000e_check_reset_block_generic(struct e1000_hw *hw); +s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw); +s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw); +s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw); +s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw); +s32 e1000e_get_cable_length_m88(struct e1000_hw *hw); +s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw); +s32 e1000e_get_cfg_done_generic(struct e1000_hw *hw); +s32 e1000e_get_phy_id(struct e1000_hw *hw); +s32 e1000e_get_phy_info_igp(struct e1000_hw *hw); +s32 e1000e_get_phy_info_m88(struct e1000_hw *hw); +s32 e1000_get_phy_info_ife(struct e1000_hw *hw); +s32 e1000e_phy_sw_reset(struct e1000_hw *hw); +void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl); +s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw); +s32 e1000e_phy_reset_dsp(struct e1000_hw *hw); +s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page); +s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active); +s32 e1000e_setup_copper_link(struct e1000_hw *hw); +s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 
e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations, + u32 usec_interval, bool *success); +s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw); +enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id); +s32 e1000e_determine_phy_address(struct e1000_hw *hw); +s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg); +s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data); +void e1000_power_up_phy_copper(struct e1000_hw *hw); +void e1000_power_down_phy_copper(struct e1000_hw *hw); +s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data); +s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data); +s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw); +s32 e1000_copper_link_setup_82577(struct e1000_hw *hw); +s32 e1000_check_polarity_82577(struct e1000_hw *hw); +s32 e1000_get_phy_info_82577(struct e1000_hw *hw); +s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw); +s32 e1000_get_cable_length_82577(struct e1000_hw *hw); + +#define E1000_MAX_PHY_ADDR 8 + +/* IGP01E1000 Specific Registers */ +#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ +#define IGP01E1000_PHY_PORT_STATUS 0x11 /* Status */ +#define IGP01E1000_PHY_PORT_CTRL 0x12 /* Control */ +#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health */ +#define IGP02E1000_PHY_POWER_MGMT 0x19 /* Power Management */ +#define IGP01E1000_PHY_PAGE_SELECT 0x1F /* Page Select */ +#define BM_PHY_PAGE_SELECT 22 /* Page Select for BM */ +#define IGP_PAGE_SHIFT 5 +#define PHY_REG_MASK 0x1F + +/* BM/HV Specific Registers */ +#define BM_PORT_CTRL_PAGE 769 +#define BM_WUC_PAGE 800 +#define BM_WUC_ADDRESS_OPCODE 0x11 +#define BM_WUC_DATA_OPCODE 0x12 +#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE +#define BM_WUC_ENABLE_REG 17 +#define BM_WUC_ENABLE_BIT (1 << 2) +#define BM_WUC_HOST_WU_BIT (1 << 4) +#define BM_WUC_ME_WU_BIT (1 << 5) + +#define PHY_UPPER_SHIFT 21 +#define BM_PHY_REG(page, reg) \ + (((reg) & MAX_PHY_REG_ADDRESS) |\ + (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\ + (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT))) +#define BM_PHY_REG_PAGE(offset) \ + ((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF)) +#define BM_PHY_REG_NUM(offset) \ + ((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\ + (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\ + ~MAX_PHY_REG_ADDRESS))) + +#define HV_INTC_FC_PAGE_START 768 +#define I82578_ADDR_REG 29 +#define I82577_ADDR_REG 16 +#define I82577_CFG_REG 22 +#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15) +#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */ +#define I82577_CTRL_REG 23 + +/* 82577 specific PHY registers */ +#define I82577_PHY_CTRL_2 18 +#define I82577_PHY_LBK_CTRL 19 
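+
+/* Worked example for the BM_PHY_REG() helpers above (illustrative
+ * annotation, not part of the upstream header): BM_PHY_REG(800, 17)
+ * packs page 800 and register 17 into a single offset; feeding that
+ * offset back through BM_PHY_REG_PAGE() yields 800 and through
+ * BM_PHY_REG_NUM() yields 17. PHY_PAGE_SHIFT and MAX_PHY_REG_ADDRESS
+ * are defined elsewhere in the driver and are assumed here.
+ */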
+#define I82577_PHY_STATUS_2 26 +#define I82577_PHY_DIAG_STATUS 31 + +/* I82577 PHY Status 2 */ +#define I82577_PHY_STATUS2_REV_POLARITY 0x0400 +#define I82577_PHY_STATUS2_MDIX 0x0800 +#define I82577_PHY_STATUS2_SPEED_MASK 0x0300 +#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200 + +/* I82577 PHY Control 2 */ +#define I82577_PHY_CTRL2_MANUAL_MDIX 0x0200 +#define I82577_PHY_CTRL2_AUTO_MDI_MDIX 0x0400 +#define I82577_PHY_CTRL2_MDIX_CFG_MASK 0x0600 + +/* I82577 PHY Diagnostics Status */ +#define I82577_DSTATUS_CABLE_LENGTH 0x03FC +#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2 + +/* BM PHY Copper Specific Control 1 */ +#define BM_CS_CTRL1 16 + +/* BM PHY Copper Specific Status */ +#define BM_CS_STATUS 17 +#define BM_CS_STATUS_LINK_UP 0x0400 +#define BM_CS_STATUS_RESOLVED 0x0800 +#define BM_CS_STATUS_SPEED_MASK 0xC000 +#define BM_CS_STATUS_SPEED_1000 0x8000 + +/* 82577 Mobile Phy Status Register */ +#define HV_M_STATUS 26 +#define HV_M_STATUS_AUTONEG_COMPLETE 0x1000 +#define HV_M_STATUS_SPEED_MASK 0x0300 +#define HV_M_STATUS_SPEED_1000 0x0200 +#define HV_M_STATUS_SPEED_100 0x0100 +#define HV_M_STATUS_LINK_UP 0x0040 + +#define IGP01E1000_PHY_PCS_INIT_REG 0x00B4 +#define IGP01E1000_PHY_POLARITY_MASK 0x0078 + +#define IGP01E1000_PSCR_AUTO_MDIX 0x1000 +#define IGP01E1000_PSCR_FORCE_MDI_MDIX 0x2000 /* 0=MDI, 1=MDIX */ + +#define IGP01E1000_PSCFR_SMART_SPEED 0x0080 + +#define IGP02E1000_PM_SPD 0x0001 /* Smart Power Down */ +#define IGP02E1000_PM_D0_LPLU 0x0002 /* For D0a states */ +#define IGP02E1000_PM_D3_LPLU 0x0004 /* For all other states */ + +#define IGP01E1000_PLHR_SS_DOWNGRADE 0x8000 + +#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002 +#define IGP01E1000_PSSR_MDIX 0x0800 +#define IGP01E1000_PSSR_SPEED_MASK 0xC000 +#define IGP01E1000_PSSR_SPEED_1000MBPS 0xC000 + +#define IGP02E1000_PHY_CHANNEL_NUM 4 +#define IGP02E1000_PHY_AGC_A 0x11B1 +#define IGP02E1000_PHY_AGC_B 0x12B1 +#define IGP02E1000_PHY_AGC_C 0x14B1 +#define IGP02E1000_PHY_AGC_D 0x18B1 + +#define IGP02E1000_AGC_LENGTH_SHIFT 9 /* Course=15:13, Fine=12:9 */ +#define IGP02E1000_AGC_LENGTH_MASK 0x7F +#define IGP02E1000_AGC_RANGE 15 + +#define E1000_CABLE_LENGTH_UNDEFINED 0xFF + +#define E1000_KMRNCTRLSTA_OFFSET 0x001F0000 +#define E1000_KMRNCTRLSTA_OFFSET_SHIFT 16 +#define E1000_KMRNCTRLSTA_REN 0x00200000 +#define E1000_KMRNCTRLSTA_CTRL_OFFSET 0x1 /* Kumeran Control */ +#define E1000_KMRNCTRLSTA_DIAG_OFFSET 0x3 /* Kumeran Diagnostic */ +#define E1000_KMRNCTRLSTA_TIMEOUTS 0x4 /* Kumeran Timeouts */ +#define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */ +#define E1000_KMRNCTRLSTA_IBIST_DISABLE 0x0200 /* Kumeran IBIST Disable */ +#define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */ +#define E1000_KMRNCTRLSTA_K1_CONFIG 0x7 +#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002 /* enable K1 */ +#define E1000_KMRNCTRLSTA_HD_CTRL 0x10 /* Kumeran HD Control */ + +#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10 +#define IFE_PHY_SPECIAL_CONTROL 0x11 /* 100BaseTx PHY Special Ctrl */ +#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Ctrl */ +#define IFE_PHY_MDIX_CONTROL 0x1C /* MDI/MDI-X Control */ + +/* IFE PHY Extended Status Control */ +#define IFE_PESC_POLARITY_REVERSED 0x0100 + +/* IFE PHY Special Control */ +#define IFE_PSC_AUTO_POLARITY_DISABLE 0x0010 +#define IFE_PSC_FORCE_POLARITY 0x0020 + +/* IFE PHY Special Control and LED Control */ +#define IFE_PSCL_PROBE_MODE 0x0020 +#define IFE_PSCL_PROBE_LEDS_OFF 0x0006 /* Force LEDs 0 and 2 off */ +#define IFE_PSCL_PROBE_LEDS_ON 0x0007 /* Force LEDs 0 and 2 on */ 
+ +/* IFE PHY MDIX Control */ +#define IFE_PMC_MDIX_STATUS 0x0020 /* 1=MDI-X, 0=MDI */ +#define IFE_PMC_FORCE_MDIX 0x0040 /* 1=force MDI-X, 0=force MDI */ +#define IFE_PMC_AUTO_MDIX 0x0080 /* 1=enable auto, 0=disable */ + +#endif diff --git a/addons/e1000e/src/4.4.180/ptp.c b/addons/e1000e/src/4.4.180/ptp.c new file mode 100644 index 00000000..855cf8c1 --- /dev/null +++ b/addons/e1000e/src/4.4.180/ptp.c @@ -0,0 +1,280 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +/* PTP 1588 Hardware Clock (PHC) + * Derived from PTP Hardware Clock driver for Intel 82576 and 82580 (igb) + * Copyright (C) 2011 Richard Cochran + */ + +#include "e1000.h" + +/** + * e1000e_phc_adjfreq - adjust the frequency of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired frequency change in parts per billion + * + * Adjust the frequency of the PHC cycle counter by the indicated delta from + * the base frequency. + **/ +static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + struct e1000_hw *hw = &adapter->hw; + bool neg_adj = false; + unsigned long flags; + u64 adjustment; + u32 timinca, incvalue; + s32 ret_val; + + if ((delta > ptp->max_adj) || (delta <= -1000000000)) + return -EINVAL; + + if (delta < 0) { + neg_adj = true; + delta = -delta; + } + + /* Get the System Time Register SYSTIM base frequency */ + ret_val = e1000e_get_base_timinca(adapter, &timinca); + if (ret_val) + return ret_val; + + spin_lock_irqsave(&adapter->systim_lock, flags); + + incvalue = timinca & E1000_TIMINCA_INCVALUE_MASK; + + adjustment = incvalue; + adjustment *= delta; + adjustment = div_u64(adjustment, 1000000000); + + incvalue = neg_adj ? (incvalue - adjustment) : (incvalue + adjustment); + + timinca &= ~E1000_TIMINCA_INCVALUE_MASK; + timinca |= incvalue; + + ew32(TIMINCA, timinca); + + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + return 0; +} + +/** + * e1000e_phc_adjtime - Shift the time of the hardware clock + * @ptp: ptp clock structure + * @delta: Desired change in nanoseconds + * + * Adjust the timer by resetting the timecounter structure. 
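+ *
+ * Semantics sketch (illustrative): a positive delta slides the clock
+ * forward without touching the TIMINCA increment, e.g. when the PHC is
+ * found 500 ns behind its reference:
+ *
+ *   e1000e_phc_adjtime(ptp, 500);
+ *
+ * Rate corrections go through e1000e_phc_adjfreq() above instead.
+ *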
+ **/ +static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + unsigned long flags; + + spin_lock_irqsave(&adapter->systim_lock, flags); + timecounter_adjtime(&adapter->tc, delta); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + return 0; +} + +/** + * e1000e_phc_gettime - Reads the current time from the hardware clock + * @ptp: ptp clock structure + * @ts: timespec structure to hold the current time value + * + * Read the timecounter and return the correct value in ns after converting + * it into a struct timespec. + **/ +static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + unsigned long flags; + u64 cycles, ns; + + spin_lock_irqsave(&adapter->systim_lock, flags); + + /* Use timecounter_cyc2time() to allow non-monotonic SYSTIM readings */ + cycles = adapter->cc.read(&adapter->cc); + ns = timecounter_cyc2time(&adapter->tc, cycles); + + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +/** + * e1000e_phc_settime - Set the current time on the hardware clock + * @ptp: ptp clock structure + * @ts: timespec containing the new time for the cycle counter + * + * Reset the timecounter to use a new base value instead of the kernel + * wall timer value. + **/ +static int e1000e_phc_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, + ptp_clock_info); + unsigned long flags; + u64 ns; + + ns = timespec64_to_ns(ts); + + /* reset the timecounter */ + spin_lock_irqsave(&adapter->systim_lock, flags); + timecounter_init(&adapter->tc, &adapter->cc, ns); + spin_unlock_irqrestore(&adapter->systim_lock, flags); + + return 0; +} + +/** + * e1000e_phc_enable - enable or disable an ancillary feature + * @ptp: ptp clock structure + * @request: Desired resource to enable or disable + * @on: Caller passes one to enable or zero to disable + * + * Enable (or disable) ancillary features of the PHC subsystem. + * Currently, no ancillary features are supported. + **/ +static int e1000e_phc_enable(struct ptp_clock_info __always_unused *ptp, + struct ptp_clock_request __always_unused *request, + int __always_unused on) +{ + return -EOPNOTSUPP; +} + +static void e1000e_systim_overflow_work(struct work_struct *work) +{ + struct e1000_adapter *adapter = container_of(work, struct e1000_adapter, + systim_overflow_work.work); + struct e1000_hw *hw = &adapter->hw; + struct timespec64 ts; + u64 ns; + + /* Update the timecounter */ + ns = timecounter_read(&adapter->tc); + + ts = ns_to_timespec64(ns); + e_dbg("SYSTIM overflow check at %lld.%09lu\n", + (long long) ts.tv_sec, ts.tv_nsec); + + schedule_delayed_work(&adapter->systim_overflow_work, + E1000_SYSTIM_OVERFLOW_PERIOD); +} + +static const struct ptp_clock_info e1000e_ptp_clock_info = { + .owner = THIS_MODULE, + .n_alarm = 0, + .n_ext_ts = 0, + .n_per_out = 0, + .n_pins = 0, + .pps = 0, + .adjfreq = e1000e_phc_adjfreq, + .adjtime = e1000e_phc_adjtime, + .gettime64 = e1000e_phc_gettime, + .settime64 = e1000e_phc_settime, + .enable = e1000e_phc_enable, +}; + +/** + * e1000e_ptp_init - initialize PTP for devices which support it + * @adapter: board private structure + * + * This function performs the required steps for enabling PTP support. 
+ * If PTP support has already been loaded it simply calls the cyclecounter + * init routine and exits. + **/ +void e1000e_ptp_init(struct e1000_adapter *adapter) +{ + struct e1000_hw *hw = &adapter->hw; + + adapter->ptp_clock = NULL; + + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return; + + adapter->ptp_clock_info = e1000e_ptp_clock_info; + + snprintf(adapter->ptp_clock_info.name, + sizeof(adapter->ptp_clock_info.name), "%pm", + adapter->netdev->perm_addr); + + switch (hw->mac.type) { + case e1000_pch2lan: + case e1000_pch_lpt: + case e1000_pch_spt: + if (((hw->mac.type != e1000_pch_lpt) && + (hw->mac.type != e1000_pch_spt)) || + (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { + adapter->ptp_clock_info.max_adj = 24000000 - 1; + break; + } + /* fall-through */ + case e1000_82574: + case e1000_82583: + adapter->ptp_clock_info.max_adj = 600000000 - 1; + break; + default: + break; + } + + INIT_DELAYED_WORK(&adapter->systim_overflow_work, + e1000e_systim_overflow_work); + + schedule_delayed_work(&adapter->systim_overflow_work, + E1000_SYSTIM_OVERFLOW_PERIOD); + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info, + &adapter->pdev->dev); + if (IS_ERR(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; + e_err("ptp_clock_register failed\n"); + } else { + e_info("registered PHC clock\n"); + } +} + +/** + * e1000e_ptp_remove - disable PTP device and stop the overflow check + * @adapter: board private structure + * + * Stop the PTP support, and cancel the delayed work. + **/ +void e1000e_ptp_remove(struct e1000_adapter *adapter) +{ + if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP)) + return; + + cancel_delayed_work_sync(&adapter->systim_overflow_work); + + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_info("removed PHC\n"); + } +} diff --git a/addons/e1000e/src/4.4.180/regs.h b/addons/e1000e/src/4.4.180/regs.h new file mode 100644 index 00000000..1d5e0b77 --- /dev/null +++ b/addons/e1000e/src/4.4.180/regs.h @@ -0,0 +1,251 @@ +/* Intel PRO/1000 Linux driver + * Copyright(c) 1999 - 2015 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Contact Information: + * Linux NICS + * e1000-devel Mailing List + * Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + */ + +#ifndef _E1000E_REGS_H_ +#define _E1000E_REGS_H_ + +#define E1000_CTRL 0x00000 /* Device Control - RW */ +#define E1000_STATUS 0x00008 /* Device Status - RO */ +#define E1000_EECD 0x00010 /* EEPROM/Flash Control - RW */ +#define E1000_EERD 0x00014 /* EEPROM Read - RW */ +#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ +#define E1000_FLA 0x0001C /* Flash Access - RW */ +#define E1000_MDIC 0x00020 /* MDI Control - RW */ +#define E1000_SCTL 0x00024 /* SerDes Control - RW */ +#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ +#define E1000_FCAH 0x0002C /* Flow Control Address High -RW */ +#define E1000_FEXT 0x0002C /* Future Extended - RW */ +#define E1000_FEXTNVM 0x00028 /* Future Extended NVM - RW */ +#define E1000_FEXTNVM3 0x0003C /* Future Extended NVM 3 - RW */ +#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */ +#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */ +#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */ +#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */ +#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */ +#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */ +#define E1000_FCT 0x00030 /* Flow Control Type - RW */ +#define E1000_VET 0x00038 /* VLAN Ether Type - RW */ +#define E1000_ICR 0x000C0 /* Interrupt Cause Read - R/clr */ +#define E1000_ITR 0x000C4 /* Interrupt Throttling Rate - RW */ +#define E1000_ICS 0x000C8 /* Interrupt Cause Set - WO */ +#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ +#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ +#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ +#define E1000_IVAR 0x000E4 /* Interrupt Vector Allocation Register - RW */ +#define E1000_SVCR 0x000F0 +#define E1000_SVT 0x000F4 +#define E1000_LPIC 0x000FC /* Low Power IDLE control */ +#define E1000_RCTL 0x00100 /* Rx Control - RW */ +#define E1000_FCTTV 0x00170 /* Flow Control Transmit Timer Value - RW */ +#define E1000_TXCW 0x00178 /* Tx Configuration Word - RW */ +#define E1000_RXCW 0x00180 /* Rx Configuration Word - RO */ +#define E1000_PBA_ECC 0x01100 /* PBA ECC Register */ +#define E1000_TCTL 0x00400 /* Tx Control - RW */ +#define E1000_TCTL_EXT 0x00404 /* Extended Tx Control - RW */ +#define E1000_TIPG 0x00410 /* Tx Inter-packet gap -RW */ +#define E1000_AIT 0x00458 /* Adaptive Interframe Spacing Throttle - RW */ +#define E1000_LEDCTL 0x00E00 /* LED Control - RW */ +#define E1000_EXTCNF_CTRL 0x00F00 /* Extended Configuration Control */ +#define E1000_EXTCNF_SIZE 0x00F08 /* Extended Configuration Size */ +#define E1000_PHY_CTRL 0x00F10 /* PHY Control Register in CSR */ +#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */ +#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ +#define E1000_PBS 0x01008 /* Packet Buffer Size */ +#define E1000_PBECCSTS 0x0100C /* Packet Buffer ECC Status - RW */ +#define E1000_IOSFPC 0x00F28 /* TX corrupted data */ +#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ +#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ +#define E1000_FLOP 0x0103C /* FLASH Opcode Register */ +#define E1000_ERT 0x02008 /* Early Rx Threshold - RW */ +#define E1000_FCRTL 0x02160 /* Flow Control Receive Threshold Low - RW */ +#define E1000_FCRTH 0x02168 /* Flow Control Receive Threshold High - RW */ +#define E1000_PSRCTL 0x02170 /* Packet Split Receive Control - RW */ +#define E1000_RDFH 0x02410 /* Rx Data FIFO Head - RW */ +#define E1000_RDFT 0x02418 /* Rx Data FIFO 
Tail - RW */ +#define E1000_RDFHS 0x02420 /* Rx Data FIFO Head Saved - RW */ +#define E1000_RDFTS 0x02428 /* Rx Data FIFO Tail Saved - RW */ +#define E1000_RDFPC 0x02430 /* Rx Data FIFO Packet Count - RW */ +/* Split and Replication Rx Control - RW */ +#define E1000_RDTR 0x02820 /* Rx Delay Timer - RW */ +#define E1000_RADV 0x0282C /* Rx Interrupt Absolute Delay Timer - RW */ +/* Convenience macros + * + * Note: "_n" is the queue number of the register to be written to. + * + * Example usage: + * E1000_RDBAL_REG(current_rx_queue) + */ +#define E1000_RDBAL(_n) ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : \ + (0x0C000 + ((_n) * 0x40))) +#define E1000_RDBAH(_n) ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : \ + (0x0C004 + ((_n) * 0x40))) +#define E1000_RDLEN(_n) ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : \ + (0x0C008 + ((_n) * 0x40))) +#define E1000_RDH(_n) ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : \ + (0x0C010 + ((_n) * 0x40))) +#define E1000_RDT(_n) ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : \ + (0x0C018 + ((_n) * 0x40))) +#define E1000_RXDCTL(_n) ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : \ + (0x0C028 + ((_n) * 0x40))) +#define E1000_TDBAL(_n) ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : \ + (0x0E000 + ((_n) * 0x40))) +#define E1000_TDBAH(_n) ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : \ + (0x0E004 + ((_n) * 0x40))) +#define E1000_TDLEN(_n) ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : \ + (0x0E008 + ((_n) * 0x40))) +#define E1000_TDH(_n) ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : \ + (0x0E010 + ((_n) * 0x40))) +#define E1000_TDT(_n) ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : \ + (0x0E018 + ((_n) * 0x40))) +#define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : \ + (0x0E028 + ((_n) * 0x40))) +#define E1000_TARC(_n) (0x03840 + ((_n) * 0x100)) +#define E1000_KABGTXD 0x03004 /* AFE Band Gap Transmit Ref Data */ +#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ + (0x054E0 + ((_i - 16) * 8))) +#define E1000_RAH(_i) (((_i) <= 15) ? 
(0x05404 + ((_i) * 8)) : \ + (0x054E4 + ((_i - 16) * 8))) +#define E1000_SHRAL(_i) (0x05438 + ((_i) * 8)) +#define E1000_SHRAH(_i) (0x0543C + ((_i) * 8)) +#define E1000_TDFH 0x03410 /* Tx Data FIFO Head - RW */ +#define E1000_TDFT 0x03418 /* Tx Data FIFO Tail - RW */ +#define E1000_TDFHS 0x03420 /* Tx Data FIFO Head Saved - RW */ +#define E1000_TDFTS 0x03428 /* Tx Data FIFO Tail Saved - RW */ +#define E1000_TDFPC 0x03430 /* Tx Data FIFO Packet Count - RW */ +#define E1000_TIDV 0x03820 /* Tx Interrupt Delay Value - RW */ +#define E1000_TADV 0x0382C /* Tx Interrupt Absolute Delay Val - RW */ +#define E1000_CRCERRS 0x04000 /* CRC Error Count - R/clr */ +#define E1000_ALGNERRC 0x04004 /* Alignment Error Count - R/clr */ +#define E1000_SYMERRS 0x04008 /* Symbol Error Count - R/clr */ +#define E1000_RXERRC 0x0400C /* Receive Error Count - R/clr */ +#define E1000_MPC 0x04010 /* Missed Packet Count - R/clr */ +#define E1000_SCC 0x04014 /* Single Collision Count - R/clr */ +#define E1000_ECOL 0x04018 /* Excessive Collision Count - R/clr */ +#define E1000_MCC 0x0401C /* Multiple Collision Count - R/clr */ +#define E1000_LATECOL 0x04020 /* Late Collision Count - R/clr */ +#define E1000_COLC 0x04028 /* Collision Count - R/clr */ +#define E1000_DC 0x04030 /* Defer Count - R/clr */ +#define E1000_TNCRS 0x04034 /* Tx-No CRS - R/clr */ +#define E1000_SEC 0x04038 /* Sequence Error Count - R/clr */ +#define E1000_CEXTERR 0x0403C /* Carrier Extension Error Count - R/clr */ +#define E1000_RLEC 0x04040 /* Receive Length Error Count - R/clr */ +#define E1000_XONRXC 0x04048 /* XON Rx Count - R/clr */ +#define E1000_XONTXC 0x0404C /* XON Tx Count - R/clr */ +#define E1000_XOFFRXC 0x04050 /* XOFF Rx Count - R/clr */ +#define E1000_XOFFTXC 0x04054 /* XOFF Tx Count - R/clr */ +#define E1000_FCRUC 0x04058 /* Flow Control Rx Unsupported Count- R/clr */ +#define E1000_PRC64 0x0405C /* Packets Rx (64 bytes) - R/clr */ +#define E1000_PRC127 0x04060 /* Packets Rx (65-127 bytes) - R/clr */ +#define E1000_PRC255 0x04064 /* Packets Rx (128-255 bytes) - R/clr */ +#define E1000_PRC511 0x04068 /* Packets Rx (255-511 bytes) - R/clr */ +#define E1000_PRC1023 0x0406C /* Packets Rx (512-1023 bytes) - R/clr */ +#define E1000_PRC1522 0x04070 /* Packets Rx (1024-1522 bytes) - R/clr */ +#define E1000_GPRC 0x04074 /* Good Packets Rx Count - R/clr */ +#define E1000_BPRC 0x04078 /* Broadcast Packets Rx Count - R/clr */ +#define E1000_MPRC 0x0407C /* Multicast Packets Rx Count - R/clr */ +#define E1000_GPTC 0x04080 /* Good Packets Tx Count - R/clr */ +#define E1000_GORCL 0x04088 /* Good Octets Rx Count Low - R/clr */ +#define E1000_GORCH 0x0408C /* Good Octets Rx Count High - R/clr */ +#define E1000_GOTCL 0x04090 /* Good Octets Tx Count Low - R/clr */ +#define E1000_GOTCH 0x04094 /* Good Octets Tx Count High - R/clr */ +#define E1000_RNBC 0x040A0 /* Rx No Buffers Count - R/clr */ +#define E1000_RUC 0x040A4 /* Rx Undersize Count - R/clr */ +#define E1000_RFC 0x040A8 /* Rx Fragment Count - R/clr */ +#define E1000_ROC 0x040AC /* Rx Oversize Count - R/clr */ +#define E1000_RJC 0x040B0 /* Rx Jabber Count - R/clr */ +#define E1000_MGTPRC 0x040B4 /* Management Packets Rx Count - R/clr */ +#define E1000_MGTPDC 0x040B8 /* Management Packets Dropped Count - R/clr */ +#define E1000_MGTPTC 0x040BC /* Management Packets Tx Count - R/clr */ +#define E1000_TORL 0x040C0 /* Total Octets Rx Low - R/clr */ +#define E1000_TORH 0x040C4 /* Total Octets Rx High - R/clr */ +#define E1000_TOTL 0x040C8 /* Total Octets Tx Low - R/clr */ +#define E1000_TOTH 0x040CC 
/* Total Octets Tx High - R/clr */ +#define E1000_TPR 0x040D0 /* Total Packets Rx - R/clr */ +#define E1000_TPT 0x040D4 /* Total Packets Tx - R/clr */ +#define E1000_PTC64 0x040D8 /* Packets Tx (64 bytes) - R/clr */ +#define E1000_PTC127 0x040DC /* Packets Tx (65-127 bytes) - R/clr */ +#define E1000_PTC255 0x040E0 /* Packets Tx (128-255 bytes) - R/clr */ +#define E1000_PTC511 0x040E4 /* Packets Tx (256-511 bytes) - R/clr */ +#define E1000_PTC1023 0x040E8 /* Packets Tx (512-1023 bytes) - R/clr */ +#define E1000_PTC1522 0x040EC /* Packets Tx (1024-1522 Bytes) - R/clr */ +#define E1000_MPTC 0x040F0 /* Multicast Packets Tx Count - R/clr */ +#define E1000_BPTC 0x040F4 /* Broadcast Packets Tx Count - R/clr */ +#define E1000_TSCTC 0x040F8 /* TCP Segmentation Context Tx - R/clr */ +#define E1000_TSCTFC 0x040FC /* TCP Segmentation Context Tx Fail - R/clr */ +#define E1000_IAC 0x04100 /* Interrupt Assertion Count */ +#define E1000_ICRXPTC 0x04104 /* Interrupt Cause Rx Pkt Timer Expire Count */ +#define E1000_ICRXATC 0x04108 /* Interrupt Cause Rx Abs Timer Expire Count */ +#define E1000_ICTXPTC 0x0410C /* Interrupt Cause Tx Pkt Timer Expire Count */ +#define E1000_ICTXATC 0x04110 /* Interrupt Cause Tx Abs Timer Expire Count */ +#define E1000_ICTXQEC 0x04118 /* Interrupt Cause Tx Queue Empty Count */ +#define E1000_ICTXQMTC 0x0411C /* Interrupt Cause Tx Queue Min Thresh Count */ +#define E1000_ICRXDMTC 0x04120 /* Interrupt Cause Rx Desc Min Thresh Count */ +#define E1000_ICRXOC 0x04124 /* Interrupt Cause Receiver Overrun Count */ +#define E1000_CRC_OFFSET 0x05F50 /* CRC Offset register */ + +#define E1000_PCS_LCTL 0x04208 /* PCS Link Control - RW */ +#define E1000_PCS_LSTAT 0x0420C /* PCS Link Status - RO */ +#define E1000_PCS_ANADV 0x04218 /* AN advertisement - RW */ +#define E1000_PCS_LPAB 0x0421C /* Link Partner Ability - RW */ +#define E1000_RXCSUM 0x05000 /* Rx Checksum Control - RW */ +#define E1000_RFCTL 0x05008 /* Receive Filter Control */ +#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ +#define E1000_RA 0x05400 /* Receive Address - RW Array */ +#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ +#define E1000_WUC 0x05800 /* Wakeup Control - RW */ +#define E1000_WUFC 0x05808 /* Wakeup Filter Control - RW */ +#define E1000_WUS 0x05810 /* Wakeup Status - RO */ +#define E1000_MANC 0x05820 /* Management Control - RW */ +#define E1000_FFLT 0x05F00 /* Flexible Filter Length Table - RW Array */ +#define E1000_HOST_IF 0x08800 /* Host Interface */ + +#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */ +#define E1000_MANC2H 0x05860 /* Management Control To Host - RW */ +/* Management Decision Filters */ +#define E1000_MDEF(_n) (0x05890 + (4 * (_n))) +#define E1000_SW_FW_SYNC 0x05B5C /* SW-FW Synchronization - RW */ +#define E1000_GCR 0x05B00 /* PCI-Ex Control */ +#define E1000_GCR2 0x05B64 /* PCI-Ex Control #2 */ +#define E1000_FACTPS 0x05B30 /* Function Active and Power State to MNG */ +#define E1000_SWSM 0x05B50 /* SW Semaphore */ +#define E1000_FWSM 0x05B54 /* FW Semaphore */ +/* Driver-only SW semaphore (not used by BOOT agents) */ +#define E1000_SWSM2 0x05B58 +#define E1000_FFLT_DBG 0x05F04 /* Debug Register */ +#define E1000_HICR 0x08F00 /* Host Interface Control */ + +/* RSS registers */ +#define E1000_MRQC 0x05818 /* Multiple Receive Control - RW */ +#define E1000_RETA(_i) (0x05C00 + ((_i) * 4)) /* Redirection Table - RW */ +#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW */ +#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync 
Control register - RW */ +#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */ +#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */ +#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */ +#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ +#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ +#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ +#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ +#define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ +#define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ + +#endif diff --git a/addons/ehci-pci/install-bromolow-3.10.108.sh b/addons/ehci-pci/install-bromolow-3.10.108.sh new file mode 100644 index 00000000..69c8294b --- /dev/null +++ b/addons/ehci-pci/install-bromolow-3.10.108.sh @@ -0,0 +1,6 @@ +if [ "${1}" = "rd" ]; then + echo "Installing module(s) for ehci-pci" + ${INSMOD} "/modules/pci-quirks.ko" + ${INSMOD} "/modules/ehci-hcd.ko" + ${INSMOD} "/modules/ehci-pci.ko" ${PARAMS} +fi diff --git a/addons/ehci-pci/install.sh b/addons/ehci-pci/install.sh new file mode 100644 index 00000000..02f75ca5 --- /dev/null +++ b/addons/ehci-pci/install.sh @@ -0,0 +1,5 @@ +if [ "${1}" = "rd" ]; then + echo "Installing modules for ehci-pci" + ${INSMOD} "/modules/ehci-hcd.ko" + ${INSMOD} "/modules/ehci-pci.ko" ${PARAMS} +fi diff --git a/addons/ehci-pci/manifest.yml b/addons/ehci-pci/manifest.yml new file mode 100644 index 00000000..dd29813d --- /dev/null +++ b/addons/ehci-pci/manifest.yml @@ -0,0 +1,7 @@ +version: 1 +name: ehci-pci +description: "Driver for USB 2.0 Host Controller" +available-for: + broadwell-4.4.180: + install-script: &script "install.sh" + modules: true diff --git a/addons/ehci-pci/src/3.10.108/Makefile b/addons/ehci-pci/src/3.10.108/Makefile new file mode 100644 index 00000000..898ba621 --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/Makefile @@ -0,0 +1,3 @@ +obj-y += pci-quirks.o +obj-m += ehci-hcd.o +obj-m += ehci-pci.o diff --git a/addons/ehci-pci/src/3.10.108/ehci-dbg.c b/addons/ehci-pci/src/3.10.108/ehci-dbg.c new file mode 100644 index 00000000..5429d264 --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/ehci-dbg.c @@ -0,0 +1,980 @@ +/* + * Copyright (c) 2001-2002 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +/* this file is part of ehci-hcd.c */ + +#ifdef DEBUG + +/* check the values in the HCSPARAMS register + * (host controller _Structural_ parameters) + * see EHCI spec, Table 2-4 for each value + */ +static void dbg_hcs_params (struct ehci_hcd *ehci, char *label) +{ + u32 params = ehci_readl(ehci, &ehci->caps->hcs_params); + + ehci_dbg (ehci, + "%s hcs_params 0x%x dbg=%d%s cc=%d pcc=%d%s%s ports=%d\n", + label, params, + HCS_DEBUG_PORT (params), + HCS_INDICATOR (params) ? " ind" : "", + HCS_N_CC (params), + HCS_N_PCC (params), + HCS_PORTROUTED (params) ? "" : " ordered", + HCS_PPC (params) ? "" : " !ppc", + HCS_N_PORTS (params) + ); + /* Port routing, per EHCI 0.95 Spec, Section 2.2.5 */ + if (HCS_PORTROUTED (params)) { + int i; + char buf [46], tmp [7], byte; + + buf[0] = 0; + for (i = 0; i < HCS_N_PORTS (params); i++) { + // FIXME MIPS won't readb() ... + byte = readb (&ehci->caps->portroute[(i>>1)]); + sprintf(tmp, "%d ", + ((i & 0x1) ? ((byte)&0xf) : ((byte>>4)&0xf))); + strcat(buf, tmp); + } + ehci_dbg (ehci, "%s portroute %s\n", + label, buf); + } +} +#else + +static inline void dbg_hcs_params (struct ehci_hcd *ehci, char *label) {} + +#endif + +#ifdef DEBUG + +/* check the values in the HCCPARAMS register + * (host controller _Capability_ parameters) + * see EHCI Spec, Table 2-5 for each value + * */ +static void dbg_hcc_params (struct ehci_hcd *ehci, char *label) +{ + u32 params = ehci_readl(ehci, &ehci->caps->hcc_params); + + if (HCC_ISOC_CACHE (params)) { + ehci_dbg (ehci, + "%s hcc_params %04x caching frame %s%s%s\n", + label, params, + HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024", + HCC_CANPARK(params) ? " park" : "", + HCC_64BIT_ADDR(params) ? " 64 bit addr" : ""); + } else { + ehci_dbg (ehci, + "%s hcc_params %04x thresh %d uframes %s%s%s%s%s%s%s\n", + label, + params, + HCC_ISOC_THRES(params), + HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024", + HCC_CANPARK(params) ? " park" : "", + HCC_64BIT_ADDR(params) ? " 64 bit addr" : "", + HCC_LPM(params) ? " LPM" : "", + HCC_PER_PORT_CHANGE_EVENT(params) ? " ppce" : "", + HCC_HW_PREFETCH(params) ? " hw prefetch" : "", + HCC_32FRAME_PERIODIC_LIST(params) ? 
+ " 32 periodic list" : ""); + } +} +#else + +static inline void dbg_hcc_params (struct ehci_hcd *ehci, char *label) {} + +#endif + +#ifdef DEBUG + +static void __maybe_unused +dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd) +{ + ehci_dbg(ehci, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd, + hc32_to_cpup(ehci, &qtd->hw_next), + hc32_to_cpup(ehci, &qtd->hw_alt_next), + hc32_to_cpup(ehci, &qtd->hw_token), + hc32_to_cpup(ehci, &qtd->hw_buf [0])); + if (qtd->hw_buf [1]) + ehci_dbg(ehci, " p1=%08x p2=%08x p3=%08x p4=%08x\n", + hc32_to_cpup(ehci, &qtd->hw_buf[1]), + hc32_to_cpup(ehci, &qtd->hw_buf[2]), + hc32_to_cpup(ehci, &qtd->hw_buf[3]), + hc32_to_cpup(ehci, &qtd->hw_buf[4])); +} + +static void __maybe_unused +dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + struct ehci_qh_hw *hw = qh->hw; + + ehci_dbg (ehci, "%s qh %p n%08x info %x %x qtd %x\n", label, + qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current); + dbg_qtd("overlay", ehci, (struct ehci_qtd *) &hw->hw_qtd_next); +} + +static void __maybe_unused +dbg_itd (const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd) +{ + ehci_dbg (ehci, "%s [%d] itd %p, next %08x, urb %p\n", + label, itd->frame, itd, hc32_to_cpu(ehci, itd->hw_next), + itd->urb); + ehci_dbg (ehci, + " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n", + hc32_to_cpu(ehci, itd->hw_transaction[0]), + hc32_to_cpu(ehci, itd->hw_transaction[1]), + hc32_to_cpu(ehci, itd->hw_transaction[2]), + hc32_to_cpu(ehci, itd->hw_transaction[3]), + hc32_to_cpu(ehci, itd->hw_transaction[4]), + hc32_to_cpu(ehci, itd->hw_transaction[5]), + hc32_to_cpu(ehci, itd->hw_transaction[6]), + hc32_to_cpu(ehci, itd->hw_transaction[7])); + ehci_dbg (ehci, + " buf: %08x %08x %08x %08x %08x %08x %08x\n", + hc32_to_cpu(ehci, itd->hw_bufp[0]), + hc32_to_cpu(ehci, itd->hw_bufp[1]), + hc32_to_cpu(ehci, itd->hw_bufp[2]), + hc32_to_cpu(ehci, itd->hw_bufp[3]), + hc32_to_cpu(ehci, itd->hw_bufp[4]), + hc32_to_cpu(ehci, itd->hw_bufp[5]), + hc32_to_cpu(ehci, itd->hw_bufp[6])); + ehci_dbg (ehci, " index: %d %d %d %d %d %d %d %d\n", + itd->index[0], itd->index[1], itd->index[2], + itd->index[3], itd->index[4], itd->index[5], + itd->index[6], itd->index[7]); +} + +static void __maybe_unused +dbg_sitd (const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd) +{ + ehci_dbg (ehci, "%s [%d] sitd %p, next %08x, urb %p\n", + label, sitd->frame, sitd, hc32_to_cpu(ehci, sitd->hw_next), + sitd->urb); + ehci_dbg (ehci, + " addr %08x sched %04x result %08x buf %08x %08x\n", + hc32_to_cpu(ehci, sitd->hw_fullspeed_ep), + hc32_to_cpu(ehci, sitd->hw_uframe), + hc32_to_cpu(ehci, sitd->hw_results), + hc32_to_cpu(ehci, sitd->hw_buf[0]), + hc32_to_cpu(ehci, sitd->hw_buf[1])); +} + +static int __maybe_unused +dbg_status_buf (char *buf, unsigned len, const char *label, u32 status) +{ + return scnprintf (buf, len, + "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s%s", + label, label [0] ? " " : "", status, + (status & STS_PPCE_MASK) ? " PPCE" : "", + (status & STS_ASS) ? " Async" : "", + (status & STS_PSS) ? " Periodic" : "", + (status & STS_RECL) ? " Recl" : "", + (status & STS_HALT) ? " Halt" : "", + (status & STS_IAA) ? " IAA" : "", + (status & STS_FATAL) ? " FATAL" : "", + (status & STS_FLR) ? " FLR" : "", + (status & STS_PCD) ? " PCD" : "", + (status & STS_ERR) ? " ERR" : "", + (status & STS_INT) ? 
" INT" : "" + ); +} + +static int __maybe_unused +dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable) +{ + return scnprintf (buf, len, + "%s%sintrenable %02x%s%s%s%s%s%s%s", + label, label [0] ? " " : "", enable, + (enable & STS_PPCE_MASK) ? " PPCE" : "", + (enable & STS_IAA) ? " IAA" : "", + (enable & STS_FATAL) ? " FATAL" : "", + (enable & STS_FLR) ? " FLR" : "", + (enable & STS_PCD) ? " PCD" : "", + (enable & STS_ERR) ? " ERR" : "", + (enable & STS_INT) ? " INT" : "" + ); +} + +static const char *const fls_strings [] = + { "1024", "512", "256", "??" }; + +static int +dbg_command_buf (char *buf, unsigned len, const char *label, u32 command) +{ + return scnprintf (buf, len, + "%s%scommand %07x %s%s%s%s%s%s=%d ithresh=%d%s%s%s%s " + "period=%s%s %s", + label, label [0] ? " " : "", command, + (command & CMD_HIRD) ? " HIRD" : "", + (command & CMD_PPCEE) ? " PPCEE" : "", + (command & CMD_FSP) ? " FSP" : "", + (command & CMD_ASPE) ? " ASPE" : "", + (command & CMD_PSPE) ? " PSPE" : "", + (command & CMD_PARK) ? " park" : "(park)", + CMD_PARK_CNT (command), + (command >> 16) & 0x3f, + (command & CMD_LRESET) ? " LReset" : "", + (command & CMD_IAAD) ? " IAAD" : "", + (command & CMD_ASE) ? " Async" : "", + (command & CMD_PSE) ? " Periodic" : "", + fls_strings [(command >> 2) & 0x3], + (command & CMD_RESET) ? " Reset" : "", + (command & CMD_RUN) ? "RUN" : "HALT" + ); +} + +static int +dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status) +{ + char *sig; + + /* signaling state */ + switch (status & (3 << 10)) { + case 0 << 10: sig = "se0"; break; + case 1 << 10: sig = "k"; break; /* low speed */ + case 2 << 10: sig = "j"; break; + default: sig = "?"; break; + } + + return scnprintf (buf, len, + "%s%sport:%d status %06x %d %s%s%s%s%s%s " + "sig=%s%s%s%s%s%s%s%s%s%s%s", + label, label [0] ? " " : "", port, status, + status>>25,/*device address */ + (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ACK ? + " ACK" : "", + (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_NYET ? + " NYET" : "", + (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_STALL ? + " STALL" : "", + (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ERR ? + " ERR" : "", + (status & PORT_POWER) ? " POWER" : "", + (status & PORT_OWNER) ? " OWNER" : "", + sig, + (status & PORT_LPM) ? " LPM" : "", + (status & PORT_RESET) ? " RESET" : "", + (status & PORT_SUSPEND) ? " SUSPEND" : "", + (status & PORT_RESUME) ? " RESUME" : "", + (status & PORT_OCC) ? " OCC" : "", + (status & PORT_OC) ? " OC" : "", + (status & PORT_PEC) ? " PEC" : "", + (status & PORT_PE) ? " PE" : "", + (status & PORT_CSC) ? " CSC" : "", + (status & PORT_CONNECT) ? " CONNECT" : ""); +} + +#else +static inline void __maybe_unused +dbg_qh (char *label, struct ehci_hcd *ehci, struct ehci_qh *qh) +{} + +static inline int __maybe_unused +dbg_status_buf (char *buf, unsigned len, const char *label, u32 status) +{ return 0; } + +static inline int __maybe_unused +dbg_command_buf (char *buf, unsigned len, const char *label, u32 command) +{ return 0; } + +static inline int __maybe_unused +dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable) +{ return 0; } + +static inline int __maybe_unused +dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status) +{ return 0; } + +#endif /* DEBUG */ + +/* functions have the "wrong" filename when they're output... 
*/ +#define dbg_status(ehci, label, status) { \ + char _buf [80]; \ + dbg_status_buf (_buf, sizeof _buf, label, status); \ + ehci_dbg (ehci, "%s\n", _buf); \ +} + +#define dbg_cmd(ehci, label, command) { \ + char _buf [80]; \ + dbg_command_buf (_buf, sizeof _buf, label, command); \ + ehci_dbg (ehci, "%s\n", _buf); \ +} + +#define dbg_port(ehci, label, port, status) { \ + char _buf [80]; \ + dbg_port_buf (_buf, sizeof _buf, label, port, status); \ + ehci_dbg (ehci, "%s\n", _buf); \ +} + +/*-------------------------------------------------------------------------*/ + +#ifdef STUB_DEBUG_FILES + +static inline void create_debug_files (struct ehci_hcd *bus) { } +static inline void remove_debug_files (struct ehci_hcd *bus) { } + +#else + +/* troubleshooting help: expose state in debugfs */ + +static int debug_async_open(struct inode *, struct file *); +static int debug_periodic_open(struct inode *, struct file *); +static int debug_registers_open(struct inode *, struct file *); +static int debug_async_open(struct inode *, struct file *); + +static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*); +static int debug_close(struct inode *, struct file *); + +static const struct file_operations debug_async_fops = { + .owner = THIS_MODULE, + .open = debug_async_open, + .read = debug_output, + .release = debug_close, + .llseek = default_llseek, +}; +static const struct file_operations debug_periodic_fops = { + .owner = THIS_MODULE, + .open = debug_periodic_open, + .read = debug_output, + .release = debug_close, + .llseek = default_llseek, +}; +static const struct file_operations debug_registers_fops = { + .owner = THIS_MODULE, + .open = debug_registers_open, + .read = debug_output, + .release = debug_close, + .llseek = default_llseek, +}; + +static struct dentry *ehci_debug_root; + +struct debug_buffer { + ssize_t (*fill_func)(struct debug_buffer *); /* fill method */ + struct usb_bus *bus; + struct mutex mutex; /* protect filling of buffer */ + size_t count; /* number of characters filled into buffer */ + char *output_buf; + size_t alloc_size; +}; + +#define speed_char(info1) ({ char tmp; \ + switch (info1 & (3 << 12)) { \ + case QH_FULL_SPEED: tmp = 'f'; break; \ + case QH_LOW_SPEED: tmp = 'l'; break; \ + case QH_HIGH_SPEED: tmp = 'h'; break; \ + default: tmp = '?'; break; \ + }; tmp; }) + +static inline char token_mark(struct ehci_hcd *ehci, __hc32 token) +{ + __u32 v = hc32_to_cpu(ehci, token); + + if (v & QTD_STS_ACTIVE) + return '*'; + if (v & QTD_STS_HALT) + return '-'; + if (!IS_SHORT_READ (v)) + return ' '; + /* tries to advance through hw_alt_next */ + return '/'; +} + +static void qh_lines ( + struct ehci_hcd *ehci, + struct ehci_qh *qh, + char **nextp, + unsigned *sizep +) +{ + u32 scratch; + u32 hw_curr; + struct list_head *entry; + struct ehci_qtd *td; + unsigned temp; + unsigned size = *sizep; + char *next = *nextp; + char mark; + __le32 list_end = EHCI_LIST_END(ehci); + struct ehci_qh_hw *hw = qh->hw; + + if (hw->hw_qtd_next == list_end) /* NEC does this */ + mark = '@'; + else + mark = token_mark(ehci, hw->hw_token); + if (mark == '/') { /* qh_alt_next controls qh advance? */ + if ((hw->hw_alt_next & QTD_MASK(ehci)) + == ehci->async->hw->hw_alt_next) + mark = '#'; /* blocked */ + else if (hw->hw_alt_next == list_end) + mark = '.'; /* use hw_qtd_next */ + /* else alt_next points to some other qtd */ + } + scratch = hc32_to_cpup(ehci, &hw->hw_info1); + hw_curr = (mark == '*') ? 
hc32_to_cpup(ehci, &hw->hw_current) : 0; + temp = scnprintf (next, size, + "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)", + qh, scratch & 0x007f, + speed_char (scratch), + (scratch >> 8) & 0x000f, + scratch, hc32_to_cpup(ehci, &hw->hw_info2), + hc32_to_cpup(ehci, &hw->hw_token), mark, + (cpu_to_hc32(ehci, QTD_TOGGLE) & hw->hw_token) + ? "data1" : "data0", + (hc32_to_cpup(ehci, &hw->hw_alt_next) >> 1) & 0x0f); + size -= temp; + next += temp; + + /* hc may be modifying the list as we read it ... */ + list_for_each (entry, &qh->qtd_list) { + td = list_entry (entry, struct ehci_qtd, qtd_list); + scratch = hc32_to_cpup(ehci, &td->hw_token); + mark = ' '; + if (hw_curr == td->qtd_dma) + mark = '*'; + else if (hw->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma)) + mark = '+'; + else if (QTD_LENGTH (scratch)) { + if (td->hw_alt_next == ehci->async->hw->hw_alt_next) + mark = '#'; + else if (td->hw_alt_next != list_end) + mark = '/'; + } + temp = snprintf (next, size, + "\n\t%p%c%s len=%d %08x urb %p", + td, mark, ({ char *tmp; + switch ((scratch>>8)&0x03) { + case 0: tmp = "out"; break; + case 1: tmp = "in"; break; + case 2: tmp = "setup"; break; + default: tmp = "?"; break; + } tmp;}), + (scratch >> 16) & 0x7fff, + scratch, + td->urb); + if (size < temp) + temp = size; + size -= temp; + next += temp; + if (temp == size) + goto done; + } + + temp = snprintf (next, size, "\n"); + if (size < temp) + temp = size; + size -= temp; + next += temp; + +done: + *sizep = size; + *nextp = next; +} + +static ssize_t fill_async_buffer(struct debug_buffer *buf) +{ + struct usb_hcd *hcd; + struct ehci_hcd *ehci; + unsigned long flags; + unsigned temp, size; + char *next; + struct ehci_qh *qh; + + hcd = bus_to_hcd(buf->bus); + ehci = hcd_to_ehci (hcd); + next = buf->output_buf; + size = buf->alloc_size; + + *next = 0; + + /* dumps a snapshot of the async schedule. + * usually empty except for long-term bulk reads, or head. + * one QH per line, and TDs we know about + */ + spin_lock_irqsave (&ehci->lock, flags); + for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh) + qh_lines (ehci, qh, &next, &size); + if (!list_empty(&ehci->async_unlink) && size > 0) { + temp = scnprintf(next, size, "\nunlink =\n"); + size -= temp; + next += temp; + + list_for_each_entry(qh, &ehci->async_unlink, unlink_node) { + if (size <= 0) + break; + qh_lines(ehci, qh, &next, &size); + } + } + spin_unlock_irqrestore (&ehci->lock, flags); + + return strlen(buf->output_buf); +} + +#define DBG_SCHED_LIMIT 64 +static ssize_t fill_periodic_buffer(struct debug_buffer *buf) +{ + struct usb_hcd *hcd; + struct ehci_hcd *ehci; + unsigned long flags; + union ehci_shadow p, *seen; + unsigned temp, size, seen_count; + char *next; + unsigned i; + __hc32 tag; + + if (!(seen = kmalloc (DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC))) + return 0; + seen_count = 0; + + hcd = bus_to_hcd(buf->bus); + ehci = hcd_to_ehci (hcd); + next = buf->output_buf; + size = buf->alloc_size; + + temp = scnprintf (next, size, "size = %d\n", ehci->periodic_size); + size -= temp; + next += temp; + + /* dump a snapshot of the periodic schedule. + * iso changes, interrupt usually doesn't. 
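+ * Each pshadow entry is walked via its Q_NEXT_TYPE() tag, decoding
+ * QH, FSTN, ITD and SITD nodes in turn; QHs that were already
+ * printed are elided with "..." to keep the dump short.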
+ */ + spin_lock_irqsave (&ehci->lock, flags); + for (i = 0; i < ehci->periodic_size; i++) { + p = ehci->pshadow [i]; + if (likely (!p.ptr)) + continue; + tag = Q_NEXT_TYPE(ehci, ehci->periodic [i]); + + temp = scnprintf (next, size, "%4d: ", i); + size -= temp; + next += temp; + + do { + struct ehci_qh_hw *hw; + + switch (hc32_to_cpu(ehci, tag)) { + case Q_TYPE_QH: + hw = p.qh->hw; + temp = scnprintf (next, size, " qh%d-%04x/%p", + p.qh->period, + hc32_to_cpup(ehci, + &hw->hw_info2) + /* uframe masks */ + & (QH_CMASK | QH_SMASK), + p.qh); + size -= temp; + next += temp; + /* don't repeat what follows this qh */ + for (temp = 0; temp < seen_count; temp++) { + if (seen [temp].ptr != p.ptr) + continue; + if (p.qh->qh_next.ptr) { + temp = scnprintf (next, size, + " ..."); + size -= temp; + next += temp; + } + break; + } + /* show more info the first time around */ + if (temp == seen_count) { + u32 scratch = hc32_to_cpup(ehci, + &hw->hw_info1); + struct ehci_qtd *qtd; + char *type = ""; + + /* count tds, get ep direction */ + temp = 0; + list_for_each_entry (qtd, + &p.qh->qtd_list, + qtd_list) { + temp++; + switch (0x03 & (hc32_to_cpu( + ehci, + qtd->hw_token) >> 8)) { + case 0: type = "out"; continue; + case 1: type = "in"; continue; + } + } + + temp = scnprintf (next, size, + " (%c%d ep%d%s " + "[%d/%d] q%d p%d)", + speed_char (scratch), + scratch & 0x007f, + (scratch >> 8) & 0x000f, type, + p.qh->usecs, p.qh->c_usecs, + temp, + 0x7ff & (scratch >> 16)); + + if (seen_count < DBG_SCHED_LIMIT) + seen [seen_count++].qh = p.qh; + } else + temp = 0; + tag = Q_NEXT_TYPE(ehci, hw->hw_next); + p = p.qh->qh_next; + break; + case Q_TYPE_FSTN: + temp = scnprintf (next, size, + " fstn-%8x/%p", p.fstn->hw_prev, + p.fstn); + tag = Q_NEXT_TYPE(ehci, p.fstn->hw_next); + p = p.fstn->fstn_next; + break; + case Q_TYPE_ITD: + temp = scnprintf (next, size, + " itd/%p", p.itd); + tag = Q_NEXT_TYPE(ehci, p.itd->hw_next); + p = p.itd->itd_next; + break; + case Q_TYPE_SITD: + temp = scnprintf (next, size, + " sitd%d-%04x/%p", + p.sitd->stream->interval, + hc32_to_cpup(ehci, &p.sitd->hw_uframe) + & 0x0000ffff, + p.sitd); + tag = Q_NEXT_TYPE(ehci, p.sitd->hw_next); + p = p.sitd->sitd_next; + break; + } + size -= temp; + next += temp; + } while (p.ptr); + + temp = scnprintf (next, size, "\n"); + size -= temp; + next += temp; + } + spin_unlock_irqrestore (&ehci->lock, flags); + kfree (seen); + + return buf->alloc_size - size; +} +#undef DBG_SCHED_LIMIT + +static const char *rh_state_string(struct ehci_hcd *ehci) +{ + switch (ehci->rh_state) { + case EHCI_RH_HALTED: + return "halted"; + case EHCI_RH_SUSPENDED: + return "suspended"; + case EHCI_RH_RUNNING: + return "running"; + case EHCI_RH_STOPPING: + return "stopping"; + } + return "?"; +} + +static ssize_t fill_registers_buffer(struct debug_buffer *buf) +{ + struct usb_hcd *hcd; + struct ehci_hcd *ehci; + unsigned long flags; + unsigned temp, size, i; + char *next, scratch [80]; + static char fmt [] = "%*s\n"; + static char label [] = ""; + + hcd = bus_to_hcd(buf->bus); + ehci = hcd_to_ehci (hcd); + next = buf->output_buf; + size = buf->alloc_size; + + spin_lock_irqsave (&ehci->lock, flags); + + if (!HCD_HW_ACCESSIBLE(hcd)) { + size = scnprintf (next, size, + "bus %s, device %s\n" + "%s\n" + "SUSPENDED (no register access)\n", + hcd->self.controller->bus->name, + dev_name(hcd->self.controller), + hcd->product_desc); + goto done; + } + + /* Capability Registers */ + i = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); + temp = scnprintf (next, size, + "bus 
%s, device %s\n" + "%s\n" + "EHCI %x.%02x, rh state %s\n", + hcd->self.controller->bus->name, + dev_name(hcd->self.controller), + hcd->product_desc, + i >> 8, i & 0x0ff, rh_state_string(ehci)); + size -= temp; + next += temp; + +#ifdef CONFIG_PCI + /* EHCI 0.96 and later may have "extended capabilities" */ + if (hcd->self.controller->bus == &pci_bus_type) { + struct pci_dev *pdev; + u32 offset, cap, cap2; + unsigned count = 256/4; + + pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller); + offset = HCC_EXT_CAPS(ehci_readl(ehci, + &ehci->caps->hcc_params)); + while (offset && count--) { + pci_read_config_dword (pdev, offset, &cap); + switch (cap & 0xff) { + case 1: + temp = scnprintf (next, size, + "ownership %08x%s%s\n", cap, + (cap & (1 << 24)) ? " linux" : "", + (cap & (1 << 16)) ? " firmware" : ""); + size -= temp; + next += temp; + + offset += 4; + pci_read_config_dword (pdev, offset, &cap2); + temp = scnprintf (next, size, + "SMI sts/enable 0x%08x\n", cap2); + size -= temp; + next += temp; + break; + case 0: /* illegal reserved capability */ + cap = 0; + /* FALLTHROUGH */ + default: /* unknown */ + break; + } + temp = (cap >> 8) & 0xff; + } + } +#endif + + // FIXME interpret both types of params + i = ehci_readl(ehci, &ehci->caps->hcs_params); + temp = scnprintf (next, size, "structural params 0x%08x\n", i); + size -= temp; + next += temp; + + i = ehci_readl(ehci, &ehci->caps->hcc_params); + temp = scnprintf (next, size, "capability params 0x%08x\n", i); + size -= temp; + next += temp; + + /* Operational Registers */ + temp = dbg_status_buf (scratch, sizeof scratch, label, + ehci_readl(ehci, &ehci->regs->status)); + temp = scnprintf (next, size, fmt, temp, scratch); + size -= temp; + next += temp; + + temp = dbg_command_buf (scratch, sizeof scratch, label, + ehci_readl(ehci, &ehci->regs->command)); + temp = scnprintf (next, size, fmt, temp, scratch); + size -= temp; + next += temp; + + temp = dbg_intr_buf (scratch, sizeof scratch, label, + ehci_readl(ehci, &ehci->regs->intr_enable)); + temp = scnprintf (next, size, fmt, temp, scratch); + size -= temp; + next += temp; + + temp = scnprintf (next, size, "uframe %04x\n", + ehci_read_frame_index(ehci)); + size -= temp; + next += temp; + + for (i = 1; i <= HCS_N_PORTS (ehci->hcs_params); i++) { + temp = dbg_port_buf (scratch, sizeof scratch, label, i, + ehci_readl(ehci, + &ehci->regs->port_status[i - 1])); + temp = scnprintf (next, size, fmt, temp, scratch); + size -= temp; + next += temp; + if (i == HCS_DEBUG_PORT(ehci->hcs_params) && ehci->debug) { + temp = scnprintf (next, size, + " debug control %08x\n", + ehci_readl(ehci, + &ehci->debug->control)); + size -= temp; + next += temp; + } + } + + if (!list_empty(&ehci->async_unlink)) { + temp = scnprintf(next, size, "async unlink qh %p\n", + list_first_entry(&ehci->async_unlink, + struct ehci_qh, unlink_node)); + size -= temp; + next += temp; + } + +#ifdef EHCI_STATS + temp = scnprintf (next, size, + "irq normal %ld err %ld iaa %ld (lost %ld)\n", + ehci->stats.normal, ehci->stats.error, ehci->stats.iaa, + ehci->stats.lost_iaa); + size -= temp; + next += temp; + + temp = scnprintf (next, size, "complete %ld unlink %ld\n", + ehci->stats.complete, ehci->stats.unlink); + size -= temp; + next += temp; +#endif + +done: + spin_unlock_irqrestore (&ehci->lock, flags); + + return buf->alloc_size - size; +} + +static struct debug_buffer *alloc_buffer(struct usb_bus *bus, + ssize_t (*fill_func)(struct debug_buffer *)) +{ + struct debug_buffer *buf; + + buf = kzalloc(sizeof(struct debug_buffer), 
GFP_KERNEL); + + if (buf) { + buf->bus = bus; + buf->fill_func = fill_func; + mutex_init(&buf->mutex); + buf->alloc_size = PAGE_SIZE; + } + + return buf; +} + +static int fill_buffer(struct debug_buffer *buf) +{ + int ret = 0; + + if (!buf->output_buf) + buf->output_buf = vmalloc(buf->alloc_size); + + if (!buf->output_buf) { + ret = -ENOMEM; + goto out; + } + + ret = buf->fill_func(buf); + + if (ret >= 0) { + buf->count = ret; + ret = 0; + } + +out: + return ret; +} + +static ssize_t debug_output(struct file *file, char __user *user_buf, + size_t len, loff_t *offset) +{ + struct debug_buffer *buf = file->private_data; + int ret = 0; + + mutex_lock(&buf->mutex); + if (buf->count == 0) { + ret = fill_buffer(buf); + if (ret != 0) { + mutex_unlock(&buf->mutex); + goto out; + } + } + mutex_unlock(&buf->mutex); + + ret = simple_read_from_buffer(user_buf, len, offset, + buf->output_buf, buf->count); + +out: + return ret; + +} + +static int debug_close(struct inode *inode, struct file *file) +{ + struct debug_buffer *buf = file->private_data; + + if (buf) { + vfree(buf->output_buf); + kfree(buf); + } + + return 0; +} +static int debug_async_open(struct inode *inode, struct file *file) +{ + file->private_data = alloc_buffer(inode->i_private, fill_async_buffer); + + return file->private_data ? 0 : -ENOMEM; +} + +static int debug_periodic_open(struct inode *inode, struct file *file) +{ + struct debug_buffer *buf; + buf = alloc_buffer(inode->i_private, fill_periodic_buffer); + if (!buf) + return -ENOMEM; + + buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE; + file->private_data = buf; + return 0; +} + +static int debug_registers_open(struct inode *inode, struct file *file) +{ + file->private_data = alloc_buffer(inode->i_private, + fill_registers_buffer); + + return file->private_data ? 0 : -ENOMEM; +} + +static inline void create_debug_files (struct ehci_hcd *ehci) +{ + struct usb_bus *bus = &ehci_to_hcd(ehci)->self; + + ehci->debug_dir = debugfs_create_dir(bus->bus_name, ehci_debug_root); + if (!ehci->debug_dir) + return; + + if (!debugfs_create_file("async", S_IRUGO, ehci->debug_dir, bus, + &debug_async_fops)) + goto file_error; + + if (!debugfs_create_file("periodic", S_IRUGO, ehci->debug_dir, bus, + &debug_periodic_fops)) + goto file_error; + + if (!debugfs_create_file("registers", S_IRUGO, ehci->debug_dir, bus, + &debug_registers_fops)) + goto file_error; + + return; + +file_error: + debugfs_remove_recursive(ehci->debug_dir); +} + +static inline void remove_debug_files (struct ehci_hcd *ehci) +{ + debugfs_remove_recursive(ehci->debug_dir); +} + +#endif /* STUB_DEBUG_FILES */ diff --git a/addons/ehci-pci/src/3.10.108/ehci-hcd.c b/addons/ehci-pci/src/3.10.108/ehci-hcd.c new file mode 100644 index 00000000..5a160063 --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/ehci-hcd.c @@ -0,0 +1,1391 @@ +/* + * Enhanced Host Controller Interface (EHCI) driver for USB. + * + * Maintainer: Alan Stern + * + * Copyright (c) 2000-2004 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. 
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+#if defined(CONFIG_PPC_PS3)
+#include <asm/firmware.h>
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI hc_driver implementation ... experimental, incomplete.
+ * Based on the final 1.0 register interface specification.
+ *
+ * USB 2.0 shows up in upcoming www.pcmcia.org technology.
+ * First was PCMCIA, like ISA; then CardBus, which is PCI.
+ * Next comes "CardBay", using USB 2.0 signals.
+ *
+ * Contains additional contributions by Brad Hards, Rory Bolt, and others.
+ * Special thanks to Intel and VIA for providing host controllers to
+ * test this driver on, and Cypress (including In-System Design) for
+ * providing early devices for those host controllers to talk to!
+ */
+
+#define DRIVER_AUTHOR "David Brownell"
+#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
+
+static const char hcd_name [] = "ehci_hcd";
+
+
+#undef VERBOSE_DEBUG
+#undef EHCI_URB_TRACE
+
+/* magic numbers that can affect system performance */
+#define EHCI_TUNE_CERR 3 /* 0-3 qtd retries; 0 == don't stop */
+#define EHCI_TUNE_RL_HS 4 /* nak throttle; see 4.9 */
+#define EHCI_TUNE_RL_TT 0
+#define EHCI_TUNE_MULT_HS 1 /* 1-3 transactions/uframe; 4.10.3 */
+#define EHCI_TUNE_MULT_TT 1
+/*
+ * Some drivers think it's safe to schedule isochronous transfers more than
+ * 256 ms into the future (partly as a result of an old bug in the scheduling
+ * code). In an attempt to avoid trouble, we will use a minimum scheduling
+ * length of 512 frames instead of 256.
+ */
+#define EHCI_TUNE_FLS 1 /* (medium) 512-frame schedule */
+
+/* Initial IRQ latency: faster than hw default */
+static int log2_irq_thresh = 0; // 0 to 6
+module_param (log2_irq_thresh, int, S_IRUGO);
+MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
+
+/* initial park setting: slower than hw default */
+static unsigned park = 0;
+module_param (park, uint, S_IRUGO);
+MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
+
+/* for flakey hardware, ignore overcurrent indicators */
+static bool ignore_oc = 0;
+module_param (ignore_oc, bool, S_IRUGO);
+MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
+
+#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+/*-------------------------------------------------------------------------*/
+
+#include "ehci.h"
+#include "pci-quirks.h"
+
+/*
+ * The MosChip MCS9990 controller updates its microframe counter
+ * a little before the frame counter, and occasionally we will read
+ * the invalid intermediate value. Avoid problems by checking the
+ * microframe number (the low-order 3 bits); if they are 0 then
+ * re-read the register to get the correct value.
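+ * (hence the single conditional re-read in
+ * ehci_moschip_read_frame_index() below).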
+ */ +static unsigned ehci_moschip_read_frame_index(struct ehci_hcd *ehci) +{ + unsigned uf; + + uf = ehci_readl(ehci, &ehci->regs->frame_index); + if (unlikely((uf & 7) == 0)) + uf = ehci_readl(ehci, &ehci->regs->frame_index); + return uf; +} + +static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci) +{ + if (ehci->frame_index_bug) + return ehci_moschip_read_frame_index(ehci); + return ehci_readl(ehci, &ehci->regs->frame_index); +} + +#include "ehci-dbg.c" + +/*-------------------------------------------------------------------------*/ + +/* + * handshake - spin reading hc until handshake completes or fails + * @ptr: address of hc register to be read + * @mask: bits to look at in result of read + * @done: value of those bits when handshake succeeds + * @usec: timeout in microseconds + * + * Returns negative errno, or zero on success + * + * Success happens when the "mask" bits have the specified value (hardware + * handshake done). There are two failure modes: "usec" have passed (major + * hardware flakeout), or the register reads as all-ones (hardware removed). + * + * That last failure should_only happen in cases like physical cardbus eject + * before driver shutdown. But it also seems to be caused by bugs in cardbus + * bridge shutdown: shutting down the bridge before the devices using it. + */ +static int handshake (struct ehci_hcd *ehci, void __iomem *ptr, + u32 mask, u32 done, int usec) +{ + u32 result; + + do { + result = ehci_readl(ehci, ptr); + if (result == ~(u32)0) /* card removed */ + return -ENODEV; + result &= mask; + if (result == done) + return 0; + udelay (1); + usec--; + } while (usec > 0); + return -ETIMEDOUT; +} + +/* check TDI/ARC silicon is in host mode */ +static int tdi_in_host_mode (struct ehci_hcd *ehci) +{ + u32 tmp; + + tmp = ehci_readl(ehci, &ehci->regs->usbmode); + return (tmp & 3) == USBMODE_CM_HC; +} + +/* + * Force HC to halt state from unknown (EHCI spec section 2.3). + * Must be called with interrupts enabled and the lock not held. + */ +static int ehci_halt (struct ehci_hcd *ehci) +{ + u32 temp; + + spin_lock_irq(&ehci->lock); + + /* disable any irqs left enabled by previous code */ + ehci_writel(ehci, 0, &ehci->regs->intr_enable); + + if (ehci_is_TDI(ehci) && !tdi_in_host_mode(ehci)) { + spin_unlock_irq(&ehci->lock); + return 0; + } + + /* + * This routine gets called during probe before ehci->command + * has been initialized, so we can't rely on its value. + */ + ehci->command &= ~CMD_RUN; + temp = ehci_readl(ehci, &ehci->regs->command); + temp &= ~(CMD_RUN | CMD_IAAD); + ehci_writel(ehci, temp, &ehci->regs->command); + + spin_unlock_irq(&ehci->lock); + synchronize_irq(ehci_to_hcd(ehci)->irq); + + return handshake(ehci, &ehci->regs->status, + STS_HALT, STS_HALT, 16 * 125); +} + +/* put TDI/ARC silicon into EHCI mode */ +static void tdi_reset (struct ehci_hcd *ehci) +{ + u32 tmp; + + tmp = ehci_readl(ehci, &ehci->regs->usbmode); + tmp |= USBMODE_CM_HC; + /* The default byte access to MMR space is LE after + * controller reset. Set the required endian mode + * for transfer buffers to match the host microprocessor + */ + if (ehci_big_endian_mmio(ehci)) + tmp |= USBMODE_BE; + ehci_writel(ehci, tmp, &ehci->regs->usbmode); +} + +/* + * Reset a non-running (STS_HALT == 1) controller. + * Must be called with interrupts enabled and the lock not held. 
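+ * If an EHCI debug-port device is active, dbgp_reset_prep() and
+ * dbgp_external_startup() bracket the reset so the debug gadget
+ * survives it.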
+ */ +static int ehci_reset (struct ehci_hcd *ehci) +{ + int retval; + u32 command = ehci_readl(ehci, &ehci->regs->command); + + /* If the EHCI debug controller is active, special care must be + * taken before and after a host controller reset */ + if (ehci->debug && !dbgp_reset_prep(ehci_to_hcd(ehci))) + ehci->debug = NULL; + + command |= CMD_RESET; + dbg_cmd (ehci, "reset", command); + ehci_writel(ehci, command, &ehci->regs->command); + ehci->rh_state = EHCI_RH_HALTED; + ehci->next_statechange = jiffies; + retval = handshake (ehci, &ehci->regs->command, + CMD_RESET, 0, 250 * 1000); + + if (ehci->has_hostpc) { + ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS, + &ehci->regs->usbmode_ex); + ehci_writel(ehci, TXFIFO_DEFAULT, &ehci->regs->txfill_tuning); + } + if (retval) + return retval; + + if (ehci_is_TDI(ehci)) + tdi_reset (ehci); + + if (ehci->debug) + dbgp_external_startup(ehci_to_hcd(ehci)); + + ehci->port_c_suspend = ehci->suspended_ports = + ehci->resuming_ports = 0; + return retval; +} + +/* + * Idle the controller (turn off the schedules). + * Must be called with interrupts enabled and the lock not held. + */ +static void ehci_quiesce (struct ehci_hcd *ehci) +{ + u32 temp; + + if (ehci->rh_state != EHCI_RH_RUNNING) + return; + + /* wait for any schedule enables/disables to take effect */ + temp = (ehci->command << 10) & (STS_ASS | STS_PSS); + handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, temp, 16 * 125); + + /* then disable anything that's still active */ + spin_lock_irq(&ehci->lock); + ehci->command &= ~(CMD_ASE | CMD_PSE); + ehci_writel(ehci, ehci->command, &ehci->regs->command); + spin_unlock_irq(&ehci->lock); + + /* hardware can take 16 microframes to turn off ... */ + handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, 0, 16 * 125); +} + +/*-------------------------------------------------------------------------*/ + +static void end_unlink_async(struct ehci_hcd *ehci); +static void unlink_empty_async(struct ehci_hcd *ehci); +static void unlink_empty_async_suspended(struct ehci_hcd *ehci); +static void ehci_work(struct ehci_hcd *ehci); +static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); +static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); + +#include "ehci-timer.c" +#include "ehci-hub.c" +#include "ehci-mem.c" +#include "ehci-q.c" +#include "ehci-sched.c" +#include "ehci-sysfs.c" + +/*-------------------------------------------------------------------------*/ + +/* On some systems, leaving remote wakeup enabled prevents system shutdown. + * The firmware seems to think that powering off is a wakeup event! + * This routine turns off remote wakeup and everything else, on all ports. + */ +static void ehci_turn_off_all_ports(struct ehci_hcd *ehci) +{ + int port = HCS_N_PORTS(ehci->hcs_params); + + while (port--) + ehci_writel(ehci, PORT_RWC_BITS, + &ehci->regs->port_status[port]); +} + +/* + * Halt HC, turn off all ports, and let the BIOS use the companion controllers. + * Must be called with interrupts enabled and the lock not held. + */ +static void ehci_silence_controller(struct ehci_hcd *ehci) +{ + ehci_halt(ehci); + + spin_lock_irq(&ehci->lock); + ehci->rh_state = EHCI_RH_HALTED; + ehci_turn_off_all_ports(ehci); + + /* make BIOS/etc use companion controller during reboot */ + ehci_writel(ehci, 0, &ehci->regs->configured_flag); + + /* unblock posted writes */ + ehci_readl(ehci, &ehci->regs->configured_flag); + spin_unlock_irq(&ehci->lock); +} + +/* ehci_shutdown kick in for silicon on any bus (not just pci, etc). 
+ * This forcibly disables dma and IRQs, helping kexec and other cases + * where the next system software may expect clean state. + */ +static void ehci_shutdown(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + + spin_lock_irq(&ehci->lock); + ehci->shutdown = true; + ehci->rh_state = EHCI_RH_STOPPING; + ehci->enabled_hrtimer_events = 0; + spin_unlock_irq(&ehci->lock); + + ehci_silence_controller(ehci); + + hrtimer_cancel(&ehci->hrtimer); +} + +/*-------------------------------------------------------------------------*/ + +/* + * ehci_work is called from some interrupts, timers, and so on. + * it calls driver completion functions, after dropping ehci->lock. + */ +static void ehci_work (struct ehci_hcd *ehci) +{ + /* another CPU may drop ehci->lock during a schedule scan while + * it reports urb completions. this flag guards against bogus + * attempts at re-entrant schedule scanning. + */ + if (ehci->scanning) { + ehci->need_rescan = true; + return; + } + ehci->scanning = true; + + rescan: + ehci->need_rescan = false; + if (ehci->async_count) + scan_async(ehci); + if (ehci->intr_count > 0) + scan_intr(ehci); + if (ehci->isoc_count > 0) + scan_isoc(ehci); + if (ehci->need_rescan) + goto rescan; + ehci->scanning = false; + + /* the IO watchdog guards against hardware or driver bugs that + * misplace IRQs, and should let us run completely without IRQs. + * such lossage has been observed on both VT6202 and VT8235. + */ + turn_on_io_watchdog(ehci); +} + +/* + * Called when the ehci_hcd module is removed. + */ +static void ehci_stop (struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + + ehci_dbg (ehci, "stop\n"); + + /* no more interrupts ... */ + + spin_lock_irq(&ehci->lock); + ehci->enabled_hrtimer_events = 0; + spin_unlock_irq(&ehci->lock); + + ehci_quiesce(ehci); + ehci_silence_controller(ehci); + ehci_reset (ehci); + + hrtimer_cancel(&ehci->hrtimer); + remove_sysfs_files(ehci); + remove_debug_files (ehci); + + /* root hub is shut down separately (first, when possible) */ + spin_lock_irq (&ehci->lock); + end_free_itds(ehci); + spin_unlock_irq (&ehci->lock); + ehci_mem_cleanup (ehci); + + if (ehci->amd_pll_fix == 1) + usb_amd_dev_put(); + +#ifdef EHCI_STATS + ehci_dbg(ehci, "irq normal %ld err %ld iaa %ld (lost %ld)\n", + ehci->stats.normal, ehci->stats.error, ehci->stats.iaa, + ehci->stats.lost_iaa); + ehci_dbg (ehci, "complete %ld unlink %ld\n", + ehci->stats.complete, ehci->stats.unlink); +#endif + + dbg_status (ehci, "ehci_stop completed", + ehci_readl(ehci, &ehci->regs->status)); +} + +/* one-time init, only for memory state */ +static int ehci_init(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + u32 temp; + int retval; + u32 hcc_params; + struct ehci_qh_hw *hw; + + spin_lock_init(&ehci->lock); + + /* + * keep io watchdog by default, those good HCDs could turn off it later + */ + ehci->need_io_watchdog = 1; + + hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + ehci->hrtimer.function = ehci_hrtimer_func; + ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT; + + hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); + + /* + * by default set standard 80% (== 100 usec/uframe) max periodic + * bandwidth as required by USB 2.0 + */ + ehci->uframe_periodic_max = 100; + + /* + * hw default: 1K periodic list heads, one per frame. + * periodic_size can shrink by USBCMD update if hcc_params allows. 
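+ * (HCC_PGM_FRAMELISTLEN is checked just below; EHCI_TUNE_FLS then
+ * selects 1024, 512 or 256 entries).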
+ */ + ehci->periodic_size = DEFAULT_I_TDPS; + INIT_LIST_HEAD(&ehci->async_unlink); + INIT_LIST_HEAD(&ehci->async_idle); + INIT_LIST_HEAD(&ehci->intr_unlink); + INIT_LIST_HEAD(&ehci->intr_qh_list); + INIT_LIST_HEAD(&ehci->cached_itd_list); + INIT_LIST_HEAD(&ehci->cached_sitd_list); + + if (HCC_PGM_FRAMELISTLEN(hcc_params)) { + /* periodic schedule size can be smaller than default */ + switch (EHCI_TUNE_FLS) { + case 0: ehci->periodic_size = 1024; break; + case 1: ehci->periodic_size = 512; break; + case 2: ehci->periodic_size = 256; break; + default: BUG(); + } + } + if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) + return retval; + + /* controllers may cache some of the periodic schedule ... */ + if (HCC_ISOC_CACHE(hcc_params)) // full frame cache + ehci->i_thresh = 0; + else // N microframes cached + ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params); + + /* + * dedicate a qh for the async ring head, since we couldn't unlink + * a 'real' qh without stopping the async schedule [4.8]. use it + * as the 'reclamation list head' too. + * its dummy is used in hw_alt_next of many tds, to prevent the qh + * from automatically advancing to the next td after short reads. + */ + ehci->async->qh_next.qh = NULL; + hw = ehci->async->hw; + hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); + hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); +#if defined(CONFIG_PPC_PS3) + hw->hw_info1 |= cpu_to_hc32(ehci, QH_INACTIVATE); +#endif + hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); + hw->hw_qtd_next = EHCI_LIST_END(ehci); + ehci->async->qh_state = QH_STATE_LINKED; + hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma); + + /* clear interrupt enables, set irq latency */ + if (log2_irq_thresh < 0 || log2_irq_thresh > 6) + log2_irq_thresh = 0; + temp = 1 << (16 + log2_irq_thresh); + if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) { + ehci->has_ppcd = 1; + ehci_dbg(ehci, "enable per-port change event\n"); + temp |= CMD_PPCEE; + } + if (HCC_CANPARK(hcc_params)) { + /* HW default park == 3, on hardware that supports it (like + * NVidia and ALI silicon), maximizes throughput on the async + * schedule by avoiding QH fetches between transfers. + * + * With fast usb storage devices and NForce2, "park" seems to + * make problems: throughput reduction (!), data errors... + */ + if (park) { + park = min(park, (unsigned) 3); + temp |= CMD_PARK; + temp |= park << 8; + } + ehci_dbg(ehci, "park %d\n", park); + } + if (HCC_PGM_FRAMELISTLEN(hcc_params)) { + /* periodic schedule size can be smaller than default */ + temp &= ~(3 << 2); + temp |= (EHCI_TUNE_FLS << 2); + } + ehci->command = temp; + + /* Accept arbitrarily long scatter-gather lists */ + if (!(hcd->driver->flags & HCD_LOCAL_MEM)) + hcd->self.sg_tablesize = ~0; + return 0; +} + +/* start HC running; it's halted, ehci_init() has been run (once) */ +static int ehci_run (struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + u32 temp; + u32 hcc_params; + + hcd->uses_new_polling = 1; + + /* EHCI spec section 4.1 */ + + ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list); + ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next); + + /* + * hcc_params controls whether ehci->regs->segment must (!!!) + * be used; it constrains QH/ITD/SITD and QTD locations. + * pci_pool consistent memory always uses segment zero. + * streaming mappings for I/O buffers, like pci_map_single(), + * can return segments above 4GB, if the device allows. + * + * NOTE: the dma mask is visible through dma_supported(), so + * drivers can pass this info along ... 
like NETIF_F_HIGHDMA, + * Scsi_Host.highmem_io, and so forth. It's readonly to all + * host side drivers though. + */ + hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); + if (HCC_64BIT_ADDR(hcc_params)) { + ehci_writel(ehci, 0, &ehci->regs->segment); +#if 0 +// this is deeply broken on almost all architectures + if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64))) + ehci_info(ehci, "enabled 64bit DMA\n"); +#endif + } + + + // Philips, Intel, and maybe others need CMD_RUN before the + // root hub will detect new devices (why?); NEC doesn't + ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET); + ehci->command |= CMD_RUN; + ehci_writel(ehci, ehci->command, &ehci->regs->command); + dbg_cmd (ehci, "init", ehci->command); + + /* + * Start, enabling full USB 2.0 functionality ... usb 1.1 devices + * are explicitly handed to companion controller(s), so no TT is + * involved with the root hub. (Except where one is integrated, + * and there's no companion controller unless maybe for USB OTG.) + * + * Turning on the CF flag will transfer ownership of all ports + * from the companions to the EHCI controller. If any of the + * companions are in the middle of a port reset at the time, it + * could cause trouble. Write-locking ehci_cf_port_reset_rwsem + * guarantees that no resets are in progress. After we set CF, + * a short delay lets the hardware catch up; new resets shouldn't + * be started before the port switching actions could complete. + */ + down_write(&ehci_cf_port_reset_rwsem); + ehci->rh_state = EHCI_RH_RUNNING; + ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); + ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ + msleep(5); + up_write(&ehci_cf_port_reset_rwsem); + ehci->last_periodic_enable = ktime_get_real(); + + temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); + ehci_info (ehci, + "USB %x.%x started, EHCI %x.%02x%s\n", + ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), + temp >> 8, temp & 0xff, + ignore_oc ? ", overcurrent ignored" : ""); + + ehci_writel(ehci, INTR_MASK, + &ehci->regs->intr_enable); /* Turn On Interrupts */ + + /* GRR this is run-once init(), being done every time the HC starts. + * So long as they're part of class devices, we can't do it init() + * since the class device isn't created that early. + */ + create_debug_files(ehci); + create_sysfs_files(ehci); + + return 0; +} + +int ehci_setup(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + int retval; + + ehci->regs = (void __iomem *)ehci->caps + + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); + dbg_hcs_params(ehci, "reset"); + dbg_hcc_params(ehci, "reset"); + + /* cache this readonly data; minimize chip reads */ + ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); + + ehci->sbrn = HCD_USB2; + + /* data structure init */ + retval = ehci_init(hcd); + if (retval) + return retval; + + retval = ehci_halt(ehci); + if (retval) + return retval; + + ehci_reset(ehci); + + return 0; +} +EXPORT_SYMBOL_GPL(ehci_setup); + +/*-------------------------------------------------------------------------*/ + +static irqreturn_t ehci_irq (struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + u32 status, masked_status, pcd_status = 0, cmd; + int bh; + unsigned long flags; + + /* + * For threadirqs option we use spin_lock_irqsave() variant to prevent + * deadlock with ehci hrtimer callback, because hrtimer callbacks run + * in interrupt context even when threadirqs is specified. 
We can go + * back to spin_lock() variant when hrtimer callbacks become threaded. + */ + spin_lock_irqsave(&ehci->lock, flags); + + status = ehci_readl(ehci, &ehci->regs->status); + + /* e.g. cardbus physical eject */ + if (status == ~(u32) 0) { + ehci_dbg (ehci, "device removed\n"); + goto dead; + } + + /* + * We don't use STS_FLR, but some controllers don't like it to + * remain on, so mask it out along with the other status bits. + */ + masked_status = status & (INTR_MASK | STS_FLR); + + /* Shared IRQ? */ + if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) { + spin_unlock_irqrestore(&ehci->lock, flags); + return IRQ_NONE; + } + + /* clear (just) interrupts */ + ehci_writel(ehci, masked_status, &ehci->regs->status); + cmd = ehci_readl(ehci, &ehci->regs->command); + bh = 0; + +#ifdef VERBOSE_DEBUG + /* unrequested/ignored: Frame List Rollover */ + dbg_status (ehci, "irq", status); +#endif + + /* INT, ERR, and IAA interrupt rates can be throttled */ + + /* normal [4.15.1.2] or error [4.15.1.1] completion */ + if (likely ((status & (STS_INT|STS_ERR)) != 0)) { + if (likely ((status & STS_ERR) == 0)) + COUNT (ehci->stats.normal); + else + COUNT (ehci->stats.error); + bh = 1; + } + + /* complete the unlinking of some qh [4.15.2.3] */ + if (status & STS_IAA) { + + /* Turn off the IAA watchdog */ + ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_IAA_WATCHDOG); + + /* + * Mild optimization: Allow another IAAD to reset the + * hrtimer, if one occurs before the next expiration. + * In theory we could always cancel the hrtimer, but + * tests show that about half the time it will be reset + * for some other event anyway. + */ + if (ehci->next_hrtimer_event == EHCI_HRTIMER_IAA_WATCHDOG) + ++ehci->next_hrtimer_event; + + /* guard against (alleged) silicon errata */ + if (cmd & CMD_IAAD) + ehci_dbg(ehci, "IAA with IAAD still set?\n"); + if (ehci->iaa_in_progress) + COUNT(ehci->stats.iaa); + end_unlink_async(ehci); + } + + /* remote wakeup [4.3.1] */ + if (status & STS_PCD) { + unsigned i = HCS_N_PORTS (ehci->hcs_params); + u32 ppcd = ~0; + + /* kick root hub later */ + pcd_status = status; + + /* resume root hub? */ + if (ehci->rh_state == EHCI_RH_SUSPENDED) + usb_hcd_resume_root_hub(hcd); + + /* get per-port change detect bits */ + if (ehci->has_ppcd) + ppcd = status >> 16; + + while (i--) { + int pstatus; + + /* leverage per-port change bits feature */ + if (!(ppcd & (1 << i))) + continue; + pstatus = ehci_readl(ehci, + &ehci->regs->port_status[i]); + + if (pstatus & PORT_OWNER) + continue; + if (!(test_bit(i, &ehci->suspended_ports) && + ((pstatus & PORT_RESUME) || + !(pstatus & PORT_SUSPEND)) && + (pstatus & PORT_PE) && + ehci->reset_done[i] == 0)) + continue; + + /* start 20 msec resume signaling from this port, + * and make khubd collect PORT_STAT_C_SUSPEND to + * stop that signaling. Use 5 ms extra for safety, + * like usb_port_resume() does. 
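+         *
+         * (Editor's note: spelled out as arithmetic, the deadline set
+         * on the next line is 20 ms + 5 ms. The sketch below is
+         * editorial, "deadline" is a hypothetical variable, and the
+         * block is not compiled.)
+         */
+#if 0   /* editor's illustration -- not compiled */
+        /* msecs_to_jiffies() rounds up to whole ticks */
+        deadline = jiffies + msecs_to_jiffies(20 + 5);  /* 25 ms total */
+#endif
+        /*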
+ */ + ehci->reset_done[i] = jiffies + msecs_to_jiffies(25); + set_bit(i, &ehci->resuming_ports); + ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); + usb_hcd_start_port_resume(&hcd->self, i); + mod_timer(&hcd->rh_timer, ehci->reset_done[i]); + } + } + + /* PCI errors [4.15.2.4] */ + if (unlikely ((status & STS_FATAL) != 0)) { + ehci_err(ehci, "fatal error\n"); + dbg_cmd(ehci, "fatal", cmd); + dbg_status(ehci, "fatal", status); +dead: + usb_hc_died(hcd); + + /* Don't let the controller do anything more */ + ehci->shutdown = true; + ehci->rh_state = EHCI_RH_STOPPING; + ehci->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE); + ehci_writel(ehci, ehci->command, &ehci->regs->command); + ehci_writel(ehci, 0, &ehci->regs->intr_enable); + ehci_handle_controller_death(ehci); + + /* Handle completions when the controller stops */ + bh = 0; + } + + if (bh) + ehci_work (ehci); + spin_unlock_irqrestore(&ehci->lock, flags); + if (pcd_status) + usb_hcd_poll_rh_status(hcd); + return IRQ_HANDLED; +} + +/*-------------------------------------------------------------------------*/ + +/* + * non-error returns are a promise to giveback() the urb later + * we drop ownership so next owner (or urb unlink) can get it + * + * urb + dev is in hcd.self.controller.urb_list + * we're queueing TDs onto software and hardware lists + * + * hcd-specific init for hcpriv hasn't been done yet + * + * NOTE: control, bulk, and interrupt share the same code to append TDs + * to a (possibly active) QH, and the same QH scanning code. + */ +static int ehci_urb_enqueue ( + struct usb_hcd *hcd, + struct urb *urb, + gfp_t mem_flags +) { + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + struct list_head qtd_list; + + INIT_LIST_HEAD (&qtd_list); + + switch (usb_pipetype (urb->pipe)) { + case PIPE_CONTROL: + /* qh_completions() code doesn't handle all the fault cases + * in multi-TD control transfers. Even 1KB is rare anyway. + */ + if (urb->transfer_buffer_length > (16 * 1024)) + return -EMSGSIZE; + /* FALLTHROUGH */ + /* case PIPE_BULK: */ + default: + if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags)) + return -ENOMEM; + return submit_async(ehci, urb, &qtd_list, mem_flags); + + case PIPE_INTERRUPT: + if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags)) + return -ENOMEM; + return intr_submit(ehci, urb, &qtd_list, mem_flags); + + case PIPE_ISOCHRONOUS: + if (urb->dev->speed == USB_SPEED_HIGH) + return itd_submit (ehci, urb, mem_flags); + else + return sitd_submit (ehci, urb, mem_flags); + } +} + +/* remove from hardware lists + * completions normally happen asynchronously + */ + +static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + struct ehci_qh *qh; + unsigned long flags; + int rc; + + spin_lock_irqsave (&ehci->lock, flags); + rc = usb_hcd_check_unlink_urb(hcd, urb, status); + if (rc) + goto done; + + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + /* + * We don't expedite dequeue for isochronous URBs. + * Just wait until they complete normally or their + * time slot expires. 
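+         *
+         * (Editor's note: the predicate below is an editorial sketch,
+         * not part of the driver, restating this function's dequeue
+         * policy in one place.)
+         */
+#if 0   /* editor's illustration -- not compiled */
+static inline int expedite_dequeue(unsigned int pipe_type)
+{
+    /* control/bulk/interrupt URBs are unlinked immediately;
+     * isochronous URBs simply age out of the schedule */
+    return pipe_type != PIPE_ISOCHRONOUS;
+}
+#endif
+        /*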
+ */ + } else { + qh = (struct ehci_qh *) urb->hcpriv; + qh->exception = 1; + switch (qh->qh_state) { + case QH_STATE_LINKED: + if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) + start_unlink_intr(ehci, qh); + else + start_unlink_async(ehci, qh); + break; + case QH_STATE_COMPLETING: + qh->dequeue_during_giveback = 1; + break; + case QH_STATE_UNLINK: + case QH_STATE_UNLINK_WAIT: + /* already started */ + break; + case QH_STATE_IDLE: + /* QH might be waiting for a Clear-TT-Buffer */ + qh_completions(ehci, qh); + break; + } + } +done: + spin_unlock_irqrestore (&ehci->lock, flags); + return rc; +} + +/*-------------------------------------------------------------------------*/ + +// bulk qh holds the data toggle + +static void +ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + unsigned long flags; + struct ehci_qh *qh, *tmp; + + /* ASSERT: any requests/urbs are being unlinked */ + /* ASSERT: nobody can be submitting urbs for this any more */ + +rescan: + spin_lock_irqsave (&ehci->lock, flags); + qh = ep->hcpriv; + if (!qh) + goto done; + + /* endpoints can be iso streams. for now, we don't + * accelerate iso completions ... so spin a while. + */ + if (qh->hw == NULL) { + struct ehci_iso_stream *stream = ep->hcpriv; + + if (!list_empty(&stream->td_list)) + goto idle_timeout; + + /* BUG_ON(!list_empty(&stream->free_list)); */ + kfree(stream); + goto done; + } + + qh->exception = 1; + switch (qh->qh_state) { + case QH_STATE_LINKED: + case QH_STATE_COMPLETING: + for (tmp = ehci->async->qh_next.qh; + tmp && tmp != qh; + tmp = tmp->qh_next.qh) + continue; + /* periodic qh self-unlinks on empty, and a COMPLETING qh + * may already be unlinked. + */ + if (tmp) + start_unlink_async(ehci, qh); + /* FALL THROUGH */ + case QH_STATE_UNLINK: /* wait for hw to finish? */ + case QH_STATE_UNLINK_WAIT: +idle_timeout: + spin_unlock_irqrestore (&ehci->lock, flags); + schedule_timeout_uninterruptible(1); + goto rescan; + case QH_STATE_IDLE: /* fully unlinked */ + if (qh->clearing_tt) + goto idle_timeout; + if (list_empty (&qh->qtd_list)) { + qh_destroy(ehci, qh); + break; + } + /* else FALL THROUGH */ + default: + /* caller was supposed to have unlinked any requests; + * that's not our job. just leak this memory. + */ + ehci_err (ehci, "qh %p (#%02x) state %d%s\n", + qh, ep->desc.bEndpointAddress, qh->qh_state, + list_empty (&qh->qtd_list) ? "" : "(has tds)"); + break; + } + done: + ep->hcpriv = NULL; + spin_unlock_irqrestore (&ehci->lock, flags); +} + +static void +ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct ehci_qh *qh; + int eptype = usb_endpoint_type(&ep->desc); + int epnum = usb_endpoint_num(&ep->desc); + int is_out = usb_endpoint_dir_out(&ep->desc); + unsigned long flags; + + if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT) + return; + + spin_lock_irqsave(&ehci->lock, flags); + qh = ep->hcpriv; + + /* For Bulk and Interrupt endpoints we maintain the toggle state + * in the hardware; the toggle bits in udev aren't used at all. + * When an endpoint is reset by usb_clear_halt() we must reset + * the toggle bit in the QH. + */ + if (qh) { + usb_settoggle(qh->dev, epnum, is_out, 0); + if (!list_empty(&qh->qtd_list)) { + WARN_ONCE(1, "clear_halt for a busy endpoint\n"); + } else { + /* The toggle value in the QH can't be updated + * while the QH is active. Unlink it now; + * re-linking will call qh_refresh(). 
+ */ + qh->exception = 1; + if (eptype == USB_ENDPOINT_XFER_BULK) + start_unlink_async(ehci, qh); + else + start_unlink_intr(ehci, qh); + } + } + spin_unlock_irqrestore(&ehci->lock, flags); +} + +static int ehci_get_frame (struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + return (ehci_read_frame_index(ehci) >> 3) % ehci->periodic_size; +} + +/*-------------------------------------------------------------------------*/ + +#ifdef CONFIG_PM + +/* suspend/resume, section 4.3 */ + +/* These routines handle the generic parts of controller suspend/resume */ + +int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + + if (time_before(jiffies, ehci->next_statechange)) + msleep(10); + + /* + * Root hub was already suspended. Disable IRQ emission and + * mark HW unaccessible. The PM and USB cores make sure that + * the root hub is either suspended or stopped. + */ + ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup); + + spin_lock_irq(&ehci->lock); + ehci_writel(ehci, 0, &ehci->regs->intr_enable); + (void) ehci_readl(ehci, &ehci->regs->intr_enable); + + clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + spin_unlock_irq(&ehci->lock); + + return 0; +} +EXPORT_SYMBOL_GPL(ehci_suspend); + +/* Returns 0 if power was preserved, 1 if power was lost */ +int ehci_resume(struct usb_hcd *hcd, bool hibernated) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + + if (time_before(jiffies, ehci->next_statechange)) + msleep(100); + + /* Mark hardware accessible again as we are back to full power by now */ + set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + + if (ehci->shutdown) + return 0; /* Controller is dead */ + + /* + * If CF is still set and we aren't resuming from hibernation + * then we maintained suspend power. + * Just undo the effect of ehci_suspend(). + */ + if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF && + !hibernated) { + int mask = INTR_MASK; + + ehci_prepare_ports_for_controller_resume(ehci); + + spin_lock_irq(&ehci->lock); + if (ehci->shutdown) + goto skip; + + if (!hcd->self.root_hub->do_remote_wakeup) + mask &= ~STS_PCD; + ehci_writel(ehci, mask, &ehci->regs->intr_enable); + ehci_readl(ehci, &ehci->regs->intr_enable); + skip: + spin_unlock_irq(&ehci->lock); + return 0; + } + + /* + * Else reset, to cope with power loss or resume from hibernation + * having let the firmware kick in during reboot. + */ + usb_root_hub_lost_power(hcd->self.root_hub); + (void) ehci_halt(ehci); + (void) ehci_reset(ehci); + + spin_lock_irq(&ehci->lock); + if (ehci->shutdown) + goto skip; + + ehci_writel(ehci, ehci->command, &ehci->regs->command); + ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); + ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ + + ehci->rh_state = EHCI_RH_SUSPENDED; + spin_unlock_irq(&ehci->lock); + + return 1; +} +EXPORT_SYMBOL_GPL(ehci_resume); + +#endif + +/*-------------------------------------------------------------------------*/ + +/* + * Generic structure: This gets copied for platform drivers so that + * individual entries can be overridden as needed. 
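+ *
+ * (Editor's note: the snippet below is a hypothetical usage sketch, not
+ * part of this file. A platform glue driver copies the generic table
+ * with ehci_init_driver() -- defined further down -- and overrides only
+ * what it needs; "foo" is a made-up platform name.)
+ */
+#if 0   /* editor's illustration -- not compiled */
+static struct hc_driver foo_ehci_hc_driver;
+
+static const struct ehci_driver_overrides foo_overrides = {
+    .extra_priv_size = 0,   /* no extra private state */
+    .reset = NULL,          /* keep the generic ehci_setup() */
+};
+
+/* in the platform driver's init path:
+ *     ehci_init_driver(&foo_ehci_hc_driver, &foo_overrides);
+ */
+#endif
+/*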
+ */ + +static const struct hc_driver ehci_hc_driver = { + .description = hcd_name, + .product_desc = "EHCI Host Controller", + .hcd_priv_size = sizeof(struct ehci_hcd), + + /* + * generic hardware linkage + */ + .irq = ehci_irq, + .flags = HCD_MEMORY | HCD_USB2, + + /* + * basic lifecycle operations + */ + .reset = ehci_setup, + .start = ehci_run, + .stop = ehci_stop, + .shutdown = ehci_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ehci_urb_enqueue, + .urb_dequeue = ehci_urb_dequeue, + .endpoint_disable = ehci_endpoint_disable, + .endpoint_reset = ehci_endpoint_reset, + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, + + /* + * scheduling support + */ + .get_frame_number = ehci_get_frame, + + /* + * root hub support + */ + .hub_status_data = ehci_hub_status_data, + .hub_control = ehci_hub_control, + .bus_suspend = ehci_bus_suspend, + .bus_resume = ehci_bus_resume, + .relinquish_port = ehci_relinquish_port, + .port_handed_over = ehci_port_handed_over, +}; + +void ehci_init_driver(struct hc_driver *drv, + const struct ehci_driver_overrides *over) +{ + /* Copy the generic table to drv and then apply the overrides */ + *drv = ehci_hc_driver; + + if (over) { + drv->hcd_priv_size += over->extra_priv_size; + if (over->reset) + drv->reset = over->reset; + } +} +EXPORT_SYMBOL_GPL(ehci_init_driver); + +/*-------------------------------------------------------------------------*/ + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR (DRIVER_AUTHOR); +MODULE_LICENSE ("GPL"); + +#ifdef CONFIG_USB_EHCI_FSL +#include "ehci-fsl.c" +#define PLATFORM_DRIVER ehci_fsl_driver +#endif + +#ifdef CONFIG_USB_EHCI_SH +#include "ehci-sh.c" +#define PLATFORM_DRIVER ehci_hcd_sh_driver +#endif + +#ifdef CONFIG_PPC_PS3 +#include "ehci-ps3.c" +#define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver +#endif + +#ifdef CONFIG_USB_EHCI_HCD_PPC_OF +#include "ehci-ppc-of.c" +#define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver +#endif + +#ifdef CONFIG_XPS_USB_HCD_XILINX +#include "ehci-xilinx-of.c" +#define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver +#endif + +#ifdef CONFIG_USB_W90X900_EHCI +#include "ehci-w90x900.c" +#define PLATFORM_DRIVER ehci_hcd_w90x900_driver +#endif + +#ifdef CONFIG_USB_OCTEON_EHCI +#include "ehci-octeon.c" +#define PLATFORM_DRIVER ehci_octeon_driver +#endif + +#ifdef CONFIG_TILE_USB +#include "ehci-tilegx.c" +#define PLATFORM_DRIVER ehci_hcd_tilegx_driver +#endif + +#ifdef CONFIG_USB_EHCI_HCD_PMC_MSP +#include "ehci-pmcmsp.c" +#define PLATFORM_DRIVER ehci_hcd_msp_driver +#endif + +#ifdef CONFIG_USB_EHCI_TEGRA +#include "ehci-tegra.c" +#define PLATFORM_DRIVER tegra_ehci_driver +#endif + +#ifdef CONFIG_SPARC_LEON +#include "ehci-grlib.c" +#define PLATFORM_DRIVER ehci_grlib_driver +#endif + +#ifdef CONFIG_USB_EHCI_MV +#include "ehci-mv.c" +#define PLATFORM_DRIVER ehci_mv_driver +#endif + +#ifdef CONFIG_MIPS_SEAD3 +#include "ehci-sead3.c" +#define PLATFORM_DRIVER ehci_hcd_sead3_driver +#endif + +static int __init ehci_hcd_init(void) +{ + int retval = 0; + + if (usb_disabled()) + return -ENODEV; + + printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name); + set_bit(USB_EHCI_LOADED, &usb_hcds_loaded); + if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) || + test_bit(USB_OHCI_LOADED, &usb_hcds_loaded)) + printk(KERN_WARNING "Warning! 
ehci_hcd should always be loaded" + " before uhci_hcd and ohci_hcd, not after\n"); + + pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n", + hcd_name, + sizeof(struct ehci_qh), sizeof(struct ehci_qtd), + sizeof(struct ehci_itd), sizeof(struct ehci_sitd)); + +#ifdef DEBUG + ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root); + if (!ehci_debug_root) { + retval = -ENOENT; + goto err_debug; + } +#endif + +#ifdef PLATFORM_DRIVER + retval = platform_driver_register(&PLATFORM_DRIVER); + if (retval < 0) + goto clean0; +#endif + +#ifdef PS3_SYSTEM_BUS_DRIVER + retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER); + if (retval < 0) + goto clean2; +#endif + +#ifdef OF_PLATFORM_DRIVER + retval = platform_driver_register(&OF_PLATFORM_DRIVER); + if (retval < 0) + goto clean3; +#endif + +#ifdef XILINX_OF_PLATFORM_DRIVER + retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER); + if (retval < 0) + goto clean4; +#endif + return retval; + +#ifdef XILINX_OF_PLATFORM_DRIVER + /* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */ +clean4: +#endif +#ifdef OF_PLATFORM_DRIVER + platform_driver_unregister(&OF_PLATFORM_DRIVER); +clean3: +#endif +#ifdef PS3_SYSTEM_BUS_DRIVER + ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER); +clean2: +#endif +#ifdef PLATFORM_DRIVER + platform_driver_unregister(&PLATFORM_DRIVER); +clean0: +#endif +#ifdef DEBUG + debugfs_remove(ehci_debug_root); + ehci_debug_root = NULL; +err_debug: +#endif + clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); + return retval; +} +module_init(ehci_hcd_init); + +static void __exit ehci_hcd_cleanup(void) +{ +#ifdef XILINX_OF_PLATFORM_DRIVER + platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); +#endif +#ifdef OF_PLATFORM_DRIVER + platform_driver_unregister(&OF_PLATFORM_DRIVER); +#endif +#ifdef PLATFORM_DRIVER + platform_driver_unregister(&PLATFORM_DRIVER); +#endif +#ifdef PS3_SYSTEM_BUS_DRIVER + ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER); +#endif +#ifdef DEBUG + debugfs_remove(ehci_debug_root); +#endif + clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); +} +module_exit(ehci_hcd_cleanup); diff --git a/addons/ehci-pci/src/3.10.108/ehci-hub.c b/addons/ehci-pci/src/3.10.108/ehci-hub.c new file mode 100644 index 00000000..ca6289b4 --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/ehci-hub.c @@ -0,0 +1,1139 @@ +/* + * Copyright (C) 2001-2004 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* this file is part of ehci-hcd.c */ + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI Root Hub ... 
the nonsharable stuff
+ *
+ * Registers don't need cpu_to_le32, that happens transparently
+ */
+
+/*-------------------------------------------------------------------------*/
+#include <linux/usb/otg.h>
+
+#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
+
+#ifdef CONFIG_PM
+
+static int ehci_hub_control(
+    struct usb_hcd *hcd,
+    u16 typeReq,
+    u16 wValue,
+    u16 wIndex,
+    char *buf,
+    u16 wLength
+);
+
+/* After a power loss, ports that were owned by the companion must be
+ * reset so that the companion can still own them.
+ */
+static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
+{
+    u32 __iomem *reg;
+    u32 status;
+    int port;
+    __le32 buf;
+    struct usb_hcd *hcd = ehci_to_hcd(ehci);
+
+    if (!ehci->owned_ports)
+        return;
+
+    /* Make sure the ports are powered */
+    port = HCS_N_PORTS(ehci->hcs_params);
+    while (port--) {
+        if (test_bit(port, &ehci->owned_ports)) {
+            reg = &ehci->regs->port_status[port];
+            status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+            if (!(status & PORT_POWER)) {
+                status |= PORT_POWER;
+                ehci_writel(ehci, status, reg);
+            }
+        }
+    }
+
+    /* Give the connections some time to appear */
+    msleep(20);
+
+    spin_lock_irq(&ehci->lock);
+    port = HCS_N_PORTS(ehci->hcs_params);
+    while (port--) {
+        if (test_bit(port, &ehci->owned_ports)) {
+            reg = &ehci->regs->port_status[port];
+            status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+
+            /* Port already owned by companion? */
+            if (status & PORT_OWNER)
+                clear_bit(port, &ehci->owned_ports);
+            else if (test_bit(port, &ehci->companion_ports))
+                ehci_writel(ehci, status & ~PORT_PE, reg);
+            else {
+                spin_unlock_irq(&ehci->lock);
+                ehci_hub_control(hcd, SetPortFeature,
+                        USB_PORT_FEAT_RESET, port + 1,
+                        NULL, 0);
+                spin_lock_irq(&ehci->lock);
+            }
+        }
+    }
+    spin_unlock_irq(&ehci->lock);
+
+    if (!ehci->owned_ports)
+        return;
+    msleep(90); /* Wait for resets to complete */
+
+    spin_lock_irq(&ehci->lock);
+    port = HCS_N_PORTS(ehci->hcs_params);
+    while (port--) {
+        if (test_bit(port, &ehci->owned_ports)) {
+            spin_unlock_irq(&ehci->lock);
+            ehci_hub_control(hcd, GetPortStatus,
+                    0, port + 1,
+                    (char *) &buf, sizeof(buf));
+            spin_lock_irq(&ehci->lock);
+
+            /* The companion should now own the port,
+             * but if something went wrong the port must not
+             * remain enabled.
+             */
+            reg = &ehci->regs->port_status[port];
+            status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+            if (status & PORT_OWNER)
+                ehci_writel(ehci, status | PORT_CSC, reg);
+            else {
+                ehci_dbg(ehci, "failed handover port %d: %x\n",
+                        port + 1, status);
+                ehci_writel(ehci, status & ~PORT_PE, reg);
+            }
+        }
+    }
+
+    ehci->owned_ports = 0;
+    spin_unlock_irq(&ehci->lock);
+}
+
+static int ehci_port_change(struct ehci_hcd *ehci)
+{
+    int i = HCS_N_PORTS(ehci->hcs_params);
+
+    /* First check if the controller indicates a change event */
+
+    if (ehci_readl(ehci, &ehci->regs->status) & STS_PCD)
+        return 1;
+
+    /*
+     * Not all controllers appear to update this while going from D3 to D0,
+     * so check the individual port status registers as well
+     */
+
+    while (i--)
+        if (ehci_readl(ehci, &ehci->regs->port_status[i]) & PORT_CSC)
+            return 1;
+
+    return 0;
+}
+
+static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci,
+        bool suspending, bool do_wakeup)
+{
+    int port;
+    u32 temp;
+
+    /* If remote wakeup is enabled for the root hub but disabled
+     * for the controller, we must adjust all the port wakeup flags
+     * when the controller is suspended or resumed. In all other
+     * cases they don't need to be changed.
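+     *
+     * (Editor's note: the helper below is an editorial sketch, not
+     * original code. It captures the per-port wake-bit policy that
+     * both this function and ehci_bus_suspend() apply.)
+     */
+#if 0   /* editor's illustration -- not compiled */
+static inline u32 port_wake_bits_for(u32 portsc)
+{
+    /* always wake on overcurrent; wake on disconnect if something
+     * is attached, else wake on connect */
+    return PORT_WKOC_E |
+           ((portsc & PORT_CONNECT) ? PORT_WKDISC_E : PORT_WKCONN_E);
+}
+#endif
+    /*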
+     */
+    if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup)
+        return;
+
+    spin_lock_irq(&ehci->lock);
+
+    /* clear phy low-power mode before changing wakeup flags */
+    if (ehci->has_hostpc) {
+        port = HCS_N_PORTS(ehci->hcs_params);
+        while (port--) {
+            u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
+
+            temp = ehci_readl(ehci, hostpc_reg);
+            ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg);
+        }
+        spin_unlock_irq(&ehci->lock);
+        msleep(5);
+        spin_lock_irq(&ehci->lock);
+    }
+
+    port = HCS_N_PORTS(ehci->hcs_params);
+    while (port--) {
+        u32 __iomem *reg = &ehci->regs->port_status[port];
+        u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+        u32 t2 = t1 & ~PORT_WAKE_BITS;
+
+        /* If we are suspending the controller, clear the flags.
+         * If we are resuming the controller, set the wakeup flags.
+         */
+        if (!suspending) {
+            if (t1 & PORT_CONNECT)
+                t2 |= PORT_WKOC_E | PORT_WKDISC_E;
+            else
+                t2 |= PORT_WKOC_E | PORT_WKCONN_E;
+        }
+        ehci_vdbg(ehci, "port %d, %08x -> %08x\n",
+                port + 1, t1, t2);
+        ehci_writel(ehci, t2, reg);
+    }
+
+    /* enter phy low-power mode again */
+    if (ehci->has_hostpc) {
+        port = HCS_N_PORTS(ehci->hcs_params);
+        while (port--) {
+            u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
+
+            temp = ehci_readl(ehci, hostpc_reg);
+            ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg);
+        }
+    }
+
+    /* Does the root hub have a port wakeup pending? */
+    if (!suspending && ehci_port_change(ehci))
+        usb_hcd_resume_root_hub(ehci_to_hcd(ehci));
+
+    spin_unlock_irq(&ehci->lock);
+}
+
+static int ehci_bus_suspend (struct usb_hcd *hcd)
+{
+    struct ehci_hcd *ehci = hcd_to_ehci (hcd);
+    int port;
+    int mask;
+    int changed;
+
+    ehci_dbg(ehci, "suspend root hub\n");
+
+    if (time_before (jiffies, ehci->next_statechange))
+        msleep(5);
+
+    /* stop the schedules */
+    ehci_quiesce(ehci);
+
+    spin_lock_irq (&ehci->lock);
+    if (ehci->rh_state < EHCI_RH_RUNNING)
+        goto done;
+
+    /* Once the controller is stopped, port resumes that are already
+     * in progress won't complete. Hence if remote wakeup is enabled
+     * for the root hub and any ports are in the middle of a resume or
+     * remote wakeup, we must fail the suspend.
+     */
+    if (hcd->self.root_hub->do_remote_wakeup) {
+        if (ehci->resuming_ports) {
+            spin_unlock_irq(&ehci->lock);
+            ehci_dbg(ehci, "suspend failed because a port is resuming\n");
+            return -EBUSY;
+        }
+    }
+
+    /* Unlike other USB host controller types, EHCI doesn't have
+     * any notion of "global" or bus-wide suspend. The driver has
+     * to manually suspend all the active unsuspended ports, and
+     * then manually resume them in the bus_resume() routine.
+     */
+    ehci->bus_suspended = 0;
+    ehci->owned_ports = 0;
+    changed = 0;
+    port = HCS_N_PORTS(ehci->hcs_params);
+    while (port--) {
+        u32 __iomem *reg = &ehci->regs->port_status [port];
+        u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+        u32 t2 = t1 & ~PORT_WAKE_BITS;
+
+        /* keep track of which ports we suspend */
+        if (t1 & PORT_OWNER)
+            set_bit(port, &ehci->owned_ports);
+        else if ((t1 & PORT_PE) && !(t1 & PORT_SUSPEND)) {
+            t2 |= PORT_SUSPEND;
+            set_bit(port, &ehci->bus_suspended);
+        }
+
+        /* enable remote wakeup on all ports, if told to do so */
+        if (hcd->self.root_hub->do_remote_wakeup) {
+            /* only enable the appropriate wake bits, otherwise the
+             * hardware cannot enter PHY low-power mode. If a race
+             * occurs here (a connection change while the bits are
+             * being set), port change detection will eventually
+             * fix it up.
+             */
+            if (t1 & PORT_CONNECT)
+                t2 |= PORT_WKOC_E | PORT_WKDISC_E;
+            else
+                t2 |= PORT_WKOC_E | PORT_WKCONN_E;
+        }
+
+        if (t1 != t2) {
+            ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
+                port + 1, t1, t2);
+            ehci_writel(ehci, t2, reg);
+            changed = 1;
+        }
+    }
+
+    if (changed && ehci->has_hostpc) {
+        spin_unlock_irq(&ehci->lock);
+        msleep(5); /* 5 ms for HCD to enter low-power mode */
+        spin_lock_irq(&ehci->lock);
+
+        port = HCS_N_PORTS(ehci->hcs_params);
+        while (port--) {
+            u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
+            u32 t3;
+
+            t3 = ehci_readl(ehci, hostpc_reg);
+            ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
+            t3 = ehci_readl(ehci, hostpc_reg);
+            ehci_dbg(ehci, "Port %d phy low-power mode %s\n",
+                    port, (t3 & HOSTPC_PHCD) ?
+                    "succeeded" : "failed");
+        }
+    }
+    spin_unlock_irq(&ehci->lock);
+
+    /* Apparently some devices need a >= 1-uframe delay here */
+    if (ehci->bus_suspended)
+        udelay(150);
+
+    /* turn off now-idle HC */
+    ehci_halt (ehci);
+
+    spin_lock_irq(&ehci->lock);
+    if (ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_POLL_DEAD))
+        ehci_handle_controller_death(ehci);
+    if (ehci->rh_state != EHCI_RH_RUNNING)
+        goto done;
+    ehci->rh_state = EHCI_RH_SUSPENDED;
+
+    end_unlink_async(ehci);
+    unlink_empty_async_suspended(ehci);
+    ehci_handle_intr_unlinks(ehci);
+    end_free_itds(ehci);
+
+    /* allow remote wakeup */
+    mask = INTR_MASK;
+    if (!hcd->self.root_hub->do_remote_wakeup)
+        mask &= ~STS_PCD;
+    ehci_writel(ehci, mask, &ehci->regs->intr_enable);
+    ehci_readl(ehci, &ehci->regs->intr_enable);
+
+ done:
+    ehci->next_statechange = jiffies + msecs_to_jiffies(10);
+    ehci->enabled_hrtimer_events = 0;
+    ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT;
+    spin_unlock_irq (&ehci->lock);
+
+    hrtimer_cancel(&ehci->hrtimer);
+    return 0;
+}
+
+
+/* caller has locked the root hub, and should reset/reinit on error */
+static int ehci_bus_resume (struct usb_hcd *hcd)
+{
+    struct ehci_hcd *ehci = hcd_to_ehci (hcd);
+    u32 temp;
+    u32 power_okay;
+    int i;
+    unsigned long resume_needed = 0;
+
+    if (time_before (jiffies, ehci->next_statechange))
+        msleep(5);
+    spin_lock_irq (&ehci->lock);
+    if (!HCD_HW_ACCESSIBLE(hcd) || ehci->shutdown)
+        goto shutdown;
+
+    if (unlikely(ehci->debug)) {
+        if (!dbgp_reset_prep(hcd))
+            ehci->debug = NULL;
+        else
+            dbgp_external_startup(hcd);
+    }
+
+    /* Ideally we've got a real resume here, and no port's power
+     * was lost. (For PCI, that means Vaux was maintained.) But we
+     * could instead be restoring a swsusp snapshot -- so that BIOS was
+     * the last user of the controller, not reset/pm hardware keeping
+     * state we gave to it.
+     */
+    power_okay = ehci_readl(ehci, &ehci->regs->intr_enable);
+    ehci_dbg(ehci, "resume root hub%s\n",
+            power_okay ? "" : " after power loss");
+
+    /* at least some APM implementations will try to deliver
+     * IRQs right away, so delay them until we're ready.
+     */
+    ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+
+    /* re-init operational registers */
+    ehci_writel(ehci, 0, &ehci->regs->segment);
+    ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
+    ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next);
+
+    /* restore CMD_RUN, framelist size, and irq threshold */
+    ehci->command |= CMD_RUN;
+    ehci_writel(ehci, ehci->command, &ehci->regs->command);
+    ehci->rh_state = EHCI_RH_RUNNING;
+
+    /*
+     * According to Bugzilla #8190, the port status for some controllers
+     * will be wrong without a delay. In the bogus state the port is
+     * reported as enabled but neither suspended nor resumed.
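+     *
+     * (Editor's note: the predicate below is an editorial sketch, not
+     * original code -- the inconsistency test the loop below waits out
+     * with an 8 ms sleep, in isolation.)
+     */
+#if 0   /* editor's illustration -- not compiled */
+static inline int port_status_inconsistent(u32 portsc)
+{
+    /* enabled, yet neither suspended nor resuming */
+    return (portsc & PORT_PE) &&
+           !(portsc & (PORT_SUSPEND | PORT_RESUME));
+}
+#endif
+    /*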
+ */ + i = HCS_N_PORTS(ehci->hcs_params); + while (i--) { + temp = ehci_readl(ehci, &ehci->regs->port_status[i]); + if ((temp & PORT_PE) && + !(temp & (PORT_SUSPEND | PORT_RESUME))) { + ehci_dbg(ehci, "Port status(0x%x) is wrong\n", temp); + spin_unlock_irq(&ehci->lock); + msleep(8); + spin_lock_irq(&ehci->lock); + break; + } + } + + if (ehci->shutdown) + goto shutdown; + + /* clear phy low-power mode before resume */ + if (ehci->bus_suspended && ehci->has_hostpc) { + i = HCS_N_PORTS(ehci->hcs_params); + while (i--) { + if (test_bit(i, &ehci->bus_suspended)) { + u32 __iomem *hostpc_reg = + &ehci->regs->hostpc[i]; + + temp = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp & ~HOSTPC_PHCD, + hostpc_reg); + } + } + spin_unlock_irq(&ehci->lock); + msleep(5); + spin_lock_irq(&ehci->lock); + if (ehci->shutdown) + goto shutdown; + } + + /* manually resume the ports we suspended during bus_suspend() */ + i = HCS_N_PORTS (ehci->hcs_params); + while (i--) { + temp = ehci_readl(ehci, &ehci->regs->port_status [i]); + temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); + if (test_bit(i, &ehci->bus_suspended) && + (temp & PORT_SUSPEND)) { + temp |= PORT_RESUME; + set_bit(i, &resume_needed); + } + ehci_writel(ehci, temp, &ehci->regs->port_status [i]); + } + + /* msleep for 20ms only if code is trying to resume port */ + if (resume_needed) { + spin_unlock_irq(&ehci->lock); + msleep(20); + spin_lock_irq(&ehci->lock); + if (ehci->shutdown) + goto shutdown; + } + + i = HCS_N_PORTS (ehci->hcs_params); + while (i--) { + temp = ehci_readl(ehci, &ehci->regs->port_status [i]); + if (test_bit(i, &resume_needed)) { + temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME); + ehci_writel(ehci, temp, &ehci->regs->port_status [i]); + ehci_vdbg (ehci, "resumed port %d\n", i + 1); + } + } + + ehci->next_statechange = jiffies + msecs_to_jiffies(5); + spin_unlock_irq(&ehci->lock); + + ehci_handover_companion_ports(ehci); + + /* Now we can safely re-enable irqs */ + spin_lock_irq(&ehci->lock); + if (ehci->shutdown) + goto shutdown; + ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable); + (void) ehci_readl(ehci, &ehci->regs->intr_enable); + spin_unlock_irq(&ehci->lock); + + return 0; + + shutdown: + spin_unlock_irq(&ehci->lock); + return -ESHUTDOWN; +} + +#else + +#define ehci_bus_suspend NULL +#define ehci_bus_resume NULL + +#endif /* CONFIG_PM */ + +/*-------------------------------------------------------------------------*/ + +/* + * Sets the owner of a port + */ +static void set_owner(struct ehci_hcd *ehci, int portnum, int new_owner) +{ + u32 __iomem *status_reg; + u32 port_status; + int try; + + status_reg = &ehci->regs->port_status[portnum]; + + /* + * The controller won't set the OWNER bit if the port is + * enabled, so this loop will sometimes require at least two + * iterations: one to disable the port and one to set OWNER. 
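+     *
+     * (Editor's note: the predicate below is an editorial sketch, not
+     * original code -- it is the loop's exit condition in isolation.)
+     */
+#if 0   /* editor's illustration -- not compiled */
+static inline int owner_settled(u32 portsc, u32 new_owner)
+{
+    /* done once OWNER matches the requested value, or once the port
+     * is both unowned and disconnected */
+    return (portsc & PORT_OWNER) == new_owner ||
+           (portsc & (PORT_OWNER | PORT_CONNECT)) == 0;
+}
+#endif
+    /*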
+ */ + for (try = 4; try > 0; --try) { + spin_lock_irq(&ehci->lock); + port_status = ehci_readl(ehci, status_reg); + if ((port_status & PORT_OWNER) == new_owner + || (port_status & (PORT_OWNER | PORT_CONNECT)) + == 0) + try = 0; + else { + port_status ^= PORT_OWNER; + port_status &= ~(PORT_PE | PORT_RWC_BITS); + ehci_writel(ehci, port_status, status_reg); + } + spin_unlock_irq(&ehci->lock); + if (try > 1) + msleep(5); + } +} + +/*-------------------------------------------------------------------------*/ + +static int check_reset_complete ( + struct ehci_hcd *ehci, + int index, + u32 __iomem *status_reg, + int port_status +) { + if (!(port_status & PORT_CONNECT)) + return port_status; + + /* if reset finished and it's still not enabled -- handoff */ + if (!(port_status & PORT_PE)) { + + /* with integrated TT, there's nobody to hand it to! */ + if (ehci_is_TDI(ehci)) { + ehci_dbg (ehci, + "Failed to enable port %d on root hub TT\n", + index+1); + return port_status; + } + + ehci_dbg (ehci, "port %d full speed --> companion\n", + index + 1); + + // what happens if HCS_N_CC(params) == 0 ? + port_status |= PORT_OWNER; + port_status &= ~PORT_RWC_BITS; + ehci_writel(ehci, port_status, status_reg); + + /* ensure 440EPX ohci controller state is operational */ + if (ehci->has_amcc_usb23) + set_ohci_hcfs(ehci, 1); + } else { + ehci_dbg(ehci, "port %d reset complete, port enabled\n", + index + 1); + /* ensure 440EPx ohci controller state is suspended */ + if (ehci->has_amcc_usb23) + set_ohci_hcfs(ehci, 0); + } + + return port_status; +} + +/*-------------------------------------------------------------------------*/ + + +/* build "status change" packet (one or two bytes) from HC registers */ + +static int +ehci_hub_status_data (struct usb_hcd *hcd, char *buf) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + u32 temp, status; + u32 mask; + int ports, i, retval = 1; + unsigned long flags; + u32 ppcd = ~0; + + /* init status to no-changes */ + buf [0] = 0; + ports = HCS_N_PORTS (ehci->hcs_params); + if (ports > 7) { + buf [1] = 0; + retval++; + } + + /* Inform the core about resumes-in-progress by returning + * a non-zero value even if there are no status changes. + */ + status = ehci->resuming_ports; + + /* Some boards (mostly VIA?) report bogus overcurrent indications, + * causing massive log spam unless we completely ignore them. It + * may be relevant that VIA VT8235 controllers, where PORT_POWER is + * always set, seem to clear PORT_OCC and PORT_CSC when writing to + * PORT_POWER; that's surprising, but maybe within-spec. + */ + if (!ignore_oc) + mask = PORT_CSC | PORT_PEC | PORT_OCC; + else + mask = PORT_CSC | PORT_PEC; + // PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND + + /* no hub change reports (bit 0) for now (power, ...) */ + + /* port N changes (bit N)? */ + spin_lock_irqsave (&ehci->lock, flags); + + /* get per-port change detect bits */ + if (ehci->has_ppcd) + ppcd = ehci_readl(ehci, &ehci->regs->status) >> 16; + + for (i = 0; i < ports; i++) { + /* leverage per-port change bits feature */ + if (ppcd & (1 << i)) + temp = ehci_readl(ehci, &ehci->regs->port_status[i]); + else + temp = 0; + + /* + * Return status information even for ports with OWNER set. + * Otherwise khubd wouldn't see the disconnect event when a + * high-speed device is switched over to the companion + * controller by the user. 
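+     *
+     * (Editor's note: the helper below is an editorial sketch, not
+     * original code. It restates the bitmap layout used a few lines
+     * down: bit 0 of the status-change map is the hub itself, so
+     * port N, counted from zero, lands at bit N + 1.)
+     */
+#if 0   /* editor's illustration -- not compiled */
+static inline void mark_port_change(char *map, int port /* 0-based */)
+{
+    map[(port + 1) / 8] |= 1 << ((port + 1) % 8);
+}
+#endif
+    /*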
+ */ + + if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend) + || (ehci->reset_done[i] && time_after_eq( + jiffies, ehci->reset_done[i]))) { + if (i < 7) + buf [0] |= 1 << (i + 1); + else + buf [1] |= 1 << (i - 7); + status = STS_PCD; + } + } + + /* If a resume is in progress, make sure it can finish */ + if (ehci->resuming_ports) + mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25)); + + spin_unlock_irqrestore (&ehci->lock, flags); + return status ? retval : 0; +} + +/*-------------------------------------------------------------------------*/ + +static void +ehci_hub_descriptor ( + struct ehci_hcd *ehci, + struct usb_hub_descriptor *desc +) { + int ports = HCS_N_PORTS (ehci->hcs_params); + u16 temp; + + desc->bDescriptorType = 0x29; + desc->bPwrOn2PwrGood = 10; /* ehci 1.0, 2.3.9 says 20ms max */ + desc->bHubContrCurrent = 0; + + desc->bNbrPorts = ports; + temp = 1 + (ports / 8); + desc->bDescLength = 7 + 2 * temp; + + /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */ + memset(&desc->u.hs.DeviceRemovable[0], 0, temp); + memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp); + + temp = 0x0008; /* per-port overcurrent reporting */ + if (HCS_PPC (ehci->hcs_params)) + temp |= 0x0001; /* per-port power control */ + else + temp |= 0x0002; /* no power switching */ +#if 0 +// re-enable when we support USB_PORT_FEAT_INDICATOR below. + if (HCS_INDICATOR (ehci->hcs_params)) + temp |= 0x0080; /* per-port indicators (LEDs) */ +#endif + desc->wHubCharacteristics = cpu_to_le16(temp); +} + +/*-------------------------------------------------------------------------*/ + +static int ehci_hub_control ( + struct usb_hcd *hcd, + u16 typeReq, + u16 wValue, + u16 wIndex, + char *buf, + u16 wLength +) { + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + int ports = HCS_N_PORTS (ehci->hcs_params); + u32 __iomem *status_reg = &ehci->regs->port_status[ + (wIndex & 0xff) - 1]; + u32 __iomem *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1]; + u32 temp, temp1, status; + unsigned long flags; + int retval = 0; + unsigned selector; + + /* + * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. + * HCS_INDICATOR may say we can change LEDs to off/amber/green. + * (track current state ourselves) ... blink for diagnostics, + * power, "this is the one", etc. EHCI spec supports this. + */ + + spin_lock_irqsave (&ehci->lock, flags); + switch (typeReq) { + case ClearHubFeature: + switch (wValue) { + case C_HUB_LOCAL_POWER: + case C_HUB_OVER_CURRENT: + /* no hub-wide feature/status flags */ + break; + default: + goto error; + } + break; + case ClearPortFeature: + if (!wIndex || wIndex > ports) + goto error; + wIndex--; + temp = ehci_readl(ehci, status_reg); + temp &= ~PORT_RWC_BITS; + + /* + * Even if OWNER is set, so the port is owned by the + * companion controller, khubd needs to be able to clear + * the port-change status bits (especially + * USB_PORT_STAT_C_CONNECTION). 
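+         *
+         * (Editor's note: the helper below is an editorial sketch, not
+         * original code. PORTSC change bits are write-one-to-clear,
+         * which is why the code above masks off PORT_RWC_BITS first and
+         * the cases below OR back only the bit being acknowledged.)
+         */
+#if 0   /* editor's illustration -- not compiled */
+static inline u32 portsc_ack_change(u32 portsc, u32 rwc_bit)
+{
+    /* writing 1 to an RWC bit clears it; writing 0 leaves it alone */
+    return (portsc & ~PORT_RWC_BITS) | rwc_bit;
+}
+#endif
+        /*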
+ */ + + switch (wValue) { + case USB_PORT_FEAT_ENABLE: + ehci_writel(ehci, temp & ~PORT_PE, status_reg); + break; + case USB_PORT_FEAT_C_ENABLE: + ehci_writel(ehci, temp | PORT_PEC, status_reg); + break; + case USB_PORT_FEAT_SUSPEND: + if (temp & PORT_RESET) + goto error; + if (ehci->no_selective_suspend) + break; +#ifdef CONFIG_USB_OTG + if ((hcd->self.otg_port == (wIndex + 1)) + && hcd->self.b_hnp_enable) { + otg_start_hnp(hcd->phy->otg); + break; + } +#endif + if (!(temp & PORT_SUSPEND)) + break; + if ((temp & PORT_PE) == 0) + goto error; + + /* clear phy low-power mode before resume */ + if (ehci->has_hostpc) { + temp1 = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp1 & ~HOSTPC_PHCD, + hostpc_reg); + spin_unlock_irqrestore(&ehci->lock, flags); + msleep(5);/* wait to leave low-power mode */ + spin_lock_irqsave(&ehci->lock, flags); + } + /* resume signaling for 20 msec */ + temp &= ~PORT_WAKE_BITS; + ehci_writel(ehci, temp | PORT_RESUME, status_reg); + ehci->reset_done[wIndex] = jiffies + + msecs_to_jiffies(20); + break; + case USB_PORT_FEAT_C_SUSPEND: + clear_bit(wIndex, &ehci->port_c_suspend); + break; + case USB_PORT_FEAT_POWER: + if (HCS_PPC (ehci->hcs_params)) + ehci_writel(ehci, temp & ~PORT_POWER, + status_reg); + break; + case USB_PORT_FEAT_C_CONNECTION: + ehci_writel(ehci, temp | PORT_CSC, status_reg); + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + ehci_writel(ehci, temp | PORT_OCC, status_reg); + break; + case USB_PORT_FEAT_C_RESET: + /* GetPortStatus clears reset */ + break; + default: + goto error; + } + ehci_readl(ehci, &ehci->regs->command); /* unblock posted write */ + break; + case GetHubDescriptor: + ehci_hub_descriptor (ehci, (struct usb_hub_descriptor *) + buf); + break; + case GetHubStatus: + /* no hub-wide feature/status flags */ + memset (buf, 0, 4); + //cpu_to_le32s ((u32 *) buf); + break; + case GetPortStatus: + if (!wIndex || wIndex > ports) + goto error; + wIndex--; + status = 0; + temp = ehci_readl(ehci, status_reg); + + // wPortChange bits + if (temp & PORT_CSC) + status |= USB_PORT_STAT_C_CONNECTION << 16; + if (temp & PORT_PEC) + status |= USB_PORT_STAT_C_ENABLE << 16; + + if ((temp & PORT_OCC) && !ignore_oc){ + status |= USB_PORT_STAT_C_OVERCURRENT << 16; + + /* + * Hubs should disable port power on over-current. + * However, not all EHCI implementations do this + * automatically, even if they _do_ support per-port + * power switching; they're allowed to just limit the + * current. khubd will turn the power back on. + */ + if (((temp & PORT_OC) || (ehci->need_oc_pp_cycle)) + && HCS_PPC(ehci->hcs_params)) { + ehci_writel(ehci, + temp & ~(PORT_RWC_BITS | PORT_POWER), + status_reg); + temp = ehci_readl(ehci, status_reg); + } + } + + /* whoever resumes must GetPortStatus to complete it!! */ + if (temp & PORT_RESUME) { + + /* Remote Wakeup received? */ + if (!ehci->reset_done[wIndex]) { + /* resume signaling for 20 msec */ + ehci->reset_done[wIndex] = jiffies + + msecs_to_jiffies(20); + usb_hcd_start_port_resume(&hcd->self, wIndex); + set_bit(wIndex, &ehci->resuming_ports); + /* check the port again */ + mod_timer(&ehci_to_hcd(ehci)->rh_timer, + ehci->reset_done[wIndex]); + } + + /* resume completed? 
*/ + else if (time_after_eq(jiffies, + ehci->reset_done[wIndex])) { + clear_bit(wIndex, &ehci->suspended_ports); + set_bit(wIndex, &ehci->port_c_suspend); + ehci->reset_done[wIndex] = 0; + usb_hcd_end_port_resume(&hcd->self, wIndex); + + /* stop resume signaling */ + temp &= ~(PORT_RWC_BITS | + PORT_SUSPEND | PORT_RESUME); + ehci_writel(ehci, temp, status_reg); + clear_bit(wIndex, &ehci->resuming_ports); + retval = handshake(ehci, status_reg, + PORT_RESUME, 0, 2000 /* 2msec */); + if (retval != 0) { + ehci_err(ehci, + "port %d resume error %d\n", + wIndex + 1, retval); + goto error; + } + temp = ehci_readl(ehci, status_reg); + } + } + + /* whoever resets must GetPortStatus to complete it!! */ + if ((temp & PORT_RESET) + && time_after_eq(jiffies, + ehci->reset_done[wIndex])) { + status |= USB_PORT_STAT_C_RESET << 16; + ehci->reset_done [wIndex] = 0; + clear_bit(wIndex, &ehci->resuming_ports); + + /* force reset to complete */ + ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET), + status_reg); + /* REVISIT: some hardware needs 550+ usec to clear + * this bit; seems too long to spin routinely... + */ + retval = handshake(ehci, status_reg, + PORT_RESET, 0, 1000); + if (retval != 0) { + ehci_err (ehci, "port %d reset error %d\n", + wIndex + 1, retval); + goto error; + } + + /* see what we found out */ + temp = check_reset_complete (ehci, wIndex, status_reg, + ehci_readl(ehci, status_reg)); + } + + if (!(temp & (PORT_RESUME|PORT_RESET))) { + ehci->reset_done[wIndex] = 0; + clear_bit(wIndex, &ehci->resuming_ports); + } + + /* transfer dedicated ports to the companion hc */ + if ((temp & PORT_CONNECT) && + test_bit(wIndex, &ehci->companion_ports)) { + temp &= ~PORT_RWC_BITS; + temp |= PORT_OWNER; + ehci_writel(ehci, temp, status_reg); + ehci_dbg(ehci, "port %d --> companion\n", wIndex + 1); + temp = ehci_readl(ehci, status_reg); + } + + /* + * Even if OWNER is set, there's no harm letting khubd + * see the wPortStatus values (they should all be 0 except + * for PORT_POWER anyway). 
+ */ + + if (temp & PORT_CONNECT) { + status |= USB_PORT_STAT_CONNECTION; + // status may be from integrated TT + if (ehci->has_hostpc) { + temp1 = ehci_readl(ehci, hostpc_reg); + status |= ehci_port_speed(ehci, temp1); + } else + status |= ehci_port_speed(ehci, temp); + } + if (temp & PORT_PE) + status |= USB_PORT_STAT_ENABLE; + + /* maybe the port was unsuspended without our knowledge */ + if (temp & (PORT_SUSPEND|PORT_RESUME)) { + status |= USB_PORT_STAT_SUSPEND; + } else if (test_bit(wIndex, &ehci->suspended_ports)) { + clear_bit(wIndex, &ehci->suspended_ports); + clear_bit(wIndex, &ehci->resuming_ports); + ehci->reset_done[wIndex] = 0; + if (temp & PORT_PE) + set_bit(wIndex, &ehci->port_c_suspend); + usb_hcd_end_port_resume(&hcd->self, wIndex); + } + + if (temp & PORT_OC) + status |= USB_PORT_STAT_OVERCURRENT; + if (temp & PORT_RESET) + status |= USB_PORT_STAT_RESET; + if (temp & PORT_POWER) + status |= USB_PORT_STAT_POWER; + if (test_bit(wIndex, &ehci->port_c_suspend)) + status |= USB_PORT_STAT_C_SUSPEND << 16; + +#ifndef VERBOSE_DEBUG + if (status & ~0xffff) /* only if wPortChange is interesting */ +#endif + dbg_port (ehci, "GetStatus", wIndex + 1, temp); + put_unaligned_le32(status, buf); + break; + case SetHubFeature: + switch (wValue) { + case C_HUB_LOCAL_POWER: + case C_HUB_OVER_CURRENT: + /* no hub-wide feature/status flags */ + break; + default: + goto error; + } + break; + case SetPortFeature: + selector = wIndex >> 8; + wIndex &= 0xff; + if (unlikely(ehci->debug)) { + /* If the debug port is active any port + * feature requests should get denied */ + if (wIndex == HCS_DEBUG_PORT(ehci->hcs_params) && + (readl(&ehci->debug->control) & DBGP_ENABLED)) { + retval = -ENODEV; + goto error_exit; + } + } + if (!wIndex || wIndex > ports) + goto error; + wIndex--; + temp = ehci_readl(ehci, status_reg); + if (temp & PORT_OWNER) + break; + + temp &= ~PORT_RWC_BITS; + switch (wValue) { + case USB_PORT_FEAT_SUSPEND: + if (ehci->no_selective_suspend) + break; + if ((temp & PORT_PE) == 0 + || (temp & PORT_RESET) != 0) + goto error; + + /* After above check the port must be connected. + * Set appropriate bit thus could put phy into low power + * mode if we have hostpc feature + */ + temp &= ~PORT_WKCONN_E; + temp |= PORT_WKDISC_E | PORT_WKOC_E; + ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); + if (ehci->has_hostpc) { + spin_unlock_irqrestore(&ehci->lock, flags); + msleep(5);/* 5ms for HCD enter low pwr mode */ + spin_lock_irqsave(&ehci->lock, flags); + temp1 = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp1 | HOSTPC_PHCD, + hostpc_reg); + temp1 = ehci_readl(ehci, hostpc_reg); + ehci_dbg(ehci, "Port%d phy low pwr mode %s\n", + wIndex, (temp1 & HOSTPC_PHCD) ? + "succeeded" : "failed"); + } + set_bit(wIndex, &ehci->suspended_ports); + break; + case USB_PORT_FEAT_POWER: + if (HCS_PPC (ehci->hcs_params)) + ehci_writel(ehci, temp | PORT_POWER, + status_reg); + break; + case USB_PORT_FEAT_RESET: + if (temp & PORT_RESUME) + goto error; + /* line status bits may report this as low speed, + * which can be fine if this root hub has a + * transaction translator built in. 
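+             *
+             * (Editor's note: the predicate below is an editorial
+             * sketch, not original code -- the handoff test from the
+             * if statement below, minus the ehci_is_TDI() check.)
+             */
+#if 0   /* editor's illustration -- not compiled */
+static inline int wants_companion(u32 portsc)
+{
+    /* connected, not yet enabled, and line state says USB 1.1:
+     * EHCI cannot drive it, so a companion controller must */
+    return (portsc & (PORT_PE | PORT_CONNECT)) == PORT_CONNECT &&
+           PORT_USB11(portsc);
+}
+#endif
+            /*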
+ */ + if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT + && !ehci_is_TDI(ehci) + && PORT_USB11 (temp)) { + ehci_dbg (ehci, + "port %d low speed --> companion\n", + wIndex + 1); + temp |= PORT_OWNER; + } else { + ehci_vdbg (ehci, "port %d reset\n", wIndex + 1); + temp |= PORT_RESET; + temp &= ~PORT_PE; + + /* + * caller must wait, then call GetPortStatus + * usb 2.0 spec says 50 ms resets on root + */ + ehci->reset_done [wIndex] = jiffies + + msecs_to_jiffies (50); + } + ehci_writel(ehci, temp, status_reg); + break; + + /* For downstream facing ports (these): one hub port is put + * into test mode according to USB2 11.24.2.13, then the hub + * must be reset (which for root hub now means rmmod+modprobe, + * or else system reboot). See EHCI 2.3.9 and 4.14 for info + * about the EHCI-specific stuff. + */ + case USB_PORT_FEAT_TEST: + if (!selector || selector > 5) + goto error; + spin_unlock_irqrestore(&ehci->lock, flags); + ehci_quiesce(ehci); + spin_lock_irqsave(&ehci->lock, flags); + + /* Put all enabled ports into suspend */ + while (ports--) { + u32 __iomem *sreg = + &ehci->regs->port_status[ports]; + + temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS; + if (temp & PORT_PE) + ehci_writel(ehci, temp | PORT_SUSPEND, + sreg); + } + + spin_unlock_irqrestore(&ehci->lock, flags); + ehci_halt(ehci); + spin_lock_irqsave(&ehci->lock, flags); + + temp = ehci_readl(ehci, status_reg); + temp |= selector << 16; + ehci_writel(ehci, temp, status_reg); + break; + + default: + goto error; + } + ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ + break; + + default: +error: + /* "stall" on error */ + retval = -EPIPE; + } +error_exit: + spin_unlock_irqrestore (&ehci->lock, flags); + return retval; +} + +static void ehci_relinquish_port(struct usb_hcd *hcd, int portnum) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + + if (ehci_is_TDI(ehci)) + return; + set_owner(ehci, --portnum, PORT_OWNER); +} + +static int ehci_port_handed_over(struct usb_hcd *hcd, int portnum) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + u32 __iomem *reg; + + if (ehci_is_TDI(ehci)) + return 0; + reg = &ehci->regs->port_status[portnum - 1]; + return ehci_readl(ehci, reg) & PORT_OWNER; +} diff --git a/addons/ehci-pci/src/3.10.108/ehci-mem.c b/addons/ehci-pci/src/3.10.108/ehci-mem.c new file mode 100644 index 00000000..ef2c3a1e --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/ehci-mem.c @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2001 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* this file is part of ehci-hcd.c */ + +/*-------------------------------------------------------------------------*/ + +/* + * There's basically three types of memory: + * - data used only by the HCD ... kmalloc is fine + * - async and periodic schedules, shared by HC and HCD ... 
these + * need to use dma_pool or dma_alloc_coherent + * - driver buffers, read/written by HC ... single shot DMA mapped + * + * There's also "register" data (e.g. PCI or SOC), which is memory mapped. + * No memory seen by this driver is pageable. + */ + +/*-------------------------------------------------------------------------*/ + +/* Allocate the key transfer structures from the previously allocated pool */ + +static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd, + dma_addr_t dma) +{ + memset (qtd, 0, sizeof *qtd); + qtd->qtd_dma = dma; + qtd->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); + qtd->hw_next = EHCI_LIST_END(ehci); + qtd->hw_alt_next = EHCI_LIST_END(ehci); + INIT_LIST_HEAD (&qtd->qtd_list); +} + +static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags) +{ + struct ehci_qtd *qtd; + dma_addr_t dma; + + qtd = dma_pool_alloc (ehci->qtd_pool, flags, &dma); + if (qtd != NULL) { + ehci_qtd_init(ehci, qtd, dma); + } + return qtd; +} + +static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd) +{ + dma_pool_free (ehci->qtd_pool, qtd, qtd->qtd_dma); +} + + +static void qh_destroy(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + /* clean qtds first, and know this is not linked */ + if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) { + ehci_dbg (ehci, "unused qh not empty!\n"); + BUG (); + } + if (qh->dummy) + ehci_qtd_free (ehci, qh->dummy); + dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma); + kfree(qh); +} + +static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags) +{ + struct ehci_qh *qh; + dma_addr_t dma; + + qh = kzalloc(sizeof *qh, GFP_ATOMIC); + if (!qh) + goto done; + qh->hw = (struct ehci_qh_hw *) + dma_pool_alloc(ehci->qh_pool, flags, &dma); + if (!qh->hw) + goto fail; + memset(qh->hw, 0, sizeof *qh->hw); + qh->qh_dma = dma; + // INIT_LIST_HEAD (&qh->qh_list); + INIT_LIST_HEAD (&qh->qtd_list); + + /* dummy td enables safe urb queuing */ + qh->dummy = ehci_qtd_alloc (ehci, flags); + if (qh->dummy == NULL) { + ehci_dbg (ehci, "no dummy td\n"); + goto fail1; + } +done: + return qh; +fail1: + dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma); +fail: + kfree(qh); + return NULL; +} + +/*-------------------------------------------------------------------------*/ + +/* The queue heads and transfer descriptors are managed from pools tied + * to each of the "per device" structures. + * This is the initialisation and cleanup code. 
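+ *
+ * Per the EHCI spec, the hardware-visible structures (qtd, qh,
+ * itd, sitd) must be 32-byte aligned and must not cross a 4KB
+ * page boundary; hence every dma_pool below is created with an
+ * alignment of 32 and an allocation boundary of 4096.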
+ */ + +static void ehci_mem_cleanup (struct ehci_hcd *ehci) +{ + if (ehci->async) + qh_destroy(ehci, ehci->async); + ehci->async = NULL; + + if (ehci->dummy) + qh_destroy(ehci, ehci->dummy); + ehci->dummy = NULL; + + /* DMA consistent memory and pools */ + if (ehci->qtd_pool) + dma_pool_destroy (ehci->qtd_pool); + ehci->qtd_pool = NULL; + + if (ehci->qh_pool) { + dma_pool_destroy (ehci->qh_pool); + ehci->qh_pool = NULL; + } + + if (ehci->itd_pool) + dma_pool_destroy (ehci->itd_pool); + ehci->itd_pool = NULL; + + if (ehci->sitd_pool) + dma_pool_destroy (ehci->sitd_pool); + ehci->sitd_pool = NULL; + + if (ehci->periodic) + dma_free_coherent (ehci_to_hcd(ehci)->self.controller, + ehci->periodic_size * sizeof (u32), + ehci->periodic, ehci->periodic_dma); + ehci->periodic = NULL; + + /* shadow periodic table */ + kfree(ehci->pshadow); + ehci->pshadow = NULL; +} + +/* remember to add cleanup code (above) if you add anything here */ +static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags) +{ + int i; + + /* QTDs for control/bulk/intr transfers */ + ehci->qtd_pool = dma_pool_create ("ehci_qtd", + ehci_to_hcd(ehci)->self.controller, + sizeof (struct ehci_qtd), + 32 /* byte alignment (for hw parts) */, + 4096 /* can't cross 4K */); + if (!ehci->qtd_pool) { + goto fail; + } + + /* QHs for control/bulk/intr transfers */ + ehci->qh_pool = dma_pool_create ("ehci_qh", + ehci_to_hcd(ehci)->self.controller, + sizeof(struct ehci_qh_hw), + 32 /* byte alignment (for hw parts) */, + 4096 /* can't cross 4K */); + if (!ehci->qh_pool) { + goto fail; + } + ehci->async = ehci_qh_alloc (ehci, flags); + if (!ehci->async) { + goto fail; + } + + /* ITD for high speed ISO transfers */ + ehci->itd_pool = dma_pool_create ("ehci_itd", + ehci_to_hcd(ehci)->self.controller, + sizeof (struct ehci_itd), + 32 /* byte alignment (for hw parts) */, + 4096 /* can't cross 4K */); + if (!ehci->itd_pool) { + goto fail; + } + + /* SITD for full/low speed split ISO transfers */ + ehci->sitd_pool = dma_pool_create ("ehci_sitd", + ehci_to_hcd(ehci)->self.controller, + sizeof (struct ehci_sitd), + 32 /* byte alignment (for hw parts) */, + 4096 /* can't cross 4K */); + if (!ehci->sitd_pool) { + goto fail; + } + + /* Hardware periodic table */ + ehci->periodic = (__le32 *) + dma_alloc_coherent (ehci_to_hcd(ehci)->self.controller, + ehci->periodic_size * sizeof(__le32), + &ehci->periodic_dma, 0); + if (ehci->periodic == NULL) { + goto fail; + } + + if (ehci->use_dummy_qh) { + struct ehci_qh_hw *hw; + ehci->dummy = ehci_qh_alloc(ehci, flags); + if (!ehci->dummy) + goto fail; + + hw = ehci->dummy->hw; + hw->hw_next = EHCI_LIST_END(ehci); + hw->hw_qtd_next = EHCI_LIST_END(ehci); + hw->hw_alt_next = EHCI_LIST_END(ehci); + hw->hw_token &= ~QTD_STS_ACTIVE; + ehci->dummy->hw = hw; + + for (i = 0; i < ehci->periodic_size; i++) + ehci->periodic[i] = ehci->dummy->qh_dma; + } else { + for (i = 0; i < ehci->periodic_size; i++) + ehci->periodic[i] = EHCI_LIST_END(ehci); + } + + /* software shadow of hardware table */ + ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags); + if (ehci->pshadow != NULL) + return 0; + +fail: + ehci_dbg (ehci, "couldn't init memory\n"); + ehci_mem_cleanup (ehci); + return -ENOMEM; +} diff --git a/addons/ehci-pci/src/3.10.108/ehci-pci.c b/addons/ehci-pci/src/3.10.108/ehci-pci.c new file mode 100644 index 00000000..fe131565 --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/ehci-pci.c @@ -0,0 +1,464 @@ +/* + * EHCI HCD (Host Controller Driver) PCI Bus Glue. 
+ *
+ * Copyright (c) 2000-2004 by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+#include "pci-quirks.h"
+
+#define DRIVER_DESC "EHCI PCI platform driver"
+
+static const char hcd_name[] = "ehci-pci";
+
+/* defined here to avoid adding to pci_ids.h for single instance use */
+#define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70
+
+/*-------------------------------------------------------------------------*/
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC 0x0939
+static inline bool is_intel_quark_x1000(struct pci_dev *pdev)
+{
+        return pdev->vendor == PCI_VENDOR_ID_INTEL &&
+                pdev->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC;
+}
+
+/*
+ * 0x84 is the offset of in/out threshold register,
+ * and it is the same offset as the register of 'hostpc'.
+ */
+#define intel_quark_x1000_insnreg01 hostpc
+
+/* Maximum usable threshold value is 0x7f dwords for both IN and OUT */
+#define INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD 0x007f007f
+
+/* called after powerup, by probe or system-pm "wakeup" */
+static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
+{
+        int retval;
+
+        /* we expect static quirk code to handle the "extended capabilities"
+         * (currently just BIOS handoff) allowed starting with EHCI 0.96
+         */
+
+        /* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
+        retval = pci_set_mwi(pdev);
+        if (!retval)
+                ehci_dbg(ehci, "MWI active\n");
+
+        /* Reset the threshold limit */
+        if (is_intel_quark_x1000(pdev)) {
+                /*
+                 * For the Intel QUARK X1000, raise the I/O threshold to the
+                 * maximum usable value in order to improve performance.
+                 */
+                ehci_writel(ehci, INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD,
+                        ehci->regs->intel_quark_x1000_insnreg01);
+        }
+
+        return 0;
+}
+
+/* called during probe() after chip reset completes */
+static int ehci_pci_setup(struct usb_hcd *hcd)
+{
+        struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+        struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+        struct pci_dev *p_smbus;
+        u8 rev;
+        u32 temp;
+        int retval;
+
+        ehci->caps = hcd->regs;
+
+        /*
+         * ehci_init() causes memory for DMA transfers to be
+         * allocated. Thus, any vendor-specific workarounds based on
+         * limiting the type of memory used for DMA transfers must
+         * happen before ehci_setup() is called.
+         *
+         * Most other workarounds can be done either before or after
+         * init and reset; they are located here too.
+         */
+        switch (pdev->vendor) {
+        case PCI_VENDOR_ID_TOSHIBA_2:
+                /* celleb's companion chip */
+                if (pdev->device == 0x01b5) {
+#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+                        ehci->big_endian_mmio = 1;
+#else
+                        ehci_warn(ehci,
+                                "unsupported big endian Toshiba quirk\n");
+#endif
+                }
+                break;
+        case PCI_VENDOR_ID_NVIDIA:
+                /* NVidia reports that certain chips don't handle
+                 * QH, ITD, or SITD addresses above 2GB.
(But TD, + * data buffer, and periodic schedule are normal.) + */ + switch (pdev->device) { + case 0x003c: /* MCP04 */ + case 0x005b: /* CK804 */ + case 0x00d8: /* CK8 */ + case 0x00e8: /* CK8S */ + if (pci_set_consistent_dma_mask(pdev, + DMA_BIT_MASK(31)) < 0) + ehci_warn(ehci, "can't enable NVidia " + "workaround for >2GB RAM\n"); + break; + + /* Some NForce2 chips have problems with selective suspend; + * fixed in newer silicon. + */ + case 0x0068: + if (pdev->revision < 0xa4) + ehci->no_selective_suspend = 1; + break; + } + break; + case PCI_VENDOR_ID_INTEL: + if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB) + hcd->has_tt = 1; + break; + case PCI_VENDOR_ID_TDI: + if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) + hcd->has_tt = 1; + break; + case PCI_VENDOR_ID_AMD: + /* AMD PLL quirk */ + if (usb_amd_find_chipset_info()) + ehci->amd_pll_fix = 1; + /* AMD8111 EHCI doesn't work, according to AMD errata */ + if (pdev->device == 0x7463) { + ehci_info(ehci, "ignoring AMD8111 (errata)\n"); + retval = -EIO; + goto done; + } + + /* + * EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may + * read/write memory space which does not belong to it when + * there is NULL pointer with T-bit set to 1 in the frame list + * table. To avoid the issue, the frame list link pointer + * should always contain a valid pointer to a inactive qh. + */ + if (pdev->device == 0x7808) { + ehci->use_dummy_qh = 1; + ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n"); + } + break; + case PCI_VENDOR_ID_VIA: + if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) { + u8 tmp; + + /* The VT6212 defaults to a 1 usec EHCI sleep time which + * hogs the PCI bus *badly*. Setting bit 5 of 0x4B makes + * that sleep time use the conventional 10 usec. + */ + pci_read_config_byte(pdev, 0x4b, &tmp); + if (tmp & 0x20) + break; + pci_write_config_byte(pdev, 0x4b, tmp | 0x20); + } + break; + case PCI_VENDOR_ID_ATI: + /* AMD PLL quirk */ + if (usb_amd_find_chipset_info()) + ehci->amd_pll_fix = 1; + + /* + * EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may + * read/write memory space which does not belong to it when + * there is NULL pointer with T-bit set to 1 in the frame list + * table. To avoid the issue, the frame list link pointer + * should always contain a valid pointer to a inactive qh. + */ + if (pdev->device == 0x4396) { + ehci->use_dummy_qh = 1; + ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n"); + } + /* SB600 and old version of SB700 have a bug in EHCI controller, + * which causes usb devices lose response in some cases. 
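+ * The affected steppings are identified indirectly, via the
+ * revision (0x3a/0x3b) of the SBx00 SMBus bridge on the same
+ * southbridge; the workaround below sets bit 3 of PCI config
+ * byte 0x53.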
+ */ + if ((pdev->device == 0x4386) || (pdev->device == 0x4396)) { + p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, + PCI_DEVICE_ID_ATI_SBX00_SMBUS, + NULL); + if (!p_smbus) + break; + rev = p_smbus->revision; + if ((pdev->device == 0x4386) || (rev == 0x3a) + || (rev == 0x3b)) { + u8 tmp; + ehci_info(ehci, "applying AMD SB600/SB700 USB " + "freeze workaround\n"); + pci_read_config_byte(pdev, 0x53, &tmp); + pci_write_config_byte(pdev, 0x53, tmp | (1<<3)); + } + pci_dev_put(p_smbus); + } + break; + case PCI_VENDOR_ID_NETMOS: + /* MosChip frame-index-register bug */ + ehci_info(ehci, "applying MosChip frame-index workaround\n"); + ehci->frame_index_bug = 1; + break; + } + + /* optional debug port, normally in the first BAR */ + temp = pci_find_capability(pdev, PCI_CAP_ID_DBG); + if (temp) { + pci_read_config_dword(pdev, temp, &temp); + temp >>= 16; + if (((temp >> 13) & 7) == 1) { + u32 hcs_params = ehci_readl(ehci, + &ehci->caps->hcs_params); + + temp &= 0x1fff; + ehci->debug = hcd->regs + temp; + temp = ehci_readl(ehci, &ehci->debug->control); + ehci_info(ehci, "debug port %d%s\n", + HCS_DEBUG_PORT(hcs_params), + (temp & DBGP_ENABLED) ? " IN USE" : ""); + if (!(temp & DBGP_ENABLED)) + ehci->debug = NULL; + } + } + + retval = ehci_setup(hcd); + if (retval) + return retval; + + /* These workarounds need to be applied after ehci_setup() */ + switch (pdev->vendor) { + case PCI_VENDOR_ID_NEC: + ehci->need_io_watchdog = 0; + break; + case PCI_VENDOR_ID_INTEL: + ehci->need_io_watchdog = 0; + break; + case PCI_VENDOR_ID_NVIDIA: + switch (pdev->device) { + /* MCP89 chips on the MacBookAir3,1 give EPROTO when + * fetching device descriptors unless LPM is disabled. + * There are also intermittent problems enumerating + * devices with PPCD enabled. + */ + case 0x0d9d: + ehci_info(ehci, "disable ppcd for nvidia mcp89\n"); + ehci->has_ppcd = 0; + ehci->command &= ~CMD_PPCEE; + break; + } + break; + } + + /* at least the Genesys GL880S needs fixup here */ + temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params); + temp &= 0x0f; + if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) { + ehci_dbg(ehci, "bogus port configuration: " + "cc=%d x pcc=%d < ports=%d\n", + HCS_N_CC(ehci->hcs_params), + HCS_N_PCC(ehci->hcs_params), + HCS_N_PORTS(ehci->hcs_params)); + + switch (pdev->vendor) { + case 0x17a0: /* GENESYS */ + /* GL880S: should be PORTS=2 */ + temp |= (ehci->hcs_params & ~0xf); + ehci->hcs_params = temp; + break; + case PCI_VENDOR_ID_NVIDIA: + /* NF4: should be PCC=10 */ + break; + } + } + + /* Serial Bus Release Number is at PCI 0x60 offset */ + if (pdev->vendor == PCI_VENDOR_ID_STMICRO + && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST) + ; /* ConneXT has no sbrn register */ + else + pci_read_config_byte(pdev, 0x60, &ehci->sbrn); + + /* Keep this around for a while just in case some EHCI + * implementation uses legacy PCI PM support. This test + * can be removed on 17 Dec 2009 if the dev_warn() hasn't + * been triggered by then. 
+ */ + if (!device_can_wakeup(&pdev->dev)) { + u16 port_wake; + + pci_read_config_word(pdev, 0x62, &port_wake); + if (port_wake & 0x0001) { + dev_warn(&pdev->dev, "Enabling legacy PCI PM\n"); + device_set_wakeup_capable(&pdev->dev, 1); + } + } + +#ifdef CONFIG_PM_RUNTIME + if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev)) + ehci_warn(ehci, "selective suspend/wakeup unavailable\n"); +#endif + + retval = ehci_pci_reinit(ehci, pdev); +done: + return retval; +} + +/*-------------------------------------------------------------------------*/ + +#ifdef CONFIG_PM + +/* suspend/resume, section 4.3 */ + +/* These routines rely on the PCI bus glue + * to handle powerdown and wakeup, and currently also on + * transceivers that don't need any software attention to set up + * the right sort of wakeup. + * Also they depend on separate root hub suspend/resume. + */ + +static bool usb_is_intel_switchable_ehci(struct pci_dev *pdev) +{ + return pdev->class == PCI_CLASS_SERIAL_USB_EHCI && + pdev->vendor == PCI_VENDOR_ID_INTEL && + (pdev->device == 0x1E26 || + pdev->device == 0x8C2D || + pdev->device == 0x8C26 || + pdev->device == 0x9C26); +} + +static void ehci_enable_xhci_companion(void) +{ + struct pci_dev *companion = NULL; + + /* The xHCI and EHCI controllers are not on the same PCI slot */ + for_each_pci_dev(companion) { + if (!usb_is_intel_switchable_xhci(companion)) + continue; + usb_enable_xhci_ports(companion); + return; + } +} + +static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + + /* The BIOS on systems with the Intel Panther Point chipset may or may + * not support xHCI natively. That means that during system resume, it + * may switch the ports back to EHCI so that users can use their + * keyboard to select a kernel from GRUB after resume from hibernate. + * + * The BIOS is supposed to remember whether the OS had xHCI ports + * enabled before resume, and switch the ports back to xHCI when the + * BIOS/OS semaphore is written, but we all know we can't trust BIOS + * writers. + * + * Unconditionally switch the ports back to xHCI after a system resume. + * We can't tell whether the EHCI or xHCI controller will be resumed + * first, so we have to do the port switchover in both drivers. Writing + * a '1' to the port switchover registers should have no effect if the + * port was already switched over. 
+ */ + if (usb_is_intel_switchable_ehci(pdev)) + ehci_enable_xhci_companion(); + + if (ehci_resume(hcd, hibernated) != 0) + (void) ehci_pci_reinit(ehci, pdev); + return 0; +} + +#else + +#define ehci_suspend NULL +#define ehci_pci_resume NULL +#endif /* CONFIG_PM */ + +static struct hc_driver __read_mostly ehci_pci_hc_driver; + +static const struct ehci_driver_overrides pci_overrides __initconst = { + .reset = ehci_pci_setup, +}; + +/*-------------------------------------------------------------------------*/ + +/* PCI driver selection metadata; PCI hotplugging uses this */ +static const struct pci_device_id pci_ids [] = { { + /* handle any USB 2.0 EHCI controller */ + PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0), + .driver_data = (unsigned long) &ehci_pci_hc_driver, + }, { + PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_HOST), + .driver_data = (unsigned long) &ehci_pci_hc_driver, + }, + { /* end: all zeroes */ } +}; +MODULE_DEVICE_TABLE(pci, pci_ids); + +/* pci driver glue; this is a "new style" PCI driver module */ +static struct pci_driver ehci_pci_driver = { + .name = (char *) hcd_name, + .id_table = pci_ids, + + .probe = usb_hcd_pci_probe, + .remove = usb_hcd_pci_remove, + .shutdown = usb_hcd_pci_shutdown, + +#ifdef CONFIG_PM + .driver = { + .pm = &usb_hcd_pci_pm_ops + }, +#endif +}; + +static int __init ehci_pci_init(void) +{ + if (usb_disabled()) + return -ENODEV; + + pr_info("%s: " DRIVER_DESC "\n", hcd_name); + + ehci_init_driver(&ehci_pci_hc_driver, &pci_overrides); + + /* Entries for the PCI suspend/resume callbacks are special */ + ehci_pci_hc_driver.pci_suspend = ehci_suspend; + ehci_pci_hc_driver.pci_resume = ehci_pci_resume; + + return pci_register_driver(&ehci_pci_driver); +} +module_init(ehci_pci_init); + +static void __exit ehci_pci_cleanup(void) +{ + pci_unregister_driver(&ehci_pci_driver); +} +module_exit(ehci_pci_cleanup); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("David Brownell"); +MODULE_AUTHOR("Alan Stern"); +MODULE_LICENSE("GPL"); diff --git a/addons/ehci-pci/src/3.10.108/ehci-q.c b/addons/ehci-pci/src/3.10.108/ehci-q.c new file mode 100644 index 00000000..d34b399b --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/ehci-q.c @@ -0,0 +1,1368 @@ +/* + * Copyright (C) 2001-2004 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* this file is part of ehci-hcd.c */ + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI hardware queue manipulation ... the core. QH/QTD manipulation. + * + * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd" + * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned + * buffers needed for the larger number). We use one QH per endpoint, queue + * multiple urbs (all three types) per endpoint. URBs may need several qtds. 
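+ *
+ * A rough sketch of one endpoint's queue (not literal structures):
+ *
+ *   qh --> qtd --> qtd --> ... --> dummy qtd
+ *
+ * Each qtd's hw_next holds the DMA address of its successor, and
+ * one URB may contribute several consecutive qtds to the chain.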
+ * + * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with + * interrupts) needs careful scheduling. Performance improvements can be + * an ongoing challenge. That's in "ehci-sched.c". + * + * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs, + * or otherwise through transaction translators (TTs) in USB 2.0 hubs using + * (b) special fields in qh entries or (c) split iso entries. TTs will + * buffer low/full speed data so the host collects it at high speed. + */ + +/*-------------------------------------------------------------------------*/ + +/* fill a qtd, returning how much of the buffer we were able to queue up */ + +static int +qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf, + size_t len, int token, int maxpacket) +{ + int i, count; + u64 addr = buf; + + /* one buffer entry per 4K ... first might be short or unaligned */ + qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr); + qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32)); + count = 0x1000 - (buf & 0x0fff); /* rest of that page */ + if (likely (len < count)) /* ... iff needed */ + count = len; + else { + buf += 0x1000; + buf &= ~0x0fff; + + /* per-qtd limit: from 16K to 20K (best alignment) */ + for (i = 1; count < len && i < 5; i++) { + addr = buf; + qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr); + qtd->hw_buf_hi[i] = cpu_to_hc32(ehci, + (u32)(addr >> 32)); + buf += 0x1000; + if ((count + 0x1000) < len) + count += 0x1000; + else + count = len; + } + + /* short packets may only terminate transfers */ + if (count != len) + count -= (count % maxpacket); + } + qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token); + qtd->length = count; + + return count; +} + +/*-------------------------------------------------------------------------*/ + +static inline void +qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd) +{ + struct ehci_qh_hw *hw = qh->hw; + + /* writes to an active overlay are unsafe */ + WARN_ON(qh->qh_state != QH_STATE_IDLE); + + hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); + hw->hw_alt_next = EHCI_LIST_END(ehci); + + /* Except for control endpoints, we make hardware maintain data + * toggle (like OHCI) ... here (re)initialize the toggle in the QH, + * and set the pseudo-toggle in udev. Only usb_clear_halt() will + * ever clear it. + */ + if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) { + unsigned is_out, epnum; + + is_out = qh->is_out; + epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f; + if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) { + hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); + usb_settoggle (qh->dev, epnum, is_out, 1); + } + } + + hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); +} + +/* if it weren't for a common silicon quirk (writing the dummy into the qh + * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault + * recovery (including urb dequeue) would need software changes to a QH... + */ +static void +qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + struct ehci_qtd *qtd; + + qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list); + + /* + * first qtd may already be partially processed. + * If we come here during unlink, the QH overlay region + * might have reference to the just unlinked qtd. The + * qtd is updated in qh_completions(). Update the QH + * overlay here. 
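+ * Hence the two cases below: if the overlay is still marked
+ * active we only patch its hw_qtd_next to step past the stale
+ * qtd; otherwise the whole overlay is rewritten from the first
+ * qtd via qh_update().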
+ */ + if (qh->hw->hw_token & ACTIVE_BIT(ehci)) + qh->hw->hw_qtd_next = qtd->hw_next; + else + qh_update(ehci, qh, qtd); +} + +/*-------------------------------------------------------------------------*/ + +static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh); + +static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct ehci_qh *qh = ep->hcpriv; + unsigned long flags; + + spin_lock_irqsave(&ehci->lock, flags); + qh->clearing_tt = 0; + if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list) + && ehci->rh_state == EHCI_RH_RUNNING) + qh_link_async(ehci, qh); + spin_unlock_irqrestore(&ehci->lock, flags); +} + +static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh, + struct urb *urb, u32 token) +{ + + /* If an async split transaction gets an error or is unlinked, + * the TT buffer may be left in an indeterminate state. We + * have to clear the TT buffer. + * + * Note: this routine is never called for Isochronous transfers. + */ + if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) { +#ifdef DEBUG + struct usb_device *tt = urb->dev->tt->hub; + dev_dbg(&tt->dev, + "clear tt buffer port %d, a%d ep%d t%08x\n", + urb->dev->ttport, urb->dev->devnum, + usb_pipeendpoint(urb->pipe), token); +#endif /* DEBUG */ + if (!ehci_is_TDI(ehci) + || urb->dev->tt->hub != + ehci_to_hcd(ehci)->self.root_hub) { + if (usb_hub_clear_tt_buffer(urb) == 0) + qh->clearing_tt = 1; + } else { + + /* REVISIT ARC-derived cores don't clear the root + * hub TT buffer in this way... + */ + } + } +} + +static int qtd_copy_status ( + struct ehci_hcd *ehci, + struct urb *urb, + size_t length, + u32 token +) +{ + int status = -EINPROGRESS; + + /* count IN/OUT bytes, not SETUP (even short packets) */ + if (likely (QTD_PID (token) != 2)) + urb->actual_length += length - QTD_LENGTH (token); + + /* don't modify error codes */ + if (unlikely(urb->unlinked)) + return status; + + /* force cleanup after short read; not always an error */ + if (unlikely (IS_SHORT_READ (token))) + status = -EREMOTEIO; + + /* serious "can't proceed" faults reported by the hardware */ + if (token & QTD_STS_HALT) { + if (token & QTD_STS_BABBLE) { + /* FIXME "must" disable babbling device's port too */ + status = -EOVERFLOW; + /* CERR nonzero + halt --> stall */ + } else if (QTD_CERR(token)) { + status = -EPIPE; + + /* In theory, more than one of the following bits can be set + * since they are sticky and the transaction is retried. + * Which to test first is rather arbitrary. + */ + } else if (token & QTD_STS_MMF) { + /* fs/ls interrupt xfer missed the complete-split */ + status = -EPROTO; + } else if (token & QTD_STS_DBE) { + status = (QTD_PID (token) == 1) /* IN ? */ + ? -ENOSR /* hc couldn't read data */ + : -ECOMM; /* hc couldn't write data */ + } else if (token & QTD_STS_XACT) { + /* timeout, bad CRC, wrong PID, etc */ + ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n", + urb->dev->devpath, + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? "in" : "out"); + status = -EPROTO; + } else { /* unknown */ + status = -EPROTO; + } + + ehci_vdbg (ehci, + "dev%d ep%d%s qtd token %08x --> status %d\n", + usb_pipedevice (urb->pipe), + usb_pipeendpoint (urb->pipe), + usb_pipein (urb->pipe) ? 
"in" : "out", + token, status); + } + + return status; +} + +static void +ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status) +__releases(ehci->lock) +__acquires(ehci->lock) +{ + if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { + /* ... update hc-wide periodic stats */ + ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; + } + + if (unlikely(urb->unlinked)) { + COUNT(ehci->stats.unlink); + } else { + /* report non-error and short read status as zero */ + if (status == -EINPROGRESS || status == -EREMOTEIO) + status = 0; + COUNT(ehci->stats.complete); + } + +#ifdef EHCI_URB_TRACE + ehci_dbg (ehci, + "%s %s urb %p ep%d%s status %d len %d/%d\n", + __func__, urb->dev->devpath, urb, + usb_pipeendpoint (urb->pipe), + usb_pipein (urb->pipe) ? "in" : "out", + status, + urb->actual_length, urb->transfer_buffer_length); +#endif + + /* complete() can reenter this HCD */ + usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); + spin_unlock (&ehci->lock); + usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status); + spin_lock (&ehci->lock); +} + +static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); + +/* + * Process and free completed qtds for a qh, returning URBs to drivers. + * Chases up to qh->hw_current. Returns nonzero if the caller should + * unlink qh. + */ +static unsigned +qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + struct ehci_qtd *last, *end = qh->dummy; + struct list_head *entry, *tmp; + int last_status; + int stopped; + u8 state; + struct ehci_qh_hw *hw = qh->hw; + + /* completions (or tasks on other cpus) must never clobber HALT + * till we've gone through and cleaned everything up, even when + * they add urbs to this qh's queue or mark them for unlinking. + * + * NOTE: unlinking expects to be done in queue order. + * + * It's a bug for qh->qh_state to be anything other than + * QH_STATE_IDLE, unless our caller is scan_async() or + * scan_intr(). + */ + state = qh->qh_state; + qh->qh_state = QH_STATE_COMPLETING; + stopped = (state == QH_STATE_IDLE); + + rescan: + last = NULL; + last_status = -EINPROGRESS; + qh->dequeue_during_giveback = 0; + + /* remove de-activated QTDs from front of queue. + * after faults (including short reads), cleanup this urb + * then let the queue advance. + * if queue is stopped, handles unlinks. + */ + list_for_each_safe (entry, tmp, &qh->qtd_list) { + struct ehci_qtd *qtd; + struct urb *urb; + u32 token = 0; + + qtd = list_entry (entry, struct ehci_qtd, qtd_list); + urb = qtd->urb; + + /* clean up any state from previous QTD ...*/ + if (last) { + if (likely (last->urb != urb)) { + ehci_urb_done(ehci, last->urb, last_status); + last_status = -EINPROGRESS; + } + ehci_qtd_free (ehci, last); + last = NULL; + } + + /* ignore urbs submitted during completions we reported */ + if (qtd == end) + break; + + /* hardware copies qtd out of qh overlay */ + rmb (); + token = hc32_to_cpu(ehci, qtd->hw_token); + + /* always clean up qtds the hc de-activated */ + retry_xacterr: + if ((token & QTD_STS_ACTIVE) == 0) { + + /* Report Data Buffer Error: non-fatal but useful */ + if (token & QTD_STS_DBE) + ehci_dbg(ehci, + "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n", + urb, + usb_endpoint_num(&urb->ep->desc), + usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out", + urb->transfer_buffer_length, + qtd, + qh); + + /* on STALL, error, and short reads this urb must + * complete and all its qtds must be recycled. 
+ */ + if ((token & QTD_STS_HALT) != 0) { + + /* retry transaction errors until we + * reach the software xacterr limit + */ + if ((token & QTD_STS_XACT) && + QTD_CERR(token) == 0 && + ++qh->xacterrs < QH_XACTERR_MAX && + !urb->unlinked) { + ehci_dbg(ehci, + "detected XactErr len %zu/%zu retry %d\n", + qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs); + + /* reset the token in the qtd and the + * qh overlay (which still contains + * the qtd) so that we pick up from + * where we left off + */ + token &= ~QTD_STS_HALT; + token |= QTD_STS_ACTIVE | + (EHCI_TUNE_CERR << 10); + qtd->hw_token = cpu_to_hc32(ehci, + token); + wmb(); + hw->hw_token = cpu_to_hc32(ehci, + token); + goto retry_xacterr; + } + stopped = 1; + + /* magic dummy for some short reads; qh won't advance. + * that silicon quirk can kick in with this dummy too. + * + * other short reads won't stop the queue, including + * control transfers (status stage handles that) or + * most other single-qtd reads ... the queue stops if + * URB_SHORT_NOT_OK was set so the driver submitting + * the urbs could clean it up. + */ + } else if (IS_SHORT_READ (token) + && !(qtd->hw_alt_next + & EHCI_LIST_END(ehci))) { + stopped = 1; + } + + /* stop scanning when we reach qtds the hc is using */ + } else if (likely (!stopped + && ehci->rh_state >= EHCI_RH_RUNNING)) { + break; + + /* scan the whole queue for unlinks whenever it stops */ + } else { + stopped = 1; + + /* cancel everything if we halt, suspend, etc */ + if (ehci->rh_state < EHCI_RH_RUNNING) + last_status = -ESHUTDOWN; + + /* this qtd is active; skip it unless a previous qtd + * for its urb faulted, or its urb was canceled. + */ + else if (last_status == -EINPROGRESS && !urb->unlinked) + continue; + + /* + * If this was the active qtd when the qh was unlinked + * and the overlay's token is active, then the overlay + * hasn't been written back to the qtd yet so use its + * token instead of the qtd's. After the qtd is + * processed and removed, the overlay won't be valid + * any more. + */ + if (state == QH_STATE_IDLE && + qh->qtd_list.next == &qtd->qtd_list && + (hw->hw_token & ACTIVE_BIT(ehci))) { + token = hc32_to_cpu(ehci, hw->hw_token); + hw->hw_token &= ~ACTIVE_BIT(ehci); + + /* An unlink may leave an incomplete + * async transaction in the TT buffer. + * We have to clear it. + */ + ehci_clear_tt_buffer(ehci, qh, urb, token); + } + } + + /* unless we already know the urb's status, collect qtd status + * and update count of bytes transferred. in common short read + * cases with only one data qtd (including control transfers), + * queue processing won't halt. but with two or more qtds (for + * example, with a 32 KB transfer), when the first qtd gets a + * short read the second must be removed by hand. + */ + if (last_status == -EINPROGRESS) { + last_status = qtd_copy_status(ehci, urb, + qtd->length, token); + if (last_status == -EREMOTEIO + && (qtd->hw_alt_next + & EHCI_LIST_END(ehci))) + last_status = -EINPROGRESS; + + /* As part of low/full-speed endpoint-halt processing + * we must clear the TT buffer (11.17.5). + */ + if (unlikely(last_status != -EINPROGRESS && + last_status != -EREMOTEIO)) { + /* The TT's in some hubs malfunction when they + * receive this request following a STALL (they + * stop sending isochronous packets). Since a + * STALL can't leave the TT buffer in a busy + * state (if you believe Figures 11-48 - 11-51 + * in the USB 2.0 spec), we won't clear the TT + * buffer in this case. Strictly speaking this + * is a violation of the spec. 
+ */ + if (last_status != -EPIPE) + ehci_clear_tt_buffer(ehci, qh, urb, + token); + } + } + + /* if we're removing something not at the queue head, + * patch the hardware queue pointer. + */ + if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { + last = list_entry (qtd->qtd_list.prev, + struct ehci_qtd, qtd_list); + last->hw_next = qtd->hw_next; + } + + /* remove qtd; it's recycled after possible urb completion */ + list_del (&qtd->qtd_list); + last = qtd; + + /* reinit the xacterr counter for the next qtd */ + qh->xacterrs = 0; + } + + /* last urb's completion might still need calling */ + if (likely (last != NULL)) { + ehci_urb_done(ehci, last->urb, last_status); + ehci_qtd_free (ehci, last); + } + + /* Do we need to rescan for URBs dequeued during a giveback? */ + if (unlikely(qh->dequeue_during_giveback)) { + /* If the QH is already unlinked, do the rescan now. */ + if (state == QH_STATE_IDLE) + goto rescan; + + /* Otherwise the caller must unlink the QH. */ + } + + /* restore original state; caller must unlink or relink */ + qh->qh_state = state; + + /* be sure the hardware's done with the qh before refreshing + * it after fault cleanup, or recovering from silicon wrongly + * overlaying the dummy qtd (which reduces DMA chatter). + * + * We won't refresh a QH that's linked (after the HC + * stopped the queue). That avoids a race: + * - HC reads first part of QH; + * - CPU updates that first part and the token; + * - HC reads rest of that QH, including token + * Result: HC gets an inconsistent image, and then + * DMAs to/from the wrong memory (corrupting it). + * + * That should be rare for interrupt transfers, + * except maybe high bandwidth ... + */ + if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) + qh->exception = 1; + + /* Let the caller know if the QH needs to be unlinked. */ + return qh->exception; +} + +/*-------------------------------------------------------------------------*/ + +// high bandwidth multiplier, as encoded in highspeed endpoint descriptors +#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) +// ... and packet size, for any kind of endpoint descriptor +#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) + +/* + * reverse of qh_urb_transaction: free a list of TDs. + * used for cleanup after errors, before HC sees an URB's TDs. + */ +static void qtd_list_free ( + struct ehci_hcd *ehci, + struct urb *urb, + struct list_head *qtd_list +) { + struct list_head *entry, *temp; + + list_for_each_safe (entry, temp, qtd_list) { + struct ehci_qtd *qtd; + + qtd = list_entry (entry, struct ehci_qtd, qtd_list); + list_del (&qtd->qtd_list); + ehci_qtd_free (ehci, qtd); + } +} + +/* + * create a list of filled qtds for this URB; won't link into qh. 
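+ *
+ * For control URBs the list is SETUP qtd + data qtd(s) + STATUS
+ * qtd; for bulk/interrupt it is data qtd(s) only. One qtd can
+ * carry at most 16-20KB (five page-sized buffers), so larger
+ * transfers span several qtds.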
+ */ +static struct list_head * +qh_urb_transaction ( + struct ehci_hcd *ehci, + struct urb *urb, + struct list_head *head, + gfp_t flags +) { + struct ehci_qtd *qtd, *qtd_prev; + dma_addr_t buf; + int len, this_sg_len, maxpacket; + int is_input; + u32 token; + int i; + struct scatterlist *sg; + + /* + * URBs map to sequences of QTDs: one logical transaction + */ + qtd = ehci_qtd_alloc (ehci, flags); + if (unlikely (!qtd)) + return NULL; + list_add_tail (&qtd->qtd_list, head); + qtd->urb = urb; + + token = QTD_STS_ACTIVE; + token |= (EHCI_TUNE_CERR << 10); + /* for split transactions, SplitXState initialized to zero */ + + len = urb->transfer_buffer_length; + is_input = usb_pipein (urb->pipe); + if (usb_pipecontrol (urb->pipe)) { + /* SETUP pid */ + qtd_fill(ehci, qtd, urb->setup_dma, + sizeof (struct usb_ctrlrequest), + token | (2 /* "setup" */ << 8), 8); + + /* ... and always at least one more pid */ + token ^= QTD_TOGGLE; + qtd_prev = qtd; + qtd = ehci_qtd_alloc (ehci, flags); + if (unlikely (!qtd)) + goto cleanup; + qtd->urb = urb; + qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); + list_add_tail (&qtd->qtd_list, head); + + /* for zero length DATA stages, STATUS is always IN */ + if (len == 0) + token |= (1 /* "in" */ << 8); + } + + /* + * data transfer stage: buffer setup + */ + i = urb->num_mapped_sgs; + if (len > 0 && i > 0) { + sg = urb->sg; + buf = sg_dma_address(sg); + + /* urb->transfer_buffer_length may be smaller than the + * size of the scatterlist (or vice versa) + */ + this_sg_len = min_t(int, sg_dma_len(sg), len); + } else { + sg = NULL; + buf = urb->transfer_dma; + this_sg_len = len; + } + + if (is_input) + token |= (1 /* "in" */ << 8); + /* else it's already initted to "out" pid (0 << 8) */ + + maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); + + /* + * buffer gets wrapped in one or more qtds; + * last one may be "short" (including zero len) + * and may serve as a control status ack + */ + for (;;) { + int this_qtd_len; + + this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token, + maxpacket); + this_sg_len -= this_qtd_len; + len -= this_qtd_len; + buf += this_qtd_len; + + /* + * short reads advance to a "magic" dummy instead of the next + * qtd ... that forces the queue to stop, for manual cleanup. + * (this will usually be overridden later.) + */ + if (is_input) + qtd->hw_alt_next = ehci->async->hw->hw_alt_next; + + /* qh makes control packets use qtd toggle; maybe switch it */ + if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) + token ^= QTD_TOGGLE; + + if (likely(this_sg_len <= 0)) { + if (--i <= 0 || len <= 0) + break; + sg = sg_next(sg); + buf = sg_dma_address(sg); + this_sg_len = min_t(int, sg_dma_len(sg), len); + } + + qtd_prev = qtd; + qtd = ehci_qtd_alloc (ehci, flags); + if (unlikely (!qtd)) + goto cleanup; + qtd->urb = urb; + qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); + list_add_tail (&qtd->qtd_list, head); + } + + /* + * unless the caller requires manual cleanup after short reads, + * have the alt_next mechanism keep the queue running after the + * last data qtd (the only one, for control and most other cases). + */ + if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 + || usb_pipecontrol (urb->pipe))) + qtd->hw_alt_next = EHCI_LIST_END(ehci); + + /* + * control requests may need a terminating data "status" ack; + * other OUT ones may need a terminating short packet + * (zero length). 
+ */ + if (likely (urb->transfer_buffer_length != 0)) { + int one_more = 0; + + if (usb_pipecontrol (urb->pipe)) { + one_more = 1; + token ^= 0x0100; /* "in" <--> "out" */ + token |= QTD_TOGGLE; /* force DATA1 */ + } else if (usb_pipeout(urb->pipe) + && (urb->transfer_flags & URB_ZERO_PACKET) + && !(urb->transfer_buffer_length % maxpacket)) { + one_more = 1; + } + if (one_more) { + qtd_prev = qtd; + qtd = ehci_qtd_alloc (ehci, flags); + if (unlikely (!qtd)) + goto cleanup; + qtd->urb = urb; + qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); + list_add_tail (&qtd->qtd_list, head); + + /* never any data in such packets */ + qtd_fill(ehci, qtd, 0, 0, token, 0); + } + } + + /* by default, enable interrupt on urb completion */ + if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT))) + qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC); + return head; + +cleanup: + qtd_list_free (ehci, urb, head); + return NULL; +} + +/*-------------------------------------------------------------------------*/ + +// Would be best to create all qh's from config descriptors, +// when each interface/altsetting is established. Unlink +// any previous qh and cancel its urbs first; endpoints are +// implicitly reset then (data toggle too). +// That'd mean updating how usbcore talks to HCDs. (2.7?) + + +/* + * Each QH holds a qtd list; a QH is used for everything except iso. + * + * For interrupt urbs, the scheduler must set the microframe scheduling + * mask(s) each time the QH gets scheduled. For highspeed, that's + * just one microframe in the s-mask. For split interrupt transactions + * there are additional complications: c-mask, maybe FSTNs. + */ +static struct ehci_qh * +qh_make ( + struct ehci_hcd *ehci, + struct urb *urb, + gfp_t flags +) { + struct ehci_qh *qh = ehci_qh_alloc (ehci, flags); + u32 info1 = 0, info2 = 0; + int is_input, type; + int maxp = 0; + struct usb_tt *tt = urb->dev->tt; + struct ehci_qh_hw *hw; + + if (!qh) + return qh; + + /* + * init endpoint/device data for this QH + */ + info1 |= usb_pipeendpoint (urb->pipe) << 8; + info1 |= usb_pipedevice (urb->pipe) << 0; + + is_input = usb_pipein (urb->pipe); + type = usb_pipetype (urb->pipe); + maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input); + + /* 1024 byte maxpacket is a hardware ceiling. High bandwidth + * acts like up to 3KB, but is built from smaller packets. + */ + if (max_packet(maxp) > 1024) { + ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp)); + goto done; + } + + /* Compute interrupt scheduling parameters just once, and save. + * - allowing for high bandwidth, how many nsec/uframe are used? + * - split transactions need a second CSPLIT uframe; same question + * - splits also need a schedule gap (for full/low speed I/O) + * - qh has a polling interval + * + * For control/bulk requests, the HC or TT handles these. + */ + if (type == PIPE_INTERRUPT) { + qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH, + is_input, 0, + hb_mult(maxp) * max_packet(maxp))); + qh->start = NO_FRAME; + + if (urb->dev->speed == USB_SPEED_HIGH) { + qh->c_usecs = 0; + qh->gap_uf = 0; + + qh->period = urb->interval >> 3; + if (qh->period == 0 && urb->interval != 1) { + /* NOTE interval 2 or 4 uframes could work. + * But interval 1 scheduling is simpler, and + * includes high bandwidth. 
+ */ + urb->interval = 1; + } else if (qh->period > ehci->periodic_size) { + qh->period = ehci->periodic_size; + urb->interval = qh->period << 3; + } + } else { + int think_time; + + /* gap is f(FS/LS transfer times) */ + qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed, + is_input, 0, maxp) / (125 * 1000); + + /* FIXME this just approximates SPLIT/CSPLIT times */ + if (is_input) { // SPLIT, gap, CSPLIT+DATA + qh->c_usecs = qh->usecs + HS_USECS (0); + qh->usecs = HS_USECS (1); + } else { // SPLIT+DATA, gap, CSPLIT + qh->usecs += HS_USECS (1); + qh->c_usecs = HS_USECS (0); + } + + think_time = tt ? tt->think_time : 0; + qh->tt_usecs = NS_TO_US (think_time + + usb_calc_bus_time (urb->dev->speed, + is_input, 0, max_packet (maxp))); + qh->period = urb->interval; + if (qh->period > ehci->periodic_size) { + qh->period = ehci->periodic_size; + urb->interval = qh->period; + } + } + } + + /* support for tt scheduling, and access to toggles */ + qh->dev = urb->dev; + + /* using TT? */ + switch (urb->dev->speed) { + case USB_SPEED_LOW: + info1 |= QH_LOW_SPEED; + /* FALL THROUGH */ + + case USB_SPEED_FULL: + /* EPS 0 means "full" */ + if (type != PIPE_INTERRUPT) + info1 |= (EHCI_TUNE_RL_TT << 28); + if (type == PIPE_CONTROL) { + info1 |= QH_CONTROL_EP; /* for TT */ + info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ + } + info1 |= maxp << 16; + + info2 |= (EHCI_TUNE_MULT_TT << 30); + + /* Some Freescale processors have an erratum in which the + * port number in the queue head was 0..N-1 instead of 1..N. + */ + if (ehci_has_fsl_portno_bug(ehci)) + info2 |= (urb->dev->ttport-1) << 23; + else + info2 |= urb->dev->ttport << 23; + + /* set the address of the TT; for TDI's integrated + * root hub tt, leave it zeroed. + */ + if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub) + info2 |= tt->hub->devnum << 16; + + /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */ + + break; + + case USB_SPEED_HIGH: /* no TT involved */ + info1 |= QH_HIGH_SPEED; + if (type == PIPE_CONTROL) { + info1 |= (EHCI_TUNE_RL_HS << 28); + info1 |= 64 << 16; /* usb2 fixed maxpacket */ + info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ + info2 |= (EHCI_TUNE_MULT_HS << 30); + } else if (type == PIPE_BULK) { + info1 |= (EHCI_TUNE_RL_HS << 28); + /* The USB spec says that high speed bulk endpoints + * always use 512 byte maxpacket. But some device + * vendors decided to ignore that, and MSFT is happy + * to help them do so. So now people expect to use + * such nonconformant devices with Linux too; sigh. 
+ */ + info1 |= max_packet(maxp) << 16; + info2 |= (EHCI_TUNE_MULT_HS << 30); + } else { /* PIPE_INTERRUPT */ + info1 |= max_packet (maxp) << 16; + info2 |= hb_mult (maxp) << 30; + } + break; + default: + ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev, + urb->dev->speed); +done: + qh_destroy(ehci, qh); + return NULL; + } + + /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */ + + /* init as live, toggle clear */ + qh->qh_state = QH_STATE_IDLE; + hw = qh->hw; + hw->hw_info1 = cpu_to_hc32(ehci, info1); + hw->hw_info2 = cpu_to_hc32(ehci, info2); + qh->is_out = !is_input; + usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); + return qh; +} + +/*-------------------------------------------------------------------------*/ + +static void enable_async(struct ehci_hcd *ehci) +{ + if (ehci->async_count++) + return; + + /* Stop waiting to turn off the async schedule */ + ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC); + + /* Don't start the schedule until ASS is 0 */ + ehci_poll_ASS(ehci); + turn_on_io_watchdog(ehci); +} + +static void disable_async(struct ehci_hcd *ehci) +{ + if (--ehci->async_count) + return; + + /* The async schedule and unlink lists are supposed to be empty */ + WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) || + !list_empty(&ehci->async_idle)); + + /* Don't turn off the schedule until ASS is 1 */ + ehci_poll_ASS(ehci); +} + +/* move qh (and its qtds) onto async queue; maybe enable queue. */ + +static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + __hc32 dma = QH_NEXT(ehci, qh->qh_dma); + struct ehci_qh *head; + + /* Don't link a QH if there's a Clear-TT-Buffer pending */ + if (unlikely(qh->clearing_tt)) + return; + + WARN_ON(qh->qh_state != QH_STATE_IDLE); + + /* clear halt and/or toggle; and maybe recover from silicon quirk */ + qh_refresh(ehci, qh); + + /* splice right after start */ + head = ehci->async; + qh->qh_next = head->qh_next; + qh->hw->hw_next = head->hw->hw_next; + wmb (); + + head->qh_next.qh = qh; + head->hw->hw_next = dma; + + qh->qh_state = QH_STATE_LINKED; + qh->xacterrs = 0; + qh->exception = 0; + /* qtd completions reported later by interrupt */ + + enable_async(ehci); +} + +/*-------------------------------------------------------------------------*/ + +/* + * For control/bulk/interrupt, return QH with these TDs appended. + * Allocates and initializes the QH if necessary. + * Returns null if it can't allocate a QH it needs to. + * If the QH has TDs (urbs) already, that's great. + */ +static struct ehci_qh *qh_append_tds ( + struct ehci_hcd *ehci, + struct urb *urb, + struct list_head *qtd_list, + int epnum, + void **ptr +) +{ + struct ehci_qh *qh = NULL; + __hc32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f); + + qh = (struct ehci_qh *) *ptr; + if (unlikely (qh == NULL)) { + /* can't sleep here, we have ehci->lock... */ + qh = qh_make (ehci, urb, GFP_ATOMIC); + *ptr = qh; + } + if (likely (qh != NULL)) { + struct ehci_qtd *qtd; + + if (unlikely (list_empty (qtd_list))) + qtd = NULL; + else + qtd = list_entry (qtd_list->next, struct ehci_qtd, + qtd_list); + + /* control qh may need patching ... */ + if (unlikely (epnum == 0)) { + + /* usb_reset_device() briefly reverts to address 0 */ + if (usb_pipedevice (urb->pipe) == 0) + qh->hw->hw_info1 &= ~qh_addr_mask; + } + + /* just one way to queue requests: swap with the dummy qtd. + * only hc or qh_refresh() ever modify the overlay. 
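+ *
+ * Roughly: the new first qtd's contents are copied into the old
+ * dummy (whose DMA address the overlay already points at), the
+ * copied-from qtd becomes the new dummy at list end, and only
+ * then is the saved token restored, so the HC never observes a
+ * half-built chain.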
+ */ + if (likely (qtd != NULL)) { + struct ehci_qtd *dummy; + dma_addr_t dma; + __hc32 token; + + /* to avoid racing the HC, use the dummy td instead of + * the first td of our list (becomes new dummy). both + * tds stay deactivated until we're done, when the + * HC is allowed to fetch the old dummy (4.10.2). + */ + token = qtd->hw_token; + qtd->hw_token = HALT_BIT(ehci); + + dummy = qh->dummy; + + dma = dummy->qtd_dma; + *dummy = *qtd; + dummy->qtd_dma = dma; + + list_del (&qtd->qtd_list); + list_add (&dummy->qtd_list, qtd_list); + list_splice_tail(qtd_list, &qh->qtd_list); + + ehci_qtd_init(ehci, qtd, qtd->qtd_dma); + qh->dummy = qtd; + + /* hc must see the new dummy at list end */ + dma = qtd->qtd_dma; + qtd = list_entry (qh->qtd_list.prev, + struct ehci_qtd, qtd_list); + qtd->hw_next = QTD_NEXT(ehci, dma); + + /* let the hc process these next qtds */ + wmb (); + dummy->hw_token = token; + + urb->hcpriv = qh; + } + } + return qh; +} + +/*-------------------------------------------------------------------------*/ + +static int +submit_async ( + struct ehci_hcd *ehci, + struct urb *urb, + struct list_head *qtd_list, + gfp_t mem_flags +) { + int epnum; + unsigned long flags; + struct ehci_qh *qh = NULL; + int rc; + + epnum = urb->ep->desc.bEndpointAddress; + +#ifdef EHCI_URB_TRACE + { + struct ehci_qtd *qtd; + qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list); + ehci_dbg(ehci, + "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", + __func__, urb->dev->devpath, urb, + epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", + urb->transfer_buffer_length, + qtd, urb->ep->hcpriv); + } +#endif + + spin_lock_irqsave (&ehci->lock, flags); + if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { + rc = -ESHUTDOWN; + goto done; + } + rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); + if (unlikely(rc)) + goto done; + + qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv); + if (unlikely(qh == NULL)) { + usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); + rc = -ENOMEM; + goto done; + } + + /* Control/bulk operations through TTs don't need scheduling, + * the HC and TT handle it when the TT has a buffer ready. 
+ */ + if (likely (qh->qh_state == QH_STATE_IDLE)) + qh_link_async(ehci, qh); + done: + spin_unlock_irqrestore (&ehci->lock, flags); + if (unlikely (qh == NULL)) + qtd_list_free (ehci, urb, qtd_list); + return rc; +} + +/*-------------------------------------------------------------------------*/ + +static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + struct ehci_qh *prev; + + /* Add to the end of the list of QHs waiting for the next IAAD */ + qh->qh_state = QH_STATE_UNLINK_WAIT; + list_add_tail(&qh->unlink_node, &ehci->async_unlink); + + /* Unlink it from the schedule */ + prev = ehci->async; + while (prev->qh_next.qh != qh) + prev = prev->qh_next.qh; + + prev->hw->hw_next = qh->hw->hw_next; + prev->qh_next = qh->qh_next; + if (ehci->qh_scan_next == qh) + ehci->qh_scan_next = qh->qh_next.qh; +} + +static void start_iaa_cycle(struct ehci_hcd *ehci) +{ + /* Do nothing if an IAA cycle is already running */ + if (ehci->iaa_in_progress) + return; + ehci->iaa_in_progress = true; + + /* If the controller isn't running, we don't have to wait for it */ + if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) { + end_unlink_async(ehci); + + /* Otherwise start a new IAA cycle */ + } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) { + + /* Make sure the unlinks are all visible to the hardware */ + wmb(); + + ehci_writel(ehci, ehci->command | CMD_IAAD, + &ehci->regs->command); + ehci_readl(ehci, &ehci->regs->command); + ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true); + } +} + +/* the async qh for the qtds being unlinked are now gone from the HC */ + +static void end_unlink_async(struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + bool early_exit; + + if (ehci->has_synopsys_hc_bug) + ehci_writel(ehci, (u32) ehci->async->qh_dma, + &ehci->regs->async_next); + + /* The current IAA cycle has ended */ + ehci->iaa_in_progress = false; + + if (list_empty(&ehci->async_unlink)) + return; + qh = list_first_entry(&ehci->async_unlink, struct ehci_qh, + unlink_node); /* QH whose IAA cycle just ended */ + + /* + * If async_unlinking is set then this routine is already running, + * either on the stack or on another CPU. + */ + early_exit = ehci->async_unlinking; + + /* If the controller isn't running, process all the waiting QHs */ + if (ehci->rh_state < EHCI_RH_RUNNING) + list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle); + + /* + * Intel (?) bug: The HC can write back the overlay region even + * after the IAA interrupt occurs. In self-defense, always go + * through two IAA cycles for each QH. + */ + else if (qh->qh_state == QH_STATE_UNLINK_WAIT) { + qh->qh_state = QH_STATE_UNLINK; + early_exit = true; + } + + /* Otherwise process only the first waiting QH (NVIDIA bug?) */ + else + list_move_tail(&qh->unlink_node, &ehci->async_idle); + + /* Start a new IAA cycle if any QHs are waiting for it */ + if (!list_empty(&ehci->async_unlink)) + start_iaa_cycle(ehci); + + /* + * Don't allow nesting or concurrent calls, + * or wait for the second IAA cycle for the next QH. 
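+ * (IAA = Interrupt on Async Advance, the doorbell handshake
+ * confirming the HC no longer holds a cached copy of an unlinked
+ * QH, so its memory may safely be reused or freed.)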
+ */ + if (early_exit) + return; + + /* Process the idle QHs */ + ehci->async_unlinking = true; + while (!list_empty(&ehci->async_idle)) { + qh = list_first_entry(&ehci->async_idle, struct ehci_qh, + unlink_node); + list_del(&qh->unlink_node); + + qh->qh_state = QH_STATE_IDLE; + qh->qh_next.qh = NULL; + + if (!list_empty(&qh->qtd_list)) + qh_completions(ehci, qh); + if (!list_empty(&qh->qtd_list) && + ehci->rh_state == EHCI_RH_RUNNING) + qh_link_async(ehci, qh); + disable_async(ehci); + } + ehci->async_unlinking = false; +} + +static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh); + +static void unlink_empty_async(struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + struct ehci_qh *qh_to_unlink = NULL; + int count = 0; + + /* Find the last async QH which has been empty for a timer cycle */ + for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) { + if (list_empty(&qh->qtd_list) && + qh->qh_state == QH_STATE_LINKED) { + ++count; + if (qh->unlink_cycle != ehci->async_unlink_cycle) + qh_to_unlink = qh; + } + } + + /* If nothing else is being unlinked, unlink the last empty QH */ + if (list_empty(&ehci->async_unlink) && qh_to_unlink) { + start_unlink_async(ehci, qh_to_unlink); + --count; + } + + /* Other QHs will be handled later */ + if (count > 0) { + ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); + ++ehci->async_unlink_cycle; + } +} + +/* The root hub is suspended; unlink all the async QHs */ +static void __maybe_unused unlink_empty_async_suspended(struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + + while (ehci->async->qh_next.qh) { + qh = ehci->async->qh_next.qh; + WARN_ON(!list_empty(&qh->qtd_list)); + single_unlink_async(ehci, qh); + } + start_iaa_cycle(ehci); +} + +/* makes sure the async qh will become idle */ +/* caller must own ehci->lock */ + +static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + /* If the QH isn't linked then there's nothing we can do. */ + if (qh->qh_state != QH_STATE_LINKED) + return; + + single_unlink_async(ehci, qh); + start_iaa_cycle(ehci); +} + +/*-------------------------------------------------------------------------*/ + +static void scan_async (struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + bool check_unlinks_later = false; + + ehci->qh_scan_next = ehci->async->qh_next.qh; + while (ehci->qh_scan_next) { + qh = ehci->qh_scan_next; + ehci->qh_scan_next = qh->qh_next.qh; + + /* clean any finished work for this qh */ + if (!list_empty(&qh->qtd_list)) { + int temp; + + /* + * Unlinks could happen here; completion reporting + * drops the lock. That's why ehci->qh_scan_next + * always holds the next qh to scan; if the next qh + * gets unlinked then ehci->qh_scan_next is adjusted + * in single_unlink_async(). + */ + temp = qh_completions(ehci, qh); + if (unlikely(temp)) { + start_unlink_async(ehci, qh); + } else if (list_empty(&qh->qtd_list) + && qh->qh_state == QH_STATE_LINKED) { + qh->unlink_cycle = ehci->async_unlink_cycle; + check_unlinks_later = true; + } + } + } + + /* + * Unlink empty entries, reducing DMA usage as well + * as HCD schedule-scanning costs. Delay for any qh + * we just scanned, there's a not-unusual case that it + * doesn't stay idle for long. 
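+ * An empty qh therefore stays linked for one full timer cycle
+ * (tracked via unlink_cycle / async_unlink_cycle) before being
+ * unlinked, so back-to-back URBs to the same endpoint avoid
+ * constant unlink/IAA/relink churn.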
+ */ + if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING && + !(ehci->enabled_hrtimer_events & + BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) { + ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); + ++ehci->async_unlink_cycle; + } +} diff --git a/addons/ehci-pci/src/3.10.108/ehci-sched.c b/addons/ehci-pci/src/3.10.108/ehci-sched.c new file mode 100644 index 00000000..8e3c878f --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/ehci-sched.c @@ -0,0 +1,2327 @@ +/* + * Copyright (c) 2001-2004 by David Brownell + * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* this file is part of ehci-hcd.c */ + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI scheduled transaction support: interrupt, iso, split iso + * These are called "periodic" transactions in the EHCI spec. + * + * Note that for interrupt transfers, the QH/QTD manipulation is shared + * with the "asynchronous" transaction support (control/bulk transfers). + * The only real difference is in how interrupt transfers are scheduled. + * + * For ISO, we make an "iso_stream" head to serve the same role as a QH. + * It keeps track of every ITD (or SITD) that's linked, and holds enough + * pre-calculated schedule data to make appending to the queue be quick. 
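+ *
+ * [Not from the original driver -- a stand-alone sketch of the
+ * "shadow list" idea used throughout this file, with hypothetical
+ * names: the controller walks 32-bit DMA words whose low bits tag the
+ * record type, while the driver keeps a parallel list of C pointers
+ * so it never has to translate DMA addresses back into structures.]
+ */
+#if 0	/* illustrative sketch only */
+#include <stdint.h>
+
+enum rec_type { REC_ITD, REC_QH };	/* in EHCI the tag lives in the
+					 * hw "next" word, bits 2:1 */
+
+struct shadow;
+struct qh  { uint32_t hw_next; struct shadow *sw_next; };
+struct itd { uint32_t hw_next; struct shadow *sw_next; };
+
+struct shadow {
+	enum rec_type type;
+	union { struct qh *qh; struct itd *itd; } u;
+};
+
+/* step to the next software node, mirroring the hardware traversal */
+static struct shadow *next_shadow(struct shadow *s)
+{
+	return (s->type == REC_QH) ? s->u.qh->sw_next : s->u.itd->sw_next;
+}
+#endif
+/*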
+ */ + +static int ehci_get_frame (struct usb_hcd *hcd); + +/* + * periodic_next_shadow - return "next" pointer on shadow list + * @periodic: host pointer to qh/itd/sitd + * @tag: hardware tag for type of this record + */ +static union ehci_shadow * +periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic, + __hc32 tag) +{ + switch (hc32_to_cpu(ehci, tag)) { + case Q_TYPE_QH: + return &periodic->qh->qh_next; + case Q_TYPE_FSTN: + return &periodic->fstn->fstn_next; + case Q_TYPE_ITD: + return &periodic->itd->itd_next; + // case Q_TYPE_SITD: + default: + return &periodic->sitd->sitd_next; + } +} + +static __hc32 * +shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic, + __hc32 tag) +{ + switch (hc32_to_cpu(ehci, tag)) { + /* our ehci_shadow.qh is actually software part */ + case Q_TYPE_QH: + return &periodic->qh->hw->hw_next; + /* others are hw parts */ + default: + return periodic->hw_next; + } +} + +/* caller must hold ehci->lock */ +static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) +{ + union ehci_shadow *prev_p = &ehci->pshadow[frame]; + __hc32 *hw_p = &ehci->periodic[frame]; + union ehci_shadow here = *prev_p; + + /* find predecessor of "ptr"; hw and shadow lists are in sync */ + while (here.ptr && here.ptr != ptr) { + prev_p = periodic_next_shadow(ehci, prev_p, + Q_NEXT_TYPE(ehci, *hw_p)); + hw_p = shadow_next_periodic(ehci, &here, + Q_NEXT_TYPE(ehci, *hw_p)); + here = *prev_p; + } + /* an interrupt entry (at list end) could have been shared */ + if (!here.ptr) + return; + + /* update shadow and hardware lists ... the old "next" pointers + * from ptr may still be in use, the caller updates them. + */ + *prev_p = *periodic_next_shadow(ehci, &here, + Q_NEXT_TYPE(ehci, *hw_p)); + + if (!ehci->use_dummy_qh || + *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)) + != EHCI_LIST_END(ehci)) + *hw_p = *shadow_next_periodic(ehci, &here, + Q_NEXT_TYPE(ehci, *hw_p)); + else + *hw_p = ehci->dummy->qh_dma; +} + +/* how many of the uframe's 125 usecs are allocated? */ +static unsigned short +periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe) +{ + __hc32 *hw_p = &ehci->periodic [frame]; + union ehci_shadow *q = &ehci->pshadow [frame]; + unsigned usecs = 0; + struct ehci_qh_hw *hw; + + while (q->ptr) { + switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) { + case Q_TYPE_QH: + hw = q->qh->hw; + /* is it in the S-mask? */ + if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe)) + usecs += q->qh->usecs; + /* ... or C-mask? */ + if (hw->hw_info2 & cpu_to_hc32(ehci, + 1 << (8 + uframe))) + usecs += q->qh->c_usecs; + hw_p = &hw->hw_next; + q = &q->qh->qh_next; + break; + // case Q_TYPE_FSTN: + default: + /* for "save place" FSTNs, count the relevant INTR + * bandwidth from the previous frame + */ + if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) { + ehci_dbg (ehci, "ignoring FSTN cost ...\n"); + } + hw_p = &q->fstn->hw_next; + q = &q->fstn->fstn_next; + break; + case Q_TYPE_ITD: + if (q->itd->hw_transaction[uframe]) + usecs += q->itd->stream->usecs; + hw_p = &q->itd->hw_next; + q = &q->itd->itd_next; + break; + case Q_TYPE_SITD: + /* is it in the S-mask? (count SPLIT, DATA) */ + if (q->sitd->hw_uframe & cpu_to_hc32(ehci, + 1 << uframe)) { + if (q->sitd->hw_fullspeed_ep & + cpu_to_hc32(ehci, 1<<31)) + usecs += q->sitd->stream->usecs; + else /* worst case for OUT start-split */ + usecs += HS_USECS_ISO (188); + } + + /* ... C-mask? 
(count CSPLIT, DATA) */ + if (q->sitd->hw_uframe & + cpu_to_hc32(ehci, 1 << (8 + uframe))) { + /* worst case for IN complete-split */ + usecs += q->sitd->stream->c_usecs; + } + + hw_p = &q->sitd->hw_next; + q = &q->sitd->sitd_next; + break; + } + } +#ifdef DEBUG + if (usecs > ehci->uframe_periodic_max) + ehci_err (ehci, "uframe %d sched overrun: %d usecs\n", + frame * 8 + uframe, usecs); +#endif + return usecs; +} + +/*-------------------------------------------------------------------------*/ + +static int same_tt (struct usb_device *dev1, struct usb_device *dev2) +{ + if (!dev1->tt || !dev2->tt) + return 0; + if (dev1->tt != dev2->tt) + return 0; + if (dev1->tt->multi) + return dev1->ttport == dev2->ttport; + else + return 1; +} + +#ifdef CONFIG_USB_EHCI_TT_NEWSCHED + +/* Which uframe does the low/fullspeed transfer start in? + * + * The parameter is the mask of ssplits in "H-frame" terms + * and this returns the transfer start uframe in "B-frame" terms, + * which allows both to match, e.g. a ssplit in "H-frame" uframe 0 + * will cause a transfer in "B-frame" uframe 0. "B-frames" lag + * "H-frames" by 1 uframe. See the EHCI spec sec 4.5 and figure 4.7. + */ +static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask) +{ + unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask); + if (!smask) { + ehci_err(ehci, "invalid empty smask!\n"); + /* uframe 7 can't have bw so this will indicate failure */ + return 7; + } + return ffs(smask) - 1; +} + +static const unsigned char +max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; + +/* carryover low/fullspeed bandwidth that crosses uframe boundries */ +static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) +{ + int i; + for (i=0; i<7; i++) { + if (max_tt_usecs[i] < tt_usecs[i]) { + tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i]; + tt_usecs[i] = max_tt_usecs[i]; + } + } +} + +/* How many of the tt's periodic downstream 1000 usecs are allocated? + * + * While this measures the bandwidth in terms of usecs/uframe, + * the low/fullspeed bus has no notion of uframes, so any particular + * low/fullspeed transfer can "carry over" from one uframe to the next, + * since the TT just performs downstream transfers in sequence. + * + * For example two separate 100 usec transfers can start in the same uframe, + * and the second one would "carry over" 75 usecs into the next uframe. 
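+ *
+ * [Not from the original driver -- the comment's own numbers as a
+ * runnable check of carryover_tt_bandwidth() above (compiles
+ * stand-alone if that function and max_tt_usecs[] are copied in):]
+ */
+#if 0	/* illustrative sketch only */
+#include <stdio.h>
+
+int main(void)
+{
+	/* two 100 usec transfers both start in uframe 0 */
+	unsigned short tt_usecs[8] = { 200, 0, 0, 0, 0, 0, 0, 0 };
+
+	carryover_tt_bandwidth(tt_usecs);
+
+	/* uframe 0 is capped at 125 usecs; the excess 75 spill into
+	 * uframe 1, exactly as described above
+	 */
+	printf("%u %u\n", tt_usecs[0], tt_usecs[1]);	/* prints "125 75" */
+	return 0;
+}
+#endif
+/*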
+ */ +static void +periodic_tt_usecs ( + struct ehci_hcd *ehci, + struct usb_device *dev, + unsigned frame, + unsigned short tt_usecs[8] +) +{ + __hc32 *hw_p = &ehci->periodic [frame]; + union ehci_shadow *q = &ehci->pshadow [frame]; + unsigned char uf; + + memset(tt_usecs, 0, 16); + + while (q->ptr) { + switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) { + case Q_TYPE_ITD: + hw_p = &q->itd->hw_next; + q = &q->itd->itd_next; + continue; + case Q_TYPE_QH: + if (same_tt(dev, q->qh->dev)) { + uf = tt_start_uframe(ehci, q->qh->hw->hw_info2); + tt_usecs[uf] += q->qh->tt_usecs; + } + hw_p = &q->qh->hw->hw_next; + q = &q->qh->qh_next; + continue; + case Q_TYPE_SITD: + if (same_tt(dev, q->sitd->urb->dev)) { + uf = tt_start_uframe(ehci, q->sitd->hw_uframe); + tt_usecs[uf] += q->sitd->stream->tt_usecs; + } + hw_p = &q->sitd->hw_next; + q = &q->sitd->sitd_next; + continue; + // case Q_TYPE_FSTN: + default: + ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n", + frame); + hw_p = &q->fstn->hw_next; + q = &q->fstn->fstn_next; + } + } + + carryover_tt_bandwidth(tt_usecs); + + if (max_tt_usecs[7] < tt_usecs[7]) + ehci_err(ehci, "frame %d tt sched overrun: %d usecs\n", + frame, tt_usecs[7] - max_tt_usecs[7]); +} + +/* + * Return true if the device's tt's downstream bus is available for a + * periodic transfer of the specified length (usecs), starting at the + * specified frame/uframe. Note that (as summarized in section 11.19 + * of the usb 2.0 spec) TTs can buffer multiple transactions for each + * uframe. + * + * The uframe parameter is when the fullspeed/lowspeed transfer + * should be executed in "B-frame" terms, which is the same as the + * highspeed ssplit's uframe (which is in "H-frame" terms). For example + * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0. + * See the EHCI spec sec 4.5 and fig 4.7. + * + * This checks if the full/lowspeed bus, at the specified starting uframe, + * has the specified bandwidth available, according to rules listed + * in USB 2.0 spec section 11.18.1 fig 11-60. + * + * This does not check if the transfer would exceed the max ssplit + * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4, + * since proper scheduling limits ssplits to less than 16 per uframe. 
+ */ +static int tt_available ( + struct ehci_hcd *ehci, + unsigned period, + struct usb_device *dev, + unsigned frame, + unsigned uframe, + u16 usecs +) +{ + if ((period == 0) || (uframe >= 7)) /* error */ + return 0; + + for (; frame < ehci->periodic_size; frame += period) { + unsigned short tt_usecs[8]; + + periodic_tt_usecs (ehci, dev, frame, tt_usecs); + + ehci_vdbg(ehci, "tt frame %d check %d usecs start uframe %d in" + " schedule %d/%d/%d/%d/%d/%d/%d/%d\n", + frame, usecs, uframe, + tt_usecs[0], tt_usecs[1], tt_usecs[2], tt_usecs[3], + tt_usecs[4], tt_usecs[5], tt_usecs[6], tt_usecs[7]); + + if (max_tt_usecs[uframe] <= tt_usecs[uframe]) { + ehci_vdbg(ehci, "frame %d uframe %d fully scheduled\n", + frame, uframe); + return 0; + } + + /* special case for isoc transfers larger than 125us: + * the first and each subsequent fully used uframe + * must be empty, so as to not illegally delay + * already scheduled transactions + */ + if (125 < usecs) { + int ufs = (usecs / 125); + int i; + for (i = uframe; i < (uframe + ufs) && i < 8; i++) + if (0 < tt_usecs[i]) { + ehci_vdbg(ehci, + "multi-uframe xfer can't fit " + "in frame %d uframe %d\n", + frame, i); + return 0; + } + } + + tt_usecs[uframe] += usecs; + + carryover_tt_bandwidth(tt_usecs); + + /* fail if the carryover pushed bw past the last uframe's limit */ + if (max_tt_usecs[7] < tt_usecs[7]) { + ehci_vdbg(ehci, + "tt unavailable usecs %d frame %d uframe %d\n", + usecs, frame, uframe); + return 0; + } + } + + return 1; +} + +#else + +/* return true iff the device's transaction translator is available + * for a periodic transfer starting at the specified frame, using + * all the uframes in the mask. + */ +static int tt_no_collision ( + struct ehci_hcd *ehci, + unsigned period, + struct usb_device *dev, + unsigned frame, + u32 uf_mask +) +{ + if (period == 0) /* error */ + return 0; + + /* note bandwidth wastage: split never follows csplit + * (different dev or endpoint) until the next uframe. + * calling convention doesn't make that distinction. + */ + for (; frame < ehci->periodic_size; frame += period) { + union ehci_shadow here; + __hc32 type; + struct ehci_qh_hw *hw; + + here = ehci->pshadow [frame]; + type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]); + while (here.ptr) { + switch (hc32_to_cpu(ehci, type)) { + case Q_TYPE_ITD: + type = Q_NEXT_TYPE(ehci, here.itd->hw_next); + here = here.itd->itd_next; + continue; + case Q_TYPE_QH: + hw = here.qh->hw; + if (same_tt (dev, here.qh->dev)) { + u32 mask; + + mask = hc32_to_cpu(ehci, + hw->hw_info2); + /* "knows" no gap is needed */ + mask |= mask >> 8; + if (mask & uf_mask) + break; + } + type = Q_NEXT_TYPE(ehci, hw->hw_next); + here = here.qh->qh_next; + continue; + case Q_TYPE_SITD: + if (same_tt (dev, here.sitd->urb->dev)) { + u16 mask; + + mask = hc32_to_cpu(ehci, here.sitd + ->hw_uframe); + /* FIXME assumes no gap for IN! 
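+ *
+ * [Not from the original driver -- the "mask |= mask >> 8" folding
+ * just below merges the CSPLIT byte (bits 15:8) onto the SSPLIT byte
+ * (bits 7:0) so a single AND against uf_mask tests both; a worked
+ * example:]
+ */
+#if 0	/* illustrative sketch only */
+#include <assert.h>
+#include <stdint.h>
+
+int main(void)
+{
+	/* SSPLIT in uframe 1, CSPLITs in uframes 3 and 4 */
+	uint16_t mask = (1 << 1) | (1 << (8 + 3)) | (1 << (8 + 4));
+
+	mask |= mask >> 8;	/* fold C-mask bits down onto the S-mask */
+
+	/* low byte now flags every uframe the TT is busy in: 1, 3, 4 */
+	assert(((mask & 0xff) & (1 << 3)) != 0);	/* collision */
+	assert(((mask & 0xff) & (1 << 5)) == 0);	/* uframe 5 free */
+	return 0;
+}
+#endif
+/*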
*/ + mask |= mask >> 8; + if (mask & uf_mask) + break; + } + type = Q_NEXT_TYPE(ehci, here.sitd->hw_next); + here = here.sitd->sitd_next; + continue; + // case Q_TYPE_FSTN: + default: + ehci_dbg (ehci, + "periodic frame %d bogus type %d\n", + frame, type); + } + + /* collision or error */ + return 0; + } + } + + /* no collision */ + return 1; +} + +#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */ + +/*-------------------------------------------------------------------------*/ + +static void enable_periodic(struct ehci_hcd *ehci) +{ + if (ehci->periodic_count++) + return; + + /* Stop waiting to turn off the periodic schedule */ + ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC); + + /* Don't start the schedule until PSS is 0 */ + ehci_poll_PSS(ehci); + turn_on_io_watchdog(ehci); +} + +static void disable_periodic(struct ehci_hcd *ehci) +{ + if (--ehci->periodic_count) + return; + + /* Don't turn off the schedule until PSS is 1 */ + ehci_poll_PSS(ehci); +} + +/*-------------------------------------------------------------------------*/ + +/* periodic schedule slots have iso tds (normal or split) first, then a + * sparse tree for active interrupt transfers. + * + * this just links in a qh; caller guarantees uframe masks are set right. + * no FSTN support (yet; ehci 0.96+) + */ +static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + unsigned i; + unsigned period = qh->period; + + dev_dbg (&qh->dev->dev, + "link qh%d-%04x/%p start %d [%d/%d us]\n", + period, hc32_to_cpup(ehci, &qh->hw->hw_info2) + & (QH_CMASK | QH_SMASK), + qh, qh->start, qh->usecs, qh->c_usecs); + + /* high bandwidth, or otherwise every microframe */ + if (period == 0) + period = 1; + + for (i = qh->start; i < ehci->periodic_size; i += period) { + union ehci_shadow *prev = &ehci->pshadow[i]; + __hc32 *hw_p = &ehci->periodic[i]; + union ehci_shadow here = *prev; + __hc32 type = 0; + + /* skip the iso nodes at list head */ + while (here.ptr) { + type = Q_NEXT_TYPE(ehci, *hw_p); + if (type == cpu_to_hc32(ehci, Q_TYPE_QH)) + break; + prev = periodic_next_shadow(ehci, prev, type); + hw_p = shadow_next_periodic(ehci, &here, type); + here = *prev; + } + + /* sorting each branch by period (slow-->fast) + * enables sharing interior tree nodes + */ + while (here.ptr && qh != here.qh) { + if (qh->period > here.qh->period) + break; + prev = &here.qh->qh_next; + hw_p = &here.qh->hw->hw_next; + here = *prev; + } + /* link in this qh, unless some earlier pass did that */ + if (qh != here.qh) { + qh->qh_next = here; + if (here.qh) + qh->hw->hw_next = *hw_p; + wmb (); + prev->qh = qh; + *hw_p = QH_NEXT (ehci, qh->qh_dma); + } + } + qh->qh_state = QH_STATE_LINKED; + qh->xacterrs = 0; + qh->exception = 0; + + /* update per-qh bandwidth for usbfs */ + ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period + ? ((qh->usecs + qh->c_usecs) / qh->period) + : (qh->usecs * 8); + + list_add(&qh->intr_node, &ehci->intr_qh_list); + + /* maybe enable periodic schedule processing */ + ++ehci->intr_count; + enable_periodic(ehci); +} + +static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + unsigned i; + unsigned period; + + /* + * If qh is for a low/full-speed device, simply unlinking it + * could interfere with an ongoing split transaction. To unlink + * it safely would require setting the QH_INACTIVATE bit and + * waiting at least one frame, as described in EHCI 4.12.2.5. + * + * We won't bother with any of this. 
Instead, we assume that the + * only reason for unlinking an interrupt QH while the current URB + * is still active is to dequeue all the URBs (flush the whole + * endpoint queue). + * + * If rebalancing the periodic schedule is ever implemented, this + * approach will no longer be valid. + */ + + /* high bandwidth, or otherwise part of every microframe */ + if ((period = qh->period) == 0) + period = 1; + + for (i = qh->start; i < ehci->periodic_size; i += period) + periodic_unlink (ehci, i, qh); + + /* update per-qh bandwidth for usbfs */ + ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period + ? ((qh->usecs + qh->c_usecs) / qh->period) + : (qh->usecs * 8); + + dev_dbg (&qh->dev->dev, + "unlink qh%d-%04x/%p start %d [%d/%d us]\n", + qh->period, + hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK), + qh, qh->start, qh->usecs, qh->c_usecs); + + /* qh->qh_next still "live" to HC */ + qh->qh_state = QH_STATE_UNLINK; + qh->qh_next.ptr = NULL; + + if (ehci->qh_scan_next == qh) + ehci->qh_scan_next = list_entry(qh->intr_node.next, + struct ehci_qh, intr_node); + list_del(&qh->intr_node); +} + +static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + /* If the QH isn't linked then there's nothing we can do. */ + if (qh->qh_state != QH_STATE_LINKED) + return; + + qh_unlink_periodic (ehci, qh); + + /* Make sure the unlinks are visible before starting the timer */ + wmb(); + + /* + * The EHCI spec doesn't say how long it takes the controller to + * stop accessing an unlinked interrupt QH. The timer delay is + * 9 uframes; presumably that will be long enough. + */ + qh->unlink_cycle = ehci->intr_unlink_cycle; + + /* New entries go at the end of the intr_unlink list */ + list_add_tail(&qh->unlink_node, &ehci->intr_unlink); + + if (ehci->intr_unlinking) + ; /* Avoid recursive calls */ + else if (ehci->rh_state < EHCI_RH_RUNNING) + ehci_handle_intr_unlinks(ehci); + else if (ehci->intr_unlink.next == &qh->unlink_node) { + ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true); + ++ehci->intr_unlink_cycle; + } +} + +static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + struct ehci_qh_hw *hw = qh->hw; + int rc; + + qh->qh_state = QH_STATE_IDLE; + hw->hw_next = EHCI_LIST_END(ehci); + + if (!list_empty(&qh->qtd_list)) + qh_completions(ehci, qh); + + /* reschedule QH iff another request is queued */ + if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) { + rc = qh_schedule(ehci, qh); + if (rc == 0) { + qh_refresh(ehci, qh); + qh_link_periodic(ehci, qh); + } + + /* An error here likely indicates handshake failure + * or no space left in the schedule. Neither fault + * should happen often ... + * + * FIXME kill the now-dysfunctional queued urbs + */ + else { + ehci_err(ehci, "can't reschedule qh %p, err %d\n", + qh, rc); + } + } + + /* maybe turn off periodic schedule */ + --ehci->intr_count; + disable_periodic(ehci); +} + +/*-------------------------------------------------------------------------*/ + +static int check_period ( + struct ehci_hcd *ehci, + unsigned frame, + unsigned uframe, + unsigned period, + unsigned usecs +) { + int claimed; + + /* complete split running into next frame? + * given FSTN support, we could sometimes check... + */ + if (uframe >= 8) + return 0; + + /* convert "usecs we need" to "max already claimed" */ + usecs = ehci->uframe_periodic_max - usecs; + + /* we "know" 2 and 4 uframe intervals were rejected; so + * for period 0, check _every_ microframe in the schedule. 
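+ *
+ * [Not from the original driver -- the first statement below inverts
+ * the test once, outside the loops: instead of checking
+ * "claimed + needed > cap" per uframe, it rewrites usecs as
+ * "cap - needed" so each pass only compares claimed > usecs.  The two
+ * forms are equivalent, as this stand-alone check shows:]
+ */
+#if 0	/* illustrative sketch only */
+#include <assert.h>
+
+int main(void)
+{
+	unsigned cap = 100;	/* ehci->uframe_periodic_max, usecs */
+	unsigned needed = 30;	/* bandwidth the new transfer wants */
+	unsigned claimed = 80;	/* already allocated in this uframe */
+	unsigned budget = cap - needed;		/* computed once */
+
+	assert((claimed > budget) == (claimed + needed > cap));
+	return 0;
+}
+#endif
+/*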
+ */
+	if (unlikely (period == 0)) {
+		do {
+			for (uframe = 0; uframe < 7; uframe++) {
+				claimed = periodic_usecs (ehci, frame, uframe);
+				if (claimed > usecs)
+					return 0;
+			}
+		} while ((frame += 1) < ehci->periodic_size);
+
+	/* just check the specified uframe, at that period */
+	} else {
+		do {
+			claimed = periodic_usecs (ehci, frame, uframe);
+			if (claimed > usecs)
+				return 0;
+		} while ((frame += period) < ehci->periodic_size);
+	}
+
+	// success!
+	return 1;
+}
+
+static int check_intr_schedule (
+	struct ehci_hcd		*ehci,
+	unsigned		frame,
+	unsigned		uframe,
+	const struct ehci_qh	*qh,
+	__hc32			*c_maskp
+)
+{
+	int		retval = -ENOSPC;
+	u8		mask = 0;
+
+	if (qh->c_usecs && uframe >= 6)		/* FSTN territory? */
+		goto done;
+
+	if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
+		goto done;
+	if (!qh->c_usecs) {
+		retval = 0;
+		*c_maskp = 0;
+		goto done;
+	}
+
+#ifdef CONFIG_USB_EHCI_TT_NEWSCHED
+	if (tt_available (ehci, qh->period, qh->dev, frame, uframe,
+				qh->tt_usecs)) {
+		unsigned i;
+
+		/* TODO : this may need FSTN for SSPLIT in uframe 5. */
+		for (i=uframe+1; i<8 && i<uframe+4; i++)
+			if (!check_period (ehci, frame, i,
+						qh->period, qh->c_usecs))
+				goto done;
+			else
+				mask |= 1 << i;
+
+		retval = 0;
+
+		*c_maskp = cpu_to_hc32(ehci, mask << 8);
+	}
+#else
+	/* Make sure this tt's buffer is also available for CSPLITs.
+	 * We pessimize a bit; probably the typical full speed case
+	 * doesn't need the second CSPLIT.
+	 *
+	 * NOTE:  both SPLIT and CSPLIT could be checked in just
+	 * one smart pass...
+	 */
+	mask = 0x03 << (uframe + qh->gap_uf);
+	*c_maskp = cpu_to_hc32(ehci, mask << 8);
+
+	mask |= 1 << uframe;
+	if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) {
+		if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
+					qh->period, qh->c_usecs))
+			goto done;
+		if (!check_period (ehci, frame, uframe + qh->gap_uf,
+					qh->period, qh->c_usecs))
+			goto done;
+		retval = 0;
+	}
+#endif
+done:
+	return retval;
+}
+
+/* "first fit" scheduling policy used the first time through,
+ * or when the previous schedule slot can't be re-used.
+ */
+static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
+{
+	int		status;
+	unsigned	uframe;
+	__hc32		c_mask;
+	unsigned	frame;	/* 0..(qh->period - 1), or NO_FRAME */
+	struct ehci_qh_hw	*hw = qh->hw;
+
+	hw->hw_next = EHCI_LIST_END(ehci);
+	frame = qh->start;
+
+	/* reuse the previous schedule slots, if we can */
+	if (frame < qh->period) {
+		uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK);
+		status = check_intr_schedule (ehci, frame, --uframe,
+				qh, &c_mask);
+	} else {
+		uframe = 0;
+		c_mask = 0;
+		status = -ENOSPC;
+	}
+
+	/* else scan the schedule to find a group of slots such that all
+	 * uframes have enough periodic bandwidth available.
+	 */
+	if (status) {
+		/* "normal" case, uframing flexible except with splits */
+		if (qh->period) {
+			int		i;
+
+			for (i = qh->period; status && i > 0; --i) {
+				frame = ++ehci->random_frame % qh->period;
+				for (uframe = 0; uframe < 8; uframe++) {
+					status = check_intr_schedule (ehci,
+							frame, uframe, qh,
+							&c_mask);
+					if (status == 0)
+						break;
+				}
+			}
+
+		/* qh->period == 0 means every uframe */
+		} else {
+			frame = 0;
+			status = check_intr_schedule (ehci, 0, 0, qh, &c_mask);
+		}
+		if (status)
+			goto done;
+		qh->start = frame;
+
+		/* reset S-frame and (maybe) C-frame masks */
+		hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
+		hw->hw_info2 |= qh->period
+			?
cpu_to_hc32(ehci, 1 << uframe) + : cpu_to_hc32(ehci, QH_SMASK); + hw->hw_info2 |= c_mask; + } else + ehci_dbg (ehci, "reused qh %p schedule\n", qh); + +done: + return status; +} + +static int intr_submit ( + struct ehci_hcd *ehci, + struct urb *urb, + struct list_head *qtd_list, + gfp_t mem_flags +) { + unsigned epnum; + unsigned long flags; + struct ehci_qh *qh; + int status; + struct list_head empty; + + /* get endpoint and transfer/schedule data */ + epnum = urb->ep->desc.bEndpointAddress; + + spin_lock_irqsave (&ehci->lock, flags); + + if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { + status = -ESHUTDOWN; + goto done_not_linked; + } + status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); + if (unlikely(status)) + goto done_not_linked; + + /* get qh and force any scheduling errors */ + INIT_LIST_HEAD (&empty); + qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv); + if (qh == NULL) { + status = -ENOMEM; + goto done; + } + if (qh->qh_state == QH_STATE_IDLE) { + if ((status = qh_schedule (ehci, qh)) != 0) + goto done; + } + + /* then queue the urb's tds to the qh */ + qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv); + BUG_ON (qh == NULL); + + /* stuff into the periodic schedule */ + if (qh->qh_state == QH_STATE_IDLE) { + qh_refresh(ehci, qh); + qh_link_periodic(ehci, qh); + } + + /* ... update usbfs periodic stats */ + ehci_to_hcd(ehci)->self.bandwidth_int_reqs++; + +done: + if (unlikely(status)) + usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); +done_not_linked: + spin_unlock_irqrestore (&ehci->lock, flags); + if (status) + qtd_list_free (ehci, urb, qtd_list); + + return status; +} + +static void scan_intr(struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + + list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list, + intr_node) { + + /* clean any finished work for this qh */ + if (!list_empty(&qh->qtd_list)) { + int temp; + + /* + * Unlinks could happen here; completion reporting + * drops the lock. That's why ehci->qh_scan_next + * always holds the next qh to scan; if the next qh + * gets unlinked then ehci->qh_scan_next is adjusted + * in qh_unlink_periodic(). + */ + temp = qh_completions(ehci, qh); + if (unlikely(temp || (list_empty(&qh->qtd_list) && + qh->qh_state == QH_STATE_LINKED))) + start_unlink_intr(ehci, qh); + } + } +} + +/*-------------------------------------------------------------------------*/ + +/* ehci_iso_stream ops work with both ITD and SITD */ + +static struct ehci_iso_stream * +iso_stream_alloc (gfp_t mem_flags) +{ + struct ehci_iso_stream *stream; + + stream = kzalloc(sizeof *stream, mem_flags); + if (likely (stream != NULL)) { + INIT_LIST_HEAD(&stream->td_list); + INIT_LIST_HEAD(&stream->free_list); + stream->next_uframe = -1; + } + return stream; +} + +static void +iso_stream_init ( + struct ehci_hcd *ehci, + struct ehci_iso_stream *stream, + struct usb_device *dev, + int pipe, + unsigned interval +) +{ + static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f }; + + u32 buf1; + unsigned epnum, maxp; + int is_input; + long bandwidth; + + /* + * this might be a "high bandwidth" highspeed endpoint, + * as encoded in the ep descriptor's wMaxPacket field + */ + epnum = usb_pipeendpoint (pipe); + is_input = usb_pipein (pipe) ? 
USB_DIR_IN : 0; + maxp = usb_maxpacket(dev, pipe, !is_input); + if (is_input) { + buf1 = (1 << 11); + } else { + buf1 = 0; + } + + /* knows about ITD vs SITD */ + if (dev->speed == USB_SPEED_HIGH) { + unsigned multi = hb_mult(maxp); + + stream->highspeed = 1; + + maxp = max_packet(maxp); + buf1 |= maxp; + maxp *= multi; + + stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum); + stream->buf1 = cpu_to_hc32(ehci, buf1); + stream->buf2 = cpu_to_hc32(ehci, multi); + + /* usbfs wants to report the average usecs per frame tied up + * when transfers on this endpoint are scheduled ... + */ + stream->usecs = HS_USECS_ISO (maxp); + bandwidth = stream->usecs * 8; + bandwidth /= interval; + + } else { + u32 addr; + int think_time; + int hs_transfers; + + addr = dev->ttport << 24; + if (!ehci_is_TDI(ehci) + || (dev->tt->hub != + ehci_to_hcd(ehci)->self.root_hub)) + addr |= dev->tt->hub->devnum << 16; + addr |= epnum << 8; + addr |= dev->devnum; + stream->usecs = HS_USECS_ISO (maxp); + think_time = dev->tt ? dev->tt->think_time : 0; + stream->tt_usecs = NS_TO_US (think_time + usb_calc_bus_time ( + dev->speed, is_input, 1, maxp)); + hs_transfers = max (1u, (maxp + 187) / 188); + if (is_input) { + u32 tmp; + + addr |= 1 << 31; + stream->c_usecs = stream->usecs; + stream->usecs = HS_USECS_ISO (1); + stream->raw_mask = 1; + + /* c-mask as specified in USB 2.0 11.18.4 3.c */ + tmp = (1 << (hs_transfers + 2)) - 1; + stream->raw_mask |= tmp << (8 + 2); + } else + stream->raw_mask = smask_out [hs_transfers - 1]; + bandwidth = stream->usecs + stream->c_usecs; + bandwidth /= interval << 3; + + /* stream->splits gets created from raw_mask later */ + stream->address = cpu_to_hc32(ehci, addr); + } + stream->bandwidth = bandwidth; + + stream->udev = dev; + + stream->bEndpointAddress = is_input | epnum; + stream->interval = interval; + stream->maxp = maxp; +} + +static struct ehci_iso_stream * +iso_stream_find (struct ehci_hcd *ehci, struct urb *urb) +{ + unsigned epnum; + struct ehci_iso_stream *stream; + struct usb_host_endpoint *ep; + unsigned long flags; + + epnum = usb_pipeendpoint (urb->pipe); + if (usb_pipein(urb->pipe)) + ep = urb->dev->ep_in[epnum]; + else + ep = urb->dev->ep_out[epnum]; + + spin_lock_irqsave (&ehci->lock, flags); + stream = ep->hcpriv; + + if (unlikely (stream == NULL)) { + stream = iso_stream_alloc(GFP_ATOMIC); + if (likely (stream != NULL)) { + ep->hcpriv = stream; + stream->ep = ep; + iso_stream_init(ehci, stream, urb->dev, urb->pipe, + urb->interval); + } + + /* if dev->ep [epnum] is a QH, hw is set */ + } else if (unlikely (stream->hw != NULL)) { + ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n", + urb->dev->devpath, epnum, + usb_pipein(urb->pipe) ? 
"in" : "out"); + stream = NULL; + } + + spin_unlock_irqrestore (&ehci->lock, flags); + return stream; +} + +/*-------------------------------------------------------------------------*/ + +/* ehci_iso_sched ops can be ITD-only or SITD-only */ + +static struct ehci_iso_sched * +iso_sched_alloc (unsigned packets, gfp_t mem_flags) +{ + struct ehci_iso_sched *iso_sched; + int size = sizeof *iso_sched; + + size += packets * sizeof (struct ehci_iso_packet); + iso_sched = kzalloc(size, mem_flags); + if (likely (iso_sched != NULL)) { + INIT_LIST_HEAD (&iso_sched->td_list); + } + return iso_sched; +} + +static inline void +itd_sched_init( + struct ehci_hcd *ehci, + struct ehci_iso_sched *iso_sched, + struct ehci_iso_stream *stream, + struct urb *urb +) +{ + unsigned i; + dma_addr_t dma = urb->transfer_dma; + + /* how many uframes are needed for these transfers */ + iso_sched->span = urb->number_of_packets * stream->interval; + + /* figure out per-uframe itd fields that we'll need later + * when we fit new itds into the schedule. + */ + for (i = 0; i < urb->number_of_packets; i++) { + struct ehci_iso_packet *uframe = &iso_sched->packet [i]; + unsigned length; + dma_addr_t buf; + u32 trans; + + length = urb->iso_frame_desc [i].length; + buf = dma + urb->iso_frame_desc [i].offset; + + trans = EHCI_ISOC_ACTIVE; + trans |= buf & 0x0fff; + if (unlikely (((i + 1) == urb->number_of_packets)) + && !(urb->transfer_flags & URB_NO_INTERRUPT)) + trans |= EHCI_ITD_IOC; + trans |= length << 16; + uframe->transaction = cpu_to_hc32(ehci, trans); + + /* might need to cross a buffer page within a uframe */ + uframe->bufp = (buf & ~(u64)0x0fff); + buf += length; + if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff)))) + uframe->cross = 1; + } +} + +static void +iso_sched_free ( + struct ehci_iso_stream *stream, + struct ehci_iso_sched *iso_sched +) +{ + if (!iso_sched) + return; + // caller must hold ehci->lock! + list_splice (&iso_sched->td_list, &stream->free_list); + kfree (iso_sched); +} + +static int +itd_urb_transaction ( + struct ehci_iso_stream *stream, + struct ehci_hcd *ehci, + struct urb *urb, + gfp_t mem_flags +) +{ + struct ehci_itd *itd; + dma_addr_t itd_dma; + int i; + unsigned num_itds; + struct ehci_iso_sched *sched; + unsigned long flags; + + sched = iso_sched_alloc (urb->number_of_packets, mem_flags); + if (unlikely (sched == NULL)) + return -ENOMEM; + + itd_sched_init(ehci, sched, stream, urb); + + if (urb->interval < 8) + num_itds = 1 + (sched->span + 7) / 8; + else + num_itds = urb->number_of_packets; + + /* allocate/init ITDs */ + spin_lock_irqsave (&ehci->lock, flags); + for (i = 0; i < num_itds; i++) { + + /* + * Use iTDs from the free list, but not iTDs that may + * still be in use by the hardware. 
+ */ + if (likely(!list_empty(&stream->free_list))) { + itd = list_first_entry(&stream->free_list, + struct ehci_itd, itd_list); + if (itd->frame == ehci->now_frame) + goto alloc_itd; + list_del (&itd->itd_list); + itd_dma = itd->itd_dma; + } else { + alloc_itd: + spin_unlock_irqrestore (&ehci->lock, flags); + itd = dma_pool_alloc (ehci->itd_pool, mem_flags, + &itd_dma); + spin_lock_irqsave (&ehci->lock, flags); + if (!itd) { + iso_sched_free(stream, sched); + spin_unlock_irqrestore(&ehci->lock, flags); + return -ENOMEM; + } + } + + memset (itd, 0, sizeof *itd); + itd->itd_dma = itd_dma; + itd->frame = 9999; /* an invalid value */ + list_add (&itd->itd_list, &sched->td_list); + } + spin_unlock_irqrestore (&ehci->lock, flags); + + /* temporarily store schedule info in hcpriv */ + urb->hcpriv = sched; + urb->error_count = 0; + return 0; +} + +/*-------------------------------------------------------------------------*/ + +static inline int +itd_slot_ok ( + struct ehci_hcd *ehci, + u32 mod, + u32 uframe, + u8 usecs, + u32 period +) +{ + uframe %= period; + do { + /* can't commit more than uframe_periodic_max usec */ + if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7) + > (ehci->uframe_periodic_max - usecs)) + return 0; + + /* we know urb->interval is 2^N uframes */ + uframe += period; + } while (uframe < mod); + return 1; +} + +static inline int +sitd_slot_ok ( + struct ehci_hcd *ehci, + u32 mod, + struct ehci_iso_stream *stream, + u32 uframe, + struct ehci_iso_sched *sched, + u32 period_uframes +) +{ + u32 mask, tmp; + u32 frame, uf; + + mask = stream->raw_mask << (uframe & 7); + + /* for IN, don't wrap CSPLIT into the next frame */ + if (mask & ~0xffff) + return 0; + + /* check bandwidth */ + uframe %= period_uframes; + frame = uframe >> 3; + +#ifdef CONFIG_USB_EHCI_TT_NEWSCHED + /* The tt's fullspeed bus bandwidth must be available. + * tt_available scheduling guarantees 10+% for control/bulk. + */ + uf = uframe & 7; + if (!tt_available(ehci, period_uframes >> 3, + stream->udev, frame, uf, stream->tt_usecs)) + return 0; +#else + /* tt must be idle for start(s), any gap, and csplit. + * assume scheduling slop leaves 10+% for control/bulk. + */ + if (!tt_no_collision(ehci, period_uframes >> 3, + stream->udev, frame, mask)) + return 0; +#endif + + /* this multi-pass logic is simple, but performance may + * suffer when the schedule data isn't cached. + */ + do { + u32 max_used; + + frame = uframe >> 3; + uf = uframe & 7; + + /* check starts (OUT uses more than one) */ + max_used = ehci->uframe_periodic_max - stream->usecs; + for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) { + if (periodic_usecs (ehci, frame, uf) > max_used) + return 0; + } + + /* for IN, check CSPLIT */ + if (stream->c_usecs) { + uf = uframe & 7; + max_used = ehci->uframe_periodic_max - stream->c_usecs; + do { + tmp = 1 << uf; + tmp <<= 8; + if ((stream->raw_mask & tmp) == 0) + continue; + if (periodic_usecs (ehci, frame, uf) + > max_used) + return 0; + } while (++uf < 8); + } + + /* we know urb->interval is 2^N uframes */ + uframe += period_uframes; + } while (uframe < mod); + + stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7)); + return 1; +} + +/* + * This scheduler plans almost as far into the future as it has actual + * periodic schedule slots. (Affected by TUNE_FLS, which defaults to + * "as small as possible" to be cache-friendlier.) That limits the size + * transfers you can stream reliably; avoid more than 64 msec per urb. 
+ * Also avoid queue depths of less than ehci's worst irq latency (affected + * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter, + * and other factors); or more than about 230 msec total (for portability, + * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler! + */ + +#define SCHEDULING_DELAY 40 /* microframes */ + +static int +iso_stream_schedule ( + struct ehci_hcd *ehci, + struct urb *urb, + struct ehci_iso_stream *stream +) +{ + u32 now, base, next, start, period, span; + int status; + unsigned mod = ehci->periodic_size << 3; + struct ehci_iso_sched *sched = urb->hcpriv; + + period = urb->interval; + span = sched->span; + if (!stream->highspeed) { + period <<= 3; + span <<= 3; + } + + now = ehci_read_frame_index(ehci) & (mod - 1); + + /* Typical case: reuse current schedule, stream is still active. + * Hopefully there are no gaps from the host falling behind + * (irq delays etc). If there are, the behavior depends on + * whether URB_ISO_ASAP is set. + */ + if (likely (!list_empty (&stream->td_list))) { + + /* Take the isochronous scheduling threshold into account */ + if (ehci->i_thresh) + next = now + ehci->i_thresh; /* uframe cache */ + else + next = (now + 2 + 7) & ~0x07; /* full frame cache */ + + /* + * Use ehci->last_iso_frame as the base. There can't be any + * TDs scheduled for earlier than that. + */ + base = ehci->last_iso_frame << 3; + next = (next - base) & (mod - 1); + start = (stream->next_uframe - base) & (mod - 1); + + /* Is the schedule already full? */ + if (unlikely(start < period)) { + ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n", + urb, stream->next_uframe, base, + period, mod); + status = -ENOSPC; + goto fail; + } + + /* Behind the scheduling threshold? */ + if (unlikely(start < next)) { + unsigned now2 = (now - base) & (mod - 1); + + /* USB_ISO_ASAP: Round up to the first available slot */ + if (urb->transfer_flags & URB_ISO_ASAP) + start += (next - start + period - 1) & -period; + + /* + * Not ASAP: Use the next slot in the stream, + * no matter what. + */ + else if (start + span - period < now2) { + ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n", + urb, start + base, + span - period, now2 + base); + } + } + + start += base; + } + + /* need to schedule; when's the next (u)frame we could start? + * this is bigger than ehci->i_thresh allows; scheduling itself + * isn't free, the delay should handle reasonably slow cpus. it + * can also help high bandwidth if the dma and irq loads don't + * jump until after the queue is primed. + */ + else { + int done = 0; + + base = now & ~0x07; + start = base + SCHEDULING_DELAY; + + /* find a uframe slot with enough bandwidth. + * Early uframes are more precious because full-speed + * iso IN transfers can't use late uframes, + * and therefore they should be allocated last. + */ + next = start; + start += period; + do { + start--; + /* check schedule: enough space? */ + if (stream->highspeed) { + if (itd_slot_ok(ehci, mod, start, + stream->usecs, period)) + done = 1; + } else { + if ((start % 8) >= 6) + continue; + if (sitd_slot_ok(ehci, mod, stream, + start, sched, period)) + done = 1; + } + } while (start > next && !done); + + /* no room in the schedule */ + if (!done) { + ehci_dbg(ehci, "iso sched full %p", urb); + status = -ENOSPC; + goto fail; + } + } + + /* Tried to schedule too far into the future? 
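+ *
+ * [Not from the original driver -- the test below is circular-buffer
+ * arithmetic: positions are reduced relative to "base" modulo "mod"
+ * (the schedule size in uframes, a power of two), so the distance to
+ * the URB's last slot becomes one unsigned value comparable against
+ * the ring size.  A worked example:]
+ */
+#if 0	/* illustrative sketch only */
+#include <assert.h>
+
+int main(void)
+{
+	unsigned mod = 1024 << 3;	/* schedule size in uframes */
+	unsigned base = 8100;		/* oldest scheduled uframe */
+	unsigned start = 150;		/* wrapped past 8191 back around */
+	unsigned span = 256, period = 8;
+
+	/* "& (mod - 1)" keeps wrap-around distances small and positive */
+	unsigned dist = (start - base) & (mod - 1);
+
+	assert(dist == 242);			/* 8192 - 8100 + 150 */
+	assert(dist + span - period < mod);	/* URB fits in the ring */
+	return 0;
+}
+#endif
+/*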
*/ + if (unlikely(start - base + span - period >= mod)) { + ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n", + urb, start - base, span - period, mod); + status = -EFBIG; + goto fail; + } + + stream->next_uframe = start & (mod - 1); + + /* report high speed start in uframes; full speed, in frames */ + urb->start_frame = stream->next_uframe; + if (!stream->highspeed) + urb->start_frame >>= 3; + + /* Make sure scan_isoc() sees these */ + if (ehci->isoc_count == 0) + ehci->last_iso_frame = now >> 3; + return 0; + + fail: + iso_sched_free(stream, sched); + urb->hcpriv = NULL; + return status; +} + +/*-------------------------------------------------------------------------*/ + +static inline void +itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream, + struct ehci_itd *itd) +{ + int i; + + /* it's been recently zeroed */ + itd->hw_next = EHCI_LIST_END(ehci); + itd->hw_bufp [0] = stream->buf0; + itd->hw_bufp [1] = stream->buf1; + itd->hw_bufp [2] = stream->buf2; + + for (i = 0; i < 8; i++) + itd->index[i] = -1; + + /* All other fields are filled when scheduling */ +} + +static inline void +itd_patch( + struct ehci_hcd *ehci, + struct ehci_itd *itd, + struct ehci_iso_sched *iso_sched, + unsigned index, + u16 uframe +) +{ + struct ehci_iso_packet *uf = &iso_sched->packet [index]; + unsigned pg = itd->pg; + + // BUG_ON (pg == 6 && uf->cross); + + uframe &= 0x07; + itd->index [uframe] = index; + + itd->hw_transaction[uframe] = uf->transaction; + itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12); + itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0); + itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32)); + + /* iso_frame_desc[].offset must be strictly increasing */ + if (unlikely (uf->cross)) { + u64 bufp = uf->bufp + 4096; + + itd->pg = ++pg; + itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0); + itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32)); + } +} + +static inline void +itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd) +{ + union ehci_shadow *prev = &ehci->pshadow[frame]; + __hc32 *hw_p = &ehci->periodic[frame]; + union ehci_shadow here = *prev; + __hc32 type = 0; + + /* skip any iso nodes which might belong to previous microframes */ + while (here.ptr) { + type = Q_NEXT_TYPE(ehci, *hw_p); + if (type == cpu_to_hc32(ehci, Q_TYPE_QH)) + break; + prev = periodic_next_shadow(ehci, prev, type); + hw_p = shadow_next_periodic(ehci, &here, type); + here = *prev; + } + + itd->itd_next = here; + itd->hw_next = *hw_p; + prev->itd = itd; + itd->frame = frame; + wmb (); + *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); +} + +/* fit urb's itds into the selected schedule slot; activate as needed */ +static void itd_link_urb( + struct ehci_hcd *ehci, + struct urb *urb, + unsigned mod, + struct ehci_iso_stream *stream +) +{ + int packet; + unsigned next_uframe, uframe, frame; + struct ehci_iso_sched *iso_sched = urb->hcpriv; + struct ehci_itd *itd; + + next_uframe = stream->next_uframe & (mod - 1); + + if (unlikely (list_empty(&stream->td_list))) { + ehci_to_hcd(ehci)->self.bandwidth_allocated + += stream->bandwidth; + ehci_vdbg (ehci, + "schedule devp %s ep%d%s-iso period %d start %d.%d\n", + urb->dev->devpath, stream->bEndpointAddress & 0x0f, + (stream->bEndpointAddress & USB_DIR_IN) ? 
"in" : "out", + urb->interval, + next_uframe >> 3, next_uframe & 0x7); + } + + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_pll_fix == 1) + usb_amd_quirk_pll_disable(); + } + + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; + + /* fill iTDs uframe by uframe */ + for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) { + if (itd == NULL) { + /* ASSERT: we have all necessary itds */ + // BUG_ON (list_empty (&iso_sched->td_list)); + + /* ASSERT: no itds for this endpoint in this uframe */ + + itd = list_entry (iso_sched->td_list.next, + struct ehci_itd, itd_list); + list_move_tail (&itd->itd_list, &stream->td_list); + itd->stream = stream; + itd->urb = urb; + itd_init (ehci, stream, itd); + } + + uframe = next_uframe & 0x07; + frame = next_uframe >> 3; + + itd_patch(ehci, itd, iso_sched, packet, uframe); + + next_uframe += stream->interval; + next_uframe &= mod - 1; + packet++; + + /* link completed itds into the schedule */ + if (((next_uframe >> 3) != frame) + || packet == urb->number_of_packets) { + itd_link(ehci, frame & (ehci->periodic_size - 1), itd); + itd = NULL; + } + } + stream->next_uframe = next_uframe; + + /* don't need that schedule data any more */ + iso_sched_free (stream, iso_sched); + urb->hcpriv = stream; + + ++ehci->isoc_count; + enable_periodic(ehci); +} + +#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR) + +/* Process and recycle a completed ITD. Return true iff its urb completed, + * and hence its completion callback probably added things to the hardware + * schedule. + * + * Note that we carefully avoid recycling this descriptor until after any + * completion callback runs, so that it won't be reused quickly. That is, + * assuming (a) no more than two urbs per frame on this endpoint, and also + * (b) only this endpoint's completions submit URBs. It seems some silicon + * corrupts things if you reuse completed descriptors very quickly... + */ +static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd) +{ + struct urb *urb = itd->urb; + struct usb_iso_packet_descriptor *desc; + u32 t; + unsigned uframe; + int urb_index = -1; + struct ehci_iso_stream *stream = itd->stream; + struct usb_device *dev; + bool retval = false; + + /* for each uframe with a packet */ + for (uframe = 0; uframe < 8; uframe++) { + if (likely (itd->index[uframe] == -1)) + continue; + urb_index = itd->index[uframe]; + desc = &urb->iso_frame_desc [urb_index]; + + t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]); + itd->hw_transaction [uframe] = 0; + + /* report transfer status */ + if (unlikely (t & ISO_ERRS)) { + urb->error_count++; + if (t & EHCI_ISOC_BUF_ERR) + desc->status = usb_pipein (urb->pipe) + ? -ENOSR /* hc couldn't read */ + : -ECOMM; /* hc couldn't write */ + else if (t & EHCI_ISOC_BABBLE) + desc->status = -EOVERFLOW; + else /* (t & EHCI_ISOC_XACTERR) */ + desc->status = -EPROTO; + + /* HC need not update length with this error */ + if (!(t & EHCI_ISOC_BABBLE)) { + desc->actual_length = EHCI_ITD_LENGTH(t); + urb->actual_length += desc->actual_length; + } + } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) { + desc->status = 0; + desc->actual_length = EHCI_ITD_LENGTH(t); + urb->actual_length += desc->actual_length; + } else { + /* URB was too late */ + urb->error_count++; + } + } + + /* handle completion now? 
*/ + if (likely ((urb_index + 1) != urb->number_of_packets)) + goto done; + + /* ASSERT: it's really the last itd for this urb + list_for_each_entry (itd, &stream->td_list, itd_list) + BUG_ON (itd->urb == urb); + */ + + /* give urb back to the driver; completion often (re)submits */ + dev = urb->dev; + ehci_urb_done(ehci, urb, 0); + retval = true; + urb = NULL; + + --ehci->isoc_count; + disable_periodic(ehci); + + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_pll_fix == 1) + usb_amd_quirk_pll_enable(); + } + + if (unlikely(list_is_singular(&stream->td_list))) { + ehci_to_hcd(ehci)->self.bandwidth_allocated + -= stream->bandwidth; + ehci_vdbg (ehci, + "deschedule devp %s ep%d%s-iso\n", + dev->devpath, stream->bEndpointAddress & 0x0f, + (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); + } + +done: + itd->urb = NULL; + + /* Add to the end of the free list for later reuse */ + list_move_tail(&itd->itd_list, &stream->free_list); + + /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */ + if (list_empty(&stream->td_list)) { + list_splice_tail_init(&stream->free_list, + &ehci->cached_itd_list); + start_free_itds(ehci); + } + + return retval; +} + +/*-------------------------------------------------------------------------*/ + +static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, + gfp_t mem_flags) +{ + int status = -EINVAL; + unsigned long flags; + struct ehci_iso_stream *stream; + + /* Get iso_stream head */ + stream = iso_stream_find (ehci, urb); + if (unlikely (stream == NULL)) { + ehci_dbg (ehci, "can't get iso stream\n"); + return -ENOMEM; + } + if (unlikely (urb->interval != stream->interval)) { + ehci_dbg (ehci, "can't change iso interval %d --> %d\n", + stream->interval, urb->interval); + goto done; + } + +#ifdef EHCI_URB_TRACE + ehci_dbg (ehci, + "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n", + __func__, urb->dev->devpath, urb, + usb_pipeendpoint (urb->pipe), + usb_pipein (urb->pipe) ? "in" : "out", + urb->transfer_buffer_length, + urb->number_of_packets, urb->interval, + stream); +#endif + + /* allocate ITDs w/o locking anything */ + status = itd_urb_transaction (stream, ehci, urb, mem_flags); + if (unlikely (status < 0)) { + ehci_dbg (ehci, "can't init itds\n"); + goto done; + } + + /* schedule ... need to lock */ + spin_lock_irqsave (&ehci->lock, flags); + if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { + status = -ESHUTDOWN; + goto done_not_linked; + } + status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); + if (unlikely(status)) + goto done_not_linked; + status = iso_stream_schedule(ehci, urb, stream); + if (likely (status == 0)) + itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream); + else + usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); + done_not_linked: + spin_unlock_irqrestore (&ehci->lock, flags); + done: + return status; +} + +/*-------------------------------------------------------------------------*/ + +/* + * "Split ISO TDs" ... used for USB 1.1 devices going through the + * TTs in USB 2.0 hubs. These need microframe scheduling. 
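+ *
+ * [Not from the original driver -- a worked example of the split
+ * accounting set up in iso_stream_init() above: a full-speed OUT
+ * payload crosses the TT in 188-byte chunks, one start-split per
+ * uframe, so the SSPLIT count is the payload rounded up to 188 and
+ * smask_out[] turns that count into a mask of contiguous uframes:]
+ */
+#if 0	/* illustrative sketch only */
+#include <assert.h>
+
+int main(void)
+{
+	/* same table as smask_out[]: N contiguous uframes from uframe 0 */
+	static const unsigned char smask_out[] =
+		{ 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f };
+	unsigned maxp = 940;			/* bytes per iso packet */
+	unsigned hs_transfers = (maxp + 187) / 188;
+
+	assert(hs_transfers == 5);		/* 940 == 5 * 188 */
+	assert(smask_out[hs_transfers - 1] == 0x1f);	/* uframes 0..4 */
+	return 0;
+}
+#endif
+/*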
+ */ + +static inline void +sitd_sched_init( + struct ehci_hcd *ehci, + struct ehci_iso_sched *iso_sched, + struct ehci_iso_stream *stream, + struct urb *urb +) +{ + unsigned i; + dma_addr_t dma = urb->transfer_dma; + + /* how many frames are needed for these transfers */ + iso_sched->span = urb->number_of_packets * stream->interval; + + /* figure out per-frame sitd fields that we'll need later + * when we fit new sitds into the schedule. + */ + for (i = 0; i < urb->number_of_packets; i++) { + struct ehci_iso_packet *packet = &iso_sched->packet [i]; + unsigned length; + dma_addr_t buf; + u32 trans; + + length = urb->iso_frame_desc [i].length & 0x03ff; + buf = dma + urb->iso_frame_desc [i].offset; + + trans = SITD_STS_ACTIVE; + if (((i + 1) == urb->number_of_packets) + && !(urb->transfer_flags & URB_NO_INTERRUPT)) + trans |= SITD_IOC; + trans |= length << 16; + packet->transaction = cpu_to_hc32(ehci, trans); + + /* might need to cross a buffer page within a td */ + packet->bufp = buf; + packet->buf1 = (buf + length) & ~0x0fff; + if (packet->buf1 != (buf & ~(u64)0x0fff)) + packet->cross = 1; + + /* OUT uses multiple start-splits */ + if (stream->bEndpointAddress & USB_DIR_IN) + continue; + length = (length + 187) / 188; + if (length > 1) /* BEGIN vs ALL */ + length |= 1 << 3; + packet->buf1 |= length; + } +} + +static int +sitd_urb_transaction ( + struct ehci_iso_stream *stream, + struct ehci_hcd *ehci, + struct urb *urb, + gfp_t mem_flags +) +{ + struct ehci_sitd *sitd; + dma_addr_t sitd_dma; + int i; + struct ehci_iso_sched *iso_sched; + unsigned long flags; + + iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags); + if (iso_sched == NULL) + return -ENOMEM; + + sitd_sched_init(ehci, iso_sched, stream, urb); + + /* allocate/init sITDs */ + spin_lock_irqsave (&ehci->lock, flags); + for (i = 0; i < urb->number_of_packets; i++) { + + /* NOTE: for now, we don't try to handle wraparound cases + * for IN (using sitd->hw_backpointer, like a FSTN), which + * means we never need two sitds for full speed packets. + */ + + /* + * Use siTDs from the free list, but not siTDs that may + * still be in use by the hardware. 
+ */ + if (likely(!list_empty(&stream->free_list))) { + sitd = list_first_entry(&stream->free_list, + struct ehci_sitd, sitd_list); + if (sitd->frame == ehci->now_frame) + goto alloc_sitd; + list_del (&sitd->sitd_list); + sitd_dma = sitd->sitd_dma; + } else { + alloc_sitd: + spin_unlock_irqrestore (&ehci->lock, flags); + sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags, + &sitd_dma); + spin_lock_irqsave (&ehci->lock, flags); + if (!sitd) { + iso_sched_free(stream, iso_sched); + spin_unlock_irqrestore(&ehci->lock, flags); + return -ENOMEM; + } + } + + memset (sitd, 0, sizeof *sitd); + sitd->sitd_dma = sitd_dma; + sitd->frame = 9999; /* an invalid value */ + list_add (&sitd->sitd_list, &iso_sched->td_list); + } + + /* temporarily store schedule info in hcpriv */ + urb->hcpriv = iso_sched; + urb->error_count = 0; + + spin_unlock_irqrestore (&ehci->lock, flags); + return 0; +} + +/*-------------------------------------------------------------------------*/ + +static inline void +sitd_patch( + struct ehci_hcd *ehci, + struct ehci_iso_stream *stream, + struct ehci_sitd *sitd, + struct ehci_iso_sched *iso_sched, + unsigned index +) +{ + struct ehci_iso_packet *uf = &iso_sched->packet [index]; + u64 bufp = uf->bufp; + + sitd->hw_next = EHCI_LIST_END(ehci); + sitd->hw_fullspeed_ep = stream->address; + sitd->hw_uframe = stream->splits; + sitd->hw_results = uf->transaction; + sitd->hw_backpointer = EHCI_LIST_END(ehci); + + bufp = uf->bufp; + sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp); + sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32); + + sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1); + if (uf->cross) + bufp += 4096; + sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32); + sitd->index = index; +} + +static inline void +sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd) +{ + /* note: sitd ordering could matter (CSPLIT then SSPLIT) */ + sitd->sitd_next = ehci->pshadow [frame]; + sitd->hw_next = ehci->periodic [frame]; + ehci->pshadow [frame].sitd = sitd; + sitd->frame = frame; + wmb (); + ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD); +} + +/* fit urb's sitds into the selected schedule slot; activate as needed */ +static void sitd_link_urb( + struct ehci_hcd *ehci, + struct urb *urb, + unsigned mod, + struct ehci_iso_stream *stream +) +{ + int packet; + unsigned next_uframe; + struct ehci_iso_sched *sched = urb->hcpriv; + struct ehci_sitd *sitd; + + next_uframe = stream->next_uframe; + + if (list_empty(&stream->td_list)) { + /* usbfs ignores TT bandwidth */ + ehci_to_hcd(ehci)->self.bandwidth_allocated + += stream->bandwidth; + ehci_vdbg (ehci, + "sched devp %s ep%d%s-iso [%d] %dms/%04x\n", + urb->dev->devpath, stream->bEndpointAddress & 0x0f, + (stream->bEndpointAddress & USB_DIR_IN) ? 
"in" : "out", + (next_uframe >> 3) & (ehci->periodic_size - 1), + stream->interval, hc32_to_cpu(ehci, stream->splits)); + } + + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_pll_fix == 1) + usb_amd_quirk_pll_disable(); + } + + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; + + /* fill sITDs frame by frame */ + for (packet = 0, sitd = NULL; + packet < urb->number_of_packets; + packet++) { + + /* ASSERT: we have all necessary sitds */ + BUG_ON (list_empty (&sched->td_list)); + + /* ASSERT: no itds for this endpoint in this frame */ + + sitd = list_entry (sched->td_list.next, + struct ehci_sitd, sitd_list); + list_move_tail (&sitd->sitd_list, &stream->td_list); + sitd->stream = stream; + sitd->urb = urb; + + sitd_patch(ehci, stream, sitd, sched, packet); + sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1), + sitd); + + next_uframe += stream->interval << 3; + } + stream->next_uframe = next_uframe & (mod - 1); + + /* don't need that schedule data any more */ + iso_sched_free (stream, sched); + urb->hcpriv = stream; + + ++ehci->isoc_count; + enable_periodic(ehci); +} + +/*-------------------------------------------------------------------------*/ + +#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \ + | SITD_STS_XACT | SITD_STS_MMF) + +/* Process and recycle a completed SITD. Return true iff its urb completed, + * and hence its completion callback probably added things to the hardware + * schedule. + * + * Note that we carefully avoid recycling this descriptor until after any + * completion callback runs, so that it won't be reused quickly. That is, + * assuming (a) no more than two urbs per frame on this endpoint, and also + * (b) only this endpoint's completions submit URBs. It seems some silicon + * corrupts things if you reuse completed descriptors very quickly... + */ +static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd) +{ + struct urb *urb = sitd->urb; + struct usb_iso_packet_descriptor *desc; + u32 t; + int urb_index = -1; + struct ehci_iso_stream *stream = sitd->stream; + struct usb_device *dev; + bool retval = false; + + urb_index = sitd->index; + desc = &urb->iso_frame_desc [urb_index]; + t = hc32_to_cpup(ehci, &sitd->hw_results); + + /* report transfer status */ + if (unlikely(t & SITD_ERRS)) { + urb->error_count++; + if (t & SITD_STS_DBE) + desc->status = usb_pipein (urb->pipe) + ? -ENOSR /* hc couldn't read */ + : -ECOMM; /* hc couldn't write */ + else if (t & SITD_STS_BABBLE) + desc->status = -EOVERFLOW; + else /* XACT, MMF, etc */ + desc->status = -EPROTO; + } else if (unlikely(t & SITD_STS_ACTIVE)) { + /* URB was too late */ + urb->error_count++; + } else { + desc->status = 0; + desc->actual_length = desc->length - SITD_LENGTH(t); + urb->actual_length += desc->actual_length; + } + + /* handle completion now? 
*/ + if ((urb_index + 1) != urb->number_of_packets) + goto done; + + /* ASSERT: it's really the last sitd for this urb + list_for_each_entry (sitd, &stream->td_list, sitd_list) + BUG_ON (sitd->urb == urb); + */ + + /* give urb back to the driver; completion often (re)submits */ + dev = urb->dev; + ehci_urb_done(ehci, urb, 0); + retval = true; + urb = NULL; + + --ehci->isoc_count; + disable_periodic(ehci); + + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_pll_fix == 1) + usb_amd_quirk_pll_enable(); + } + + if (list_is_singular(&stream->td_list)) { + ehci_to_hcd(ehci)->self.bandwidth_allocated + -= stream->bandwidth; + ehci_vdbg (ehci, + "deschedule devp %s ep%d%s-iso\n", + dev->devpath, stream->bEndpointAddress & 0x0f, + (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); + } + +done: + sitd->urb = NULL; + + /* Add to the end of the free list for later reuse */ + list_move_tail(&sitd->sitd_list, &stream->free_list); + + /* Recycle the siTDs when the pipeline is empty (ep no longer in use) */ + if (list_empty(&stream->td_list)) { + list_splice_tail_init(&stream->free_list, + &ehci->cached_sitd_list); + start_free_itds(ehci); + } + + return retval; +} + + +static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb, + gfp_t mem_flags) +{ + int status = -EINVAL; + unsigned long flags; + struct ehci_iso_stream *stream; + + /* Get iso_stream head */ + stream = iso_stream_find (ehci, urb); + if (stream == NULL) { + ehci_dbg (ehci, "can't get iso stream\n"); + return -ENOMEM; + } + if (urb->interval != stream->interval) { + ehci_dbg (ehci, "can't change iso interval %d --> %d\n", + stream->interval, urb->interval); + goto done; + } + +#ifdef EHCI_URB_TRACE + ehci_dbg (ehci, + "submit %p dev%s ep%d%s-iso len %d\n", + urb, urb->dev->devpath, + usb_pipeendpoint (urb->pipe), + usb_pipein (urb->pipe) ? "in" : "out", + urb->transfer_buffer_length); +#endif + + /* allocate SITDs */ + status = sitd_urb_transaction (stream, ehci, urb, mem_flags); + if (status < 0) { + ehci_dbg (ehci, "can't init sitds\n"); + goto done; + } + + /* schedule ... need to lock */ + spin_lock_irqsave (&ehci->lock, flags); + if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { + status = -ESHUTDOWN; + goto done_not_linked; + } + status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); + if (unlikely(status)) + goto done_not_linked; + status = iso_stream_schedule(ehci, urb, stream); + if (status == 0) + sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream); + else + usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); + done_not_linked: + spin_unlock_irqrestore (&ehci->lock, flags); + done: + return status; +} + +/*-------------------------------------------------------------------------*/ + +static void scan_isoc(struct ehci_hcd *ehci) +{ + unsigned uf, now_frame, frame; + unsigned fmask = ehci->periodic_size - 1; + bool modified, live; + + /* + * When running, scan from last scan point up to "now" + * else clean up by scanning everything that's left. + * Touches as few pages as possible: cache-friendly. 
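+ *
+ * [Not from the original driver -- a minimal sketch of the frame-ring
+ * walk below, with hypothetical names: frames form a power-of-two
+ * ring, so the scan steps with "& fmask" until it catches up with the
+ * frame the controller is currently executing.]
+ */
+#if 0	/* illustrative sketch only */
+static void scan_ring(unsigned last_frame, unsigned now_frame,
+		unsigned ring_size)	/* ring_size: power of two */
+{
+	unsigned fmask = ring_size - 1;
+	unsigned frame = last_frame;
+
+	for (;;) {
+		/* ... process completions queued in this frame ... */
+		if (frame == now_frame)	/* caught up with the hardware */
+			break;
+		frame = (frame + 1) & fmask;	/* wrap past the ring end */
+	}
+}
+#endif
+/*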
+ */ + if (ehci->rh_state >= EHCI_RH_RUNNING) { + uf = ehci_read_frame_index(ehci); + now_frame = (uf >> 3) & fmask; + live = true; + } else { + now_frame = (ehci->last_iso_frame - 1) & fmask; + live = false; + } + ehci->now_frame = now_frame; + + frame = ehci->last_iso_frame; + for (;;) { + union ehci_shadow q, *q_p; + __hc32 type, *hw_p; + +restart: + /* scan each element in frame's queue for completions */ + q_p = &ehci->pshadow [frame]; + hw_p = &ehci->periodic [frame]; + q.ptr = q_p->ptr; + type = Q_NEXT_TYPE(ehci, *hw_p); + modified = false; + + while (q.ptr != NULL) { + switch (hc32_to_cpu(ehci, type)) { + case Q_TYPE_ITD: + /* If this ITD is still active, leave it for + * later processing ... check the next entry. + * No need to check for activity unless the + * frame is current. + */ + if (frame == now_frame && live) { + rmb(); + for (uf = 0; uf < 8; uf++) { + if (q.itd->hw_transaction[uf] & + ITD_ACTIVE(ehci)) + break; + } + if (uf < 8) { + q_p = &q.itd->itd_next; + hw_p = &q.itd->hw_next; + type = Q_NEXT_TYPE(ehci, + q.itd->hw_next); + q = *q_p; + break; + } + } + + /* Take finished ITDs out of the schedule + * and process them: recycle, maybe report + * URB completion. HC won't cache the + * pointer for much longer, if at all. + */ + *q_p = q.itd->itd_next; + if (!ehci->use_dummy_qh || + q.itd->hw_next != EHCI_LIST_END(ehci)) + *hw_p = q.itd->hw_next; + else + *hw_p = ehci->dummy->qh_dma; + type = Q_NEXT_TYPE(ehci, q.itd->hw_next); + wmb(); + modified = itd_complete (ehci, q.itd); + q = *q_p; + break; + case Q_TYPE_SITD: + /* If this SITD is still active, leave it for + * later processing ... check the next entry. + * No need to check for activity unless the + * frame is current. + */ + if (((frame == now_frame) || + (((frame + 1) & fmask) == now_frame)) + && live + && (q.sitd->hw_results & + SITD_ACTIVE(ehci))) { + + q_p = &q.sitd->sitd_next; + hw_p = &q.sitd->hw_next; + type = Q_NEXT_TYPE(ehci, + q.sitd->hw_next); + q = *q_p; + break; + } + + /* Take finished SITDs out of the schedule + * and process them: recycle, maybe report + * URB completion. + */ + *q_p = q.sitd->sitd_next; + if (!ehci->use_dummy_qh || + q.sitd->hw_next != EHCI_LIST_END(ehci)) + *hw_p = q.sitd->hw_next; + else + *hw_p = ehci->dummy->qh_dma; + type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); + wmb(); + modified = sitd_complete (ehci, q.sitd); + q = *q_p; + break; + default: + ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n", + type, frame, q.ptr); + // BUG (); + /* FALL THROUGH */ + case Q_TYPE_QH: + case Q_TYPE_FSTN: + /* End of the iTDs and siTDs */ + q.ptr = NULL; + break; + } + + /* assume completion callbacks modify the queue */ + if (unlikely(modified && ehci->isoc_count > 0)) + goto restart; + } + + /* Stop when we have reached the current frame */ + if (frame == now_frame) + break; + + /* The last frame may still have active siTDs */ + ehci->last_iso_frame = frame; + frame = (frame + 1) & fmask; + } +} diff --git a/addons/ehci-pci/src/3.10.108/ehci-sysfs.c b/addons/ehci-pci/src/3.10.108/ehci-sysfs.c new file mode 100644 index 00000000..06590242 --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/ehci-sysfs.c @@ -0,0 +1,190 @@ +/* + * Copyright (C) 2007 by Alan Stern + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* this file is part of ehci-hcd.c */ + + +/* Display the ports dedicated to the companion controller */ +static ssize_t show_companion(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ehci_hcd *ehci; + int nports, index, n; + int count = PAGE_SIZE; + char *ptr = buf; + + ehci = hcd_to_ehci(dev_get_drvdata(dev)); + nports = HCS_N_PORTS(ehci->hcs_params); + + for (index = 0; index < nports; ++index) { + if (test_bit(index, &ehci->companion_ports)) { + n = scnprintf(ptr, count, "%d\n", index + 1); + ptr += n; + count -= n; + } + } + return ptr - buf; +} + +/* + * Dedicate or undedicate a port to the companion controller. + * Syntax is "[-]portnum", where a leading '-' sign means + * return control of the port to the EHCI controller. + */ +static ssize_t store_companion(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ehci_hcd *ehci; + int portnum, new_owner; + + ehci = hcd_to_ehci(dev_get_drvdata(dev)); + new_owner = PORT_OWNER; /* Owned by companion */ + if (sscanf(buf, "%d", &portnum) != 1) + return -EINVAL; + if (portnum < 0) { + portnum = - portnum; + new_owner = 0; /* Owned by EHCI */ + } + if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params)) + return -ENOENT; + portnum--; + if (new_owner) + set_bit(portnum, &ehci->companion_ports); + else + clear_bit(portnum, &ehci->companion_ports); + set_owner(ehci, portnum, new_owner); + return count; +} +static DEVICE_ATTR(companion, 0644, show_companion, store_companion); + + +/* + * Display / Set uframe_periodic_max + */ +static ssize_t show_uframe_periodic_max(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ehci_hcd *ehci; + int n; + + ehci = hcd_to_ehci(dev_get_drvdata(dev)); + n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max); + return n; +} + + +static ssize_t store_uframe_periodic_max(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ehci_hcd *ehci; + unsigned uframe_periodic_max; + unsigned frame, uframe; + unsigned short allocated_max; + unsigned long flags; + ssize_t ret; + + ehci = hcd_to_ehci(dev_get_drvdata(dev)); + if (kstrtouint(buf, 0, &uframe_periodic_max) < 0) + return -EINVAL; + + if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) { + ehci_info(ehci, "rejecting invalid request for " + "uframe_periodic_max=%u\n", uframe_periodic_max); + return -EINVAL; + } + + ret = -EINVAL; + + /* + * lock, so that our checking does not race with possible periodic + * bandwidth allocation through submitting new urbs. + */ + spin_lock_irqsave (&ehci->lock, flags); + + /* + * for request to decrease max periodic bandwidth, we have to check + * every microframe in the schedule to see whether the decrease is + * possible. 
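+ * (Each microframe is 125 us and the EHCI specification lets periodic + * transfers claim at most 80% of it, i.e. 100 us -- hence the 100..125 + * bounds enforced above.)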
+ */ + if (uframe_periodic_max < ehci->uframe_periodic_max) { + allocated_max = 0; + + for (frame = 0; frame < ehci->periodic_size; ++frame) + for (uframe = 0; uframe < 7; ++uframe) + allocated_max = max(allocated_max, + periodic_usecs (ehci, frame, uframe)); + + if (allocated_max > uframe_periodic_max) { + ehci_info(ehci, + "cannot decrease uframe_periodic_max because " + "periodic bandwidth is already allocated " + "(%u > %u)\n", + allocated_max, uframe_periodic_max); + goto out_unlock; + } + } + + /* increasing is always ok */ + + ehci_info(ehci, "setting max periodic bandwidth to %u%% " + "(== %u usec/uframe)\n", + 100*uframe_periodic_max/125, uframe_periodic_max); + + if (uframe_periodic_max != 100) + ehci_warn(ehci, "max periodic bandwidth set is non-standard\n"); + + ehci->uframe_periodic_max = uframe_periodic_max; + ret = count; + +out_unlock: + spin_unlock_irqrestore (&ehci->lock, flags); + return ret; +} +static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, store_uframe_periodic_max); + + +static inline int create_sysfs_files(struct ehci_hcd *ehci) +{ + struct device *controller = ehci_to_hcd(ehci)->self.controller; + int i = 0; + + /* with integrated TT there is no companion! */ + if (!ehci_is_TDI(ehci)) + i = device_create_file(controller, &dev_attr_companion); + if (i) + goto out; + + i = device_create_file(controller, &dev_attr_uframe_periodic_max); +out: + return i; +} + +static inline void remove_sysfs_files(struct ehci_hcd *ehci) +{ + struct device *controller = ehci_to_hcd(ehci)->self.controller; + + /* with integrated TT there is no companion! */ + if (!ehci_is_TDI(ehci)) + device_remove_file(controller, &dev_attr_companion); + + device_remove_file(controller, &dev_attr_uframe_periodic_max); +} diff --git a/addons/ehci-pci/src/3.10.108/ehci-timer.c b/addons/ehci-pci/src/3.10.108/ehci-timer.c new file mode 100644 index 00000000..11e5b32f --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/ehci-timer.c @@ -0,0 +1,401 @@ +/* + * Copyright (C) 2012 by Alan Stern + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +/* This file is part of ehci-hcd.c */ + +/*-------------------------------------------------------------------------*/ + +/* Set a bit in the USBCMD register */ +static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit) +{ + ehci->command |= bit; + ehci_writel(ehci, ehci->command, &ehci->regs->command); + + /* unblock posted write */ + ehci_readl(ehci, &ehci->regs->command); +} + +/* Clear a bit in the USBCMD register */ +static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit) +{ + ehci->command &= ~bit; + ehci_writel(ehci, ehci->command, &ehci->regs->command); + + /* unblock posted write */ + ehci_readl(ehci, &ehci->regs->command); +} + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI timer support... Now using hrtimers. + * + * Lots of different events are triggered from ehci->hrtimer.
Whenever + * the timer routine runs, it checks each possible event; events that are + * currently enabled and whose expiration time has passed get handled. + * The set of enabled events is stored as a collection of bitflags in + * ehci->enabled_hrtimer_events, and they are numbered in order of + * increasing delay values (ranging between 1 ms and 100 ms). + * + * Rather than implementing a sorted list or tree of all pending events, + * we keep track only of the lowest-numbered pending event, in + * ehci->next_hrtimer_event. Whenever ehci->hrtimer gets restarted, its + * expiration time is set to the timeout value for this event. + * + * As a result, events might not get handled right away; the actual delay + * could be anywhere up to twice the requested delay. This doesn't + * matter, because none of the events are especially time-critical. The + * ones that matter most all have a delay of 1 ms, so they will be + * handled after 2 ms at most, which is okay. In addition to this, we + * allow for an expiration range of 1 ms. + */ + +/* + * Delay lengths for the hrtimer event types. + * Keep this list sorted by delay length, in the same order as + * the event types indexed by enum ehci_hrtimer_event in ehci.h. + */ +static unsigned event_delays_ns[] = { + 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_ASS */ + 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_PSS */ + 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_DEAD */ + 1125 * NSEC_PER_USEC, /* EHCI_HRTIMER_UNLINK_INTR */ + 2 * NSEC_PER_MSEC, /* EHCI_HRTIMER_FREE_ITDS */ + 6 * NSEC_PER_MSEC, /* EHCI_HRTIMER_ASYNC_UNLINKS */ + 10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IAA_WATCHDOG */ + 10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_PERIODIC */ + 15 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_ASYNC */ + 100 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IO_WATCHDOG */ +}; + +/* Enable a pending hrtimer event */ +static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event, + bool resched) +{ + ktime_t *timeout = &ehci->hr_timeouts[event]; + + if (resched) + *timeout = ktime_add(ktime_get(), + ktime_set(0, event_delays_ns[event])); + ehci->enabled_hrtimer_events |= (1 << event); + + /* Track only the lowest-numbered pending event */ + if (event < ehci->next_hrtimer_event) { + ehci->next_hrtimer_event = event; + hrtimer_start_range_ns(&ehci->hrtimer, *timeout, + NSEC_PER_MSEC, HRTIMER_MODE_ABS); + } +} + + +/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */ +static void ehci_poll_ASS(struct ehci_hcd *ehci) +{ + unsigned actual, want; + + /* Don't enable anything if the controller isn't running (e.g., died) */ + if (ehci->rh_state != EHCI_RH_RUNNING) + return; + + want = (ehci->command & CMD_ASE) ? 
STS_ASS : 0; + actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS; + + if (want != actual) { + + /* Poll again later, but give up after about 2-4 ms */ + if (ehci->ASS_poll_count++ < 2) { + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); + return; + } + ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n", + want, actual); + } + ehci->ASS_poll_count = 0; + + /* The status is up-to-date; restart or stop the schedule as needed */ + if (want == 0) { /* Stopped */ + if (ehci->async_count > 0) + ehci_set_command_bit(ehci, CMD_ASE); + + } else { /* Running */ + if (ehci->async_count == 0) { + + /* Turn off the schedule after a while */ + ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC, + true); + } + } +} + +/* Turn off the async schedule after a brief delay */ +static void ehci_disable_ASE(struct ehci_hcd *ehci) +{ + ehci_clear_command_bit(ehci, CMD_ASE); +} + + +/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */ +static void ehci_poll_PSS(struct ehci_hcd *ehci) +{ + unsigned actual, want; + + /* Don't do anything if the controller isn't running (e.g., died) */ + if (ehci->rh_state != EHCI_RH_RUNNING) + return; + + want = (ehci->command & CMD_PSE) ? STS_PSS : 0; + actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS; + + if (want != actual) { + + /* Poll again later, but give up after about 2-4 ms */ + if (ehci->PSS_poll_count++ < 2) { + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); + return; + } + ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n", + want, actual); + } + ehci->PSS_poll_count = 0; + + /* The status is up-to-date; restart or stop the schedule as needed */ + if (want == 0) { /* Stopped */ + if (ehci->periodic_count > 0) + ehci_set_command_bit(ehci, CMD_PSE); + + } else { /* Running */ + if (ehci->periodic_count == 0) { + + /* Turn off the schedule after a while */ + ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC, + true); + } + } +} + +/* Turn off the periodic schedule after a brief delay */ +static void ehci_disable_PSE(struct ehci_hcd *ehci) +{ + ehci_clear_command_bit(ehci, CMD_PSE); +} + + +/* Poll the STS_HALT status bit; see when a dead controller stops */ +static void ehci_handle_controller_death(struct ehci_hcd *ehci) +{ + if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) { + + /* Give up after a few milliseconds */ + if (ehci->died_poll_count++ < 5) { + /* Try again later */ + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true); + return; + } + ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n"); + } + + /* Clean up the mess */ + ehci->rh_state = EHCI_RH_HALTED; + ehci_writel(ehci, 0, &ehci->regs->configured_flag); + ehci_writel(ehci, 0, &ehci->regs->intr_enable); + ehci_work(ehci); + end_unlink_async(ehci); + + /* Not in process context, so don't try to reset the controller */ +} + + +/* Handle unlinked interrupt QHs once they are gone from the hardware */ +static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci) +{ + bool stopped = (ehci->rh_state < EHCI_RH_RUNNING); + + /* + * Process all the QHs on the intr_unlink list that were added + * before the current unlink cycle began. The list is in + * temporal order, so stop when we reach the first entry in the + * current cycle. But if the root hub isn't running then + * process all the QHs on the list. 
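+ * (Each QH was stamped with intr_unlink_cycle when queued for unlink; the + * loop below finishes, via end_unlink_intr(), only entries stamped in an + * earlier cycle, then bumps the cycle and re-arms the timer for the rest.)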
+ */ + ehci->intr_unlinking = true; + while (!list_empty(&ehci->intr_unlink)) { + struct ehci_qh *qh; + + qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh, + unlink_node); + if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle) + break; + list_del(&qh->unlink_node); + end_unlink_intr(ehci, qh); + } + + /* Handle remaining entries later */ + if (!list_empty(&ehci->intr_unlink)) { + ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true); + ++ehci->intr_unlink_cycle; + } + ehci->intr_unlinking = false; +} + + +/* Start another free-iTDs/siTDs cycle */ +static void start_free_itds(struct ehci_hcd *ehci) +{ + if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) { + ehci->last_itd_to_free = list_entry( + ehci->cached_itd_list.prev, + struct ehci_itd, itd_list); + ehci->last_sitd_to_free = list_entry( + ehci->cached_sitd_list.prev, + struct ehci_sitd, sitd_list); + ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true); + } +} + +/* Wait for controller to stop using old iTDs and siTDs */ +static void end_free_itds(struct ehci_hcd *ehci) +{ + struct ehci_itd *itd, *n; + struct ehci_sitd *sitd, *sn; + + if (ehci->rh_state < EHCI_RH_RUNNING) { + ehci->last_itd_to_free = NULL; + ehci->last_sitd_to_free = NULL; + } + + list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { + list_del(&itd->itd_list); + dma_pool_free(ehci->itd_pool, itd, itd->itd_dma); + if (itd == ehci->last_itd_to_free) + break; + } + list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) { + list_del(&sitd->sitd_list); + dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma); + if (sitd == ehci->last_sitd_to_free) + break; + } + + if (!list_empty(&ehci->cached_itd_list) || + !list_empty(&ehci->cached_sitd_list)) + start_free_itds(ehci); +} + + +/* Handle lost (or very late) IAA interrupts */ +static void ehci_iaa_watchdog(struct ehci_hcd *ehci) +{ + u32 cmd, status; + + /* + * Lost IAA irqs wedge things badly; seen first with a vt8235. + * So we need this watchdog, but must protect it against both + * (a) SMP races against real IAA firing and retriggering, and + * (b) clean HC shutdown, when IAA watchdog was pending. + */ + if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING) + return; + + /* If we get here, IAA is *REALLY* late. It's barely + * conceivable that the system is so busy that CMD_IAAD + * is still legitimately set, so let's be sure it's + * clear before we read STS_IAA. (The HC should clear + * CMD_IAAD when it sets STS_IAA.) + */ + cmd = ehci_readl(ehci, &ehci->regs->command); + + /* + * If IAA is set here it either legitimately triggered + * after the watchdog timer expired (_way_ late, so we'll + * still count it as lost) ... or a silicon erratum: + * - VIA seems to set IAA without triggering the IRQ; + * - IAAD potentially cleared without setting IAA. + */ + status = ehci_readl(ehci, &ehci->regs->status); + if ((status & STS_IAA) || !(cmd & CMD_IAAD)) { + COUNT(ehci->stats.lost_iaa); + ehci_writel(ehci, STS_IAA, &ehci->regs->status); + } + + ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd); + end_unlink_async(ehci); +} + + +/* Enable the I/O watchdog, if appropriate */ +static void turn_on_io_watchdog(struct ehci_hcd *ehci) +{ + /* Not needed if the controller isn't running or it's already enabled */ + if (ehci->rh_state != EHCI_RH_RUNNING || + (ehci->enabled_hrtimer_events & + BIT(EHCI_HRTIMER_IO_WATCHDOG))) + return; + + /* + * Isochronous transfers always need the watchdog. + * For other sorts we use it only if the flag is set. 
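+ * (The watchdog is simply a 100 ms retrigger of ehci_work() -- see + * EHCI_HRTIMER_IO_WATCHDOG in event_delays_ns[] above and event_handlers[] + * below -- so a lost completion IRQ stalls transfers by at most that long.)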
+ */ + if (ehci->isoc_count > 0 || (ehci->need_io_watchdog && + ehci->async_count + ehci->intr_count > 0)) + ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true); +} + + +/* + * Handler functions for the hrtimer event types. + * Keep this array in the same order as the event types indexed by + * enum ehci_hrtimer_event in ehci.h. + */ +static void (*event_handlers[])(struct ehci_hcd *) = { + ehci_poll_ASS, /* EHCI_HRTIMER_POLL_ASS */ + ehci_poll_PSS, /* EHCI_HRTIMER_POLL_PSS */ + ehci_handle_controller_death, /* EHCI_HRTIMER_POLL_DEAD */ + ehci_handle_intr_unlinks, /* EHCI_HRTIMER_UNLINK_INTR */ + end_free_itds, /* EHCI_HRTIMER_FREE_ITDS */ + unlink_empty_async, /* EHCI_HRTIMER_ASYNC_UNLINKS */ + ehci_iaa_watchdog, /* EHCI_HRTIMER_IAA_WATCHDOG */ + ehci_disable_PSE, /* EHCI_HRTIMER_DISABLE_PERIODIC */ + ehci_disable_ASE, /* EHCI_HRTIMER_DISABLE_ASYNC */ + ehci_work, /* EHCI_HRTIMER_IO_WATCHDOG */ +}; + +static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t) +{ + struct ehci_hcd *ehci = container_of(t, struct ehci_hcd, hrtimer); + ktime_t now; + unsigned long events; + unsigned long flags; + unsigned e; + + spin_lock_irqsave(&ehci->lock, flags); + + events = ehci->enabled_hrtimer_events; + ehci->enabled_hrtimer_events = 0; + ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT; + + /* + * Check each pending event. If its time has expired, handle + * the event; otherwise re-enable it. + */ + now = ktime_get(); + for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) { + if (now.tv64 >= ehci->hr_timeouts[e].tv64) + event_handlers[e](ehci); + else + ehci_enable_event(ehci, e, false); + } + + spin_unlock_irqrestore(&ehci->lock, flags); + return HRTIMER_NORESTART; +} diff --git a/addons/ehci-pci/src/3.10.108/ehci.h b/addons/ehci-pci/src/3.10.108/ehci.h new file mode 100644 index 00000000..03322d9c --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/ehci.h @@ -0,0 +1,825 @@ +/* + * Copyright (c) 2001-2002 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_EHCI_HCD_H +#define __LINUX_EHCI_HCD_H + +/* definitions used for the EHCI driver */ + +/* + * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to + * __leXX (normally) or __beXX (given EHCI_BIG_ENDIAN_DESC), depending on + * the host controller implementation. + * + * To facilitate the strongest possible byte-order checking from "sparse" + * and so on, we use __leXX unless that's not practical. 
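+ * (With CONFIG_USB_EHCI_BIG_ENDIAN_DESC unset, __hc32/__hc16 are plain + * #defines for __le32/__le16 below, so descriptors get the full + * little-endian checking from sparse.)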
+ */ +#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_DESC +typedef __u32 __bitwise __hc32; +typedef __u16 __bitwise __hc16; +#else +#define __hc32 __le32 +#define __hc16 __le16 +#endif + +/* statistics can be kept for tuning/monitoring */ +#ifdef DEBUG +#define EHCI_STATS +#endif + +struct ehci_stats { + /* irq usage */ + unsigned long normal; + unsigned long error; + unsigned long iaa; + unsigned long lost_iaa; + + /* termination of urbs from core */ + unsigned long complete; + unsigned long unlink; +}; + +/* ehci_hcd->lock guards shared data against other CPUs: + * ehci_hcd: async, unlink, periodic (and shadow), ... + * usb_host_endpoint: hcpriv + * ehci_qh: qh_next, qtd_list + * ehci_qtd: qtd_list + * + * Also, hold this lock when talking to HC registers or + * when updating hw_* fields in shared qh/qtd/... structures. + */ + +#define EHCI_MAX_ROOT_PORTS 15 /* see HCS_N_PORTS */ + +/* + * ehci_rh_state values of EHCI_RH_RUNNING or above mean that the + * controller may be doing DMA. Lower values mean there's no DMA. + */ +enum ehci_rh_state { + EHCI_RH_HALTED, + EHCI_RH_SUSPENDED, + EHCI_RH_RUNNING, + EHCI_RH_STOPPING +}; + +/* + * Timer events, ordered by increasing delay length. + * Always update event_delays_ns[] and event_handlers[] (defined in + * ehci-timer.c) in parallel with this list. + */ +enum ehci_hrtimer_event { + EHCI_HRTIMER_POLL_ASS, /* Poll for async schedule off */ + EHCI_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */ + EHCI_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */ + EHCI_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */ + EHCI_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */ + EHCI_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */ + EHCI_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */ + EHCI_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */ + EHCI_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */ + EHCI_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */ + EHCI_HRTIMER_NUM_EVENTS /* Must come last */ +}; +#define EHCI_HRTIMER_NO_EVENT 99 + +struct ehci_hcd { /* one per controller */ + /* timing support */ + enum ehci_hrtimer_event next_hrtimer_event; + unsigned enabled_hrtimer_events; + ktime_t hr_timeouts[EHCI_HRTIMER_NUM_EVENTS]; + struct hrtimer hrtimer; + + int PSS_poll_count; + int ASS_poll_count; + int died_poll_count; + + /* glue to PCI and HCD framework */ + struct ehci_caps __iomem *caps; + struct ehci_regs __iomem *regs; + struct ehci_dbg_port __iomem *debug; + + __u32 hcs_params; /* cached register copy */ + spinlock_t lock; + enum ehci_rh_state rh_state; + + /* general schedule support */ + bool scanning:1; + bool need_rescan:1; + bool intr_unlinking:1; + bool iaa_in_progress:1; + bool async_unlinking:1; + bool shutdown:1; + struct ehci_qh *qh_scan_next; + + /* async schedule support */ + struct ehci_qh *async; + struct ehci_qh *dummy; /* For AMD quirk use */ + struct list_head async_unlink; + struct list_head async_idle; + unsigned async_unlink_cycle; + unsigned async_count; /* async activity count */ + + /* periodic schedule support */ +#define DEFAULT_I_TDPS 1024 /* some HCs can do less */ + unsigned periodic_size; + __hc32 *periodic; /* hw periodic table */ + dma_addr_t periodic_dma; + struct list_head intr_qh_list; + unsigned i_thresh; /* uframes HC might cache */ + + union ehci_shadow *pshadow; /* mirror hw periodic table */ + struct list_head intr_unlink; + unsigned intr_unlink_cycle; + unsigned now_frame; /* frame from HC hardware */ + unsigned last_iso_frame; /* last frame scanned for iso */ + 
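/* the activity counts below gate the periodic schedule (see + ehci_poll_PSS() in ehci-timer.c) and, with async_count, the I/O watchdog */ +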
unsigned intr_count; /* intr activity count */ + unsigned isoc_count; /* isoc activity count */ + unsigned periodic_count; /* periodic activity count */ + unsigned uframe_periodic_max; /* max periodic time per uframe */ + + + /* list of itds & sitds completed while now_frame was still active */ + struct list_head cached_itd_list; + struct ehci_itd *last_itd_to_free; + struct list_head cached_sitd_list; + struct ehci_sitd *last_sitd_to_free; + + /* per root hub port */ + unsigned long reset_done [EHCI_MAX_ROOT_PORTS]; + + /* bit vectors (one bit per port) */ + unsigned long bus_suspended; /* which ports were + already suspended at the start of a bus suspend */ + unsigned long companion_ports; /* which ports are + dedicated to the companion controller */ + unsigned long owned_ports; /* which ports are + owned by the companion during a bus suspend */ + unsigned long port_c_suspend; /* which ports have + the change-suspend feature turned on */ + unsigned long suspended_ports; /* which ports are + suspended */ + unsigned long resuming_ports; /* which ports have + started to resume */ + + /* per-HC memory pools (could be per-bus, but ...) */ + struct dma_pool *qh_pool; /* qh per active urb */ + struct dma_pool *qtd_pool; /* one or more per qh */ + struct dma_pool *itd_pool; /* itd per iso urb */ + struct dma_pool *sitd_pool; /* sitd per split iso urb */ + + unsigned random_frame; + unsigned long next_statechange; + ktime_t last_periodic_enable; + u32 command; + + /* SILICON QUIRKS */ + unsigned no_selective_suspend:1; + unsigned has_fsl_port_bug:1; /* FreeScale */ + unsigned big_endian_mmio:1; + unsigned big_endian_desc:1; + unsigned big_endian_capbase:1; + unsigned has_amcc_usb23:1; + unsigned need_io_watchdog:1; + unsigned amd_pll_fix:1; + unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/ + unsigned has_synopsys_hc_bug:1; /* Synopsys HC */ + unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */ + unsigned need_oc_pp_cycle:1; /* MPC834X port power */ + unsigned imx28_write_fix:1; /* For Freescale i.MX28 */ + + /* required for usb32 quirk */ + #define OHCI_CTRL_HCFS (3 << 6) + #define OHCI_USB_OPER (2 << 6) + #define OHCI_USB_SUSPEND (3 << 6) + + #define OHCI_HCCTRL_OFFSET 0x4 + #define OHCI_HCCTRL_LEN 0x4 + __hc32 *ohci_hcctrl_reg; + unsigned has_hostpc:1; + unsigned has_ppcd:1; /* support per-port change bits */ + u8 sbrn; /* packed release number */ + + /* irq statistics */ +#ifdef EHCI_STATS + struct ehci_stats stats; +# define COUNT(x) do { (x)++; } while (0) +#else +# define COUNT(x) do {} while (0) +#endif + + /* debug files */ +#ifdef DEBUG + struct dentry *debug_dir; +#endif + + /* platform-specific data -- must come last */ + unsigned long priv[0] __aligned(sizeof(s64)); +}; + +/* convert between an HCD pointer and the corresponding EHCI_HCD */ +static inline struct ehci_hcd *hcd_to_ehci (struct usb_hcd *hcd) +{ + return (struct ehci_hcd *) (hcd->hcd_priv); +} +static inline struct usb_hcd *ehci_to_hcd (struct ehci_hcd *ehci) +{ + return container_of ((void *) ehci, struct usb_hcd, hcd_priv); +} + +/*-------------------------------------------------------------------------*/ + +#include <linux/usb/ehci_def.h> + +/*-------------------------------------------------------------------------*/ + +#define QTD_NEXT(ehci, dma) cpu_to_hc32(ehci, (u32)dma) + +/* + * EHCI Specification 0.95 Section 3.5 + * QTD: describe data transfer components (buffer, direction, ...) + * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram".
+ * + * These are associated only with "QH" (Queue Head) structures, + * used with control, bulk, and interrupt transfers. + */ +struct ehci_qtd { + /* first part defined by EHCI spec */ + __hc32 hw_next; /* see EHCI 3.5.1 */ + __hc32 hw_alt_next; /* see EHCI 3.5.2 */ + __hc32 hw_token; /* see EHCI 3.5.3 */ +#define QTD_TOGGLE (1 << 31) /* data toggle */ +#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff) +#define QTD_IOC (1 << 15) /* interrupt on complete */ +#define QTD_CERR(tok) (((tok)>>10) & 0x3) +#define QTD_PID(tok) (((tok)>>8) & 0x3) +#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */ +#define QTD_STS_HALT (1 << 6) /* halted on error */ +#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */ +#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */ +#define QTD_STS_XACT (1 << 3) /* device gave illegal response */ +#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */ +#define QTD_STS_STS (1 << 1) /* split transaction state */ +#define QTD_STS_PING (1 << 0) /* issue PING? */ + +#define ACTIVE_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_ACTIVE) +#define HALT_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_HALT) +#define STATUS_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_STS) + + __hc32 hw_buf [5]; /* see EHCI 3.5.4 */ + __hc32 hw_buf_hi [5]; /* Appendix B */ + + /* the rest is HCD-private */ + dma_addr_t qtd_dma; /* qtd address */ + struct list_head qtd_list; /* sw qtd list */ + struct urb *urb; /* qtd's urb */ + size_t length; /* length of buffer */ +} __attribute__ ((aligned (32))); + +/* mask NakCnt+T in qh->hw_alt_next */ +#define QTD_MASK(ehci) cpu_to_hc32 (ehci, ~0x1f) + +#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1) + +/*-------------------------------------------------------------------------*/ + +/* type tag from {qh,itd,sitd,fstn}->hw_next */ +#define Q_NEXT_TYPE(ehci,dma) ((dma) & cpu_to_hc32(ehci, 3 << 1)) + +/* + * Now the following defines are not converted using the + * cpu_to_le32() macro anymore, since we have to support + * "dynamic" switching between be and le support, so that the driver + * can be used on one system with SoC EHCI controller using big-endian + * descriptors as well as a normal little-endian PCI EHCI controller. + */ +/* values for that type tag */ +#define Q_TYPE_ITD (0 << 1) +#define Q_TYPE_QH (1 << 1) +#define Q_TYPE_SITD (2 << 1) +#define Q_TYPE_FSTN (3 << 1) + +/* next async queue entry, or pointer to interrupt/periodic QH */ +#define QH_NEXT(ehci,dma) (cpu_to_hc32(ehci, (((u32)dma)&~0x01f)|Q_TYPE_QH)) + +/* for periodic/async schedules and qtd lists, mark end of list */ +#define EHCI_LIST_END(ehci) cpu_to_hc32(ehci, 1) /* "null pointer" to hw */ + +/* + * Entries in periodic shadow table are pointers to one of four kinds + * of data structure. That's dictated by the hardware; a type tag is + * encoded in the low bits of the hardware's periodic schedule. Use + * Q_NEXT_TYPE to get the tag. + * + * For entries in the async schedule, the type tag always says "qh". + */ +union ehci_shadow { + struct ehci_qh *qh; /* Q_TYPE_QH */ + struct ehci_itd *itd; /* Q_TYPE_ITD */ + struct ehci_sitd *sitd; /* Q_TYPE_SITD */ + struct ehci_fstn *fstn; /* Q_TYPE_FSTN */ + __hc32 *hw_next; /* (all types) */ + void *ptr; +}; + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI Specification 0.95 Section 3.6 + * QH: describes control/bulk/interrupt endpoints + * See Fig 3-7 "Queue Head Structure Layout". + * + * These appear in both the async and (for interrupt) periodic schedules. 
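+ * (Only the ehci_qh_hw layout below is visible to the controller; struct + * ehci_qh wraps a pointer to it plus the HCD-private bookkeeping, keeping + * the DMA-coherent area at exactly the hardware-defined layout.)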
+ */ + +/* first part defined by EHCI spec */ +struct ehci_qh_hw { + __hc32 hw_next; /* see EHCI 3.6.1 */ + __hc32 hw_info1; /* see EHCI 3.6.2 */ +#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */ +#define QH_HEAD (1 << 15) /* Head of async reclamation list */ +#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */ +#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */ +#define QH_LOW_SPEED (1 << 12) +#define QH_FULL_SPEED (0 << 12) +#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */ + __hc32 hw_info2; /* see EHCI 3.6.2 */ +#define QH_SMASK 0x000000ff +#define QH_CMASK 0x0000ff00 +#define QH_HUBADDR 0x007f0000 +#define QH_HUBPORT 0x3f800000 +#define QH_MULT 0xc0000000 + __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */ + + /* qtd overlay (hardware parts of a struct ehci_qtd) */ + __hc32 hw_qtd_next; + __hc32 hw_alt_next; + __hc32 hw_token; + __hc32 hw_buf [5]; + __hc32 hw_buf_hi [5]; +} __attribute__ ((aligned(32))); + +struct ehci_qh { + struct ehci_qh_hw *hw; /* Must come first */ + /* the rest is HCD-private */ + dma_addr_t qh_dma; /* address of qh */ + union ehci_shadow qh_next; /* ptr to qh; or periodic */ + struct list_head qtd_list; /* sw qtd list */ + struct list_head intr_node; /* list of intr QHs */ + struct ehci_qtd *dummy; + struct list_head unlink_node; + + unsigned unlink_cycle; + + u8 qh_state; +#define QH_STATE_LINKED 1 /* HC sees this */ +#define QH_STATE_UNLINK 2 /* HC may still see this */ +#define QH_STATE_IDLE 3 /* HC doesn't see this */ +#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */ +#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */ + + u8 xacterrs; /* XactErr retry counter */ +#define QH_XACTERR_MAX 32 /* XactErr retry limit */ + + /* periodic schedule info */ + u8 usecs; /* intr bandwidth */ + u8 gap_uf; /* uframes split/csplit gap */ + u8 c_usecs; /* ... split completion bw */ + u16 tt_usecs; /* tt downstream bandwidth */ + unsigned short period; /* polling interval */ + unsigned short start; /* where polling starts */ +#define NO_FRAME ((unsigned short)~0) /* pick new start */ + + struct usb_device *dev; /* access to TT */ + unsigned is_out:1; /* bulk or intr OUT */ + unsigned clearing_tt:1; /* Clear-TT-Buf in progress */ + unsigned dequeue_during_giveback:1; + unsigned exception:1; /* got a fault, or an unlink + was requested */ +}; + +/*-------------------------------------------------------------------------*/ + +/* description of one iso transaction (up to 3 KB data if highspeed) */ +struct ehci_iso_packet { + /* These will be copied to iTD when scheduling */ + u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */ + __hc32 transaction; /* itd->hw_transaction[i] |= */ + u8 cross; /* buf crosses pages */ + /* for full speed OUT splits */ + u32 buf1; +}; + +/* temporary schedule data for packets from iso urbs (both speeds) + * each packet is one logical usb transaction to the device (not TT), + * beginning at stream->next_uframe + */ +struct ehci_iso_sched { + struct list_head td_list; + unsigned span; + struct ehci_iso_packet packet [0]; +}; + +/* + * ehci_iso_stream - groups all (s)itds for this endpoint. + * acts like a qh would, if EHCI had them for ISO. 
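+ * (One stream exists per iso endpoint: in-flight iTDs/siTDs sit on td_list + * and finished ones are parked on free_list for reuse -- the recycling + * visible in sitd_complete() earlier.)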
+ */ +struct ehci_iso_stream { + /* first field matches ehci_qh, but is NULL */ + struct ehci_qh_hw *hw; + + u8 bEndpointAddress; + u8 highspeed; + struct list_head td_list; /* queued itds/sitds */ + struct list_head free_list; /* list of unused itds/sitds */ + struct usb_device *udev; + struct usb_host_endpoint *ep; + + /* output of (re)scheduling */ + int next_uframe; + __hc32 splits; + + /* the rest is derived from the endpoint descriptor, + * trusting urb->interval == f(epdesc->bInterval) and + * including the extra info for hw_bufp[0..2] + */ + u8 usecs, c_usecs; + u16 interval; + u16 tt_usecs; + u16 maxp; + u16 raw_mask; + unsigned bandwidth; + + /* This is used to initialize iTD's hw_bufp fields */ + __hc32 buf0; + __hc32 buf1; + __hc32 buf2; + + /* this is used to initialize sITD's tt info */ + __hc32 address; +}; + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI Specification 0.95 Section 3.3 + * Fig 3-4 "Isochronous Transaction Descriptor (iTD)" + * + * Schedule records for high speed iso xfers + */ +struct ehci_itd { + /* first part defined by EHCI spec */ + __hc32 hw_next; /* see EHCI 3.3.1 */ + __hc32 hw_transaction [8]; /* see EHCI 3.3.2 */ +#define EHCI_ISOC_ACTIVE (1<<31) /* activate transfer this slot */ +#define EHCI_ISOC_BUF_ERR (1<<30) /* Data buffer error */ +#define EHCI_ISOC_BABBLE (1<<29) /* babble detected */ +#define EHCI_ISOC_XACTERR (1<<28) /* XactErr - transaction error */ +#define EHCI_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff) +#define EHCI_ITD_IOC (1 << 15) /* interrupt on complete */ + +#define ITD_ACTIVE(ehci) cpu_to_hc32(ehci, EHCI_ISOC_ACTIVE) + + __hc32 hw_bufp [7]; /* see EHCI 3.3.3 */ + __hc32 hw_bufp_hi [7]; /* Appendix B */ + + /* the rest is HCD-private */ + dma_addr_t itd_dma; /* for this itd */ + union ehci_shadow itd_next; /* ptr to periodic q entry */ + + struct urb *urb; + struct ehci_iso_stream *stream; /* endpoint's queue */ + struct list_head itd_list; /* list of stream's itds */ + + /* any/all hw_transactions here may be used by that urb */ + unsigned frame; /* where scheduled */ + unsigned pg; + unsigned index[8]; /* in urb->iso_frame_desc */ +} __attribute__ ((aligned (32))); + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI Specification 0.95 Section 3.4 + * siTD, aka split-transaction isochronous Transfer Descriptor + * ...
describe full speed iso xfers through TT in hubs + * see Figure 3-5 "Split-transaction Isochronous Transaction Descriptor (siTD) + */ +struct ehci_sitd { + /* first part defined by EHCI spec */ + __hc32 hw_next; +/* uses bit field macros above - see EHCI 0.95 Table 3-8 */ + __hc32 hw_fullspeed_ep; /* EHCI table 3-9 */ + __hc32 hw_uframe; /* EHCI table 3-10 */ + __hc32 hw_results; /* EHCI table 3-11 */ +#define SITD_IOC (1 << 31) /* interrupt on completion */ +#define SITD_PAGE (1 << 30) /* buffer 0/1 */ +#define SITD_LENGTH(x) (0x3ff & ((x)>>16)) +#define SITD_STS_ACTIVE (1 << 7) /* HC may execute this */ +#define SITD_STS_ERR (1 << 6) /* error from TT */ +#define SITD_STS_DBE (1 << 5) /* data buffer error (in HC) */ +#define SITD_STS_BABBLE (1 << 4) /* device was babbling */ +#define SITD_STS_XACT (1 << 3) /* illegal IN response */ +#define SITD_STS_MMF (1 << 2) /* incomplete split transaction */ +#define SITD_STS_STS (1 << 1) /* split transaction state */ + +#define SITD_ACTIVE(ehci) cpu_to_hc32(ehci, SITD_STS_ACTIVE) + + __hc32 hw_buf [2]; /* EHCI table 3-12 */ + __hc32 hw_backpointer; /* EHCI table 3-13 */ + __hc32 hw_buf_hi [2]; /* Appendix B */ + + /* the rest is HCD-private */ + dma_addr_t sitd_dma; + union ehci_shadow sitd_next; /* ptr to periodic q entry */ + + struct urb *urb; + struct ehci_iso_stream *stream; /* endpoint's queue */ + struct list_head sitd_list; /* list of stream's sitds */ + unsigned frame; + unsigned index; +} __attribute__ ((aligned (32))); + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI Specification 0.96 Section 3.7 + * Periodic Frame Span Traversal Node (FSTN) + * + * Manages split interrupt transactions (using TT) that span frame boundaries + * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN + * makes the HC jump (back) to a QH to scan for fs/ls QH completions until + * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work. + */ +struct ehci_fstn { + __hc32 hw_next; /* any periodic q entry */ + __hc32 hw_prev; /* qh or EHCI_LIST_END */ + + /* the rest is HCD-private */ + dma_addr_t fstn_dma; + union ehci_shadow fstn_next; /* ptr to periodic q entry */ +} __attribute__ ((aligned (32))); + +/*-------------------------------------------------------------------------*/ + +/* Prepare the PORTSC wakeup flags during controller suspend/resume */ + +#define ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup) \ + ehci_adjust_port_wakeup_flags(ehci, true, do_wakeup); + +#define ehci_prepare_ports_for_controller_resume(ehci) \ + ehci_adjust_port_wakeup_flags(ehci, false, false); + +/*-------------------------------------------------------------------------*/ + +#ifdef CONFIG_USB_EHCI_ROOT_HUB_TT + +/* + * Some EHCI controllers have a Transaction Translator built into the + * root hub. This is a non-standard feature. Each controller will need + * to add code to the following inline functions, and call them as + * needed (mostly in root hub code). + */ + +#define ehci_is_TDI(e) (ehci_to_hcd(e)->has_tt) + +/* Returns the speed of a device attached to a port on the root hub. */ +static inline unsigned int +ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc) +{ + if (ehci_is_TDI(ehci)) { + switch ((portsc >> (ehci->has_hostpc ? 
25 : 26)) & 3) { + case 0: + return 0; + case 1: + return USB_PORT_STAT_LOW_SPEED; + case 2: + default: + return USB_PORT_STAT_HIGH_SPEED; + } + } + return USB_PORT_STAT_HIGH_SPEED; +} + +#else + +#define ehci_is_TDI(e) (0) + +#define ehci_port_speed(ehci, portsc) USB_PORT_STAT_HIGH_SPEED +#endif + +/*-------------------------------------------------------------------------*/ + +#ifdef CONFIG_PPC_83xx +/* Some Freescale processors have an erratum in which the TT + * port number in the queue head was 0..N-1 instead of 1..N. + */ +#define ehci_has_fsl_portno_bug(e) ((e)->has_fsl_port_bug) +#else +#define ehci_has_fsl_portno_bug(e) (0) +#endif + +/* + * While most USB host controllers implement their registers in + * little-endian format, a minority (celleb companion chip) implement + * them in big endian format. + * + * This attempts to support either format at compile time without a + * runtime penalty, or both formats with the additional overhead + * of checking a flag bit. + * + * ehci_big_endian_capbase is a special quirk for controllers that + * implement the HC capability registers as separate registers and not + * as fields of a 32-bit register. + */ + +#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO +#define ehci_big_endian_mmio(e) ((e)->big_endian_mmio) +#define ehci_big_endian_capbase(e) ((e)->big_endian_capbase) +#else +#define ehci_big_endian_mmio(e) 0 +#define ehci_big_endian_capbase(e) 0 +#endif + +/* + * Big-endian read/write functions are arch-specific. + * Other arches can be added if/when they're needed. + */ +#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_IXP4XX) +#define readl_be(addr) __raw_readl((__force unsigned *)addr) +#define writel_be(val, addr) __raw_writel(val, (__force unsigned *)addr) +#endif + +static inline unsigned int ehci_readl(const struct ehci_hcd *ehci, + __u32 __iomem * regs) +{ +#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO + return ehci_big_endian_mmio(ehci) ? + readl_be(regs) : + readl(regs); +#else + return readl(regs); +#endif +} + +#ifdef CONFIG_SOC_IMX28 +static inline void imx28_ehci_writel(const unsigned int val, + volatile __u32 __iomem *addr) +{ + __asm__ ("swp %0, %0, [%1]" : : "r"(val), "r"(addr)); +} +#else +static inline void imx28_ehci_writel(const unsigned int val, + volatile __u32 __iomem *addr) +{ +} +#endif +static inline void ehci_writel(const struct ehci_hcd *ehci, + const unsigned int val, __u32 __iomem *regs) +{ +#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO + ehci_big_endian_mmio(ehci) ? + writel_be(val, regs) : + writel(val, regs); +#else + if (ehci->imx28_write_fix) + imx28_ehci_writel(val, regs); + else + writel(val, regs); +#endif +} + +/* + * On certain ppc-44x SoCs there is a HW issue that can only be worked around + * with explicit suspend/operate of OHCI. This function therefore makes sense + * only on that arch. + * Other common bits are dependent on the has_amcc_usb23 quirk flag.
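+ * (Usage: set_ohci_hcfs(ehci, 1) forces the companion OHCI's HcControl + * functional state to USBOperational and set_ohci_hcfs(ehci, 0) to + * USBSuspend; on all other arches it compiles to an empty inline.)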
+ */ +#ifdef CONFIG_44x +static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational) +{ + u32 hc_control; + + hc_control = (readl_be(ehci->ohci_hcctrl_reg) & ~OHCI_CTRL_HCFS); + if (operational) + hc_control |= OHCI_USB_OPER; + else + hc_control |= OHCI_USB_SUSPEND; + + writel_be(hc_control, ehci->ohci_hcctrl_reg); + (void) readl_be(ehci->ohci_hcctrl_reg); +} +#else +static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational) +{ } +#endif + +/*-------------------------------------------------------------------------*/ + +/* + * The AMCC 440EPx not only implements its EHCI registers in big-endian + * format, but also its DMA data structures (descriptors). + * + * EHCI controllers accessed through PCI work normally (little-endian + * everywhere), so we won't bother supporting a BE-only mode for now. + */ +#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_DESC +#define ehci_big_endian_desc(e) ((e)->big_endian_desc) + +/* cpu to ehci */ +static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x) +{ + return ehci_big_endian_desc(ehci) + ? (__force __hc32)cpu_to_be32(x) + : (__force __hc32)cpu_to_le32(x); +} + +/* ehci to cpu */ +static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x) +{ + return ehci_big_endian_desc(ehci) + ? be32_to_cpu((__force __be32)x) + : le32_to_cpu((__force __le32)x); +} + +static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x) +{ + return ehci_big_endian_desc(ehci) + ? be32_to_cpup((__force __be32 *)x) + : le32_to_cpup((__force __le32 *)x); +} + +#else + +/* cpu to ehci */ +static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x) +{ + return cpu_to_le32(x); +} + +/* ehci to cpu */ +static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x) +{ + return le32_to_cpu(x); +} + +static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x) +{ + return le32_to_cpup(x); +} + +#endif + +/*-------------------------------------------------------------------------*/ + +#define ehci_dbg(ehci, fmt, args...) \ + dev_dbg(ehci_to_hcd(ehci)->self.controller , fmt , ## args) +#define ehci_err(ehci, fmt, args...) \ + dev_err(ehci_to_hcd(ehci)->self.controller , fmt , ## args) +#define ehci_info(ehci, fmt, args...) \ + dev_info(ehci_to_hcd(ehci)->self.controller , fmt , ## args) +#define ehci_warn(ehci, fmt, args...) \ + dev_warn(ehci_to_hcd(ehci)->self.controller , fmt , ## args) + +#ifdef VERBOSE_DEBUG +# define ehci_vdbg ehci_dbg +#else + static inline void ehci_vdbg(struct ehci_hcd *ehci, ...) {} +#endif + +#ifndef DEBUG +#define STUB_DEBUG_FILES +#endif /* DEBUG */ + +/*-------------------------------------------------------------------------*/ + +/* Declarations of things exported for use by ehci platform drivers */ + +struct ehci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *hcd); +}; + +extern void ehci_init_driver(struct hc_driver *drv, + const struct ehci_driver_overrides *over); +extern int ehci_setup(struct usb_hcd *hcd); + +#ifdef CONFIG_PM +extern int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup); +extern int ehci_resume(struct usb_hcd *hcd, bool hibernated); +#endif /* CONFIG_PM */ + +#endif /* __LINUX_EHCI_HCD_H */ diff --git a/addons/ehci-pci/src/3.10.108/pci-quirks.c b/addons/ehci-pci/src/3.10.108/pci-quirks.c new file mode 100644 index 00000000..66c90588 --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/pci-quirks.c @@ -0,0 +1,1001 @@ +/* + * This file contains code to reset and initialize USB host controllers. 
+ * Some of it includes work-arounds for PCI hardware and BIOS quirks. + * It may need to run early during booting -- before USB would normally + * initialize -- to ensure that Linux doesn't use any legacy modes. + * + * Copyright (c) 1999 Martin Mares + * (and others) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "pci-quirks.h" +#include "xhci-ext-caps.h" + + +#define UHCI_USBLEGSUP 0xc0 /* legacy support */ +#define UHCI_USBCMD 0 /* command register */ +#define UHCI_USBINTR 4 /* interrupt register */ +#define UHCI_USBLEGSUP_RWC 0x8f00 /* the R/WC bits */ +#define UHCI_USBLEGSUP_RO 0x5040 /* R/O and reserved bits */ +#define UHCI_USBCMD_RUN 0x0001 /* RUN/STOP bit */ +#define UHCI_USBCMD_HCRESET 0x0002 /* Host Controller reset */ +#define UHCI_USBCMD_EGSM 0x0008 /* Global Suspend Mode */ +#define UHCI_USBCMD_CONFIGURE 0x0040 /* Config Flag */ +#define UHCI_USBINTR_RESUME 0x0002 /* Resume interrupt enable */ + +#define OHCI_CONTROL 0x04 +#define OHCI_CMDSTATUS 0x08 +#define OHCI_INTRSTATUS 0x0c +#define OHCI_INTRENABLE 0x10 +#define OHCI_INTRDISABLE 0x14 +#define OHCI_FMINTERVAL 0x34 +#define OHCI_HCFS (3 << 6) /* hc functional state */ +#define OHCI_HCR (1 << 0) /* host controller reset */ +#define OHCI_OCR (1 << 3) /* ownership change request */ +#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */ +#define OHCI_CTRL_IR (1 << 8) /* interrupt routing */ +#define OHCI_INTR_OC (1 << 30) /* ownership change */ + +#define EHCI_HCC_PARAMS 0x08 /* extended capabilities */ +#define EHCI_USBCMD 0 /* command register */ +#define EHCI_USBCMD_RUN (1 << 0) /* RUN/STOP bit */ +#define EHCI_USBSTS 4 /* status register */ +#define EHCI_USBSTS_HALTED (1 << 12) /* HCHalted bit */ +#define EHCI_USBINTR 8 /* interrupt register */ +#define EHCI_CONFIGFLAG 0x40 /* configured flag register */ +#define EHCI_USBLEGSUP 0 /* legacy support register */ +#define EHCI_USBLEGSUP_BIOS (1 << 16) /* BIOS semaphore */ +#define EHCI_USBLEGSUP_OS (1 << 24) /* OS semaphore */ +#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */ +#define EHCI_USBLEGCTLSTS_SOOE (1 << 13) /* SMI on ownership change */ + +/* AMD quirk use */ +#define AB_REG_BAR_LOW 0xe0 +#define AB_REG_BAR_HIGH 0xe1 +#define AB_REG_BAR_SB700 0xf0 +#define AB_INDX(addr) ((addr) + 0x00) +#define AB_DATA(addr) ((addr) + 0x04) +#define AX_INDXC 0x30 +#define AX_DATAC 0x34 + +#define NB_PCIE_INDX_ADDR 0xe0 +#define NB_PCIE_INDX_DATA 0xe4 +#define PCIE_P_CNTL 0x10040 +#define BIF_NB 0x10002 +#define NB_PIF0_PWRDOWN_0 0x01100012 +#define NB_PIF0_PWRDOWN_1 0x01100013 + +#define USB_INTEL_XUSB2PR 0xD0 +#define USB_INTEL_USB2PRM 0xD4 +#define USB_INTEL_USB3_PSSEN 0xD8 +#define USB_INTEL_USB3PRM 0xDC + +static struct amd_chipset_info { + struct pci_dev *nb_dev; + struct pci_dev *smbus_dev; + int nb_type; + int sb_type; + int isoc_reqs; + int probe_count; + int probe_result; +} amd_chipset; + +static DEFINE_SPINLOCK(amd_lock); + +int usb_amd_find_chipset_info(void) +{ + u8 rev = 0; + unsigned long flags; + struct amd_chipset_info info; + int ret; + + spin_lock_irqsave(&amd_lock, flags); + + /* probe only once */ + if (amd_chipset.probe_count > 0) { + amd_chipset.probe_count++; + spin_unlock_irqrestore(&amd_lock, flags); + return amd_chipset.probe_result; + } + memset(&info, 0, sizeof(info)); + spin_unlock_irqrestore(&amd_lock, flags); + + info.smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, 0x4385, NULL); + if (info.smbus_dev) { + rev = info.smbus_dev->revision; + if (rev >= 0x40) + info.sb_type = 1; + 
else if (rev >= 0x30 && rev <= 0x3b) + info.sb_type = 3; + } else { + info.smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, + 0x780b, NULL); + if (!info.smbus_dev) { + ret = 0; + goto commit; + } + + rev = info.smbus_dev->revision; + if (rev >= 0x11 && rev <= 0x18) + info.sb_type = 2; + } + + if (info.sb_type == 0) { + if (info.smbus_dev) { + pci_dev_put(info.smbus_dev); + info.smbus_dev = NULL; + } + ret = 0; + goto commit; + } + + info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL); + if (info.nb_dev) { + info.nb_type = 1; + } else { + info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL); + if (info.nb_dev) { + info.nb_type = 2; + } else { + info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, + 0x9600, NULL); + if (info.nb_dev) + info.nb_type = 3; + } + } + + ret = info.probe_result = 1; + printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); + +commit: + + spin_lock_irqsave(&amd_lock, flags); + if (amd_chipset.probe_count > 0) { + /* race - someone else was faster - drop devices */ + + /* Mark that we were here */ + amd_chipset.probe_count++; + ret = amd_chipset.probe_result; + + spin_unlock_irqrestore(&amd_lock, flags); + + if (info.nb_dev) + pci_dev_put(info.nb_dev); + if (info.smbus_dev) + pci_dev_put(info.smbus_dev); + + } else { + /* no race - commit the result */ + info.probe_count++; + amd_chipset = info; + spin_unlock_irqrestore(&amd_lock, flags); + } + + return ret; +} +EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info); + +/* + * The hardware normally enables the A-link power management feature, which + * lets the system lower the power consumption in idle states. + * + * This USB quirk prevents the link going into that lower power state + * during isochronous transfers. + * + * Without this quirk, isochronous streams on OHCI/EHCI/xHCI controllers of + * some AMD platforms may stutter or have breaks occasionally. + */ +static void usb_amd_quirk_pll(int disable) +{ + u32 addr, addr_low, addr_high, val; + u32 bit = disable ?
0 : 1; + unsigned long flags; + + spin_lock_irqsave(&amd_lock, flags); + + if (disable) { + amd_chipset.isoc_reqs++; + if (amd_chipset.isoc_reqs > 1) { + spin_unlock_irqrestore(&amd_lock, flags); + return; + } + } else { + amd_chipset.isoc_reqs--; + if (amd_chipset.isoc_reqs > 0) { + spin_unlock_irqrestore(&amd_lock, flags); + return; + } + } + + if (amd_chipset.sb_type == 1 || amd_chipset.sb_type == 2) { + outb_p(AB_REG_BAR_LOW, 0xcd6); + addr_low = inb_p(0xcd7); + outb_p(AB_REG_BAR_HIGH, 0xcd6); + addr_high = inb_p(0xcd7); + addr = addr_high << 8 | addr_low; + + outl_p(0x30, AB_INDX(addr)); + outl_p(0x40, AB_DATA(addr)); + outl_p(0x34, AB_INDX(addr)); + val = inl_p(AB_DATA(addr)); + } else if (amd_chipset.sb_type == 3) { + pci_read_config_dword(amd_chipset.smbus_dev, + AB_REG_BAR_SB700, &addr); + outl(AX_INDXC, AB_INDX(addr)); + outl(0x40, AB_DATA(addr)); + outl(AX_DATAC, AB_INDX(addr)); + val = inl(AB_DATA(addr)); + } else { + spin_unlock_irqrestore(&amd_lock, flags); + return; + } + + if (disable) { + val &= ~0x08; + val |= (1 << 4) | (1 << 9); + } else { + val |= 0x08; + val &= ~((1 << 4) | (1 << 9)); + } + outl_p(val, AB_DATA(addr)); + + if (!amd_chipset.nb_dev) { + spin_unlock_irqrestore(&amd_lock, flags); + return; + } + + if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) { + addr = PCIE_P_CNTL; + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_ADDR, addr); + pci_read_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, &val); + + val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12)); + val |= bit | (bit << 3) | (bit << 12); + val |= ((!bit) << 4) | ((!bit) << 9); + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, val); + + addr = BIF_NB; + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_ADDR, addr); + pci_read_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, &val); + val &= ~(1 << 8); + val |= bit << 8; + + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, val); + } else if (amd_chipset.nb_type == 2) { + addr = NB_PIF0_PWRDOWN_0; + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_ADDR, addr); + pci_read_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, &val); + if (disable) + val &= ~(0x3f << 7); + else + val |= 0x3f << 7; + + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, val); + + addr = NB_PIF0_PWRDOWN_1; + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_ADDR, addr); + pci_read_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, &val); + if (disable) + val &= ~(0x3f << 7); + else + val |= 0x3f << 7; + + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, val); + } + + spin_unlock_irqrestore(&amd_lock, flags); + return; +} + +void usb_amd_quirk_pll_disable(void) +{ + usb_amd_quirk_pll(1); +} +EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable); + +void usb_amd_quirk_pll_enable(void) +{ + usb_amd_quirk_pll(0); +} +EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable); + +void usb_amd_dev_put(void) +{ + struct pci_dev *nb, *smbus; + unsigned long flags; + + spin_lock_irqsave(&amd_lock, flags); + + amd_chipset.probe_count--; + if (amd_chipset.probe_count > 0) { + spin_unlock_irqrestore(&amd_lock, flags); + return; + } + + /* save them to pci_dev_put outside of spinlock */ + nb = amd_chipset.nb_dev; + smbus = amd_chipset.smbus_dev; + + amd_chipset.nb_dev = NULL; + amd_chipset.smbus_dev = NULL; + amd_chipset.nb_type = 0; + amd_chipset.sb_type = 0; + amd_chipset.isoc_reqs = 0; + amd_chipset.probe_result = 0; + + spin_unlock_irqrestore(&amd_lock, flags); + + if (nb) + pci_dev_put(nb); 
+ if (smbus) + pci_dev_put(smbus); +} +EXPORT_SYMBOL_GPL(usb_amd_dev_put); + +/* + * Make sure the controller is completely inactive, unable to + * generate interrupts or do DMA. + */ +void uhci_reset_hc(struct pci_dev *pdev, unsigned long base) +{ + /* Turn off PIRQ enable and SMI enable. (This also turns off the + * BIOS's USB Legacy Support.) Turn off all the R/WC bits too. + */ + pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC); + + /* Reset the HC - this will force us to get a + * new notification of any already connected + * ports due to the virtual disconnect that it + * implies. + */ + outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD); + mb(); + udelay(5); + if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET) + dev_warn(&pdev->dev, "HCRESET not completed yet!\n"); + + /* Just to be safe, disable interrupt requests and + * make sure the controller is stopped. + */ + outw(0, base + UHCI_USBINTR); + outw(0, base + UHCI_USBCMD); +} +EXPORT_SYMBOL_GPL(uhci_reset_hc); + +/* + * Initialize a controller that was newly discovered or has just been + * resumed. In either case we can't be sure of its previous state. + * + * Returns: 1 if the controller was reset, 0 otherwise. + */ +int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base) +{ + u16 legsup; + unsigned int cmd, intr; + + /* + * When restarting a suspended controller, we expect all the + * settings to be the same as we left them: + * + * PIRQ and SMI disabled, no R/W bits set in USBLEGSUP; + * Controller is stopped and configured with EGSM set; + * No interrupts enabled except possibly Resume Detect. + * + * If any of these conditions are violated we do a complete reset. + */ + pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup); + if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) { + dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n", + __func__, legsup); + goto reset_needed; + } + + cmd = inw(base + UHCI_USBCMD); + if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) || + !(cmd & UHCI_USBCMD_EGSM)) { + dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n", + __func__, cmd); + goto reset_needed; + } + + intr = inw(base + UHCI_USBINTR); + if (intr & (~UHCI_USBINTR_RESUME)) { + dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n", + __func__, intr); + goto reset_needed; + } + return 0; + +reset_needed: + dev_dbg(&pdev->dev, "Performing full reset\n"); + uhci_reset_hc(pdev, base); + return 1; +} +EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc); + +static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask) +{ + u16 cmd; + return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask); +} + +#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO) +#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY) + +static void quirk_usb_handoff_uhci(struct pci_dev *pdev) +{ + unsigned long base = 0; + int i; + + if (!pio_enabled(pdev)) + return; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) + if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) { + base = pci_resource_start(pdev, i); + break; + } + + if (base) + uhci_check_and_reset_hc(pdev, base); +} + +static int mmio_resource_enabled(struct pci_dev *pdev, int idx) +{ + return pci_resource_start(pdev, idx) && mmio_enabled(pdev); +} + +static void quirk_usb_handoff_ohci(struct pci_dev *pdev) +{ + void __iomem *base; + u32 control; + u32 fminterval = 0; + bool no_fminterval = false; + int cnt; + + if (!mmio_resource_enabled(pdev, 0)) + return; + + base = pci_ioremap_bar(pdev, 0); + if (base == NULL) + return; + + /* + * ULi M5237 OHCI controller locks the 
whole system when accessing + * the OHCI_FMINTERVAL offset. + */ + if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237) + no_fminterval = true; + + control = readl(base + OHCI_CONTROL); + +/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */ +#ifdef __hppa__ +#define OHCI_CTRL_MASK (OHCI_CTRL_RWC | OHCI_CTRL_IR) +#else +#define OHCI_CTRL_MASK OHCI_CTRL_RWC + + if (control & OHCI_CTRL_IR) { + int wait_time = 500; /* arbitrary; 5 seconds */ + writel(OHCI_INTR_OC, base + OHCI_INTRENABLE); + writel(OHCI_OCR, base + OHCI_CMDSTATUS); + while (wait_time > 0 && + readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) { + wait_time -= 10; + msleep(10); + } + if (wait_time <= 0) + dev_warn(&pdev->dev, "OHCI: BIOS handoff failed" + " (BIOS bug?) %08x\n", + readl(base + OHCI_CONTROL)); + } +#endif + + /* disable interrupts */ + writel((u32) ~0, base + OHCI_INTRDISABLE); + + /* Reset the USB bus, if the controller isn't already in RESET */ + if (control & OHCI_HCFS) { + /* Go into RESET, preserving RWC (and possibly IR) */ + writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL); + readl(base + OHCI_CONTROL); + + /* drive bus reset for at least 50 ms (7.1.7.5) */ + msleep(50); + } + + /* software reset of the controller, preserving HcFmInterval */ + if (!no_fminterval) + fminterval = readl(base + OHCI_FMINTERVAL); + + writel(OHCI_HCR, base + OHCI_CMDSTATUS); + + /* reset requires max 10 us delay */ + for (cnt = 30; cnt > 0; --cnt) { /* ... allow extra time */ + if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0) + break; + udelay(1); + } + + if (!no_fminterval) + writel(fminterval, base + OHCI_FMINTERVAL); + + /* Now the controller is safely in SUSPEND and nothing can wake it up */ + iounmap(base); +} + +static const struct dmi_system_id ehci_dmi_nohandoff_table[] = { + { + /* Pegatron Lucid (ExoPC) */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"), + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"), + }, + }, + { + /* Pegatron Lucid (Ordissimo AIRIS) */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "M11JB"), + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), + }, + }, + { + /* Pegatron Lucid (Ordissimo) */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"), + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), + }, + }, + { + /* HASEE E200 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"), + DMI_MATCH(DMI_BOARD_NAME, "E210"), + DMI_MATCH(DMI_BIOS_VERSION, "6.00"), + }, + }, + { } +}; + +static void ehci_bios_handoff(struct pci_dev *pdev, + void __iomem *op_reg_base, + u32 cap, u8 offset) +{ + int try_handoff = 1, tried_handoff = 0; + + /* + * The Pegatron Lucid tablet sporadically waits for 98 seconds trying + * the handoff on its unused controller. Skip it. + * + * The HASEE E200 hangs when the semaphore is set (bugzilla #77021). + */ + if (pdev->vendor == 0x8086 && (pdev->device == 0x283a || + pdev->device == 0x27cc)) { + if (dmi_check_system(ehci_dmi_nohandoff_table)) + try_handoff = 0; + } + + if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) { + dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n"); + +#if 0 +/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on, + * but that seems dubious in general (the BIOS left it off intentionally) + * and is known to prevent some systems from booting. so we won't do this + * unless maybe we can determine when we're on a system that needs SMI forced. 
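+ * (For reference: EHCI_USBLEGCTLSTS_SOOE used below is the "SMI on OS
+ * Ownership Enable" bit, bit 13 of USBLEGCTLSTS in the EHCI spec,
+ * section 2.1.8.)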
+ */ + /* BIOS workaround (?): be sure the pre-Linux code + * receives the SMI + */ + pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val); + pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, + val | EHCI_USBLEGCTLSTS_SOOE); +#endif + + /* some systems get upset if this semaphore is + * set for any other reason than forcing a BIOS + * handoff.. + */ + pci_write_config_byte(pdev, offset + 3, 1); + } + + /* if boot firmware now owns EHCI, spin till it hands it over. */ + if (try_handoff) { + int msec = 1000; + while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) { + tried_handoff = 1; + msleep(10); + msec -= 10; + pci_read_config_dword(pdev, offset, &cap); + } + } + + if (cap & EHCI_USBLEGSUP_BIOS) { + /* well, possibly buggy BIOS... try to shut it down, + * and hope nothing goes too wrong + */ + if (try_handoff) + dev_warn(&pdev->dev, "EHCI: BIOS handoff failed" + " (BIOS bug?) %08x\n", cap); + pci_write_config_byte(pdev, offset + 2, 0); + } + + /* just in case, always disable EHCI SMIs */ + pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0); + + /* If the BIOS ever owned the controller then we can't expect + * any power sessions to remain intact. + */ + if (tried_handoff) + writel(0, op_reg_base + EHCI_CONFIGFLAG); +} + +static void quirk_usb_disable_ehci(struct pci_dev *pdev) +{ + void __iomem *base, *op_reg_base; + u32 hcc_params, cap, val; + u8 offset, cap_length; + int wait_time, count = 256/4; + + if (!mmio_resource_enabled(pdev, 0)) + return; + + base = pci_ioremap_bar(pdev, 0); + if (base == NULL) + return; + + cap_length = readb(base); + op_reg_base = base + cap_length; + + /* EHCI 0.96 and later may have "extended capabilities" + * spec section 5.1 explains the bios handoff, e.g. for + * booting from USB disk or using a usb keyboard + */ + hcc_params = readl(base + EHCI_HCC_PARAMS); + offset = (hcc_params >> 8) & 0xff; + while (offset && --count) { + pci_read_config_dword(pdev, offset, &cap); + + switch (cap & 0xff) { + case 1: + ehci_bios_handoff(pdev, op_reg_base, cap, offset); + break; + case 0: /* Illegal reserved cap, set cap=0 so we exit */ + cap = 0; /* then fallthrough... */ + default: + dev_warn(&pdev->dev, "EHCI: unrecognized capability " + "%02x\n", cap & 0xff); + } + offset = (cap >> 8) & 0xff; + } + if (!count) + dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n"); + + /* + * halt EHCI & disable its interrupts in any case + */ + val = readl(op_reg_base + EHCI_USBSTS); + if ((val & EHCI_USBSTS_HALTED) == 0) { + val = readl(op_reg_base + EHCI_USBCMD); + val &= ~EHCI_USBCMD_RUN; + writel(val, op_reg_base + EHCI_USBCMD); + + wait_time = 2000; + do { + writel(0x3f, op_reg_base + EHCI_USBSTS); + udelay(100); + wait_time -= 100; + val = readl(op_reg_base + EHCI_USBSTS); + if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) { + break; + } + } while (wait_time > 0); + } + writel(0, op_reg_base + EHCI_USBINTR); + writel(0x3f, op_reg_base + EHCI_USBSTS); + + iounmap(base); +} + +/* + * handshake - spin reading a register until handshake completes + * @ptr: address of hc register to be read + * @mask: bits to look at in result of read + * @done: value of those bits when handshake succeeds + * @wait_usec: timeout in microseconds + * @delay_usec: delay in microseconds to wait between polling + * + * Polls a register every delay_usec microseconds. + * Returns 0 when the mask bits have the value done. + * Returns -ETIMEDOUT if this condition is not true after + * wait_usec microseconds have passed. 
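+ *
+ * A minimal illustrative call (editor's sketch; SOME_REG and SOME_BIT are
+ * placeholders, not symbols defined in this file): to wait up to 1 ms for
+ * a bit to clear, polling every 10 microseconds:
+ *
+ *	if (handshake(base + SOME_REG, SOME_BIT, 0, 1000, 10))
+ *		dev_warn(&pdev->dev, "register did not settle\n");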
+ */ +static int handshake(void __iomem *ptr, u32 mask, u32 done, + int wait_usec, int delay_usec) +{ + u32 result; + + do { + result = readl(ptr); + result &= mask; + if (result == done) + return 0; + udelay(delay_usec); + wait_usec -= delay_usec; + } while (wait_usec > 0); + return -ETIMEDOUT; +} + +#define PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI 0x8C31 +#define PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI 0x9C31 + +bool usb_is_intel_ppt_switchable_xhci(struct pci_dev *pdev) +{ + return pdev->class == PCI_CLASS_SERIAL_USB_XHCI && + pdev->vendor == PCI_VENDOR_ID_INTEL && + pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI; +} + +/* The Intel Lynx Point chipset also has switchable ports. */ +bool usb_is_intel_lpt_switchable_xhci(struct pci_dev *pdev) +{ + return pdev->class == PCI_CLASS_SERIAL_USB_XHCI && + pdev->vendor == PCI_VENDOR_ID_INTEL && + (pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_LYNX_POINT_LP_XHCI); +} + +bool usb_is_intel_switchable_xhci(struct pci_dev *pdev) +{ + return usb_is_intel_ppt_switchable_xhci(pdev) || + usb_is_intel_lpt_switchable_xhci(pdev); +} +EXPORT_SYMBOL_GPL(usb_is_intel_switchable_xhci); + +/* + * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that + * share some number of ports. These ports can be switched between either + * controller. Not all of the ports under the EHCI host controller may be + * switchable. + * + * The ports should be switched over to xHCI before PCI probes for any device + * start. This avoids active devices under EHCI being disconnected during the + * port switchover, which could cause loss of data on USB storage devices, or + * failed boot when the root file system is on a USB mass storage device and is + * enumerated under EHCI first. + * + * We write into the xHC's PCI configuration space in some Intel-specific + * registers to switch the ports over. The USB 3.0 terminations and the USB + * 2.0 data wires are switched separately. We want to enable the SuperSpeed + * terminations before switching the USB 2.0 wires over, so that USB 3.0 + * devices connect at SuperSpeed, rather than at USB 2.0 speeds. + */ +void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) +{ + u32 ports_available; + + /* Don't switchover the ports if the user hasn't compiled the xHCI + * driver. Otherwise they will see "dead" USB ports that don't power + * the devices. + */ + if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) { + dev_warn(&xhci_pdev->dev, + "CONFIG_USB_XHCI_HCD is turned off, " + "defaulting to EHCI.\n"); + dev_warn(&xhci_pdev->dev, + "USB 3.0 devices will work at USB 2.0 speeds.\n"); + usb_disable_xhci_ports(xhci_pdev); + return; + } + + /* Read USB3PRM, the USB 3.0 Port Routing Mask Register + * Indicate the ports that can be changed from OS. + */ + pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM, + &ports_available); + + dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n", + ports_available); + + /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable + * Register, to turn on SuperSpeed terminations for the + * switchable ports. + */ + pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, + cpu_to_le32(ports_available)); + + pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, + &ports_available); + dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled " + "under xHCI: 0x%x\n", ports_available); + + /* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register + * Indicate the USB 2.0 ports to be controlled by the xHCI host. 
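+ * (The *PRM registers only report which ports the chipset allows to be
+ * re-routed; writing that same mask back to USB3_PSSEN/XUSB2PR, as done
+ * above and below, is what actually moves the ports over to xHCI.)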
+ */
+
+	pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM,
+			&ports_available);
+
+	dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n",
+			ports_available);
+
+	/* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to
+	 * switch the USB 2.0 power and data lines over to the xHCI
+	 * host.
+	 */
+	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+			cpu_to_le32(ports_available));
+
+	pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR,
+			&ports_available);
+	dev_dbg(&xhci_pdev->dev, "USB 2.0 ports that are now switched over "
+			"to xHCI: 0x%x\n", ports_available);
+}
+EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
+
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+	pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
+	pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
+}
+EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
+
+/**
+ * PCI Quirks for xHCI.
+ *
+ * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS.
+ * It signals to the BIOS that the OS wants control of the host controller,
+ * and then waits 5 seconds for the BIOS to hand over control.
+ * If we time out, assume the BIOS is broken and take control anyway.
+ */
+static void quirk_usb_handoff_xhci(struct pci_dev *pdev)
+{
+	void __iomem *base;
+	int ext_cap_offset;
+	void __iomem *op_reg_base;
+	u32 val;
+	int timeout;
+	int len = pci_resource_len(pdev, 0);
+
+	if (!mmio_resource_enabled(pdev, 0))
+		return;
+
+	base = ioremap_nocache(pci_resource_start(pdev, 0), len);
+	if (base == NULL)
+		return;
+
+	/*
+	 * Find the Legacy Support Capability register -
+	 * this is optional for xHCI host controllers.
+	 */
+	ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET);
+	do {
+		if ((ext_cap_offset + sizeof(val)) > len) {
+			/* We're reading garbage from the controller */
+			dev_warn(&pdev->dev,
+				 "xHCI controller failing to respond\n");
+			return;
+		}
+
+		if (!ext_cap_offset)
+			/* We've reached the end of the extended capabilities */
+			goto hc_init;
+
+		val = readl(base + ext_cap_offset);
+		if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY)
+			break;
+		ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset);
+	} while (1);
+
+	/* If the BIOS owns the HC, signal that the OS wants it, and wait */
+	if (val & XHCI_HC_BIOS_OWNED) {
+		writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset);
+
+		/* Wait for 5 seconds with 10 microsecond polling interval */
+		timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED,
+				0, 5000, 10);
+
+		/* Assume a buggy BIOS and take HC ownership anyway */
+		if (timeout) {
+			dev_warn(&pdev->dev, "xHCI BIOS handoff failed"
+					" (BIOS bug?) %08x\n", val);
+			writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset);
+		}
+	}
+
+	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+	/* Mask off (turn off) any enabled SMIs */
+	val &= XHCI_LEGACY_DISABLE_SMI;
+	/* Mask all SMI events bits, RW1C */
+	val |= XHCI_LEGACY_SMI_EVENTS;
+	/* Disable any BIOS SMIs and clear all SMI events */
+	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+
+hc_init:
+	if (usb_is_intel_switchable_xhci(pdev))
+		usb_enable_xhci_ports(pdev);
+
+	op_reg_base = base + XHCI_HC_LENGTH(readl(base));
+
+	/* Wait for the host controller to be ready before writing any
+	 * operational or runtime registers. Wait 5 seconds and no more.
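+ * (The bit polled below is CNR, "Controller Not Ready"; the xHCI spec
+ * requires software to wait for CNR to clear after reset before touching
+ * the doorbell, operational, or runtime registers.)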
+ */ + timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, + 5000, 10); + /* Assume a buggy HC and start HC initialization anyway */ + if (timeout) { + val = readl(op_reg_base + XHCI_STS_OFFSET); + dev_warn(&pdev->dev, + "xHCI HW not ready after 5 sec (HC bug?) " + "status = 0x%x\n", val); + } + + /* Send the halt and disable interrupts command */ + val = readl(op_reg_base + XHCI_CMD_OFFSET); + val &= ~(XHCI_CMD_RUN | XHCI_IRQS); + writel(val, op_reg_base + XHCI_CMD_OFFSET); + + /* Wait for the HC to halt - poll every 125 usec (one microframe). */ + timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1, + XHCI_MAX_HALT_USEC, 125); + if (timeout) { + val = readl(op_reg_base + XHCI_STS_OFFSET); + dev_warn(&pdev->dev, + "xHCI HW did not halt within %d usec " + "status = 0x%x\n", XHCI_MAX_HALT_USEC, val); + } + + iounmap(base); +} + +static void quirk_usb_early_handoff(struct pci_dev *pdev) +{ + /* Skip Netlogic mips SoC's internal PCI USB controller. + * This device does not need/support EHCI/OHCI handoff + */ + if (pdev->vendor == 0x184e) /* vendor Netlogic */ + return; + if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI && + pdev->class != PCI_CLASS_SERIAL_USB_OHCI && + pdev->class != PCI_CLASS_SERIAL_USB_EHCI && + pdev->class != PCI_CLASS_SERIAL_USB_XHCI) + return; + + if (pci_enable_device(pdev) < 0) { + dev_warn(&pdev->dev, "Can't enable PCI device, " + "BIOS handoff failed.\n"); + return; + } + if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI) + quirk_usb_handoff_uhci(pdev); + else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI) + quirk_usb_handoff_ohci(pdev); + else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI) + quirk_usb_disable_ehci(pdev); + else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI) + quirk_usb_handoff_xhci(pdev); + pci_disable_device(pdev); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); diff --git a/addons/ehci-pci/src/3.10.108/pci-quirks.h b/addons/ehci-pci/src/3.10.108/pci-quirks.h new file mode 100644 index 00000000..7f69a391 --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/pci-quirks.h @@ -0,0 +1,21 @@ +#ifndef __LINUX_USB_PCI_QUIRKS_H +#define __LINUX_USB_PCI_QUIRKS_H + +#ifdef CONFIG_PCI +void uhci_reset_hc(struct pci_dev *pdev, unsigned long base); +int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base); +int usb_amd_find_chipset_info(void); +void usb_amd_dev_put(void); +void usb_amd_quirk_pll_disable(void); +void usb_amd_quirk_pll_enable(void); +bool usb_is_intel_switchable_xhci(struct pci_dev *pdev); +void usb_enable_xhci_ports(struct pci_dev *xhci_pdev); +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); +#else +static inline void usb_amd_quirk_pll_disable(void) {} +static inline void usb_amd_quirk_pll_enable(void) {} +static inline void usb_amd_dev_put(void) {} +static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} +#endif /* CONFIG_PCI */ + +#endif /* __LINUX_USB_PCI_QUIRKS_H */ diff --git a/addons/ehci-pci/src/3.10.108/xhci-ext-caps.h b/addons/ehci-pci/src/3.10.108/xhci-ext-caps.h new file mode 100644 index 00000000..377f4242 --- /dev/null +++ b/addons/ehci-pci/src/3.10.108/xhci-ext-caps.h @@ -0,0 +1,155 @@ +/* + * xHCI host controller driver + * + * Copyright (C) 2008 Intel Corp. + * + * Author: Sarah Sharp + * Some code borrowed from the Linux EHCI driver. 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+/* Up to 16 ms to halt an HC */
+#define XHCI_MAX_HALT_USEC	(16*1000)
+/* HC not running - set to 1 when run/stop bit is cleared. */
+#define XHCI_STS_HALT		(1<<0)
+
+/* HCCPARAMS offset from PCI base address */
+#define XHCI_HCC_PARAMS_OFFSET	0x10
+/* HCCPARAMS contains the first extended capability pointer */
+#define XHCI_HCC_EXT_CAPS(p)	(((p)>>16)&0xffff)
+
+/* Command and Status registers offset from the Operational Registers address */
+#define XHCI_CMD_OFFSET		0x00
+#define XHCI_STS_OFFSET		0x04
+
+#define XHCI_MAX_EXT_CAPS	50
+
+/* Capability Register */
+/* bits 7:0 - how long is the Capabilities register */
+#define XHCI_HC_LENGTH(p)	(((p)>>00)&0x00ff)
+
+/* Extended capability register fields */
+#define XHCI_EXT_CAPS_ID(p)	(((p)>>0)&0xff)
+#define XHCI_EXT_CAPS_NEXT(p)	(((p)>>8)&0xff)
+#define XHCI_EXT_CAPS_VAL(p)	((p)>>16)
+/* Extended capability IDs - ID 0 reserved */
+#define XHCI_EXT_CAPS_LEGACY	1
+#define XHCI_EXT_CAPS_PROTOCOL	2
+#define XHCI_EXT_CAPS_PM	3
+#define XHCI_EXT_CAPS_VIRT	4
+#define XHCI_EXT_CAPS_ROUTE	5
+/* IDs 6-9 reserved */
+#define XHCI_EXT_CAPS_DEBUG	10
+/* USB Legacy Support Capability - section 7.1.1 */
+#define XHCI_HC_BIOS_OWNED	(1 << 16)
+#define XHCI_HC_OS_OWNED	(1 << 24)
+
+/* USB Legacy Support Capability - section 7.1.1 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_SUPPORT_OFFSET	(0x00)
+
+/* USB Legacy Support Control and Status Register - section 7.1.2 */
+/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
+#define XHCI_LEGACY_CONTROL_OFFSET	(0x04)
+/* bits 1:3, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
+#define XHCI_LEGACY_DISABLE_SMI		((0x7 << 1) + (0xff << 5) + (0x7 << 17))
+#define XHCI_LEGACY_SMI_EVENTS		(0x7 << 29)
+
+/* USB 2.0 xHCI 0.96 L1C capability - section 7.2.2.1.3.2 */
+#define XHCI_L1C		(1 << 16)
+
+/* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */
+#define XHCI_HLC		(1 << 19)
+
+/* command register values to disable interrupts and halt the HC */
+/* start/stop HC execution - do not write unless HC is halted */
+#define XHCI_CMD_RUN		(1 << 0)
+/* Event Interrupt Enable - get irq when EINT bit is set in USBSTS register */
+#define XHCI_CMD_EIE		(1 << 2)
+/* Host System Error Interrupt Enable - get irq when HSEIE bit set in USBSTS */
+#define XHCI_CMD_HSEIE		(1 << 3)
+/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
+#define XHCI_CMD_EWE		(1 << 10)
+
+#define XHCI_IRQS		(XHCI_CMD_EIE | XHCI_CMD_HSEIE | XHCI_CMD_EWE)
+
+/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
+#define XHCI_STS_CNR		(1 << 11)
+
+#include <linux/io.h>
+
+/**
+ * Return the next extended capability pointer register.
+ *
+ * @base	PCI register base address.
+ *
+ * @ext_offset	Offset of the 32-bit register that contains the extended
+ * capabilities pointer.
If searching for the first extended capability, pass + * in XHCI_HCC_PARAMS_OFFSET. If searching for the next extended capability, + * pass in the offset of the current extended capability register. + * + * Returns 0 if there is no next extended capability register or returns the register offset + * from the PCI registers base address. + */ +static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset) +{ + u32 next; + + next = readl(base + ext_offset); + + if (ext_offset == XHCI_HCC_PARAMS_OFFSET) { + /* Find the first extended capability */ + next = XHCI_HCC_EXT_CAPS(next); + ext_offset = 0; + } else { + /* Find the next extended capability */ + next = XHCI_EXT_CAPS_NEXT(next); + } + + if (!next) + return 0; + /* + * Address calculation from offset of extended capabilities + * (or HCCPARAMS) register - see section 5.3.6 and section 7. + */ + return ext_offset + (next << 2); +} + +/** + * Find the offset of the extended capabilities with capability ID id. + * + * @base PCI MMIO registers base address. + * @ext_offset Offset from base of the first extended capability to look at, + * or the address of HCCPARAMS. + * @id Extended capability ID to search for. + * + * This uses an arbitrary limit of XHCI_MAX_EXT_CAPS extended capabilities + * to make sure that the list doesn't contain a loop. + */ +static inline int xhci_find_ext_cap_by_id(void __iomem *base, int ext_offset, int id) +{ + u32 val; + int limit = XHCI_MAX_EXT_CAPS; + + while (ext_offset && limit > 0) { + val = readl(base + ext_offset); + if (XHCI_EXT_CAPS_ID(val) == id) + break; + ext_offset = xhci_find_next_cap_offset(base, ext_offset); + limit--; + } + if (limit > 0) + return ext_offset; + return 0; +} diff --git a/addons/ehci-pci/src/4.4.180/Makefile b/addons/ehci-pci/src/4.4.180/Makefile new file mode 100644 index 00000000..898ba621 --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/Makefile @@ -0,0 +1,3 @@ +obj-y += pci-quirks.o +obj-m += ehci-hcd.o +obj-m += ehci-pci.o diff --git a/addons/ehci-pci/src/4.4.180/ehci-dbg.c b/addons/ehci-pci/src/4.4.180/ehci-dbg.c new file mode 100644 index 00000000..8e0b9377 --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/ehci-dbg.c @@ -0,0 +1,1085 @@ +/* + * Copyright (c) 2001-2002 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* this file is part of ehci-hcd.c */ + +#ifdef CONFIG_DYNAMIC_DEBUG + +/* check the values in the HCSPARAMS register + * (host controller _Structural_ parameters) + * see EHCI spec, Table 2-4 for each value + */ +static void dbg_hcs_params (struct ehci_hcd *ehci, char *label) +{ + u32 params = ehci_readl(ehci, &ehci->caps->hcs_params); + + ehci_dbg (ehci, + "%s hcs_params 0x%x dbg=%d%s cc=%d pcc=%d%s%s ports=%d\n", + label, params, + HCS_DEBUG_PORT (params), + HCS_INDICATOR (params) ? " ind" : "", + HCS_N_CC (params), + HCS_N_PCC (params), + HCS_PORTROUTED (params) ? 
"" : " ordered", + HCS_PPC (params) ? "" : " !ppc", + HCS_N_PORTS (params) + ); + /* Port routing, per EHCI 0.95 Spec, Section 2.2.5 */ + if (HCS_PORTROUTED (params)) { + int i; + char buf [46], tmp [7], byte; + + buf[0] = 0; + for (i = 0; i < HCS_N_PORTS (params); i++) { + // FIXME MIPS won't readb() ... + byte = readb (&ehci->caps->portroute[(i>>1)]); + sprintf(tmp, "%d ", + ((i & 0x1) ? ((byte)&0xf) : ((byte>>4)&0xf))); + strcat(buf, tmp); + } + ehci_dbg (ehci, "%s portroute %s\n", + label, buf); + } +} +#else + +static inline void dbg_hcs_params (struct ehci_hcd *ehci, char *label) {} + +#endif + +#ifdef CONFIG_DYNAMIC_DEBUG + +/* check the values in the HCCPARAMS register + * (host controller _Capability_ parameters) + * see EHCI Spec, Table 2-5 for each value + * */ +static void dbg_hcc_params (struct ehci_hcd *ehci, char *label) +{ + u32 params = ehci_readl(ehci, &ehci->caps->hcc_params); + + if (HCC_ISOC_CACHE (params)) { + ehci_dbg (ehci, + "%s hcc_params %04x caching frame %s%s%s\n", + label, params, + HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024", + HCC_CANPARK(params) ? " park" : "", + HCC_64BIT_ADDR(params) ? " 64 bit addr" : ""); + } else { + ehci_dbg (ehci, + "%s hcc_params %04x thresh %d uframes %s%s%s%s%s%s%s\n", + label, + params, + HCC_ISOC_THRES(params), + HCC_PGM_FRAMELISTLEN(params) ? "256/512/1024" : "1024", + HCC_CANPARK(params) ? " park" : "", + HCC_64BIT_ADDR(params) ? " 64 bit addr" : "", + HCC_LPM(params) ? " LPM" : "", + HCC_PER_PORT_CHANGE_EVENT(params) ? " ppce" : "", + HCC_HW_PREFETCH(params) ? " hw prefetch" : "", + HCC_32FRAME_PERIODIC_LIST(params) ? + " 32 periodic list" : ""); + } +} +#else + +static inline void dbg_hcc_params (struct ehci_hcd *ehci, char *label) {} + +#endif + +#ifdef CONFIG_DYNAMIC_DEBUG + +static void __maybe_unused +dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd) +{ + ehci_dbg(ehci, "%s td %p n%08x %08x t%08x p0=%08x\n", label, qtd, + hc32_to_cpup(ehci, &qtd->hw_next), + hc32_to_cpup(ehci, &qtd->hw_alt_next), + hc32_to_cpup(ehci, &qtd->hw_token), + hc32_to_cpup(ehci, &qtd->hw_buf [0])); + if (qtd->hw_buf [1]) + ehci_dbg(ehci, " p1=%08x p2=%08x p3=%08x p4=%08x\n", + hc32_to_cpup(ehci, &qtd->hw_buf[1]), + hc32_to_cpup(ehci, &qtd->hw_buf[2]), + hc32_to_cpup(ehci, &qtd->hw_buf[3]), + hc32_to_cpup(ehci, &qtd->hw_buf[4])); +} + +static void __maybe_unused +dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + struct ehci_qh_hw *hw = qh->hw; + + ehci_dbg (ehci, "%s qh %p n%08x info %x %x qtd %x\n", label, + qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current); + dbg_qtd("overlay", ehci, (struct ehci_qtd *) &hw->hw_qtd_next); +} + +static void __maybe_unused +dbg_itd (const char *label, struct ehci_hcd *ehci, struct ehci_itd *itd) +{ + ehci_dbg (ehci, "%s [%d] itd %p, next %08x, urb %p\n", + label, itd->frame, itd, hc32_to_cpu(ehci, itd->hw_next), + itd->urb); + ehci_dbg (ehci, + " trans: %08x %08x %08x %08x %08x %08x %08x %08x\n", + hc32_to_cpu(ehci, itd->hw_transaction[0]), + hc32_to_cpu(ehci, itd->hw_transaction[1]), + hc32_to_cpu(ehci, itd->hw_transaction[2]), + hc32_to_cpu(ehci, itd->hw_transaction[3]), + hc32_to_cpu(ehci, itd->hw_transaction[4]), + hc32_to_cpu(ehci, itd->hw_transaction[5]), + hc32_to_cpu(ehci, itd->hw_transaction[6]), + hc32_to_cpu(ehci, itd->hw_transaction[7])); + ehci_dbg (ehci, + " buf: %08x %08x %08x %08x %08x %08x %08x\n", + hc32_to_cpu(ehci, itd->hw_bufp[0]), + hc32_to_cpu(ehci, itd->hw_bufp[1]), + hc32_to_cpu(ehci, itd->hw_bufp[2]), + 
hc32_to_cpu(ehci, itd->hw_bufp[3]), + hc32_to_cpu(ehci, itd->hw_bufp[4]), + hc32_to_cpu(ehci, itd->hw_bufp[5]), + hc32_to_cpu(ehci, itd->hw_bufp[6])); + ehci_dbg (ehci, " index: %d %d %d %d %d %d %d %d\n", + itd->index[0], itd->index[1], itd->index[2], + itd->index[3], itd->index[4], itd->index[5], + itd->index[6], itd->index[7]); +} + +static void __maybe_unused +dbg_sitd (const char *label, struct ehci_hcd *ehci, struct ehci_sitd *sitd) +{ + ehci_dbg (ehci, "%s [%d] sitd %p, next %08x, urb %p\n", + label, sitd->frame, sitd, hc32_to_cpu(ehci, sitd->hw_next), + sitd->urb); + ehci_dbg (ehci, + " addr %08x sched %04x result %08x buf %08x %08x\n", + hc32_to_cpu(ehci, sitd->hw_fullspeed_ep), + hc32_to_cpu(ehci, sitd->hw_uframe), + hc32_to_cpu(ehci, sitd->hw_results), + hc32_to_cpu(ehci, sitd->hw_buf[0]), + hc32_to_cpu(ehci, sitd->hw_buf[1])); +} + +static int __maybe_unused +dbg_status_buf (char *buf, unsigned len, const char *label, u32 status) +{ + return scnprintf (buf, len, + "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s%s", + label, label [0] ? " " : "", status, + (status & STS_PPCE_MASK) ? " PPCE" : "", + (status & STS_ASS) ? " Async" : "", + (status & STS_PSS) ? " Periodic" : "", + (status & STS_RECL) ? " Recl" : "", + (status & STS_HALT) ? " Halt" : "", + (status & STS_IAA) ? " IAA" : "", + (status & STS_FATAL) ? " FATAL" : "", + (status & STS_FLR) ? " FLR" : "", + (status & STS_PCD) ? " PCD" : "", + (status & STS_ERR) ? " ERR" : "", + (status & STS_INT) ? " INT" : "" + ); +} + +static int __maybe_unused +dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable) +{ + return scnprintf (buf, len, + "%s%sintrenable %02x%s%s%s%s%s%s%s", + label, label [0] ? " " : "", enable, + (enable & STS_PPCE_MASK) ? " PPCE" : "", + (enable & STS_IAA) ? " IAA" : "", + (enable & STS_FATAL) ? " FATAL" : "", + (enable & STS_FLR) ? " FLR" : "", + (enable & STS_PCD) ? " PCD" : "", + (enable & STS_ERR) ? " ERR" : "", + (enable & STS_INT) ? " INT" : "" + ); +} + +static const char *const fls_strings [] = + { "1024", "512", "256", "??" }; + +static int +dbg_command_buf (char *buf, unsigned len, const char *label, u32 command) +{ + return scnprintf (buf, len, + "%s%scommand %07x %s%s%s%s%s%s=%d ithresh=%d%s%s%s%s " + "period=%s%s %s", + label, label [0] ? " " : "", command, + (command & CMD_HIRD) ? " HIRD" : "", + (command & CMD_PPCEE) ? " PPCEE" : "", + (command & CMD_FSP) ? " FSP" : "", + (command & CMD_ASPE) ? " ASPE" : "", + (command & CMD_PSPE) ? " PSPE" : "", + (command & CMD_PARK) ? " park" : "(park)", + CMD_PARK_CNT (command), + (command >> 16) & 0x3f, + (command & CMD_LRESET) ? " LReset" : "", + (command & CMD_IAAD) ? " IAAD" : "", + (command & CMD_ASE) ? " Async" : "", + (command & CMD_PSE) ? " Periodic" : "", + fls_strings [(command >> 2) & 0x3], + (command & CMD_RESET) ? " Reset" : "", + (command & CMD_RUN) ? "RUN" : "HALT" + ); +} + +static int +dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status) +{ + char *sig; + + /* signaling state */ + switch (status & (3 << 10)) { + case 0 << 10: sig = "se0"; break; + case 1 << 10: sig = "k"; break; /* low speed */ + case 2 << 10: sig = "j"; break; + default: sig = "?"; break; + } + + return scnprintf (buf, len, + "%s%sport:%d status %06x %d %s%s%s%s%s%s " + "sig=%s%s%s%s%s%s%s%s%s%s%s", + label, label [0] ? " " : "", port, status, + status>>25,/*device address */ + (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ACK ? + " ACK" : "", + (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_NYET ? 
+ " NYET" : "", + (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_STALL ? + " STALL" : "", + (status & PORT_SSTS)>>23 == PORTSC_SUSPEND_STS_ERR ? + " ERR" : "", + (status & PORT_POWER) ? " POWER" : "", + (status & PORT_OWNER) ? " OWNER" : "", + sig, + (status & PORT_LPM) ? " LPM" : "", + (status & PORT_RESET) ? " RESET" : "", + (status & PORT_SUSPEND) ? " SUSPEND" : "", + (status & PORT_RESUME) ? " RESUME" : "", + (status & PORT_OCC) ? " OCC" : "", + (status & PORT_OC) ? " OC" : "", + (status & PORT_PEC) ? " PEC" : "", + (status & PORT_PE) ? " PE" : "", + (status & PORT_CSC) ? " CSC" : "", + (status & PORT_CONNECT) ? " CONNECT" : ""); +} + +#else +static inline void __maybe_unused +dbg_qh (char *label, struct ehci_hcd *ehci, struct ehci_qh *qh) +{} + +static inline int __maybe_unused +dbg_status_buf (char *buf, unsigned len, const char *label, u32 status) +{ return 0; } + +static inline int __maybe_unused +dbg_command_buf (char *buf, unsigned len, const char *label, u32 command) +{ return 0; } + +static inline int __maybe_unused +dbg_intr_buf (char *buf, unsigned len, const char *label, u32 enable) +{ return 0; } + +static inline int __maybe_unused +dbg_port_buf (char *buf, unsigned len, const char *label, int port, u32 status) +{ return 0; } + +#endif /* CONFIG_DYNAMIC_DEBUG */ + +/* functions have the "wrong" filename when they're output... */ +#define dbg_status(ehci, label, status) { \ + char _buf [80]; \ + dbg_status_buf (_buf, sizeof _buf, label, status); \ + ehci_dbg (ehci, "%s\n", _buf); \ +} + +#define dbg_cmd(ehci, label, command) { \ + char _buf [80]; \ + dbg_command_buf (_buf, sizeof _buf, label, command); \ + ehci_dbg (ehci, "%s\n", _buf); \ +} + +#define dbg_port(ehci, label, port, status) { \ + char _buf [80]; \ + dbg_port_buf (_buf, sizeof _buf, label, port, status); \ + ehci_dbg (ehci, "%s\n", _buf); \ +} + +/*-------------------------------------------------------------------------*/ + +#ifdef STUB_DEBUG_FILES + +static inline void create_debug_files (struct ehci_hcd *bus) { } +static inline void remove_debug_files (struct ehci_hcd *bus) { } + +#else + +/* troubleshooting help: expose state in debugfs */ + +static int debug_async_open(struct inode *, struct file *); +static int debug_bandwidth_open(struct inode *, struct file *); +static int debug_periodic_open(struct inode *, struct file *); +static int debug_registers_open(struct inode *, struct file *); + +static ssize_t debug_output(struct file*, char __user*, size_t, loff_t*); +static int debug_close(struct inode *, struct file *); + +static const struct file_operations debug_async_fops = { + .owner = THIS_MODULE, + .open = debug_async_open, + .read = debug_output, + .release = debug_close, + .llseek = default_llseek, +}; +static const struct file_operations debug_bandwidth_fops = { + .owner = THIS_MODULE, + .open = debug_bandwidth_open, + .read = debug_output, + .release = debug_close, + .llseek = default_llseek, +}; +static const struct file_operations debug_periodic_fops = { + .owner = THIS_MODULE, + .open = debug_periodic_open, + .read = debug_output, + .release = debug_close, + .llseek = default_llseek, +}; +static const struct file_operations debug_registers_fops = { + .owner = THIS_MODULE, + .open = debug_registers_open, + .read = debug_output, + .release = debug_close, + .llseek = default_llseek, +}; + +static struct dentry *ehci_debug_root; + +struct debug_buffer { + ssize_t (*fill_func)(struct debug_buffer *); /* fill method */ + struct usb_bus *bus; + struct mutex mutex; /* protect filling of buffer */ + 
size_t count; /* number of characters filled into buffer */ + char *output_buf; + size_t alloc_size; +}; + +#define speed_char(info1) ({ char tmp; \ + switch (info1 & (3 << 12)) { \ + case QH_FULL_SPEED: tmp = 'f'; break; \ + case QH_LOW_SPEED: tmp = 'l'; break; \ + case QH_HIGH_SPEED: tmp = 'h'; break; \ + default: tmp = '?'; break; \ + } tmp; }) + +static inline char token_mark(struct ehci_hcd *ehci, __hc32 token) +{ + __u32 v = hc32_to_cpu(ehci, token); + + if (v & QTD_STS_ACTIVE) + return '*'; + if (v & QTD_STS_HALT) + return '-'; + if (!IS_SHORT_READ (v)) + return ' '; + /* tries to advance through hw_alt_next */ + return '/'; +} + +static void qh_lines ( + struct ehci_hcd *ehci, + struct ehci_qh *qh, + char **nextp, + unsigned *sizep +) +{ + u32 scratch; + u32 hw_curr; + struct list_head *entry; + struct ehci_qtd *td; + unsigned temp; + unsigned size = *sizep; + char *next = *nextp; + char mark; + __le32 list_end = EHCI_LIST_END(ehci); + struct ehci_qh_hw *hw = qh->hw; + + if (hw->hw_qtd_next == list_end) /* NEC does this */ + mark = '@'; + else + mark = token_mark(ehci, hw->hw_token); + if (mark == '/') { /* qh_alt_next controls qh advance? */ + if ((hw->hw_alt_next & QTD_MASK(ehci)) + == ehci->async->hw->hw_alt_next) + mark = '#'; /* blocked */ + else if (hw->hw_alt_next == list_end) + mark = '.'; /* use hw_qtd_next */ + /* else alt_next points to some other qtd */ + } + scratch = hc32_to_cpup(ehci, &hw->hw_info1); + hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &hw->hw_current) : 0; + temp = scnprintf (next, size, + "qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)", + qh, scratch & 0x007f, + speed_char (scratch), + (scratch >> 8) & 0x000f, + scratch, hc32_to_cpup(ehci, &hw->hw_info2), + hc32_to_cpup(ehci, &hw->hw_token), mark, + (cpu_to_hc32(ehci, QTD_TOGGLE) & hw->hw_token) + ? "data1" : "data0", + (hc32_to_cpup(ehci, &hw->hw_alt_next) >> 1) & 0x0f); + size -= temp; + next += temp; + + /* hc may be modifying the list as we read it ... */ + list_for_each (entry, &qh->qtd_list) { + td = list_entry (entry, struct ehci_qtd, qtd_list); + scratch = hc32_to_cpup(ehci, &td->hw_token); + mark = ' '; + if (hw_curr == td->qtd_dma) + mark = '*'; + else if (hw->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma)) + mark = '+'; + else if (QTD_LENGTH (scratch)) { + if (td->hw_alt_next == ehci->async->hw->hw_alt_next) + mark = '#'; + else if (td->hw_alt_next != list_end) + mark = '/'; + } + temp = snprintf (next, size, + "\n\t%p%c%s len=%d %08x urb %p", + td, mark, ({ char *tmp; + switch ((scratch>>8)&0x03) { + case 0: tmp = "out"; break; + case 1: tmp = "in"; break; + case 2: tmp = "setup"; break; + default: tmp = "?"; break; + } tmp;}), + (scratch >> 16) & 0x7fff, + scratch, + td->urb); + if (size < temp) + temp = size; + size -= temp; + next += temp; + if (temp == size) + goto done; + } + + temp = snprintf (next, size, "\n"); + if (size < temp) + temp = size; + size -= temp; + next += temp; + +done: + *sizep = size; + *nextp = next; +} + +static ssize_t fill_async_buffer(struct debug_buffer *buf) +{ + struct usb_hcd *hcd; + struct ehci_hcd *ehci; + unsigned long flags; + unsigned temp, size; + char *next; + struct ehci_qh *qh; + + hcd = bus_to_hcd(buf->bus); + ehci = hcd_to_ehci (hcd); + next = buf->output_buf; + size = buf->alloc_size; + + *next = 0; + + /* dumps a snapshot of the async schedule. + * usually empty except for long-term bulk reads, or head. 
+ * one QH per line, and TDs we know about + */ + spin_lock_irqsave (&ehci->lock, flags); + for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh) + qh_lines (ehci, qh, &next, &size); + if (!list_empty(&ehci->async_unlink) && size > 0) { + temp = scnprintf(next, size, "\nunlink =\n"); + size -= temp; + next += temp; + + list_for_each_entry(qh, &ehci->async_unlink, unlink_node) { + if (size <= 0) + break; + qh_lines(ehci, qh, &next, &size); + } + } + spin_unlock_irqrestore (&ehci->lock, flags); + + return strlen(buf->output_buf); +} + +static ssize_t fill_bandwidth_buffer(struct debug_buffer *buf) +{ + struct ehci_hcd *ehci; + struct ehci_tt *tt; + struct ehci_per_sched *ps; + unsigned temp, size; + char *next; + unsigned i; + u8 *bw; + u16 *bf; + u8 budget[EHCI_BANDWIDTH_SIZE]; + + ehci = hcd_to_ehci(bus_to_hcd(buf->bus)); + next = buf->output_buf; + size = buf->alloc_size; + + *next = 0; + + spin_lock_irq(&ehci->lock); + + /* Dump the HS bandwidth table */ + temp = scnprintf(next, size, + "HS bandwidth allocation (us per microframe)\n"); + size -= temp; + next += temp; + for (i = 0; i < EHCI_BANDWIDTH_SIZE; i += 8) { + bw = &ehci->bandwidth[i]; + temp = scnprintf(next, size, + "%2u: %4u%4u%4u%4u%4u%4u%4u%4u\n", + i, bw[0], bw[1], bw[2], bw[3], + bw[4], bw[5], bw[6], bw[7]); + size -= temp; + next += temp; + } + + /* Dump all the FS/LS tables */ + list_for_each_entry(tt, &ehci->tt_list, tt_list) { + temp = scnprintf(next, size, + "\nTT %s port %d FS/LS bandwidth allocation (us per frame)\n", + dev_name(&tt->usb_tt->hub->dev), + tt->tt_port + !!tt->usb_tt->multi); + size -= temp; + next += temp; + + bf = tt->bandwidth; + temp = scnprintf(next, size, + " %5u%5u%5u%5u%5u%5u%5u%5u\n", + bf[0], bf[1], bf[2], bf[3], + bf[4], bf[5], bf[6], bf[7]); + size -= temp; + next += temp; + + temp = scnprintf(next, size, + "FS/LS budget (us per microframe)\n"); + size -= temp; + next += temp; + compute_tt_budget(budget, tt); + for (i = 0; i < EHCI_BANDWIDTH_SIZE; i += 8) { + bw = &budget[i]; + temp = scnprintf(next, size, + "%2u: %4u%4u%4u%4u%4u%4u%4u%4u\n", + i, bw[0], bw[1], bw[2], bw[3], + bw[4], bw[5], bw[6], bw[7]); + size -= temp; + next += temp; + } + list_for_each_entry(ps, &tt->ps_list, ps_list) { + temp = scnprintf(next, size, + "%s ep %02x: %4u @ %2u.%u+%u mask %04x\n", + dev_name(&ps->udev->dev), + ps->ep->desc.bEndpointAddress, + ps->tt_usecs, + ps->bw_phase, ps->phase_uf, + ps->bw_period, ps->cs_mask); + size -= temp; + next += temp; + } + } + spin_unlock_irq(&ehci->lock); + + return next - buf->output_buf; +} + +#define DBG_SCHED_LIMIT 64 +static ssize_t fill_periodic_buffer(struct debug_buffer *buf) +{ + struct usb_hcd *hcd; + struct ehci_hcd *ehci; + unsigned long flags; + union ehci_shadow p, *seen; + unsigned temp, size, seen_count; + char *next; + unsigned i; + __hc32 tag; + + seen = kmalloc(DBG_SCHED_LIMIT * sizeof *seen, GFP_ATOMIC); + if (!seen) + return 0; + seen_count = 0; + + hcd = bus_to_hcd(buf->bus); + ehci = hcd_to_ehci (hcd); + next = buf->output_buf; + size = buf->alloc_size; + + temp = scnprintf (next, size, "size = %d\n", ehci->periodic_size); + size -= temp; + next += temp; + + /* dump a snapshot of the periodic schedule. + * iso changes, interrupt usually doesn't. 
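+ * (The walk below follows the same Q_NEXT_TYPE() links the hardware
+ * traverses; the "seen" array remembers up to DBG_SCHED_LIMIT QHs so a
+ * QH that appears in many frames is expanded only once.)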
+ */ + spin_lock_irqsave (&ehci->lock, flags); + for (i = 0; i < ehci->periodic_size; i++) { + p = ehci->pshadow [i]; + if (likely (!p.ptr)) + continue; + tag = Q_NEXT_TYPE(ehci, ehci->periodic [i]); + + temp = scnprintf (next, size, "%4d: ", i); + size -= temp; + next += temp; + + do { + struct ehci_qh_hw *hw; + + switch (hc32_to_cpu(ehci, tag)) { + case Q_TYPE_QH: + hw = p.qh->hw; + temp = scnprintf (next, size, " qh%d-%04x/%p", + p.qh->ps.period, + hc32_to_cpup(ehci, + &hw->hw_info2) + /* uframe masks */ + & (QH_CMASK | QH_SMASK), + p.qh); + size -= temp; + next += temp; + /* don't repeat what follows this qh */ + for (temp = 0; temp < seen_count; temp++) { + if (seen [temp].ptr != p.ptr) + continue; + if (p.qh->qh_next.ptr) { + temp = scnprintf (next, size, + " ..."); + size -= temp; + next += temp; + } + break; + } + /* show more info the first time around */ + if (temp == seen_count) { + u32 scratch = hc32_to_cpup(ehci, + &hw->hw_info1); + struct ehci_qtd *qtd; + char *type = ""; + + /* count tds, get ep direction */ + temp = 0; + list_for_each_entry (qtd, + &p.qh->qtd_list, + qtd_list) { + temp++; + switch (0x03 & (hc32_to_cpu( + ehci, + qtd->hw_token) >> 8)) { + case 0: type = "out"; continue; + case 1: type = "in"; continue; + } + } + + temp = scnprintf (next, size, + " (%c%d ep%d%s " + "[%d/%d] q%d p%d)", + speed_char (scratch), + scratch & 0x007f, + (scratch >> 8) & 0x000f, type, + p.qh->ps.usecs, + p.qh->ps.c_usecs, + temp, + 0x7ff & (scratch >> 16)); + + if (seen_count < DBG_SCHED_LIMIT) + seen [seen_count++].qh = p.qh; + } else + temp = 0; + tag = Q_NEXT_TYPE(ehci, hw->hw_next); + p = p.qh->qh_next; + break; + case Q_TYPE_FSTN: + temp = scnprintf (next, size, + " fstn-%8x/%p", p.fstn->hw_prev, + p.fstn); + tag = Q_NEXT_TYPE(ehci, p.fstn->hw_next); + p = p.fstn->fstn_next; + break; + case Q_TYPE_ITD: + temp = scnprintf (next, size, + " itd/%p", p.itd); + tag = Q_NEXT_TYPE(ehci, p.itd->hw_next); + p = p.itd->itd_next; + break; + case Q_TYPE_SITD: + temp = scnprintf (next, size, + " sitd%d-%04x/%p", + p.sitd->stream->ps.period, + hc32_to_cpup(ehci, &p.sitd->hw_uframe) + & 0x0000ffff, + p.sitd); + tag = Q_NEXT_TYPE(ehci, p.sitd->hw_next); + p = p.sitd->sitd_next; + break; + } + size -= temp; + next += temp; + } while (p.ptr); + + temp = scnprintf (next, size, "\n"); + size -= temp; + next += temp; + } + spin_unlock_irqrestore (&ehci->lock, flags); + kfree (seen); + + return buf->alloc_size - size; +} +#undef DBG_SCHED_LIMIT + +static const char *rh_state_string(struct ehci_hcd *ehci) +{ + switch (ehci->rh_state) { + case EHCI_RH_HALTED: + return "halted"; + case EHCI_RH_SUSPENDED: + return "suspended"; + case EHCI_RH_RUNNING: + return "running"; + case EHCI_RH_STOPPING: + return "stopping"; + } + return "?"; +} + +static ssize_t fill_registers_buffer(struct debug_buffer *buf) +{ + struct usb_hcd *hcd; + struct ehci_hcd *ehci; + unsigned long flags; + unsigned temp, size, i; + char *next, scratch [80]; + static char fmt [] = "%*s\n"; + static char label [] = ""; + + hcd = bus_to_hcd(buf->bus); + ehci = hcd_to_ehci (hcd); + next = buf->output_buf; + size = buf->alloc_size; + + spin_lock_irqsave (&ehci->lock, flags); + + if (!HCD_HW_ACCESSIBLE(hcd)) { + size = scnprintf (next, size, + "bus %s, device %s\n" + "%s\n" + "SUSPENDED (no register access)\n", + hcd->self.controller->bus->name, + dev_name(hcd->self.controller), + hcd->product_desc); + goto done; + } + + /* Capability Registers */ + i = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); + temp = scnprintf (next, 
size, + "bus %s, device %s\n" + "%s\n" + "EHCI %x.%02x, rh state %s\n", + hcd->self.controller->bus->name, + dev_name(hcd->self.controller), + hcd->product_desc, + i >> 8, i & 0x0ff, rh_state_string(ehci)); + size -= temp; + next += temp; + +#ifdef CONFIG_PCI + /* EHCI 0.96 and later may have "extended capabilities" */ + if (dev_is_pci(hcd->self.controller)) { + struct pci_dev *pdev; + u32 offset, cap, cap2; + unsigned count = 256/4; + + pdev = to_pci_dev(ehci_to_hcd(ehci)->self.controller); + offset = HCC_EXT_CAPS(ehci_readl(ehci, + &ehci->caps->hcc_params)); + while (offset && count--) { + pci_read_config_dword (pdev, offset, &cap); + switch (cap & 0xff) { + case 1: + temp = scnprintf (next, size, + "ownership %08x%s%s\n", cap, + (cap & (1 << 24)) ? " linux" : "", + (cap & (1 << 16)) ? " firmware" : ""); + size -= temp; + next += temp; + + offset += 4; + pci_read_config_dword (pdev, offset, &cap2); + temp = scnprintf (next, size, + "SMI sts/enable 0x%08x\n", cap2); + size -= temp; + next += temp; + break; + case 0: /* illegal reserved capability */ + cap = 0; + /* FALLTHROUGH */ + default: /* unknown */ + break; + } + offset = (cap >> 8) & 0xff; + } + } +#endif + + // FIXME interpret both types of params + i = ehci_readl(ehci, &ehci->caps->hcs_params); + temp = scnprintf (next, size, "structural params 0x%08x\n", i); + size -= temp; + next += temp; + + i = ehci_readl(ehci, &ehci->caps->hcc_params); + temp = scnprintf (next, size, "capability params 0x%08x\n", i); + size -= temp; + next += temp; + + /* Operational Registers */ + temp = dbg_status_buf (scratch, sizeof scratch, label, + ehci_readl(ehci, &ehci->regs->status)); + temp = scnprintf (next, size, fmt, temp, scratch); + size -= temp; + next += temp; + + temp = dbg_command_buf (scratch, sizeof scratch, label, + ehci_readl(ehci, &ehci->regs->command)); + temp = scnprintf (next, size, fmt, temp, scratch); + size -= temp; + next += temp; + + temp = dbg_intr_buf (scratch, sizeof scratch, label, + ehci_readl(ehci, &ehci->regs->intr_enable)); + temp = scnprintf (next, size, fmt, temp, scratch); + size -= temp; + next += temp; + + temp = scnprintf (next, size, "uframe %04x\n", + ehci_read_frame_index(ehci)); + size -= temp; + next += temp; + + for (i = 1; i <= HCS_N_PORTS (ehci->hcs_params); i++) { + temp = dbg_port_buf (scratch, sizeof scratch, label, i, + ehci_readl(ehci, + &ehci->regs->port_status[i - 1])); + temp = scnprintf (next, size, fmt, temp, scratch); + size -= temp; + next += temp; + if (i == HCS_DEBUG_PORT(ehci->hcs_params) && ehci->debug) { + temp = scnprintf (next, size, + " debug control %08x\n", + ehci_readl(ehci, + &ehci->debug->control)); + size -= temp; + next += temp; + } + } + + if (!list_empty(&ehci->async_unlink)) { + temp = scnprintf(next, size, "async unlink qh %p\n", + list_first_entry(&ehci->async_unlink, + struct ehci_qh, unlink_node)); + size -= temp; + next += temp; + } + +#ifdef EHCI_STATS + temp = scnprintf (next, size, + "irq normal %ld err %ld iaa %ld (lost %ld)\n", + ehci->stats.normal, ehci->stats.error, ehci->stats.iaa, + ehci->stats.lost_iaa); + size -= temp; + next += temp; + + temp = scnprintf (next, size, "complete %ld unlink %ld\n", + ehci->stats.complete, ehci->stats.unlink); + size -= temp; + next += temp; +#endif + +done: + spin_unlock_irqrestore (&ehci->lock, flags); + + return buf->alloc_size - size; +} + +static struct debug_buffer *alloc_buffer(struct usb_bus *bus, + ssize_t (*fill_func)(struct debug_buffer *)) +{ + struct debug_buffer *buf; + + buf = kzalloc(sizeof(struct debug_buffer), 
GFP_KERNEL); + + if (buf) { + buf->bus = bus; + buf->fill_func = fill_func; + mutex_init(&buf->mutex); + buf->alloc_size = PAGE_SIZE; + } + + return buf; +} + +static int fill_buffer(struct debug_buffer *buf) +{ + int ret = 0; + + if (!buf->output_buf) + buf->output_buf = vmalloc(buf->alloc_size); + + if (!buf->output_buf) { + ret = -ENOMEM; + goto out; + } + + ret = buf->fill_func(buf); + + if (ret >= 0) { + buf->count = ret; + ret = 0; + } + +out: + return ret; +} + +static ssize_t debug_output(struct file *file, char __user *user_buf, + size_t len, loff_t *offset) +{ + struct debug_buffer *buf = file->private_data; + int ret = 0; + + mutex_lock(&buf->mutex); + if (buf->count == 0) { + ret = fill_buffer(buf); + if (ret != 0) { + mutex_unlock(&buf->mutex); + goto out; + } + } + mutex_unlock(&buf->mutex); + + ret = simple_read_from_buffer(user_buf, len, offset, + buf->output_buf, buf->count); + +out: + return ret; + +} + +static int debug_close(struct inode *inode, struct file *file) +{ + struct debug_buffer *buf = file->private_data; + + if (buf) { + vfree(buf->output_buf); + kfree(buf); + } + + return 0; +} + +static int debug_async_open(struct inode *inode, struct file *file) +{ + file->private_data = alloc_buffer(inode->i_private, fill_async_buffer); + + return file->private_data ? 0 : -ENOMEM; +} + +static int debug_bandwidth_open(struct inode *inode, struct file *file) +{ + file->private_data = alloc_buffer(inode->i_private, + fill_bandwidth_buffer); + + return file->private_data ? 0 : -ENOMEM; +} + +static int debug_periodic_open(struct inode *inode, struct file *file) +{ + struct debug_buffer *buf; + buf = alloc_buffer(inode->i_private, fill_periodic_buffer); + if (!buf) + return -ENOMEM; + + buf->alloc_size = (sizeof(void *) == 4 ? 6 : 8)*PAGE_SIZE; + file->private_data = buf; + return 0; +} + +static int debug_registers_open(struct inode *inode, struct file *file) +{ + file->private_data = alloc_buffer(inode->i_private, + fill_registers_buffer); + + return file->private_data ? 0 : -ENOMEM; +} + +static inline void create_debug_files (struct ehci_hcd *ehci) +{ + struct usb_bus *bus = &ehci_to_hcd(ehci)->self; + + ehci->debug_dir = debugfs_create_dir(bus->bus_name, ehci_debug_root); + if (!ehci->debug_dir) + return; + + if (!debugfs_create_file("async", S_IRUGO, ehci->debug_dir, bus, + &debug_async_fops)) + goto file_error; + + if (!debugfs_create_file("bandwidth", S_IRUGO, ehci->debug_dir, bus, + &debug_bandwidth_fops)) + goto file_error; + + if (!debugfs_create_file("periodic", S_IRUGO, ehci->debug_dir, bus, + &debug_periodic_fops)) + goto file_error; + + if (!debugfs_create_file("registers", S_IRUGO, ehci->debug_dir, bus, + &debug_registers_fops)) + goto file_error; + + return; + +file_error: + debugfs_remove_recursive(ehci->debug_dir); +} + +static inline void remove_debug_files (struct ehci_hcd *ehci) +{ + debugfs_remove_recursive(ehci->debug_dir); +} + +#endif /* STUB_DEBUG_FILES */ diff --git a/addons/ehci-pci/src/4.4.180/ehci-hcd.c b/addons/ehci-pci/src/4.4.180/ehci-hcd.c new file mode 100644 index 00000000..f7661d97 --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/ehci-hcd.c @@ -0,0 +1,1396 @@ +/* + * Enhanced Host Controller Interface (EHCI) driver for USB. 
+ *
+ * Maintainer: Alan Stern <stern@rowland.harvard.edu>
+ *
+ * Copyright (c) 2000-2004 by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dmapool.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/hrtimer.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/unaligned.h>
+
+#if defined(CONFIG_PPC_PS3)
+#include <asm/firmware.h>
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * EHCI hc_driver implementation ... experimental, incomplete.
+ * Based on the final 1.0 register interface specification.
+ *
+ * USB 2.0 shows up in upcoming www.pcmcia.org technology.
+ * First was PCMCIA, like ISA; then CardBus, which is PCI.
+ * Next comes "CardBay", using USB 2.0 signals.
+ *
+ * Contains additional contributions by Brad Hards, Rory Bolt, and others.
+ * Special thanks to Intel and VIA for providing host controllers to
+ * test this driver on, and Cypress (including In-System Design) for
+ * providing early devices for those host controllers to talk to!
+ */
+
+#define DRIVER_AUTHOR "David Brownell"
+#define DRIVER_DESC "USB 2.0 'Enhanced' Host Controller (EHCI) Driver"
+
+static const char	hcd_name [] = "ehci_hcd";
+
+
+#undef EHCI_URB_TRACE
+
+/* magic numbers that can affect system performance */
+#define	EHCI_TUNE_CERR		3	/* 0-3 qtd retries; 0 == don't stop */
+#define	EHCI_TUNE_RL_HS		4	/* nak throttle; see 4.9 */
+#define	EHCI_TUNE_RL_TT		0
+#define	EHCI_TUNE_MULT_HS	1	/* 1-3 transactions/uframe; 4.10.3 */
+#define	EHCI_TUNE_MULT_TT	1
+/*
+ * Some drivers think it's safe to schedule isochronous transfers more than
+ * 256 ms into the future (partly as a result of an old bug in the scheduling
+ * code). In an attempt to avoid trouble, we will use a minimum scheduling
+ * length of 512 frames instead of 256.
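+ * (EHCI_TUNE_FLS below is the value for the Frame List Size field of
+ * USBCMD: 0 selects a 1024-element periodic frame list, 1 selects 512,
+ * and 2 selects 256; compare the fls_strings table in ehci-dbg.c.)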
+ */
+#define EHCI_TUNE_FLS		1	/* (medium) 512-frame schedule */
+
+/* Initial IRQ latency: faster than hw default */
+static int log2_irq_thresh = 0;		// 0 to 6
+module_param (log2_irq_thresh, int, S_IRUGO);
+MODULE_PARM_DESC (log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");
+
+/* initial park setting: slower than hw default */
+static unsigned park = 0;
+module_param (park, uint, S_IRUGO);
+MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
+
+/* for flakey hardware, ignore overcurrent indicators */
+static bool ignore_oc = 0;
+module_param (ignore_oc, bool, S_IRUGO);
+MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
+
+#define INTR_MASK (STS_IAA | STS_FATAL | STS_PCD | STS_ERR | STS_INT)
+
+/*-------------------------------------------------------------------------*/
+
+#include "ehci.h"
+#include "pci-quirks.h"
+
+static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE],
+		struct ehci_tt *tt);
+
+/*
+ * The MosChip MCS9990 controller updates its microframe counter
+ * a little before the frame counter, and occasionally we will read
+ * the invalid intermediate value. Avoid problems by checking the
+ * microframe number (the low-order 3 bits); if they are 0 then
+ * re-read the register to get the correct value.
+ */
+static unsigned ehci_moschip_read_frame_index(struct ehci_hcd *ehci)
+{
+	unsigned uf;
+
+	uf = ehci_readl(ehci, &ehci->regs->frame_index);
+	if (unlikely((uf & 7) == 0))
+		uf = ehci_readl(ehci, &ehci->regs->frame_index);
+	return uf;
+}
+
+static inline unsigned ehci_read_frame_index(struct ehci_hcd *ehci)
+{
+	if (ehci->frame_index_bug)
+		return ehci_moschip_read_frame_index(ehci);
+	return ehci_readl(ehci, &ehci->regs->frame_index);
+}
+
+#include "ehci-dbg.c"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * ehci_handshake - spin reading hc until handshake completes or fails
+ * @ptr: address of hc register to be read
+ * @mask: bits to look at in result of read
+ * @done: value of those bits when handshake succeeds
+ * @usec: timeout in microseconds
+ *
+ * Returns negative errno, or zero on success
+ *
+ * Success happens when the "mask" bits have the specified value (hardware
+ * handshake done). There are two failure modes: "usec" have passed (major
+ * hardware flakeout), or the register reads as all-ones (hardware removed).
+ *
+ * That last failure should only happen in cases like physical cardbus eject
+ * before driver shutdown. But it also seems to be caused by bugs in cardbus
+ * bridge shutdown: shutting down the bridge before the devices using it.
+ */
+int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr,
+		   u32 mask, u32 done, int usec)
+{
+	u32	result;
+
+	do {
+		result = ehci_readl(ehci, ptr);
+		if (result == ~(u32)0)		/* card removed */
+			return -ENODEV;
+		result &= mask;
+		if (result == done)
+			return 0;
+		udelay (1);
+		usec--;
+	} while (usec > 0);
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(ehci_handshake);
+
+/* check TDI/ARC silicon is in host mode */
+static int tdi_in_host_mode (struct ehci_hcd *ehci)
+{
+	u32	tmp;
+
+	tmp = ehci_readl(ehci, &ehci->regs->usbmode);
+	return (tmp & 3) == USBMODE_CM_HC;
+}
+
+/*
+ * Force HC to halt state from unknown (EHCI spec section 2.3).
+ * Must be called with interrupts enabled and the lock not held.
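+ *
+ * [Editor's note: ehci_halt() below waits for STS_HALT via ehci_handshake(),
+ *  the poll-with-timeout pattern documented above. Reduced to its core,
+ *  assuming a hypothetical read_reg() accessor:
+ *
+ *	do {
+ *		u32 v = read_reg(ptr);
+ *		if (v == ~(u32)0)	// all-ones: hardware is gone
+ *			return -ENODEV;
+ *		if ((v & mask) == done)
+ *			return 0;	// handshake completed
+ *		udelay(1);
+ *	} while (--usec > 0);
+ *	return -ETIMEDOUT;
+ *  ]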
+ */ +static int ehci_halt (struct ehci_hcd *ehci) +{ + u32 temp; + + spin_lock_irq(&ehci->lock); + + /* disable any irqs left enabled by previous code */ + ehci_writel(ehci, 0, &ehci->regs->intr_enable); + + if (ehci_is_TDI(ehci) && !tdi_in_host_mode(ehci)) { + spin_unlock_irq(&ehci->lock); + return 0; + } + + /* + * This routine gets called during probe before ehci->command + * has been initialized, so we can't rely on its value. + */ + ehci->command &= ~CMD_RUN; + temp = ehci_readl(ehci, &ehci->regs->command); + temp &= ~(CMD_RUN | CMD_IAAD); + ehci_writel(ehci, temp, &ehci->regs->command); + + spin_unlock_irq(&ehci->lock); + synchronize_irq(ehci_to_hcd(ehci)->irq); + + return ehci_handshake(ehci, &ehci->regs->status, + STS_HALT, STS_HALT, 16 * 125); +} + +/* put TDI/ARC silicon into EHCI mode */ +static void tdi_reset (struct ehci_hcd *ehci) +{ + u32 tmp; + + tmp = ehci_readl(ehci, &ehci->regs->usbmode); + tmp |= USBMODE_CM_HC; + /* The default byte access to MMR space is LE after + * controller reset. Set the required endian mode + * for transfer buffers to match the host microprocessor + */ + if (ehci_big_endian_mmio(ehci)) + tmp |= USBMODE_BE; + ehci_writel(ehci, tmp, &ehci->regs->usbmode); +} + +/* + * Reset a non-running (STS_HALT == 1) controller. + * Must be called with interrupts enabled and the lock not held. + */ +int ehci_reset(struct ehci_hcd *ehci) +{ + int retval; + u32 command = ehci_readl(ehci, &ehci->regs->command); + + /* If the EHCI debug controller is active, special care must be + * taken before and after a host controller reset */ + if (ehci->debug && !dbgp_reset_prep(ehci_to_hcd(ehci))) + ehci->debug = NULL; + + command |= CMD_RESET; + dbg_cmd (ehci, "reset", command); + ehci_writel(ehci, command, &ehci->regs->command); + ehci->rh_state = EHCI_RH_HALTED; + ehci->next_statechange = jiffies; + retval = ehci_handshake(ehci, &ehci->regs->command, + CMD_RESET, 0, 250 * 1000); + + if (ehci->has_hostpc) { + ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS, + &ehci->regs->usbmode_ex); + ehci_writel(ehci, TXFIFO_DEFAULT, &ehci->regs->txfill_tuning); + } + if (retval) + return retval; + + if (ehci_is_TDI(ehci)) + tdi_reset (ehci); + + if (ehci->debug) + dbgp_external_startup(ehci_to_hcd(ehci)); + + ehci->port_c_suspend = ehci->suspended_ports = + ehci->resuming_ports = 0; + return retval; +} +EXPORT_SYMBOL_GPL(ehci_reset); + +/* + * Idle the controller (turn off the schedules). + * Must be called with interrupts enabled and the lock not held. + */ +static void ehci_quiesce (struct ehci_hcd *ehci) +{ + u32 temp; + + if (ehci->rh_state != EHCI_RH_RUNNING) + return; + + /* wait for any schedule enables/disables to take effect */ + temp = (ehci->command << 10) & (STS_ASS | STS_PSS); + ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, temp, + 16 * 125); + + /* then disable anything that's still active */ + spin_lock_irq(&ehci->lock); + ehci->command &= ~(CMD_ASE | CMD_PSE); + ehci_writel(ehci, ehci->command, &ehci->regs->command); + spin_unlock_irq(&ehci->lock); + + /* hardware can take 16 microframes to turn off ... 
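+ *
+ * [Editor's note: a USB 2.0 microframe is 125 us, so the recurring
+ *  "16 * 125" timeout passed to ehci_handshake() here and elsewhere in
+ *  this file waits up to 16 microframes = 2 ms. As a hypothetical helper:
+ *
+ *	#define UFRAMES_TO_USECS(uf)	((uf) * 125)
+ *  ]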
*/ + ehci_handshake(ehci, &ehci->regs->status, STS_ASS | STS_PSS, 0, + 16 * 125); +} + +/*-------------------------------------------------------------------------*/ + +static void end_unlink_async(struct ehci_hcd *ehci); +static void unlink_empty_async(struct ehci_hcd *ehci); +static void unlink_empty_async_suspended(struct ehci_hcd *ehci); +static void ehci_work(struct ehci_hcd *ehci); +static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); +static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh); +static int ehci_port_power(struct ehci_hcd *ehci, int portnum, bool enable); + +#include "ehci-timer.c" +#include "ehci-hub.c" +#include "ehci-mem.c" +#include "ehci-q.c" +#include "ehci-sched.c" +#include "ehci-sysfs.c" + +/*-------------------------------------------------------------------------*/ + +/* On some systems, leaving remote wakeup enabled prevents system shutdown. + * The firmware seems to think that powering off is a wakeup event! + * This routine turns off remote wakeup and everything else, on all ports. + */ +static void ehci_turn_off_all_ports(struct ehci_hcd *ehci) +{ + int port = HCS_N_PORTS(ehci->hcs_params); + + while (port--) { + spin_unlock_irq(&ehci->lock); + ehci_port_power(ehci, port, false); + spin_lock_irq(&ehci->lock); + ehci_writel(ehci, PORT_RWC_BITS, + &ehci->regs->port_status[port]); + } +} + +/* + * Halt HC, turn off all ports, and let the BIOS use the companion controllers. + * Must be called with interrupts enabled and the lock not held. + */ +static void ehci_silence_controller(struct ehci_hcd *ehci) +{ + ehci_halt(ehci); + + spin_lock_irq(&ehci->lock); + ehci->rh_state = EHCI_RH_HALTED; + ehci_turn_off_all_ports(ehci); + + /* make BIOS/etc use companion controller during reboot */ + ehci_writel(ehci, 0, &ehci->regs->configured_flag); + + /* unblock posted writes */ + ehci_readl(ehci, &ehci->regs->configured_flag); + spin_unlock_irq(&ehci->lock); +} + +/* ehci_shutdown kick in for silicon on any bus (not just pci, etc). + * This forcibly disables dma and IRQs, helping kexec and other cases + * where the next system software may expect clean state. + */ +static void ehci_shutdown(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + + spin_lock_irq(&ehci->lock); + ehci->shutdown = true; + ehci->rh_state = EHCI_RH_STOPPING; + ehci->enabled_hrtimer_events = 0; + spin_unlock_irq(&ehci->lock); + + ehci_silence_controller(ehci); + + hrtimer_cancel(&ehci->hrtimer); +} + +/*-------------------------------------------------------------------------*/ + +/* + * ehci_work is called from some interrupts, timers, and so on. + * it calls driver completion functions, after dropping ehci->lock. + */ +static void ehci_work (struct ehci_hcd *ehci) +{ + /* another CPU may drop ehci->lock during a schedule scan while + * it reports urb completions. this flag guards against bogus + * attempts at re-entrant schedule scanning. + */ + if (ehci->scanning) { + ehci->need_rescan = true; + return; + } + ehci->scanning = true; + + rescan: + ehci->need_rescan = false; + if (ehci->async_count) + scan_async(ehci); + if (ehci->intr_count > 0) + scan_intr(ehci); + if (ehci->isoc_count > 0) + scan_isoc(ehci); + if (ehci->need_rescan) + goto rescan; + ehci->scanning = false; + + /* the IO watchdog guards against hardware or driver bugs that + * misplace IRQs, and should let us run completely without IRQs. + * such lossage has been observed on both VT6202 and VT8235. 
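+ *
+ * [Editor's note: the scanning/need_rescan flags above implement a
+ *  re-entrancy guard worth naming: a second caller marks the work as
+ *  stale instead of recursing, and the first caller loops until no
+ *  rescan is requested. Skeleton form:
+ *
+ *	if (busy) {
+ *		rescan_needed = true;	// let the active scan redo it
+ *		return;
+ *	}
+ *	busy = true;
+ *	do {
+ *		rescan_needed = false;
+ *		scan();			// may drop the lock internally
+ *	} while (rescan_needed);
+ *	busy = false;
+ *  ]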
+ */ + turn_on_io_watchdog(ehci); +} + +/* + * Called when the ehci_hcd module is removed. + */ +static void ehci_stop (struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + + ehci_dbg (ehci, "stop\n"); + + /* no more interrupts ... */ + + spin_lock_irq(&ehci->lock); + ehci->enabled_hrtimer_events = 0; + spin_unlock_irq(&ehci->lock); + + ehci_quiesce(ehci); + ehci_silence_controller(ehci); + ehci_reset (ehci); + + hrtimer_cancel(&ehci->hrtimer); + remove_sysfs_files(ehci); + remove_debug_files (ehci); + + /* root hub is shut down separately (first, when possible) */ + spin_lock_irq (&ehci->lock); + end_free_itds(ehci); + spin_unlock_irq (&ehci->lock); + ehci_mem_cleanup (ehci); + + if (ehci->amd_pll_fix == 1) + usb_amd_dev_put(); + + dbg_status (ehci, "ehci_stop completed", + ehci_readl(ehci, &ehci->regs->status)); +} + +/* one-time init, only for memory state */ +static int ehci_init(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + u32 temp; + int retval; + u32 hcc_params; + struct ehci_qh_hw *hw; + + spin_lock_init(&ehci->lock); + + /* + * keep io watchdog by default, those good HCDs could turn off it later + */ + ehci->need_io_watchdog = 1; + + hrtimer_init(&ehci->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + ehci->hrtimer.function = ehci_hrtimer_func; + ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT; + + hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); + + /* + * by default set standard 80% (== 100 usec/uframe) max periodic + * bandwidth as required by USB 2.0 + */ + ehci->uframe_periodic_max = 100; + + /* + * hw default: 1K periodic list heads, one per frame. + * periodic_size can shrink by USBCMD update if hcc_params allows. + */ + ehci->periodic_size = DEFAULT_I_TDPS; + INIT_LIST_HEAD(&ehci->async_unlink); + INIT_LIST_HEAD(&ehci->async_idle); + INIT_LIST_HEAD(&ehci->intr_unlink_wait); + INIT_LIST_HEAD(&ehci->intr_unlink); + INIT_LIST_HEAD(&ehci->intr_qh_list); + INIT_LIST_HEAD(&ehci->cached_itd_list); + INIT_LIST_HEAD(&ehci->cached_sitd_list); + INIT_LIST_HEAD(&ehci->tt_list); + + if (HCC_PGM_FRAMELISTLEN(hcc_params)) { + /* periodic schedule size can be smaller than default */ + switch (EHCI_TUNE_FLS) { + case 0: ehci->periodic_size = 1024; break; + case 1: ehci->periodic_size = 512; break; + case 2: ehci->periodic_size = 256; break; + default: BUG(); + } + } + if ((retval = ehci_mem_init(ehci, GFP_KERNEL)) < 0) + return retval; + + /* controllers may cache some of the periodic schedule ... */ + if (HCC_ISOC_CACHE(hcc_params)) // full frame cache + ehci->i_thresh = 0; + else // N microframes cached + ehci->i_thresh = 2 + HCC_ISOC_THRES(hcc_params); + + /* + * dedicate a qh for the async ring head, since we couldn't unlink + * a 'real' qh without stopping the async schedule [4.8]. use it + * as the 'reclamation list head' too. + * its dummy is used in hw_alt_next of many tds, to prevent the qh + * from automatically advancing to the next td after short reads. 
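+ *
+ * [Editor's note: the initialization below makes the async ring "empty"
+ *  by pointing the head's horizontal link at itself, and parks the
+ *  overlay with a halted token so the hardware never executes the head:
+ *
+ *	hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);   // self-link
+ *	hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);     // inert head
+ *  ]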
+ */ + ehci->async->qh_next.qh = NULL; + hw = ehci->async->hw; + hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); + hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); +#if defined(CONFIG_PPC_PS3) + hw->hw_info1 |= cpu_to_hc32(ehci, QH_INACTIVATE); +#endif + hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); + hw->hw_qtd_next = EHCI_LIST_END(ehci); + ehci->async->qh_state = QH_STATE_LINKED; + hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma); + + /* clear interrupt enables, set irq latency */ + if (log2_irq_thresh < 0 || log2_irq_thresh > 6) + log2_irq_thresh = 0; + temp = 1 << (16 + log2_irq_thresh); + if (HCC_PER_PORT_CHANGE_EVENT(hcc_params)) { + ehci->has_ppcd = 1; + ehci_dbg(ehci, "enable per-port change event\n"); + temp |= CMD_PPCEE; + } + if (HCC_CANPARK(hcc_params)) { + /* HW default park == 3, on hardware that supports it (like + * NVidia and ALI silicon), maximizes throughput on the async + * schedule by avoiding QH fetches between transfers. + * + * With fast usb storage devices and NForce2, "park" seems to + * make problems: throughput reduction (!), data errors... + */ + if (park) { + park = min(park, (unsigned) 3); + temp |= CMD_PARK; + temp |= park << 8; + } + ehci_dbg(ehci, "park %d\n", park); + } + if (HCC_PGM_FRAMELISTLEN(hcc_params)) { + /* periodic schedule size can be smaller than default */ + temp &= ~(3 << 2); + temp |= (EHCI_TUNE_FLS << 2); + } + ehci->command = temp; + + /* Accept arbitrarily long scatter-gather lists */ + if (!(hcd->driver->flags & HCD_LOCAL_MEM)) + hcd->self.sg_tablesize = ~0; + return 0; +} + +/* start HC running; it's halted, ehci_init() has been run (once) */ +static int ehci_run (struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + u32 temp; + u32 hcc_params; + + hcd->uses_new_polling = 1; + + /* EHCI spec section 4.1 */ + + ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list); + ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next); + + /* + * hcc_params controls whether ehci->regs->segment must (!!!) + * be used; it constrains QH/ITD/SITD and QTD locations. + * pci_pool consistent memory always uses segment zero. + * streaming mappings for I/O buffers, like pci_map_single(), + * can return segments above 4GB, if the device allows. + * + * NOTE: the dma mask is visible through dev->dma_mask, so + * drivers can pass this info along ... like NETIF_F_HIGHDMA, + * Scsi_Host.highmem_io, and so forth. It's readonly to all + * host side drivers though. + */ + hcc_params = ehci_readl(ehci, &ehci->caps->hcc_params); + if (HCC_64BIT_ADDR(hcc_params)) { + ehci_writel(ehci, 0, &ehci->regs->segment); +#if 0 +// this is deeply broken on almost all architectures + if (!dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64))) + ehci_info(ehci, "enabled 64bit DMA\n"); +#endif + } + + + // Philips, Intel, and maybe others need CMD_RUN before the + // root hub will detect new devices (why?); NEC doesn't + ehci->command &= ~(CMD_LRESET|CMD_IAAD|CMD_PSE|CMD_ASE|CMD_RESET); + ehci->command |= CMD_RUN; + ehci_writel(ehci, ehci->command, &ehci->regs->command); + dbg_cmd (ehci, "init", ehci->command); + + /* + * Start, enabling full USB 2.0 functionality ... usb 1.1 devices + * are explicitly handed to companion controller(s), so no TT is + * involved with the root hub. (Except where one is integrated, + * and there's no companion controller unless maybe for USB OTG.) + * + * Turning on the CF flag will transfer ownership of all ports + * from the companions to the EHCI controller. 
If any of the + * companions are in the middle of a port reset at the time, it + * could cause trouble. Write-locking ehci_cf_port_reset_rwsem + * guarantees that no resets are in progress. After we set CF, + * a short delay lets the hardware catch up; new resets shouldn't + * be started before the port switching actions could complete. + */ + down_write(&ehci_cf_port_reset_rwsem); + ehci->rh_state = EHCI_RH_RUNNING; + ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); + ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ + msleep(5); + up_write(&ehci_cf_port_reset_rwsem); + ehci->last_periodic_enable = ktime_get_real(); + + temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); + ehci_info (ehci, + "USB %x.%x started, EHCI %x.%02x%s\n", + ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), + temp >> 8, temp & 0xff, + ignore_oc ? ", overcurrent ignored" : ""); + + ehci_writel(ehci, INTR_MASK, + &ehci->regs->intr_enable); /* Turn On Interrupts */ + + /* GRR this is run-once init(), being done every time the HC starts. + * So long as they're part of class devices, we can't do it init() + * since the class device isn't created that early. + */ + create_debug_files(ehci); + create_sysfs_files(ehci); + + return 0; +} + +int ehci_setup(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + int retval; + + ehci->regs = (void __iomem *)ehci->caps + + HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); + dbg_hcs_params(ehci, "reset"); + dbg_hcc_params(ehci, "reset"); + + /* cache this readonly data; minimize chip reads */ + ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); + + ehci->sbrn = HCD_USB2; + + /* data structure init */ + retval = ehci_init(hcd); + if (retval) + return retval; + + retval = ehci_halt(ehci); + if (retval) + return retval; + + ehci_reset(ehci); + + return 0; +} +EXPORT_SYMBOL_GPL(ehci_setup); + +/*-------------------------------------------------------------------------*/ + +static irqreturn_t ehci_irq (struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + u32 status, masked_status, pcd_status = 0, cmd; + int bh; + unsigned long flags; + + /* + * For threadirqs option we use spin_lock_irqsave() variant to prevent + * deadlock with ehci hrtimer callback, because hrtimer callbacks run + * in interrupt context even when threadirqs is specified. We can go + * back to spin_lock() variant when hrtimer callbacks become threaded. + */ + spin_lock_irqsave(&ehci->lock, flags); + + status = ehci_readl(ehci, &ehci->regs->status); + + /* e.g. cardbus physical eject */ + if (status == ~(u32) 0) { + ehci_dbg (ehci, "device removed\n"); + goto dead; + } + + /* + * We don't use STS_FLR, but some controllers don't like it to + * remain on, so mask it out along with the other status bits. + */ + masked_status = status & (INTR_MASK | STS_FLR); + + /* Shared IRQ? 
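+ * [Editor's note: with shared PCI interrupt lines, a handler must return
+ *  IRQ_NONE when none of its own status bits are set, so the kernel can
+ *  offer the interrupt to the other devices on the line. Generic shape,
+ *  with hypothetical read_status()/ack_status() accessors:
+ *
+ *	static irqreturn_t my_irq(int irq, void *dev_id)
+ *	{
+ *		u32 status = read_status(dev_id) & MY_INTR_MASK;
+ *
+ *		if (!status)
+ *			return IRQ_NONE;	// not ours
+ *		ack_status(dev_id, status);	// write-1-to-clear
+ *		return IRQ_HANDLED;
+ *	}
+ *  ]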
*/ + if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) { + spin_unlock_irqrestore(&ehci->lock, flags); + return IRQ_NONE; + } + + /* clear (just) interrupts */ + ehci_writel(ehci, masked_status, &ehci->regs->status); + cmd = ehci_readl(ehci, &ehci->regs->command); + bh = 0; + + /* normal [4.15.1.2] or error [4.15.1.1] completion */ + if (likely ((status & (STS_INT|STS_ERR)) != 0)) { + if (likely ((status & STS_ERR) == 0)) + COUNT (ehci->stats.normal); + else + COUNT (ehci->stats.error); + bh = 1; + } + + /* complete the unlinking of some qh [4.15.2.3] */ + if (status & STS_IAA) { + + /* Turn off the IAA watchdog */ + ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_IAA_WATCHDOG); + + /* + * Mild optimization: Allow another IAAD to reset the + * hrtimer, if one occurs before the next expiration. + * In theory we could always cancel the hrtimer, but + * tests show that about half the time it will be reset + * for some other event anyway. + */ + if (ehci->next_hrtimer_event == EHCI_HRTIMER_IAA_WATCHDOG) + ++ehci->next_hrtimer_event; + + /* guard against (alleged) silicon errata */ + if (cmd & CMD_IAAD) + ehci_dbg(ehci, "IAA with IAAD still set?\n"); + if (ehci->iaa_in_progress) + COUNT(ehci->stats.iaa); + end_unlink_async(ehci); + } + + /* remote wakeup [4.3.1] */ + if (status & STS_PCD) { + unsigned i = HCS_N_PORTS (ehci->hcs_params); + u32 ppcd = ~0; + + /* kick root hub later */ + pcd_status = status; + + /* resume root hub? */ + if (ehci->rh_state == EHCI_RH_SUSPENDED) + usb_hcd_resume_root_hub(hcd); + + /* get per-port change detect bits */ + if (ehci->has_ppcd) + ppcd = status >> 16; + + while (i--) { + int pstatus; + + /* leverage per-port change bits feature */ + if (!(ppcd & (1 << i))) + continue; + pstatus = ehci_readl(ehci, + &ehci->regs->port_status[i]); + + if (pstatus & PORT_OWNER) + continue; + if (!(test_bit(i, &ehci->suspended_ports) && + ((pstatus & PORT_RESUME) || + !(pstatus & PORT_SUSPEND)) && + (pstatus & PORT_PE) && + ehci->reset_done[i] == 0)) + continue; + + /* start USB_RESUME_TIMEOUT msec resume signaling from + * this port, and make hub_wq collect + * PORT_STAT_C_SUSPEND to stop that signaling. 
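+ *
+ * [Editor's note: the bookkeeping below is the standard jiffies-deadline
+ *  pattern -- record an absolute expiry, then let a timer or a later
+ *  check compare against it:
+ *
+ *	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);
+ *	...
+ *	if (time_after_eq(jiffies, deadline))
+ *		finish_resume();	// hypothetical follow-up step
+ *  ]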
+ */ + ehci->reset_done[i] = jiffies + + msecs_to_jiffies(USB_RESUME_TIMEOUT); + set_bit(i, &ehci->resuming_ports); + ehci_dbg (ehci, "port %d remote wakeup\n", i + 1); + usb_hcd_start_port_resume(&hcd->self, i); + mod_timer(&hcd->rh_timer, ehci->reset_done[i]); + } + } + + /* PCI errors [4.15.2.4] */ + if (unlikely ((status & STS_FATAL) != 0)) { + ehci_err(ehci, "fatal error\n"); + dbg_cmd(ehci, "fatal", cmd); + dbg_status(ehci, "fatal", status); +dead: + usb_hc_died(hcd); + + /* Don't let the controller do anything more */ + ehci->shutdown = true; + ehci->rh_state = EHCI_RH_STOPPING; + ehci->command &= ~(CMD_RUN | CMD_ASE | CMD_PSE); + ehci_writel(ehci, ehci->command, &ehci->regs->command); + ehci_writel(ehci, 0, &ehci->regs->intr_enable); + ehci_handle_controller_death(ehci); + + /* Handle completions when the controller stops */ + bh = 0; + } + + if (bh) + ehci_work (ehci); + spin_unlock_irqrestore(&ehci->lock, flags); + if (pcd_status) + usb_hcd_poll_rh_status(hcd); + return IRQ_HANDLED; +} + +/*-------------------------------------------------------------------------*/ + +/* + * non-error returns are a promise to giveback() the urb later + * we drop ownership so next owner (or urb unlink) can get it + * + * urb + dev is in hcd.self.controller.urb_list + * we're queueing TDs onto software and hardware lists + * + * hcd-specific init for hcpriv hasn't been done yet + * + * NOTE: control, bulk, and interrupt share the same code to append TDs + * to a (possibly active) QH, and the same QH scanning code. + */ +static int ehci_urb_enqueue ( + struct usb_hcd *hcd, + struct urb *urb, + gfp_t mem_flags +) { + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + struct list_head qtd_list; + + INIT_LIST_HEAD (&qtd_list); + + switch (usb_pipetype (urb->pipe)) { + case PIPE_CONTROL: + /* qh_completions() code doesn't handle all the fault cases + * in multi-TD control transfers. Even 1KB is rare anyway. + */ + if (urb->transfer_buffer_length > (16 * 1024)) + return -EMSGSIZE; + /* FALLTHROUGH */ + /* case PIPE_BULK: */ + default: + if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags)) + return -ENOMEM; + return submit_async(ehci, urb, &qtd_list, mem_flags); + + case PIPE_INTERRUPT: + if (!qh_urb_transaction (ehci, urb, &qtd_list, mem_flags)) + return -ENOMEM; + return intr_submit(ehci, urb, &qtd_list, mem_flags); + + case PIPE_ISOCHRONOUS: + if (urb->dev->speed == USB_SPEED_HIGH) + return itd_submit (ehci, urb, mem_flags); + else + return sitd_submit (ehci, urb, mem_flags); + } +} + +/* remove from hardware lists + * completions normally happen asynchronously + */ + +static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + struct ehci_qh *qh; + unsigned long flags; + int rc; + + spin_lock_irqsave (&ehci->lock, flags); + rc = usb_hcd_check_unlink_urb(hcd, urb, status); + if (rc) + goto done; + + if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) { + /* + * We don't expedite dequeue for isochronous URBs. + * Just wait until they complete normally or their + * time slot expires. 
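+ *
+ * [Editor's note: for the async/interrupt case below, the action depends
+ *  on the QH state machine used throughout this driver:
+ *
+ *	QH_STATE_LINKED       -> start an unlink so the HC stops touching it
+ *	QH_STATE_COMPLETING   -> flag dequeue_during_giveback; scanner finishes
+ *	QH_STATE_UNLINK(_WAIT) -> an unlink is already in flight; nothing to do
+ *	QH_STATE_IDLE         -> safe to give back qtds via qh_completions()
+ *  ]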
+ */ + } else { + qh = (struct ehci_qh *) urb->hcpriv; + qh->exception = 1; + switch (qh->qh_state) { + case QH_STATE_LINKED: + if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) + start_unlink_intr(ehci, qh); + else + start_unlink_async(ehci, qh); + break; + case QH_STATE_COMPLETING: + qh->dequeue_during_giveback = 1; + break; + case QH_STATE_UNLINK: + case QH_STATE_UNLINK_WAIT: + /* already started */ + break; + case QH_STATE_IDLE: + /* QH might be waiting for a Clear-TT-Buffer */ + qh_completions(ehci, qh); + break; + } + } +done: + spin_unlock_irqrestore (&ehci->lock, flags); + return rc; +} + +/*-------------------------------------------------------------------------*/ + +// bulk qh holds the data toggle + +static void +ehci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + unsigned long flags; + struct ehci_qh *qh; + + /* ASSERT: any requests/urbs are being unlinked */ + /* ASSERT: nobody can be submitting urbs for this any more */ + +rescan: + spin_lock_irqsave (&ehci->lock, flags); + qh = ep->hcpriv; + if (!qh) + goto done; + + /* endpoints can be iso streams. for now, we don't + * accelerate iso completions ... so spin a while. + */ + if (qh->hw == NULL) { + struct ehci_iso_stream *stream = ep->hcpriv; + + if (!list_empty(&stream->td_list)) + goto idle_timeout; + + /* BUG_ON(!list_empty(&stream->free_list)); */ + reserve_release_iso_bandwidth(ehci, stream, -1); + kfree(stream); + goto done; + } + + qh->exception = 1; + switch (qh->qh_state) { + case QH_STATE_LINKED: + WARN_ON(!list_empty(&qh->qtd_list)); + if (usb_endpoint_type(&ep->desc) != USB_ENDPOINT_XFER_INT) + start_unlink_async(ehci, qh); + else + start_unlink_intr(ehci, qh); + /* FALL THROUGH */ + case QH_STATE_COMPLETING: /* already in unlinking */ + case QH_STATE_UNLINK: /* wait for hw to finish? */ + case QH_STATE_UNLINK_WAIT: +idle_timeout: + spin_unlock_irqrestore (&ehci->lock, flags); + schedule_timeout_uninterruptible(1); + goto rescan; + case QH_STATE_IDLE: /* fully unlinked */ + if (qh->clearing_tt) + goto idle_timeout; + if (list_empty (&qh->qtd_list)) { + if (qh->ps.bw_uperiod) + reserve_release_intr_bandwidth(ehci, qh, -1); + qh_destroy(ehci, qh); + break; + } + /* else FALL THROUGH */ + default: + /* caller was supposed to have unlinked any requests; + * that's not our job. just leak this memory. + */ + ehci_err (ehci, "qh %p (#%02x) state %d%s\n", + qh, ep->desc.bEndpointAddress, qh->qh_state, + list_empty (&qh->qtd_list) ? "" : "(has tds)"); + break; + } + done: + ep->hcpriv = NULL; + spin_unlock_irqrestore (&ehci->lock, flags); +} + +static void +ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct ehci_qh *qh; + int eptype = usb_endpoint_type(&ep->desc); + int epnum = usb_endpoint_num(&ep->desc); + int is_out = usb_endpoint_dir_out(&ep->desc); + unsigned long flags; + + if (eptype != USB_ENDPOINT_XFER_BULK && eptype != USB_ENDPOINT_XFER_INT) + return; + + spin_lock_irqsave(&ehci->lock, flags); + qh = ep->hcpriv; + + /* For Bulk and Interrupt endpoints we maintain the toggle state + * in the hardware; the toggle bits in udev aren't used at all. + * When an endpoint is reset by usb_clear_halt() we must reset + * the toggle bit in the QH. + */ + if (qh) { + if (!list_empty(&qh->qtd_list)) { + WARN_ONCE(1, "clear_halt for a busy endpoint\n"); + } else { + /* The toggle value in the QH can't be updated + * while the QH is active. 
Unlink it now; + * re-linking will call qh_refresh(). + */ + usb_settoggle(qh->ps.udev, epnum, is_out, 0); + qh->exception = 1; + if (eptype == USB_ENDPOINT_XFER_BULK) + start_unlink_async(ehci, qh); + else + start_unlink_intr(ehci, qh); + } + } + spin_unlock_irqrestore(&ehci->lock, flags); +} + +static int ehci_get_frame (struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + return (ehci_read_frame_index(ehci) >> 3) % ehci->periodic_size; +} + +/*-------------------------------------------------------------------------*/ + +/* Device addition and removal */ + +static void ehci_remove_device(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + + spin_lock_irq(&ehci->lock); + drop_tt(udev); + spin_unlock_irq(&ehci->lock); +} + +/*-------------------------------------------------------------------------*/ + +#ifdef CONFIG_PM + +/* suspend/resume, section 4.3 */ + +/* These routines handle the generic parts of controller suspend/resume */ + +int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + + if (time_before(jiffies, ehci->next_statechange)) + msleep(10); + + /* + * Root hub was already suspended. Disable IRQ emission and + * mark HW unaccessible. The PM and USB cores make sure that + * the root hub is either suspended or stopped. + */ + ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup); + + spin_lock_irq(&ehci->lock); + ehci_writel(ehci, 0, &ehci->regs->intr_enable); + (void) ehci_readl(ehci, &ehci->regs->intr_enable); + + clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + spin_unlock_irq(&ehci->lock); + + synchronize_irq(hcd->irq); + + /* Check for race with a wakeup request */ + if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) { + ehci_resume(hcd, false); + return -EBUSY; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ehci_suspend); + +/* Returns 0 if power was preserved, 1 if power was lost */ +int ehci_resume(struct usb_hcd *hcd, bool force_reset) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + + if (time_before(jiffies, ehci->next_statechange)) + msleep(100); + + /* Mark hardware accessible again as we are back to full power by now */ + set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); + + if (ehci->shutdown) + return 0; /* Controller is dead */ + + /* + * If CF is still set and reset isn't forced + * then we maintained suspend power. + * Just undo the effect of ehci_suspend(). + */ + if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF && + !force_reset) { + int mask = INTR_MASK; + + ehci_prepare_ports_for_controller_resume(ehci); + + spin_lock_irq(&ehci->lock); + if (ehci->shutdown) + goto skip; + + if (!hcd->self.root_hub->do_remote_wakeup) + mask &= ~STS_PCD; + ehci_writel(ehci, mask, &ehci->regs->intr_enable); + ehci_readl(ehci, &ehci->regs->intr_enable); + skip: + spin_unlock_irq(&ehci->lock); + return 0; + } + + /* + * Else reset, to cope with power loss or resume from hibernation + * having let the firmware kick in during reboot. 
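+ *
+ * [Editor's note: ehci_resume() returns 0 when suspend power was kept and
+ *  1 when it had to reset the controller; glue drivers only decide the
+ *  force_reset argument. A minimal hypothetical caller:
+ *
+ *	static int my_glue_resume(struct usb_hcd *hcd, bool hibernated)
+ *	{
+ *		// after hibernation the firmware may have run, so force
+ *		// the reset path rather than trusting saved state
+ *		ehci_resume(hcd, hibernated);
+ *		return 0;
+ *	}
+ *  ]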
+ */ + usb_root_hub_lost_power(hcd->self.root_hub); + (void) ehci_halt(ehci); + (void) ehci_reset(ehci); + + spin_lock_irq(&ehci->lock); + if (ehci->shutdown) + goto skip; + + ehci_writel(ehci, ehci->command, &ehci->regs->command); + ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); + ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ + + ehci->rh_state = EHCI_RH_SUSPENDED; + spin_unlock_irq(&ehci->lock); + + return 1; +} +EXPORT_SYMBOL_GPL(ehci_resume); + +#endif + +/*-------------------------------------------------------------------------*/ + +/* + * Generic structure: This gets copied for platform drivers so that + * individual entries can be overridden as needed. + */ + +static const struct hc_driver ehci_hc_driver = { + .description = hcd_name, + .product_desc = "EHCI Host Controller", + .hcd_priv_size = sizeof(struct ehci_hcd), + + /* + * generic hardware linkage + */ + .irq = ehci_irq, + .flags = HCD_MEMORY | HCD_USB2 | HCD_BH, + + /* + * basic lifecycle operations + */ + .reset = ehci_setup, + .start = ehci_run, + .stop = ehci_stop, + .shutdown = ehci_shutdown, + + /* + * managing i/o requests and associated device resources + */ + .urb_enqueue = ehci_urb_enqueue, + .urb_dequeue = ehci_urb_dequeue, + .endpoint_disable = ehci_endpoint_disable, + .endpoint_reset = ehci_endpoint_reset, + .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete, + + /* + * scheduling support + */ + .get_frame_number = ehci_get_frame, + + /* + * root hub support + */ + .hub_status_data = ehci_hub_status_data, + .hub_control = ehci_hub_control, + .bus_suspend = ehci_bus_suspend, + .bus_resume = ehci_bus_resume, + .relinquish_port = ehci_relinquish_port, + .port_handed_over = ehci_port_handed_over, + + /* + * device support + */ + .free_dev = ehci_remove_device, +}; + +void ehci_init_driver(struct hc_driver *drv, + const struct ehci_driver_overrides *over) +{ + /* Copy the generic table to drv and then apply the overrides */ + *drv = ehci_hc_driver; + + if (over) { + drv->hcd_priv_size += over->extra_priv_size; + if (over->reset) + drv->reset = over->reset; + if (over->port_power) + drv->port_power = over->port_power; + } +} +EXPORT_SYMBOL_GPL(ehci_init_driver); + +/*-------------------------------------------------------------------------*/ + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR (DRIVER_AUTHOR); +MODULE_LICENSE ("GPL"); + +#ifdef CONFIG_USB_EHCI_SH +#include "ehci-sh.c" +#define PLATFORM_DRIVER ehci_hcd_sh_driver +#endif + +#ifdef CONFIG_PPC_PS3 +#include "ehci-ps3.c" +#define PS3_SYSTEM_BUS_DRIVER ps3_ehci_driver +#endif + +#ifdef CONFIG_USB_EHCI_HCD_PPC_OF +#include "ehci-ppc-of.c" +#define OF_PLATFORM_DRIVER ehci_hcd_ppc_of_driver +#endif + +#ifdef CONFIG_XPS_USB_HCD_XILINX +#include "ehci-xilinx-of.c" +#define XILINX_OF_PLATFORM_DRIVER ehci_hcd_xilinx_of_driver +#endif + +#ifdef CONFIG_TILE_USB +#include "ehci-tilegx.c" +#define PLATFORM_DRIVER ehci_hcd_tilegx_driver +#endif + +#ifdef CONFIG_USB_EHCI_HCD_PMC_MSP +#include "ehci-pmcmsp.c" +#define PLATFORM_DRIVER ehci_hcd_msp_driver +#endif + +#ifdef CONFIG_SPARC_LEON +#include "ehci-grlib.c" +#define PLATFORM_DRIVER ehci_grlib_driver +#endif + +#ifdef CONFIG_USB_EHCI_MV +#include "ehci-mv.c" +#define PLATFORM_DRIVER ehci_mv_driver +#endif + +#ifdef CONFIG_MIPS_SEAD3 +#include "ehci-sead3.c" +#define PLATFORM_DRIVER ehci_hcd_sead3_driver +#endif + +static int __init ehci_hcd_init(void) +{ + int retval = 0; + + if (usb_disabled()) + return -ENODEV; + + printk(KERN_INFO "%s: " DRIVER_DESC "\n", hcd_name); + 
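+	/* [Editor's note: the driver-registration sequence below uses the
+	 * kernel's goto-based unwind idiom: each successful step gains a
+	 * matching cleanup label, and a failure jumps to the label that
+	 * undoes only the steps already completed. Skeleton form:
+	 *
+	 *	err = register_a();
+	 *	if (err) goto out;
+	 *	err = register_b();
+	 *	if (err) goto undo_a;
+	 *	return 0;
+	 * undo_a:
+	 *	unregister_a();
+	 * out:
+	 *	return err;
+	 * ]
+	 */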
set_bit(USB_EHCI_LOADED, &usb_hcds_loaded); + if (test_bit(USB_UHCI_LOADED, &usb_hcds_loaded) || + test_bit(USB_OHCI_LOADED, &usb_hcds_loaded)) + printk(KERN_WARNING "Warning! ehci_hcd should always be loaded" + " before uhci_hcd and ohci_hcd, not after\n"); + + pr_debug("%s: block sizes: qh %Zd qtd %Zd itd %Zd sitd %Zd\n", + hcd_name, + sizeof(struct ehci_qh), sizeof(struct ehci_qtd), + sizeof(struct ehci_itd), sizeof(struct ehci_sitd)); + +#ifdef CONFIG_DYNAMIC_DEBUG + ehci_debug_root = debugfs_create_dir("ehci", usb_debug_root); + if (!ehci_debug_root) { + retval = -ENOENT; + goto err_debug; + } +#endif + +#ifdef PLATFORM_DRIVER + retval = platform_driver_register(&PLATFORM_DRIVER); + if (retval < 0) + goto clean0; +#endif + +#ifdef PS3_SYSTEM_BUS_DRIVER + retval = ps3_ehci_driver_register(&PS3_SYSTEM_BUS_DRIVER); + if (retval < 0) + goto clean2; +#endif + +#ifdef OF_PLATFORM_DRIVER + retval = platform_driver_register(&OF_PLATFORM_DRIVER); + if (retval < 0) + goto clean3; +#endif + +#ifdef XILINX_OF_PLATFORM_DRIVER + retval = platform_driver_register(&XILINX_OF_PLATFORM_DRIVER); + if (retval < 0) + goto clean4; +#endif + return retval; + +#ifdef XILINX_OF_PLATFORM_DRIVER + /* platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); */ +clean4: +#endif +#ifdef OF_PLATFORM_DRIVER + platform_driver_unregister(&OF_PLATFORM_DRIVER); +clean3: +#endif +#ifdef PS3_SYSTEM_BUS_DRIVER + ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER); +clean2: +#endif +#ifdef PLATFORM_DRIVER + platform_driver_unregister(&PLATFORM_DRIVER); +clean0: +#endif +#ifdef CONFIG_DYNAMIC_DEBUG + debugfs_remove(ehci_debug_root); + ehci_debug_root = NULL; +err_debug: +#endif + clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); + return retval; +} +module_init(ehci_hcd_init); + +static void __exit ehci_hcd_cleanup(void) +{ +#ifdef XILINX_OF_PLATFORM_DRIVER + platform_driver_unregister(&XILINX_OF_PLATFORM_DRIVER); +#endif +#ifdef OF_PLATFORM_DRIVER + platform_driver_unregister(&OF_PLATFORM_DRIVER); +#endif +#ifdef PLATFORM_DRIVER + platform_driver_unregister(&PLATFORM_DRIVER); +#endif +#ifdef PS3_SYSTEM_BUS_DRIVER + ps3_ehci_driver_unregister(&PS3_SYSTEM_BUS_DRIVER); +#endif +#ifdef CONFIG_DYNAMIC_DEBUG + debugfs_remove(ehci_debug_root); +#endif + clear_bit(USB_EHCI_LOADED, &usb_hcds_loaded); +} +module_exit(ehci_hcd_cleanup); diff --git a/addons/ehci-pci/src/4.4.180/ehci-hub.c b/addons/ehci-pci/src/4.4.180/ehci-hub.c new file mode 100644 index 00000000..086a7115 --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/ehci-hub.c @@ -0,0 +1,1329 @@ +/* + * Copyright (C) 2001-2004 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* this file is part of ehci-hcd.c */ + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI Root Hub ... 
the nonsharable stuff
+ *
+ * Registers don't need cpu_to_le32, that happens transparently
+ */
+
+/*-------------------------------------------------------------------------*/
+#include <linux/usb/otg.h>
+
+#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
+
+#ifdef CONFIG_PM
+
+static int persist_enabled_on_companion(struct usb_device *udev, void *unused)
+{
+	return !udev->maxchild && udev->persist_enabled &&
+		udev->bus->root_hub->speed < USB_SPEED_HIGH;
+}
+
+/* After a power loss, ports that were owned by the companion must be
+ * reset so that the companion can still own them.
+ */
+static void ehci_handover_companion_ports(struct ehci_hcd *ehci)
+{
+	u32 __iomem	*reg;
+	u32		status;
+	int		port;
+	__le32		buf;
+	struct usb_hcd	*hcd = ehci_to_hcd(ehci);
+
+	if (!ehci->owned_ports)
+		return;
+
+	/*
+	 * USB 1.1 devices are mostly HIDs, which don't need to persist across
+	 * suspends. If we ensure that none of our companion's devices have
+	 * persist_enabled (by looking through all USB 1.1 buses in the system),
+	 * we can skip this and avoid slowing resume down. Devices without
+	 * persist will just get reenumerated shortly after resume anyway.
+	 */
+	if (!usb_for_each_dev(NULL, persist_enabled_on_companion))
+		return;
+
+	/* Make sure the ports are powered */
+	port = HCS_N_PORTS(ehci->hcs_params);
+	while (port--) {
+		if (test_bit(port, &ehci->owned_ports)) {
+			reg = &ehci->regs->port_status[port];
+			status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+			if (!(status & PORT_POWER))
+				ehci_port_power(ehci, port, true);
+		}
+	}
+
+	/* Give the connections some time to appear */
+	msleep(20);
+
+	spin_lock_irq(&ehci->lock);
+	port = HCS_N_PORTS(ehci->hcs_params);
+	while (port--) {
+		if (test_bit(port, &ehci->owned_ports)) {
+			reg = &ehci->regs->port_status[port];
+			status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
+
+			/* Port already owned by companion? */
+			if (status & PORT_OWNER)
+				clear_bit(port, &ehci->owned_ports);
+			else if (test_bit(port, &ehci->companion_ports))
+				ehci_writel(ehci, status & ~PORT_PE, reg);
+			else {
+				spin_unlock_irq(&ehci->lock);
+				ehci_hub_control(hcd, SetPortFeature,
+						USB_PORT_FEAT_RESET, port + 1,
+						NULL, 0);
+				spin_lock_irq(&ehci->lock);
+			}
+		}
+	}
+	spin_unlock_irq(&ehci->lock);
+
+	if (!ehci->owned_ports)
+		return;
+	msleep(90);		/* Wait for resets to complete */
+
+	spin_lock_irq(&ehci->lock);
+	port = HCS_N_PORTS(ehci->hcs_params);
+	while (port--) {
+		if (test_bit(port, &ehci->owned_ports)) {
+			spin_unlock_irq(&ehci->lock);
+			ehci_hub_control(hcd, GetPortStatus,
+					0, port + 1,
+					(char *) &buf, sizeof(buf));
+			spin_lock_irq(&ehci->lock);
+
+			/* The companion should now own the port,
+			 * but if something went wrong the port must not
+			 * remain enabled.
+ */ + reg = &ehci->regs->port_status[port]; + status = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; + if (status & PORT_OWNER) + ehci_writel(ehci, status | PORT_CSC, reg); + else { + ehci_dbg(ehci, "failed handover port %d: %x\n", + port + 1, status); + ehci_writel(ehci, status & ~PORT_PE, reg); + } + } + } + + ehci->owned_ports = 0; + spin_unlock_irq(&ehci->lock); +} + +static int ehci_port_change(struct ehci_hcd *ehci) +{ + int i = HCS_N_PORTS(ehci->hcs_params); + + /* First check if the controller indicates a change event */ + + if (ehci_readl(ehci, &ehci->regs->status) & STS_PCD) + return 1; + + /* + * Not all controllers appear to update this while going from D3 to D0, + * so check the individual port status registers as well + */ + + while (i--) + if (ehci_readl(ehci, &ehci->regs->port_status[i]) & PORT_CSC) + return 1; + + return 0; +} + +void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, + bool suspending, bool do_wakeup) +{ + int port; + u32 temp; + + /* If remote wakeup is enabled for the root hub but disabled + * for the controller, we must adjust all the port wakeup flags + * when the controller is suspended or resumed. In all other + * cases they don't need to be changed. + */ + if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || do_wakeup) + return; + + spin_lock_irq(&ehci->lock); + + /* clear phy low-power mode before changing wakeup flags */ + if (ehci->has_tdi_phy_lpm) { + port = HCS_N_PORTS(ehci->hcs_params); + while (port--) { + u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port]; + + temp = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg); + } + spin_unlock_irq(&ehci->lock); + msleep(5); + spin_lock_irq(&ehci->lock); + } + + port = HCS_N_PORTS(ehci->hcs_params); + while (port--) { + u32 __iomem *reg = &ehci->regs->port_status[port]; + u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; + u32 t2 = t1 & ~PORT_WAKE_BITS; + + /* If we are suspending the controller, clear the flags. + * If we are resuming the controller, set the wakeup flags. + */ + if (!suspending) { + if (t1 & PORT_CONNECT) + t2 |= PORT_WKOC_E | PORT_WKDISC_E; + else + t2 |= PORT_WKOC_E | PORT_WKCONN_E; + } + ehci_writel(ehci, t2, reg); + } + + /* enter phy low-power mode again */ + if (ehci->has_tdi_phy_lpm) { + port = HCS_N_PORTS(ehci->hcs_params); + while (port--) { + u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port]; + + temp = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg); + } + } + + /* Does the root hub have a port wakeup pending? */ + if (!suspending && ehci_port_change(ehci)) + usb_hcd_resume_root_hub(ehci_to_hcd(ehci)); + + spin_unlock_irq(&ehci->lock); +} +EXPORT_SYMBOL_GPL(ehci_adjust_port_wakeup_flags); + +static int ehci_bus_suspend (struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + int port; + int mask; + int changed; + bool fs_idle_delay; + + ehci_dbg(ehci, "suspend root hub\n"); + + if (time_before (jiffies, ehci->next_statechange)) + msleep(5); + + /* stop the schedules */ + ehci_quiesce(ehci); + + spin_lock_irq (&ehci->lock); + if (ehci->rh_state < EHCI_RH_RUNNING) + goto done; + + /* Once the controller is stopped, port resumes that are already + * in progress won't complete. Hence if remote wakeup is enabled + * for the root hub and any ports are in the middle of a resume or + * remote wakeup, we must fail the suspend. 
+ */ + if (hcd->self.root_hub->do_remote_wakeup) { + if (ehci->resuming_ports) { + spin_unlock_irq(&ehci->lock); + ehci_dbg(ehci, "suspend failed because a port is resuming\n"); + return -EBUSY; + } + } + + /* Unlike other USB host controller types, EHCI doesn't have + * any notion of "global" or bus-wide suspend. The driver has + * to manually suspend all the active unsuspended ports, and + * then manually resume them in the bus_resume() routine. + */ + ehci->bus_suspended = 0; + ehci->owned_ports = 0; + changed = 0; + fs_idle_delay = false; + port = HCS_N_PORTS(ehci->hcs_params); + while (port--) { + u32 __iomem *reg = &ehci->regs->port_status [port]; + u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; + u32 t2 = t1 & ~PORT_WAKE_BITS; + + /* keep track of which ports we suspend */ + if (t1 & PORT_OWNER) + set_bit(port, &ehci->owned_ports); + else if ((t1 & PORT_PE) && !(t1 & PORT_SUSPEND)) { + t2 |= PORT_SUSPEND; + set_bit(port, &ehci->bus_suspended); + } + + /* enable remote wakeup on all ports, if told to do so */ + if (hcd->self.root_hub->do_remote_wakeup) { + /* only enable appropriate wake bits, otherwise the + * hardware can not go phy low power mode. If a race + * condition happens here(connection change during bits + * set), the port change detection will finally fix it. + */ + if (t1 & PORT_CONNECT) + t2 |= PORT_WKOC_E | PORT_WKDISC_E; + else + t2 |= PORT_WKOC_E | PORT_WKCONN_E; + } + + if (t1 != t2) { + /* + * On some controllers, Wake-On-Disconnect will + * generate false wakeup signals until the bus + * switches over to full-speed idle. For their + * sake, add a delay if we need one. + */ + if ((t2 & PORT_WKDISC_E) && + ehci_port_speed(ehci, t2) == + USB_PORT_STAT_HIGH_SPEED) + fs_idle_delay = true; + ehci_writel(ehci, t2, reg); + changed = 1; + } + } + spin_unlock_irq(&ehci->lock); + + if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) { + /* + * Wait for HCD to enter low-power mode or for the bus + * to switch to full-speed idle. + */ + usleep_range(5000, 5500); + } + + if (changed && ehci->has_tdi_phy_lpm) { + spin_lock_irq(&ehci->lock); + port = HCS_N_PORTS(ehci->hcs_params); + while (port--) { + u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port]; + u32 t3; + + t3 = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); + t3 = ehci_readl(ehci, hostpc_reg); + ehci_dbg(ehci, "Port %d phy low-power mode %s\n", + port, (t3 & HOSTPC_PHCD) ? 
+ "succeeded" : "failed"); + } + spin_unlock_irq(&ehci->lock); + } + + /* Apparently some devices need a >= 1-uframe delay here */ + if (ehci->bus_suspended) + udelay(150); + + /* turn off now-idle HC */ + ehci_halt (ehci); + + spin_lock_irq(&ehci->lock); + if (ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_POLL_DEAD)) + ehci_handle_controller_death(ehci); + if (ehci->rh_state != EHCI_RH_RUNNING) + goto done; + ehci->rh_state = EHCI_RH_SUSPENDED; + + end_unlink_async(ehci); + unlink_empty_async_suspended(ehci); + ehci_handle_start_intr_unlinks(ehci); + ehci_handle_intr_unlinks(ehci); + end_free_itds(ehci); + + /* allow remote wakeup */ + mask = INTR_MASK; + if (!hcd->self.root_hub->do_remote_wakeup) + mask &= ~STS_PCD; + ehci_writel(ehci, mask, &ehci->regs->intr_enable); + ehci_readl(ehci, &ehci->regs->intr_enable); + + done: + ehci->next_statechange = jiffies + msecs_to_jiffies(10); + ehci->enabled_hrtimer_events = 0; + ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT; + spin_unlock_irq (&ehci->lock); + + hrtimer_cancel(&ehci->hrtimer); + return 0; +} + + +/* caller has locked the root hub, and should reset/reinit on error */ +static int ehci_bus_resume (struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + u32 temp; + u32 power_okay; + int i; + unsigned long resume_needed = 0; + + if (time_before (jiffies, ehci->next_statechange)) + msleep(5); + spin_lock_irq (&ehci->lock); + if (!HCD_HW_ACCESSIBLE(hcd) || ehci->shutdown) + goto shutdown; + + if (unlikely(ehci->debug)) { + if (!dbgp_reset_prep(hcd)) + ehci->debug = NULL; + else + dbgp_external_startup(hcd); + } + + /* Ideally and we've got a real resume here, and no port's power + * was lost. (For PCI, that means Vaux was maintained.) But we + * could instead be restoring a swsusp snapshot -- so that BIOS was + * the last user of the controller, not reset/pm hardware keeping + * state we gave to it. + */ + power_okay = ehci_readl(ehci, &ehci->regs->intr_enable); + ehci_dbg(ehci, "resume root hub%s\n", + power_okay ? "" : " after power loss"); + + /* at least some APM implementations will try to deliver + * IRQs right away, so delay them until we're ready. + */ + ehci_writel(ehci, 0, &ehci->regs->intr_enable); + + /* re-init operational registers */ + ehci_writel(ehci, 0, &ehci->regs->segment); + ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list); + ehci_writel(ehci, (u32) ehci->async->qh_dma, &ehci->regs->async_next); + + /* restore CMD_RUN, framelist size, and irq threshold */ + ehci->command |= CMD_RUN; + ehci_writel(ehci, ehci->command, &ehci->regs->command); + ehci->rh_state = EHCI_RH_RUNNING; + + /* + * According to Bugzilla #8190, the port status for some controllers + * will be wrong without a delay. At their wrong status, the port + * is enabled, but not suspended neither resumed. 
+ */ + i = HCS_N_PORTS(ehci->hcs_params); + while (i--) { + temp = ehci_readl(ehci, &ehci->regs->port_status[i]); + if ((temp & PORT_PE) && + !(temp & (PORT_SUSPEND | PORT_RESUME))) { + ehci_dbg(ehci, "Port status(0x%x) is wrong\n", temp); + spin_unlock_irq(&ehci->lock); + msleep(8); + spin_lock_irq(&ehci->lock); + break; + } + } + + if (ehci->shutdown) + goto shutdown; + + /* clear phy low-power mode before resume */ + if (ehci->bus_suspended && ehci->has_tdi_phy_lpm) { + i = HCS_N_PORTS(ehci->hcs_params); + while (i--) { + if (test_bit(i, &ehci->bus_suspended)) { + u32 __iomem *hostpc_reg = + &ehci->regs->hostpc[i]; + + temp = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp & ~HOSTPC_PHCD, + hostpc_reg); + } + } + spin_unlock_irq(&ehci->lock); + msleep(5); + spin_lock_irq(&ehci->lock); + if (ehci->shutdown) + goto shutdown; + } + + /* manually resume the ports we suspended during bus_suspend() */ + i = HCS_N_PORTS (ehci->hcs_params); + while (i--) { + temp = ehci_readl(ehci, &ehci->regs->port_status [i]); + temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); + if (test_bit(i, &ehci->bus_suspended) && + (temp & PORT_SUSPEND)) { + temp |= PORT_RESUME; + set_bit(i, &resume_needed); + } + ehci_writel(ehci, temp, &ehci->regs->port_status [i]); + } + + /* + * msleep for USB_RESUME_TIMEOUT ms only if code is trying to resume + * port + */ + if (resume_needed) { + spin_unlock_irq(&ehci->lock); + msleep(USB_RESUME_TIMEOUT); + spin_lock_irq(&ehci->lock); + if (ehci->shutdown) + goto shutdown; + } + + i = HCS_N_PORTS (ehci->hcs_params); + while (i--) { + temp = ehci_readl(ehci, &ehci->regs->port_status [i]); + if (test_bit(i, &resume_needed)) { + temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME); + ehci_writel(ehci, temp, &ehci->regs->port_status [i]); + } + } + + ehci->next_statechange = jiffies + msecs_to_jiffies(5); + spin_unlock_irq(&ehci->lock); + + ehci_handover_companion_ports(ehci); + + /* Now we can safely re-enable irqs */ + spin_lock_irq(&ehci->lock); + if (ehci->shutdown) + goto shutdown; + ehci_writel(ehci, INTR_MASK, &ehci->regs->intr_enable); + (void) ehci_readl(ehci, &ehci->regs->intr_enable); + spin_unlock_irq(&ehci->lock); + + return 0; + + shutdown: + spin_unlock_irq(&ehci->lock); + return -ESHUTDOWN; +} + +#else + +#define ehci_bus_suspend NULL +#define ehci_bus_resume NULL + +#endif /* CONFIG_PM */ + +/*-------------------------------------------------------------------------*/ + +/* + * Sets the owner of a port + */ +static void set_owner(struct ehci_hcd *ehci, int portnum, int new_owner) +{ + u32 __iomem *status_reg; + u32 port_status; + int try; + + status_reg = &ehci->regs->port_status[portnum]; + + /* + * The controller won't set the OWNER bit if the port is + * enabled, so this loop will sometimes require at least two + * iterations: one to disable the port and one to set OWNER. 
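+ *
+ * [Editor's note: note the PORT_RWC_BITS masking in the loop below. The
+ *  change bits in PORTSC are write-1-to-clear, so any read-modify-write
+ *  must mask them out or it will acknowledge changes as a side effect.
+ *  With hypothetical read_portsc()/write_portsc() accessors:
+ *
+ *	u32 v = read_portsc(reg) & ~PORT_RWC_BITS;	// keep W1C bits 0
+ *	v ^= PORT_OWNER;				// flip ownership
+ *	write_portsc(reg, v);
+ *  ]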
+ */ + for (try = 4; try > 0; --try) { + spin_lock_irq(&ehci->lock); + port_status = ehci_readl(ehci, status_reg); + if ((port_status & PORT_OWNER) == new_owner + || (port_status & (PORT_OWNER | PORT_CONNECT)) + == 0) + try = 0; + else { + port_status ^= PORT_OWNER; + port_status &= ~(PORT_PE | PORT_RWC_BITS); + ehci_writel(ehci, port_status, status_reg); + } + spin_unlock_irq(&ehci->lock); + if (try > 1) + msleep(5); + } +} + +/*-------------------------------------------------------------------------*/ + +static int check_reset_complete ( + struct ehci_hcd *ehci, + int index, + u32 __iomem *status_reg, + int port_status +) { + if (!(port_status & PORT_CONNECT)) + return port_status; + + /* if reset finished and it's still not enabled -- handoff */ + if (!(port_status & PORT_PE)) { + + /* with integrated TT, there's nobody to hand it to! */ + if (ehci_is_TDI(ehci)) { + ehci_dbg (ehci, + "Failed to enable port %d on root hub TT\n", + index+1); + return port_status; + } + + ehci_dbg (ehci, "port %d full speed --> companion\n", + index + 1); + + // what happens if HCS_N_CC(params) == 0 ? + port_status |= PORT_OWNER; + port_status &= ~PORT_RWC_BITS; + ehci_writel(ehci, port_status, status_reg); + + /* ensure 440EPX ohci controller state is operational */ + if (ehci->has_amcc_usb23) + set_ohci_hcfs(ehci, 1); + } else { + ehci_dbg(ehci, "port %d reset complete, port enabled\n", + index + 1); + /* ensure 440EPx ohci controller state is suspended */ + if (ehci->has_amcc_usb23) + set_ohci_hcfs(ehci, 0); + } + + return port_status; +} + +/*-------------------------------------------------------------------------*/ + + +/* build "status change" packet (one or two bytes) from HC registers */ + +static int +ehci_hub_status_data (struct usb_hcd *hcd, char *buf) +{ + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + u32 temp, status; + u32 mask; + int ports, i, retval = 1; + unsigned long flags; + u32 ppcd = ~0; + + /* init status to no-changes */ + buf [0] = 0; + ports = HCS_N_PORTS (ehci->hcs_params); + if (ports > 7) { + buf [1] = 0; + retval++; + } + + /* Inform the core about resumes-in-progress by returning + * a non-zero value even if there are no status changes. + */ + status = ehci->resuming_ports; + + /* Some boards (mostly VIA?) report bogus overcurrent indications, + * causing massive log spam unless we completely ignore them. It + * may be relevant that VIA VT8235 controllers, where PORT_POWER is + * always set, seem to clear PORT_OCC and PORT_CSC when writing to + * PORT_POWER; that's surprising, but maybe within-spec. + */ + if (!ignore_oc) + mask = PORT_CSC | PORT_PEC | PORT_OCC; + else + mask = PORT_CSC | PORT_PEC; + // PORT_RESUME from hardware ~= PORT_STAT_C_SUSPEND + + /* no hub change reports (bit 0) for now (power, ...) */ + + /* port N changes (bit N)? */ + spin_lock_irqsave (&ehci->lock, flags); + + /* get per-port change detect bits */ + if (ehci->has_ppcd) + ppcd = ehci_readl(ehci, &ehci->regs->status) >> 16; + + for (i = 0; i < ports; i++) { + /* leverage per-port change bits feature */ + if (ppcd & (1 << i)) + temp = ehci_readl(ehci, &ehci->regs->port_status[i]); + else + temp = 0; + + /* + * Return status information even for ports with OWNER set. + * Otherwise hub_wq wouldn't see the disconnect event when a + * high-speed device is switched over to the companion + * controller by the user. 
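+ *
+ * [Editor's note: the buf[] updates below build the hub status-change
+ *  bitmap: bit 0 is the hub itself, bit N (1-based) is port N, spilling
+ *  into a second byte past 7 ports. The two-branch form is equivalent to:
+ *
+ *	buf[(i + 1) / 8] |= 1 << ((i + 1) % 8);	// port index i is 0-based
+ *  ]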
+ */ + + if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend) + || (ehci->reset_done[i] && time_after_eq( + jiffies, ehci->reset_done[i]))) { + if (i < 7) + buf [0] |= 1 << (i + 1); + else + buf [1] |= 1 << (i - 7); + status = STS_PCD; + } + } + + /* If a resume is in progress, make sure it can finish */ + if (ehci->resuming_ports) + mod_timer(&hcd->rh_timer, jiffies + msecs_to_jiffies(25)); + + spin_unlock_irqrestore (&ehci->lock, flags); + return status ? retval : 0; +} + +/*-------------------------------------------------------------------------*/ + +static void +ehci_hub_descriptor ( + struct ehci_hcd *ehci, + struct usb_hub_descriptor *desc +) { + int ports = HCS_N_PORTS (ehci->hcs_params); + u16 temp; + + desc->bDescriptorType = USB_DT_HUB; + desc->bPwrOn2PwrGood = 10; /* ehci 1.0, 2.3.9 says 20ms max */ + desc->bHubContrCurrent = 0; + + desc->bNbrPorts = ports; + temp = 1 + (ports / 8); + desc->bDescLength = 7 + 2 * temp; + + /* two bitmaps: ports removable, and usb 1.0 legacy PortPwrCtrlMask */ + memset(&desc->u.hs.DeviceRemovable[0], 0, temp); + memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp); + + temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */ + if (HCS_PPC (ehci->hcs_params)) + temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */ + else + temp |= HUB_CHAR_NO_LPSM; /* no power switching */ +#if 0 +// re-enable when we support USB_PORT_FEAT_INDICATOR below. + if (HCS_INDICATOR (ehci->hcs_params)) + temp |= HUB_CHAR_PORTIND; /* per-port indicators (LEDs) */ +#endif + desc->wHubCharacteristics = cpu_to_le16(temp); +} + +/*-------------------------------------------------------------------------*/ +#ifdef CONFIG_USB_HCD_TEST_MODE + +#define EHSET_TEST_SINGLE_STEP_SET_FEATURE 0x06 + +static void usb_ehset_completion(struct urb *urb) +{ + struct completion *done = urb->context; + + complete(done); +} +static int submit_single_step_set_feature( + struct usb_hcd *hcd, + struct urb *urb, + int is_setup +); + +/* + * Allocate and initialize a control URB. This request will be used by the + * EHSET SINGLE_STEP_SET_FEATURE test in which the DATA and STATUS stages + * of the GetDescriptor request are sent 15 seconds after the SETUP stage. + * Return NULL if failed. + */ +static struct urb *request_single_step_set_feature_urb( + struct usb_device *udev, + void *dr, + void *buf, + struct completion *done +) { + struct urb *urb; + struct usb_hcd *hcd = bus_to_hcd(udev->bus); + struct usb_host_endpoint *ep; + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return NULL; + + urb->pipe = usb_rcvctrlpipe(udev, 0); + ep = (usb_pipein(urb->pipe) ? 
udev->ep_in : udev->ep_out) + [usb_pipeendpoint(urb->pipe)]; + if (!ep) { + usb_free_urb(urb); + return NULL; + } + + urb->ep = ep; + urb->dev = udev; + urb->setup_packet = (void *)dr; + urb->transfer_buffer = buf; + urb->transfer_buffer_length = USB_DT_DEVICE_SIZE; + urb->complete = usb_ehset_completion; + urb->status = -EINPROGRESS; + urb->actual_length = 0; + urb->transfer_flags = URB_DIR_IN; + usb_get_urb(urb); + atomic_inc(&urb->use_count); + atomic_inc(&urb->dev->urbnum); + urb->setup_dma = dma_map_single( + hcd->self.controller, + urb->setup_packet, + sizeof(struct usb_ctrlrequest), + DMA_TO_DEVICE); + urb->transfer_dma = dma_map_single( + hcd->self.controller, + urb->transfer_buffer, + urb->transfer_buffer_length, + DMA_FROM_DEVICE); + urb->context = done; + return urb; +} + +static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port) +{ + int retval = -ENOMEM; + struct usb_ctrlrequest *dr; + struct urb *urb; + struct usb_device *udev; + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct usb_device_descriptor *buf; + DECLARE_COMPLETION_ONSTACK(done); + + /* Obtain udev of the rhub's child port */ + udev = usb_hub_find_child(hcd->self.root_hub, port); + if (!udev) { + ehci_err(ehci, "No device attached to the RootHub\n"); + return -ENODEV; + } + buf = kmalloc(USB_DT_DEVICE_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); + if (!dr) { + kfree(buf); + return -ENOMEM; + } + + /* Fill Setup packet for GetDescriptor */ + dr->bRequestType = USB_DIR_IN; + dr->bRequest = USB_REQ_GET_DESCRIPTOR; + dr->wValue = cpu_to_le16(USB_DT_DEVICE << 8); + dr->wIndex = 0; + dr->wLength = cpu_to_le16(USB_DT_DEVICE_SIZE); + urb = request_single_step_set_feature_urb(udev, dr, buf, &done); + if (!urb) + goto cleanup; + + /* Submit just the SETUP stage */ + retval = submit_single_step_set_feature(hcd, urb, 1); + if (retval) + goto out1; + if (!wait_for_completion_timeout(&done, msecs_to_jiffies(2000))) { + usb_kill_urb(urb); + retval = -ETIMEDOUT; + ehci_err(ehci, "%s SETUP stage timed out on ep0\n", __func__); + goto out1; + } + msleep(15 * 1000); + + /* Complete remaining DATA and STATUS stages using the same URB */ + urb->status = -EINPROGRESS; + usb_get_urb(urb); + atomic_inc(&urb->use_count); + atomic_inc(&urb->dev->urbnum); + retval = submit_single_step_set_feature(hcd, urb, 0); + if (!retval && !wait_for_completion_timeout(&done, + msecs_to_jiffies(2000))) { + usb_kill_urb(urb); + retval = -ETIMEDOUT; + ehci_err(ehci, "%s IN stage timed out on ep0\n", __func__); + } +out1: + usb_free_urb(urb); +cleanup: + kfree(dr); + kfree(buf); + return retval; +} +#endif /* CONFIG_USB_HCD_TEST_MODE */ +/*-------------------------------------------------------------------------*/ + +int ehci_hub_control( + struct usb_hcd *hcd, + u16 typeReq, + u16 wValue, + u16 wIndex, + char *buf, + u16 wLength +) { + struct ehci_hcd *ehci = hcd_to_ehci (hcd); + int ports = HCS_N_PORTS (ehci->hcs_params); + u32 __iomem *status_reg = &ehci->regs->port_status[ + (wIndex & 0xff) - 1]; + u32 __iomem *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1]; + u32 temp, temp1, status; + unsigned long flags; + int retval = 0; + unsigned selector; + + /* + * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. + * HCS_INDICATOR may say we can change LEDs to off/amber/green. + * (track current state ourselves) ... blink for diagnostics, + * power, "this is the one", etc. EHCI spec supports this. 
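+ * (Editor's note: the matching descriptor bit, HUB_CHAR_PORTIND in
+ * ehci_hub_descriptor() above, is likewise compiled out under #if 0; both
+ * would need to be re-enabled together before hub_wq could drive the LEDs.)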
+ */ + + spin_lock_irqsave (&ehci->lock, flags); + switch (typeReq) { + case ClearHubFeature: + switch (wValue) { + case C_HUB_LOCAL_POWER: + case C_HUB_OVER_CURRENT: + /* no hub-wide feature/status flags */ + break; + default: + goto error; + } + break; + case ClearPortFeature: + if (!wIndex || wIndex > ports) + goto error; + wIndex--; + temp = ehci_readl(ehci, status_reg); + temp &= ~PORT_RWC_BITS; + + /* + * Even if OWNER is set, so the port is owned by the + * companion controller, hub_wq needs to be able to clear + * the port-change status bits (especially + * USB_PORT_STAT_C_CONNECTION). + */ + + switch (wValue) { + case USB_PORT_FEAT_ENABLE: + ehci_writel(ehci, temp & ~PORT_PE, status_reg); + break; + case USB_PORT_FEAT_C_ENABLE: + ehci_writel(ehci, temp | PORT_PEC, status_reg); + break; + case USB_PORT_FEAT_SUSPEND: + if (temp & PORT_RESET) + goto error; + if (ehci->no_selective_suspend) + break; +#ifdef CONFIG_USB_OTG + if ((hcd->self.otg_port == (wIndex + 1)) + && hcd->self.b_hnp_enable) { + otg_start_hnp(hcd->usb_phy->otg); + break; + } +#endif + if (!(temp & PORT_SUSPEND)) + break; + if ((temp & PORT_PE) == 0) + goto error; + + /* clear phy low-power mode before resume */ + if (ehci->has_tdi_phy_lpm) { + temp1 = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp1 & ~HOSTPC_PHCD, + hostpc_reg); + spin_unlock_irqrestore(&ehci->lock, flags); + msleep(5);/* wait to leave low-power mode */ + spin_lock_irqsave(&ehci->lock, flags); + } + /* resume signaling for 20 msec */ + temp &= ~PORT_WAKE_BITS; + ehci_writel(ehci, temp | PORT_RESUME, status_reg); + ehci->reset_done[wIndex] = jiffies + + msecs_to_jiffies(USB_RESUME_TIMEOUT); + set_bit(wIndex, &ehci->resuming_ports); + usb_hcd_start_port_resume(&hcd->self, wIndex); + break; + case USB_PORT_FEAT_C_SUSPEND: + clear_bit(wIndex, &ehci->port_c_suspend); + break; + case USB_PORT_FEAT_POWER: + if (HCS_PPC(ehci->hcs_params)) { + spin_unlock_irqrestore(&ehci->lock, flags); + ehci_port_power(ehci, wIndex, false); + spin_lock_irqsave(&ehci->lock, flags); + } + break; + case USB_PORT_FEAT_C_CONNECTION: + ehci_writel(ehci, temp | PORT_CSC, status_reg); + break; + case USB_PORT_FEAT_C_OVER_CURRENT: + ehci_writel(ehci, temp | PORT_OCC, status_reg); + break; + case USB_PORT_FEAT_C_RESET: + /* GetPortStatus clears reset */ + break; + default: + goto error; + } + ehci_readl(ehci, &ehci->regs->command); /* unblock posted write */ + break; + case GetHubDescriptor: + ehci_hub_descriptor (ehci, (struct usb_hub_descriptor *) + buf); + break; + case GetHubStatus: + /* no hub-wide feature/status flags */ + memset (buf, 0, 4); + //cpu_to_le32s ((u32 *) buf); + break; + case GetPortStatus: + if (!wIndex || wIndex > ports) + goto error; + wIndex--; + status = 0; + temp = ehci_readl(ehci, status_reg); + + // wPortChange bits + if (temp & PORT_CSC) + status |= USB_PORT_STAT_C_CONNECTION << 16; + if (temp & PORT_PEC) + status |= USB_PORT_STAT_C_ENABLE << 16; + + if ((temp & PORT_OCC) && !ignore_oc){ + status |= USB_PORT_STAT_C_OVERCURRENT << 16; + + /* + * Hubs should disable port power on over-current. + * However, not all EHCI implementations do this + * automatically, even if they _do_ support per-port + * power switching; they're allowed to just limit the + * current. hub_wq will turn the power back on. 
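+ * (Editor's note: that is what the block below does: when the over-current
+ * condition is still latched (PORT_OC), or the controller needs a full
+ * power cycle (need_oc_pp_cycle), and HCS_PPC reports per-port power
+ * switching, the port is powered off via ehci_port_power(); hub_wq later
+ * restores it with SetPortFeature(USB_PORT_FEAT_POWER).)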
+ */ + if (((temp & PORT_OC) || (ehci->need_oc_pp_cycle)) + && HCS_PPC(ehci->hcs_params)) { + spin_unlock_irqrestore(&ehci->lock, flags); + ehci_port_power(ehci, wIndex, false); + spin_lock_irqsave(&ehci->lock, flags); + temp = ehci_readl(ehci, status_reg); + } + } + + /* no reset or resume pending */ + if (!ehci->reset_done[wIndex]) { + + /* Remote Wakeup received? */ + if (temp & PORT_RESUME) { + /* resume signaling for 20 msec */ + ehci->reset_done[wIndex] = jiffies + + msecs_to_jiffies(20); + usb_hcd_start_port_resume(&hcd->self, wIndex); + set_bit(wIndex, &ehci->resuming_ports); + /* check the port again */ + mod_timer(&ehci_to_hcd(ehci)->rh_timer, + ehci->reset_done[wIndex]); + } + + /* reset or resume not yet complete */ + } else if (!time_after_eq(jiffies, ehci->reset_done[wIndex])) { + ; /* wait until it is complete */ + + /* resume completed */ + } else if (test_bit(wIndex, &ehci->resuming_ports)) { + clear_bit(wIndex, &ehci->suspended_ports); + set_bit(wIndex, &ehci->port_c_suspend); + ehci->reset_done[wIndex] = 0; + usb_hcd_end_port_resume(&hcd->self, wIndex); + + /* stop resume signaling */ + temp &= ~(PORT_RWC_BITS | PORT_SUSPEND | PORT_RESUME); + ehci_writel(ehci, temp, status_reg); + clear_bit(wIndex, &ehci->resuming_ports); + retval = ehci_handshake(ehci, status_reg, + PORT_RESUME, 0, 2000 /* 2msec */); + if (retval != 0) { + ehci_err(ehci, "port %d resume error %d\n", + wIndex + 1, retval); + goto error; + } + temp = ehci_readl(ehci, status_reg); + + /* whoever resets must GetPortStatus to complete it!! */ + } else { + status |= USB_PORT_STAT_C_RESET << 16; + ehci->reset_done [wIndex] = 0; + + /* force reset to complete */ + ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET), + status_reg); + /* REVISIT: some hardware needs 550+ usec to clear + * this bit; seems too long to spin routinely... + */ + retval = ehci_handshake(ehci, status_reg, + PORT_RESET, 0, 1000); + if (retval != 0) { + ehci_err (ehci, "port %d reset error %d\n", + wIndex + 1, retval); + goto error; + } + + /* see what we found out */ + temp = check_reset_complete (ehci, wIndex, status_reg, + ehci_readl(ehci, status_reg)); + } + + /* transfer dedicated ports to the companion hc */ + if ((temp & PORT_CONNECT) && + test_bit(wIndex, &ehci->companion_ports)) { + temp &= ~PORT_RWC_BITS; + temp |= PORT_OWNER; + ehci_writel(ehci, temp, status_reg); + ehci_dbg(ehci, "port %d --> companion\n", wIndex + 1); + temp = ehci_readl(ehci, status_reg); + } + + /* + * Even if OWNER is set, there's no harm letting hub_wq + * see the wPortStatus values (they should all be 0 except + * for PORT_POWER anyway). 
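+ * (Editor's note: the block below translates raw PORTSC bits into the
+ * wPortStatus format hub_wq expects, e.g. PORT_CONNECT becomes
+ * USB_PORT_STAT_CONNECTION; on controllers with a HOSTPC register the port
+ * speed is decoded from there rather than from PORTSC itself.)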
+ */ + + if (temp & PORT_CONNECT) { + status |= USB_PORT_STAT_CONNECTION; + // status may be from integrated TT + if (ehci->has_hostpc) { + temp1 = ehci_readl(ehci, hostpc_reg); + status |= ehci_port_speed(ehci, temp1); + } else + status |= ehci_port_speed(ehci, temp); + } + if (temp & PORT_PE) + status |= USB_PORT_STAT_ENABLE; + + /* maybe the port was unsuspended without our knowledge */ + if (temp & (PORT_SUSPEND|PORT_RESUME)) { + status |= USB_PORT_STAT_SUSPEND; + } else if (test_bit(wIndex, &ehci->suspended_ports)) { + clear_bit(wIndex, &ehci->suspended_ports); + clear_bit(wIndex, &ehci->resuming_ports); + ehci->reset_done[wIndex] = 0; + if (temp & PORT_PE) + set_bit(wIndex, &ehci->port_c_suspend); + usb_hcd_end_port_resume(&hcd->self, wIndex); + } + + if (temp & PORT_OC) + status |= USB_PORT_STAT_OVERCURRENT; + if (temp & PORT_RESET) + status |= USB_PORT_STAT_RESET; + if (temp & PORT_POWER) + status |= USB_PORT_STAT_POWER; + if (test_bit(wIndex, &ehci->port_c_suspend)) + status |= USB_PORT_STAT_C_SUSPEND << 16; + + if (status & ~0xffff) /* only if wPortChange is interesting */ + dbg_port(ehci, "GetStatus", wIndex + 1, temp); + put_unaligned_le32(status, buf); + break; + case SetHubFeature: + switch (wValue) { + case C_HUB_LOCAL_POWER: + case C_HUB_OVER_CURRENT: + /* no hub-wide feature/status flags */ + break; + default: + goto error; + } + break; + case SetPortFeature: + selector = wIndex >> 8; + wIndex &= 0xff; + if (unlikely(ehci->debug)) { + /* If the debug port is active any port + * feature requests should get denied */ + if (wIndex == HCS_DEBUG_PORT(ehci->hcs_params) && + (readl(&ehci->debug->control) & DBGP_ENABLED)) { + retval = -ENODEV; + goto error_exit; + } + } + if (!wIndex || wIndex > ports) + goto error; + wIndex--; + temp = ehci_readl(ehci, status_reg); + if (temp & PORT_OWNER) + break; + + temp &= ~PORT_RWC_BITS; + switch (wValue) { + case USB_PORT_FEAT_SUSPEND: + if (ehci->no_selective_suspend) + break; + if ((temp & PORT_PE) == 0 + || (temp & PORT_RESET) != 0) + goto error; + + /* After above check the port must be connected. + * Set appropriate bit thus could put phy into low power + * mode if we have tdi_phy_lpm feature + */ + temp &= ~PORT_WKCONN_E; + temp |= PORT_WKDISC_E | PORT_WKOC_E; + ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); + if (ehci->has_tdi_phy_lpm) { + spin_unlock_irqrestore(&ehci->lock, flags); + msleep(5);/* 5ms for HCD enter low pwr mode */ + spin_lock_irqsave(&ehci->lock, flags); + temp1 = ehci_readl(ehci, hostpc_reg); + ehci_writel(ehci, temp1 | HOSTPC_PHCD, + hostpc_reg); + temp1 = ehci_readl(ehci, hostpc_reg); + ehci_dbg(ehci, "Port%d phy low pwr mode %s\n", + wIndex, (temp1 & HOSTPC_PHCD) ? + "succeeded" : "failed"); + } + set_bit(wIndex, &ehci->suspended_ports); + break; + case USB_PORT_FEAT_POWER: + if (HCS_PPC(ehci->hcs_params)) { + spin_unlock_irqrestore(&ehci->lock, flags); + ehci_port_power(ehci, wIndex, true); + spin_lock_irqsave(&ehci->lock, flags); + } + break; + case USB_PORT_FEAT_RESET: + if (temp & (PORT_SUSPEND|PORT_RESUME)) + goto error; + /* line status bits may report this as low speed, + * which can be fine if this root hub has a + * transaction translator built in. 
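+ * (Editor's note: hence the branch below: a connected-but-disabled port
+ * whose line state reads full/low speed (PORT_USB11) is handed off by
+ * setting PORT_OWNER, unless this is a TDI core with an integrated TT;
+ * otherwise PORT_RESET is asserted and the caller must follow up with
+ * GetPortStatus after the 50 ms root-hub reset time noted below.)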
+ */ + if ((temp & (PORT_PE|PORT_CONNECT)) == PORT_CONNECT + && !ehci_is_TDI(ehci) + && PORT_USB11 (temp)) { + ehci_dbg (ehci, + "port %d low speed --> companion\n", + wIndex + 1); + temp |= PORT_OWNER; + } else { + temp |= PORT_RESET; + temp &= ~PORT_PE; + + /* + * caller must wait, then call GetPortStatus + * usb 2.0 spec says 50 ms resets on root + */ + ehci->reset_done [wIndex] = jiffies + + msecs_to_jiffies (50); + + /* + * Force full-speed connect for FSL high-speed + * erratum; disable HS Chirp by setting PFSC bit + */ + if (ehci_has_fsl_hs_errata(ehci)) + temp |= (1 << PORTSC_FSL_PFSC); + } + ehci_writel(ehci, temp, status_reg); + break; + + /* For downstream facing ports (these): one hub port is put + * into test mode according to USB2 11.24.2.13, then the hub + * must be reset (which for root hub now means rmmod+modprobe, + * or else system reboot). See EHCI 2.3.9 and 4.14 for info + * about the EHCI-specific stuff. + */ + case USB_PORT_FEAT_TEST: +#ifdef CONFIG_USB_HCD_TEST_MODE + if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) { + spin_unlock_irqrestore(&ehci->lock, flags); + retval = ehset_single_step_set_feature(hcd, + wIndex + 1); + spin_lock_irqsave(&ehci->lock, flags); + break; + } +#endif + if (!selector || selector > 5) + goto error; + spin_unlock_irqrestore(&ehci->lock, flags); + ehci_quiesce(ehci); + spin_lock_irqsave(&ehci->lock, flags); + + /* Put all enabled ports into suspend */ + while (ports--) { + u32 __iomem *sreg = + &ehci->regs->port_status[ports]; + + temp = ehci_readl(ehci, sreg) & ~PORT_RWC_BITS; + if (temp & PORT_PE) + ehci_writel(ehci, temp | PORT_SUSPEND, + sreg); + } + + spin_unlock_irqrestore(&ehci->lock, flags); + ehci_halt(ehci); + spin_lock_irqsave(&ehci->lock, flags); + + temp = ehci_readl(ehci, status_reg); + temp |= selector << 16; + ehci_writel(ehci, temp, status_reg); + break; + + default: + goto error; + } + ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ + break; + + default: +error: + /* "stall" on error */ + retval = -EPIPE; + } +error_exit: + spin_unlock_irqrestore (&ehci->lock, flags); + return retval; +} +EXPORT_SYMBOL_GPL(ehci_hub_control); + +static void ehci_relinquish_port(struct usb_hcd *hcd, int portnum) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + + if (ehci_is_TDI(ehci)) + return; + set_owner(ehci, --portnum, PORT_OWNER); +} + +static int ehci_port_handed_over(struct usb_hcd *hcd, int portnum) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + u32 __iomem *reg; + + if (ehci_is_TDI(ehci)) + return 0; + reg = &ehci->regs->port_status[portnum - 1]; + return ehci_readl(ehci, reg) & PORT_OWNER; +} + +static int ehci_port_power(struct ehci_hcd *ehci, int portnum, bool enable) +{ + struct usb_hcd *hcd = ehci_to_hcd(ehci); + u32 __iomem *status_reg = &ehci->regs->port_status[portnum]; + u32 temp = ehci_readl(ehci, status_reg) & ~PORT_RWC_BITS; + + if (enable) + ehci_writel(ehci, temp | PORT_POWER, status_reg); + else + ehci_writel(ehci, temp & ~PORT_POWER, status_reg); + + if (hcd->driver->port_power) + hcd->driver->port_power(hcd, portnum, enable); + + return 0; +} diff --git a/addons/ehci-pci/src/4.4.180/ehci-mem.c b/addons/ehci-pci/src/4.4.180/ehci-mem.c new file mode 100644 index 00000000..b6205fac --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/ehci-mem.c @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2001 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either 
version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* this file is part of ehci-hcd.c */ + +/*-------------------------------------------------------------------------*/ + +/* + * There's basically three types of memory: + * - data used only by the HCD ... kmalloc is fine + * - async and periodic schedules, shared by HC and HCD ... these + * need to use dma_pool or dma_alloc_coherent + * - driver buffers, read/written by HC ... single shot DMA mapped + * + * There's also "register" data (e.g. PCI or SOC), which is memory mapped. + * No memory seen by this driver is pageable. + */ + +/*-------------------------------------------------------------------------*/ + +/* Allocate the key transfer structures from the previously allocated pool */ + +static inline void ehci_qtd_init(struct ehci_hcd *ehci, struct ehci_qtd *qtd, + dma_addr_t dma) +{ + memset (qtd, 0, sizeof *qtd); + qtd->qtd_dma = dma; + qtd->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); + qtd->hw_next = EHCI_LIST_END(ehci); + qtd->hw_alt_next = EHCI_LIST_END(ehci); + INIT_LIST_HEAD (&qtd->qtd_list); +} + +static struct ehci_qtd *ehci_qtd_alloc (struct ehci_hcd *ehci, gfp_t flags) +{ + struct ehci_qtd *qtd; + dma_addr_t dma; + + qtd = dma_pool_alloc (ehci->qtd_pool, flags, &dma); + if (qtd != NULL) { + ehci_qtd_init(ehci, qtd, dma); + } + return qtd; +} + +static inline void ehci_qtd_free (struct ehci_hcd *ehci, struct ehci_qtd *qtd) +{ + dma_pool_free (ehci->qtd_pool, qtd, qtd->qtd_dma); +} + + +static void qh_destroy(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + /* clean qtds first, and know this is not linked */ + if (!list_empty (&qh->qtd_list) || qh->qh_next.ptr) { + ehci_dbg (ehci, "unused qh not empty!\n"); + BUG (); + } + if (qh->dummy) + ehci_qtd_free (ehci, qh->dummy); + dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma); + kfree(qh); +} + +static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags) +{ + struct ehci_qh *qh; + dma_addr_t dma; + + qh = kzalloc(sizeof *qh, GFP_ATOMIC); + if (!qh) + goto done; + qh->hw = (struct ehci_qh_hw *) + dma_pool_alloc(ehci->qh_pool, flags, &dma); + if (!qh->hw) + goto fail; + memset(qh->hw, 0, sizeof *qh->hw); + qh->qh_dma = dma; + // INIT_LIST_HEAD (&qh->qh_list); + INIT_LIST_HEAD (&qh->qtd_list); + INIT_LIST_HEAD(&qh->unlink_node); + + /* dummy td enables safe urb queuing */ + qh->dummy = ehci_qtd_alloc (ehci, flags); + if (qh->dummy == NULL) { + ehci_dbg (ehci, "no dummy td\n"); + goto fail1; + } +done: + return qh; +fail1: + dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma); +fail: + kfree(qh); + return NULL; +} + +/*-------------------------------------------------------------------------*/ + +/* The queue heads and transfer descriptors are managed from pools tied + * to each of the "per device" structures. + * This is the initialisation and cleanup code. 
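+ * (Editor's note: concretely, ehci_mem_init() below creates four dma_pools
+ * -- qtd, qh, itd and sitd -- each 32-byte aligned and forbidden to cross a
+ * 4 KiB boundary, allocates the hardware periodic table with
+ * dma_alloc_coherent(), and shadows it in the kcalloc()'d pshadow array.)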
+ */ + +static void ehci_mem_cleanup (struct ehci_hcd *ehci) +{ + if (ehci->async) + qh_destroy(ehci, ehci->async); + ehci->async = NULL; + + if (ehci->dummy) + qh_destroy(ehci, ehci->dummy); + ehci->dummy = NULL; + + /* DMA consistent memory and pools */ + if (ehci->qtd_pool) + dma_pool_destroy (ehci->qtd_pool); + ehci->qtd_pool = NULL; + + if (ehci->qh_pool) { + dma_pool_destroy (ehci->qh_pool); + ehci->qh_pool = NULL; + } + + if (ehci->itd_pool) + dma_pool_destroy (ehci->itd_pool); + ehci->itd_pool = NULL; + + if (ehci->sitd_pool) + dma_pool_destroy (ehci->sitd_pool); + ehci->sitd_pool = NULL; + + if (ehci->periodic) + dma_free_coherent (ehci_to_hcd(ehci)->self.controller, + ehci->periodic_size * sizeof (u32), + ehci->periodic, ehci->periodic_dma); + ehci->periodic = NULL; + + /* shadow periodic table */ + kfree(ehci->pshadow); + ehci->pshadow = NULL; +} + +/* remember to add cleanup code (above) if you add anything here */ +static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags) +{ + int i; + + /* QTDs for control/bulk/intr transfers */ + ehci->qtd_pool = dma_pool_create ("ehci_qtd", + ehci_to_hcd(ehci)->self.controller, + sizeof (struct ehci_qtd), + 32 /* byte alignment (for hw parts) */, + 4096 /* can't cross 4K */); + if (!ehci->qtd_pool) { + goto fail; + } + + /* QHs for control/bulk/intr transfers */ + ehci->qh_pool = dma_pool_create ("ehci_qh", + ehci_to_hcd(ehci)->self.controller, + sizeof(struct ehci_qh_hw), + 32 /* byte alignment (for hw parts) */, + 4096 /* can't cross 4K */); + if (!ehci->qh_pool) { + goto fail; + } + ehci->async = ehci_qh_alloc (ehci, flags); + if (!ehci->async) { + goto fail; + } + + /* ITD for high speed ISO transfers */ + ehci->itd_pool = dma_pool_create ("ehci_itd", + ehci_to_hcd(ehci)->self.controller, + sizeof (struct ehci_itd), + 32 /* byte alignment (for hw parts) */, + 4096 /* can't cross 4K */); + if (!ehci->itd_pool) { + goto fail; + } + + /* SITD for full/low speed split ISO transfers */ + ehci->sitd_pool = dma_pool_create ("ehci_sitd", + ehci_to_hcd(ehci)->self.controller, + sizeof (struct ehci_sitd), + 32 /* byte alignment (for hw parts) */, + 4096 /* can't cross 4K */); + if (!ehci->sitd_pool) { + goto fail; + } + + /* Hardware periodic table */ + ehci->periodic = (__le32 *) + dma_alloc_coherent (ehci_to_hcd(ehci)->self.controller, + ehci->periodic_size * sizeof(__le32), + &ehci->periodic_dma, flags); + if (ehci->periodic == NULL) { + goto fail; + } + + if (ehci->use_dummy_qh) { + struct ehci_qh_hw *hw; + ehci->dummy = ehci_qh_alloc(ehci, flags); + if (!ehci->dummy) + goto fail; + + hw = ehci->dummy->hw; + hw->hw_next = EHCI_LIST_END(ehci); + hw->hw_qtd_next = EHCI_LIST_END(ehci); + hw->hw_alt_next = EHCI_LIST_END(ehci); + ehci->dummy->hw = hw; + + for (i = 0; i < ehci->periodic_size; i++) + ehci->periodic[i] = cpu_to_hc32(ehci, + ehci->dummy->qh_dma); + } else { + for (i = 0; i < ehci->periodic_size; i++) + ehci->periodic[i] = EHCI_LIST_END(ehci); + } + + /* software shadow of hardware table */ + ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags); + if (ehci->pshadow != NULL) + return 0; + +fail: + ehci_dbg (ehci, "couldn't init memory\n"); + ehci_mem_cleanup (ehci); + return -ENOMEM; +} diff --git a/addons/ehci-pci/src/4.4.180/ehci-pci.c b/addons/ehci-pci/src/4.4.180/ehci-pci.c new file mode 100644 index 00000000..2a5d2fd7 --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/ehci-pci.c @@ -0,0 +1,435 @@ +/* + * EHCI HCD (Host Controller Driver) PCI Bus Glue. 
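+ * (Editor's note: this file binds the generic ehci-hcd core to EHCI-class
+ * PCI controllers and carries the per-vendor quirks applied in
+ * ehci_pci_setup() below.)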
+ *
+ * Copyright (c) 2000-2004 by David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <linux/usb/hcd.h>
+
+#include "ehci.h"
+#include "pci-quirks.h"
+
+#define DRIVER_DESC "EHCI PCI platform driver"
+
+static const char hcd_name[] = "ehci-pci";
+
+/* defined here to avoid adding to pci_ids.h for single instance use */
+#define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70
+
+/*-------------------------------------------------------------------------*/
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC 0x0939
+static inline bool is_intel_quark_x1000(struct pci_dev *pdev)
+{
+	return pdev->vendor == PCI_VENDOR_ID_INTEL &&
+		pdev->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC;
+}
+
+/*
+ * This is the list of PCI IDs for the devices that have EHCI USB class and
+ * specific drivers for that. One example is a ChipIdea device installed
+ * on some Intel MID platforms.
+ */
+static const struct pci_device_id bypass_pci_id_table[] = {
+	/* ChipIdea on Intel MID platform */
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0811), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0829), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe006), },
+	{}
+};
+
+static inline bool is_bypassed_id(struct pci_dev *pdev)
+{
+	return !!pci_match_id(bypass_pci_id_table, pdev);
+}
+
+/*
+ * 0x84 is the offset of the in/out threshold register,
+ * and it is the same offset as the register of 'hostpc'.
+ */
+#define intel_quark_x1000_insnreg01 hostpc
+
+/* Maximum usable threshold value is 0x7f dwords for both IN and OUT */
+#define INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD 0x007f007f
+
+/* called after powerup, by probe or system-pm "wakeup" */
+static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
+{
+	int retval;
+
+	/* we expect static quirk code to handle the "extended capabilities"
+	 * (currently just BIOS handoff) allowed starting with EHCI 0.96
+	 */
+
+	/* PCI Memory-Write-Invalidate cycle support is optional (uncommon) */
+	retval = pci_set_mwi(pdev);
+	if (!retval)
+		ehci_dbg(ehci, "MWI active\n");
+
+	/* Reset the threshold limit */
+	if (is_intel_quark_x1000(pdev)) {
+		/*
+		 * For the Intel QUARK X1000, raise the I/O threshold to the
+		 * maximum usable value in order to improve performance.
+		 */
+		ehci_writel(ehci, INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD,
+			ehci->regs->intel_quark_x1000_insnreg01);
+	}
+
+	return 0;
+}
+
+/* called during probe() after chip reset completes */
+static int ehci_pci_setup(struct usb_hcd *hcd)
+{
+	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
+	u32 temp;
+	int retval;
+
+	ehci->caps = hcd->regs;
+
+	/*
+	 * ehci_init() causes memory for DMA transfers to be
+	 * allocated. Thus, any vendor-specific workarounds based on
+	 * limiting the type of memory used for DMA transfers must
+	 * happen before ehci_setup() is called.
+	 *
+	 * Most other workarounds can be done either before or after
+	 * init and reset; they are located here too.
+	 */
+	switch (pdev->vendor) {
+	case PCI_VENDOR_ID_TOSHIBA_2:
+		/* celleb's companion chip */
+		if (pdev->device == 0x01b5) {
+#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO
+			ehci->big_endian_mmio = 1;
+#else
+			ehci_warn(ehci,
+				"unsupported big endian Toshiba quirk\n");
+#endif
+		}
+		break;
+	case PCI_VENDOR_ID_NVIDIA:
+		/* NVidia reports that certain chips don't handle
+		 * QH, ITD, or SITD addresses above 2GB. (But TD,
+		 * data buffer, and periodic schedule are normal.)
+		 */
+		switch (pdev->device) {
+		case 0x003c:	/* MCP04 */
+		case 0x005b:	/* CK804 */
+		case 0x00d8:	/* CK8 */
+		case 0x00e8:	/* CK8S */
+			if (pci_set_consistent_dma_mask(pdev,
+					DMA_BIT_MASK(31)) < 0)
+				ehci_warn(ehci, "can't enable NVidia "
+					"workaround for >2GB RAM\n");
+			break;
+
+		/* Some NForce2 chips have problems with selective suspend;
+		 * fixed in newer silicon.
+		 */
+		case 0x0068:
+			if (pdev->revision < 0xa4)
+				ehci->no_selective_suspend = 1;
+			break;
+		}
+		break;
+	case PCI_VENDOR_ID_INTEL:
+		if (pdev->device == PCI_DEVICE_ID_INTEL_CE4100_USB)
+			hcd->has_tt = 1;
+		break;
+	case PCI_VENDOR_ID_TDI:
+		if (pdev->device == PCI_DEVICE_ID_TDI_EHCI)
+			hcd->has_tt = 1;
+		break;
+	case PCI_VENDOR_ID_AMD:
+		/* AMD PLL quirk */
+		if (usb_amd_find_chipset_info())
+			ehci->amd_pll_fix = 1;
+		/* AMD8111 EHCI doesn't work, according to AMD errata */
+		if (pdev->device == 0x7463) {
+			ehci_info(ehci, "ignoring AMD8111 (errata)\n");
+			retval = -EIO;
+			goto done;
+		}
+
+		/*
+		 * EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
+		 * read/write memory space which does not belong to it when
+		 * there is a NULL pointer with the T-bit set to 1 in the frame
+		 * list table. To avoid the issue, the frame list link pointer
+		 * should always contain a valid pointer to an inactive qh.
+		 */
+		if (pdev->device == 0x7808) {
+			ehci->use_dummy_qh = 1;
+			ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n");
+		}
+		break;
+	case PCI_VENDOR_ID_VIA:
+		if (pdev->device == 0x3104 && (pdev->revision & 0xf0) == 0x60) {
+			u8 tmp;
+
+			/* The VT6212 defaults to a 1 usec EHCI sleep time which
+			 * hogs the PCI bus *badly*. Setting bit 5 of 0x4B makes
+			 * that sleep time use the conventional 10 usec.
+			 */
+			pci_read_config_byte(pdev, 0x4b, &tmp);
+			if (tmp & 0x20)
+				break;
+			pci_write_config_byte(pdev, 0x4b, tmp | 0x20);
+		}
+		break;
+	case PCI_VENDOR_ID_ATI:
+		/* AMD PLL quirk */
+		if (usb_amd_find_chipset_info())
+			ehci->amd_pll_fix = 1;
+
+		/*
+		 * EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
+		 * read/write memory space which does not belong to it when
+		 * there is a NULL pointer with the T-bit set to 1 in the frame
+		 * list table. To avoid the issue, the frame list link pointer
+		 * should always contain a valid pointer to an inactive qh.
+		 */
+		if (pdev->device == 0x4396) {
+			ehci->use_dummy_qh = 1;
+			ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI dummy qh workaround\n");
+		}
+		/* SB600 and old versions of SB700 have a bug in the EHCI
+		 * controller, which causes usb devices to lose response in
+		 * some cases.
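+		 * (Editor's note: the workaround below sets bit 3 of PCI
+		 * config register 0x53, and only when
+		 * usb_amd_hang_symptom_quirk() reports an affected chipset
+		 * revision.)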
+ */ + if ((pdev->device == 0x4386 || pdev->device == 0x4396) && + usb_amd_hang_symptom_quirk()) { + u8 tmp; + ehci_info(ehci, "applying AMD SB600/SB700 USB freeze workaround\n"); + pci_read_config_byte(pdev, 0x53, &tmp); + pci_write_config_byte(pdev, 0x53, tmp | (1<<3)); + } + break; + case PCI_VENDOR_ID_NETMOS: + /* MosChip frame-index-register bug */ + ehci_info(ehci, "applying MosChip frame-index workaround\n"); + ehci->frame_index_bug = 1; + break; + } + + /* optional debug port, normally in the first BAR */ + temp = pci_find_capability(pdev, PCI_CAP_ID_DBG); + if (temp) { + pci_read_config_dword(pdev, temp, &temp); + temp >>= 16; + if (((temp >> 13) & 7) == 1) { + u32 hcs_params = ehci_readl(ehci, + &ehci->caps->hcs_params); + + temp &= 0x1fff; + ehci->debug = hcd->regs + temp; + temp = ehci_readl(ehci, &ehci->debug->control); + ehci_info(ehci, "debug port %d%s\n", + HCS_DEBUG_PORT(hcs_params), + (temp & DBGP_ENABLED) ? " IN USE" : ""); + if (!(temp & DBGP_ENABLED)) + ehci->debug = NULL; + } + } + + retval = ehci_setup(hcd); + if (retval) + return retval; + + /* These workarounds need to be applied after ehci_setup() */ + switch (pdev->vendor) { + case PCI_VENDOR_ID_NEC: + ehci->need_io_watchdog = 0; + break; + case PCI_VENDOR_ID_INTEL: + ehci->need_io_watchdog = 0; + break; + case PCI_VENDOR_ID_NVIDIA: + switch (pdev->device) { + /* MCP89 chips on the MacBookAir3,1 give EPROTO when + * fetching device descriptors unless LPM is disabled. + * There are also intermittent problems enumerating + * devices with PPCD enabled. + */ + case 0x0d9d: + ehci_info(ehci, "disable ppcd for nvidia mcp89\n"); + ehci->has_ppcd = 0; + ehci->command &= ~CMD_PPCEE; + break; + } + break; + } + + /* at least the Genesys GL880S needs fixup here */ + temp = HCS_N_CC(ehci->hcs_params) * HCS_N_PCC(ehci->hcs_params); + temp &= 0x0f; + if (temp && HCS_N_PORTS(ehci->hcs_params) > temp) { + ehci_dbg(ehci, "bogus port configuration: " + "cc=%d x pcc=%d < ports=%d\n", + HCS_N_CC(ehci->hcs_params), + HCS_N_PCC(ehci->hcs_params), + HCS_N_PORTS(ehci->hcs_params)); + + switch (pdev->vendor) { + case 0x17a0: /* GENESYS */ + /* GL880S: should be PORTS=2 */ + temp |= (ehci->hcs_params & ~0xf); + ehci->hcs_params = temp; + break; + case PCI_VENDOR_ID_NVIDIA: + /* NF4: should be PCC=10 */ + break; + } + } + + /* Serial Bus Release Number is at PCI 0x60 offset */ + if (pdev->vendor == PCI_VENDOR_ID_STMICRO + && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST) + ; /* ConneXT has no sbrn register */ + else + pci_read_config_byte(pdev, 0x60, &ehci->sbrn); + + /* Keep this around for a while just in case some EHCI + * implementation uses legacy PCI PM support. This test + * can be removed on 17 Dec 2009 if the dev_warn() hasn't + * been triggered by then. 
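+ * (Editor's note: the check below reads the legacy wake register at PCI
+ * config offset 0x62 and, when bit 0 is set, emits the dev_warn() and
+ * marks the device wakeup-capable.)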
+ */ + if (!device_can_wakeup(&pdev->dev)) { + u16 port_wake; + + pci_read_config_word(pdev, 0x62, &port_wake); + if (port_wake & 0x0001) { + dev_warn(&pdev->dev, "Enabling legacy PCI PM\n"); + device_set_wakeup_capable(&pdev->dev, 1); + } + } + +#ifdef CONFIG_PM + if (ehci->no_selective_suspend && device_can_wakeup(&pdev->dev)) + ehci_warn(ehci, "selective suspend/wakeup unavailable\n"); +#endif + + retval = ehci_pci_reinit(ehci, pdev); +done: + return retval; +} + +/*-------------------------------------------------------------------------*/ + +#ifdef CONFIG_PM + +/* suspend/resume, section 4.3 */ + +/* These routines rely on the PCI bus glue + * to handle powerdown and wakeup, and currently also on + * transceivers that don't need any software attention to set up + * the right sort of wakeup. + * Also they depend on separate root hub suspend/resume. + */ + +static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct pci_dev *pdev = to_pci_dev(hcd->self.controller); + + if (ehci_resume(hcd, hibernated) != 0) + (void) ehci_pci_reinit(ehci, pdev); + return 0; +} + +#else + +#define ehci_suspend NULL +#define ehci_pci_resume NULL +#endif /* CONFIG_PM */ + +static struct hc_driver __read_mostly ehci_pci_hc_driver; + +static const struct ehci_driver_overrides pci_overrides __initconst = { + .reset = ehci_pci_setup, +}; + +/*-------------------------------------------------------------------------*/ + +static int ehci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + if (is_bypassed_id(pdev)) + return -ENODEV; + return usb_hcd_pci_probe(pdev, id); +} + +/* PCI driver selection metadata; PCI hotplugging uses this */ +static const struct pci_device_id pci_ids [] = { { + /* handle any USB 2.0 EHCI controller */ + PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0), + .driver_data = (unsigned long) &ehci_pci_hc_driver, + }, { + PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_HOST), + .driver_data = (unsigned long) &ehci_pci_hc_driver, + }, + { /* end: all zeroes */ } +}; +MODULE_DEVICE_TABLE(pci, pci_ids); + +/* pci driver glue; this is a "new style" PCI driver module */ +static struct pci_driver ehci_pci_driver = { + .name = (char *) hcd_name, + .id_table = pci_ids, + + .probe = ehci_pci_probe, + .remove = usb_hcd_pci_remove, + .shutdown = usb_hcd_pci_shutdown, + +#ifdef CONFIG_PM + .driver = { + .pm = &usb_hcd_pci_pm_ops + }, +#endif +}; + +static int __init ehci_pci_init(void) +{ + if (usb_disabled()) + return -ENODEV; + + pr_info("%s: " DRIVER_DESC "\n", hcd_name); + + ehci_init_driver(&ehci_pci_hc_driver, &pci_overrides); + + /* Entries for the PCI suspend/resume callbacks are special */ + ehci_pci_hc_driver.pci_suspend = ehci_suspend; + ehci_pci_hc_driver.pci_resume = ehci_pci_resume; + + return pci_register_driver(&ehci_pci_driver); +} +module_init(ehci_pci_init); + +static void __exit ehci_pci_cleanup(void) +{ + pci_unregister_driver(&ehci_pci_driver); +} +module_exit(ehci_pci_cleanup); + +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_AUTHOR("David Brownell"); +MODULE_AUTHOR("Alan Stern"); +MODULE_LICENSE("GPL"); diff --git a/addons/ehci-pci/src/4.4.180/ehci-q.c b/addons/ehci-pci/src/4.4.180/ehci-q.c new file mode 100644 index 00000000..54f5332f --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/ehci-q.c @@ -0,0 +1,1476 @@ +/* + * Copyright (C) 2001-2004 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the 
+ * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* this file is part of ehci-hcd.c */ + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI hardware queue manipulation ... the core. QH/QTD manipulation. + * + * Control, bulk, and interrupt traffic all use "qh" lists. They list "qtd" + * entries describing USB transactions, max 16-20kB/entry (with 4kB-aligned + * buffers needed for the larger number). We use one QH per endpoint, queue + * multiple urbs (all three types) per endpoint. URBs may need several qtds. + * + * ISO traffic uses "ISO TD" (itd, and sitd) records, and (along with + * interrupts) needs careful scheduling. Performance improvements can be + * an ongoing challenge. That's in "ehci-sched.c". + * + * USB 1.1 devices are handled (a) by "companion" OHCI or UHCI root hubs, + * or otherwise through transaction translators (TTs) in USB 2.0 hubs using + * (b) special fields in qh entries or (c) split iso entries. TTs will + * buffer low/full speed data so the host collects it at high speed. + */ + +/*-------------------------------------------------------------------------*/ + +/* fill a qtd, returning how much of the buffer we were able to queue up */ + +static int +qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf, + size_t len, int token, int maxpacket) +{ + int i, count; + u64 addr = buf; + + /* one buffer entry per 4K ... first might be short or unaligned */ + qtd->hw_buf[0] = cpu_to_hc32(ehci, (u32)addr); + qtd->hw_buf_hi[0] = cpu_to_hc32(ehci, (u32)(addr >> 32)); + count = 0x1000 - (buf & 0x0fff); /* rest of that page */ + if (likely (len < count)) /* ... iff needed */ + count = len; + else { + buf += 0x1000; + buf &= ~0x0fff; + + /* per-qtd limit: from 16K to 20K (best alignment) */ + for (i = 1; count < len && i < 5; i++) { + addr = buf; + qtd->hw_buf[i] = cpu_to_hc32(ehci, (u32)addr); + qtd->hw_buf_hi[i] = cpu_to_hc32(ehci, + (u32)(addr >> 32)); + buf += 0x1000; + if ((count + 0x1000) < len) + count += 0x1000; + else + count = len; + } + + /* short packets may only terminate transfers */ + if (count != len) + count -= (count % maxpacket); + } + qtd->hw_token = cpu_to_hc32(ehci, (count << 16) | token); + qtd->length = count; + + return count; +} + +/*-------------------------------------------------------------------------*/ + +static inline void +qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd) +{ + struct ehci_qh_hw *hw = qh->hw; + + /* writes to an active overlay are unsafe */ + WARN_ON(qh->qh_state != QH_STATE_IDLE); + + hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma); + hw->hw_alt_next = EHCI_LIST_END(ehci); + + /* Except for control endpoints, we make hardware maintain data + * toggle (like OHCI) ... here (re)initialize the toggle in the QH, + * and set the pseudo-toggle in udev. Only usb_clear_halt() will + * ever clear it. 
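+ * (Editor's note: QH_TOGGLE_CTL in hw_info1 means "take the toggle from
+ * each qtd", which qh_make() sets for control endpoints; for all other
+ * endpoint types the branch below re-syncs the QH-maintained toggle with
+ * usbcore's usb_gettoggle()/usb_settoggle() state.)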
+ */ + if (!(hw->hw_info1 & cpu_to_hc32(ehci, QH_TOGGLE_CTL))) { + unsigned is_out, epnum; + + is_out = qh->is_out; + epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f; + if (unlikely(!usb_gettoggle(qh->ps.udev, epnum, is_out))) { + hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE); + usb_settoggle(qh->ps.udev, epnum, is_out, 1); + } + } + + hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING); +} + +/* if it weren't for a common silicon quirk (writing the dummy into the qh + * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault + * recovery (including urb dequeue) would need software changes to a QH... + */ +static void +qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + struct ehci_qtd *qtd; + + qtd = list_entry(qh->qtd_list.next, struct ehci_qtd, qtd_list); + + /* + * first qtd may already be partially processed. + * If we come here during unlink, the QH overlay region + * might have reference to the just unlinked qtd. The + * qtd is updated in qh_completions(). Update the QH + * overlay here. + */ + if (qh->hw->hw_token & ACTIVE_BIT(ehci)) + qh->hw->hw_qtd_next = qtd->hw_next; + else + qh_update(ehci, qh, qtd); +} + +/*-------------------------------------------------------------------------*/ + +static void qh_link_async(struct ehci_hcd *ehci, struct ehci_qh *qh); + +static void ehci_clear_tt_buffer_complete(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct ehci_qh *qh = ep->hcpriv; + unsigned long flags; + + spin_lock_irqsave(&ehci->lock, flags); + qh->clearing_tt = 0; + if (qh->qh_state == QH_STATE_IDLE && !list_empty(&qh->qtd_list) + && ehci->rh_state == EHCI_RH_RUNNING) + qh_link_async(ehci, qh); + spin_unlock_irqrestore(&ehci->lock, flags); +} + +static void ehci_clear_tt_buffer(struct ehci_hcd *ehci, struct ehci_qh *qh, + struct urb *urb, u32 token) +{ + + /* If an async split transaction gets an error or is unlinked, + * the TT buffer may be left in an indeterminate state. We + * have to clear the TT buffer. + * + * Note: this routine is never called for Isochronous transfers. + */ + if (urb->dev->tt && !usb_pipeint(urb->pipe) && !qh->clearing_tt) { +#ifdef CONFIG_DYNAMIC_DEBUG + struct usb_device *tt = urb->dev->tt->hub; + dev_dbg(&tt->dev, + "clear tt buffer port %d, a%d ep%d t%08x\n", + urb->dev->ttport, urb->dev->devnum, + usb_pipeendpoint(urb->pipe), token); +#endif /* CONFIG_DYNAMIC_DEBUG */ + if (!ehci_is_TDI(ehci) + || urb->dev->tt->hub != + ehci_to_hcd(ehci)->self.root_hub) { + if (usb_hub_clear_tt_buffer(urb) == 0) + qh->clearing_tt = 1; + } else { + + /* REVISIT ARC-derived cores don't clear the root + * hub TT buffer in this way... 
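+ * (Editor's note: for an external hub's TT, usb_hub_clear_tt_buffer()
+ * above queues a Clear_TT_Buffer request to the hub; qh->clearing_tt then
+ * keeps the QH from being relinked until ehci_clear_tt_buffer_complete()
+ * runs.)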
+ */ + } + } +} + +static int qtd_copy_status ( + struct ehci_hcd *ehci, + struct urb *urb, + size_t length, + u32 token +) +{ + int status = -EINPROGRESS; + + /* count IN/OUT bytes, not SETUP (even short packets) */ + if (likely (QTD_PID (token) != 2)) + urb->actual_length += length - QTD_LENGTH (token); + + /* don't modify error codes */ + if (unlikely(urb->unlinked)) + return status; + + /* force cleanup after short read; not always an error */ + if (unlikely (IS_SHORT_READ (token))) + status = -EREMOTEIO; + + /* serious "can't proceed" faults reported by the hardware */ + if (token & QTD_STS_HALT) { + if (token & QTD_STS_BABBLE) { + /* FIXME "must" disable babbling device's port too */ + status = -EOVERFLOW; + /* CERR nonzero + halt --> stall */ + } else if (QTD_CERR(token)) { + status = -EPIPE; + + /* In theory, more than one of the following bits can be set + * since they are sticky and the transaction is retried. + * Which to test first is rather arbitrary. + */ + } else if (token & QTD_STS_MMF) { + /* fs/ls interrupt xfer missed the complete-split */ + status = -EPROTO; + } else if (token & QTD_STS_DBE) { + status = (QTD_PID (token) == 1) /* IN ? */ + ? -ENOSR /* hc couldn't read data */ + : -ECOMM; /* hc couldn't write data */ + } else if (token & QTD_STS_XACT) { + /* timeout, bad CRC, wrong PID, etc */ + ehci_dbg(ehci, "devpath %s ep%d%s 3strikes\n", + urb->dev->devpath, + usb_pipeendpoint(urb->pipe), + usb_pipein(urb->pipe) ? "in" : "out"); + status = -EPROTO; + } else { /* unknown */ + status = -EPROTO; + } + } + + return status; +} + +static void +ehci_urb_done(struct ehci_hcd *ehci, struct urb *urb, int status) +{ + if (usb_pipetype(urb->pipe) == PIPE_INTERRUPT) { + /* ... update hc-wide periodic stats */ + ehci_to_hcd(ehci)->self.bandwidth_int_reqs--; + } + + if (unlikely(urb->unlinked)) { + COUNT(ehci->stats.unlink); + } else { + /* report non-error and short read status as zero */ + if (status == -EINPROGRESS || status == -EREMOTEIO) + status = 0; + COUNT(ehci->stats.complete); + } + +#ifdef EHCI_URB_TRACE + ehci_dbg (ehci, + "%s %s urb %p ep%d%s status %d len %d/%d\n", + __func__, urb->dev->devpath, urb, + usb_pipeendpoint (urb->pipe), + usb_pipein (urb->pipe) ? "in" : "out", + status, + urb->actual_length, urb->transfer_buffer_length); +#endif + + usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); + usb_hcd_giveback_urb(ehci_to_hcd(ehci), urb, status); +} + +static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh); + +/* + * Process and free completed qtds for a qh, returning URBs to drivers. + * Chases up to qh->hw_current. Returns nonzero if the caller should + * unlink qh. + */ +static unsigned +qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + struct ehci_qtd *last, *end = qh->dummy; + struct list_head *entry, *tmp; + int last_status; + int stopped; + u8 state; + struct ehci_qh_hw *hw = qh->hw; + + /* completions (or tasks on other cpus) must never clobber HALT + * till we've gone through and cleaned everything up, even when + * they add urbs to this qh's queue or mark them for unlinking. + * + * NOTE: unlinking expects to be done in queue order. + * + * It's a bug for qh->qh_state to be anything other than + * QH_STATE_IDLE, unless our caller is scan_async() or + * scan_intr(). + */ + state = qh->qh_state; + qh->qh_state = QH_STATE_COMPLETING; + stopped = (state == QH_STATE_IDLE); + + rescan: + last = NULL; + last_status = -EINPROGRESS; + qh->dequeue_during_giveback = 0; + + /* remove de-activated QTDs from front of queue. 
+ * after faults (including short reads), cleanup this urb + * then let the queue advance. + * if queue is stopped, handles unlinks. + */ + list_for_each_safe (entry, tmp, &qh->qtd_list) { + struct ehci_qtd *qtd; + struct urb *urb; + u32 token = 0; + + qtd = list_entry (entry, struct ehci_qtd, qtd_list); + urb = qtd->urb; + + /* clean up any state from previous QTD ...*/ + if (last) { + if (likely (last->urb != urb)) { + ehci_urb_done(ehci, last->urb, last_status); + last_status = -EINPROGRESS; + } + ehci_qtd_free (ehci, last); + last = NULL; + } + + /* ignore urbs submitted during completions we reported */ + if (qtd == end) + break; + + /* hardware copies qtd out of qh overlay */ + rmb (); + token = hc32_to_cpu(ehci, qtd->hw_token); + + /* always clean up qtds the hc de-activated */ + retry_xacterr: + if ((token & QTD_STS_ACTIVE) == 0) { + + /* Report Data Buffer Error: non-fatal but useful */ + if (token & QTD_STS_DBE) + ehci_dbg(ehci, + "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n", + urb, + usb_endpoint_num(&urb->ep->desc), + usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out", + urb->transfer_buffer_length, + qtd, + qh); + + /* on STALL, error, and short reads this urb must + * complete and all its qtds must be recycled. + */ + if ((token & QTD_STS_HALT) != 0) { + + /* retry transaction errors until we + * reach the software xacterr limit + */ + if ((token & QTD_STS_XACT) && + QTD_CERR(token) == 0 && + ++qh->xacterrs < QH_XACTERR_MAX && + !urb->unlinked) { + ehci_dbg(ehci, + "detected XactErr len %zu/%zu retry %d\n", + qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs); + + /* reset the token in the qtd and the + * qh overlay (which still contains + * the qtd) so that we pick up from + * where we left off + */ + token &= ~QTD_STS_HALT; + token |= QTD_STS_ACTIVE | + (EHCI_TUNE_CERR << 10); + qtd->hw_token = cpu_to_hc32(ehci, + token); + wmb(); + hw->hw_token = cpu_to_hc32(ehci, + token); + goto retry_xacterr; + } + stopped = 1; + + /* magic dummy for some short reads; qh won't advance. + * that silicon quirk can kick in with this dummy too. + * + * other short reads won't stop the queue, including + * control transfers (status stage handles that) or + * most other single-qtd reads ... the queue stops if + * URB_SHORT_NOT_OK was set so the driver submitting + * the urbs could clean it up. + */ + } else if (IS_SHORT_READ (token) + && !(qtd->hw_alt_next + & EHCI_LIST_END(ehci))) { + stopped = 1; + } + + /* stop scanning when we reach qtds the hc is using */ + } else if (likely (!stopped + && ehci->rh_state >= EHCI_RH_RUNNING)) { + break; + + /* scan the whole queue for unlinks whenever it stops */ + } else { + stopped = 1; + + /* cancel everything if we halt, suspend, etc */ + if (ehci->rh_state < EHCI_RH_RUNNING) + last_status = -ESHUTDOWN; + + /* this qtd is active; skip it unless a previous qtd + * for its urb faulted, or its urb was canceled. + */ + else if (last_status == -EINPROGRESS && !urb->unlinked) + continue; + + /* + * If this was the active qtd when the qh was unlinked + * and the overlay's token is active, then the overlay + * hasn't been written back to the qtd yet so use its + * token instead of the qtd's. After the qtd is + * processed and removed, the overlay won't be valid + * any more. 
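+ * (Editor's note: in that case the code below treats the overlay's token
+ * as authoritative, clears its ACTIVE bit by hand, and then clears any TT
+ * buffer state the unlinked async transaction may have left behind.)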
+ */ + if (state == QH_STATE_IDLE && + qh->qtd_list.next == &qtd->qtd_list && + (hw->hw_token & ACTIVE_BIT(ehci))) { + token = hc32_to_cpu(ehci, hw->hw_token); + hw->hw_token &= ~ACTIVE_BIT(ehci); + + /* An unlink may leave an incomplete + * async transaction in the TT buffer. + * We have to clear it. + */ + ehci_clear_tt_buffer(ehci, qh, urb, token); + } + } + + /* unless we already know the urb's status, collect qtd status + * and update count of bytes transferred. in common short read + * cases with only one data qtd (including control transfers), + * queue processing won't halt. but with two or more qtds (for + * example, with a 32 KB transfer), when the first qtd gets a + * short read the second must be removed by hand. + */ + if (last_status == -EINPROGRESS) { + last_status = qtd_copy_status(ehci, urb, + qtd->length, token); + if (last_status == -EREMOTEIO + && (qtd->hw_alt_next + & EHCI_LIST_END(ehci))) + last_status = -EINPROGRESS; + + /* As part of low/full-speed endpoint-halt processing + * we must clear the TT buffer (11.17.5). + */ + if (unlikely(last_status != -EINPROGRESS && + last_status != -EREMOTEIO)) { + /* The TT's in some hubs malfunction when they + * receive this request following a STALL (they + * stop sending isochronous packets). Since a + * STALL can't leave the TT buffer in a busy + * state (if you believe Figures 11-48 - 11-51 + * in the USB 2.0 spec), we won't clear the TT + * buffer in this case. Strictly speaking this + * is a violation of the spec. + */ + if (last_status != -EPIPE) + ehci_clear_tt_buffer(ehci, qh, urb, + token); + } + } + + /* if we're removing something not at the queue head, + * patch the hardware queue pointer. + */ + if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { + last = list_entry (qtd->qtd_list.prev, + struct ehci_qtd, qtd_list); + last->hw_next = qtd->hw_next; + } + + /* remove qtd; it's recycled after possible urb completion */ + list_del (&qtd->qtd_list); + last = qtd; + + /* reinit the xacterr counter for the next qtd */ + qh->xacterrs = 0; + } + + /* last urb's completion might still need calling */ + if (likely (last != NULL)) { + ehci_urb_done(ehci, last->urb, last_status); + ehci_qtd_free (ehci, last); + } + + /* Do we need to rescan for URBs dequeued during a giveback? */ + if (unlikely(qh->dequeue_during_giveback)) { + /* If the QH is already unlinked, do the rescan now. */ + if (state == QH_STATE_IDLE) + goto rescan; + + /* Otherwise the caller must unlink the QH. */ + } + + /* restore original state; caller must unlink or relink */ + qh->qh_state = state; + + /* be sure the hardware's done with the qh before refreshing + * it after fault cleanup, or recovering from silicon wrongly + * overlaying the dummy qtd (which reduces DMA chatter). + * + * We won't refresh a QH that's linked (after the HC + * stopped the queue). That avoids a race: + * - HC reads first part of QH; + * - CPU updates that first part and the token; + * - HC reads rest of that QH, including token + * Result: HC gets an inconsistent image, and then + * DMAs to/from the wrong memory (corrupting it). + * + * That should be rare for interrupt transfers, + * except maybe high bandwidth ... + */ + if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) + qh->exception = 1; + + /* Let the caller know if the QH needs to be unlinked. 
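+ * (Editor's note: qh->exception was set just above whenever the scan
+ * stopped or the hardware next-qtd pointer reached the list end.)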
*/ + return qh->exception; +} + +/*-------------------------------------------------------------------------*/ + +// high bandwidth multiplier, as encoded in highspeed endpoint descriptors +#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) +// ... and packet size, for any kind of endpoint descriptor +#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) + +/* + * reverse of qh_urb_transaction: free a list of TDs. + * used for cleanup after errors, before HC sees an URB's TDs. + */ +static void qtd_list_free ( + struct ehci_hcd *ehci, + struct urb *urb, + struct list_head *qtd_list +) { + struct list_head *entry, *temp; + + list_for_each_safe (entry, temp, qtd_list) { + struct ehci_qtd *qtd; + + qtd = list_entry (entry, struct ehci_qtd, qtd_list); + list_del (&qtd->qtd_list); + ehci_qtd_free (ehci, qtd); + } +} + +/* + * create a list of filled qtds for this URB; won't link into qh. + */ +static struct list_head * +qh_urb_transaction ( + struct ehci_hcd *ehci, + struct urb *urb, + struct list_head *head, + gfp_t flags +) { + struct ehci_qtd *qtd, *qtd_prev; + dma_addr_t buf; + int len, this_sg_len, maxpacket; + int is_input; + u32 token; + int i; + struct scatterlist *sg; + + /* + * URBs map to sequences of QTDs: one logical transaction + */ + qtd = ehci_qtd_alloc (ehci, flags); + if (unlikely (!qtd)) + return NULL; + list_add_tail (&qtd->qtd_list, head); + qtd->urb = urb; + + token = QTD_STS_ACTIVE; + token |= (EHCI_TUNE_CERR << 10); + /* for split transactions, SplitXState initialized to zero */ + + len = urb->transfer_buffer_length; + is_input = usb_pipein (urb->pipe); + if (usb_pipecontrol (urb->pipe)) { + /* SETUP pid */ + qtd_fill(ehci, qtd, urb->setup_dma, + sizeof (struct usb_ctrlrequest), + token | (2 /* "setup" */ << 8), 8); + + /* ... and always at least one more pid */ + token ^= QTD_TOGGLE; + qtd_prev = qtd; + qtd = ehci_qtd_alloc (ehci, flags); + if (unlikely (!qtd)) + goto cleanup; + qtd->urb = urb; + qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); + list_add_tail (&qtd->qtd_list, head); + + /* for zero length DATA stages, STATUS is always IN */ + if (len == 0) + token |= (1 /* "in" */ << 8); + } + + /* + * data transfer stage: buffer setup + */ + i = urb->num_mapped_sgs; + if (len > 0 && i > 0) { + sg = urb->sg; + buf = sg_dma_address(sg); + + /* urb->transfer_buffer_length may be smaller than the + * size of the scatterlist (or vice versa) + */ + this_sg_len = min_t(int, sg_dma_len(sg), len); + } else { + sg = NULL; + buf = urb->transfer_dma; + this_sg_len = len; + } + + if (is_input) + token |= (1 /* "in" */ << 8); + /* else it's already initted to "out" pid (0 << 8) */ + + maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); + + /* + * buffer gets wrapped in one or more qtds; + * last one may be "short" (including zero len) + * and may serve as a control status ack + */ + for (;;) { + int this_qtd_len; + + this_qtd_len = qtd_fill(ehci, qtd, buf, this_sg_len, token, + maxpacket); + this_sg_len -= this_qtd_len; + len -= this_qtd_len; + buf += this_qtd_len; + + /* + * short reads advance to a "magic" dummy instead of the next + * qtd ... that forces the queue to stop, for manual cleanup. + * (this will usually be overridden later.) 
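+ * (Editor's note: the "magic dummy" is ehci->async->hw->hw_alt_next,
+ * wired up just below for IN qtds; the final qtd's alt_next is later
+ * redirected to EHCI_LIST_END unless URB_SHORT_NOT_OK requested manual
+ * cleanup.)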
+ */ + if (is_input) + qtd->hw_alt_next = ehci->async->hw->hw_alt_next; + + /* qh makes control packets use qtd toggle; maybe switch it */ + if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) + token ^= QTD_TOGGLE; + + if (likely(this_sg_len <= 0)) { + if (--i <= 0 || len <= 0) + break; + sg = sg_next(sg); + buf = sg_dma_address(sg); + this_sg_len = min_t(int, sg_dma_len(sg), len); + } + + qtd_prev = qtd; + qtd = ehci_qtd_alloc (ehci, flags); + if (unlikely (!qtd)) + goto cleanup; + qtd->urb = urb; + qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); + list_add_tail (&qtd->qtd_list, head); + } + + /* + * unless the caller requires manual cleanup after short reads, + * have the alt_next mechanism keep the queue running after the + * last data qtd (the only one, for control and most other cases). + */ + if (likely ((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 + || usb_pipecontrol (urb->pipe))) + qtd->hw_alt_next = EHCI_LIST_END(ehci); + + /* + * control requests may need a terminating data "status" ack; + * other OUT ones may need a terminating short packet + * (zero length). + */ + if (likely (urb->transfer_buffer_length != 0)) { + int one_more = 0; + + if (usb_pipecontrol (urb->pipe)) { + one_more = 1; + token ^= 0x0100; /* "in" <--> "out" */ + token |= QTD_TOGGLE; /* force DATA1 */ + } else if (usb_pipeout(urb->pipe) + && (urb->transfer_flags & URB_ZERO_PACKET) + && !(urb->transfer_buffer_length % maxpacket)) { + one_more = 1; + } + if (one_more) { + qtd_prev = qtd; + qtd = ehci_qtd_alloc (ehci, flags); + if (unlikely (!qtd)) + goto cleanup; + qtd->urb = urb; + qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); + list_add_tail (&qtd->qtd_list, head); + + /* never any data in such packets */ + qtd_fill(ehci, qtd, 0, 0, token, 0); + } + } + + /* by default, enable interrupt on urb completion */ + if (likely (!(urb->transfer_flags & URB_NO_INTERRUPT))) + qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC); + return head; + +cleanup: + qtd_list_free (ehci, urb, head); + return NULL; +} + +/*-------------------------------------------------------------------------*/ + +// Would be best to create all qh's from config descriptors, +// when each interface/altsetting is established. Unlink +// any previous qh and cancel its urbs first; endpoints are +// implicitly reset then (data toggle too). +// That'd mean updating how usbcore talks to HCDs. (2.7?) + + +/* + * Each QH holds a qtd list; a QH is used for everything except iso. + * + * For interrupt urbs, the scheduler must set the microframe scheduling + * mask(s) each time the QH gets scheduled. For highspeed, that's + * just one microframe in the s-mask. For split interrupt transactions + * there are additional complications: c-mask, maybe FSTNs. + */ +static struct ehci_qh * +qh_make ( + struct ehci_hcd *ehci, + struct urb *urb, + gfp_t flags +) { + struct ehci_qh *qh = ehci_qh_alloc (ehci, flags); + u32 info1 = 0, info2 = 0; + int is_input, type; + int maxp = 0; + struct usb_tt *tt = urb->dev->tt; + struct ehci_qh_hw *hw; + + if (!qh) + return qh; + + /* + * init endpoint/device data for this QH + */ + info1 |= usb_pipeendpoint (urb->pipe) << 8; + info1 |= usb_pipedevice (urb->pipe) << 0; + + is_input = usb_pipein (urb->pipe); + type = usb_pipetype (urb->pipe); + maxp = usb_maxpacket (urb->dev, urb->pipe, !is_input); + + /* 1024 byte maxpacket is a hardware ceiling. High bandwidth + * acts like up to 3KB, but is built from smaller packets. 
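+ * (Editor's note: wMaxPacketSize packs the base packet size in bits 0..10
+ * and the high-bandwidth multiplier minus one in bits 11..12, matching the
+ * hb_mult()/max_packet() macros above. For example, 0x1400 decodes to
+ * 3 transactions per microframe x 1024 bytes = 3072 bytes per uframe.)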
+ */ + if (max_packet(maxp) > 1024) { + ehci_dbg(ehci, "bogus qh maxpacket %d\n", max_packet(maxp)); + goto done; + } + + /* Compute interrupt scheduling parameters just once, and save. + * - allowing for high bandwidth, how many nsec/uframe are used? + * - split transactions need a second CSPLIT uframe; same question + * - splits also need a schedule gap (for full/low speed I/O) + * - qh has a polling interval + * + * For control/bulk requests, the HC or TT handles these. + */ + if (type == PIPE_INTERRUPT) { + unsigned tmp; + + qh->ps.usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH, + is_input, 0, + hb_mult(maxp) * max_packet(maxp))); + qh->ps.phase = NO_FRAME; + + if (urb->dev->speed == USB_SPEED_HIGH) { + qh->ps.c_usecs = 0; + qh->gap_uf = 0; + + if (urb->interval > 1 && urb->interval < 8) { + /* NOTE interval 2 or 4 uframes could work. + * But interval 1 scheduling is simpler, and + * includes high bandwidth. + */ + urb->interval = 1; + } else if (urb->interval > ehci->periodic_size << 3) { + urb->interval = ehci->periodic_size << 3; + } + qh->ps.period = urb->interval >> 3; + + /* period for bandwidth allocation */ + tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE, + 1 << (urb->ep->desc.bInterval - 1)); + + /* Allow urb->interval to override */ + qh->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval); + qh->ps.bw_period = qh->ps.bw_uperiod >> 3; + } else { + int think_time; + + /* gap is f(FS/LS transfer times) */ + qh->gap_uf = 1 + usb_calc_bus_time (urb->dev->speed, + is_input, 0, maxp) / (125 * 1000); + + /* FIXME this just approximates SPLIT/CSPLIT times */ + if (is_input) { // SPLIT, gap, CSPLIT+DATA + qh->ps.c_usecs = qh->ps.usecs + HS_USECS(0); + qh->ps.usecs = HS_USECS(1); + } else { // SPLIT+DATA, gap, CSPLIT + qh->ps.usecs += HS_USECS(1); + qh->ps.c_usecs = HS_USECS(0); + } + + think_time = tt ? tt->think_time : 0; + qh->ps.tt_usecs = NS_TO_US(think_time + + usb_calc_bus_time (urb->dev->speed, + is_input, 0, max_packet (maxp))); + if (urb->interval > ehci->periodic_size) + urb->interval = ehci->periodic_size; + qh->ps.period = urb->interval; + + /* period for bandwidth allocation */ + tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES, + urb->ep->desc.bInterval); + tmp = rounddown_pow_of_two(tmp); + + /* Allow urb->interval to override */ + qh->ps.bw_period = min_t(unsigned, tmp, urb->interval); + qh->ps.bw_uperiod = qh->ps.bw_period << 3; + } + } + + /* support for tt scheduling, and access to toggles */ + qh->ps.udev = urb->dev; + qh->ps.ep = urb->ep; + + /* using TT? */ + switch (urb->dev->speed) { + case USB_SPEED_LOW: + info1 |= QH_LOW_SPEED; + /* FALL THROUGH */ + + case USB_SPEED_FULL: + /* EPS 0 means "full" */ + if (type != PIPE_INTERRUPT) + info1 |= (EHCI_TUNE_RL_TT << 28); + if (type == PIPE_CONTROL) { + info1 |= QH_CONTROL_EP; /* for TT */ + info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ + } + info1 |= maxp << 16; + + info2 |= (EHCI_TUNE_MULT_TT << 30); + + /* Some Freescale processors have an erratum in which the + * port number in the queue head was 0..N-1 instead of 1..N. + */ + if (ehci_has_fsl_portno_bug(ehci)) + info2 |= (urb->dev->ttport-1) << 23; + else + info2 |= urb->dev->ttport << 23; + + /* set the address of the TT; for TDI's integrated + * root hub tt, leave it zeroed. 
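+ * + * e.g. a full speed device on port 3 of an external high speed hub + * at address 2 ends up with (3 << 23) | (2 << 16) in info2 here + * (assuming no Freescale port-numbering erratum).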
+ */ + if (tt && tt->hub != ehci_to_hcd(ehci)->self.root_hub) + info2 |= tt->hub->devnum << 16; + + /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */ + + break; + + case USB_SPEED_HIGH: /* no TT involved */ + info1 |= QH_HIGH_SPEED; + if (type == PIPE_CONTROL) { + info1 |= (EHCI_TUNE_RL_HS << 28); + info1 |= 64 << 16; /* usb2 fixed maxpacket */ + info1 |= QH_TOGGLE_CTL; /* toggle from qtd */ + info2 |= (EHCI_TUNE_MULT_HS << 30); + } else if (type == PIPE_BULK) { + info1 |= (EHCI_TUNE_RL_HS << 28); + /* The USB spec says that high speed bulk endpoints + * always use 512 byte maxpacket. But some device + * vendors decided to ignore that, and MSFT is happy + * to help them do so. So now people expect to use + * such nonconformant devices with Linux too; sigh. + */ + info1 |= max_packet(maxp) << 16; + info2 |= (EHCI_TUNE_MULT_HS << 30); + } else { /* PIPE_INTERRUPT */ + info1 |= max_packet (maxp) << 16; + info2 |= hb_mult (maxp) << 30; + } + break; + default: + ehci_dbg(ehci, "bogus dev %p speed %d\n", urb->dev, + urb->dev->speed); +done: + qh_destroy(ehci, qh); + return NULL; + } + + /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */ + + /* init as live, toggle clear */ + qh->qh_state = QH_STATE_IDLE; + hw = qh->hw; + hw->hw_info1 = cpu_to_hc32(ehci, info1); + hw->hw_info2 = cpu_to_hc32(ehci, info2); + qh->is_out = !is_input; + usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1); + return qh; +} + +/*-------------------------------------------------------------------------*/ + +static void enable_async(struct ehci_hcd *ehci) +{ + if (ehci->async_count++) + return; + + /* Stop waiting to turn off the async schedule */ + ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_ASYNC); + + /* Don't start the schedule until ASS is 0 */ + ehci_poll_ASS(ehci); + turn_on_io_watchdog(ehci); +} + +static void disable_async(struct ehci_hcd *ehci) +{ + if (--ehci->async_count) + return; + + /* The async schedule and unlink lists are supposed to be empty */ + WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) || + !list_empty(&ehci->async_idle)); + + /* Don't turn off the schedule until ASS is 1 */ + ehci_poll_ASS(ehci); +} + +/* move qh (and its qtds) onto async queue; maybe enable queue. */ + +static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + __hc32 dma = QH_NEXT(ehci, qh->qh_dma); + struct ehci_qh *head; + + /* Don't link a QH if there's a Clear-TT-Buffer pending */ + if (unlikely(qh->clearing_tt)) + return; + + WARN_ON(qh->qh_state != QH_STATE_IDLE); + + /* clear halt and/or toggle; and maybe recover from silicon quirk */ + qh_refresh(ehci, qh); + + /* splice right after start */ + head = ehci->async; + qh->qh_next = head->qh_next; + qh->hw->hw_next = head->hw->hw_next; + wmb (); + + head->qh_next.qh = qh; + head->hw->hw_next = dma; + + qh->qh_state = QH_STATE_LINKED; + qh->xacterrs = 0; + qh->exception = 0; + /* qtd completions reported later by interrupt */ + + enable_async(ehci); +} + +/*-------------------------------------------------------------------------*/ + +/* + * For control/bulk/interrupt, return QH with these TDs appended. + * Allocates and initializes the QH if necessary. + * Returns null if it can't allocate a QH it needs to. + * If the QH has TDs (urbs) already, that's great. 
+ */ +static struct ehci_qh *qh_append_tds ( + struct ehci_hcd *ehci, + struct urb *urb, + struct list_head *qtd_list, + int epnum, + void **ptr +) +{ + struct ehci_qh *qh = NULL; + __hc32 qh_addr_mask = cpu_to_hc32(ehci, 0x7f); + + qh = (struct ehci_qh *) *ptr; + if (unlikely (qh == NULL)) { + /* can't sleep here, we have ehci->lock... */ + qh = qh_make (ehci, urb, GFP_ATOMIC); + *ptr = qh; + } + if (likely (qh != NULL)) { + struct ehci_qtd *qtd; + + if (unlikely (list_empty (qtd_list))) + qtd = NULL; + else + qtd = list_entry (qtd_list->next, struct ehci_qtd, + qtd_list); + + /* control qh may need patching ... */ + if (unlikely (epnum == 0)) { + + /* usb_reset_device() briefly reverts to address 0 */ + if (usb_pipedevice (urb->pipe) == 0) + qh->hw->hw_info1 &= ~qh_addr_mask; + } + + /* just one way to queue requests: swap with the dummy qtd. + * only hc or qh_refresh() ever modify the overlay. + */ + if (likely (qtd != NULL)) { + struct ehci_qtd *dummy; + dma_addr_t dma; + __hc32 token; + + /* to avoid racing the HC, use the dummy td instead of + * the first td of our list (becomes new dummy). both + * tds stay deactivated until we're done, when the + * HC is allowed to fetch the old dummy (4.10.2). + */ + token = qtd->hw_token; + qtd->hw_token = HALT_BIT(ehci); + + dummy = qh->dummy; + + dma = dummy->qtd_dma; + *dummy = *qtd; + dummy->qtd_dma = dma; + + list_del (&qtd->qtd_list); + list_add (&dummy->qtd_list, qtd_list); + list_splice_tail(qtd_list, &qh->qtd_list); + + ehci_qtd_init(ehci, qtd, qtd->qtd_dma); + qh->dummy = qtd; + + /* hc must see the new dummy at list end */ + dma = qtd->qtd_dma; + qtd = list_entry (qh->qtd_list.prev, + struct ehci_qtd, qtd_list); + qtd->hw_next = QTD_NEXT(ehci, dma); + + /* let the hc process these next qtds */ + wmb (); + dummy->hw_token = token; + + urb->hcpriv = qh; + } + } + return qh; +} + +/*-------------------------------------------------------------------------*/ + +static int +submit_async ( + struct ehci_hcd *ehci, + struct urb *urb, + struct list_head *qtd_list, + gfp_t mem_flags +) { + int epnum; + unsigned long flags; + struct ehci_qh *qh = NULL; + int rc; + + epnum = urb->ep->desc.bEndpointAddress; + +#ifdef EHCI_URB_TRACE + { + struct ehci_qtd *qtd; + qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list); + ehci_dbg(ehci, + "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", + __func__, urb->dev->devpath, urb, + epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", + urb->transfer_buffer_length, + qtd, urb->ep->hcpriv); + } +#endif + + spin_lock_irqsave (&ehci->lock, flags); + if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { + rc = -ESHUTDOWN; + goto done; + } + rc = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); + if (unlikely(rc)) + goto done; + + qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv); + if (unlikely(qh == NULL)) { + usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); + rc = -ENOMEM; + goto done; + } + + /* Control/bulk operations through TTs don't need scheduling, + * the HC and TT handle it when the TT has a buffer ready. + */ + if (likely (qh->qh_state == QH_STATE_IDLE)) + qh_link_async(ehci, qh); + done: + spin_unlock_irqrestore (&ehci->lock, flags); + if (unlikely (qh == NULL)) + qtd_list_free (ehci, urb, qtd_list); + return rc; +} + +/*-------------------------------------------------------------------------*/ +#ifdef CONFIG_USB_HCD_TEST_MODE +/* + * This function creates the qtds and submits them for the + * SINGLE_STEP_SET_FEATURE Test. 
+ * This is done in two parts: first the SETUP req for GetDesc is sent, then + * 15 seconds later the IN stage for GetDesc starts to request data from dev + * + * is_setup : input argument that decides which of the two stages needs to be + * performed; TRUE - SETUP and FALSE - IN+STATUS + * Returns 0 on success + */ +static int submit_single_step_set_feature( + struct usb_hcd *hcd, + struct urb *urb, + int is_setup +) { + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + struct list_head qtd_list; + struct list_head *head; + + struct ehci_qtd *qtd, *qtd_prev; + dma_addr_t buf; + int len, maxpacket; + u32 token; + + INIT_LIST_HEAD(&qtd_list); + head = &qtd_list; + + /* URBs map to sequences of QTDs: one logical transaction */ + qtd = ehci_qtd_alloc(ehci, GFP_KERNEL); + if (unlikely(!qtd)) + return -1; + list_add_tail(&qtd->qtd_list, head); + qtd->urb = urb; + + token = QTD_STS_ACTIVE; + token |= (EHCI_TUNE_CERR << 10); + + len = urb->transfer_buffer_length; + /* + * Check if the request is to perform just the SETUP stage (getDesc) + * as in the SINGLE_STEP_SET_FEATURE test; the DATA stage (IN) happens + * 15 seconds after the setup + */ + if (is_setup) { + /* SETUP pid */ + qtd_fill(ehci, qtd, urb->setup_dma, + sizeof(struct usb_ctrlrequest), + token | (2 /* "setup" */ << 8), 8); + + submit_async(ehci, urb, &qtd_list, GFP_ATOMIC); + return 0; /*Return now; we shall come back after 15 seconds*/ + } + + /* + * IN: data transfer stage: buffer setup : start the IN txn phase for + * the get_Desc SETUP which was sent 15 seconds back + */ + token ^= QTD_TOGGLE; /*We need to start IN with DATA-1 Pid-sequence*/ + buf = urb->transfer_dma; + + token |= (1 /* "in" */ << 8); /*This is IN stage*/ + + maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, 0)); + + qtd_fill(ehci, qtd, buf, len, token, maxpacket); + + /* + * Our IN phase shall always be a short read; so keep the queue running + * and let it advance to the next qtd, which is the zero length OUT status + */ + qtd->hw_alt_next = EHCI_LIST_END(ehci); + + /* STATUS stage for GetDesc control request */ + token ^= 0x0100; /* "in" <--> "out" */ + token |= QTD_TOGGLE; /* force DATA1 */ + + qtd_prev = qtd; + qtd = ehci_qtd_alloc(ehci, GFP_ATOMIC); + if (unlikely(!qtd)) + goto cleanup; + qtd->urb = urb; + qtd_prev->hw_next = QTD_NEXT(ehci, qtd->qtd_dma); + list_add_tail(&qtd->qtd_list, head); + + /* don't fill any data in such packets */ + qtd_fill(ehci, qtd, 0, 0, token, 0); + + /* by default, enable interrupt on urb completion */ + if (likely(!(urb->transfer_flags & URB_NO_INTERRUPT))) + qtd->hw_token |= cpu_to_hc32(ehci, QTD_IOC); + + submit_async(ehci, urb, &qtd_list, GFP_KERNEL); + + return 0; + +cleanup: + qtd_list_free(ehci, urb, head); + return -1; +} +#endif /* CONFIG_USB_HCD_TEST_MODE */ + +/*-------------------------------------------------------------------------*/ + +static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + struct ehci_qh *prev; + + /* Add to the end of the list of QHs waiting for the next IAAD */ + qh->qh_state = QH_STATE_UNLINK_WAIT; + list_add_tail(&qh->unlink_node, &ehci->async_unlink); + + /* Unlink it from the schedule */ + prev = ehci->async; + while (prev->qh_next.qh != qh) + prev = prev->qh_next.qh; + + prev->hw->hw_next = qh->hw->hw_next; + prev->qh_next = qh->qh_next; + if (ehci->qh_scan_next == qh) + ehci->qh_scan_next = qh->qh_next.qh; +} + +static void start_iaa_cycle(struct ehci_hcd *ehci) +{ + /* Do nothing if an IAA cycle is already running */ + if (ehci->iaa_in_progress) + return; + ehci->iaa_in_progress = true; + + /* 
If the controller isn't running, we don't have to wait for it */ + if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) { + end_unlink_async(ehci); + + /* Otherwise start a new IAA cycle */ + } else if (likely(ehci->rh_state == EHCI_RH_RUNNING)) { + + /* Make sure the unlinks are all visible to the hardware */ + wmb(); + + ehci_writel(ehci, ehci->command | CMD_IAAD, + &ehci->regs->command); + ehci_readl(ehci, &ehci->regs->command); + ehci_enable_event(ehci, EHCI_HRTIMER_IAA_WATCHDOG, true); + } +} + +/* the async qh for the qtds being unlinked is now gone from the HC */ + +static void end_unlink_async(struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + bool early_exit; + + if (ehci->has_synopsys_hc_bug) + ehci_writel(ehci, (u32) ehci->async->qh_dma, + &ehci->regs->async_next); + + /* The current IAA cycle has ended */ + ehci->iaa_in_progress = false; + + if (list_empty(&ehci->async_unlink)) + return; + qh = list_first_entry(&ehci->async_unlink, struct ehci_qh, + unlink_node); /* QH whose IAA cycle just ended */ + + /* + * If async_unlinking is set then this routine is already running, + * either on the stack or on another CPU. + */ + early_exit = ehci->async_unlinking; + + /* If the controller isn't running, process all the waiting QHs */ + if (ehci->rh_state < EHCI_RH_RUNNING) + list_splice_tail_init(&ehci->async_unlink, &ehci->async_idle); + + /* + * Intel (?) bug: The HC can write back the overlay region even + * after the IAA interrupt occurs. In self-defense, always go + * through two IAA cycles for each QH. + */ + else if (qh->qh_state == QH_STATE_UNLINK_WAIT) { + qh->qh_state = QH_STATE_UNLINK; + early_exit = true; + } + + /* Otherwise process only the first waiting QH (NVIDIA bug?) */ + else + list_move_tail(&qh->unlink_node, &ehci->async_idle); + + /* Start a new IAA cycle if any QHs are waiting for it */ + if (!list_empty(&ehci->async_unlink)) + start_iaa_cycle(ehci); + + /* + * Don't allow nesting or concurrent calls, + * or wait for the second IAA cycle for the next QH. 
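+ * + * (e.g. if we were entered from start_iaa_cycle() while an outer + * end_unlink_async() is still draining async_idle, the outer call's + * loop below picks up whatever we queued, so bailing out here + * loses nothing.)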
+ */ + if (early_exit) + return; + + /* Process the idle QHs */ + ehci->async_unlinking = true; + while (!list_empty(&ehci->async_idle)) { + qh = list_first_entry(&ehci->async_idle, struct ehci_qh, + unlink_node); + list_del(&qh->unlink_node); + + qh->qh_state = QH_STATE_IDLE; + qh->qh_next.qh = NULL; + + if (!list_empty(&qh->qtd_list)) + qh_completions(ehci, qh); + if (!list_empty(&qh->qtd_list) && + ehci->rh_state == EHCI_RH_RUNNING) + qh_link_async(ehci, qh); + disable_async(ehci); + } + ehci->async_unlinking = false; +} + +static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh); + +static void unlink_empty_async(struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + struct ehci_qh *qh_to_unlink = NULL; + int count = 0; + + /* Find the last async QH which has been empty for a timer cycle */ + for (qh = ehci->async->qh_next.qh; qh; qh = qh->qh_next.qh) { + if (list_empty(&qh->qtd_list) && + qh->qh_state == QH_STATE_LINKED) { + ++count; + if (qh->unlink_cycle != ehci->async_unlink_cycle) + qh_to_unlink = qh; + } + } + + /* If nothing else is being unlinked, unlink the last empty QH */ + if (list_empty(&ehci->async_unlink) && qh_to_unlink) { + start_unlink_async(ehci, qh_to_unlink); + --count; + } + + /* Other QHs will be handled later */ + if (count > 0) { + ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); + ++ehci->async_unlink_cycle; + } +} + +/* The root hub is suspended; unlink all the async QHs */ +static void __maybe_unused unlink_empty_async_suspended(struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + + while (ehci->async->qh_next.qh) { + qh = ehci->async->qh_next.qh; + WARN_ON(!list_empty(&qh->qtd_list)); + single_unlink_async(ehci, qh); + } + start_iaa_cycle(ehci); +} + +/* makes sure the async qh will become idle */ +/* caller must own ehci->lock */ + +static void start_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + /* If the QH isn't linked then there's nothing we can do. */ + if (qh->qh_state != QH_STATE_LINKED) + return; + + single_unlink_async(ehci, qh); + start_iaa_cycle(ehci); +} + +/*-------------------------------------------------------------------------*/ + +static void scan_async (struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + bool check_unlinks_later = false; + + ehci->qh_scan_next = ehci->async->qh_next.qh; + while (ehci->qh_scan_next) { + qh = ehci->qh_scan_next; + ehci->qh_scan_next = qh->qh_next.qh; + + /* clean any finished work for this qh */ + if (!list_empty(&qh->qtd_list)) { + int temp; + + /* + * Unlinks could happen here; completion reporting + * drops the lock. That's why ehci->qh_scan_next + * always holds the next qh to scan; if the next qh + * gets unlinked then ehci->qh_scan_next is adjusted + * in single_unlink_async(). + */ + temp = qh_completions(ehci, qh); + if (unlikely(temp)) { + start_unlink_async(ehci, qh); + } else if (list_empty(&qh->qtd_list) + && qh->qh_state == QH_STATE_LINKED) { + qh->unlink_cycle = ehci->async_unlink_cycle; + check_unlinks_later = true; + } + } + } + + /* + * Unlink empty entries, reducing DMA usage as well + * as HCD schedule-scanning costs. Delay for any qh + * we just scanned, there's a not-unusual case that it + * doesn't stay idle for long. 
+ */ + if (check_unlinks_later && ehci->rh_state == EHCI_RH_RUNNING && + !(ehci->enabled_hrtimer_events & + BIT(EHCI_HRTIMER_ASYNC_UNLINKS))) { + ehci_enable_event(ehci, EHCI_HRTIMER_ASYNC_UNLINKS, true); + ++ehci->async_unlink_cycle; + } +} diff --git a/addons/ehci-pci/src/4.4.180/ehci-sched.c b/addons/ehci-pci/src/4.4.180/ehci-sched.c new file mode 100644 index 00000000..f9a33277 --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/ehci-sched.c @@ -0,0 +1,2517 @@ +/* + * Copyright (c) 2001-2004 by David Brownell + * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* this file is part of ehci-hcd.c */ + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI scheduled transaction support: interrupt, iso, split iso + * These are called "periodic" transactions in the EHCI spec. + * + * Note that for interrupt transfers, the QH/QTD manipulation is shared + * with the "asynchronous" transaction support (control/bulk transfers). + * The only real difference is in how interrupt transfers are scheduled. + * + * For ISO, we make an "iso_stream" head to serve the same role as a QH. + * It keeps track of every ITD (or SITD) that's linked, and holds enough + * pre-calculated schedule data to make appending to the queue be quick. 
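+ * + * A note on the bandwidth bookkeeping below: an endpoint's bInterval is + * first clamped to the schedule size and rounded down to a power of two + * (urb->interval may then lower the result further). A minimal sketch of + * that rounding step, as a hypothetical helper that the driver itself + * does not use (qh_make() and iso_stream_init() do the real work): + */ +static inline unsigned example_bw_uperiod(unsigned uinterval) +{ + /* clamp to the uframe bandwidth table, then round down to 2^n */ + if (uinterval == 0) + return 1; + if (uinterval > EHCI_BANDWIDTH_SIZE) + uinterval = EHCI_BANDWIDTH_SIZE; + return rounddown_pow_of_two(uinterval); +} + +/*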
+ */ + +static int ehci_get_frame (struct usb_hcd *hcd); + +/* + * periodic_next_shadow - return "next" pointer on shadow list + * @periodic: host pointer to qh/itd/sitd + * @tag: hardware tag for type of this record + */ +static union ehci_shadow * +periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic, + __hc32 tag) +{ + switch (hc32_to_cpu(ehci, tag)) { + case Q_TYPE_QH: + return &periodic->qh->qh_next; + case Q_TYPE_FSTN: + return &periodic->fstn->fstn_next; + case Q_TYPE_ITD: + return &periodic->itd->itd_next; + // case Q_TYPE_SITD: + default: + return &periodic->sitd->sitd_next; + } +} + +static __hc32 * +shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic, + __hc32 tag) +{ + switch (hc32_to_cpu(ehci, tag)) { + /* our ehci_shadow.qh is actually software part */ + case Q_TYPE_QH: + return &periodic->qh->hw->hw_next; + /* others are hw parts */ + default: + return periodic->hw_next; + } +} + +/* caller must hold ehci->lock */ +static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) +{ + union ehci_shadow *prev_p = &ehci->pshadow[frame]; + __hc32 *hw_p = &ehci->periodic[frame]; + union ehci_shadow here = *prev_p; + + /* find predecessor of "ptr"; hw and shadow lists are in sync */ + while (here.ptr && here.ptr != ptr) { + prev_p = periodic_next_shadow(ehci, prev_p, + Q_NEXT_TYPE(ehci, *hw_p)); + hw_p = shadow_next_periodic(ehci, &here, + Q_NEXT_TYPE(ehci, *hw_p)); + here = *prev_p; + } + /* an interrupt entry (at list end) could have been shared */ + if (!here.ptr) + return; + + /* update shadow and hardware lists ... the old "next" pointers + * from ptr may still be in use, the caller updates them. + */ + *prev_p = *periodic_next_shadow(ehci, &here, + Q_NEXT_TYPE(ehci, *hw_p)); + + if (!ehci->use_dummy_qh || + *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p)) + != EHCI_LIST_END(ehci)) + *hw_p = *shadow_next_periodic(ehci, &here, + Q_NEXT_TYPE(ehci, *hw_p)); + else + *hw_p = cpu_to_hc32(ehci, ehci->dummy->qh_dma); +} + +/*-------------------------------------------------------------------------*/ + +/* Bandwidth and TT management */ + +/* Find the TT data structure for this device; create it if necessary */ +static struct ehci_tt *find_tt(struct usb_device *udev) +{ + struct usb_tt *utt = udev->tt; + struct ehci_tt *tt, **tt_index, **ptt; + unsigned port; + bool allocated_index = false; + + if (!utt) + return NULL; /* Not below a TT */ + + /* + * Find/create our data structure. + * For hubs with a single TT, we get it directly. + * For hubs with multiple TTs, there's an extra level of pointers. 
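+ * + * e.g. for a multi-TT hub with four ports, utt->hcpriv holds a + * four-entry array of struct ehci_tt pointers, one per downstream + * port; for a single-TT hub, utt->hcpriv is the struct ehci_tt + * itself.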
+ */ + tt_index = NULL; + if (utt->multi) { + tt_index = utt->hcpriv; + if (!tt_index) { /* Create the index array */ + tt_index = kzalloc(utt->hub->maxchild * + sizeof(*tt_index), GFP_ATOMIC); + if (!tt_index) + return ERR_PTR(-ENOMEM); + utt->hcpriv = tt_index; + allocated_index = true; + } + port = udev->ttport - 1; + ptt = &tt_index[port]; + } else { + port = 0; + ptt = (struct ehci_tt **) &utt->hcpriv; + } + + tt = *ptt; + if (!tt) { /* Create the ehci_tt */ + struct ehci_hcd *ehci = + hcd_to_ehci(bus_to_hcd(udev->bus)); + + tt = kzalloc(sizeof(*tt), GFP_ATOMIC); + if (!tt) { + if (allocated_index) { + utt->hcpriv = NULL; + kfree(tt_index); + } + return ERR_PTR(-ENOMEM); + } + list_add_tail(&tt->tt_list, &ehci->tt_list); + INIT_LIST_HEAD(&tt->ps_list); + tt->usb_tt = utt; + tt->tt_port = port; + *ptt = tt; + } + + return tt; +} + +/* Release the TT above udev, if it's not in use */ +static void drop_tt(struct usb_device *udev) +{ + struct usb_tt *utt = udev->tt; + struct ehci_tt *tt, **tt_index, **ptt; + int cnt, i; + + if (!utt || !utt->hcpriv) + return; /* Not below a TT, or never allocated */ + + cnt = 0; + if (utt->multi) { + tt_index = utt->hcpriv; + ptt = &tt_index[udev->ttport - 1]; + + /* How many entries are left in tt_index? */ + for (i = 0; i < utt->hub->maxchild; ++i) + cnt += !!tt_index[i]; + } else { + tt_index = NULL; + ptt = (struct ehci_tt **) &utt->hcpriv; + } + + tt = *ptt; + if (!tt || !list_empty(&tt->ps_list)) + return; /* never allocated, or still in use */ + + list_del(&tt->tt_list); + *ptt = NULL; + kfree(tt); + if (cnt == 1) { + utt->hcpriv = NULL; + kfree(tt_index); + } +} + +static void bandwidth_dbg(struct ehci_hcd *ehci, int sign, char *type, + struct ehci_per_sched *ps) +{ + dev_dbg(&ps->udev->dev, + "ep %02x: %s %s @ %u+%u (%u.%u+%u) [%u/%u us] mask %04x\n", + ps->ep->desc.bEndpointAddress, + (sign >= 0 ? 
"reserve" : "release"), type, + (ps->bw_phase << 3) + ps->phase_uf, ps->bw_uperiod, + ps->phase, ps->phase_uf, ps->period, + ps->usecs, ps->c_usecs, ps->cs_mask); +} + +static void reserve_release_intr_bandwidth(struct ehci_hcd *ehci, + struct ehci_qh *qh, int sign) +{ + unsigned start_uf; + unsigned i, j, m; + int usecs = qh->ps.usecs; + int c_usecs = qh->ps.c_usecs; + int tt_usecs = qh->ps.tt_usecs; + struct ehci_tt *tt; + + if (qh->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */ + return; + start_uf = qh->ps.bw_phase << 3; + + bandwidth_dbg(ehci, sign, "intr", &qh->ps); + + if (sign < 0) { /* Release bandwidth */ + usecs = -usecs; + c_usecs = -c_usecs; + tt_usecs = -tt_usecs; + } + + /* Entire transaction (high speed) or start-split (full/low speed) */ + for (i = start_uf + qh->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE; + i += qh->ps.bw_uperiod) + ehci->bandwidth[i] += usecs; + + /* Complete-split (full/low speed) */ + if (qh->ps.c_usecs) { + /* NOTE: adjustments needed for FSTN */ + for (i = start_uf; i < EHCI_BANDWIDTH_SIZE; + i += qh->ps.bw_uperiod) { + for ((j = 2, m = 1 << (j+8)); j < 8; (++j, m <<= 1)) { + if (qh->ps.cs_mask & m) + ehci->bandwidth[i+j] += c_usecs; + } + } + } + + /* FS/LS bus bandwidth */ + if (tt_usecs) { + tt = find_tt(qh->ps.udev); + if (sign > 0) + list_add_tail(&qh->ps.ps_list, &tt->ps_list); + else + list_del(&qh->ps.ps_list); + + for (i = start_uf >> 3; i < EHCI_BANDWIDTH_FRAMES; + i += qh->ps.bw_period) + tt->bandwidth[i] += tt_usecs; + } +} + +/*-------------------------------------------------------------------------*/ + +static void compute_tt_budget(u8 budget_table[EHCI_BANDWIDTH_SIZE], + struct ehci_tt *tt) +{ + struct ehci_per_sched *ps; + unsigned uframe, uf, x; + u8 *budget_line; + + if (!tt) + return; + memset(budget_table, 0, EHCI_BANDWIDTH_SIZE); + + /* Add up the contributions from all the endpoints using this TT */ + list_for_each_entry(ps, &tt->ps_list, ps_list) { + for (uframe = ps->bw_phase << 3; uframe < EHCI_BANDWIDTH_SIZE; + uframe += ps->bw_uperiod) { + budget_line = &budget_table[uframe]; + x = ps->tt_usecs; + + /* propagate the time forward */ + for (uf = ps->phase_uf; uf < 8; ++uf) { + x += budget_line[uf]; + + /* Each microframe lasts 125 us */ + if (x <= 125) { + budget_line[uf] = x; + break; + } else { + budget_line[uf] = 125; + x -= 125; + } + } + } + } +} + +static int __maybe_unused same_tt(struct usb_device *dev1, + struct usb_device *dev2) +{ + if (!dev1->tt || !dev2->tt) + return 0; + if (dev1->tt != dev2->tt) + return 0; + if (dev1->tt->multi) + return dev1->ttport == dev2->ttport; + else + return 1; +} + +#ifdef CONFIG_USB_EHCI_TT_NEWSCHED + +/* Which uframe does the low/fullspeed transfer start in? + * + * The parameter is the mask of ssplits in "H-frame" terms + * and this returns the transfer start uframe in "B-frame" terms, + * which allows both to match, e.g. a ssplit in "H-frame" uframe 0 + * will cause a transfer in "B-frame" uframe 0. "B-frames" lag + * "H-frames" by 1 uframe. See the EHCI spec sec 4.5 and figure 4.7. 
+ */ +static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask) +{ + unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask); + if (!smask) { + ehci_err(ehci, "invalid empty smask!\n"); + /* uframe 7 can't have bw so this will indicate failure */ + return 7; + } + return ffs(smask) - 1; +} + +static const unsigned char +max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; + +/* carryover low/fullspeed bandwidth that crosses uframe boundaries */ +static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) +{ + int i; + for (i=0; i<7; i++) { + if (max_tt_usecs[i] < tt_usecs[i]) { + tt_usecs[i+1] += tt_usecs[i] - max_tt_usecs[i]; + tt_usecs[i] = max_tt_usecs[i]; + } + } +} + +/* + * Return true if the device's tt's downstream bus is available for a + * periodic transfer of the specified length (usecs), starting at the + * specified frame/uframe. Note that (as summarized in section 11.19 + * of the usb 2.0 spec) TTs can buffer multiple transactions for each + * uframe. + * + * The uframe parameter is when the fullspeed/lowspeed transfer + * should be executed in "B-frame" terms, which is the same as the + * highspeed ssplit's uframe (which is in "H-frame" terms). For example + * a ssplit in "H-frame" 0 causes a transfer in "B-frame" 0. + * See the EHCI spec sec 4.5 and fig 4.7. + * + * This checks if the full/lowspeed bus, at the specified starting uframe, + * has the specified bandwidth available, according to rules listed + * in USB 2.0 spec section 11.18.1 fig 11-60. + * + * This does not check if the transfer would exceed the max ssplit + * limit of 16, specified in USB 2.0 spec section 11.18.4 requirement #4, + * since proper scheduling limits ssplits to less than 16 per uframe. + */ +static int tt_available ( + struct ehci_hcd *ehci, + struct ehci_per_sched *ps, + struct ehci_tt *tt, + unsigned frame, + unsigned uframe +) +{ + unsigned period = ps->bw_period; + unsigned usecs = ps->tt_usecs; + + if ((period == 0) || (uframe >= 7)) /* error */ + return 0; + + for (frame &= period - 1; frame < EHCI_BANDWIDTH_FRAMES; + frame += period) { + unsigned i, uf; + unsigned short tt_usecs[8]; + + if (tt->bandwidth[frame] + usecs > 900) + return 0; + + uf = frame << 3; + for (i = 0; i < 8; (++i, ++uf)) + tt_usecs[i] = ehci->tt_budget[uf]; + + if (max_tt_usecs[uframe] <= tt_usecs[uframe]) + return 0; + + /* special case for isoc transfers larger than 125us: + * the first and each subsequent fully used uframe + * must be empty, so as to not illegally delay + * already scheduled transactions + */ + if (125 < usecs) { + int ufs = (usecs / 125); + + for (i = uframe; i < (uframe + ufs) && i < 8; i++) + if (0 < tt_usecs[i]) + return 0; + } + + tt_usecs[uframe] += usecs; + + carryover_tt_bandwidth(tt_usecs); + + /* fail if the carryover pushed bw past the last uframe's limit */ + if (max_tt_usecs[7] < tt_usecs[7]) + return 0; + } + + return 1; +} + +#else + +/* return true iff the device's transaction translator is available + * for a periodic transfer starting at the specified frame, using + * all the uframes in the mask. + */ +static int tt_no_collision ( + struct ehci_hcd *ehci, + unsigned period, + struct usb_device *dev, + unsigned frame, + u32 uf_mask +) +{ + if (period == 0) /* error */ + return 0; + + /* note bandwidth wastage: split never follows csplit + * (different dev or endpoint) until the next uframe. + * calling convention doesn't make that distinction. 
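+ * + * (sketch: uf_mask covers both the ssplit and csplit uframes, so a + * same-tt QH or SITD whose smask/cmask bits overlap any bit of + * uf_mask reports a collision below.)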
+ */ + for (; frame < ehci->periodic_size; frame += period) { + union ehci_shadow here; + __hc32 type; + struct ehci_qh_hw *hw; + + here = ehci->pshadow [frame]; + type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]); + while (here.ptr) { + switch (hc32_to_cpu(ehci, type)) { + case Q_TYPE_ITD: + type = Q_NEXT_TYPE(ehci, here.itd->hw_next); + here = here.itd->itd_next; + continue; + case Q_TYPE_QH: + hw = here.qh->hw; + if (same_tt(dev, here.qh->ps.udev)) { + u32 mask; + + mask = hc32_to_cpu(ehci, + hw->hw_info2); + /* "knows" no gap is needed */ + mask |= mask >> 8; + if (mask & uf_mask) + break; + } + type = Q_NEXT_TYPE(ehci, hw->hw_next); + here = here.qh->qh_next; + continue; + case Q_TYPE_SITD: + if (same_tt (dev, here.sitd->urb->dev)) { + u16 mask; + + mask = hc32_to_cpu(ehci, here.sitd + ->hw_uframe); + /* FIXME assumes no gap for IN! */ + mask |= mask >> 8; + if (mask & uf_mask) + break; + } + type = Q_NEXT_TYPE(ehci, here.sitd->hw_next); + here = here.sitd->sitd_next; + continue; + // case Q_TYPE_FSTN: + default: + ehci_dbg (ehci, + "periodic frame %d bogus type %d\n", + frame, type); + } + + /* collision or error */ + return 0; + } + } + + /* no collision */ + return 1; +} + +#endif /* CONFIG_USB_EHCI_TT_NEWSCHED */ + +/*-------------------------------------------------------------------------*/ + +static void enable_periodic(struct ehci_hcd *ehci) +{ + if (ehci->periodic_count++) + return; + + /* Stop waiting to turn off the periodic schedule */ + ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC); + + /* Don't start the schedule until PSS is 0 */ + ehci_poll_PSS(ehci); + turn_on_io_watchdog(ehci); +} + +static void disable_periodic(struct ehci_hcd *ehci) +{ + if (--ehci->periodic_count) + return; + + /* Don't turn off the schedule until PSS is 1 */ + ehci_poll_PSS(ehci); +} + +/*-------------------------------------------------------------------------*/ + +/* periodic schedule slots have iso tds (normal or split) first, then a + * sparse tree for active interrupt transfers. + * + * this just links in a qh; caller guarantees uframe masks are set right. 
+ * no FSTN support (yet; ehci 0.96+) + */ +static void qh_link_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + unsigned i; + unsigned period = qh->ps.period; + + dev_dbg(&qh->ps.udev->dev, + "link qh%d-%04x/%p start %d [%d/%d us]\n", + period, hc32_to_cpup(ehci, &qh->hw->hw_info2) + & (QH_CMASK | QH_SMASK), + qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs); + + /* high bandwidth, or otherwise every microframe */ + if (period == 0) + period = 1; + + for (i = qh->ps.phase; i < ehci->periodic_size; i += period) { + union ehci_shadow *prev = &ehci->pshadow[i]; + __hc32 *hw_p = &ehci->periodic[i]; + union ehci_shadow here = *prev; + __hc32 type = 0; + + /* skip the iso nodes at list head */ + while (here.ptr) { + type = Q_NEXT_TYPE(ehci, *hw_p); + if (type == cpu_to_hc32(ehci, Q_TYPE_QH)) + break; + prev = periodic_next_shadow(ehci, prev, type); + hw_p = shadow_next_periodic(ehci, &here, type); + here = *prev; + } + + /* sorting each branch by period (slow-->fast) + * enables sharing interior tree nodes + */ + while (here.ptr && qh != here.qh) { + if (qh->ps.period > here.qh->ps.period) + break; + prev = &here.qh->qh_next; + hw_p = &here.qh->hw->hw_next; + here = *prev; + } + /* link in this qh, unless some earlier pass did that */ + if (qh != here.qh) { + qh->qh_next = here; + if (here.qh) + qh->hw->hw_next = *hw_p; + wmb (); + prev->qh = qh; + *hw_p = QH_NEXT (ehci, qh->qh_dma); + } + } + qh->qh_state = QH_STATE_LINKED; + qh->xacterrs = 0; + qh->exception = 0; + + /* update per-qh bandwidth for debugfs */ + ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->ps.bw_period + ? ((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period) + : (qh->ps.usecs * 8); + + list_add(&qh->intr_node, &ehci->intr_qh_list); + + /* maybe enable periodic schedule processing */ + ++ehci->intr_count; + enable_periodic(ehci); +} + +static void qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + unsigned i; + unsigned period; + + /* + * If qh is for a low/full-speed device, simply unlinking it + * could interfere with an ongoing split transaction. To unlink + * it safely would require setting the QH_INACTIVATE bit and + * waiting at least one frame, as described in EHCI 4.12.2.5. + * + * We won't bother with any of this. Instead, we assume that the + * only reason for unlinking an interrupt QH while the current URB + * is still active is to dequeue all the URBs (flush the whole + * endpoint queue). + * + * If rebalancing the periodic schedule is ever implemented, this + * approach will no longer be valid. + */ + + /* high bandwidth, or otherwise part of every microframe */ + period = qh->ps.period ? : 1; + + for (i = qh->ps.phase; i < ehci->periodic_size; i += period) + periodic_unlink (ehci, i, qh); + + /* update per-qh bandwidth for debugfs */ + ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->ps.bw_period + ? 
((qh->ps.usecs + qh->ps.c_usecs) / qh->ps.bw_period) + : (qh->ps.usecs * 8); + + dev_dbg(&qh->ps.udev->dev, + "unlink qh%d-%04x/%p start %d [%d/%d us]\n", + qh->ps.period, + hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK), + qh, qh->ps.phase, qh->ps.usecs, qh->ps.c_usecs); + + /* qh->qh_next still "live" to HC */ + qh->qh_state = QH_STATE_UNLINK; + qh->qh_next.ptr = NULL; + + if (ehci->qh_scan_next == qh) + ehci->qh_scan_next = list_entry(qh->intr_node.next, + struct ehci_qh, intr_node); + list_del(&qh->intr_node); +} + +static void cancel_unlink_wait_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + if (qh->qh_state != QH_STATE_LINKED || + list_empty(&qh->unlink_node)) + return; + + list_del_init(&qh->unlink_node); + + /* + * TODO: disable the event of EHCI_HRTIMER_START_UNLINK_INTR for + * avoiding unnecessary CPU wakeup + */ +} + +static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + /* If the QH isn't linked then there's nothing we can do. */ + if (qh->qh_state != QH_STATE_LINKED) + return; + + /* if the qh is waiting for unlink, cancel it now */ + cancel_unlink_wait_intr(ehci, qh); + + qh_unlink_periodic (ehci, qh); + + /* Make sure the unlinks are visible before starting the timer */ + wmb(); + + /* + * The EHCI spec doesn't say how long it takes the controller to + * stop accessing an unlinked interrupt QH. The timer delay is + * 9 uframes; presumably that will be long enough. + */ + qh->unlink_cycle = ehci->intr_unlink_cycle; + + /* New entries go at the end of the intr_unlink list */ + list_add_tail(&qh->unlink_node, &ehci->intr_unlink); + + if (ehci->intr_unlinking) + ; /* Avoid recursive calls */ + else if (ehci->rh_state < EHCI_RH_RUNNING) + ehci_handle_intr_unlinks(ehci); + else if (ehci->intr_unlink.next == &qh->unlink_node) { + ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true); + ++ehci->intr_unlink_cycle; + } +} + +/* + * It is common only one intr URB is scheduled on one qh, and + * given complete() is run in tasklet context, introduce a bit + * delay to avoid unlink qh too early. + */ +static void start_unlink_intr_wait(struct ehci_hcd *ehci, + struct ehci_qh *qh) +{ + qh->unlink_cycle = ehci->intr_unlink_wait_cycle; + + /* New entries go at the end of the intr_unlink_wait list */ + list_add_tail(&qh->unlink_node, &ehci->intr_unlink_wait); + + if (ehci->rh_state < EHCI_RH_RUNNING) + ehci_handle_start_intr_unlinks(ehci); + else if (ehci->intr_unlink_wait.next == &qh->unlink_node) { + ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true); + ++ehci->intr_unlink_wait_cycle; + } +} + +static void end_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + struct ehci_qh_hw *hw = qh->hw; + int rc; + + qh->qh_state = QH_STATE_IDLE; + hw->hw_next = EHCI_LIST_END(ehci); + + if (!list_empty(&qh->qtd_list)) + qh_completions(ehci, qh); + + /* reschedule QH iff another request is queued */ + if (!list_empty(&qh->qtd_list) && ehci->rh_state == EHCI_RH_RUNNING) { + rc = qh_schedule(ehci, qh); + if (rc == 0) { + qh_refresh(ehci, qh); + qh_link_periodic(ehci, qh); + } + + /* An error here likely indicates handshake failure + * or no space left in the schedule. Neither fault + * should happen often ... 
+ * + * FIXME kill the now-dysfunctional queued urbs + */ + else { + ehci_err(ehci, "can't reschedule qh %p, err %d\n", + qh, rc); + } + } + + /* maybe turn off periodic schedule */ + --ehci->intr_count; + disable_periodic(ehci); +} + +/*-------------------------------------------------------------------------*/ + +static int check_period ( + struct ehci_hcd *ehci, + unsigned frame, + unsigned uframe, + unsigned uperiod, + unsigned usecs +) { + /* complete split running into next frame? + * given FSTN support, we could sometimes check... + */ + if (uframe >= 8) + return 0; + + /* convert "usecs we need" to "max already claimed" */ + usecs = ehci->uframe_periodic_max - usecs; + + for (uframe += frame << 3; uframe < EHCI_BANDWIDTH_SIZE; + uframe += uperiod) { + if (ehci->bandwidth[uframe] > usecs) + return 0; + } + + // success! + return 1; +} + +static int check_intr_schedule ( + struct ehci_hcd *ehci, + unsigned frame, + unsigned uframe, + struct ehci_qh *qh, + unsigned *c_maskp, + struct ehci_tt *tt +) +{ + int retval = -ENOSPC; + u8 mask = 0; + + if (qh->ps.c_usecs && uframe >= 6) /* FSTN territory? */ + goto done; + + if (!check_period(ehci, frame, uframe, qh->ps.bw_uperiod, qh->ps.usecs)) + goto done; + if (!qh->ps.c_usecs) { + retval = 0; + *c_maskp = 0; + goto done; + } + +#ifdef CONFIG_USB_EHCI_TT_NEWSCHED + if (tt_available(ehci, &qh->ps, tt, frame, uframe)) { + unsigned i; + + /* TODO : this may need FSTN for SSPLIT in uframe 5. */ + for (i = uframe+2; i < 8 && i <= uframe+4; i++) + if (!check_period(ehci, frame, i, + qh->ps.bw_uperiod, qh->ps.c_usecs)) + goto done; + else + mask |= 1 << i; + + retval = 0; + + *c_maskp = mask; + } +#else + /* Make sure this tt's buffer is also available for CSPLITs. + * We pessimize a bit; probably the typical full speed case + * doesn't need the second CSPLIT. + * + * NOTE: both SPLIT and CSPLIT could be checked in just + * one smart pass... + */ + mask = 0x03 << (uframe + qh->gap_uf); + *c_maskp = mask; + + mask |= 1 << uframe; + if (tt_no_collision(ehci, qh->ps.bw_period, qh->ps.udev, frame, mask)) { + if (!check_period(ehci, frame, uframe + qh->gap_uf + 1, + qh->ps.bw_uperiod, qh->ps.c_usecs)) + goto done; + if (!check_period(ehci, frame, uframe + qh->gap_uf, + qh->ps.bw_uperiod, qh->ps.c_usecs)) + goto done; + retval = 0; + } +#endif +done: + return retval; +} + +/* "first fit" scheduling policy used the first time through, + * or when the previous schedule slot can't be re-used. + */ +static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + int status = 0; + unsigned uframe; + unsigned c_mask; + struct ehci_qh_hw *hw = qh->hw; + struct ehci_tt *tt; + + hw->hw_next = EHCI_LIST_END(ehci); + + /* reuse the previous schedule slots, if we can */ + if (qh->ps.phase != NO_FRAME) { + ehci_dbg(ehci, "reused qh %p schedule\n", qh); + return 0; + } + + uframe = 0; + c_mask = 0; + tt = find_tt(qh->ps.udev); + if (IS_ERR(tt)) { + status = PTR_ERR(tt); + goto done; + } + compute_tt_budget(ehci->tt_budget, tt); + + /* else scan the schedule to find a group of slots such that all + * uframes have enough periodic bandwidth available. 
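+ * + * (randomized first-fit: for bw_period == 8, the loop below probes + * up to 8 candidate frames, spread via ehci->random_frame, trying + * all 8 uframes of each through check_intr_schedule() until one + * placement fits.)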
+ */ + /* "normal" case, uframing flexible except with splits */ + if (qh->ps.bw_period) { + int i; + unsigned frame; + + for (i = qh->ps.bw_period; i > 0; --i) { + frame = ++ehci->random_frame & (qh->ps.bw_period - 1); + for (uframe = 0; uframe < 8; uframe++) { + status = check_intr_schedule(ehci, + frame, uframe, qh, &c_mask, tt); + if (status == 0) + goto got_it; + } + } + + /* qh->ps.bw_period == 0 means every uframe */ + } else { + status = check_intr_schedule(ehci, 0, 0, qh, &c_mask, tt); + } + if (status) + goto done; + + got_it: + qh->ps.phase = (qh->ps.period ? ehci->random_frame & + (qh->ps.period - 1) : 0); + qh->ps.bw_phase = qh->ps.phase & (qh->ps.bw_period - 1); + qh->ps.phase_uf = uframe; + qh->ps.cs_mask = qh->ps.period ? + (c_mask << 8) | (1 << uframe) : + QH_SMASK; + + /* reset S-frame and (maybe) C-frame masks */ + hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK)); + hw->hw_info2 |= cpu_to_hc32(ehci, qh->ps.cs_mask); + reserve_release_intr_bandwidth(ehci, qh, 1); + +done: + return status; +} + +static int intr_submit ( + struct ehci_hcd *ehci, + struct urb *urb, + struct list_head *qtd_list, + gfp_t mem_flags +) { + unsigned epnum; + unsigned long flags; + struct ehci_qh *qh; + int status; + struct list_head empty; + + /* get endpoint and transfer/schedule data */ + epnum = urb->ep->desc.bEndpointAddress; + + spin_lock_irqsave (&ehci->lock, flags); + + if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { + status = -ESHUTDOWN; + goto done_not_linked; + } + status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); + if (unlikely(status)) + goto done_not_linked; + + /* get qh and force any scheduling errors */ + INIT_LIST_HEAD (&empty); + qh = qh_append_tds(ehci, urb, &empty, epnum, &urb->ep->hcpriv); + if (qh == NULL) { + status = -ENOMEM; + goto done; + } + if (qh->qh_state == QH_STATE_IDLE) { + if ((status = qh_schedule (ehci, qh)) != 0) + goto done; + } + + /* then queue the urb's tds to the qh */ + qh = qh_append_tds(ehci, urb, qtd_list, epnum, &urb->ep->hcpriv); + BUG_ON (qh == NULL); + + /* stuff into the periodic schedule */ + if (qh->qh_state == QH_STATE_IDLE) { + qh_refresh(ehci, qh); + qh_link_periodic(ehci, qh); + } else { + /* cancel unlink wait for the qh */ + cancel_unlink_wait_intr(ehci, qh); + } + + /* ... update usbfs periodic stats */ + ehci_to_hcd(ehci)->self.bandwidth_int_reqs++; + +done: + if (unlikely(status)) + usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); +done_not_linked: + spin_unlock_irqrestore (&ehci->lock, flags); + if (status) + qtd_list_free (ehci, urb, qtd_list); + + return status; +} + +static void scan_intr(struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + + list_for_each_entry_safe(qh, ehci->qh_scan_next, &ehci->intr_qh_list, + intr_node) { + + /* clean any finished work for this qh */ + if (!list_empty(&qh->qtd_list)) { + int temp; + + /* + * Unlinks could happen here; completion reporting + * drops the lock. That's why ehci->qh_scan_next + * always holds the next qh to scan; if the next qh + * gets unlinked then ehci->qh_scan_next is adjusted + * in qh_unlink_periodic(). 
+ */ + temp = qh_completions(ehci, qh); + if (unlikely(temp)) + start_unlink_intr(ehci, qh); + else if (unlikely(list_empty(&qh->qtd_list) && + qh->qh_state == QH_STATE_LINKED)) + start_unlink_intr_wait(ehci, qh); + } + } +} + +/*-------------------------------------------------------------------------*/ + +/* ehci_iso_stream ops work with both ITD and SITD */ + +static struct ehci_iso_stream * +iso_stream_alloc (gfp_t mem_flags) +{ + struct ehci_iso_stream *stream; + + stream = kzalloc(sizeof *stream, mem_flags); + if (likely (stream != NULL)) { + INIT_LIST_HEAD(&stream->td_list); + INIT_LIST_HEAD(&stream->free_list); + stream->next_uframe = NO_FRAME; + stream->ps.phase = NO_FRAME; + } + return stream; +} + +static void +iso_stream_init ( + struct ehci_hcd *ehci, + struct ehci_iso_stream *stream, + struct urb *urb +) +{ + static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f }; + + struct usb_device *dev = urb->dev; + u32 buf1; + unsigned epnum, maxp; + int is_input; + unsigned tmp; + + /* + * this might be a "high bandwidth" highspeed endpoint, + * as encoded in the ep descriptor's wMaxPacket field + */ + epnum = usb_pipeendpoint(urb->pipe); + is_input = usb_pipein(urb->pipe) ? USB_DIR_IN : 0; + maxp = usb_endpoint_maxp(&urb->ep->desc); + if (is_input) { + buf1 = (1 << 11); + } else { + buf1 = 0; + } + + /* knows about ITD vs SITD */ + if (dev->speed == USB_SPEED_HIGH) { + unsigned multi = hb_mult(maxp); + + stream->highspeed = 1; + + maxp = max_packet(maxp); + buf1 |= maxp; + maxp *= multi; + + stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum); + stream->buf1 = cpu_to_hc32(ehci, buf1); + stream->buf2 = cpu_to_hc32(ehci, multi); + + /* usbfs wants to report the average usecs per frame tied up + * when transfers on this endpoint are scheduled ... + */ + stream->ps.usecs = HS_USECS_ISO(maxp); + + /* period for bandwidth allocation */ + tmp = min_t(unsigned, EHCI_BANDWIDTH_SIZE, + 1 << (urb->ep->desc.bInterval - 1)); + + /* Allow urb->interval to override */ + stream->ps.bw_uperiod = min_t(unsigned, tmp, urb->interval); + + stream->uperiod = urb->interval; + stream->ps.period = urb->interval >> 3; + stream->bandwidth = stream->ps.usecs * 8 / + stream->ps.bw_uperiod; + + } else { + u32 addr; + int think_time; + int hs_transfers; + + addr = dev->ttport << 24; + if (!ehci_is_TDI(ehci) + || (dev->tt->hub != + ehci_to_hcd(ehci)->self.root_hub)) + addr |= dev->tt->hub->devnum << 16; + addr |= epnum << 8; + addr |= dev->devnum; + stream->ps.usecs = HS_USECS_ISO(maxp); + think_time = dev->tt ? 
dev->tt->think_time : 0; + stream->ps.tt_usecs = NS_TO_US(think_time + usb_calc_bus_time( + dev->speed, is_input, 1, maxp)); + hs_transfers = max (1u, (maxp + 187) / 188); + if (is_input) { + u32 tmp; + + addr |= 1 << 31; + stream->ps.c_usecs = stream->ps.usecs; + stream->ps.usecs = HS_USECS_ISO(1); + stream->ps.cs_mask = 1; + + /* c-mask as specified in USB 2.0 11.18.4 3.c */ + tmp = (1 << (hs_transfers + 2)) - 1; + stream->ps.cs_mask |= tmp << (8 + 2); + } else + stream->ps.cs_mask = smask_out[hs_transfers - 1]; + + /* period for bandwidth allocation */ + tmp = min_t(unsigned, EHCI_BANDWIDTH_FRAMES, + 1 << (urb->ep->desc.bInterval - 1)); + + /* Allow urb->interval to override */ + stream->ps.bw_period = min_t(unsigned, tmp, urb->interval); + stream->ps.bw_uperiod = stream->ps.bw_period << 3; + + stream->ps.period = urb->interval; + stream->uperiod = urb->interval << 3; + stream->bandwidth = (stream->ps.usecs + stream->ps.c_usecs) / + stream->ps.bw_period; + + /* stream->splits gets created from cs_mask later */ + stream->address = cpu_to_hc32(ehci, addr); + } + + stream->ps.udev = dev; + stream->ps.ep = urb->ep; + + stream->bEndpointAddress = is_input | epnum; + stream->maxp = maxp; +} + +static struct ehci_iso_stream * +iso_stream_find (struct ehci_hcd *ehci, struct urb *urb) +{ + unsigned epnum; + struct ehci_iso_stream *stream; + struct usb_host_endpoint *ep; + unsigned long flags; + + epnum = usb_pipeendpoint (urb->pipe); + if (usb_pipein(urb->pipe)) + ep = urb->dev->ep_in[epnum]; + else + ep = urb->dev->ep_out[epnum]; + + spin_lock_irqsave (&ehci->lock, flags); + stream = ep->hcpriv; + + if (unlikely (stream == NULL)) { + stream = iso_stream_alloc(GFP_ATOMIC); + if (likely (stream != NULL)) { + ep->hcpriv = stream; + iso_stream_init(ehci, stream, urb); + } + + /* if dev->ep [epnum] is a QH, hw is set */ + } else if (unlikely (stream->hw != NULL)) { + ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n", + urb->dev->devpath, epnum, + usb_pipein(urb->pipe) ? "in" : "out"); + stream = NULL; + } + + spin_unlock_irqrestore (&ehci->lock, flags); + return stream; +} + +/*-------------------------------------------------------------------------*/ + +/* ehci_iso_sched ops can be ITD-only or SITD-only */ + +static struct ehci_iso_sched * +iso_sched_alloc (unsigned packets, gfp_t mem_flags) +{ + struct ehci_iso_sched *iso_sched; + int size = sizeof *iso_sched; + + size += packets * sizeof (struct ehci_iso_packet); + iso_sched = kzalloc(size, mem_flags); + if (likely (iso_sched != NULL)) { + INIT_LIST_HEAD (&iso_sched->td_list); + } + return iso_sched; +} + +static inline void +itd_sched_init( + struct ehci_hcd *ehci, + struct ehci_iso_sched *iso_sched, + struct ehci_iso_stream *stream, + struct urb *urb +) +{ + unsigned i; + dma_addr_t dma = urb->transfer_dma; + + /* how many uframes are needed for these transfers */ + iso_sched->span = urb->number_of_packets * stream->uperiod; + + /* figure out per-uframe itd fields that we'll need later + * when we fit new itds into the schedule. 
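+ * + * e.g. (sketch) a 1024-byte packet whose buffer begins 0x200 bytes + * into a page packs as EHCI_ISOC_ACTIVE | (1024 << 16) | 0x200, + * while uframe->bufp keeps the 4 KB-aligned page address and + * ->cross flags packets that spill into the following page.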
+ */ + for (i = 0; i < urb->number_of_packets; i++) { + struct ehci_iso_packet *uframe = &iso_sched->packet [i]; + unsigned length; + dma_addr_t buf; + u32 trans; + + length = urb->iso_frame_desc [i].length; + buf = dma + urb->iso_frame_desc [i].offset; + + trans = EHCI_ISOC_ACTIVE; + trans |= buf & 0x0fff; + if (unlikely (((i + 1) == urb->number_of_packets)) + && !(urb->transfer_flags & URB_NO_INTERRUPT)) + trans |= EHCI_ITD_IOC; + trans |= length << 16; + uframe->transaction = cpu_to_hc32(ehci, trans); + + /* might need to cross a buffer page within a uframe */ + uframe->bufp = (buf & ~(u64)0x0fff); + buf += length; + if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff)))) + uframe->cross = 1; + } +} + +static void +iso_sched_free ( + struct ehci_iso_stream *stream, + struct ehci_iso_sched *iso_sched +) +{ + if (!iso_sched) + return; + // caller must hold ehci->lock! + list_splice (&iso_sched->td_list, &stream->free_list); + kfree (iso_sched); +} + +static int +itd_urb_transaction ( + struct ehci_iso_stream *stream, + struct ehci_hcd *ehci, + struct urb *urb, + gfp_t mem_flags +) +{ + struct ehci_itd *itd; + dma_addr_t itd_dma; + int i; + unsigned num_itds; + struct ehci_iso_sched *sched; + unsigned long flags; + + sched = iso_sched_alloc (urb->number_of_packets, mem_flags); + if (unlikely (sched == NULL)) + return -ENOMEM; + + itd_sched_init(ehci, sched, stream, urb); + + if (urb->interval < 8) + num_itds = 1 + (sched->span + 7) / 8; + else + num_itds = urb->number_of_packets; + + /* allocate/init ITDs */ + spin_lock_irqsave (&ehci->lock, flags); + for (i = 0; i < num_itds; i++) { + + /* + * Use iTDs from the free list, but not iTDs that may + * still be in use by the hardware. + */ + if (likely(!list_empty(&stream->free_list))) { + itd = list_first_entry(&stream->free_list, + struct ehci_itd, itd_list); + if (itd->frame == ehci->now_frame) + goto alloc_itd; + list_del (&itd->itd_list); + itd_dma = itd->itd_dma; + } else { + alloc_itd: + spin_unlock_irqrestore (&ehci->lock, flags); + itd = dma_pool_alloc (ehci->itd_pool, mem_flags, + &itd_dma); + spin_lock_irqsave (&ehci->lock, flags); + if (!itd) { + iso_sched_free(stream, sched); + spin_unlock_irqrestore(&ehci->lock, flags); + return -ENOMEM; + } + } + + memset (itd, 0, sizeof *itd); + itd->itd_dma = itd_dma; + itd->frame = NO_FRAME; + list_add (&itd->itd_list, &sched->td_list); + } + spin_unlock_irqrestore (&ehci->lock, flags); + + /* temporarily store schedule info in hcpriv */ + urb->hcpriv = sched; + urb->error_count = 0; + return 0; +} + +/*-------------------------------------------------------------------------*/ + +static void reserve_release_iso_bandwidth(struct ehci_hcd *ehci, + struct ehci_iso_stream *stream, int sign) +{ + unsigned uframe; + unsigned i, j; + unsigned s_mask, c_mask, m; + int usecs = stream->ps.usecs; + int c_usecs = stream->ps.c_usecs; + int tt_usecs = stream->ps.tt_usecs; + struct ehci_tt *tt; + + if (stream->ps.phase == NO_FRAME) /* Bandwidth wasn't reserved */ + return; + uframe = stream->ps.bw_phase << 3; + + bandwidth_dbg(ehci, sign, "iso", &stream->ps); + + if (sign < 0) { /* Release bandwidth */ + usecs = -usecs; + c_usecs = -c_usecs; + tt_usecs = -tt_usecs; + } + + if (!stream->splits) { /* High speed */ + for (i = uframe + stream->ps.phase_uf; i < EHCI_BANDWIDTH_SIZE; + i += stream->ps.bw_uperiod) + ehci->bandwidth[i] += usecs; + + } else { /* Full speed */ + s_mask = stream->ps.cs_mask; + c_mask = s_mask >> 8; + + /* NOTE: adjustment needed for frame overflow */ + for (i = uframe; i < 
EHCI_BANDWIDTH_SIZE; + i += stream->ps.bw_uperiod) { + for ((j = stream->ps.phase_uf, m = 1 << j); j < 8; + (++j, m <<= 1)) { + if (s_mask & m) + ehci->bandwidth[i+j] += usecs; + else if (c_mask & m) + ehci->bandwidth[i+j] += c_usecs; + } + } + + tt = find_tt(stream->ps.udev); + if (sign > 0) + list_add_tail(&stream->ps.ps_list, &tt->ps_list); + else + list_del(&stream->ps.ps_list); + + for (i = uframe >> 3; i < EHCI_BANDWIDTH_FRAMES; + i += stream->ps.bw_period) + tt->bandwidth[i] += tt_usecs; + } +} + +static inline int +itd_slot_ok ( + struct ehci_hcd *ehci, + struct ehci_iso_stream *stream, + unsigned uframe +) +{ + unsigned usecs; + + /* convert "usecs we need" to "max already claimed" */ + usecs = ehci->uframe_periodic_max - stream->ps.usecs; + + for (uframe &= stream->ps.bw_uperiod - 1; uframe < EHCI_BANDWIDTH_SIZE; + uframe += stream->ps.bw_uperiod) { + if (ehci->bandwidth[uframe] > usecs) + return 0; + } + return 1; +} + +static inline int +sitd_slot_ok ( + struct ehci_hcd *ehci, + struct ehci_iso_stream *stream, + unsigned uframe, + struct ehci_iso_sched *sched, + struct ehci_tt *tt +) +{ + unsigned mask, tmp; + unsigned frame, uf; + + mask = stream->ps.cs_mask << (uframe & 7); + + /* for OUT, don't wrap SSPLIT into H-microframe 7 */ + if (((stream->ps.cs_mask & 0xff) << (uframe & 7)) >= (1 << 7)) + return 0; + + /* for IN, don't wrap CSPLIT into the next frame */ + if (mask & ~0xffff) + return 0; + + /* check bandwidth */ + uframe &= stream->ps.bw_uperiod - 1; + frame = uframe >> 3; + +#ifdef CONFIG_USB_EHCI_TT_NEWSCHED + /* The tt's fullspeed bus bandwidth must be available. + * tt_available scheduling guarantees 10+% for control/bulk. + */ + uf = uframe & 7; + if (!tt_available(ehci, &stream->ps, tt, frame, uf)) + return 0; +#else + /* tt must be idle for start(s), any gap, and csplit. + * assume scheduling slop leaves 10+% for control/bulk. + */ + if (!tt_no_collision(ehci, stream->ps.bw_period, + stream->ps.udev, frame, mask)) + return 0; +#endif + + do { + unsigned max_used; + unsigned i; + + /* check starts (OUT uses more than one) */ + uf = uframe; + max_used = ehci->uframe_periodic_max - stream->ps.usecs; + for (tmp = stream->ps.cs_mask & 0xff; tmp; tmp >>= 1, uf++) { + if (ehci->bandwidth[uf] > max_used) + return 0; + } + + /* for IN, check CSPLIT */ + if (stream->ps.c_usecs) { + max_used = ehci->uframe_periodic_max - + stream->ps.c_usecs; + uf = uframe & ~7; + tmp = 1 << (2+8); + for (i = (uframe & 7) + 2; i < 8; (++i, tmp <<= 1)) { + if ((stream->ps.cs_mask & tmp) == 0) + continue; + if (ehci->bandwidth[uf+i] > max_used) + return 0; + } + } + + uframe += stream->ps.bw_uperiod; + } while (uframe < EHCI_BANDWIDTH_SIZE); + + stream->ps.cs_mask <<= uframe & 7; + stream->splits = cpu_to_hc32(ehci, stream->ps.cs_mask); + return 1; +} + +/* + * This scheduler plans almost as far into the future as it has actual + * periodic schedule slots. (Affected by TUNE_FLS, which defaults to + * "as small as possible" to be cache-friendlier.) That limits the size + * transfers you can stream reliably; avoid more than 64 msec per urb. + * Also avoid queue depths of less than ehci's worst irq latency (affected + * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter, + * and other factors); or more than about 230 msec total (for portability, + * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler! 
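+ * + * For scale: a high speed stream polled every uframe carrying 512 + * packets already spans 512 * 125 us = 64 ms, the per-urb ceiling + * suggested above.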
+ */ + +static int +iso_stream_schedule ( + struct ehci_hcd *ehci, + struct urb *urb, + struct ehci_iso_stream *stream +) +{ + u32 now, base, next, start, period, span, now2; + u32 wrap = 0, skip = 0; + int status = 0; + unsigned mod = ehci->periodic_size << 3; + struct ehci_iso_sched *sched = urb->hcpriv; + bool empty = list_empty(&stream->td_list); + bool new_stream = false; + + period = stream->uperiod; + span = sched->span; + if (!stream->highspeed) + span <<= 3; + + /* Start a new isochronous stream? */ + if (unlikely(empty && !hcd_periodic_completion_in_progress( + ehci_to_hcd(ehci), urb->ep))) { + + /* Schedule the endpoint */ + if (stream->ps.phase == NO_FRAME) { + int done = 0; + struct ehci_tt *tt = find_tt(stream->ps.udev); + + if (IS_ERR(tt)) { + status = PTR_ERR(tt); + goto fail; + } + compute_tt_budget(ehci->tt_budget, tt); + + start = ((-(++ehci->random_frame)) << 3) & (period - 1); + + /* find a uframe slot with enough bandwidth. + * Early uframes are more precious because full-speed + * iso IN transfers can't use late uframes, + * and therefore they should be allocated last. + */ + next = start; + start += period; + do { + start--; + /* check schedule: enough space? */ + if (stream->highspeed) { + if (itd_slot_ok(ehci, stream, start)) + done = 1; + } else { + if ((start % 8) >= 6) + continue; + if (sitd_slot_ok(ehci, stream, start, + sched, tt)) + done = 1; + } + } while (start > next && !done); + + /* no room in the schedule */ + if (!done) { + ehci_dbg(ehci, "iso sched full %p", urb); + status = -ENOSPC; + goto fail; + } + stream->ps.phase = (start >> 3) & + (stream->ps.period - 1); + stream->ps.bw_phase = stream->ps.phase & + (stream->ps.bw_period - 1); + stream->ps.phase_uf = start & 7; + reserve_release_iso_bandwidth(ehci, stream, 1); + } + + /* New stream is already scheduled; use the upcoming slot */ + else { + start = (stream->ps.phase << 3) + stream->ps.phase_uf; + } + + stream->next_uframe = start; + new_stream = true; + } + + now = ehci_read_frame_index(ehci) & (mod - 1); + + /* Take the isochronous scheduling threshold into account */ + if (ehci->i_thresh) + next = now + ehci->i_thresh; /* uframe cache */ + else + next = (now + 2 + 7) & ~0x07; /* full frame cache */ + + /* If needed, initialize last_iso_frame so that this URB will be seen */ + if (ehci->isoc_count == 0) + ehci->last_iso_frame = now >> 3; + + /* + * Use ehci->last_iso_frame as the base. There can't be any + * TDs scheduled for earlier than that. + */ + base = ehci->last_iso_frame << 3; + next = (next - base) & (mod - 1); + start = (stream->next_uframe - base) & (mod - 1); + + if (unlikely(new_stream)) + goto do_ASAP; + + /* + * Typical case: reuse current schedule, stream may still be active. + * Hopefully there are no gaps from the host falling behind + * (irq delays etc). If there are, the behavior depends on + * whether URB_ISO_ASAP is set. + */ + now2 = (now - base) & (mod - 1); + + /* Is the schedule about to wrap around? */ + if (unlikely(!empty && start < period)) { + ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n", + urb, stream->next_uframe, base, period, mod); + status = -EFBIG; + goto fail; + } + + /* Is the next packet scheduled after the base time? 
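+ * (start and now2 were both rebased above against ehci->last_iso_frame
+ * and reduced mod the schedule size, so plain unsigned compares suffice)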
*/ + if (likely(!empty || start <= now2 + period)) { + + /* URB_ISO_ASAP: make sure that start >= next */ + if (unlikely(start < next && + (urb->transfer_flags & URB_ISO_ASAP))) + goto do_ASAP; + + /* Otherwise use start, if it's not in the past */ + if (likely(start >= now2)) + goto use_start; + + /* Otherwise we got an underrun while the queue was empty */ + } else { + if (urb->transfer_flags & URB_ISO_ASAP) + goto do_ASAP; + wrap = mod; + now2 += mod; + } + + /* How many uframes and packets do we need to skip? */ + skip = (now2 - start + period - 1) & -period; + if (skip >= span) { /* Entirely in the past? */ + ehci_dbg(ehci, "iso underrun %p (%u+%u < %u) [%u]\n", + urb, start + base, span - period, now2 + base, + base); + + /* Try to keep the last TD intact for scanning later */ + skip = span - period; + + /* Will it come before the current scan position? */ + if (empty) { + skip = span; /* Skip the entire URB */ + status = 1; /* and give it back immediately */ + iso_sched_free(stream, sched); + sched = NULL; + } + } + urb->error_count = skip / period; + if (sched) + sched->first_packet = urb->error_count; + goto use_start; + + do_ASAP: + /* Use the first slot after "next" */ + start = next + ((start - next) & (period - 1)); + + use_start: + /* Tried to schedule too far into the future? */ + if (unlikely(start + span - period >= mod + wrap)) { + ehci_dbg(ehci, "request %p would overflow (%u+%u >= %u)\n", + urb, start, span - period, mod + wrap); + status = -EFBIG; + goto fail; + } + + start += base; + stream->next_uframe = (start + skip) & (mod - 1); + + /* report high speed start in uframes; full speed, in frames */ + urb->start_frame = start & (mod - 1); + if (!stream->highspeed) + urb->start_frame >>= 3; + return status; + + fail: + iso_sched_free(stream, sched); + urb->hcpriv = NULL; + return status; +} + +/*-------------------------------------------------------------------------*/ + +static inline void +itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream, + struct ehci_itd *itd) +{ + int i; + + /* it's been recently zeroed */ + itd->hw_next = EHCI_LIST_END(ehci); + itd->hw_bufp [0] = stream->buf0; + itd->hw_bufp [1] = stream->buf1; + itd->hw_bufp [2] = stream->buf2; + + for (i = 0; i < 8; i++) + itd->index[i] = -1; + + /* All other fields are filled when scheduling */ +} + +static inline void +itd_patch( + struct ehci_hcd *ehci, + struct ehci_itd *itd, + struct ehci_iso_sched *iso_sched, + unsigned index, + u16 uframe +) +{ + struct ehci_iso_packet *uf = &iso_sched->packet [index]; + unsigned pg = itd->pg; + + // BUG_ON (pg == 6 && uf->cross); + + uframe &= 0x07; + itd->index [uframe] = index; + + itd->hw_transaction[uframe] = uf->transaction; + itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12); + itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0); + itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32)); + + /* iso_frame_desc[].offset must be strictly increasing */ + if (unlikely (uf->cross)) { + u64 bufp = uf->bufp + 4096; + + itd->pg = ++pg; + itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0); + itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32)); + } +} + +static inline void +itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd) +{ + union ehci_shadow *prev = &ehci->pshadow[frame]; + __hc32 *hw_p = &ehci->periodic[frame]; + union ehci_shadow here = *prev; + __hc32 type = 0; + + /* skip any iso nodes which might belong to previous microframes */ + while (here.ptr) { + type = Q_NEXT_TYPE(ehci, *hw_p); + if (type 
== cpu_to_hc32(ehci, Q_TYPE_QH)) + break; + prev = periodic_next_shadow(ehci, prev, type); + hw_p = shadow_next_periodic(ehci, &here, type); + here = *prev; + } + + itd->itd_next = here; + itd->hw_next = *hw_p; + prev->itd = itd; + itd->frame = frame; + wmb (); + *hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); +} + +/* fit urb's itds into the selected schedule slot; activate as needed */ +static void itd_link_urb( + struct ehci_hcd *ehci, + struct urb *urb, + unsigned mod, + struct ehci_iso_stream *stream +) +{ + int packet; + unsigned next_uframe, uframe, frame; + struct ehci_iso_sched *iso_sched = urb->hcpriv; + struct ehci_itd *itd; + + next_uframe = stream->next_uframe & (mod - 1); + + if (unlikely (list_empty(&stream->td_list))) + ehci_to_hcd(ehci)->self.bandwidth_allocated + += stream->bandwidth; + + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_pll_fix == 1) + usb_amd_quirk_pll_disable(); + } + + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; + + /* fill iTDs uframe by uframe */ + for (packet = iso_sched->first_packet, itd = NULL; + packet < urb->number_of_packets;) { + if (itd == NULL) { + /* ASSERT: we have all necessary itds */ + // BUG_ON (list_empty (&iso_sched->td_list)); + + /* ASSERT: no itds for this endpoint in this uframe */ + + itd = list_entry (iso_sched->td_list.next, + struct ehci_itd, itd_list); + list_move_tail (&itd->itd_list, &stream->td_list); + itd->stream = stream; + itd->urb = urb; + itd_init (ehci, stream, itd); + } + + uframe = next_uframe & 0x07; + frame = next_uframe >> 3; + + itd_patch(ehci, itd, iso_sched, packet, uframe); + + next_uframe += stream->uperiod; + next_uframe &= mod - 1; + packet++; + + /* link completed itds into the schedule */ + if (((next_uframe >> 3) != frame) + || packet == urb->number_of_packets) { + itd_link(ehci, frame & (ehci->periodic_size - 1), itd); + itd = NULL; + } + } + stream->next_uframe = next_uframe; + + /* don't need that schedule data any more */ + iso_sched_free (stream, iso_sched); + urb->hcpriv = stream; + + ++ehci->isoc_count; + enable_periodic(ehci); +} + +#define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR) + +/* Process and recycle a completed ITD. Return true iff its urb completed, + * and hence its completion callback probably added things to the hardware + * schedule. + * + * Note that we carefully avoid recycling this descriptor until after any + * completion callback runs, so that it won't be reused quickly. That is, + * assuming (a) no more than two urbs per frame on this endpoint, and also + * (b) only this endpoint's completions submit URBs. It seems some silicon + * corrupts things if you reuse completed descriptors very quickly... + */ +static bool itd_complete(struct ehci_hcd *ehci, struct ehci_itd *itd) +{ + struct urb *urb = itd->urb; + struct usb_iso_packet_descriptor *desc; + u32 t; + unsigned uframe; + int urb_index = -1; + struct ehci_iso_stream *stream = itd->stream; + struct usb_device *dev; + bool retval = false; + + /* for each uframe with a packet */ + for (uframe = 0; uframe < 8; uframe++) { + if (likely (itd->index[uframe] == -1)) + continue; + urb_index = itd->index[uframe]; + desc = &urb->iso_frame_desc [urb_index]; + + t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]); + itd->hw_transaction [uframe] = 0; + + /* report transfer status */ + if (unlikely (t & ISO_ERRS)) { + urb->error_count++; + if (t & EHCI_ISOC_BUF_ERR) + desc->status = usb_pipein (urb->pipe) + ? 
-ENOSR /* hc couldn't read */ + : -ECOMM; /* hc couldn't write */ + else if (t & EHCI_ISOC_BABBLE) + desc->status = -EOVERFLOW; + else /* (t & EHCI_ISOC_XACTERR) */ + desc->status = -EPROTO; + + /* HC need not update length with this error */ + if (!(t & EHCI_ISOC_BABBLE)) { + desc->actual_length = EHCI_ITD_LENGTH(t); + urb->actual_length += desc->actual_length; + } + } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) { + desc->status = 0; + desc->actual_length = EHCI_ITD_LENGTH(t); + urb->actual_length += desc->actual_length; + } else { + /* URB was too late */ + urb->error_count++; + } + } + + /* handle completion now? */ + if (likely ((urb_index + 1) != urb->number_of_packets)) + goto done; + + /* ASSERT: it's really the last itd for this urb + list_for_each_entry (itd, &stream->td_list, itd_list) + BUG_ON (itd->urb == urb); + */ + + /* give urb back to the driver; completion often (re)submits */ + dev = urb->dev; + ehci_urb_done(ehci, urb, 0); + retval = true; + urb = NULL; + + --ehci->isoc_count; + disable_periodic(ehci); + + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_pll_fix == 1) + usb_amd_quirk_pll_enable(); + } + + if (unlikely(list_is_singular(&stream->td_list))) + ehci_to_hcd(ehci)->self.bandwidth_allocated + -= stream->bandwidth; + +done: + itd->urb = NULL; + + /* Add to the end of the free list for later reuse */ + list_move_tail(&itd->itd_list, &stream->free_list); + + /* Recycle the iTDs when the pipeline is empty (ep no longer in use) */ + if (list_empty(&stream->td_list)) { + list_splice_tail_init(&stream->free_list, + &ehci->cached_itd_list); + start_free_itds(ehci); + } + + return retval; +} + +/*-------------------------------------------------------------------------*/ + +static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, + gfp_t mem_flags) +{ + int status = -EINVAL; + unsigned long flags; + struct ehci_iso_stream *stream; + + /* Get iso_stream head */ + stream = iso_stream_find (ehci, urb); + if (unlikely (stream == NULL)) { + ehci_dbg (ehci, "can't get iso stream\n"); + return -ENOMEM; + } + if (unlikely(urb->interval != stream->uperiod)) { + ehci_dbg (ehci, "can't change iso interval %d --> %d\n", + stream->uperiod, urb->interval); + goto done; + } + +#ifdef EHCI_URB_TRACE + ehci_dbg (ehci, + "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n", + __func__, urb->dev->devpath, urb, + usb_pipeendpoint (urb->pipe), + usb_pipein (urb->pipe) ? "in" : "out", + urb->transfer_buffer_length, + urb->number_of_packets, urb->interval, + stream); +#endif + + /* allocate ITDs w/o locking anything */ + status = itd_urb_transaction (stream, ehci, urb, mem_flags); + if (unlikely (status < 0)) { + ehci_dbg (ehci, "can't init itds\n"); + goto done; + } + + /* schedule ... 
need to lock */ + spin_lock_irqsave (&ehci->lock, flags); + if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { + status = -ESHUTDOWN; + goto done_not_linked; + } + status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); + if (unlikely(status)) + goto done_not_linked; + status = iso_stream_schedule(ehci, urb, stream); + if (likely(status == 0)) { + itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream); + } else if (status > 0) { + status = 0; + ehci_urb_done(ehci, urb, 0); + } else { + usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); + } + done_not_linked: + spin_unlock_irqrestore (&ehci->lock, flags); + done: + return status; +} + +/*-------------------------------------------------------------------------*/ + +/* + * "Split ISO TDs" ... used for USB 1.1 devices going through the + * TTs in USB 2.0 hubs. These need microframe scheduling. + */ + +static inline void +sitd_sched_init( + struct ehci_hcd *ehci, + struct ehci_iso_sched *iso_sched, + struct ehci_iso_stream *stream, + struct urb *urb +) +{ + unsigned i; + dma_addr_t dma = urb->transfer_dma; + + /* how many frames are needed for these transfers */ + iso_sched->span = urb->number_of_packets * stream->ps.period; + + /* figure out per-frame sitd fields that we'll need later + * when we fit new sitds into the schedule. + */ + for (i = 0; i < urb->number_of_packets; i++) { + struct ehci_iso_packet *packet = &iso_sched->packet [i]; + unsigned length; + dma_addr_t buf; + u32 trans; + + length = urb->iso_frame_desc [i].length & 0x03ff; + buf = dma + urb->iso_frame_desc [i].offset; + + trans = SITD_STS_ACTIVE; + if (((i + 1) == urb->number_of_packets) + && !(urb->transfer_flags & URB_NO_INTERRUPT)) + trans |= SITD_IOC; + trans |= length << 16; + packet->transaction = cpu_to_hc32(ehci, trans); + + /* might need to cross a buffer page within a td */ + packet->bufp = buf; + packet->buf1 = (buf + length) & ~0x0fff; + if (packet->buf1 != (buf & ~(u64)0x0fff)) + packet->cross = 1; + + /* OUT uses multiple start-splits */ + if (stream->bEndpointAddress & USB_DIR_IN) + continue; + length = (length + 187) / 188; + if (length > 1) /* BEGIN vs ALL */ + length |= 1 << 3; + packet->buf1 |= length; + } +} + +static int +sitd_urb_transaction ( + struct ehci_iso_stream *stream, + struct ehci_hcd *ehci, + struct urb *urb, + gfp_t mem_flags +) +{ + struct ehci_sitd *sitd; + dma_addr_t sitd_dma; + int i; + struct ehci_iso_sched *iso_sched; + unsigned long flags; + + iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags); + if (iso_sched == NULL) + return -ENOMEM; + + sitd_sched_init(ehci, iso_sched, stream, urb); + + /* allocate/init sITDs */ + spin_lock_irqsave (&ehci->lock, flags); + for (i = 0; i < urb->number_of_packets; i++) { + + /* NOTE: for now, we don't try to handle wraparound cases + * for IN (using sitd->hw_backpointer, like a FSTN), which + * means we never need two sitds for full speed packets. + */ + + /* + * Use siTDs from the free list, but not siTDs that may + * still be in use by the hardware. 
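+ * A descriptor whose ->frame still equals ehci->now_frame may not have
+ * been scanned past by the controller yet, so it is left on the free
+ * list and a fresh siTD is taken from the DMA pool instead.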
+ */ + if (likely(!list_empty(&stream->free_list))) { + sitd = list_first_entry(&stream->free_list, + struct ehci_sitd, sitd_list); + if (sitd->frame == ehci->now_frame) + goto alloc_sitd; + list_del (&sitd->sitd_list); + sitd_dma = sitd->sitd_dma; + } else { + alloc_sitd: + spin_unlock_irqrestore (&ehci->lock, flags); + sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags, + &sitd_dma); + spin_lock_irqsave (&ehci->lock, flags); + if (!sitd) { + iso_sched_free(stream, iso_sched); + spin_unlock_irqrestore(&ehci->lock, flags); + return -ENOMEM; + } + } + + memset (sitd, 0, sizeof *sitd); + sitd->sitd_dma = sitd_dma; + sitd->frame = NO_FRAME; + list_add (&sitd->sitd_list, &iso_sched->td_list); + } + + /* temporarily store schedule info in hcpriv */ + urb->hcpriv = iso_sched; + urb->error_count = 0; + + spin_unlock_irqrestore (&ehci->lock, flags); + return 0; +} + +/*-------------------------------------------------------------------------*/ + +static inline void +sitd_patch( + struct ehci_hcd *ehci, + struct ehci_iso_stream *stream, + struct ehci_sitd *sitd, + struct ehci_iso_sched *iso_sched, + unsigned index +) +{ + struct ehci_iso_packet *uf = &iso_sched->packet [index]; + u64 bufp = uf->bufp; + + sitd->hw_next = EHCI_LIST_END(ehci); + sitd->hw_fullspeed_ep = stream->address; + sitd->hw_uframe = stream->splits; + sitd->hw_results = uf->transaction; + sitd->hw_backpointer = EHCI_LIST_END(ehci); + + bufp = uf->bufp; + sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp); + sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32); + + sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1); + if (uf->cross) + bufp += 4096; + sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32); + sitd->index = index; +} + +static inline void +sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd) +{ + /* note: sitd ordering could matter (CSPLIT then SSPLIT) */ + sitd->sitd_next = ehci->pshadow [frame]; + sitd->hw_next = ehci->periodic [frame]; + ehci->pshadow [frame].sitd = sitd; + sitd->frame = frame; + wmb (); + ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD); +} + +/* fit urb's sitds into the selected schedule slot; activate as needed */ +static void sitd_link_urb( + struct ehci_hcd *ehci, + struct urb *urb, + unsigned mod, + struct ehci_iso_stream *stream +) +{ + int packet; + unsigned next_uframe; + struct ehci_iso_sched *sched = urb->hcpriv; + struct ehci_sitd *sitd; + + next_uframe = stream->next_uframe; + + if (list_empty(&stream->td_list)) + /* usbfs ignores TT bandwidth */ + ehci_to_hcd(ehci)->self.bandwidth_allocated + += stream->bandwidth; + + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_pll_fix == 1) + usb_amd_quirk_pll_disable(); + } + + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; + + /* fill sITDs frame by frame */ + for (packet = sched->first_packet, sitd = NULL; + packet < urb->number_of_packets; + packet++) { + + /* ASSERT: we have all necessary sitds */ + BUG_ON (list_empty (&sched->td_list)); + + /* ASSERT: no itds for this endpoint in this frame */ + + sitd = list_entry (sched->td_list.next, + struct ehci_sitd, sitd_list); + list_move_tail (&sitd->sitd_list, &stream->td_list); + sitd->stream = stream; + sitd->urb = urb; + + sitd_patch(ehci, stream, sitd, sched, packet); + sitd_link(ehci, (next_uframe >> 3) & (ehci->periodic_size - 1), + sitd); + + next_uframe += stream->uperiod; + } + stream->next_uframe = next_uframe & (mod - 1); + + /* don't need that schedule data any more */ + iso_sched_free (stream, sched); + urb->hcpriv = stream; + + 
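+ /* account for this URB and keep the periodic schedule running */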
++ehci->isoc_count; + enable_periodic(ehci); +} + +/*-------------------------------------------------------------------------*/ + +#define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \ + | SITD_STS_XACT | SITD_STS_MMF) + +/* Process and recycle a completed SITD. Return true iff its urb completed, + * and hence its completion callback probably added things to the hardware + * schedule. + * + * Note that we carefully avoid recycling this descriptor until after any + * completion callback runs, so that it won't be reused quickly. That is, + * assuming (a) no more than two urbs per frame on this endpoint, and also + * (b) only this endpoint's completions submit URBs. It seems some silicon + * corrupts things if you reuse completed descriptors very quickly... + */ +static bool sitd_complete(struct ehci_hcd *ehci, struct ehci_sitd *sitd) +{ + struct urb *urb = sitd->urb; + struct usb_iso_packet_descriptor *desc; + u32 t; + int urb_index = -1; + struct ehci_iso_stream *stream = sitd->stream; + struct usb_device *dev; + bool retval = false; + + urb_index = sitd->index; + desc = &urb->iso_frame_desc [urb_index]; + t = hc32_to_cpup(ehci, &sitd->hw_results); + + /* report transfer status */ + if (unlikely(t & SITD_ERRS)) { + urb->error_count++; + if (t & SITD_STS_DBE) + desc->status = usb_pipein (urb->pipe) + ? -ENOSR /* hc couldn't read */ + : -ECOMM; /* hc couldn't write */ + else if (t & SITD_STS_BABBLE) + desc->status = -EOVERFLOW; + else /* XACT, MMF, etc */ + desc->status = -EPROTO; + } else if (unlikely(t & SITD_STS_ACTIVE)) { + /* URB was too late */ + urb->error_count++; + } else { + desc->status = 0; + desc->actual_length = desc->length - SITD_LENGTH(t); + urb->actual_length += desc->actual_length; + } + + /* handle completion now? */ + if ((urb_index + 1) != urb->number_of_packets) + goto done; + + /* ASSERT: it's really the last sitd for this urb + list_for_each_entry (sitd, &stream->td_list, sitd_list) + BUG_ON (sitd->urb == urb); + */ + + /* give urb back to the driver; completion often (re)submits */ + dev = urb->dev; + ehci_urb_done(ehci, urb, 0); + retval = true; + urb = NULL; + + --ehci->isoc_count; + disable_periodic(ehci); + + ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; + if (ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs == 0) { + if (ehci->amd_pll_fix == 1) + usb_amd_quirk_pll_enable(); + } + + if (list_is_singular(&stream->td_list)) + ehci_to_hcd(ehci)->self.bandwidth_allocated + -= stream->bandwidth; + +done: + sitd->urb = NULL; + + /* Add to the end of the free list for later reuse */ + list_move_tail(&sitd->sitd_list, &stream->free_list); + + /* Recycle the siTDs when the pipeline is empty (ep no longer in use) */ + if (list_empty(&stream->td_list)) { + list_splice_tail_init(&stream->free_list, + &ehci->cached_sitd_list); + start_free_itds(ehci); + } + + return retval; +} + + +static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb, + gfp_t mem_flags) +{ + int status = -EINVAL; + unsigned long flags; + struct ehci_iso_stream *stream; + + /* Get iso_stream head */ + stream = iso_stream_find (ehci, urb); + if (stream == NULL) { + ehci_dbg (ehci, "can't get iso stream\n"); + return -ENOMEM; + } + if (urb->interval != stream->ps.period) { + ehci_dbg (ehci, "can't change iso interval %d --> %d\n", + stream->ps.period, urb->interval); + goto done; + } + +#ifdef EHCI_URB_TRACE + ehci_dbg (ehci, + "submit %p dev%s ep%d%s-iso len %d\n", + urb, urb->dev->devpath, + usb_pipeendpoint (urb->pipe), + usb_pipein (urb->pipe) ? 
"in" : "out", + urb->transfer_buffer_length); +#endif + + /* allocate SITDs */ + status = sitd_urb_transaction (stream, ehci, urb, mem_flags); + if (status < 0) { + ehci_dbg (ehci, "can't init sitds\n"); + goto done; + } + + /* schedule ... need to lock */ + spin_lock_irqsave (&ehci->lock, flags); + if (unlikely(!HCD_HW_ACCESSIBLE(ehci_to_hcd(ehci)))) { + status = -ESHUTDOWN; + goto done_not_linked; + } + status = usb_hcd_link_urb_to_ep(ehci_to_hcd(ehci), urb); + if (unlikely(status)) + goto done_not_linked; + status = iso_stream_schedule(ehci, urb, stream); + if (likely(status == 0)) { + sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream); + } else if (status > 0) { + status = 0; + ehci_urb_done(ehci, urb, 0); + } else { + usb_hcd_unlink_urb_from_ep(ehci_to_hcd(ehci), urb); + } + done_not_linked: + spin_unlock_irqrestore (&ehci->lock, flags); + done: + return status; +} + +/*-------------------------------------------------------------------------*/ + +static void scan_isoc(struct ehci_hcd *ehci) +{ + unsigned uf, now_frame, frame; + unsigned fmask = ehci->periodic_size - 1; + bool modified, live; + + /* + * When running, scan from last scan point up to "now" + * else clean up by scanning everything that's left. + * Touches as few pages as possible: cache-friendly. + */ + if (ehci->rh_state >= EHCI_RH_RUNNING) { + uf = ehci_read_frame_index(ehci); + now_frame = (uf >> 3) & fmask; + live = true; + } else { + now_frame = (ehci->last_iso_frame - 1) & fmask; + live = false; + } + ehci->now_frame = now_frame; + + frame = ehci->last_iso_frame; + for (;;) { + union ehci_shadow q, *q_p; + __hc32 type, *hw_p; + +restart: + /* scan each element in frame's queue for completions */ + q_p = &ehci->pshadow [frame]; + hw_p = &ehci->periodic [frame]; + q.ptr = q_p->ptr; + type = Q_NEXT_TYPE(ehci, *hw_p); + modified = false; + + while (q.ptr != NULL) { + switch (hc32_to_cpu(ehci, type)) { + case Q_TYPE_ITD: + /* If this ITD is still active, leave it for + * later processing ... check the next entry. + * No need to check for activity unless the + * frame is current. + */ + if (frame == now_frame && live) { + rmb(); + for (uf = 0; uf < 8; uf++) { + if (q.itd->hw_transaction[uf] & + ITD_ACTIVE(ehci)) + break; + } + if (uf < 8) { + q_p = &q.itd->itd_next; + hw_p = &q.itd->hw_next; + type = Q_NEXT_TYPE(ehci, + q.itd->hw_next); + q = *q_p; + break; + } + } + + /* Take finished ITDs out of the schedule + * and process them: recycle, maybe report + * URB completion. HC won't cache the + * pointer for much longer, if at all. + */ + *q_p = q.itd->itd_next; + if (!ehci->use_dummy_qh || + q.itd->hw_next != EHCI_LIST_END(ehci)) + *hw_p = q.itd->hw_next; + else + *hw_p = cpu_to_hc32(ehci, + ehci->dummy->qh_dma); + type = Q_NEXT_TYPE(ehci, q.itd->hw_next); + wmb(); + modified = itd_complete (ehci, q.itd); + q = *q_p; + break; + case Q_TYPE_SITD: + /* If this SITD is still active, leave it for + * later processing ... check the next entry. + * No need to check for activity unless the + * frame is current. + */ + if (((frame == now_frame) || + (((frame + 1) & fmask) == now_frame)) + && live + && (q.sitd->hw_results & + SITD_ACTIVE(ehci))) { + + q_p = &q.sitd->sitd_next; + hw_p = &q.sitd->hw_next; + type = Q_NEXT_TYPE(ehci, + q.sitd->hw_next); + q = *q_p; + break; + } + + /* Take finished SITDs out of the schedule + * and process them: recycle, maybe report + * URB completion. 
+ */ + *q_p = q.sitd->sitd_next; + if (!ehci->use_dummy_qh || + q.sitd->hw_next != EHCI_LIST_END(ehci)) + *hw_p = q.sitd->hw_next; + else + *hw_p = cpu_to_hc32(ehci, + ehci->dummy->qh_dma); + type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); + wmb(); + modified = sitd_complete (ehci, q.sitd); + q = *q_p; + break; + default: + ehci_dbg(ehci, "corrupt type %d frame %d shadow %p\n", + type, frame, q.ptr); + // BUG (); + /* FALL THROUGH */ + case Q_TYPE_QH: + case Q_TYPE_FSTN: + /* End of the iTDs and siTDs */ + q.ptr = NULL; + break; + } + + /* assume completion callbacks modify the queue */ + if (unlikely(modified && ehci->isoc_count > 0)) + goto restart; + } + + /* Stop when we have reached the current frame */ + if (frame == now_frame) + break; + + /* The last frame may still have active siTDs */ + ehci->last_iso_frame = frame; + frame = (frame + 1) & fmask; + } +} diff --git a/addons/ehci-pci/src/4.4.180/ehci-sysfs.c b/addons/ehci-pci/src/4.4.180/ehci-sysfs.c new file mode 100644 index 00000000..5216f2b0 --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/ehci-sysfs.c @@ -0,0 +1,187 @@ +/* + * Copyright (C) 2007 by Alan Stern + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +/* this file is part of ehci-hcd.c */ + + +/* Display the ports dedicated to the companion controller */ +static ssize_t show_companion(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ehci_hcd *ehci; + int nports, index, n; + int count = PAGE_SIZE; + char *ptr = buf; + + ehci = hcd_to_ehci(dev_get_drvdata(dev)); + nports = HCS_N_PORTS(ehci->hcs_params); + + for (index = 0; index < nports; ++index) { + if (test_bit(index, &ehci->companion_ports)) { + n = scnprintf(ptr, count, "%d\n", index + 1); + ptr += n; + count -= n; + } + } + return ptr - buf; +} + +/* + * Dedicate or undedicate a port to the companion controller. + * Syntax is "[-]portnum", where a leading '-' sign means + * return control of the port to the EHCI controller. 
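+ *
+ * Usage sketch (the sysfs path varies by controller):
+ *	echo  4 > .../companion		# hand port 4 to the companion HC
+ *	echo -4 > .../companion		# give port 4 back to EHCI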
+ */ +static ssize_t store_companion(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ehci_hcd *ehci; + int portnum, new_owner; + + ehci = hcd_to_ehci(dev_get_drvdata(dev)); + new_owner = PORT_OWNER; /* Owned by companion */ + if (sscanf(buf, "%d", &portnum) != 1) + return -EINVAL; + if (portnum < 0) { + portnum = - portnum; + new_owner = 0; /* Owned by EHCI */ + } + if (portnum <= 0 || portnum > HCS_N_PORTS(ehci->hcs_params)) + return -ENOENT; + portnum--; + if (new_owner) + set_bit(portnum, &ehci->companion_ports); + else + clear_bit(portnum, &ehci->companion_ports); + set_owner(ehci, portnum, new_owner); + return count; +} +static DEVICE_ATTR(companion, 0644, show_companion, store_companion); + + +/* + * Display / Set uframe_periodic_max + */ +static ssize_t show_uframe_periodic_max(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct ehci_hcd *ehci; + int n; + + ehci = hcd_to_ehci(dev_get_drvdata(dev)); + n = scnprintf(buf, PAGE_SIZE, "%d\n", ehci->uframe_periodic_max); + return n; +} + + +static ssize_t store_uframe_periodic_max(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ehci_hcd *ehci; + unsigned uframe_periodic_max; + unsigned uframe; + unsigned long flags; + ssize_t ret; + + ehci = hcd_to_ehci(dev_get_drvdata(dev)); + if (kstrtouint(buf, 0, &uframe_periodic_max) < 0) + return -EINVAL; + + if (uframe_periodic_max < 100 || uframe_periodic_max >= 125) { + ehci_info(ehci, "rejecting invalid request for " + "uframe_periodic_max=%u\n", uframe_periodic_max); + return -EINVAL; + } + + ret = -EINVAL; + + /* + * lock, so that our checking does not race with possible periodic + * bandwidth allocation through submitting new urbs. + */ + spin_lock_irqsave (&ehci->lock, flags); + + /* + * for request to decrease max periodic bandwidth, we have to check + * to see whether the decrease is possible. + */ + if (uframe_periodic_max < ehci->uframe_periodic_max) { + u8 allocated_max = 0; + + for (uframe = 0; uframe < EHCI_BANDWIDTH_SIZE; ++uframe) + allocated_max = max(allocated_max, + ehci->bandwidth[uframe]); + + if (allocated_max > uframe_periodic_max) { + ehci_info(ehci, + "cannot decrease uframe_periodic_max because " + "periodic bandwidth is already allocated " + "(%u > %u)\n", + allocated_max, uframe_periodic_max); + goto out_unlock; + } + } + + /* increasing is always ok */ + + ehci_info(ehci, "setting max periodic bandwidth to %u%% " + "(== %u usec/uframe)\n", + 100*uframe_periodic_max/125, uframe_periodic_max); + + if (uframe_periodic_max != 100) + ehci_warn(ehci, "max periodic bandwidth set is non-standard\n"); + + ehci->uframe_periodic_max = uframe_periodic_max; + ret = count; + +out_unlock: + spin_unlock_irqrestore (&ehci->lock, flags); + return ret; +} +static DEVICE_ATTR(uframe_periodic_max, 0644, show_uframe_periodic_max, store_uframe_periodic_max); + + +static inline int create_sysfs_files(struct ehci_hcd *ehci) +{ + struct device *controller = ehci_to_hcd(ehci)->self.controller; + int i = 0; + + /* with integrated TT there is no companion! */ + if (!ehci_is_TDI(ehci)) + i = device_create_file(controller, &dev_attr_companion); + if (i) + goto out; + + i = device_create_file(controller, &dev_attr_uframe_periodic_max); +out: + return i; +} + +static inline void remove_sysfs_files(struct ehci_hcd *ehci) +{ + struct device *controller = ehci_to_hcd(ehci)->self.controller; + + /* with integrated TT there is no companion! 
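+ * (a TDI controller has its own transaction translator, so full- and
+ * low-speed devices are never handed off to an OHCI/UHCI companion)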
*/ + if (!ehci_is_TDI(ehci)) + device_remove_file(controller, &dev_attr_companion); + + device_remove_file(controller, &dev_attr_uframe_periodic_max); +} diff --git a/addons/ehci-pci/src/4.4.180/ehci-timer.c b/addons/ehci-pci/src/4.4.180/ehci-timer.c new file mode 100644 index 00000000..424ac5d8 --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/ehci-timer.c @@ -0,0 +1,433 @@ +/* + * Copyright (C) 2012 by Alan Stern + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + */ + +/* This file is part of ehci-hcd.c */ + +/*-------------------------------------------------------------------------*/ + +/* Set a bit in the USBCMD register */ +static void ehci_set_command_bit(struct ehci_hcd *ehci, u32 bit) +{ + ehci->command |= bit; + ehci_writel(ehci, ehci->command, &ehci->regs->command); + + /* unblock posted write */ + ehci_readl(ehci, &ehci->regs->command); +} + +/* Clear a bit in the USBCMD register */ +static void ehci_clear_command_bit(struct ehci_hcd *ehci, u32 bit) +{ + ehci->command &= ~bit; + ehci_writel(ehci, ehci->command, &ehci->regs->command); + + /* unblock posted write */ + ehci_readl(ehci, &ehci->regs->command); +} + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI timer support... Now using hrtimers. + * + * Lots of different events are triggered from ehci->hrtimer. Whenever + * the timer routine runs, it checks each possible event; events that are + * currently enabled and whose expiration time has passed get handled. + * The set of enabled events is stored as a collection of bitflags in + * ehci->enabled_hrtimer_events, and they are numbered in order of + * increasing delay values (ranging between 1 ms and 100 ms). + * + * Rather than implementing a sorted list or tree of all pending events, + * we keep track only of the lowest-numbered pending event, in + * ehci->next_hrtimer_event. Whenever ehci->hrtimer gets restarted, its + * expiration time is set to the timeout value for this event. + * + * As a result, events might not get handled right away; the actual delay + * could be anywhere up to twice the requested delay. This doesn't + * matter, because none of the events are especially time-critical. The + * ones that matter most all have a delay of 1 ms, so they will be + * handled after 2 ms at most, which is okay. In addition to this, we + * allow for an expiration range of 1 ms. + */ + +/* + * Delay lengths for the hrtimer event types. + * Keep this list sorted by delay length, in the same order as + * the event types indexed by enum ehci_hrtimer_event in ehci.h. 
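+ * (ehci_enable_event() tracks only the lowest-numbered pending event,
+ * so a misordered entry here would let a short delay hide behind a
+ * longer one)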
+ */ +static unsigned event_delays_ns[] = { + 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_ASS */ + 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_PSS */ + 1 * NSEC_PER_MSEC, /* EHCI_HRTIMER_POLL_DEAD */ + 1125 * NSEC_PER_USEC, /* EHCI_HRTIMER_UNLINK_INTR */ + 2 * NSEC_PER_MSEC, /* EHCI_HRTIMER_FREE_ITDS */ + 5 * NSEC_PER_MSEC, /* EHCI_HRTIMER_START_UNLINK_INTR */ + 6 * NSEC_PER_MSEC, /* EHCI_HRTIMER_ASYNC_UNLINKS */ + 10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IAA_WATCHDOG */ + 10 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_PERIODIC */ + 15 * NSEC_PER_MSEC, /* EHCI_HRTIMER_DISABLE_ASYNC */ + 100 * NSEC_PER_MSEC, /* EHCI_HRTIMER_IO_WATCHDOG */ +}; + +/* Enable a pending hrtimer event */ +static void ehci_enable_event(struct ehci_hcd *ehci, unsigned event, + bool resched) +{ + ktime_t *timeout = &ehci->hr_timeouts[event]; + + if (resched) + *timeout = ktime_add(ktime_get(), + ktime_set(0, event_delays_ns[event])); + ehci->enabled_hrtimer_events |= (1 << event); + + /* Track only the lowest-numbered pending event */ + if (event < ehci->next_hrtimer_event) { + ehci->next_hrtimer_event = event; + hrtimer_start_range_ns(&ehci->hrtimer, *timeout, + NSEC_PER_MSEC, HRTIMER_MODE_ABS); + } +} + + +/* Poll the STS_ASS status bit; see when it agrees with CMD_ASE */ +static void ehci_poll_ASS(struct ehci_hcd *ehci) +{ + unsigned actual, want; + + /* Don't enable anything if the controller isn't running (e.g., died) */ + if (ehci->rh_state != EHCI_RH_RUNNING) + return; + + want = (ehci->command & CMD_ASE) ? STS_ASS : 0; + actual = ehci_readl(ehci, &ehci->regs->status) & STS_ASS; + + if (want != actual) { + + /* Poll again later, but give up after about 2-4 ms */ + if (ehci->ASS_poll_count++ < 2) { + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_ASS, true); + return; + } + ehci_dbg(ehci, "Waited too long for the async schedule status (%x/%x), giving up\n", + want, actual); + } + ehci->ASS_poll_count = 0; + + /* The status is up-to-date; restart or stop the schedule as needed */ + if (want == 0) { /* Stopped */ + if (ehci->async_count > 0) + ehci_set_command_bit(ehci, CMD_ASE); + + } else { /* Running */ + if (ehci->async_count == 0) { + + /* Turn off the schedule after a while */ + ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_ASYNC, + true); + } + } +} + +/* Turn off the async schedule after a brief delay */ +static void ehci_disable_ASE(struct ehci_hcd *ehci) +{ + ehci_clear_command_bit(ehci, CMD_ASE); +} + + +/* Poll the STS_PSS status bit; see when it agrees with CMD_PSE */ +static void ehci_poll_PSS(struct ehci_hcd *ehci) +{ + unsigned actual, want; + + /* Don't do anything if the controller isn't running (e.g., died) */ + if (ehci->rh_state != EHCI_RH_RUNNING) + return; + + want = (ehci->command & CMD_PSE) ? 
STS_PSS : 0; + actual = ehci_readl(ehci, &ehci->regs->status) & STS_PSS; + + if (want != actual) { + + /* Poll again later, but give up after about 2-4 ms */ + if (ehci->PSS_poll_count++ < 2) { + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_PSS, true); + return; + } + ehci_dbg(ehci, "Waited too long for the periodic schedule status (%x/%x), giving up\n", + want, actual); + } + ehci->PSS_poll_count = 0; + + /* The status is up-to-date; restart or stop the schedule as needed */ + if (want == 0) { /* Stopped */ + if (ehci->periodic_count > 0) + ehci_set_command_bit(ehci, CMD_PSE); + + } else { /* Running */ + if (ehci->periodic_count == 0) { + + /* Turn off the schedule after a while */ + ehci_enable_event(ehci, EHCI_HRTIMER_DISABLE_PERIODIC, + true); + } + } +} + +/* Turn off the periodic schedule after a brief delay */ +static void ehci_disable_PSE(struct ehci_hcd *ehci) +{ + ehci_clear_command_bit(ehci, CMD_PSE); +} + + +/* Poll the STS_HALT status bit; see when a dead controller stops */ +static void ehci_handle_controller_death(struct ehci_hcd *ehci) +{ + if (!(ehci_readl(ehci, &ehci->regs->status) & STS_HALT)) { + + /* Give up after a few milliseconds */ + if (ehci->died_poll_count++ < 5) { + /* Try again later */ + ehci_enable_event(ehci, EHCI_HRTIMER_POLL_DEAD, true); + return; + } + ehci_warn(ehci, "Waited too long for the controller to stop, giving up\n"); + } + + /* Clean up the mess */ + ehci->rh_state = EHCI_RH_HALTED; + ehci_writel(ehci, 0, &ehci->regs->configured_flag); + ehci_writel(ehci, 0, &ehci->regs->intr_enable); + ehci_work(ehci); + end_unlink_async(ehci); + + /* Not in process context, so don't try to reset the controller */ +} + +/* start to unlink interrupt QHs */ +static void ehci_handle_start_intr_unlinks(struct ehci_hcd *ehci) +{ + bool stopped = (ehci->rh_state < EHCI_RH_RUNNING); + + /* + * Process all the QHs on the intr_unlink list that were added + * before the current unlink cycle began. The list is in + * temporal order, so stop when we reach the first entry in the + * current cycle. But if the root hub isn't running then + * process all the QHs on the list. + */ + while (!list_empty(&ehci->intr_unlink_wait)) { + struct ehci_qh *qh; + + qh = list_first_entry(&ehci->intr_unlink_wait, + struct ehci_qh, unlink_node); + if (!stopped && (qh->unlink_cycle == + ehci->intr_unlink_wait_cycle)) + break; + list_del_init(&qh->unlink_node); + start_unlink_intr(ehci, qh); + } + + /* Handle remaining entries later */ + if (!list_empty(&ehci->intr_unlink_wait)) { + ehci_enable_event(ehci, EHCI_HRTIMER_START_UNLINK_INTR, true); + ++ehci->intr_unlink_wait_cycle; + } +} + +/* Handle unlinked interrupt QHs once they are gone from the hardware */ +static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci) +{ + bool stopped = (ehci->rh_state < EHCI_RH_RUNNING); + + /* + * Process all the QHs on the intr_unlink list that were added + * before the current unlink cycle began. The list is in + * temporal order, so stop when we reach the first entry in the + * current cycle. But if the root hub isn't running then + * process all the QHs on the list. 
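+ *
+ * The cycle stamp works like this: each time this handler has to re-arm
+ * itself it bumps ehci->intr_unlink_cycle, and every QH queued after
+ * that carries the new value, so one pass never consumes QHs that were
+ * added while it was running.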
+ */ + ehci->intr_unlinking = true; + while (!list_empty(&ehci->intr_unlink)) { + struct ehci_qh *qh; + + qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh, + unlink_node); + if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle) + break; + list_del_init(&qh->unlink_node); + end_unlink_intr(ehci, qh); + } + + /* Handle remaining entries later */ + if (!list_empty(&ehci->intr_unlink)) { + ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true); + ++ehci->intr_unlink_cycle; + } + ehci->intr_unlinking = false; +} + + +/* Start another free-iTDs/siTDs cycle */ +static void start_free_itds(struct ehci_hcd *ehci) +{ + if (!(ehci->enabled_hrtimer_events & BIT(EHCI_HRTIMER_FREE_ITDS))) { + ehci->last_itd_to_free = list_entry( + ehci->cached_itd_list.prev, + struct ehci_itd, itd_list); + ehci->last_sitd_to_free = list_entry( + ehci->cached_sitd_list.prev, + struct ehci_sitd, sitd_list); + ehci_enable_event(ehci, EHCI_HRTIMER_FREE_ITDS, true); + } +} + +/* Wait for controller to stop using old iTDs and siTDs */ +static void end_free_itds(struct ehci_hcd *ehci) +{ + struct ehci_itd *itd, *n; + struct ehci_sitd *sitd, *sn; + + if (ehci->rh_state < EHCI_RH_RUNNING) { + ehci->last_itd_to_free = NULL; + ehci->last_sitd_to_free = NULL; + } + + list_for_each_entry_safe(itd, n, &ehci->cached_itd_list, itd_list) { + list_del(&itd->itd_list); + dma_pool_free(ehci->itd_pool, itd, itd->itd_dma); + if (itd == ehci->last_itd_to_free) + break; + } + list_for_each_entry_safe(sitd, sn, &ehci->cached_sitd_list, sitd_list) { + list_del(&sitd->sitd_list); + dma_pool_free(ehci->sitd_pool, sitd, sitd->sitd_dma); + if (sitd == ehci->last_sitd_to_free) + break; + } + + if (!list_empty(&ehci->cached_itd_list) || + !list_empty(&ehci->cached_sitd_list)) + start_free_itds(ehci); +} + + +/* Handle lost (or very late) IAA interrupts */ +static void ehci_iaa_watchdog(struct ehci_hcd *ehci) +{ + u32 cmd, status; + + /* + * Lost IAA irqs wedge things badly; seen first with a vt8235. + * So we need this watchdog, but must protect it against both + * (a) SMP races against real IAA firing and retriggering, and + * (b) clean HC shutdown, when IAA watchdog was pending. + */ + if (!ehci->iaa_in_progress || ehci->rh_state != EHCI_RH_RUNNING) + return; + + /* If we get here, IAA is *REALLY* late. It's barely + * conceivable that the system is so busy that CMD_IAAD + * is still legitimately set, so let's be sure it's + * clear before we read STS_IAA. (The HC should clear + * CMD_IAAD when it sets STS_IAA.) + */ + cmd = ehci_readl(ehci, &ehci->regs->command); + + /* + * If IAA is set here it either legitimately triggered + * after the watchdog timer expired (_way_ late, so we'll + * still count it as lost) ... or a silicon erratum: + * - VIA seems to set IAA without triggering the IRQ; + * - IAAD potentially cleared without setting IAA. + */ + status = ehci_readl(ehci, &ehci->regs->status); + if ((status & STS_IAA) || !(cmd & CMD_IAAD)) { + COUNT(ehci->stats.lost_iaa); + ehci_writel(ehci, STS_IAA, &ehci->regs->status); + } + + ehci_dbg(ehci, "IAA watchdog: status %x cmd %x\n", status, cmd); + end_unlink_async(ehci); +} + + +/* Enable the I/O watchdog, if appropriate */ +static void turn_on_io_watchdog(struct ehci_hcd *ehci) +{ + /* Not needed if the controller isn't running or it's already enabled */ + if (ehci->rh_state != EHCI_RH_RUNNING || + (ehci->enabled_hrtimer_events & + BIT(EHCI_HRTIMER_IO_WATCHDOG))) + return; + + /* + * Isochronous transfers always need the watchdog. 
+ * For other sorts we use it only if the flag is set. + */ + if (ehci->isoc_count > 0 || (ehci->need_io_watchdog && + ehci->async_count + ehci->intr_count > 0)) + ehci_enable_event(ehci, EHCI_HRTIMER_IO_WATCHDOG, true); +} + + +/* + * Handler functions for the hrtimer event types. + * Keep this array in the same order as the event types indexed by + * enum ehci_hrtimer_event in ehci.h. + */ +static void (*event_handlers[])(struct ehci_hcd *) = { + ehci_poll_ASS, /* EHCI_HRTIMER_POLL_ASS */ + ehci_poll_PSS, /* EHCI_HRTIMER_POLL_PSS */ + ehci_handle_controller_death, /* EHCI_HRTIMER_POLL_DEAD */ + ehci_handle_intr_unlinks, /* EHCI_HRTIMER_UNLINK_INTR */ + end_free_itds, /* EHCI_HRTIMER_FREE_ITDS */ + ehci_handle_start_intr_unlinks, /* EHCI_HRTIMER_START_UNLINK_INTR */ + unlink_empty_async, /* EHCI_HRTIMER_ASYNC_UNLINKS */ + ehci_iaa_watchdog, /* EHCI_HRTIMER_IAA_WATCHDOG */ + ehci_disable_PSE, /* EHCI_HRTIMER_DISABLE_PERIODIC */ + ehci_disable_ASE, /* EHCI_HRTIMER_DISABLE_ASYNC */ + ehci_work, /* EHCI_HRTIMER_IO_WATCHDOG */ +}; + +static enum hrtimer_restart ehci_hrtimer_func(struct hrtimer *t) +{ + struct ehci_hcd *ehci = container_of(t, struct ehci_hcd, hrtimer); + ktime_t now; + unsigned long events; + unsigned long flags; + unsigned e; + + spin_lock_irqsave(&ehci->lock, flags); + + events = ehci->enabled_hrtimer_events; + ehci->enabled_hrtimer_events = 0; + ehci->next_hrtimer_event = EHCI_HRTIMER_NO_EVENT; + + /* + * Check each pending event. If its time has expired, handle + * the event; otherwise re-enable it. + */ + now = ktime_get(); + for_each_set_bit(e, &events, EHCI_HRTIMER_NUM_EVENTS) { + if (now.tv64 >= ehci->hr_timeouts[e].tv64) + event_handlers[e](ehci); + else + ehci_enable_event(ehci, e, false); + } + + spin_unlock_irqrestore(&ehci->lock, flags); + return HRTIMER_NORESTART; +} diff --git a/addons/ehci-pci/src/4.4.180/ehci.h b/addons/ehci-pci/src/4.4.180/ehci.h new file mode 100644 index 00000000..46f62e41 --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/ehci.h @@ -0,0 +1,895 @@ +/* + * Copyright (c) 2001-2002 by David Brownell + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __LINUX_EHCI_HCD_H +#define __LINUX_EHCI_HCD_H + +/* definitions used for the EHCI driver */ + +/* + * __hc32 and __hc16 are "Host Controller" types, they may be equivalent to + * __leXX (normally) or __beXX (given EHCI_BIG_ENDIAN_DESC), depending on + * the host controller implementation. + * + * To facilitate the strongest possible byte-order checking from "sparse" + * and so on, we use __leXX unless that's not practical. 
+ */ +#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_DESC +typedef __u32 __bitwise __hc32; +typedef __u16 __bitwise __hc16; +#else +#define __hc32 __le32 +#define __hc16 __le16 +#endif + +/* statistics can be kept for tuning/monitoring */ +#ifdef CONFIG_DYNAMIC_DEBUG +#define EHCI_STATS +#endif + +struct ehci_stats { + /* irq usage */ + unsigned long normal; + unsigned long error; + unsigned long iaa; + unsigned long lost_iaa; + + /* termination of urbs from core */ + unsigned long complete; + unsigned long unlink; +}; + +/* + * Scheduling and budgeting information for periodic transfers, for both + * high-speed devices and full/low-speed devices lying behind a TT. + */ +struct ehci_per_sched { + struct usb_device *udev; /* access to the TT */ + struct usb_host_endpoint *ep; + struct list_head ps_list; /* node on ehci_tt's ps_list */ + u16 tt_usecs; /* time on the FS/LS bus */ + u16 cs_mask; /* C-mask and S-mask bytes */ + u16 period; /* actual period in frames */ + u16 phase; /* actual phase, frame part */ + u8 bw_phase; /* same, for bandwidth + reservation */ + u8 phase_uf; /* uframe part of the phase */ + u8 usecs, c_usecs; /* times on the HS bus */ + u8 bw_uperiod; /* period in microframes, for + bandwidth reservation */ + u8 bw_period; /* same, in frames */ +}; +#define NO_FRAME 29999 /* frame not assigned yet */ + +/* ehci_hcd->lock guards shared data against other CPUs: + * ehci_hcd: async, unlink, periodic (and shadow), ... + * usb_host_endpoint: hcpriv + * ehci_qh: qh_next, qtd_list + * ehci_qtd: qtd_list + * + * Also, hold this lock when talking to HC registers or + * when updating hw_* fields in shared qh/qtd/... structures. + */ + +#define EHCI_MAX_ROOT_PORTS 15 /* see HCS_N_PORTS */ + +/* + * ehci_rh_state values of EHCI_RH_RUNNING or above mean that the + * controller may be doing DMA. Lower values mean there's no DMA. + */ +enum ehci_rh_state { + EHCI_RH_HALTED, + EHCI_RH_SUSPENDED, + EHCI_RH_RUNNING, + EHCI_RH_STOPPING +}; + +/* + * Timer events, ordered by increasing delay length. + * Always update event_delays_ns[] and event_handlers[] (defined in + * ehci-timer.c) in parallel with this list. 
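+ *
+ * The enum value doubles as a priority: ehci->next_hrtimer_event always
+ * holds the lowest-numbered (shortest-delay) pending event, or
+ * EHCI_HRTIMER_NO_EVENT when nothing is queued.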
+ */ +enum ehci_hrtimer_event { + EHCI_HRTIMER_POLL_ASS, /* Poll for async schedule off */ + EHCI_HRTIMER_POLL_PSS, /* Poll for periodic schedule off */ + EHCI_HRTIMER_POLL_DEAD, /* Wait for dead controller to stop */ + EHCI_HRTIMER_UNLINK_INTR, /* Wait for interrupt QH unlink */ + EHCI_HRTIMER_FREE_ITDS, /* Wait for unused iTDs and siTDs */ + EHCI_HRTIMER_START_UNLINK_INTR, /* Unlink empty interrupt QHs */ + EHCI_HRTIMER_ASYNC_UNLINKS, /* Unlink empty async QHs */ + EHCI_HRTIMER_IAA_WATCHDOG, /* Handle lost IAA interrupts */ + EHCI_HRTIMER_DISABLE_PERIODIC, /* Wait to disable periodic sched */ + EHCI_HRTIMER_DISABLE_ASYNC, /* Wait to disable async sched */ + EHCI_HRTIMER_IO_WATCHDOG, /* Check for missing IRQs */ + EHCI_HRTIMER_NUM_EVENTS /* Must come last */ +}; +#define EHCI_HRTIMER_NO_EVENT 99 + +struct ehci_hcd { /* one per controller */ + /* timing support */ + enum ehci_hrtimer_event next_hrtimer_event; + unsigned enabled_hrtimer_events; + ktime_t hr_timeouts[EHCI_HRTIMER_NUM_EVENTS]; + struct hrtimer hrtimer; + + int PSS_poll_count; + int ASS_poll_count; + int died_poll_count; + + /* glue to PCI and HCD framework */ + struct ehci_caps __iomem *caps; + struct ehci_regs __iomem *regs; + struct ehci_dbg_port __iomem *debug; + + __u32 hcs_params; /* cached register copy */ + spinlock_t lock; + enum ehci_rh_state rh_state; + + /* general schedule support */ + bool scanning:1; + bool need_rescan:1; + bool intr_unlinking:1; + bool iaa_in_progress:1; + bool async_unlinking:1; + bool shutdown:1; + struct ehci_qh *qh_scan_next; + + /* async schedule support */ + struct ehci_qh *async; + struct ehci_qh *dummy; /* For AMD quirk use */ + struct list_head async_unlink; + struct list_head async_idle; + unsigned async_unlink_cycle; + unsigned async_count; /* async activity count */ + + /* periodic schedule support */ +#define DEFAULT_I_TDPS 1024 /* some HCs can do less */ + unsigned periodic_size; + __hc32 *periodic; /* hw periodic table */ + dma_addr_t periodic_dma; + struct list_head intr_qh_list; + unsigned i_thresh; /* uframes HC might cache */ + + union ehci_shadow *pshadow; /* mirror hw periodic table */ + struct list_head intr_unlink_wait; + struct list_head intr_unlink; + unsigned intr_unlink_wait_cycle; + unsigned intr_unlink_cycle; + unsigned now_frame; /* frame from HC hardware */ + unsigned last_iso_frame; /* last frame scanned for iso */ + unsigned intr_count; /* intr activity count */ + unsigned isoc_count; /* isoc activity count */ + unsigned periodic_count; /* periodic activity count */ + unsigned uframe_periodic_max; /* max periodic time per uframe */ + + + /* list of itds & sitds completed while now_frame was still active */ + struct list_head cached_itd_list; + struct ehci_itd *last_itd_to_free; + struct list_head cached_sitd_list; + struct ehci_sitd *last_sitd_to_free; + + /* per root hub port */ + unsigned long reset_done [EHCI_MAX_ROOT_PORTS]; + + /* bit vectors (one bit per port) */ + unsigned long bus_suspended; /* which ports were + already suspended at the start of a bus suspend */ + unsigned long companion_ports; /* which ports are + dedicated to the companion controller */ + unsigned long owned_ports; /* which ports are + owned by the companion during a bus suspend */ + unsigned long port_c_suspend; /* which ports have + the change-suspend feature turned on */ + unsigned long suspended_ports; /* which ports are + suspended */ + unsigned long resuming_ports; /* which ports have + started to resume */ + + /* per-HC memory pools (could be per-bus, but ...) 
*/ + struct dma_pool *qh_pool; /* qh per active urb */ + struct dma_pool *qtd_pool; /* one or more per qh */ + struct dma_pool *itd_pool; /* itd per iso urb */ + struct dma_pool *sitd_pool; /* sitd per split iso urb */ + + unsigned random_frame; + unsigned long next_statechange; + ktime_t last_periodic_enable; + u32 command; + + /* SILICON QUIRKS */ + unsigned no_selective_suspend:1; + unsigned has_fsl_port_bug:1; /* FreeScale */ + unsigned has_fsl_hs_errata:1; /* Freescale HS quirk */ + unsigned big_endian_mmio:1; + unsigned big_endian_desc:1; + unsigned big_endian_capbase:1; + unsigned has_amcc_usb23:1; + unsigned need_io_watchdog:1; + unsigned amd_pll_fix:1; + unsigned use_dummy_qh:1; /* AMD Frame List table quirk*/ + unsigned has_synopsys_hc_bug:1; /* Synopsys HC */ + unsigned frame_index_bug:1; /* MosChip (AKA NetMos) */ + unsigned need_oc_pp_cycle:1; /* MPC834X port power */ + unsigned imx28_write_fix:1; /* For Freescale i.MX28 */ + + /* required for usb32 quirk */ + #define OHCI_CTRL_HCFS (3 << 6) + #define OHCI_USB_OPER (2 << 6) + #define OHCI_USB_SUSPEND (3 << 6) + + #define OHCI_HCCTRL_OFFSET 0x4 + #define OHCI_HCCTRL_LEN 0x4 + __hc32 *ohci_hcctrl_reg; + unsigned has_hostpc:1; + unsigned has_tdi_phy_lpm:1; + unsigned has_ppcd:1; /* support per-port change bits */ + u8 sbrn; /* packed release number */ + + /* irq statistics */ +#ifdef EHCI_STATS + struct ehci_stats stats; +# define COUNT(x) do { (x)++; } while (0) +#else +# define COUNT(x) do {} while (0) +#endif + + /* debug files */ +#ifdef CONFIG_DYNAMIC_DEBUG + struct dentry *debug_dir; +#endif + + /* bandwidth usage */ +#define EHCI_BANDWIDTH_SIZE 64 +#define EHCI_BANDWIDTH_FRAMES (EHCI_BANDWIDTH_SIZE >> 3) + u8 bandwidth[EHCI_BANDWIDTH_SIZE]; + /* us allocated per uframe */ + u8 tt_budget[EHCI_BANDWIDTH_SIZE]; + /* us budgeted per uframe */ + struct list_head tt_list; + + /* platform-specific data -- must come last */ + unsigned long priv[0] __aligned(sizeof(s64)); +}; + +/* convert between an HCD pointer and the corresponding EHCI_HCD */ +static inline struct ehci_hcd *hcd_to_ehci (struct usb_hcd *hcd) +{ + return (struct ehci_hcd *) (hcd->hcd_priv); +} +static inline struct usb_hcd *ehci_to_hcd (struct ehci_hcd *ehci) +{ + return container_of ((void *) ehci, struct usb_hcd, hcd_priv); +} + +/*-------------------------------------------------------------------------*/ + +#include + +/*-------------------------------------------------------------------------*/ + +#define QTD_NEXT(ehci, dma) cpu_to_hc32(ehci, (u32)dma) + +/* + * EHCI Specification 0.95 Section 3.5 + * QTD: describe data transfer components (buffer, direction, ...) + * See Fig 3-6 "Queue Element Transfer Descriptor Block Diagram". + * + * These are associated only with "QH" (Queue Head) structures, + * used with control, bulk, and interrupt transfers. 
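+ * (isochronous transfers never use qtds; they use the iTD and siTD
+ * records defined further below)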
+ */ +struct ehci_qtd { + /* first part defined by EHCI spec */ + __hc32 hw_next; /* see EHCI 3.5.1 */ + __hc32 hw_alt_next; /* see EHCI 3.5.2 */ + __hc32 hw_token; /* see EHCI 3.5.3 */ +#define QTD_TOGGLE (1 << 31) /* data toggle */ +#define QTD_LENGTH(tok) (((tok)>>16) & 0x7fff) +#define QTD_IOC (1 << 15) /* interrupt on complete */ +#define QTD_CERR(tok) (((tok)>>10) & 0x3) +#define QTD_PID(tok) (((tok)>>8) & 0x3) +#define QTD_STS_ACTIVE (1 << 7) /* HC may execute this */ +#define QTD_STS_HALT (1 << 6) /* halted on error */ +#define QTD_STS_DBE (1 << 5) /* data buffer error (in HC) */ +#define QTD_STS_BABBLE (1 << 4) /* device was babbling (qtd halted) */ +#define QTD_STS_XACT (1 << 3) /* device gave illegal response */ +#define QTD_STS_MMF (1 << 2) /* incomplete split transaction */ +#define QTD_STS_STS (1 << 1) /* split transaction state */ +#define QTD_STS_PING (1 << 0) /* issue PING? */ + +#define ACTIVE_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_ACTIVE) +#define HALT_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_HALT) +#define STATUS_BIT(ehci) cpu_to_hc32(ehci, QTD_STS_STS) + + __hc32 hw_buf [5]; /* see EHCI 3.5.4 */ + __hc32 hw_buf_hi [5]; /* Appendix B */ + + /* the rest is HCD-private */ + dma_addr_t qtd_dma; /* qtd address */ + struct list_head qtd_list; /* sw qtd list */ + struct urb *urb; /* qtd's urb */ + size_t length; /* length of buffer */ +} __attribute__ ((aligned (32))); + +/* mask NakCnt+T in qh->hw_alt_next */ +#define QTD_MASK(ehci) cpu_to_hc32 (ehci, ~0x1f) + +#define IS_SHORT_READ(token) (QTD_LENGTH (token) != 0 && QTD_PID (token) == 1) + +/*-------------------------------------------------------------------------*/ + +/* type tag from {qh,itd,sitd,fstn}->hw_next */ +#define Q_NEXT_TYPE(ehci,dma) ((dma) & cpu_to_hc32(ehci, 3 << 1)) + +/* + * Now the following defines are not converted using the + * cpu_to_le32() macro anymore, since we have to support + * "dynamic" switching between be and le support, so that the driver + * can be used on one system with SoC EHCI controller using big-endian + * descriptors as well as a normal little-endian PCI EHCI controller. + */ +/* values for that type tag */ +#define Q_TYPE_ITD (0 << 1) +#define Q_TYPE_QH (1 << 1) +#define Q_TYPE_SITD (2 << 1) +#define Q_TYPE_FSTN (3 << 1) + +/* next async queue entry, or pointer to interrupt/periodic QH */ +#define QH_NEXT(ehci,dma) (cpu_to_hc32(ehci, (((u32)dma)&~0x01f)|Q_TYPE_QH)) + +/* for periodic/async schedules and qtd lists, mark end of list */ +#define EHCI_LIST_END(ehci) cpu_to_hc32(ehci, 1) /* "null pointer" to hw */ + +/* + * Entries in periodic shadow table are pointers to one of four kinds + * of data structure. That's dictated by the hardware; a type tag is + * encoded in the low bits of the hardware's periodic schedule. Use + * Q_NEXT_TYPE to get the tag. + * + * For entries in the async schedule, the type tag always says "qh". + */ +union ehci_shadow { + struct ehci_qh *qh; /* Q_TYPE_QH */ + struct ehci_itd *itd; /* Q_TYPE_ITD */ + struct ehci_sitd *sitd; /* Q_TYPE_SITD */ + struct ehci_fstn *fstn; /* Q_TYPE_FSTN */ + __hc32 *hw_next; /* (all types) */ + void *ptr; +}; + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI Specification 0.95 Section 3.6 + * QH: describes control/bulk/interrupt endpoints + * See Fig 3-7 "Queue Head Structure Layout". + * + * These appear in both the async and (for interrupt) periodic schedules. 
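+ * For an interrupt QH, the S-mask in hw_info2 selects the start
+ * microframes (e.g. a QH_SMASK value of 0x01 polls in microframe 0 of
+ * each period) and, for split transactions, the C-mask selects the
+ * complete-split microframes.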
+ */ + +/* first part defined by EHCI spec */ +struct ehci_qh_hw { + __hc32 hw_next; /* see EHCI 3.6.1 */ + __hc32 hw_info1; /* see EHCI 3.6.2 */ +#define QH_CONTROL_EP (1 << 27) /* FS/LS control endpoint */ +#define QH_HEAD (1 << 15) /* Head of async reclamation list */ +#define QH_TOGGLE_CTL (1 << 14) /* Data toggle control */ +#define QH_HIGH_SPEED (2 << 12) /* Endpoint speed */ +#define QH_LOW_SPEED (1 << 12) +#define QH_FULL_SPEED (0 << 12) +#define QH_INACTIVATE (1 << 7) /* Inactivate on next transaction */ + __hc32 hw_info2; /* see EHCI 3.6.2 */ +#define QH_SMASK 0x000000ff +#define QH_CMASK 0x0000ff00 +#define QH_HUBADDR 0x007f0000 +#define QH_HUBPORT 0x3f800000 +#define QH_MULT 0xc0000000 + __hc32 hw_current; /* qtd list - see EHCI 3.6.4 */ + + /* qtd overlay (hardware parts of a struct ehci_qtd) */ + __hc32 hw_qtd_next; + __hc32 hw_alt_next; + __hc32 hw_token; + __hc32 hw_buf [5]; + __hc32 hw_buf_hi [5]; +} __attribute__ ((aligned(32))); + +struct ehci_qh { + struct ehci_qh_hw *hw; /* Must come first */ + /* the rest is HCD-private */ + dma_addr_t qh_dma; /* address of qh */ + union ehci_shadow qh_next; /* ptr to qh; or periodic */ + struct list_head qtd_list; /* sw qtd list */ + struct list_head intr_node; /* list of intr QHs */ + struct ehci_qtd *dummy; + struct list_head unlink_node; + struct ehci_per_sched ps; /* scheduling info */ + + unsigned unlink_cycle; + + u8 qh_state; +#define QH_STATE_LINKED 1 /* HC sees this */ +#define QH_STATE_UNLINK 2 /* HC may still see this */ +#define QH_STATE_IDLE 3 /* HC doesn't see this */ +#define QH_STATE_UNLINK_WAIT 4 /* LINKED and on unlink q */ +#define QH_STATE_COMPLETING 5 /* don't touch token.HALT */ + + u8 xacterrs; /* XactErr retry counter */ +#define QH_XACTERR_MAX 32 /* XactErr retry limit */ + + u8 gap_uf; /* uframes split/csplit gap */ + + unsigned is_out:1; /* bulk or intr OUT */ + unsigned clearing_tt:1; /* Clear-TT-Buf in progress */ + unsigned dequeue_during_giveback:1; + unsigned exception:1; /* got a fault, or an unlink + was requested */ +}; + +/*-------------------------------------------------------------------------*/ + +/* description of one iso transaction (up to 3 KB data if highspeed) */ +struct ehci_iso_packet { + /* These will be copied to iTD when scheduling */ + u64 bufp; /* itd->hw_bufp{,_hi}[pg] |= */ + __hc32 transaction; /* itd->hw_transaction[i] |= */ + u8 cross; /* buf crosses pages */ + /* for full speed OUT splits */ + u32 buf1; +}; + +/* temporary schedule data for packets from iso urbs (both speeds) + * each packet is one logical usb transaction to the device (not TT), + * beginning at stream->next_uframe + */ +struct ehci_iso_sched { + struct list_head td_list; + unsigned span; + unsigned first_packet; + struct ehci_iso_packet packet [0]; +}; + +/* + * ehci_iso_stream - groups all (s)itds for this endpoint. + * acts like a qh would, if EHCI had them for ISO. 
+ */ +struct ehci_iso_stream { + /* first field matches ehci_qh, but is NULL */ + struct ehci_qh_hw *hw; + + u8 bEndpointAddress; + u8 highspeed; + struct list_head td_list; /* queued itds/sitds */ + struct list_head free_list; /* list of unused itds/sitds */ + + /* output of (re)scheduling */ + struct ehci_per_sched ps; /* scheduling info */ + unsigned next_uframe; + __hc32 splits; + + /* the rest is derived from the endpoint descriptor, + * including the extra info for hw_bufp[0..2] + */ + u16 uperiod; /* period in uframes */ + u16 maxp; + unsigned bandwidth; + + /* This is used to initialize iTD's hw_bufp fields */ + __hc32 buf0; + __hc32 buf1; + __hc32 buf2; + + /* this is used to initialize sITD's tt info */ + __hc32 address; +}; + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI Specification 0.95 Section 3.3 + * Fig 3-4 "Isochronous Transaction Descriptor (iTD)" + * + * Schedule records for high speed iso xfers + */ +struct ehci_itd { + /* first part defined by EHCI spec */ + __hc32 hw_next; /* see EHCI 3.3.1 */ + __hc32 hw_transaction [8]; /* see EHCI 3.3.2 */ +#define EHCI_ISOC_ACTIVE (1<<31) /* activate transfer this slot */ +#define EHCI_ISOC_BUF_ERR (1<<30) /* Data buffer error */ +#define EHCI_ISOC_BABBLE (1<<29) /* babble detected */ +#define EHCI_ISOC_XACTERR (1<<28) /* XactErr - transaction error */ +#define EHCI_ITD_LENGTH(tok) (((tok)>>16) & 0x0fff) +#define EHCI_ITD_IOC (1 << 15) /* interrupt on complete */ + +#define ITD_ACTIVE(ehci) cpu_to_hc32(ehci, EHCI_ISOC_ACTIVE) + + __hc32 hw_bufp [7]; /* see EHCI 3.3.3 */ + __hc32 hw_bufp_hi [7]; /* Appendix B */ + + /* the rest is HCD-private */ + dma_addr_t itd_dma; /* for this itd */ + union ehci_shadow itd_next; /* ptr to periodic q entry */ + + struct urb *urb; + struct ehci_iso_stream *stream; /* endpoint's queue */ + struct list_head itd_list; /* list of stream's itds */ + + /* any/all hw_transactions here may be used by that urb */ + unsigned frame; /* where scheduled */ + unsigned pg; + unsigned index[8]; /* in urb->iso_frame_desc */ +} __attribute__ ((aligned (32))); + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI Specification 0.95 Section 3.4 + * siTD, aka split-transaction isochronous Transfer Descriptor + * ...
describe full speed iso xfers through TT in hubs + * see Figure 3-5 "Split-transaction Isochronous Transaction Descriptor (siTD) + */ +struct ehci_sitd { + /* first part defined by EHCI spec */ + __hc32 hw_next; +/* uses bit field macros above - see EHCI 0.95 Table 3-8 */ + __hc32 hw_fullspeed_ep; /* EHCI table 3-9 */ + __hc32 hw_uframe; /* EHCI table 3-10 */ + __hc32 hw_results; /* EHCI table 3-11 */ +#define SITD_IOC (1 << 31) /* interrupt on completion */ +#define SITD_PAGE (1 << 30) /* buffer 0/1 */ +#define SITD_LENGTH(x) (0x3ff & ((x)>>16)) +#define SITD_STS_ACTIVE (1 << 7) /* HC may execute this */ +#define SITD_STS_ERR (1 << 6) /* error from TT */ +#define SITD_STS_DBE (1 << 5) /* data buffer error (in HC) */ +#define SITD_STS_BABBLE (1 << 4) /* device was babbling */ +#define SITD_STS_XACT (1 << 3) /* illegal IN response */ +#define SITD_STS_MMF (1 << 2) /* incomplete split transaction */ +#define SITD_STS_STS (1 << 1) /* split transaction state */ + +#define SITD_ACTIVE(ehci) cpu_to_hc32(ehci, SITD_STS_ACTIVE) + + __hc32 hw_buf [2]; /* EHCI table 3-12 */ + __hc32 hw_backpointer; /* EHCI table 3-13 */ + __hc32 hw_buf_hi [2]; /* Appendix B */ + + /* the rest is HCD-private */ + dma_addr_t sitd_dma; + union ehci_shadow sitd_next; /* ptr to periodic q entry */ + + struct urb *urb; + struct ehci_iso_stream *stream; /* endpoint's queue */ + struct list_head sitd_list; /* list of stream's sitds */ + unsigned frame; + unsigned index; +} __attribute__ ((aligned (32))); + +/*-------------------------------------------------------------------------*/ + +/* + * EHCI Specification 0.96 Section 3.7 + * Periodic Frame Span Traversal Node (FSTN) + * + * Manages split interrupt transactions (using TT) that span frame boundaries + * into uframes 0/1; see 4.12.2.2. In those uframes, a "save place" FSTN + * makes the HC jump (back) to a QH to scan for fs/ls QH completions until + * it hits a "restore" FSTN; then it returns to finish other uframe 0/1 work. + */ +struct ehci_fstn { + __hc32 hw_next; /* any periodic q entry */ + __hc32 hw_prev; /* qh or EHCI_LIST_END */ + + /* the rest is HCD-private */ + dma_addr_t fstn_dma; + union ehci_shadow fstn_next; /* ptr to periodic q entry */ +} __attribute__ ((aligned (32))); + +/*-------------------------------------------------------------------------*/ + +/* + * USB-2.0 Specification Sections 11.14 and 11.18 + * Scheduling and budgeting split transactions using TTs + * + * A hub can have a single TT for all its ports, or multiple TTs (one for each + * port). The bandwidth and budgeting information for the full/low-speed bus + * below each TT is self-contained and independent of the other TTs or the + * high-speed bus. + * + * "Bandwidth" refers to the number of microseconds on the FS/LS bus allocated + * to an interrupt or isochronous endpoint for each frame. "Budget" refers to + * the best-case estimate of the number of full-speed bytes allocated to an + * endpoint for each microframe within an allocated frame. + * + * Removal of an endpoint invalidates a TT's budget. Instead of trying to + * keep an up-to-date record, we recompute the budget when it is needed. 
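+ * + * (Rough illustration, not taken from this code: a full-speed interrupt endpoint polled once per frame charges its per-frame FS/LS microseconds to every entry of the bandwidth[] table below; removing that endpoint simply invalidates the budget, which is recomputed on the next scheduling pass.)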
+ */ + +struct ehci_tt { + u16 bandwidth[EHCI_BANDWIDTH_FRAMES]; + + struct list_head tt_list; /* List of all ehci_tt's */ + struct list_head ps_list; /* Items using this TT */ + struct usb_tt *usb_tt; + int tt_port; /* TT port number */ +}; + +/*-------------------------------------------------------------------------*/ + +/* Prepare the PORTSC wakeup flags during controller suspend/resume */ + +#define ehci_prepare_ports_for_controller_suspend(ehci, do_wakeup) \ + ehci_adjust_port_wakeup_flags(ehci, true, do_wakeup); + +#define ehci_prepare_ports_for_controller_resume(ehci) \ + ehci_adjust_port_wakeup_flags(ehci, false, false); + +/*-------------------------------------------------------------------------*/ + +#ifdef CONFIG_USB_EHCI_ROOT_HUB_TT + +/* + * Some EHCI controllers have a Transaction Translator built into the + * root hub. This is a non-standard feature. Each controller will need + * to add code to the following inline functions, and call them as + * needed (mostly in root hub code). + */ + +#define ehci_is_TDI(e) (ehci_to_hcd(e)->has_tt) + +/* Returns the speed of a device attached to a port on the root hub. */ +static inline unsigned int +ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc) +{ + if (ehci_is_TDI(ehci)) { + switch ((portsc >> (ehci->has_hostpc ? 25 : 26)) & 3) { + case 0: + return 0; + case 1: + return USB_PORT_STAT_LOW_SPEED; + case 2: + default: + return USB_PORT_STAT_HIGH_SPEED; + } + } + return USB_PORT_STAT_HIGH_SPEED; +} + +#else + +#define ehci_is_TDI(e) (0) + +#define ehci_port_speed(ehci, portsc) USB_PORT_STAT_HIGH_SPEED +#endif + +/*-------------------------------------------------------------------------*/ + +#ifdef CONFIG_PPC_83xx +/* Some Freescale processors have an erratum in which the TT + * port number in the queue head was 0..N-1 instead of 1..N. + */ +#define ehci_has_fsl_portno_bug(e) ((e)->has_fsl_port_bug) +#else +#define ehci_has_fsl_portno_bug(e) (0) +#endif + +#define PORTSC_FSL_PFSC 24 /* Port Force Full-Speed Connect */ + +#if defined(CONFIG_PPC_85xx) +/* Some Freescale processors have an erratum (USB A-005275) in which + * incoming packets get corrupted in HS mode + */ +#define ehci_has_fsl_hs_errata(e) ((e)->has_fsl_hs_errata) +#else +#define ehci_has_fsl_hs_errata(e) (0) +#endif + +/* + * While most USB host controllers implement their registers in + * little-endian format, a minority (celleb companion chip) implement + * them in big endian format. + * + * This attempts to support either format at compile time without a + * runtime penalty, or both formats with the additional overhead + * of checking a flag bit. + * + * ehci_big_endian_capbase is a special quirk for controllers that + * implement the HC capability registers as separate registers and not + * as fields of a 32-bit register. + */ + +#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO +#define ehci_big_endian_mmio(e) ((e)->big_endian_mmio) +#define ehci_big_endian_capbase(e) ((e)->big_endian_capbase) +#else +#define ehci_big_endian_mmio(e) 0 +#define ehci_big_endian_capbase(e) 0 +#endif + +/* + * Big-endian read/write functions are arch-specific. + * Other arches can be added if/when they're needed. + */ +#if defined(CONFIG_ARM) && defined(CONFIG_ARCH_IXP4XX) +#define readl_be(addr) __raw_readl((__force unsigned *)addr) +#define writel_be(val, addr) __raw_writel(val, (__force unsigned *)addr) +#endif + +static inline unsigned int ehci_readl(const struct ehci_hcd *ehci, + __u32 __iomem * regs) +{ +#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO + return ehci_big_endian_mmio(ehci) ? 
+ readl_be(regs) : + readl(regs); +#else + return readl(regs); +#endif +} + +#ifdef CONFIG_SOC_IMX28 +static inline void imx28_ehci_writel(const unsigned int val, + volatile __u32 __iomem *addr) +{ + __asm__ ("swp %0, %0, [%1]" : : "r"(val), "r"(addr)); +} +#else +static inline void imx28_ehci_writel(const unsigned int val, + volatile __u32 __iomem *addr) +{ +} +#endif +static inline void ehci_writel(const struct ehci_hcd *ehci, + const unsigned int val, __u32 __iomem *regs) +{ +#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_MMIO + ehci_big_endian_mmio(ehci) ? + writel_be(val, regs) : + writel(val, regs); +#else + if (ehci->imx28_write_fix) + imx28_ehci_writel(val, regs); + else + writel(val, regs); +#endif +} + +/* + * On certain ppc-44x SoCs there is a HW issue that can only be worked around by + * explicitly suspending/operating the OHCI core, so this function only makes + * sense on that arch. Other common bits are dependent on the has_amcc_usb23 + * quirk flag. + */ +#ifdef CONFIG_44x +static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational) +{ + u32 hc_control; + + hc_control = (readl_be(ehci->ohci_hcctrl_reg) & ~OHCI_CTRL_HCFS); + if (operational) + hc_control |= OHCI_USB_OPER; + else + hc_control |= OHCI_USB_SUSPEND; + + writel_be(hc_control, ehci->ohci_hcctrl_reg); + (void) readl_be(ehci->ohci_hcctrl_reg); +} +#else +static inline void set_ohci_hcfs(struct ehci_hcd *ehci, int operational) +{ } +#endif + +/*-------------------------------------------------------------------------*/ + +/* + * The AMCC 440EPx not only implements its EHCI registers in big-endian + * format, but also its DMA data structures (descriptors). + * + * EHCI controllers accessed through PCI work normally (little-endian + * everywhere), so we won't bother supporting a BE-only mode for now. + */ +#ifdef CONFIG_USB_EHCI_BIG_ENDIAN_DESC +#define ehci_big_endian_desc(e) ((e)->big_endian_desc) + +/* cpu to ehci */ +static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x) +{ + return ehci_big_endian_desc(ehci) + ? (__force __hc32)cpu_to_be32(x) + : (__force __hc32)cpu_to_le32(x); +} + +/* ehci to cpu */ +static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x) +{ + return ehci_big_endian_desc(ehci) + ? be32_to_cpu((__force __be32)x) + : le32_to_cpu((__force __le32)x); +} + +static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x) +{ + return ehci_big_endian_desc(ehci) + ? be32_to_cpup((__force __be32 *)x) + : le32_to_cpup((__force __le32 *)x); +} + +#else + +/* cpu to ehci */ +static inline __hc32 cpu_to_hc32 (const struct ehci_hcd *ehci, const u32 x) +{ + return cpu_to_le32(x); +} + +/* ehci to cpu */ +static inline u32 hc32_to_cpu (const struct ehci_hcd *ehci, const __hc32 x) +{ + return le32_to_cpu(x); +} + +static inline u32 hc32_to_cpup (const struct ehci_hcd *ehci, const __hc32 *x) +{ + return le32_to_cpup(x); +} + +#endif + +/*-------------------------------------------------------------------------*/ + +#define ehci_dbg(ehci, fmt, args...) \ + dev_dbg(ehci_to_hcd(ehci)->self.controller , fmt , ## args) +#define ehci_err(ehci, fmt, args...) \ + dev_err(ehci_to_hcd(ehci)->self.controller , fmt , ## args) +#define ehci_info(ehci, fmt, args...) \ + dev_info(ehci_to_hcd(ehci)->self.controller , fmt , ## args) +#define ehci_warn(ehci, fmt, args...)
\ + dev_warn(ehci_to_hcd(ehci)->self.controller , fmt , ## args) + + +#ifndef CONFIG_DYNAMIC_DEBUG +#define STUB_DEBUG_FILES +#endif + +/*-------------------------------------------------------------------------*/ + +/* Declarations of things exported for use by ehci platform drivers */ + +struct ehci_driver_overrides { + size_t extra_priv_size; + int (*reset)(struct usb_hcd *hcd); + int (*port_power)(struct usb_hcd *hcd, + int portnum, bool enable); +}; + +extern void ehci_init_driver(struct hc_driver *drv, + const struct ehci_driver_overrides *over); +extern int ehci_setup(struct usb_hcd *hcd); +extern int ehci_handshake(struct ehci_hcd *ehci, void __iomem *ptr, + u32 mask, u32 done, int usec); +extern int ehci_reset(struct ehci_hcd *ehci); + +#ifdef CONFIG_PM +extern int ehci_suspend(struct usb_hcd *hcd, bool do_wakeup); +extern int ehci_resume(struct usb_hcd *hcd, bool force_reset); +extern void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, + bool suspending, bool do_wakeup); +#endif /* CONFIG_PM */ + +extern int ehci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, + u16 wIndex, char *buf, u16 wLength); + +#endif /* __LINUX_EHCI_HCD_H */ diff --git a/addons/ehci-pci/src/4.4.180/pci-quirks.c b/addons/ehci-pci/src/4.4.180/pci-quirks.c new file mode 100644 index 00000000..89e9494c --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/pci-quirks.c @@ -0,0 +1,1105 @@ +/* + * This file contains code to reset and initialize USB host controllers. + * Some of it includes work-arounds for PCI hardware and BIOS quirks. + * It may need to run early during booting -- before USB would normally + * initialize -- to ensure that Linux doesn't use any legacy modes. + * + * Copyright (c) 1999 Martin Mares + * (and others) + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "pci-quirks.h" +#include "xhci-ext-caps.h" + + +#define UHCI_USBLEGSUP 0xc0 /* legacy support */ +#define UHCI_USBCMD 0 /* command register */ +#define UHCI_USBINTR 4 /* interrupt register */ +#define UHCI_USBLEGSUP_RWC 0x8f00 /* the R/WC bits */ +#define UHCI_USBLEGSUP_RO 0x5040 /* R/O and reserved bits */ +#define UHCI_USBCMD_RUN 0x0001 /* RUN/STOP bit */ +#define UHCI_USBCMD_HCRESET 0x0002 /* Host Controller reset */ +#define UHCI_USBCMD_EGSM 0x0008 /* Global Suspend Mode */ +#define UHCI_USBCMD_CONFIGURE 0x0040 /* Config Flag */ +#define UHCI_USBINTR_RESUME 0x0002 /* Resume interrupt enable */ + +#define OHCI_CONTROL 0x04 +#define OHCI_CMDSTATUS 0x08 +#define OHCI_INTRSTATUS 0x0c +#define OHCI_INTRENABLE 0x10 +#define OHCI_INTRDISABLE 0x14 +#define OHCI_FMINTERVAL 0x34 +#define OHCI_HCFS (3 << 6) /* hc functional state */ +#define OHCI_HCR (1 << 0) /* host controller reset */ +#define OHCI_OCR (1 << 3) /* ownership change request */ +#define OHCI_CTRL_RWC (1 << 9) /* remote wakeup connected */ +#define OHCI_CTRL_IR (1 << 8) /* interrupt routing */ +#define OHCI_INTR_OC (1 << 30) /* ownership change */ + +#define EHCI_HCC_PARAMS 0x08 /* extended capabilities */ +#define EHCI_USBCMD 0 /* command register */ +#define EHCI_USBCMD_RUN (1 << 0) /* RUN/STOP bit */ +#define EHCI_USBSTS 4 /* status register */ +#define EHCI_USBSTS_HALTED (1 << 12) /* HCHalted bit */ +#define EHCI_USBINTR 8 /* interrupt register */ +#define EHCI_CONFIGFLAG 0x40 /* configured flag register */ +#define EHCI_USBLEGSUP 0 /* legacy support register */ +#define EHCI_USBLEGSUP_BIOS (1 << 16) /* BIOS semaphore */ +#define EHCI_USBLEGSUP_OS (1 << 24) /* OS semaphore */ +#define EHCI_USBLEGCTLSTS 4 /* legacy 
control/status */ +#define EHCI_USBLEGCTLSTS_SOOE (1 << 13) /* SMI on ownership change */ + +/* AMD quirk use */ +#define AB_REG_BAR_LOW 0xe0 +#define AB_REG_BAR_HIGH 0xe1 +#define AB_REG_BAR_SB700 0xf0 +#define AB_INDX(addr) ((addr) + 0x00) +#define AB_DATA(addr) ((addr) + 0x04) +#define AX_INDXC 0x30 +#define AX_DATAC 0x34 + +#define NB_PCIE_INDX_ADDR 0xe0 +#define NB_PCIE_INDX_DATA 0xe4 +#define PCIE_P_CNTL 0x10040 +#define BIF_NB 0x10002 +#define NB_PIF0_PWRDOWN_0 0x01100012 +#define NB_PIF0_PWRDOWN_1 0x01100013 + +#define USB_INTEL_XUSB2PR 0xD0 +#define USB_INTEL_USB2PRM 0xD4 +#define USB_INTEL_USB3_PSSEN 0xD8 +#define USB_INTEL_USB3PRM 0xDC + +/* + * amd_chipset_gen values represent AMD different chipset generations + */ +enum amd_chipset_gen { + NOT_AMD_CHIPSET = 0, + AMD_CHIPSET_SB600, + AMD_CHIPSET_SB700, + AMD_CHIPSET_SB800, + AMD_CHIPSET_HUDSON2, + AMD_CHIPSET_BOLTON, + AMD_CHIPSET_YANGTZE, + AMD_CHIPSET_TAISHAN, + AMD_CHIPSET_UNKNOWN, +}; + +struct amd_chipset_type { + enum amd_chipset_gen gen; + u8 rev; +}; + +static struct amd_chipset_info { + struct pci_dev *nb_dev; + struct pci_dev *smbus_dev; + int nb_type; + struct amd_chipset_type sb_type; + int isoc_reqs; + int probe_count; + int probe_result; +} amd_chipset; + +static DEFINE_SPINLOCK(amd_lock); + +/* + * amd_chipset_sb_type_init - initialize amd chipset southbridge type + * + * AMD FCH/SB generation and revision is identified by SMBus controller + * vendor, device and revision IDs. + * + * Returns: 1 if it is an AMD chipset, 0 otherwise. + */ +static int amd_chipset_sb_type_init(struct amd_chipset_info *pinfo) +{ + u8 rev = 0; + pinfo->sb_type.gen = AMD_CHIPSET_UNKNOWN; + + pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_ATI, + PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL); + if (pinfo->smbus_dev) { + rev = pinfo->smbus_dev->revision; + if (rev >= 0x10 && rev <= 0x1f) + pinfo->sb_type.gen = AMD_CHIPSET_SB600; + else if (rev >= 0x30 && rev <= 0x3f) + pinfo->sb_type.gen = AMD_CHIPSET_SB700; + else if (rev >= 0x40 && rev <= 0x4f) + pinfo->sb_type.gen = AMD_CHIPSET_SB800; + } else { + pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, + PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL); + + if (pinfo->smbus_dev) { + rev = pinfo->smbus_dev->revision; + if (rev >= 0x11 && rev <= 0x14) + pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2; + else if (rev >= 0x15 && rev <= 0x18) + pinfo->sb_type.gen = AMD_CHIPSET_BOLTON; + else if (rev >= 0x39 && rev <= 0x3a) + pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE; + } else { + pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD, + 0x145c, NULL); + if (pinfo->smbus_dev) { + rev = pinfo->smbus_dev->revision; + pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN; + } else { + pinfo->sb_type.gen = NOT_AMD_CHIPSET; + return 0; + } + } + } + pinfo->sb_type.rev = rev; + return 1; +} + +void sb800_prefetch(struct device *dev, int on) +{ + u16 misc; + struct pci_dev *pdev = to_pci_dev(dev); + + pci_read_config_word(pdev, 0x50, &misc); + if (on == 0) + pci_write_config_word(pdev, 0x50, misc & 0xfcff); + else + pci_write_config_word(pdev, 0x50, misc | 0x0300); +} +EXPORT_SYMBOL_GPL(sb800_prefetch); + +int usb_amd_find_chipset_info(void) +{ + unsigned long flags; + struct amd_chipset_info info; + int ret; + + spin_lock_irqsave(&amd_lock, flags); + + /* probe only once */ + if (amd_chipset.probe_count > 0) { + amd_chipset.probe_count++; + spin_unlock_irqrestore(&amd_lock, flags); + return amd_chipset.probe_result; + } + memset(&info, 0, sizeof(info)); + spin_unlock_irqrestore(&amd_lock, flags); + + if (!amd_chipset_sb_type_init(&info)) { + 
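+ /* not an AMD chipset: publish the negative probe result via commit */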
ret = 0; + goto commit; + } + + /* Below chipset generations needn't enable AMD PLL quirk */ + if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN || + info.sb_type.gen == AMD_CHIPSET_SB600 || + info.sb_type.gen == AMD_CHIPSET_YANGTZE || + (info.sb_type.gen == AMD_CHIPSET_SB700 && + info.sb_type.rev > 0x3b)) { + if (info.smbus_dev) { + pci_dev_put(info.smbus_dev); + info.smbus_dev = NULL; + } + ret = 0; + goto commit; + } + + info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x9601, NULL); + if (info.nb_dev) { + info.nb_type = 1; + } else { + info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, 0x1510, NULL); + if (info.nb_dev) { + info.nb_type = 2; + } else { + info.nb_dev = pci_get_device(PCI_VENDOR_ID_AMD, + 0x9600, NULL); + if (info.nb_dev) + info.nb_type = 3; + } + } + + ret = info.probe_result = 1; + printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n"); + +commit: + + spin_lock_irqsave(&amd_lock, flags); + if (amd_chipset.probe_count > 0) { + /* race - someone else was faster - drop devices */ + + /* Mark that we where here */ + amd_chipset.probe_count++; + ret = amd_chipset.probe_result; + + spin_unlock_irqrestore(&amd_lock, flags); + + pci_dev_put(info.nb_dev); + pci_dev_put(info.smbus_dev); + + } else { + /* no race - commit the result */ + info.probe_count++; + amd_chipset = info; + spin_unlock_irqrestore(&amd_lock, flags); + } + + return ret; +} +EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info); + +int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev) +{ + /* Make sure amd chipset type has already been initialized */ + usb_amd_find_chipset_info(); + if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE || + amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) { + dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n"); + return 1; + } + return 0; +} +EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk); + +bool usb_amd_hang_symptom_quirk(void) +{ + u8 rev; + + usb_amd_find_chipset_info(); + rev = amd_chipset.sb_type.rev; + /* SB600 and old version of SB700 have hang symptom bug */ + return amd_chipset.sb_type.gen == AMD_CHIPSET_SB600 || + (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 && + rev >= 0x3a && rev <= 0x3b); +} +EXPORT_SYMBOL_GPL(usb_amd_hang_symptom_quirk); + +bool usb_amd_prefetch_quirk(void) +{ + usb_amd_find_chipset_info(); + /* SB800 needs pre-fetch fix */ + return amd_chipset.sb_type.gen == AMD_CHIPSET_SB800; +} +EXPORT_SYMBOL_GPL(usb_amd_prefetch_quirk); + +/* + * The hardware normally enables the A-link power management feature, which + * lets the system lower the power consumption in idle states. + * + * This USB quirk prevents the link going into that lower power state + * during isochronous transfers. + * + * Without this quirk, isochronous stream on OHCI/EHCI/xHCI controllers of + * some AMD platforms may stutter or have breaks occasionally. + */ +static void usb_amd_quirk_pll(int disable) +{ + u32 addr, addr_low, addr_high, val; + u32 bit = disable ? 
0 : 1; + unsigned long flags; + + spin_lock_irqsave(&amd_lock, flags); + + if (disable) { + amd_chipset.isoc_reqs++; + if (amd_chipset.isoc_reqs > 1) { + spin_unlock_irqrestore(&amd_lock, flags); + return; + } + } else { + amd_chipset.isoc_reqs--; + if (amd_chipset.isoc_reqs > 0) { + spin_unlock_irqrestore(&amd_lock, flags); + return; + } + } + + if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB800 || + amd_chipset.sb_type.gen == AMD_CHIPSET_HUDSON2 || + amd_chipset.sb_type.gen == AMD_CHIPSET_BOLTON) { + outb_p(AB_REG_BAR_LOW, 0xcd6); + addr_low = inb_p(0xcd7); + outb_p(AB_REG_BAR_HIGH, 0xcd6); + addr_high = inb_p(0xcd7); + addr = addr_high << 8 | addr_low; + + outl_p(0x30, AB_INDX(addr)); + outl_p(0x40, AB_DATA(addr)); + outl_p(0x34, AB_INDX(addr)); + val = inl_p(AB_DATA(addr)); + } else if (amd_chipset.sb_type.gen == AMD_CHIPSET_SB700 && + amd_chipset.sb_type.rev <= 0x3b) { + pci_read_config_dword(amd_chipset.smbus_dev, + AB_REG_BAR_SB700, &addr); + outl(AX_INDXC, AB_INDX(addr)); + outl(0x40, AB_DATA(addr)); + outl(AX_DATAC, AB_INDX(addr)); + val = inl(AB_DATA(addr)); + } else { + spin_unlock_irqrestore(&amd_lock, flags); + return; + } + + if (disable) { + val &= ~0x08; + val |= (1 << 4) | (1 << 9); + } else { + val |= 0x08; + val &= ~((1 << 4) | (1 << 9)); + } + outl_p(val, AB_DATA(addr)); + + if (!amd_chipset.nb_dev) { + spin_unlock_irqrestore(&amd_lock, flags); + return; + } + + if (amd_chipset.nb_type == 1 || amd_chipset.nb_type == 3) { + addr = PCIE_P_CNTL; + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_ADDR, addr); + pci_read_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, &val); + + val &= ~(1 | (1 << 3) | (1 << 4) | (1 << 9) | (1 << 12)); + val |= bit | (bit << 3) | (bit << 12); + val |= ((!bit) << 4) | ((!bit) << 9); + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, val); + + addr = BIF_NB; + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_ADDR, addr); + pci_read_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, &val); + val &= ~(1 << 8); + val |= bit << 8; + + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, val); + } else if (amd_chipset.nb_type == 2) { + addr = NB_PIF0_PWRDOWN_0; + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_ADDR, addr); + pci_read_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, &val); + if (disable) + val &= ~(0x3f << 7); + else + val |= 0x3f << 7; + + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, val); + + addr = NB_PIF0_PWRDOWN_1; + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_ADDR, addr); + pci_read_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, &val); + if (disable) + val &= ~(0x3f << 7); + else + val |= 0x3f << 7; + + pci_write_config_dword(amd_chipset.nb_dev, + NB_PCIE_INDX_DATA, val); + } + + spin_unlock_irqrestore(&amd_lock, flags); + return; +} + +void usb_amd_quirk_pll_disable(void) +{ + usb_amd_quirk_pll(1); +} +EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable); + +void usb_amd_quirk_pll_enable(void) +{ + usb_amd_quirk_pll(0); +} +EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_enable); + +void usb_amd_dev_put(void) +{ + struct pci_dev *nb, *smbus; + unsigned long flags; + + spin_lock_irqsave(&amd_lock, flags); + + amd_chipset.probe_count--; + if (amd_chipset.probe_count > 0) { + spin_unlock_irqrestore(&amd_lock, flags); + return; + } + + /* save them to pci_dev_put outside of spinlock */ + nb = amd_chipset.nb_dev; + smbus = amd_chipset.smbus_dev; + + amd_chipset.nb_dev = NULL; + amd_chipset.smbus_dev = NULL; + amd_chipset.nb_type = 0; + 
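+ /* wipe the remaining cached state so a later probe starts clean */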
memset(&amd_chipset.sb_type, 0, sizeof(amd_chipset.sb_type)); + amd_chipset.isoc_reqs = 0; + amd_chipset.probe_result = 0; + + spin_unlock_irqrestore(&amd_lock, flags); + + pci_dev_put(nb); + pci_dev_put(smbus); +} +EXPORT_SYMBOL_GPL(usb_amd_dev_put); + +/* + * Make sure the controller is completely inactive, unable to + * generate interrupts or do DMA. + */ +void uhci_reset_hc(struct pci_dev *pdev, unsigned long base) +{ + /* Turn off PIRQ enable and SMI enable. (This also turns off the + * BIOS's USB Legacy Support.) Turn off all the R/WC bits too. + */ + pci_write_config_word(pdev, UHCI_USBLEGSUP, UHCI_USBLEGSUP_RWC); + + /* Reset the HC - this will force us to get a + * new notification of any already connected + * ports due to the virtual disconnect that it + * implies. + */ + outw(UHCI_USBCMD_HCRESET, base + UHCI_USBCMD); + mb(); + udelay(5); + if (inw(base + UHCI_USBCMD) & UHCI_USBCMD_HCRESET) + dev_warn(&pdev->dev, "HCRESET not completed yet!\n"); + + /* Just to be safe, disable interrupt requests and + * make sure the controller is stopped. + */ + outw(0, base + UHCI_USBINTR); + outw(0, base + UHCI_USBCMD); +} +EXPORT_SYMBOL_GPL(uhci_reset_hc); + +/* + * Initialize a controller that was newly discovered or has just been + * resumed. In either case we can't be sure of its previous state. + * + * Returns: 1 if the controller was reset, 0 otherwise. + */ +int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base) +{ + u16 legsup; + unsigned int cmd, intr; + + /* + * When restarting a suspended controller, we expect all the + * settings to be the same as we left them: + * + * PIRQ and SMI disabled, no R/W bits set in USBLEGSUP; + * Controller is stopped and configured with EGSM set; + * No interrupts enabled except possibly Resume Detect. + * + * If any of these conditions are violated we do a complete reset. 
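+ * + * (Example, assumed for illustration: a USBLEGSUP readback of 0x2000 -- the + * PIRQ enable bit still set -- fails the first check below and forces a reset.)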
+ */ + pci_read_config_word(pdev, UHCI_USBLEGSUP, &legsup); + if (legsup & ~(UHCI_USBLEGSUP_RO | UHCI_USBLEGSUP_RWC)) { + dev_dbg(&pdev->dev, "%s: legsup = 0x%04x\n", + __func__, legsup); + goto reset_needed; + } + + cmd = inw(base + UHCI_USBCMD); + if ((cmd & UHCI_USBCMD_RUN) || !(cmd & UHCI_USBCMD_CONFIGURE) || + !(cmd & UHCI_USBCMD_EGSM)) { + dev_dbg(&pdev->dev, "%s: cmd = 0x%04x\n", + __func__, cmd); + goto reset_needed; + } + + intr = inw(base + UHCI_USBINTR); + if (intr & (~UHCI_USBINTR_RESUME)) { + dev_dbg(&pdev->dev, "%s: intr = 0x%04x\n", + __func__, intr); + goto reset_needed; + } + return 0; + +reset_needed: + dev_dbg(&pdev->dev, "Performing full reset\n"); + uhci_reset_hc(pdev, base); + return 1; +} +EXPORT_SYMBOL_GPL(uhci_check_and_reset_hc); + +static inline int io_type_enabled(struct pci_dev *pdev, unsigned int mask) +{ + u16 cmd; + return !pci_read_config_word(pdev, PCI_COMMAND, &cmd) && (cmd & mask); +} + +#define pio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_IO) +#define mmio_enabled(dev) io_type_enabled(dev, PCI_COMMAND_MEMORY) + +static void quirk_usb_handoff_uhci(struct pci_dev *pdev) +{ + unsigned long base = 0; + int i; + + if (!pio_enabled(pdev)) + return; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) + if ((pci_resource_flags(pdev, i) & IORESOURCE_IO)) { + base = pci_resource_start(pdev, i); + break; + } + + if (base) + uhci_check_and_reset_hc(pdev, base); +} + +static int mmio_resource_enabled(struct pci_dev *pdev, int idx) +{ + return pci_resource_start(pdev, idx) && mmio_enabled(pdev); +} + +static void quirk_usb_handoff_ohci(struct pci_dev *pdev) +{ + void __iomem *base; + u32 control; + u32 fminterval = 0; + bool no_fminterval = false; + int cnt; + + if (!mmio_resource_enabled(pdev, 0)) + return; + + base = pci_ioremap_bar(pdev, 0); + if (base == NULL) + return; + + /* + * ULi M5237 OHCI controller locks the whole system when accessing + * the OHCI_FMINTERVAL offset. + */ + if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237) + no_fminterval = true; + + control = readl(base + OHCI_CONTROL); + +/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */ +#ifdef __hppa__ +#define OHCI_CTRL_MASK (OHCI_CTRL_RWC | OHCI_CTRL_IR) +#else +#define OHCI_CTRL_MASK OHCI_CTRL_RWC + + if (control & OHCI_CTRL_IR) { + int wait_time = 500; /* arbitrary; 5 seconds */ + writel(OHCI_INTR_OC, base + OHCI_INTRENABLE); + writel(OHCI_OCR, base + OHCI_CMDSTATUS); + while (wait_time > 0 && + readl(base + OHCI_CONTROL) & OHCI_CTRL_IR) { + wait_time -= 10; + msleep(10); + } + if (wait_time <= 0) + dev_warn(&pdev->dev, + "OHCI: BIOS handoff failed (BIOS bug?) %08x\n", + readl(base + OHCI_CONTROL)); + } +#endif + + /* disable interrupts */ + writel((u32) ~0, base + OHCI_INTRDISABLE); + + /* Reset the USB bus, if the controller isn't already in RESET */ + if (control & OHCI_HCFS) { + /* Go into RESET, preserving RWC (and possibly IR) */ + writel(control & OHCI_CTRL_MASK, base + OHCI_CONTROL); + readl(base + OHCI_CONTROL); + + /* drive bus reset for at least 50 ms (7.1.7.5) */ + msleep(50); + } + + /* software reset of the controller, preserving HcFmInterval */ + if (!no_fminterval) + fminterval = readl(base + OHCI_FMINTERVAL); + + writel(OHCI_HCR, base + OHCI_CMDSTATUS); + + /* reset requires max 10 us delay */ + for (cnt = 30; cnt > 0; --cnt) { /* ... 
allow extra time */ + if ((readl(base + OHCI_CMDSTATUS) & OHCI_HCR) == 0) + break; + udelay(1); + } + + if (!no_fminterval) + writel(fminterval, base + OHCI_FMINTERVAL); + + /* Now the controller is safely in SUSPEND and nothing can wake it up */ + iounmap(base); +} + +static const struct dmi_system_id ehci_dmi_nohandoff_table[] = { + { + /* Pegatron Lucid (ExoPC) */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "EXOPG06411"), + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-CE-133"), + }, + }, + { + /* Pegatron Lucid (Ordissimo AIRIS) */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "M11JB"), + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), + }, + }, + { + /* Pegatron Lucid (Ordissimo) */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"), + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), + }, + }, + { + /* HASEE E200 */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"), + DMI_MATCH(DMI_BOARD_NAME, "E210"), + DMI_MATCH(DMI_BIOS_VERSION, "6.00"), + }, + }, + { } +}; + +static void ehci_bios_handoff(struct pci_dev *pdev, + void __iomem *op_reg_base, + u32 cap, u8 offset) +{ + int try_handoff = 1, tried_handoff = 0; + + /* + * The Pegatron Lucid tablet sporadically waits for 98 seconds trying + * the handoff on its unused controller. Skip it. + * + * The HASEE E200 hangs when the semaphore is set (bugzilla #77021). + */ + if (pdev->vendor == 0x8086 && (pdev->device == 0x283a || + pdev->device == 0x27cc)) { + if (dmi_check_system(ehci_dmi_nohandoff_table)) + try_handoff = 0; + } + + if (try_handoff && (cap & EHCI_USBLEGSUP_BIOS)) { + dev_dbg(&pdev->dev, "EHCI: BIOS handoff\n"); + +#if 0 +/* aleksey_gorelov@phoenix.com reports that some systems need SMI forced on, + * but that seems dubious in general (the BIOS left it off intentionally) + * and is known to prevent some systems from booting. so we won't do this + * unless maybe we can determine when we're on a system that needs SMI forced. + */ + /* BIOS workaround (?): be sure the pre-Linux code + * receives the SMI + */ + pci_read_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, &val); + pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, + val | EHCI_USBLEGCTLSTS_SOOE); +#endif + + /* some systems get upset if this semaphore is + * set for any other reason than forcing a BIOS + * handoff.. + */ + pci_write_config_byte(pdev, offset + 3, 1); + } + + /* if boot firmware now owns EHCI, spin till it hands it over. */ + if (try_handoff) { + int msec = 1000; + while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) { + tried_handoff = 1; + msleep(10); + msec -= 10; + pci_read_config_dword(pdev, offset, &cap); + } + } + + if (cap & EHCI_USBLEGSUP_BIOS) { + /* well, possibly buggy BIOS... try to shut it down, + * and hope nothing goes too wrong + */ + if (try_handoff) + dev_warn(&pdev->dev, + "EHCI: BIOS handoff failed (BIOS bug?) %08x\n", + cap); + pci_write_config_byte(pdev, offset + 2, 0); + } + + /* just in case, always disable EHCI SMIs */ + pci_write_config_dword(pdev, offset + EHCI_USBLEGCTLSTS, 0); + + /* If the BIOS ever owned the controller then we can't expect + * any power sessions to remain intact. 
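+ * Clearing CONFIGFLAG below routes all ports back to the companion + * controllers, which is the power-on default.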
+ */ + if (tried_handoff) + writel(0, op_reg_base + EHCI_CONFIGFLAG); +} + +static void quirk_usb_disable_ehci(struct pci_dev *pdev) +{ + void __iomem *base, *op_reg_base; + u32 hcc_params, cap, val; + u8 offset, cap_length; + int wait_time, count = 256/4; + + if (!mmio_resource_enabled(pdev, 0)) + return; + + base = pci_ioremap_bar(pdev, 0); + if (base == NULL) + return; + + cap_length = readb(base); + op_reg_base = base + cap_length; + + /* EHCI 0.96 and later may have "extended capabilities" + * spec section 5.1 explains the bios handoff, e.g. for + * booting from USB disk or using a usb keyboard + */ + hcc_params = readl(base + EHCI_HCC_PARAMS); + offset = (hcc_params >> 8) & 0xff; + while (offset && --count) { + pci_read_config_dword(pdev, offset, &cap); + + switch (cap & 0xff) { + case 1: + ehci_bios_handoff(pdev, op_reg_base, cap, offset); + break; + case 0: /* Illegal reserved cap, set cap=0 so we exit */ + cap = 0; /* then fallthrough... */ + default: + dev_warn(&pdev->dev, + "EHCI: unrecognized capability %02x\n", + cap & 0xff); + } + offset = (cap >> 8) & 0xff; + } + if (!count) + dev_printk(KERN_DEBUG, &pdev->dev, "EHCI: capability loop?\n"); + + /* + * halt EHCI & disable its interrupts in any case + */ + val = readl(op_reg_base + EHCI_USBSTS); + if ((val & EHCI_USBSTS_HALTED) == 0) { + val = readl(op_reg_base + EHCI_USBCMD); + val &= ~EHCI_USBCMD_RUN; + writel(val, op_reg_base + EHCI_USBCMD); + + wait_time = 2000; + do { + writel(0x3f, op_reg_base + EHCI_USBSTS); + udelay(100); + wait_time -= 100; + val = readl(op_reg_base + EHCI_USBSTS); + if ((val == ~(u32)0) || (val & EHCI_USBSTS_HALTED)) { + break; + } + } while (wait_time > 0); + } + writel(0, op_reg_base + EHCI_USBINTR); + writel(0x3f, op_reg_base + EHCI_USBSTS); + + iounmap(base); +} + +/* + * handshake - spin reading a register until handshake completes + * @ptr: address of hc register to be read + * @mask: bits to look at in result of read + * @done: value of those bits when handshake succeeds + * @wait_usec: timeout in microseconds + * @delay_usec: delay in microseconds to wait between polling + * + * Polls a register every delay_usec microseconds. + * Returns 0 when the mask bits have the value done. + * Returns -ETIMEDOUT if this condition is not true after + * wait_usec microseconds have passed. + */ +static int handshake(void __iomem *ptr, u32 mask, u32 done, + int wait_usec, int delay_usec) +{ + u32 result; + + do { + result = readl(ptr); + result &= mask; + if (result == done) + return 0; + udelay(delay_usec); + wait_usec -= delay_usec; + } while (wait_usec > 0); + return -ETIMEDOUT; +} + +/* + * Intel's Panther Point chipset has two host controllers (EHCI and xHCI) that + * share some number of ports. These ports can be switched between either + * controller. Not all of the ports under the EHCI host controller may be + * switchable. + * + * The ports should be switched over to xHCI before PCI probes for any device + * start. This avoids active devices under EHCI being disconnected during the + * port switchover, which could cause loss of data on USB storage devices, or + * failed boot when the root file system is on a USB mass storage device and is + * enumerated under EHCI first. + * + * We write into the xHC's PCI configuration space in some Intel-specific + * registers to switch the ports over. The USB 3.0 terminations and the USB + * 2.0 data wires are switched separately. 
We want to enable the SuperSpeed + * terminations before switching the USB 2.0 wires over, so that USB 3.0 + * devices connect at SuperSpeed, rather than at USB 2.0 speeds. + */ +void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev) +{ + u32 ports_available; + bool ehci_found = false; + struct pci_dev *companion = NULL; + + /* Sony VAIO t-series with subsystem device ID 90a8 is not capable of + * switching ports from EHCI to xHCI + */ + if (xhci_pdev->subsystem_vendor == PCI_VENDOR_ID_SONY && + xhci_pdev->subsystem_device == 0x90a8) + return; + + /* make sure an intel EHCI controller exists */ + for_each_pci_dev(companion) { + if (companion->class == PCI_CLASS_SERIAL_USB_EHCI && + companion->vendor == PCI_VENDOR_ID_INTEL) { + ehci_found = true; + break; + } + } + + if (!ehci_found) + return; + + /* Don't switchover the ports if the user hasn't compiled the xHCI + * driver. Otherwise they will see "dead" USB ports that don't power + * the devices. + */ + if (!IS_ENABLED(CONFIG_USB_XHCI_HCD)) { + dev_warn(&xhci_pdev->dev, + "CONFIG_USB_XHCI_HCD is turned off, defaulting to EHCI.\n"); + dev_warn(&xhci_pdev->dev, + "USB 3.0 devices will work at USB 2.0 speeds.\n"); + usb_disable_xhci_ports(xhci_pdev); + return; + } + + /* Read USB3PRM, the USB 3.0 Port Routing Mask Register + * Indicate the ports that can be changed from OS. + */ + pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM, + &ports_available); + + dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n", + ports_available); + + /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable + * Register, to turn on SuperSpeed terminations for the + * switchable ports. + */ + pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, + ports_available); + + pci_read_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, + &ports_available); + dev_dbg(&xhci_pdev->dev, + "USB 3.0 ports that are now enabled under xHCI: 0x%x\n", + ports_available); + + /* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register + * Indicate the USB 2.0 ports to be controlled by the xHCI host. + */ + + pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM, + &ports_available); + + dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xHCI: 0x%x\n", + ports_available); + + /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to + * switch the USB 2.0 power and data lines over to the xHCI + * host. + */ + pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, + ports_available); + + pci_read_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, + &ports_available); + dev_dbg(&xhci_pdev->dev, + "USB 2.0 ports that are now switched over to xHCI: 0x%x\n", + ports_available); +} +EXPORT_SYMBOL_GPL(usb_enable_intel_xhci_ports); + +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) +{ + pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0); + pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0); +} +EXPORT_SYMBOL_GPL(usb_disable_xhci_ports); + +/** + * PCI Quirks for xHCI. + * + * Takes care of the handoff between the Pre-OS (i.e. BIOS) and the OS. + * It signals to the BIOS that the OS wants control of the host controller, + * and then waits 1 second for the BIOS to hand over control. + * If we timeout, assume the BIOS is broken and take control anyway.
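+ * (Concretely, the code below polls the BIOS-owned semaphore every 10 us + * for up to 1 second via handshake() before claiming ownership.)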
+ */ +static void quirk_usb_handoff_xhci(struct pci_dev *pdev) +{ + void __iomem *base; + int ext_cap_offset; + void __iomem *op_reg_base; + u32 val; + int timeout; + int len = pci_resource_len(pdev, 0); + + if (!mmio_resource_enabled(pdev, 0)) + return; + + base = ioremap_nocache(pci_resource_start(pdev, 0), len); + if (base == NULL) + return; + + /* + * Find the Legacy Support Capability register - + * this is optional for xHCI host controllers. + */ + ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET); + do { + if ((ext_cap_offset + sizeof(val)) > len) { + /* We're reading garbage from the controller */ + dev_warn(&pdev->dev, + "xHCI controller failing to respond"); + return; + } + + if (!ext_cap_offset) + /* We've reached the end of the extended capabilities */ + goto hc_init; + + val = readl(base + ext_cap_offset); + if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY) + break; + ext_cap_offset = xhci_find_next_cap_offset(base, ext_cap_offset); + } while (1); + + /* If the BIOS owns the HC, signal that the OS wants it, and wait */ + if (val & XHCI_HC_BIOS_OWNED) { + writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); + + /* Wait for 1 second with 10 microsecond polling interval */ + timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, + 0, 1000000, 10); + + /* Assume a buggy BIOS and take HC ownership anyway */ + if (timeout) { + dev_warn(&pdev->dev, + "xHCI BIOS handoff failed (BIOS bug ?) %08x\n", + val); + writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset); + } + } + + val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); + /* Mask off (turn off) any enabled SMIs */ + val &= XHCI_LEGACY_DISABLE_SMI; + /* Mask all SMI events bits, RW1C */ + val |= XHCI_LEGACY_SMI_EVENTS; + /* Disable any BIOS SMIs and clear all SMI events*/ + writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); + +hc_init: + if (pdev->vendor == PCI_VENDOR_ID_INTEL) + usb_enable_intel_xhci_ports(pdev); + + op_reg_base = base + XHCI_HC_LENGTH(readl(base)); + + /* Wait for the host controller to be ready before writing any + * operational or runtime registers. Wait 5 seconds and no more. + */ + timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, + 5000000, 10); + /* Assume a buggy HC and start HC initialization anyway */ + if (timeout) { + val = readl(op_reg_base + XHCI_STS_OFFSET); + dev_warn(&pdev->dev, + "xHCI HW not ready after 5 sec (HC bug?) status = 0x%x\n", + val); + } + + /* Send the halt and disable interrupts command */ + val = readl(op_reg_base + XHCI_CMD_OFFSET); + val &= ~(XHCI_CMD_RUN | XHCI_IRQS); + writel(val, op_reg_base + XHCI_CMD_OFFSET); + + /* Wait for the HC to halt - poll every 125 usec (one microframe). */ + timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1, + XHCI_MAX_HALT_USEC, 125); + if (timeout) { + val = readl(op_reg_base + XHCI_STS_OFFSET); + dev_warn(&pdev->dev, + "xHCI HW did not halt within %d usec status = 0x%x\n", + XHCI_MAX_HALT_USEC, val); + } + + iounmap(base); +} + +static void quirk_usb_early_handoff(struct pci_dev *pdev) +{ + /* Skip Netlogic mips SoC's internal PCI USB controller. 
+ * This device does not need/support EHCI/OHCI handoff + */ + if (pdev->vendor == 0x184e) /* vendor Netlogic */ + return; + if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI && + pdev->class != PCI_CLASS_SERIAL_USB_OHCI && + pdev->class != PCI_CLASS_SERIAL_USB_EHCI && + pdev->class != PCI_CLASS_SERIAL_USB_XHCI) + return; + + if (pci_enable_device(pdev) < 0) { + dev_warn(&pdev->dev, + "Can't enable PCI device, BIOS handoff failed.\n"); + return; + } + if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI) + quirk_usb_handoff_uhci(pdev); + else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI) + quirk_usb_handoff_ohci(pdev); + else if (pdev->class == PCI_CLASS_SERIAL_USB_EHCI) + quirk_usb_disable_ehci(pdev); + else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI) + quirk_usb_handoff_xhci(pdev); + pci_disable_device(pdev); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); diff --git a/addons/ehci-pci/src/4.4.180/pci-quirks.h b/addons/ehci-pci/src/4.4.180/pci-quirks.h new file mode 100644 index 00000000..c622ddf2 --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/pci-quirks.h @@ -0,0 +1,26 @@ +#ifndef __LINUX_USB_PCI_QUIRKS_H +#define __LINUX_USB_PCI_QUIRKS_H + +#ifdef CONFIG_PCI +void uhci_reset_hc(struct pci_dev *pdev, unsigned long base); +int uhci_check_and_reset_hc(struct pci_dev *pdev, unsigned long base); +int usb_amd_find_chipset_info(void); +int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *pdev); +bool usb_amd_hang_symptom_quirk(void); +bool usb_amd_prefetch_quirk(void); +void usb_amd_dev_put(void); +void usb_amd_quirk_pll_disable(void); +void usb_amd_quirk_pll_enable(void); +void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); +void sb800_prefetch(struct device *dev, int on); +#else +struct pci_dev; +static inline void usb_amd_quirk_pll_disable(void) {} +static inline void usb_amd_quirk_pll_enable(void) {} +static inline void usb_amd_dev_put(void) {} +static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} +static inline void sb800_prefetch(struct device *dev, int on) {} +#endif /* CONFIG_PCI */ + +#endif /* __LINUX_USB_PCI_QUIRKS_H */ diff --git a/addons/ehci-pci/src/4.4.180/xhci-ext-caps.h b/addons/ehci-pci/src/4.4.180/xhci-ext-caps.h new file mode 100644 index 00000000..9fe3225e --- /dev/null +++ b/addons/ehci-pci/src/4.4.180/xhci-ext-caps.h @@ -0,0 +1,156 @@ +/* + * xHCI host controller driver + * + * Copyright (C) 2008 Intel Corp. + * + * Author: Sarah Sharp + * Some code borrowed from the Linux EHCI driver. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software Foundation, + * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +/* Up to 16 ms to halt an HC */ +#define XHCI_MAX_HALT_USEC (16*1000) +/* HC not running - set to 1 when run/stop bit is cleared. 
*/ +#define XHCI_STS_HALT (1<<0) + +/* HCCPARAMS offset from PCI base address */ +#define XHCI_HCC_PARAMS_OFFSET 0x10 +/* HCCPARAMS contains the first extended capability pointer */ +#define XHCI_HCC_EXT_CAPS(p) (((p)>>16)&0xffff) + +/* Command and Status registers offset from the Operational Registers address */ +#define XHCI_CMD_OFFSET 0x00 +#define XHCI_STS_OFFSET 0x04 + +#define XHCI_MAX_EXT_CAPS 50 + +/* Capability Register */ +/* bits 7:0 - how long is the Capabilities register */ +#define XHCI_HC_LENGTH(p) (((p)>>00)&0x00ff) + +/* Extended capability register fields */ +#define XHCI_EXT_CAPS_ID(p) (((p)>>0)&0xff) +#define XHCI_EXT_CAPS_NEXT(p) (((p)>>8)&0xff) +#define XHCI_EXT_CAPS_VAL(p) ((p)>>16) +/* Extended capability IDs - ID 0 reserved */ +#define XHCI_EXT_CAPS_LEGACY 1 +#define XHCI_EXT_CAPS_PROTOCOL 2 +#define XHCI_EXT_CAPS_PM 3 +#define XHCI_EXT_CAPS_VIRT 4 +#define XHCI_EXT_CAPS_ROUTE 5 +/* IDs 6-9 reserved */ +#define XHCI_EXT_CAPS_DEBUG 10 +/* USB Legacy Support Capability - section 7.1.1 */ +#define XHCI_HC_BIOS_OWNED (1 << 16) +#define XHCI_HC_OS_OWNED (1 << 24) + +/* USB Legacy Support Capability - section 7.1.1 */ +/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */ +#define XHCI_LEGACY_SUPPORT_OFFSET (0x00) + +/* USB Legacy Support Control and Status Register - section 7.1.2 */ +/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */ +#define XHCI_LEGACY_CONTROL_OFFSET (0x04) +/* bits 1:3, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */ +#define XHCI_LEGACY_DISABLE_SMI ((0x7 << 1) + (0xff << 5) + (0x7 << 17)) +#define XHCI_LEGACY_SMI_EVENTS (0x7 << 29) + +/* USB 2.0 xHCI 0.96 L1C capability - section 7.2.2.1.3.2 */ +#define XHCI_L1C (1 << 16) + +/* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */ +#define XHCI_HLC (1 << 19) +#define XHCI_BLC (1 << 20) + +/* command register values to disable interrupts and halt the HC */ +/* start/stop HC execution - do not write unless HC is halted*/ +#define XHCI_CMD_RUN (1 << 0) +/* Event Interrupt Enable - get irq when EINT bit is set in USBSTS register */ +#define XHCI_CMD_EIE (1 << 2) +/* Host System Error Interrupt Enable - get irq when HSEIE bit set in USBSTS */ +#define XHCI_CMD_HSEIE (1 << 3) +/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */ +#define XHCI_CMD_EWE (1 << 10) + +#define XHCI_IRQS (XHCI_CMD_EIE | XHCI_CMD_HSEIE | XHCI_CMD_EWE) + +/* true: Controller Not Ready to accept doorbell or op reg writes after reset */ +#define XHCI_STS_CNR (1 << 11) + +#include + +/** + * Return the next extended capability pointer register. + * + * @base PCI register base address. + * + * @ext_offset Offset of the 32-bit register that contains the extended + * capabilities pointer. If searching for the first extended capability, pass + * in XHCI_HCC_PARAMS_OFFSET. If searching for the next extended capability, + * pass in the offset of the current extended capability register. + * + * Returns 0 if there is no next extended capability register or returns the register offset + * from the PCI registers base address.
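+ * + * Worked example (register value assumed): if HCCPARAMS reads 0x05000420, + * XHCI_HCC_EXT_CAPS() extracts xECP = 0x0500 dwords, so the first extended + * capability lives at byte offset 0x0500 << 2 = 0x1400 from the MMIO base.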
+ */ +static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset) +{ + u32 next; + + next = readl(base + ext_offset); + + if (ext_offset == XHCI_HCC_PARAMS_OFFSET) { + /* Find the first extended capability */ + next = XHCI_HCC_EXT_CAPS(next); + ext_offset = 0; + } else { + /* Find the next extended capability */ + next = XHCI_EXT_CAPS_NEXT(next); + } + + if (!next) + return 0; + /* + * Address calculation from offset of extended capabilities + * (or HCCPARAMS) register - see section 5.3.6 and section 7. + */ + return ext_offset + (next << 2); +} + +/** + * Find the offset of the extended capabilities with capability ID id. + * + * @base PCI MMIO registers base address. + * @ext_offset Offset from base of the first extended capability to look at, + * or the address of HCCPARAMS. + * @id Extended capability ID to search for. + * + * This uses an arbitrary limit of XHCI_MAX_EXT_CAPS extended capabilities + * to make sure that the list doesn't contain a loop. + */ +static inline int xhci_find_ext_cap_by_id(void __iomem *base, int ext_offset, int id) +{ + u32 val; + int limit = XHCI_MAX_EXT_CAPS; + + while (ext_offset && limit > 0) { + val = readl(base + ext_offset); + if (XHCI_EXT_CAPS_ID(val) == id) + break; + ext_offset = xhci_find_next_cap_offset(base, ext_offset); + limit--; + } + if (limit > 0) + return ext_offset; + return 0; +} diff --git a/addons/mpt3sas/install.sh b/addons/mpt3sas/install.sh new file mode 100644 index 00000000..89e48b44 --- /dev/null +++ b/addons/mpt3sas/install.sh @@ -0,0 +1,4 @@ +if [ "${1}" = "rd" ]; then + echo "Installing module for LSI MPT Fusion SAS 3.0 Device adapter" + ${INSMOD} "/modules/mpt3sas.ko" ${PARAMS} +fi diff --git a/addons/mpt3sas/manifest.yml b/addons/mpt3sas/manifest.yml new file mode 100644 index 00000000..5fae0962 --- /dev/null +++ b/addons/mpt3sas/manifest.yml @@ -0,0 +1,29 @@ +version: 1 +name: mpt3sas +description: "Driver for LSI MPT Fusion SAS 3.0 Device" +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true + diff --git a/addons/mpt3sas/src/3.10.108/Makefile b/addons/mpt3sas/src/3.10.108/Makefile new file mode 100644 index 00000000..9f013afc --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/Makefile @@ -0,0 +1,7 @@ +obj-m += mpt3sas.o +mpt3sas-y += mpt3sas_base.o \ + mpt3sas_config.o \ + mpt3sas_scsih.o \ + mpt3sas_transport.o \ + mpt3sas_ctl.o \ + mpt3sas_trigger_diag.o diff --git a/addons/mpt3sas/src/3.10.108/mpi/mpi2.h b/addons/mpt3sas/src/3.10.108/mpi/mpi2.h new file mode 100644 index 00000000..03317ffe --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpi/mpi2.h @@ -0,0 +1,1164 @@ +/* + * Copyright (c) 2000-2012 LSI Corporation. + * + * + * Name: mpi2.h + * Title: MPI Message independent structures and definitions + * including System Interface Register Set and + * scatter/gather formats. + * Creation Date: June 21, 2006 + * + * mpi2.h Version: 02.00.26 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. 
Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT. + * Moved ReplyPostHostIndex register to offset 0x6C of the + * MPI2_SYSTEM_INTERFACE_REGS and modified the define for + * MPI2_REPLY_POST_HOST_INDEX_OFFSET. + * Added union of request descriptors. + * Added union of reply descriptors. + * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT. + * Added define for MPI2_VERSION_02_00. + * Fixed the size of the FunctionDependent5 field in the + * MPI2_DEFAULT_REPLY structure. + * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT. + * Removed the MPI-defined Fault Codes and extended the + * product specific codes up to 0xEFFF. + * Added a sixth key value for the WriteSequence register + * and changed the flush value to 0x0. + * Added message function codes for Diagnostic Buffer Post + * and Diagnostic Release. + * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED + * Moved MPI2_VERSION_UNION from mpi2_ioc.h. + * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT. + * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT. + * Added #defines for marking a reply descriptor as unused. + * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT. + * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT. + * Moved LUN field defines from mpi2_init.h. + * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT. + * In all request and reply descriptors, replaced VF_ID + * field with MSIxIndex field. + * Removed DevHandle field from + * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those + * bytes reserved. + * Added RAID Accelerator functionality. + * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. + * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MSI-x index mask and shift for Reply Post Host + * Index register. + * Added function code for Host Based Discovery Action. + * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT. + * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL. + * Added defines for product-specific range of message + * function codes, 0xF0 to 0xFF. + * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. + * Added alternative defines for the SGE Direction bit. + * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define. + * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_FUNCTION_SEND_HOST_MESSAGE. + * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT. + * Incorporating additions for MPI v2.5. + * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT. + * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT. + * Added Hard Reset delay timings. + * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_H +#define MPI2_H + +/***************************************************************************** +* +* MPI Version Definitions +* +*****************************************************************************/ + +#define MPI2_VERSION_MAJOR_MASK (0xFF00) +#define MPI2_VERSION_MAJOR_SHIFT (8) +#define MPI2_VERSION_MINOR_MASK (0x00FF) +#define MPI2_VERSION_MINOR_SHIFT (0) + +/*major version for all MPI v2.x */ +#define MPI2_VERSION_MAJOR (0x02) + +/*minor version for MPI v2.0 compatible products */ +#define MPI2_VERSION_MINOR (0x00) +#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI2_VERSION_MINOR) +#define MPI2_VERSION_02_00 (0x0200) + +/*minor version for MPI v2.5 compatible products */ +#define MPI25_VERSION_MINOR (0x05) +#define MPI25_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI25_VERSION_MINOR) +#define MPI2_VERSION_02_05 (0x0205) + +/*Unit and Dev versioning for this MPI header set */ +#define MPI2_HEADER_VERSION_UNIT (0x1A) +#define MPI2_HEADER_VERSION_DEV (0x00) +#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) +#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) +#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF) +#define MPI2_HEADER_VERSION_DEV_SHIFT (0) +#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \ + MPI2_HEADER_VERSION_DEV) + +/***************************************************************************** +* +* IOC State Definitions +* +*****************************************************************************/ + +#define MPI2_IOC_STATE_RESET (0x00000000) +#define MPI2_IOC_STATE_READY (0x10000000) +#define MPI2_IOC_STATE_OPERATIONAL (0x20000000) +#define MPI2_IOC_STATE_FAULT (0x40000000) + +#define MPI2_IOC_STATE_MASK (0xF0000000) +#define MPI2_IOC_STATE_SHIFT (28) + +/*Fault state range for product specific codes */ +#define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000) +#define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF) + +/***************************************************************************** +* +* System Interface Register Definitions +* +*****************************************************************************/ + +typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS { + U32 Doorbell; /*0x00 */ + U32 WriteSequence; /*0x04 */ + U32 HostDiagnostic; /*0x08 */ + U32 Reserved1; /*0x0C */ + U32 DiagRWData; /*0x10 */ + U32 DiagRWAddressLow; /*0x14 */ + U32 DiagRWAddressHigh; /*0x18 */ + U32 Reserved2[5]; /*0x1C */ + U32 HostInterruptStatus; /*0x30 */ + U32 HostInterruptMask; /*0x34 */ + U32 DCRData; /*0x38 */ + U32 DCRAddress; /*0x3C */ + U32 Reserved3[2]; /*0x40 */ + U32 ReplyFreeHostIndex; /*0x48 */ + U32 Reserved4[8]; /*0x4C */ + U32 ReplyPostHostIndex; /*0x6C */ + U32 Reserved5; /*0x70 */ + U32 HCBSize; /*0x74 */ + U32 HCBAddressLow; /*0x78 */ + U32 HCBAddressHigh; /*0x7C */ + U32 Reserved6[16]; /*0x80 */ + U32 RequestDescriptorPostLow; /*0xC0 */ + U32 RequestDescriptorPostHigh; /*0xC4 */ + U32 Reserved7[14]; /*0xC8 */ +} MPI2_SYSTEM_INTERFACE_REGS, + *PTR_MPI2_SYSTEM_INTERFACE_REGS, + Mpi2SystemInterfaceRegs_t, + *pMpi2SystemInterfaceRegs_t; + +/* + *Defines for working with the Doorbell register.
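+ *
+ *A hedged usage sketch (the masks are from this file; "regs" is an
+ *illustrative pointer to the register map above): the IOC state lives in
+ *the top nibble of the Doorbell, and in the fault state the low word
+ *carries the fault code:
+ *
+ *	state = readl(&regs->Doorbell) & MPI2_IOC_STATE_MASK;
+ *	if (state == MPI2_IOC_STATE_FAULT)
+ *		fault = readl(&regs->Doorbell) & MPI2_DOORBELL_FAULT_CODE_MASK;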
+ */ +#define MPI2_DOORBELL_OFFSET (0x00000000) + +/*IOC --> System values */ +#define MPI2_DOORBELL_USED (0x08000000) +#define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000) +#define MPI2_DOORBELL_WHO_INIT_SHIFT (24) +#define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF) +#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF) + +/*System --> IOC values */ +#define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000) +#define MPI2_DOORBELL_FUNCTION_SHIFT (24) +#define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000) +#define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16) + +/* + *Defines for the WriteSequence register + */ +#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004) +#define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F) +#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0) +#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF) +#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4) +#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB) +#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2) +#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7) +#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD) + +/* + *Defines for the HostDiagnostic register + */ +#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008) + +#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800) +#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000) +#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800) + +#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400) +#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200) +#define MPI2_DIAG_HCB_MODE (0x00000100) +#define MPI2_DIAG_DIAG_WRITE_ENABLE (0x00000080) +#define MPI2_DIAG_FLASH_BAD_SIG (0x00000040) +#define MPI2_DIAG_RESET_HISTORY (0x00000020) +#define MPI2_DIAG_DIAG_RW_ENABLE (0x00000010) +#define MPI2_DIAG_RESET_ADAPTER (0x00000004) +#define MPI2_DIAG_HOLD_IOC_RESET (0x00000002) + +/* + *Offsets for DiagRWData and address + */ +#define MPI2_DIAG_RW_DATA_OFFSET (0x00000010) +#define MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014) +#define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018) + +/* + *Defines for the HostInterruptStatus register + */ +#define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030) +#define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000) +#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS +#define MPI2_HIS_RESET_IRQ_STATUS (0x40000000) +#define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008) +#define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001) +#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS + +/* + *Defines for the HostInterruptMask register + */ +#define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034) +#define MPI2_HIM_RESET_IRQ_MASK (0x40000000) +#define MPI2_HIM_REPLY_INT_MASK (0x00000008) +#define MPI2_HIM_RIM MPI2_HIM_REPLY_INT_MASK +#define MPI2_HIM_IOC2SYS_DB_MASK (0x00000001) +#define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK + +/* + *Offsets for DCRData and address + */ +#define MPI2_DCR_DATA_OFFSET (0x00000038) +#define MPI2_DCR_ADDRESS_OFFSET (0x0000003C) + +/* + *Offset for the Reply Free Queue + */ +#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048) + +/* + *Defines for the Reply Descriptor Post Queue + */ +#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) +#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF) +#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000) +#define MPI2_RPHI_MSIX_INDEX_SHIFT (24) + +/* + *Defines for the HCBSize and address + */ +#define MPI2_HCB_SIZE_OFFSET (0x00000074) +#define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000) +#define MPI2_HCB_SIZE_HCB_ENABLE (0x00000001) + +#define MPI2_HCB_ADDRESS_LOW_OFFSET (0x00000078) +#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C) + +/* + *Offsets for the Request Queue + */ +#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0) 
+#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4) + +/*Hard Reset delay timings */ +#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000) +#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000) +#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000) + +/***************************************************************************** +* +* Message Descriptors +* +*****************************************************************************/ + +/*Request Descriptors */ + +/*Default Request Descriptor */ +typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 DescriptorTypeDependent; /*0x06 */ +} MPI2_DEFAULT_REQUEST_DESCRIPTOR, + *PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR, + Mpi2DefaultRequestDescriptor_t, + *pMpi2DefaultRequestDescriptor_t; + +/*defines for the RequestFlags field */ +#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E) +#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) +#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02) +#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) +#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08) +#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A) +#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C) + +#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01) + +/*High Priority Request Descriptor */ +typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 Reserved1; /*0x06 */ +} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, + *PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, + Mpi2HighPriorityRequestDescriptor_t, + *pMpi2HighPriorityRequestDescriptor_t; + +/*SCSI IO Request Descriptor */ +typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 DevHandle; /*0x06 */ +} MPI2_SCSI_IO_REQUEST_DESCRIPTOR, + *PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR, + Mpi2SCSIIORequestDescriptor_t, + *pMpi2SCSIIORequestDescriptor_t; + +/*SCSI Target Request Descriptor */ +typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 IoIndex; /*0x06 */ +} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, + *PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, + Mpi2SCSITargetRequestDescriptor_t, + *pMpi2SCSITargetRequestDescriptor_t; + +/*RAID Accelerator Request Descriptor */ +typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 Reserved; /*0x06 */ +} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, + *PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, + Mpi2RAIDAcceleratorRequestDescriptor_t, + *pMpi2RAIDAcceleratorRequestDescriptor_t; + +/*Fast Path SCSI IO Request Descriptor */ +typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR + MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, + *PTR_MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, + Mpi25FastPathSCSIIORequestDescriptor_t, + *pMpi25FastPathSCSIIORequestDescriptor_t; + +/*union of Request Descriptors */ +typedef union _MPI2_REQUEST_DESCRIPTOR_UNION { + MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; + MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; + MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; + MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; + MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator; + MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO; + U64 Words; +} 
MPI2_REQUEST_DESCRIPTOR_UNION, + *PTR_MPI2_REQUEST_DESCRIPTOR_UNION, + Mpi2RequestDescriptorUnion_t, + *pMpi2RequestDescriptorUnion_t; + +/*Reply Descriptors */ + +/*Default Reply Descriptor */ +typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 DescriptorTypeDependent1; /*0x02 */ + U32 DescriptorTypeDependent2; /*0x04 */ +} MPI2_DEFAULT_REPLY_DESCRIPTOR, + *PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR, + Mpi2DefaultReplyDescriptor_t, + *pMpi2DefaultReplyDescriptor_t; + +/*defines for the ReplyFlags field */ +#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) +#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) +#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01) +#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02) +#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03) +#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05) +#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06) +#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) + +/*values for marking a reply descriptor as unused */ +#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF) +#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF) + +/*Address Reply Descriptor */ +typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U32 ReplyFrameAddress; /*0x04 */ +} MPI2_ADDRESS_REPLY_DESCRIPTOR, + *PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR, + Mpi2AddressReplyDescriptor_t, + *pMpi2AddressReplyDescriptor_t; + +#define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00) + +/*SCSI IO Success Reply Descriptor */ +typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 TaskTag; /*0x04 */ + U16 Reserved1; /*0x06 */ +} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + Mpi2SCSIIOSuccessReplyDescriptor_t, + *pMpi2SCSIIOSuccessReplyDescriptor_t; + +/*TargetAssist Success Reply Descriptor */ +typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U8 SequenceNumber; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 IoIndex; /*0x06 */ +} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, + Mpi2TargetAssistSuccessReplyDescriptor_t, + *pMpi2TargetAssistSuccessReplyDescriptor_t; + +/*Target Command Buffer Reply Descriptor */ +typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U8 VP_ID; /*0x02 */ + U8 Flags; /*0x03 */ + U16 InitiatorDevHandle; /*0x04 */ + U16 IoIndex; /*0x06 */ +} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, + *PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, + Mpi2TargetCommandBufferReplyDescriptor_t, + *pMpi2TargetCommandBufferReplyDescriptor_t; + +/*defines for Flags field */ +#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F) + +/*RAID Accelerator Success Reply Descriptor */ +typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U32 Reserved; /*0x04 */ +} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, + Mpi2RAIDAcceleratorSuccessReplyDescriptor_t, + *pMpi2RAIDAcceleratorSuccessReplyDescriptor_t; + +/*Fast Path SCSI IO Success Reply Descriptor */ +typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR + MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + 
*PTR_MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + Mpi25FastPathSCSIIOSuccessReplyDescriptor_t, + *pMpi25FastPathSCSIIOSuccessReplyDescriptor_t; + +/*union of Reply Descriptors */ +typedef union _MPI2_REPLY_DESCRIPTORS_UNION { + MPI2_DEFAULT_REPLY_DESCRIPTOR Default; + MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply; + MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; + MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess; + MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; + MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess; + MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess; + U64 Words; +} MPI2_REPLY_DESCRIPTORS_UNION, + *PTR_MPI2_REPLY_DESCRIPTORS_UNION, + Mpi2ReplyDescriptorsUnion_t, + *pMpi2ReplyDescriptorsUnion_t; + +/***************************************************************************** +* +* Message Functions +* +*****************************************************************************/ + +#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) +#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) +#define MPI2_FUNCTION_IOC_INIT (0x02) +#define MPI2_FUNCTION_IOC_FACTS (0x03) +#define MPI2_FUNCTION_CONFIG (0x04) +#define MPI2_FUNCTION_PORT_FACTS (0x05) +#define MPI2_FUNCTION_PORT_ENABLE (0x06) +#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07) +#define MPI2_FUNCTION_EVENT_ACK (0x08) +#define MPI2_FUNCTION_FW_DOWNLOAD (0x09) +#define MPI2_FUNCTION_TARGET_ASSIST (0x0B) +#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C) +#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D) +#define MPI2_FUNCTION_FW_UPLOAD (0x12) +#define MPI2_FUNCTION_RAID_ACTION (0x15) +#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) +#define MPI2_FUNCTION_TOOLBOX (0x17) +#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) +#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A) +#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B) +#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C) +#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D) +#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) +#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) +#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) +#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) +#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) +#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) +#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31) +#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) +#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) + +/*Doorbell functions */ +#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40) +#define MPI2_FUNCTION_HANDSHAKE (0x42) + +/***************************************************************************** +* +* IOC Status Values +* +*****************************************************************************/ + +/*mask for IOCStatus status value */ +#define MPI2_IOCSTATUS_MASK (0x7FFF) + +/**************************************************************************** +* Common IOCStatus values for all replies +****************************************************************************/ + +#define MPI2_IOCSTATUS_SUCCESS (0x0000) +#define MPI2_IOCSTATUS_INVALID_FUNCTION (0x0001) +#define MPI2_IOCSTATUS_BUSY (0x0002) +#define MPI2_IOCSTATUS_INVALID_SGL (0x0003) +#define MPI2_IOCSTATUS_INTERNAL_ERROR (0x0004) +#define MPI2_IOCSTATUS_INVALID_VPID (0x0005) +#define MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006) +#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007) +#define MPI2_IOCSTATUS_INVALID_STATE (0x0008) +#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009) + 
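+
+/*
+ *A hedged usage sketch, not part of the original header: posting a SCSI IO
+ *request descriptor through the Request Descriptor Post registers defined
+ *above. The types and macros are from this file; the function, its
+ *arguments and the "mmio" base are illustrative, and writel(), U8/U16/U32
+ *and U64 are assumed from the usual kernel and MPI type headers.
+ */
+static inline void example_post_scsi_io(void __iomem *mmio, U16 smid,
+	U16 dev_handle, U8 msix_index)
+{
+	MPI2_REQUEST_DESCRIPTOR_UNION d;
+
+	d.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+	d.SCSIIO.MSIxIndex = msix_index;
+	d.SCSIIO.SMID = smid;
+	d.SCSIIO.LMID = 0;
+	d.SCSIIO.DevHandle = dev_handle;
+	/*the 64-bit descriptor is handed over as two 32-bit MMIO writes */
+	writel((U32)d.Words, mmio + MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET);
+	writel((U32)(d.Words >> 32),
+	       mmio + MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET);
+}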
+/**************************************************************************** +* Config IOCStatus values +****************************************************************************/ + +#define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020) +#define MPI2_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021) +#define MPI2_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022) +#define MPI2_IOCSTATUS_CONFIG_INVALID_DATA (0x0023) +#define MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024) +#define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025) + +/**************************************************************************** +* SCSI IO Reply +****************************************************************************/ + +#define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040) +#define MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042) +#define MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043) +#define MPI2_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044) +#define MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045) +#define MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046) +#define MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047) +#define MPI2_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048) +#define MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049) +#define MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A) +#define MPI2_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B) +#define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C) + +/**************************************************************************** +* For use by SCSI Initiator and SCSI Target end-to-end data protection +****************************************************************************/ + +#define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D) +#define MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E) +#define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F) + +/**************************************************************************** +* SCSI Target values +****************************************************************************/ + +#define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062) +#define MPI2_IOCSTATUS_TARGET_ABORTED (0x0063) +#define MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064) +#define MPI2_IOCSTATUS_TARGET_NO_CONNECTION (0x0065) +#define MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A) +#define MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D) +#define MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E) +#define MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F) +#define MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070) +#define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071) + +/**************************************************************************** +* Serial Attached SCSI values +****************************************************************************/ + +#define MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090) +#define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091) + +/**************************************************************************** +* Diagnostic Buffer Post / Diagnostic Release values +****************************************************************************/ + +#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0) + +/**************************************************************************** +* RAID Accelerator values +****************************************************************************/ + +#define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0) + +/**************************************************************************** +* IOCStatus flag to indicate that log info is available +****************************************************************************/ + +#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000) + 
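+
+/*
+ *A hedged usage sketch, not part of the original header: evaluating the
+ *IOCStatus word of a reply. Only the macro names are from this file; the
+ *helper itself is illustrative.
+ */
+static inline int example_ioc_status_success(U16 ioc_status)
+{
+	/*bit 15 only flags that the reply's IOCLogInfo field is valid; mask
+	 *it off before comparing against the status codes above */
+	return (ioc_status & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SUCCESS;
+}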
+/**************************************************************************** +* IOCLogInfo Types +****************************************************************************/ + +#define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000) +#define MPI2_IOCLOGINFO_TYPE_SHIFT (28) +#define MPI2_IOCLOGINFO_TYPE_NONE (0x0) +#define MPI2_IOCLOGINFO_TYPE_SCSI (0x1) +#define MPI2_IOCLOGINFO_TYPE_FC (0x2) +#define MPI2_IOCLOGINFO_TYPE_SAS (0x3) +#define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4) +#define MPI2_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF) + +/***************************************************************************** +* +* Standard Message Structures +* +*****************************************************************************/ + +/**************************************************************************** +*Request Message Header for all request messages +****************************************************************************/ + +typedef struct _MPI2_REQUEST_HEADER { + U16 FunctionDependent1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 FunctionDependent2; /*0x04 */ + U8 FunctionDependent3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ +} MPI2_REQUEST_HEADER, *PTR_MPI2_REQUEST_HEADER, + MPI2RequestHeader_t, *pMPI2RequestHeader_t; + +/**************************************************************************** +* Default Reply +****************************************************************************/ + +typedef struct _MPI2_DEFAULT_REPLY { + U16 FunctionDependent1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 FunctionDependent2; /*0x04 */ + U8 FunctionDependent3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U16 FunctionDependent5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_DEFAULT_REPLY, *PTR_MPI2_DEFAULT_REPLY, + MPI2DefaultReply_t, *pMPI2DefaultReply_t; + +/*common version structure/union used in messages and configuration pages */ + +typedef struct _MPI2_VERSION_STRUCT { + U8 Dev; /*0x00 */ + U8 Unit; /*0x01 */ + U8 Minor; /*0x02 */ + U8 Major; /*0x03 */ +} MPI2_VERSION_STRUCT; + +typedef union _MPI2_VERSION_UNION { + MPI2_VERSION_STRUCT Struct; + U32 Word; +} MPI2_VERSION_UNION; + +/*LUN field defines, common to many structures */ +#define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF) +#define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000) +#define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF) +#define MPI2_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000) +#define MPI2_LUN_LEVEL_1_WORD (0xFF00) +#define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00) + +/***************************************************************************** +* +* Fusion-MPT MPI Scatter Gather Elements +* +*****************************************************************************/ + +/**************************************************************************** +* MPI Simple Element structures +****************************************************************************/ + +typedef struct _MPI2_SGE_SIMPLE32 { + U32 FlagsLength; + U32 Address; +} MPI2_SGE_SIMPLE32, *PTR_MPI2_SGE_SIMPLE32, + Mpi2SGESimple32_t, *pMpi2SGESimple32_t; + +typedef struct _MPI2_SGE_SIMPLE64 { + U32 FlagsLength; + U64 Address; +} MPI2_SGE_SIMPLE64, *PTR_MPI2_SGE_SIMPLE64, + Mpi2SGESimple64_t, *pMpi2SGESimple64_t; + +typedef struct _MPI2_SGE_SIMPLE_UNION { + U32 FlagsLength; + union { + U32 Address32; + U64 Address64; + } u; +} MPI2_SGE_SIMPLE_UNION, + 
*PTR_MPI2_SGE_SIMPLE_UNION, + Mpi2SGESimpleUnion_t, + *pMpi2SGESimpleUnion_t; + +/**************************************************************************** +* MPI Chain Element structures - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_CHAIN32 { + U16 Length; + U8 NextChainOffset; + U8 Flags; + U32 Address; +} MPI2_SGE_CHAIN32, *PTR_MPI2_SGE_CHAIN32, + Mpi2SGEChain32_t, *pMpi2SGEChain32_t; + +typedef struct _MPI2_SGE_CHAIN64 { + U16 Length; + U8 NextChainOffset; + U8 Flags; + U64 Address; +} MPI2_SGE_CHAIN64, *PTR_MPI2_SGE_CHAIN64, + Mpi2SGEChain64_t, *pMpi2SGEChain64_t; + +typedef struct _MPI2_SGE_CHAIN_UNION { + U16 Length; + U8 NextChainOffset; + U8 Flags; + union { + U32 Address32; + U64 Address64; + } u; +} MPI2_SGE_CHAIN_UNION, + *PTR_MPI2_SGE_CHAIN_UNION, + Mpi2SGEChainUnion_t, + *pMpi2SGEChainUnion_t; + +/**************************************************************************** +* MPI Transaction Context Element structures - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_TRANSACTION32 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[1]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION32, + *PTR_MPI2_SGE_TRANSACTION32, + Mpi2SGETransaction32_t, + *pMpi2SGETransaction32_t; + +typedef struct _MPI2_SGE_TRANSACTION64 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[2]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION64, + *PTR_MPI2_SGE_TRANSACTION64, + Mpi2SGETransaction64_t, + *pMpi2SGETransaction64_t; + +typedef struct _MPI2_SGE_TRANSACTION96 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[3]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION96, *PTR_MPI2_SGE_TRANSACTION96, + Mpi2SGETransaction96_t, *pMpi2SGETransaction96_t; + +typedef struct _MPI2_SGE_TRANSACTION128 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[4]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION128, *PTR_MPI2_SGE_TRANSACTION128, + Mpi2SGETransaction_t128, *pMpi2SGETransaction_t128; + +typedef struct _MPI2_SGE_TRANSACTION_UNION { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + union { + U32 TransactionContext32[1]; + U32 TransactionContext64[2]; + U32 TransactionContext96[3]; + U32 TransactionContext128[4]; + } u; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION_UNION, + *PTR_MPI2_SGE_TRANSACTION_UNION, + Mpi2SGETransactionUnion_t, + *pMpi2SGETransactionUnion_t; + +/**************************************************************************** +* MPI SGE union for IO SGL's - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_MPI_SGE_IO_UNION { + union { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_CHAIN_UNION Chain; + } u; +} MPI2_MPI_SGE_IO_UNION, *PTR_MPI2_MPI_SGE_IO_UNION, + Mpi2MpiSGEIOUnion_t, *pMpi2MpiSGEIOUnion_t; + +/**************************************************************************** +* MPI SGE union for SGL's with Simple and Transaction elements - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION { + union { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_TRANSACTION_UNION Transaction; + } u; +} MPI2_SGE_TRANS_SIMPLE_UNION, + 
*PTR_MPI2_SGE_TRANS_SIMPLE_UNION, + Mpi2SGETransSimpleUnion_t, + *pMpi2SGETransSimpleUnion_t; + +/**************************************************************************** +* All MPI SGE types union +****************************************************************************/ + +typedef struct _MPI2_MPI_SGE_UNION { + union { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_CHAIN_UNION Chain; + MPI2_SGE_TRANSACTION_UNION Transaction; + } u; +} MPI2_MPI_SGE_UNION, *PTR_MPI2_MPI_SGE_UNION, + Mpi2MpiSgeUnion_t, *pMpi2MpiSgeUnion_t; + +/**************************************************************************** +* MPI SGE field definition and masks +****************************************************************************/ + +/*Flags field bit definitions */ + +#define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80) +#define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40) +#define MPI2_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30) +#define MPI2_SGE_FLAGS_LOCAL_ADDRESS (0x08) +#define MPI2_SGE_FLAGS_DIRECTION (0x04) +#define MPI2_SGE_FLAGS_ADDRESS_SIZE (0x02) +#define MPI2_SGE_FLAGS_END_OF_LIST (0x01) + +#define MPI2_SGE_FLAGS_SHIFT (24) + +#define MPI2_SGE_LENGTH_MASK (0x00FFFFFF) +#define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF) + +/*Element Type */ + +#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00) +#define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10) +#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30) +#define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30) + +/*Address location */ + +#define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00) + +/*Direction */ + +#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00) +#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04) + +#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST) +#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC) + +/*Address Size */ + +#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00) +#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02) + +/*Context Size */ + +#define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00) +#define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02) +#define MPI2_SGE_FLAGS_96_BIT_CONTEXT (0x04) +#define MPI2_SGE_FLAGS_128_BIT_CONTEXT (0x06) + +#define MPI2_SGE_CHAIN_OFFSET_MASK (0x00FF0000) +#define MPI2_SGE_CHAIN_OFFSET_SHIFT (16) + +/**************************************************************************** +* MPI SGE operation Macros +****************************************************************************/ + +/*SIMPLE FlagsLength manipulations... */ +#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT) +#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> \ + MPI2_SGE_FLAGS_SHIFT) +#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK) +#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK) + +#define MPI2_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_SGE_SET_FLAGS(f) | \ + MPI2_SGE_LENGTH(l)) + +#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength) +#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength) +#define MPI2_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \ + MPI2_SGE_SET_FLAGS_LENGTH(f, l)) + +/*CAUTION - The following are READ-MODIFY-WRITE! 
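+ * They OR the new bits into FlagsLength without clearing what is already
+ * there, so zero FlagsLength first when building an SGE from scratch (or
+ * use MPI2_pSGE_SET_FLAGS_LENGTH above, which assigns the whole field).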
*/ +#define MPI2_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \ + MPI2_SGE_SET_FLAGS(f)) +#define MPI2_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \ + MPI2_SGE_LENGTH(l)) + +#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> \ + MPI2_SGE_CHAIN_OFFSET_SHIFT) + +/***************************************************************************** +* +* Fusion-MPT IEEE Scatter Gather Elements +* +*****************************************************************************/ + +/**************************************************************************** +* IEEE Simple Element structures +****************************************************************************/ + +/*MPI2_IEEE_SGE_SIMPLE32 is for MPI v2.0 products only */ +typedef struct _MPI2_IEEE_SGE_SIMPLE32 { + U32 Address; + U32 FlagsLength; +} MPI2_IEEE_SGE_SIMPLE32, *PTR_MPI2_IEEE_SGE_SIMPLE32, + Mpi2IeeeSgeSimple32_t, *pMpi2IeeeSgeSimple32_t; + +typedef struct _MPI2_IEEE_SGE_SIMPLE64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 Reserved2; + U8 Flags; +} MPI2_IEEE_SGE_SIMPLE64, *PTR_MPI2_IEEE_SGE_SIMPLE64, + Mpi2IeeeSgeSimple64_t, *pMpi2IeeeSgeSimple64_t; + +typedef union _MPI2_IEEE_SGE_SIMPLE_UNION { + MPI2_IEEE_SGE_SIMPLE32 Simple32; + MPI2_IEEE_SGE_SIMPLE64 Simple64; +} MPI2_IEEE_SGE_SIMPLE_UNION, + *PTR_MPI2_IEEE_SGE_SIMPLE_UNION, + Mpi2IeeeSgeSimpleUnion_t, + *pMpi2IeeeSgeSimpleUnion_t; + +/**************************************************************************** +* IEEE Chain Element structures +****************************************************************************/ + +/*MPI2_IEEE_SGE_CHAIN32 is for MPI v2.0 products only */ +typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32; + +/*MPI2_IEEE_SGE_CHAIN64 is for MPI v2.0 products only */ +typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64; + +typedef union _MPI2_IEEE_SGE_CHAIN_UNION { + MPI2_IEEE_SGE_CHAIN32 Chain32; + MPI2_IEEE_SGE_CHAIN64 Chain64; +} MPI2_IEEE_SGE_CHAIN_UNION, + *PTR_MPI2_IEEE_SGE_CHAIN_UNION, + Mpi2IeeeSgeChainUnion_t, + *pMpi2IeeeSgeChainUnion_t; + +/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 products only */ +typedef struct _MPI25_IEEE_SGE_CHAIN64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 NextChainOffset; + U8 Flags; +} MPI25_IEEE_SGE_CHAIN64, + *PTR_MPI25_IEEE_SGE_CHAIN64, + Mpi25IeeeSgeChain64_t, + *pMpi25IeeeSgeChain64_t; + +/**************************************************************************** +* All IEEE SGE types union +****************************************************************************/ + +/*MPI2_IEEE_SGE_UNION is for MPI v2.0 products only */ +typedef struct _MPI2_IEEE_SGE_UNION { + union { + MPI2_IEEE_SGE_SIMPLE_UNION Simple; + MPI2_IEEE_SGE_CHAIN_UNION Chain; + } u; +} MPI2_IEEE_SGE_UNION, *PTR_MPI2_IEEE_SGE_UNION, + Mpi2IeeeSgeUnion_t, *pMpi2IeeeSgeUnion_t; + +/**************************************************************************** +* IEEE SGE union for IO SGL's +****************************************************************************/ + +typedef union _MPI25_SGE_IO_UNION { + MPI2_IEEE_SGE_SIMPLE64 IeeeSimple; + MPI25_IEEE_SGE_CHAIN64 IeeeChain; +} MPI25_SGE_IO_UNION, *PTR_MPI25_SGE_IO_UNION, + Mpi25SGEIOUnion_t, *pMpi25SGEIOUnion_t; + +/**************************************************************************** +* IEEE SGE field definitions and masks +****************************************************************************/ + +/*Flags field bit definitions */ + +#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80) +#define MPI25_IEEE_SGE_FLAGS_END_OF_LIST (0x40) + +#define 
MPI2_IEEE32_SGE_FLAGS_SHIFT (24) + +#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF) + +/*Element Type */ + +#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00) +#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) + +/*Data Location Address Space */ + +#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03) +#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) +#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) +#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) +#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) +#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03) +#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \ + (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) + +/**************************************************************************** +* IEEE SGE operation Macros +****************************************************************************/ + +/*SIMPLE FlagsLength manipulations... */ +#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT) +#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) \ + >> MPI2_IEEE32_SGE_FLAGS_SHIFT) +#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK) + +#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) |\ + MPI2_IEEE32_SGE_LENGTH(l)) + +#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) \ + MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength) +#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) \ + MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength) +#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \ + MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l)) + +/*CAUTION - The following are READ-MODIFY-WRITE! */ +#define MPI2_IEEE32_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \ + MPI2_IEEE32_SGE_SET_FLAGS(f)) +#define MPI2_IEEE32_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \ + MPI2_IEEE32_SGE_LENGTH(l)) + +/***************************************************************************** +* +* Fusion-MPT MPI/IEEE Scatter Gather Unions +* +*****************************************************************************/ + +typedef union _MPI2_SIMPLE_SGE_UNION { + MPI2_SGE_SIMPLE_UNION MpiSimple; + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; +} MPI2_SIMPLE_SGE_UNION, *PTR_MPI2_SIMPLE_SGE_UNION, + Mpi2SimpleSgeUntion_t, *pMpi2SimpleSgeUntion_t; + +typedef union _MPI2_SGE_IO_UNION { + MPI2_SGE_SIMPLE_UNION MpiSimple; + MPI2_SGE_CHAIN_UNION MpiChain; + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; + MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; +} MPI2_SGE_IO_UNION, *PTR_MPI2_SGE_IO_UNION, + Mpi2SGEIOUnion_t, *pMpi2SGEIOUnion_t; + +/**************************************************************************** +* +* Values for SGLFlags field, used in many request messages with an SGL +* +****************************************************************************/ + +/*values for MPI SGL Data Location Address Space subfield */ +#define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C) +#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00) +#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04) +#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) +#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C) +/*values for SGL Type subfield */ +#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03) +#define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00) +#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01) +#define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02) + +#endif diff --git a/addons/mpt3sas/src/3.10.108/mpi/mpi2_cnfg.h b/addons/mpt3sas/src/3.10.108/mpi/mpi2_cnfg.h new file mode 100644 index 00000000..d8b2c3ee --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpi/mpi2_cnfg.h @@ -0,0 +1,3323 @@ +/* + * Copyright (c) 2000-2011 LSI 
Corporation. + * + * + * Name: mpi2_cnfg.h + * Title: MPI Configuration messages and pages + * Creation Date: November 10, 2006 + * + * mpi2_cnfg.h Version: 02.00.22 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags. + * Added Manufacturing Page 11. + * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE + * define. + * 06-26-07 02.00.02 Adding generic structure for product-specific + * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS. + * Rework of BIOS Page 2 configuration page. + * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the + * forms. + * Added configuration pages IOC Page 8 and Driver + * Persistent Mapping Page 0. + * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated + * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1, + * RAID Physical Disk Pages 0 and 1, RAID Configuration + * Page 0). + * Added new value for AccessStatus field of SAS Device + * Page 0 (_SATA_NEEDS_INITIALIZATION). + * 10-31-07 02.00.04 Added missing SEPDevHandle field to + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for + * NVDATA. + * Modified IOC Page 7 to use masks and added field for + * SASBroadcastPrimitiveMasks. + * Added MPI2_CONFIG_PAGE_BIOS_4. + * Added MPI2_CONFIG_PAGE_LOG_0. + * 02-29-08 02.00.06 Modified various names to make them 32-character unique. + * Added SAS Device IDs. + * Updated Integrated RAID configuration pages including + * Manufacturing Page 4, IOC Page 6, and RAID Configuration + * Page 0. + * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA. + * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION. + * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING. + * Added missing MaxNumRoutedSasAddresses field to + * MPI2_CONFIG_PAGE_EXPANDER_0. + * Added SAS Port Page 0. + * Modified structure layout for + * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0. + * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use + * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array. + * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF + * to 0x000000FF. + * Added two new values for the Physical Disk Coercion Size + * bits in the Flags field of Manufacturing Page 4. + * Added product-specific Manufacturing pages 16 to 31. + * Modified Flags bits for controlling write cache on SATA + * drives in IO Unit Page 1. + * Added new bit to AdditionalControlFlags of SAS IO Unit + * Page 1 to control Invalid Topology Correction. + * Added additional defines for RAID Volume Page 0 + * VolumeStatusFlags field. + * Modified meaning of RAID Volume Page 0 VolumeSettings + * define for auto-configure of hot-swap drives. + * Added SupportedPhysDisks field to RAID Volume Page 1 and + * added related defines. + * Added PhysDiskAttributes field (and related defines) to + * RAID Physical Disk Page 0. + * Added MPI2_SAS_PHYINFO_PHY_VACANT define. + * Added three new DiscoveryStatus bits for SAS IO Unit + * Page 0 and SAS Expander Page 0. 
+ * Removed multiplexing information from SAS IO Unit pages. + * Added BootDeviceWaitTime field to SAS IO Unit Page 4. + * Removed Zone Address Resolved bit from PhyInfo and from + * Expander Page 0 Flags field. + * Added two new AccessStatus values to SAS Device Page 0 + * for indicating routing problems. Added 3 reserved words + * to this page. + * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3. + * Inserted missing reserved field into structure for IOC + * Page 6. + * Added more pending task bits to RAID Volume Page 0 + * VolumeStatusFlags defines. + * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define. + * Added a new DiscoveryStatus bit for SAS IO Unit Page 0 + * and SAS Expander Page 0 to flag a downstream initiator + * when in simplified routing mode. + * Removed SATA Init Failure defines for DiscoveryStatus + * fields of SAS IO Unit Page 0 and SAS Expander Page 0. + * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. + * Added PortGroups, DmaGroup, and ControlGroup fields to + * SAS Device Page 0. + * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO + * Unit Page 6. + * Added expander reduced functionality data to SAS + * Expander Page 0. + * Added SAS PHY Page 2 and SAS PHY Page 3. + * 07-30-09 02.00.12 Added IO Unit Page 7. + * Added new device ids. + * Added SAS IO Unit Page 5. + * Added partial and slumber power management capable flags + * to SAS Device Page 0 Flags field. + * Added PhyInfo defines for power condition. + * Added Ethernet configuration pages. + * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. + * Added SAS PHY Page 4 structure and defines. + * 02-10-10 02.00.14 Modified the comments for the configuration page + * structures that contain an array of data. The host + * should use the "count" field in the page data (e.g. the + * NumPhys field) to determine the number of valid elements + * in the array. + * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines. + * Added PowerManagementCapabilities to IO Unit Page 7. + * Added PortWidthModGroup field to + * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. + * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT + * define. + * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. + * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. + * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) + * defines. + * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to + * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for + * the Pinout field. + * Added BoardTemperature and BoardTemperatureUnits fields + * to MPI2_CONFIG_PAGE_IO_UNIT_7. + * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define + * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure. + * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST. + * Added IO Unit Page 8, IO Unit Page 9, + * and IO Unit Page 10. + * Added SASNotifyPrimitiveMasks field to + * MPI2_CONFIG_PAGE_IOC_7. + * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec). + * 05-25-11 02.00.20 Cleaned up a few comments. + * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities + * for PCIe link as obsolete. + * Added SpinupFlags field containing a Disable Spin-up bit + * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO + * Unit Page 4. 
+ * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT. + * Added UEFIVersion field to BIOS Page 1 and defined new + * BiosOptions bits. + * Incorporating additions for MPI v2.5. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_CNFG_H +#define MPI2_CNFG_H + +/***************************************************************************** +* Configuration Page Header and defines +*****************************************************************************/ + +/*Config Page Header */ +typedef struct _MPI2_CONFIG_PAGE_HEADER { + U8 PageVersion; /*0x00 */ + U8 PageLength; /*0x01 */ + U8 PageNumber; /*0x02 */ + U8 PageType; /*0x03 */ +} MPI2_CONFIG_PAGE_HEADER, *PTR_MPI2_CONFIG_PAGE_HEADER, + Mpi2ConfigPageHeader_t, *pMpi2ConfigPageHeader_t; + +typedef union _MPI2_CONFIG_PAGE_HEADER_UNION { + MPI2_CONFIG_PAGE_HEADER Struct; + U8 Bytes[4]; + U16 Word16[2]; + U32 Word32; +} MPI2_CONFIG_PAGE_HEADER_UNION, *PTR_MPI2_CONFIG_PAGE_HEADER_UNION, + Mpi2ConfigPageHeaderUnion, *pMpi2ConfigPageHeaderUnion; + +/*Extended Config Page Header */ +typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER { + U8 PageVersion; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 PageNumber; /*0x02 */ + U8 PageType; /*0x03 */ + U16 ExtPageLength; /*0x04 */ + U8 ExtPageType; /*0x06 */ + U8 Reserved2; /*0x07 */ +} MPI2_CONFIG_EXTENDED_PAGE_HEADER, + *PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER, + Mpi2ConfigExtendedPageHeader_t, + *pMpi2ConfigExtendedPageHeader_t; + +typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION { + MPI2_CONFIG_PAGE_HEADER Struct; + MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext; + U8 Bytes[8]; + U16 Word16[4]; + U32 Word32[2]; +} MPI2_CONFIG_EXT_PAGE_HEADER_UNION, + *PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION, + Mpi2ConfigPageExtendedHeaderUnion, + *pMpi2ConfigPageExtendedHeaderUnion; + + +/*PageType field values */ +#define MPI2_CONFIG_PAGEATTR_READ_ONLY (0x00) +#define MPI2_CONFIG_PAGEATTR_CHANGEABLE (0x10) +#define MPI2_CONFIG_PAGEATTR_PERSISTENT (0x20) +#define MPI2_CONFIG_PAGEATTR_MASK (0xF0) + +#define MPI2_CONFIG_PAGETYPE_IO_UNIT (0x00) +#define MPI2_CONFIG_PAGETYPE_IOC (0x01) +#define MPI2_CONFIG_PAGETYPE_BIOS (0x02) +#define MPI2_CONFIG_PAGETYPE_RAID_VOLUME (0x08) +#define MPI2_CONFIG_PAGETYPE_MANUFACTURING (0x09) +#define MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A) +#define MPI2_CONFIG_PAGETYPE_EXTENDED (0x0F) +#define MPI2_CONFIG_PAGETYPE_MASK (0x0F) + +#define MPI2_CONFIG_TYPENUM_MASK (0x0FFF) + + +/*ExtPageType field values */ +#define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_PHY (0x13) +#define MPI2_CONFIG_EXTPAGETYPE_LOG (0x14) +#define MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15) +#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16) +#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) +#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19) +#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A) + + +/***************************************************************************** +* PageAddress defines +*****************************************************************************/ + +/*RAID Volume PageAddress format */ +#define MPI2_RAID_VOLUME_PGAD_FORM_MASK (0xF0000000) +#define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000) + +#define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK (0x0000FFFF) + + +/*RAID Physical Disk 
PageAddress format */ +#define MPI2_PHYSDISK_PGAD_FORM_MASK (0xF0000000) +#define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000) +#define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000) +#define MPI2_PHYSDISK_PGAD_FORM_DEVHANDLE (0x20000000) + +#define MPI2_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF) +#define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK (0x0000FFFF) + + +/*SAS Expander PageAddress format */ +#define MPI2_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000) + +#define MPI2_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000FFFF) +#define MPI2_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00FF0000) +#define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16) + + +/*SAS Device PageAddress format */ +#define MPI2_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000) + +#define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF) + + +/*SAS PHY PageAddress format */ +#define MPI2_SAS_PHY_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000) +#define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX (0x10000000) + +#define MPI2_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF) +#define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK (0x0000FFFF) + + +/*SAS Port PageAddress format */ +#define MPI2_SASPORT_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000) +#define MPI2_SASPORT_PGAD_FORM_PORT_NUM (0x10000000) + +#define MPI2_SASPORT_PGAD_PORTNUMBER_MASK (0x00000FFF) + + +/*SAS Enclosure PageAddress format */ +#define MPI2_SAS_ENCLOS_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000) + +#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF) + + +/*RAID Configuration PageAddress format */ +#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000) +#define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000) +#define MPI2_RAID_PGAD_FORM_CONFIGNUM (0x10000000) +#define MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG (0x20000000) + +#define MPI2_RAID_PGAD_CONFIGNUM_MASK (0x000000FF) + + +/*Driver Persistent Mapping PageAddress format */ +#define MPI2_DPM_PGAD_FORM_MASK (0xF0000000) +#define MPI2_DPM_PGAD_FORM_ENTRY_RANGE (0x00000000) + +#define MPI2_DPM_PGAD_ENTRY_COUNT_MASK (0x0FFF0000) +#define MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT (16) +#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF) + + +/*Ethernet PageAddress format */ +#define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000) +#define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000) + +#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF) + + + +/**************************************************************************** +* Configuration messages +****************************************************************************/ + +/*Configuration Request Message */ +typedef struct _MPI2_CONFIG_REQUEST { + U8 Action; /*0x00 */ + U8 SGLFlags; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 ExtPageLength; /*0x04 */ + U8 ExtPageType; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U8 Reserved2; /*0x0C */ + U8 ProxyVF_ID; /*0x0D */ + U16 Reserved4; /*0x0E */ + U32 Reserved3; /*0x10 */ + MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */ + U32 PageAddress; /*0x18 */ + MPI2_SGE_IO_UNION PageBufferSGE; /*0x1C */ +} MPI2_CONFIG_REQUEST, *PTR_MPI2_CONFIG_REQUEST, + 
Mpi2ConfigRequest_t, *pMpi2ConfigRequest_t; + +/*values for the Action field */ +#define MPI2_CONFIG_ACTION_PAGE_HEADER (0x00) +#define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT (0x01) +#define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02) +#define MPI2_CONFIG_ACTION_PAGE_DEFAULT (0x03) +#define MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04) +#define MPI2_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05) +#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06) +#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07) + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + + +/*Config Reply Message */ +typedef struct _MPI2_CONFIG_REPLY { + U8 Action; /*0x00 */ + U8 SGLFlags; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 ExtPageLength; /*0x04 */ + U8 ExtPageType; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U16 Reserved2; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */ +} MPI2_CONFIG_REPLY, *PTR_MPI2_CONFIG_REPLY, + Mpi2ConfigReply_t, *pMpi2ConfigReply_t; + + + +/***************************************************************************** +* +* C o n f i g u r a t i o n P a g e s +* +*****************************************************************************/ + +/**************************************************************************** +* Manufacturing Config pages +****************************************************************************/ + +#define MPI2_MFGPAGE_VENDORID_LSI (0x1000) + +/*MPI v2.0 SAS products */ +#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070) +#define MPI2_MFGPAGE_DEVID_SAS2008 (0x0072) +#define MPI2_MFGPAGE_DEVID_SAS2108_1 (0x0074) +#define MPI2_MFGPAGE_DEVID_SAS2108_2 (0x0076) +#define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077) +#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) +#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) + +#define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E) + +#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) +#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) +#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) +#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083) +#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084) +#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085) +#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086) +#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087) +#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E) + +/*MPI v2.5 SAS products */ +#define MPI25_MFGPAGE_DEVID_SAS3004 (0x0096) +#define MPI25_MFGPAGE_DEVID_SAS3008 (0x0097) +#define MPI25_MFGPAGE_DEVID_SAS3108_1 (0x0090) +#define MPI25_MFGPAGE_DEVID_SAS3108_2 (0x0091) +#define MPI25_MFGPAGE_DEVID_SAS3108_5 (0x0094) +#define MPI25_MFGPAGE_DEVID_SAS3108_6 (0x0095) + + + + +/*Manufacturing Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 ChipName[16]; /*0x04 */ + U8 ChipRevision[8]; /*0x14 */ + U8 BoardName[16]; /*0x1C */ + U8 BoardAssembly[16]; /*0x2C */ + U8 BoardTracerNumber[16]; /*0x3C */ +} MPI2_CONFIG_PAGE_MAN_0, + *PTR_MPI2_CONFIG_PAGE_MAN_0, + Mpi2ManufacturingPage0_t, + *pMpi2ManufacturingPage0_t; + +#define MPI2_MANUFACTURING0_PAGEVERSION (0x00) + + +/*Manufacturing Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 VPD[256]; /*0x04 */ +} MPI2_CONFIG_PAGE_MAN_1, + *PTR_MPI2_CONFIG_PAGE_MAN_1, + Mpi2ManufacturingPage1_t, + *pMpi2ManufacturingPage1_t; + +#define MPI2_MANUFACTURING1_PAGEVERSION (0x00) + + +typedef struct _MPI2_CHIP_REVISION_ID { + U16 DeviceID; /*0x00 */ + U8 PCIRevisionID; /*0x02 */ + U8 
Reserved; /*0x03 */ +} MPI2_CHIP_REVISION_ID, *PTR_MPI2_CHIP_REVISION_ID, + Mpi2ChipRevisionId_t, *pMpi2ChipRevisionId_t; + + +/*Manufacturing Page 2 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check Header.PageLength at runtime. + */ +#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS +#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_2 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + MPI2_CHIP_REVISION_ID ChipId; /*0x04 */ + U32 + HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/*0x08 */ +} MPI2_CONFIG_PAGE_MAN_2, + *PTR_MPI2_CONFIG_PAGE_MAN_2, + Mpi2ManufacturingPage2_t, + *pMpi2ManufacturingPage2_t; + +#define MPI2_MANUFACTURING2_PAGEVERSION (0x00) + + +/*Manufacturing Page 3 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check Header.PageLength at runtime. + */ +#ifndef MPI2_MAN_PAGE_3_INFO_WORDS +#define MPI2_MAN_PAGE_3_INFO_WORDS (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_3 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + MPI2_CHIP_REVISION_ID ChipId; /*0x04 */ + U32 + Info[MPI2_MAN_PAGE_3_INFO_WORDS];/*0x08 */ +} MPI2_CONFIG_PAGE_MAN_3, + *PTR_MPI2_CONFIG_PAGE_MAN_3, + Mpi2ManufacturingPage3_t, + *pMpi2ManufacturingPage3_t; + +#define MPI2_MANUFACTURING3_PAGEVERSION (0x00) + + +/*Manufacturing Page 4 */ + +typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS { + U8 PowerSaveFlags; /*0x00 */ + U8 InternalOperationsSleepTime; /*0x01 */ + U8 InternalOperationsRunTime; /*0x02 */ + U8 HostIdleTime; /*0x03 */ +} MPI2_MANPAGE4_PWR_SAVE_SETTINGS, + *PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS, + Mpi2ManPage4PwrSaveSettings_t, + *pMpi2ManPage4PwrSaveSettings_t; + +/*defines for the PowerSaveFlags field */ +#define MPI2_MANPAGE4_MASK_POWERSAVE_MODE (0x03) +#define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED (0x00) +#define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE (0x01) +#define MPI2_MANPAGE4_FULL_POWERSAVE_MODE (0x02) + +typedef struct _MPI2_CONFIG_PAGE_MAN_4 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Flags; /*0x08 */ + U8 InquirySize; /*0x0C */ + U8 Reserved2; /*0x0D */ + U16 Reserved3; /*0x0E */ + U8 InquiryData[56]; /*0x10 */ + U32 RAID0VolumeSettings; /*0x48 */ + U32 RAID1EVolumeSettings; /*0x4C */ + U32 RAID1VolumeSettings; /*0x50 */ + U32 RAID10VolumeSettings; /*0x54 */ + U32 Reserved4; /*0x58 */ + U32 Reserved5; /*0x5C */ + MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /*0x60 */ + U8 MaxOCEDisks; /*0x64 */ + U8 ResyncRate; /*0x65 */ + U16 DataScrubDuration; /*0x66 */ + U8 MaxHotSpares; /*0x68 */ + U8 MaxPhysDisksPerVol; /*0x69 */ + U8 MaxPhysDisks; /*0x6A */ + U8 MaxVolumes; /*0x6B */ +} MPI2_CONFIG_PAGE_MAN_4, + *PTR_MPI2_CONFIG_PAGE_MAN_4, + Mpi2ManufacturingPage4_t, + *pMpi2ManufacturingPage4_t; + +#define MPI2_MANUFACTURING4_PAGEVERSION (0x0A) + +/*Manufacturing Page 4 Flags field */ +#define MPI2_MANPAGE4_METADATA_SIZE_MASK (0x00030000) +#define MPI2_MANPAGE4_METADATA_512MB (0x00000000) + +#define MPI2_MANPAGE4_MIX_SSD_SAS_SATA (0x00008000) +#define MPI2_MANPAGE4_MIX_SSD_AND_NON_SSD (0x00004000) +#define MPI2_MANPAGE4_HIDE_PHYSDISK_NON_IR (0x00002000) + +#define MPI2_MANPAGE4_MASK_PHYSDISK_COERCION (0x00001C00) +#define MPI2_MANPAGE4_PHYSDISK_COERCION_1GB (0x00000000) +#define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION (0x00000400) +#define MPI2_MANPAGE4_PHYSDISK_ADAPTIVE_COERCION (0x00000800) +#define MPI2_MANPAGE4_PHYSDISK_ZERO_COERCION (0x00000C00) + +#define MPI2_MANPAGE4_MASK_BAD_BLOCK_MARKING (0x00000300) +#define 
MPI2_MANPAGE4_DEFAULT_BAD_BLOCK_MARKING (0x00000000) +#define MPI2_MANPAGE4_TABLE_BAD_BLOCK_MARKING (0x00000100) +#define MPI2_MANPAGE4_WRITE_LONG_BAD_BLOCK_MARKING (0x00000200) + +#define MPI2_MANPAGE4_FORCE_OFFLINE_FAILOVER (0x00000080) +#define MPI2_MANPAGE4_RAID10_DISABLE (0x00000040) +#define MPI2_MANPAGE4_RAID1E_DISABLE (0x00000020) +#define MPI2_MANPAGE4_RAID1_DISABLE (0x00000010) +#define MPI2_MANPAGE4_RAID0_DISABLE (0x00000008) +#define MPI2_MANPAGE4_IR_MODEPAGE8_DISABLE (0x00000004) +#define MPI2_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x00000002) +#define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA (0x00000001) + + +/*Manufacturing Page 5 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES +#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1) +#endif + +typedef struct _MPI2_MANUFACTURING5_ENTRY { + U64 WWID; /*0x00 */ + U64 DeviceName; /*0x08 */ +} MPI2_MANUFACTURING5_ENTRY, + *PTR_MPI2_MANUFACTURING5_ENTRY, + Mpi2Manufacturing5Entry_t, + *pMpi2Manufacturing5Entry_t; + +typedef struct _MPI2_CONFIG_PAGE_MAN_5 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumPhys; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ + MPI2_MANUFACTURING5_ENTRY + Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_MAN_5, + *PTR_MPI2_CONFIG_PAGE_MAN_5, + Mpi2ManufacturingPage5_t, + *pMpi2ManufacturingPage5_t; + +#define MPI2_MANUFACTURING5_PAGEVERSION (0x03) + + +/*Manufacturing Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_6 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 ProductSpecificInfo;/*0x04 */ +} MPI2_CONFIG_PAGE_MAN_6, + *PTR_MPI2_CONFIG_PAGE_MAN_6, + Mpi2ManufacturingPage6_t, + *pMpi2ManufacturingPage6_t; + +#define MPI2_MANUFACTURING6_PAGEVERSION (0x00) + + +/*Manufacturing Page 7 */ + +typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO { + U32 Pinout; /*0x00 */ + U8 Connector[16]; /*0x04 */ + U8 Location; /*0x14 */ + U8 ReceptacleID; /*0x15 */ + U16 Slot; /*0x16 */ + U32 Reserved2; /*0x18 */ +} MPI2_MANPAGE7_CONNECTOR_INFO, + *PTR_MPI2_MANPAGE7_CONNECTOR_INFO, + Mpi2ManPage7ConnectorInfo_t, + *pMpi2ManPage7ConnectorInfo_t; + +/*defines for the Pinout field */ +#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00) +#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8) + +#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF) +#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00) +#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01) +#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02) +#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03) +#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04) +#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05) +#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06) +#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07) +#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08) +#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C) +#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D) + +/*defines for the Location field */ +#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01) +#define MPI2_MANPAGE7_LOCATION_INTERNAL (0x02) +#define MPI2_MANPAGE7_LOCATION_EXTERNAL (0x04) +#define MPI2_MANPAGE7_LOCATION_SWITCHABLE (0x08) +#define MPI2_MANPAGE7_LOCATION_AUTO (0x10) +#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20) +#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80) + +/* + *Host code (drivers, BIOS, utilities, etc.) 
should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX +#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_7 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U32 Flags; /*0x0C */ + U8 EnclosureName[16]; /*0x10 */ + U8 NumPhys; /*0x20 */ + U8 Reserved3; /*0x21 */ + U16 Reserved4; /*0x22 */ + MPI2_MANPAGE7_CONNECTOR_INFO + ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /*0x24 */ +} MPI2_CONFIG_PAGE_MAN_7, + *PTR_MPI2_CONFIG_PAGE_MAN_7, + Mpi2ManufacturingPage7_t, + *pMpi2ManufacturingPage7_t; + +#define MPI2_MANUFACTURING7_PAGEVERSION (0x01) + +/*defines for the Flags field */ +#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) + + +/* + *Generic structure to use for product-specific manufacturing pages + *(currently Manufacturing Page 8 through Manufacturing Page 31). + */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_PS { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 ProductSpecificInfo;/*0x04 */ +} MPI2_CONFIG_PAGE_MAN_PS, + *PTR_MPI2_CONFIG_PAGE_MAN_PS, + Mpi2ManufacturingPagePS_t, + *pMpi2ManufacturingPagePS_t; + +#define MPI2_MANUFACTURING8_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING9_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING10_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING11_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING12_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING13_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING14_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING15_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING16_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING17_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING18_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING19_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING20_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING21_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING22_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING23_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING24_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING25_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING26_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING27_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING28_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING29_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING30_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING31_PAGEVERSION (0x00) + + +/**************************************************************************** +* IO Unit Config Pages +****************************************************************************/ + +/*IO Unit Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U64 UniqueValue; /*0x04 */ + MPI2_VERSION_UNION NvdataVersionDefault; /*0x0C */ + MPI2_VERSION_UNION NvdataVersionPersistent; /*0x0E */ +} MPI2_CONFIG_PAGE_IO_UNIT_0, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_0, + Mpi2IOUnitPage0_t, *pMpi2IOUnitPage0_t; + +#define MPI2_IOUNITPAGE0_PAGEVERSION (0x02) + + +/*IO Unit Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Flags; /*0x04 */ +} MPI2_CONFIG_PAGE_IO_UNIT_1, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_1, + Mpi2IOUnitPage1_t, *pMpi2IOUnitPage1_t; + +#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04) + +/*IO Unit Page 1 Flags defines */ +#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000) +#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000) +#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800) +#define 
MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600) +#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9) +#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000) +#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200) +#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400) +#define MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100) +#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040) +#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020) +#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004) + + +/*IO Unit Page 3 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for GPIOCount at runtime. + */ +#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX +#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 GPIOCount; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U16 + GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/*0x08 */ +} MPI2_CONFIG_PAGE_IO_UNIT_3, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_3, + Mpi2IOUnitPage3_t, *pMpi2IOUnitPage3_t; + +#define MPI2_IOUNITPAGE3_PAGEVERSION (0x01) + +/*defines for IO Unit Page 3 GPIOVal field */ +#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFFFC) +#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2) +#define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF (0x0000) +#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001) + + +/*IO Unit Page 5 */ + +/* + *Upper layer code (drivers, utilities, etc.) should leave this define set to + *one and check the value returned for NumDmaEngines at runtime. + */ +#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES +#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U64 + RaidAcceleratorBufferBaseAddress; /*0x04 */ + U64 + RaidAcceleratorBufferSize; /*0x0C */ + U64 + RaidAcceleratorControlBaseAddress; /*0x14 */ + U8 RAControlSize; /*0x1C */ + U8 NumDmaEngines; /*0x1D */ + U8 RAMinControlSize; /*0x1E */ + U8 RAMaxControlSize; /*0x1F */ + U32 Reserved1; /*0x20 */ + U32 Reserved2; /*0x24 */ + U32 Reserved3; /*0x28 */ + U32 + DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /*0x2C */ +} MPI2_CONFIG_PAGE_IO_UNIT_5, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_5, + Mpi2IOUnitPage5_t, *pMpi2IOUnitPage5_t; + +#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00) + +/*defines for IO Unit Page 5 DmaEngineCapabilities field */ +#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFF00) +#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16) + +#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008) +#define MPI2_IOUNITPAGE5_DMA_CAP_PARITY_GENERATION (0x0004) +#define MPI2_IOUNITPAGE5_DMA_CAP_HASHING (0x0002) +#define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION (0x0001) + + +/*IO Unit Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 Flags; /*0x04 */ + U8 RAHostControlSize; /*0x06 */ + U8 Reserved0; /*0x07 */ + U64 + RaidAcceleratorHostControlBaseAddress; /*0x08 */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ + U32 Reserved3; /*0x18 */ +} MPI2_CONFIG_PAGE_IO_UNIT_6, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_6, + Mpi2IOUnitPage6_t, *pMpi2IOUnitPage6_t; + +#define MPI2_IOUNITPAGE6_PAGEVERSION (0x00) + +/*defines for IO Unit Page 6 Flags field */ +#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001) + + +/*IO Unit Page 7 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 
CurrentPowerMode; /*0x04 */ + U8 PreviousPowerMode; /*0x05 */ + U8 PCIeWidth; /*0x06 */ + U8 PCIeSpeed; /*0x07 */ + U32 ProcessorState; /*0x08 */ + U32 + PowerManagementCapabilities; /*0x0C */ + U16 IOCTemperature; /*0x10 */ + U8 + IOCTemperatureUnits; /*0x12 */ + U8 IOCSpeed; /*0x13 */ + U16 BoardTemperature; /*0x14 */ + U8 + BoardTemperatureUnits; /*0x16 */ + U8 Reserved3; /*0x17 */ +} MPI2_CONFIG_PAGE_IO_UNIT_7, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, + Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t; + +#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02) + +/*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */ +#define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0) +#define MPI25_IOUNITPAGE7_PM_INIT_UNAVAILABLE (0x00) +#define MPI25_IOUNITPAGE7_PM_INIT_HOST (0x40) +#define MPI25_IOUNITPAGE7_PM_INIT_IO_UNIT (0x80) +#define MPI25_IOUNITPAGE7_PM_INIT_PCIE_DPA (0xC0) + +#define MPI25_IOUNITPAGE7_PM_MODE_MASK (0x07) +#define MPI25_IOUNITPAGE7_PM_MODE_UNAVAILABLE (0x00) +#define MPI25_IOUNITPAGE7_PM_MODE_UNKNOWN (0x01) +#define MPI25_IOUNITPAGE7_PM_MODE_FULL_POWER (0x04) +#define MPI25_IOUNITPAGE7_PM_MODE_REDUCED_POWER (0x05) +#define MPI25_IOUNITPAGE7_PM_MODE_STANDBY (0x06) + + +/*defines for IO Unit Page 7 PCIeWidth field */ +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08) + +/*defines for IO Unit Page 7 PCIeSpeed field */ +#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02) + +/*defines for IO Unit Page 7 ProcessorState field */ +#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F) +#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0) + +#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01) +#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02) + +/*defines for IO Unit Page 7 PowerManagementCapabilities field */ +#define MPI25_IOUNITPAGE7_PMCAP_DPA_FULL_PWR_MODE (0x00400000) +#define MPI25_IOUNITPAGE7_PMCAP_DPA_REDUCED_PWR_MODE (0x00200000) +#define MPI25_IOUNITPAGE7_PMCAP_DPA_STANDBY_MODE (0x00100000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_FULL_PWR_MODE (0x00040000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_REDUCED_PWR_MODE (0x00020000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_STANDBY_MODE (0x00010000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_FULL_PWR_MODE (0x00004000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_REDUCED_PWR_MODE (0x00002000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_STANDBY_MODE (0x00001000) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_12_5_PCT_IOCSPEED (0x00000400) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_25_0_PCT_IOCSPEED (0x00000200) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_50_0_PCT_IOCSPEED (0x00000100) +#define MPI25_IOUNITPAGE7_PMCAP_IO_12_5_PCT_IOCSPEED (0x00000040) +#define MPI25_IOUNITPAGE7_PMCAP_IO_25_0_PCT_IOCSPEED (0x00000020) +#define MPI25_IOUNITPAGE7_PMCAP_IO_50_0_PCT_IOCSPEED (0x00000010) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_WIDTH_CHANGE_PCIE (0x00000008) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_SPEED_CHANGE_PCIE (0x00000004) +#define MPI25_IOUNITPAGE7_PMCAP_IO_WIDTH_CHANGE_PCIE (0x00000002) +#define MPI25_IOUNITPAGE7_PMCAP_IO_SPEED_CHANGE_PCIE (0x00000001) + +/*obsolete names for the PowerManagementCapabilities bits (above) */ +#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400) +#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200) +#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100) +#define 
MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /*obsolete */ +#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /*obsolete */ + + +/*defines for IO Unit Page 7 IOCTemperatureUnits field */ +#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01) +#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02) + +/*defines for IO Unit Page 7 IOCSpeed field */ +#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01) +#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02) +#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04) +#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08) + +/*defines for IO Unit Page 7 BoardTemperatureUnits field */ +#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) +#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) + + +/*IO Unit Page 8 */ + +#define MPI2_IOUNIT8_NUM_THRESHOLDS (4) + +typedef struct _MPI2_IOUNIT8_SENSOR { + U16 Flags; /*0x00 */ + U16 Reserved1; /*0x02 */ + U16 + Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /*0x04 */ + U32 Reserved2; /*0x0C */ + U32 Reserved3; /*0x10 */ + U32 Reserved4; /*0x14 */ +} MPI2_IOUNIT8_SENSOR, *PTR_MPI2_IOUNIT8_SENSOR, + Mpi2IOUnit8Sensor_t, *pMpi2IOUnit8Sensor_t; + +/*defines for IO Unit Page 8 Sensor Flags field */ +#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumSensors at runtime. + */ +#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES +#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U8 NumSensors; /*0x0C */ + U8 PollingInterval; /*0x0D */ + U16 Reserved3; /*0x0E */ + MPI2_IOUNIT8_SENSOR + Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_8, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_8, + Mpi2IOUnitPage8_t, *pMpi2IOUnitPage8_t; + +#define MPI2_IOUNITPAGE8_PAGEVERSION (0x00) + + +/*IO Unit Page 9 */ + +typedef struct _MPI2_IOUNIT9_SENSOR { + U16 CurrentTemperature; /*0x00 */ + U16 Reserved1; /*0x02 */ + U8 Flags; /*0x04 */ + U8 Reserved2; /*0x05 */ + U16 Reserved3; /*0x06 */ + U32 Reserved4; /*0x08 */ + U32 Reserved5; /*0x0C */ +} MPI2_IOUNIT9_SENSOR, *PTR_MPI2_IOUNIT9_SENSOR, + Mpi2IOUnit9Sensor_t, *pMpi2IOUnit9Sensor_t; + +/*defines for IO Unit Page 9 Sensor Flags field */ +#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumSensors at runtime. 
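+ *
+ *Editorial usage sketch (pointer name is illustrative, not part of the
+ *original header): after reading the page, the valid readings follow from
+ *NumSensors rather than from the compile-time array bound, e.g.
+ *
+ *    for (i = 0; i < page9->NumSensors; i++)
+ *        if (page9->Sensor[i].Flags & MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID)
+ *            temperature = page9->Sensor[i].CurrentTemperature;
+ *
+ *where page9 is a hypothetical pointer to a fully-read
+ *MPI2_CONFIG_PAGE_IO_UNIT_9 (defined below).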
+ */ +#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES +#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U8 NumSensors; /*0x0C */ + U8 Reserved4; /*0x0D */ + U16 Reserved3; /*0x0E */ + MPI2_IOUNIT9_SENSOR + Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_9, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_9, + Mpi2IOUnitPage9_t, *pMpi2IOUnitPage9_t; + +#define MPI2_IOUNITPAGE9_PAGEVERSION (0x00) + + +/*IO Unit Page 10 */ + +typedef struct _MPI2_IOUNIT10_FUNCTION { + U8 CreditPercent; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ +} MPI2_IOUNIT10_FUNCTION, + *PTR_MPI2_IOUNIT10_FUNCTION, + Mpi2IOUnit10Function_t, + *pMpi2IOUnit10Function_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumFunctions at runtime. + */ +#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES +#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumFunctions; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ + MPI2_IOUNIT10_FUNCTION + Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_10, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_10, + Mpi2IOUnitPage10_t, *pMpi2IOUnitPage10_t; + +#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01) + + + +/**************************************************************************** +* IOC Config Pages +****************************************************************************/ + +/*IOC Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U16 VendorID; /*0x0C */ + U16 DeviceID; /*0x0E */ + U8 RevisionID; /*0x10 */ + U8 Reserved3; /*0x11 */ + U16 Reserved4; /*0x12 */ + U32 ClassCode; /*0x14 */ + U16 SubsystemVendorID; /*0x18 */ + U16 SubsystemID; /*0x1A */ +} MPI2_CONFIG_PAGE_IOC_0, + *PTR_MPI2_CONFIG_PAGE_IOC_0, + Mpi2IOCPage0_t, *pMpi2IOCPage0_t; + +#define MPI2_IOCPAGE0_PAGEVERSION (0x02) + + +/*IOC Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Flags; /*0x04 */ + U32 CoalescingTimeout; /*0x08 */ + U8 CoalescingDepth; /*0x0C */ + U8 PCISlotNum; /*0x0D */ + U8 PCIBusNum; /*0x0E */ + U8 PCIDomainSegment; /*0x0F */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ +} MPI2_CONFIG_PAGE_IOC_1, + *PTR_MPI2_CONFIG_PAGE_IOC_1, + Mpi2IOCPage1_t, *pMpi2IOCPage1_t; + +#define MPI2_IOCPAGE1_PAGEVERSION (0x05) + +/*defines for IOC Page 1 Flags field */ +#define MPI2_IOCPAGE1_REPLY_COALESCING (0x00000001) + +#define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF) +#define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN (0xFF) +#define MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN (0xFF) + +/*IOC Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_6 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 + CapabilitiesFlags; /*0x04 */ + U8 MaxDrivesRAID0; /*0x08 */ + U8 MaxDrivesRAID1; /*0x09 */ + U8 + MaxDrivesRAID1E; /*0x0A */ + U8 + MaxDrivesRAID10; /*0x0B */ + U8 MinDrivesRAID0; /*0x0C */ + U8 MinDrivesRAID1; /*0x0D */ + U8 + MinDrivesRAID1E; /*0x0E */ + U8 + MinDrivesRAID10; /*0x0F */ + U32 Reserved1; /*0x10 */ + U8 + MaxGlobalHotSpares; /*0x14 */ + U8 MaxPhysDisks; /*0x15 */ + U8 MaxVolumes; /*0x16 */ + U8 MaxConfigs; /*0x17 */ + U8 MaxOCEDisks; /*0x18 */ + U8 
Reserved2; /*0x19 */ + U16 Reserved3; /*0x1A */ + U32 + SupportedStripeSizeMapRAID0; /*0x1C */ + U32 + SupportedStripeSizeMapRAID1E; /*0x20 */ + U32 + SupportedStripeSizeMapRAID10; /*0x24 */ + U32 Reserved4; /*0x28 */ + U32 Reserved5; /*0x2C */ + U16 + DefaultMetadataSize; /*0x30 */ + U16 Reserved6; /*0x32 */ + U16 + MaxBadBlockTableEntries; /*0x34 */ + U16 Reserved7; /*0x36 */ + U32 + IRNvsramVersion; /*0x38 */ +} MPI2_CONFIG_PAGE_IOC_6, + *PTR_MPI2_CONFIG_PAGE_IOC_6, + Mpi2IOCPage6_t, *pMpi2IOCPage6_t; + +#define MPI2_IOCPAGE6_PAGEVERSION (0x05) + +/*defines for IOC Page 6 CapabilitiesFlags */ +#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID0_SUPPORT (0x00000002) +#define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001) + + +/*IOC Page 7 */ + +#define MPI2_IOCPAGE7_EVENTMASK_WORDS (4) + +typedef struct _MPI2_CONFIG_PAGE_IOC_7 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 + EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/*0x08 */ + U16 SASBroadcastPrimitiveMasks; /*0x18 */ + U16 SASNotifyPrimitiveMasks; /*0x1A */ + U32 Reserved3; /*0x1C */ +} MPI2_CONFIG_PAGE_IOC_7, + *PTR_MPI2_CONFIG_PAGE_IOC_7, + Mpi2IOCPage7_t, *pMpi2IOCPage7_t; + +#define MPI2_IOCPAGE7_PAGEVERSION (0x02) + + +/*IOC Page 8 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_8 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumDevsPerEnclosure; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U16 MaxPersistentEntries; /*0x08 */ + U16 MaxNumPhysicalMappedIDs; /*0x0A */ + U16 Flags; /*0x0C */ + U16 Reserved3; /*0x0E */ + U16 IRVolumeMappingFlags; /*0x10 */ + U16 Reserved4; /*0x12 */ + U32 Reserved5; /*0x14 */ +} MPI2_CONFIG_PAGE_IOC_8, + *PTR_MPI2_CONFIG_PAGE_IOC_8, + Mpi2IOCPage8_t, *pMpi2IOCPage8_t; + +#define MPI2_IOCPAGE8_PAGEVERSION (0x00) + +/*defines for IOC Page 8 Flags field */ +#define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1 (0x00000020) +#define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0 (0x00000010) + +#define MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE (0x0000000E) +#define MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING (0x00000000) +#define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING (0x00000002) + +#define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING (0x00000001) +#define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING (0x00000000) + +/*defines for IOC Page 8 IRVolumeMappingFlags */ +#define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003) +#define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000) +#define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING (0x00000001) + + +/**************************************************************************** +* BIOS Config Pages +****************************************************************************/ + +/*BIOS Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_BIOS_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 BiosOptions; /*0x04 */ + U32 IOCSettings; /*0x08 */ + U32 Reserved1; /*0x0C */ + U32 DeviceSettings; /*0x10 */ + U16 NumberOfDevices; /*0x14 */ + U16 UEFIVersion; /*0x16 */ + U16 IOTimeoutBlockDevicesNonRM; /*0x18 */ + U16 IOTimeoutSequential; /*0x1A */ + U16 IOTimeoutOther; /*0x1C */ + U16 IOTimeoutBlockDevicesRM; /*0x1E */ +} MPI2_CONFIG_PAGE_BIOS_1, + *PTR_MPI2_CONFIG_PAGE_BIOS_1, + Mpi2BiosPage1_t, *pMpi2BiosPage1_t; + +#define MPI2_BIOSPAGE1_PAGEVERSION (0x05) + +/*values for 
BIOS Page 1 BiosOptions field */ +#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006) +#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002) +#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004) + +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001) + +/*values for BIOS Page 1 IOCSettings field */ +#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000) +#define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000) + +#define MPI2_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0) +#define MPI2_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040) +#define MPI2_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080) + +#define MPI2_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030) +#define MPI2_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010) +#define MPI2_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020) +#define MPI2_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030) + +#define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008) + +/*values for BIOS Page 1 DeviceSettings field */ +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001) + +/*defines for BIOS Page 1 UEFIVersion field */ +#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00) +#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8) +#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF) +#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0) + + + +/*BIOS Page 2 */ + +typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER { + U32 Reserved1; /*0x00 */ + U32 Reserved2; /*0x04 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ + U32 Reserved5; /*0x10 */ + U32 Reserved6; /*0x14 */ +} MPI2_BOOT_DEVICE_ADAPTER_ORDER, + *PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER, + Mpi2BootDeviceAdapterOrder_t, + *pMpi2BootDeviceAdapterOrder_t; + +typedef struct _MPI2_BOOT_DEVICE_SAS_WWID { + U64 SASAddress; /*0x00 */ + U8 LUN[8]; /*0x08 */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ +} MPI2_BOOT_DEVICE_SAS_WWID, + *PTR_MPI2_BOOT_DEVICE_SAS_WWID, + Mpi2BootDeviceSasWwid_t, + *pMpi2BootDeviceSasWwid_t; + +typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT { + U64 EnclosureLogicalID; /*0x00 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ + U16 SlotNumber; /*0x10 */ + U16 Reserved3; /*0x12 */ + U32 Reserved4; /*0x14 */ +} MPI2_BOOT_DEVICE_ENCLOSURE_SLOT, + *PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT, + Mpi2BootDeviceEnclosureSlot_t, + *pMpi2BootDeviceEnclosureSlot_t; + +typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME { + U64 DeviceName; /*0x00 */ + U8 LUN[8]; /*0x08 */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ +} MPI2_BOOT_DEVICE_DEVICE_NAME, + *PTR_MPI2_BOOT_DEVICE_DEVICE_NAME, + Mpi2BootDeviceDeviceName_t, + *pMpi2BootDeviceDeviceName_t; + +typedef union _MPI2_MPI2_BIOSPAGE2_BOOT_DEVICE { + MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder; + MPI2_BOOT_DEVICE_SAS_WWID SasWwid; + MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot; + MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName; +} MPI2_BIOSPAGE2_BOOT_DEVICE, + *PTR_MPI2_BIOSPAGE2_BOOT_DEVICE, + Mpi2BiosPage2BootDevice_t, + *pMpi2BiosPage2BootDevice_t; + +typedef struct _MPI2_CONFIG_PAGE_BIOS_2 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + 
U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U32 Reserved3; /*0x0C */ + U32 Reserved4; /*0x10 */ + U32 Reserved5; /*0x14 */ + U32 Reserved6; /*0x18 */ + U8 ReqBootDeviceForm; /*0x1C */ + U8 Reserved7; /*0x1D */ + U16 Reserved8; /*0x1E */ + MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /*0x20 */ + U8 ReqAltBootDeviceForm; /*0x38 */ + U8 Reserved9; /*0x39 */ + U16 Reserved10; /*0x3A */ + MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /*0x3C */ + U8 CurrentBootDeviceForm; /*0x54 */ + U8 Reserved11; /*0x55 */ + U16 Reserved12; /*0x56 */ + MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /*0x58 */ +} MPI2_CONFIG_PAGE_BIOS_2, *PTR_MPI2_CONFIG_PAGE_BIOS_2, + Mpi2BiosPage2_t, *pMpi2BiosPage2_t; + +#define MPI2_BIOSPAGE2_PAGEVERSION (0x04) + +/*values for BIOS Page 2 BootDeviceForm fields */ +#define MPI2_BIOSPAGE2_FORM_MASK (0x0F) +#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00) +#define MPI2_BIOSPAGE2_FORM_SAS_WWID (0x05) +#define MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06) +#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME (0x07) + + +/*BIOS Page 3 */ + +typedef struct _MPI2_ADAPTER_INFO { + U8 PciBusNumber; /*0x00 */ + U8 PciDeviceAndFunctionNumber; /*0x01 */ + U16 AdapterFlags; /*0x02 */ +} MPI2_ADAPTER_INFO, *PTR_MPI2_ADAPTER_INFO, + Mpi2AdapterInfo_t, *pMpi2AdapterInfo_t; + +#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001) +#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002) + +typedef struct _MPI2_CONFIG_PAGE_BIOS_3 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 GlobalFlags; /*0x04 */ + U32 BiosVersion; /*0x08 */ + MPI2_ADAPTER_INFO AdapterOrder[4]; /*0x0C */ + U32 Reserved1; /*0x1C */ +} MPI2_CONFIG_PAGE_BIOS_3, + *PTR_MPI2_CONFIG_PAGE_BIOS_3, + Mpi2BiosPage3_t, *pMpi2BiosPage3_t; + +#define MPI2_BIOSPAGE3_PAGEVERSION (0x00) + +/*values for BIOS Page 3 GlobalFlags */ +#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002) +#define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE (0x00000004) +#define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE (0x00000010) + +#define MPI2_BIOSPAGE3_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0) +#define MPI2_BIOSPAGE3_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000) +#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DISPLAY (0x00000020) +#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040) + + +/*BIOS Page 4 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. 
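+ *
+ *Editorial sketch of the usual two-step read (names are illustrative): a
+ *MPI2_CONFIG_ACTION_PAGE_HEADER request returns the header alone, the
+ *buffer is then sized from the reply (PageLength counts 32-bit words):
+ *
+ *    bytes = reply.Header.PageLength * 4;
+ *
+ *and a second request with MPI2_CONFIG_ACTION_PAGE_READ_CURRENT fills it.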
+ */ +#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES +#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1) +#endif + +typedef struct _MPI2_BIOS4_ENTRY { + U64 ReassignmentWWID; /*0x00 */ + U64 ReassignmentDeviceName; /*0x08 */ +} MPI2_BIOS4_ENTRY, *PTR_MPI2_BIOS4_ENTRY, + Mpi2MBios4Entry_t, *pMpi2Bios4Entry_t; + +typedef struct _MPI2_CONFIG_PAGE_BIOS_4 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumPhys; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + MPI2_BIOS4_ENTRY + Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /*0x08 */ +} MPI2_CONFIG_PAGE_BIOS_4, *PTR_MPI2_CONFIG_PAGE_BIOS_4, + Mpi2BiosPage4_t, *pMpi2BiosPage4_t; + +#define MPI2_BIOSPAGE4_PAGEVERSION (0x01) + + +/**************************************************************************** +* RAID Volume Config Pages +****************************************************************************/ + +/*RAID Volume Page 0 */ + +typedef struct _MPI2_RAIDVOL0_PHYS_DISK { + U8 RAIDSetNum; /*0x00 */ + U8 PhysDiskMap; /*0x01 */ + U8 PhysDiskNum; /*0x02 */ + U8 Reserved; /*0x03 */ +} MPI2_RAIDVOL0_PHYS_DISK, *PTR_MPI2_RAIDVOL0_PHYS_DISK, + Mpi2RaidVol0PhysDisk_t, *pMpi2RaidVol0PhysDisk_t; + +/*defines for the PhysDiskMap field */ +#define MPI2_RAIDVOL0_PHYSDISK_PRIMARY (0x01) +#define MPI2_RAIDVOL0_PHYSDISK_SECONDARY (0x02) + +typedef struct _MPI2_RAIDVOL0_SETTINGS { + U16 Settings; /*0x00 */ + U8 HotSparePool; /*0x02 */ + U8 Reserved; /*0x03 */ +} MPI2_RAIDVOL0_SETTINGS, *PTR_MPI2_RAIDVOL0_SETTINGS, + Mpi2RaidVol0Settings_t, + *pMpi2RaidVol0Settings_t; + +/*RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */ +#define MPI2_RAID_HOT_SPARE_POOL_0 (0x01) +#define MPI2_RAID_HOT_SPARE_POOL_1 (0x02) +#define MPI2_RAID_HOT_SPARE_POOL_2 (0x04) +#define MPI2_RAID_HOT_SPARE_POOL_3 (0x08) +#define MPI2_RAID_HOT_SPARE_POOL_4 (0x10) +#define MPI2_RAID_HOT_SPARE_POOL_5 (0x20) +#define MPI2_RAID_HOT_SPARE_POOL_6 (0x40) +#define MPI2_RAID_HOT_SPARE_POOL_7 (0x80) + +/*RAID Volume Page 0 VolumeSettings defines */ +#define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0008) +#define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004) + +#define MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING (0x0003) +#define MPI2_RAIDVOL0_SETTING_UNCHANGED (0x0000) +#define MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING (0x0001) +#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhysDisks at runtime. 
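+ *
+ *Editorial sketch (identifiers are illustrative): with NumPhysDisks known,
+ *each member's mapping decodes against the MPI2_RAIDVOL0_PHYSDISK_ defines
+ *above:
+ *
+ *    for (i = 0; i < vol0->NumPhysDisks; i++)
+ *        primary = vol0->PhysDisk[i].PhysDiskMap &
+ *                  MPI2_RAIDVOL0_PHYSDISK_PRIMARY;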
+ */ +#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX +#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x04 */ + U8 VolumeState; /*0x06 */ + U8 VolumeType; /*0x07 */ + U32 VolumeStatusFlags; /*0x08 */ + MPI2_RAIDVOL0_SETTINGS VolumeSettings; /*0x0C */ + U64 MaxLBA; /*0x10 */ + U32 StripeSize; /*0x18 */ + U16 BlockSize; /*0x1C */ + U16 Reserved1; /*0x1E */ + U8 SupportedPhysDisks;/*0x20 */ + U8 ResyncRate; /*0x21 */ + U16 DataScrubDuration; /*0x22 */ + U8 NumPhysDisks; /*0x24 */ + U8 Reserved2; /*0x25 */ + U8 Reserved3; /*0x26 */ + U8 InactiveStatus; /*0x27 */ + MPI2_RAIDVOL0_PHYS_DISK + PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /*0x28 */ +} MPI2_CONFIG_PAGE_RAID_VOL_0, + *PTR_MPI2_CONFIG_PAGE_RAID_VOL_0, + Mpi2RaidVolPage0_t, *pMpi2RaidVolPage0_t; + +#define MPI2_RAIDVOLPAGE0_PAGEVERSION (0x0A) + +/*values for RAID VolumeState */ +#define MPI2_RAID_VOL_STATE_MISSING (0x00) +#define MPI2_RAID_VOL_STATE_FAILED (0x01) +#define MPI2_RAID_VOL_STATE_INITIALIZING (0x02) +#define MPI2_RAID_VOL_STATE_ONLINE (0x03) +#define MPI2_RAID_VOL_STATE_DEGRADED (0x04) +#define MPI2_RAID_VOL_STATE_OPTIMAL (0x05) + +/*values for RAID VolumeType */ +#define MPI2_RAID_VOL_TYPE_RAID0 (0x00) +#define MPI2_RAID_VOL_TYPE_RAID1E (0x01) +#define MPI2_RAID_VOL_TYPE_RAID1 (0x02) +#define MPI2_RAID_VOL_TYPE_RAID10 (0x05) +#define MPI2_RAID_VOL_TYPE_UNKNOWN (0xFF) + +/*values for RAID Volume Page 0 VolumeStatusFlags field */ +#define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC (0x02000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING (0x01000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING (0x00800000) +#define MPI2_RAIDVOL0_STATUS_FLAG_USER_CONSIST_PENDING (0x00400000) +#define MPI2_RAIDVOL0_STATUS_FLAG_MAKE_DATA_CONSISTENT (0x00200000) +#define MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB (0x00100000) +#define MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK (0x00080000) +#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000) +#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000) +#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000) +#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080) +#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040) +#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020) +#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_1E_ADJACENT_MIRROR (0x00000010) +#define MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x00000008) +#define MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x00000004) +#define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED (0x00000002) +#define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED (0x00000001) + +/*values for RAID Volume Page 0 SupportedPhysDisks field */ +#define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS (0x08) +#define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS (0x04) +#define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL (0x02) +#define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL (0x01) + +/*values for RAID Volume Page 0 InactiveStatus field */ +#define MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00) +#define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01) +#define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02) +#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE (0x03) +#define MPI2_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE (0x04) +#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE (0x05) +#define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06) + + +/*RAID Volume Page 1 */ + +typedef struct 
_MPI2_CONFIG_PAGE_RAID_VOL_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x04 */ + U16 Reserved0; /*0x06 */ + U8 GUID[24]; /*0x08 */ + U8 Name[16]; /*0x20 */ + U64 WWID; /*0x30 */ + U32 Reserved1; /*0x38 */ + U32 Reserved2; /*0x3C */ +} MPI2_CONFIG_PAGE_RAID_VOL_1, + *PTR_MPI2_CONFIG_PAGE_RAID_VOL_1, + Mpi2RaidVolPage1_t, *pMpi2RaidVolPage1_t; + +#define MPI2_RAIDVOLPAGE1_PAGEVERSION (0x03) + + +/**************************************************************************** +* RAID Physical Disk Config Pages +****************************************************************************/ + +/*RAID Physical Disk Page 0 */ + +typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS { + U16 Reserved1; /*0x00 */ + U8 HotSparePool; /*0x02 */ + U8 Reserved2; /*0x03 */ +} MPI2_RAIDPHYSDISK0_SETTINGS, + *PTR_MPI2_RAIDPHYSDISK0_SETTINGS, + Mpi2RaidPhysDisk0Settings_t, + *pMpi2RaidPhysDisk0Settings_t; + +/*use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */ + +typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA { + U8 VendorID[8]; /*0x00 */ + U8 ProductID[16]; /*0x08 */ + U8 ProductRevLevel[4]; /*0x18 */ + U8 SerialNum[32]; /*0x1C */ +} MPI2_RAIDPHYSDISK0_INQUIRY_DATA, + *PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA, + Mpi2RaidPhysDisk0InquiryData_t, + *pMpi2RaidPhysDisk0InquiryData_t; + +typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x04 */ + U8 Reserved1; /*0x06 */ + U8 PhysDiskNum; /*0x07 */ + MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /*0x08 */ + U32 Reserved2; /*0x0C */ + MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /*0x10 */ + U32 Reserved3; /*0x4C */ + U8 PhysDiskState; /*0x50 */ + U8 OfflineReason; /*0x51 */ + U8 IncompatibleReason; /*0x52 */ + U8 PhysDiskAttributes; /*0x53 */ + U32 PhysDiskStatusFlags;/*0x54 */ + U64 DeviceMaxLBA; /*0x58 */ + U64 HostMaxLBA; /*0x60 */ + U64 CoercedMaxLBA; /*0x68 */ + U16 BlockSize; /*0x70 */ + U16 Reserved5; /*0x72 */ + U32 Reserved6; /*0x74 */ +} MPI2_CONFIG_PAGE_RD_PDISK_0, + *PTR_MPI2_CONFIG_PAGE_RD_PDISK_0, + Mpi2RaidPhysDiskPage0_t, + *pMpi2RaidPhysDiskPage0_t; + +#define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION (0x05) + +/*PhysDiskState defines */ +#define MPI2_RAID_PD_STATE_NOT_CONFIGURED (0x00) +#define MPI2_RAID_PD_STATE_NOT_COMPATIBLE (0x01) +#define MPI2_RAID_PD_STATE_OFFLINE (0x02) +#define MPI2_RAID_PD_STATE_ONLINE (0x03) +#define MPI2_RAID_PD_STATE_HOT_SPARE (0x04) +#define MPI2_RAID_PD_STATE_DEGRADED (0x05) +#define MPI2_RAID_PD_STATE_REBUILDING (0x06) +#define MPI2_RAID_PD_STATE_OPTIMAL (0x07) + +/*OfflineReason defines */ +#define MPI2_PHYSDISK0_ONLINE (0x00) +#define MPI2_PHYSDISK0_OFFLINE_MISSING (0x01) +#define MPI2_PHYSDISK0_OFFLINE_FAILED (0x03) +#define MPI2_PHYSDISK0_OFFLINE_INITIALIZING (0x04) +#define MPI2_PHYSDISK0_OFFLINE_REQUESTED (0x05) +#define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED (0x06) +#define MPI2_PHYSDISK0_OFFLINE_OTHER (0xFF) + +/*IncompatibleReason defines */ +#define MPI2_PHYSDISK0_COMPATIBLE (0x00) +#define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL (0x01) +#define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE (0x02) +#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03) +#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04) +#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05) +#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06) +#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF) + +/*PhysDiskAttributes defines */ +#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C) +#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08) +#define 
MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04) + +#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03) +#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02) +#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01) + +/*PhysDiskStatusFlags defines */ +#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED (0x00000040) +#define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET (0x00000020) +#define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED (0x00000010) +#define MPI2_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00000000) +#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x00000008) +#define MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x00000004) +#define MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED (0x00000002) +#define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x00000001) + + +/*RAID Physical Disk Page 1 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhysDiskPaths at runtime. + */ +#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX +#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1) +#endif + +typedef struct _MPI2_RAIDPHYSDISK1_PATH { + U16 DevHandle; /*0x00 */ + U16 Reserved1; /*0x02 */ + U64 WWID; /*0x04 */ + U64 OwnerWWID; /*0x0C */ + U8 OwnerIdentifier; /*0x14 */ + U8 Reserved2; /*0x15 */ + U16 Flags; /*0x16 */ +} MPI2_RAIDPHYSDISK1_PATH, *PTR_MPI2_RAIDPHYSDISK1_PATH, + Mpi2RaidPhysDisk1Path_t, + *pMpi2RaidPhysDisk1Path_t; + +/*RAID Physical Disk Page 1 Physical Disk Path Flags field defines */ +#define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY (0x0004) +#define MPI2_RAID_PHYSDISK1_FLAG_BROKEN (0x0002) +#define MPI2_RAID_PHYSDISK1_FLAG_INVALID (0x0001) + +typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumPhysDiskPaths; /*0x04 */ + U8 PhysDiskNum; /*0x05 */ + U16 Reserved1; /*0x06 */ + U32 Reserved2; /*0x08 */ + MPI2_RAIDPHYSDISK1_PATH + PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/*0x0C */ +} MPI2_CONFIG_PAGE_RD_PDISK_1, + *PTR_MPI2_CONFIG_PAGE_RD_PDISK_1, + Mpi2RaidPhysDiskPage1_t, + *pMpi2RaidPhysDiskPage1_t; + +#define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION (0x02) + + +/**************************************************************************** +* values for fields used by several types of SAS Config Pages +****************************************************************************/ + +/*values for NegotiatedLinkRates fields */ +#define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL (0xF0) +#define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL (4) +#define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) +/*link rates used for Negotiated Physical and Logical Link Rate */ +#define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00) +#define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01) +#define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02) +#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03) +#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04) +#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05) +#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06) +#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08) +#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09) +#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A) +#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B) + + +/*values for AttachedPhyInfo fields */ +#define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040) +#define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020) +#define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010) + +#define MPI2_SAS_APHYINFO_REASON_MASK (0x0000000F) +#define MPI2_SAS_APHYINFO_REASON_UNKNOWN (0x00000000) +#define MPI2_SAS_APHYINFO_REASON_POWER_ON (0x00000001) +#define 
MPI2_SAS_APHYINFO_REASON_HARD_RESET (0x00000002) +#define MPI2_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003) +#define MPI2_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004) +#define MPI2_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005) +#define MPI2_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006) +#define MPI2_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007) +#define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008) + + +/*values for PhyInfo fields */ +#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) + +#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000) +#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27) +#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000) +#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000) +#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000) + +#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000) +#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000) +#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000) +#define MPI2_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000) +#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS (0x00200000) +#define MPI2_SAS_PHYINFO_ZONING_ENABLED (0x00100000) + +#define MPI2_SAS_PHYINFO_REASON_MASK (0x000F0000) +#define MPI2_SAS_PHYINFO_REASON_UNKNOWN (0x00000000) +#define MPI2_SAS_PHYINFO_REASON_POWER_ON (0x00010000) +#define MPI2_SAS_PHYINFO_REASON_HARD_RESET (0x00020000) +#define MPI2_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000) +#define MPI2_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000) +#define MPI2_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000) +#define MPI2_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000) +#define MPI2_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000) +#define MPI2_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000) + +#define MPI2_SAS_PHYINFO_MULTIPLEXING_SUPPORTED (0x00008000) +#define MPI2_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000) +#define MPI2_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000) +#define MPI2_SAS_PHYINFO_VIRTUAL_PHY (0x00001000) + +#define MPI2_SAS_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00) +#define MPI2_SAS_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8) + +#define MPI2_SAS_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0) +#define MPI2_SAS_PHYINFO_DIRECT_ROUTING (0x00000000) +#define MPI2_SAS_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010) +#define MPI2_SAS_PHYINFO_TABLE_ROUTING (0x00000020) + + +/*values for SAS ProgrammedLinkRate fields */ +#define MPI2_SAS_PRATE_MAX_RATE_MASK (0xF0) +#define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00) +#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80) +#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90) +#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0) +#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F) +#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00) +#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08) +#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09) +#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A) +#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B) + + +/*values for SAS HwLinkRate fields */ +#define MPI2_SAS_HWRATE_MAX_RATE_MASK (0xF0) +#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80) +#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90) +#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0) +#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F) +#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08) +#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09) +#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A) +#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B) + + + +/**************************************************************************** +* SAS IO Unit Config Pages +****************************************************************************/ + 
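+/*
+ *Editorial note (not part of the original header): several structures in
+ *this section carry a NegotiatedLinkRate byte that packs a logical rate in
+ *the high nibble and a physical rate in the low nibble; a hedged decode
+ *sketch using the masks defined above:
+ *
+ *    logical  = (rate & MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL) >>
+ *               MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL;
+ *    physical =  rate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL;
+ *
+ *Both results are MPI2_SAS_NEG_LINK_RATE_ codes, e.g.
+ *MPI2_SAS_NEG_LINK_RATE_6_0 for a 6.0 Gb/s link.
+ */
+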
+/*SAS IO Unit Page 0 */ + +typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA { + U8 Port; /*0x00 */ + U8 PortFlags; /*0x01 */ + U8 PhyFlags; /*0x02 */ + U8 NegotiatedLinkRate; /*0x03 */ + U32 ControllerPhyDeviceInfo;/*0x04 */ + U16 AttachedDevHandle; /*0x08 */ + U16 ControllerDevHandle; /*0x0A */ + U32 DiscoveryStatus; /*0x0C */ + U32 Reserved; /*0x10 */ +} MPI2_SAS_IO_UNIT0_PHY_DATA, + *PTR_MPI2_SAS_IO_UNIT0_PHY_DATA, + Mpi2SasIOUnit0PhyData_t, + *pMpi2SasIOUnit0PhyData_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT0_PHY_MAX +#define MPI2_SAS_IOUNIT0_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1;/*0x08 */ + U8 NumPhys; /*0x0C */ + U8 Reserved2;/*0x0D */ + U16 Reserved3;/*0x0E */ + MPI2_SAS_IO_UNIT0_PHY_DATA + PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /*0x10 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_0, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0, + Mpi2SasIOUnitPage0_t, *pMpi2SasIOUnitPage0_t; + +#define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05) + +/*values for SAS IO Unit Page 0 PortFlags */ +#define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08) +#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01) + +/*values for SAS IO Unit Page 0 PhyFlags */ +#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10) +#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) + +/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/*see mpi2_sas.h for values for + *SAS IO Unit Page 0 ControllerPhyDeviceInfo values */ + +/*values for SAS IO Unit Page 0 DiscoveryStatus */ +#define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_SASIOUNIT0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_SASIOUNIT0_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_SASIOUNIT0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_SASIOUNIT0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_SASIOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_SASIOUNIT0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define MPI2_SASIOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_SASIOUNIT0_DS_TABLE_LINK (0x00000400) +#define MPI2_SASIOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_SASIOUNIT0_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_SASIOUNIT0_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_SASIOUNIT0_DS_INDEX_NOT_EXIST (0x00000040) +#define MPI2_SASIOUNIT0_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_SASIOUNIT0_DS_SMP_TIMEOUT (0x00000010) +#define MPI2_SASIOUNIT0_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_SASIOUNIT0_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_SASIOUNIT0_DS_LOOP_DETECTED (0x00000001) + + +/*SAS IO Unit Page 1 */ + +typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA { + U8 Port; /*0x00 */ + U8 PortFlags; /*0x01 */ + U8 PhyFlags; /*0x02 */ + U8 MaxMinLinkRate; /*0x03 */ + U32 ControllerPhyDeviceInfo; /*0x04 */ + U16 MaxTargetPortConnectTime; /*0x08 */ + U16 Reserved1; /*0x0A */ +} MPI2_SAS_IO_UNIT1_PHY_DATA, + *PTR_MPI2_SAS_IO_UNIT1_PHY_DATA, + Mpi2SasIOUnit1PhyData_t, + *pMpi2SasIOUnit1PhyData_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. 
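+ *
+ *Editorial sketch (pointer names are illustrative): Page 1 is the writable
+ *counterpart of Page 0, so capping one phy at 3.0 Gb/s means rewriting its
+ *MaxMinLinkRate (values defined below) and writing the page back with the
+ *MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT or _WRITE_NVRAM actions:
+ *
+ *    page1->PhyData[phy].MaxMinLinkRate =
+ *        MPI2_SASIOUNIT1_MAX_RATE_3_0 | MPI2_SASIOUNIT1_MIN_RATE_1_5;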
+ */ +#ifndef MPI2_SAS_IOUNIT1_PHY_MAX +#define MPI2_SAS_IOUNIT1_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U16 + ControlFlags; /*0x08 */ + U16 + SASNarrowMaxQueueDepth; /*0x0A */ + U16 + AdditionalControlFlags; /*0x0C */ + U16 + SASWideMaxQueueDepth; /*0x0E */ + U8 + NumPhys; /*0x10 */ + U8 + SATAMaxQDepth; /*0x11 */ + U8 + ReportDeviceMissingDelay; /*0x12 */ + U8 + IODeviceMissingDelay; /*0x13 */ + MPI2_SAS_IO_UNIT1_PHY_DATA + PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /*0x14 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_1, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1, + Mpi2SasIOUnitPage1_t, *pMpi2SasIOUnitPage1_t; + +#define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09) + +/*values for SAS IO Unit Page 1 ControlFlags */ +#define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX (0x4000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000) + +#define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600) +#define MPI2_SASIOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x0) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x1) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x2) + +#define MPI2_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080) +#define MPI2_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040) +#define MPI2_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020) +#define MPI2_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010) +#define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008) +#define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004) +#define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002) +#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001) + +/*values for SAS IO Unit Page 1 AdditionalControlFlags */ +#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080) +#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040) +#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020) +#define MPI2_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010) +#define MPI2_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008) +#define MPI2_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004) +#define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002) +#define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001) + +/*defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */ +#define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F) +#define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80) + +/*values for SAS IO Unit Page 1 PortFlags */ +#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01) + +/*values for SAS IO Unit Page 1 PhyFlags */ +#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10) +#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) + +/*values for SAS IO Unit Page 1 MaxMinLinkRate */ +#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0) +#define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80) +#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90) +#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0) +#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0) +#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F) +#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08) +#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09) +#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A) +#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B) + +/*see mpi2_sas.h for values for + *SAS IO Unit Page 1 ControllerPhyDeviceInfo values */ + + +/*SAS IO Unit Page 4 */ + +typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP 
{ + U8 MaxTargetSpinup; /*0x00 */ + U8 SpinupDelay; /*0x01 */ + U8 SpinupFlags; /*0x02 */ + U8 Reserved1; /*0x03 */ +} MPI2_SAS_IOUNIT4_SPINUP_GROUP, + *PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP, + Mpi2SasIOUnit4SpinupGroup_t, + *pMpi2SasIOUnit4SpinupGroup_t; +/*defines for SAS IO Unit Page 4 SpinupFlags */ +#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01) + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *four and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT4_PHY_MAX +#define MPI2_SAS_IOUNIT4_PHY_MAX (4) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header;/*0x00 */ + MPI2_SAS_IOUNIT4_SPINUP_GROUP + SpinupGroupParameters[4]; /*0x08 */ + U32 + Reserved1; /*0x18 */ + U32 + Reserved2; /*0x1C */ + U32 + Reserved3; /*0x20 */ + U8 + BootDeviceWaitTime; /*0x24 */ + U8 + Reserved4; /*0x25 */ + U16 + Reserved5; /*0x26 */ + U8 + NumPhys; /*0x28 */ + U8 + PEInitialSpinupDelay; /*0x29 */ + U8 + PEReplyDelay; /*0x2A */ + U8 + Flags; /*0x2B */ + U8 + PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /*0x2C */ +} MPI2_CONFIG_PAGE_SASIOUNIT_4, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4, + Mpi2SasIOUnitPage4_t, *pMpi2SasIOUnitPage4_t; + +#define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02) + +/*defines for Flags field */ +#define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01) + +/*defines for PHY field */ +#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03) + + +/*SAS IO Unit Page 5 */ + +typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS { + U8 ControlFlags; /*0x00 */ + U8 PortWidthModGroup; /*0x01 */ + U16 InactivityTimerExponent; /*0x02 */ + U8 SATAPartialTimeout; /*0x04 */ + U8 Reserved2; /*0x05 */ + U8 SATASlumberTimeout; /*0x06 */ + U8 Reserved3; /*0x07 */ + U8 SASPartialTimeout; /*0x08 */ + U8 Reserved4; /*0x09 */ + U8 SASSlumberTimeout; /*0x0A */ + U8 Reserved5; /*0x0B */ +} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, + *PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, + Mpi2SasIOUnit5PhyPmSettings_t, + *pMpi2SasIOUnit5PhyPmSettings_t; + +/*defines for ControlFlags field */ +#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08) +#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04) +#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02) +#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01) + +/*defines for PortWidthModGroup field */ +#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF) + +/*defines for InactivityTimerExponent field */ +#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12) +#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8) +#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4) +#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0) + +#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7) +#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6) +#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5) +#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4) +#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3) +#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2) +#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1) +#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime.
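A worked example of how the InactivityTimerExponent defines above fit together: each 3-bit subfield selects a time base of 10^n microseconds (n = 0 for ONE_MICROSECOND up to 7 for TEN_SECONDS), which scales the corresponding timeout byte. The sketch below is illustrative only and not part of the header; it assumes mpi2.h and mpi2_cnfg.h are on the include path, the helper name is made up, and the little-endian byte swapping a real driver would perform is omitted.

#include <stdint.h>
#include "mpi2.h"
#include "mpi2_cnfg.h"

/* Decode one PHY's SAS slumber timeout into microseconds: the
 * exponent subfield picks the 10^n us time base that multiplies the
 * SASSlumberTimeout byte. */
static uint64_t sas_slumber_timeout_us(const MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS *pm)
{
	unsigned int n = (pm->InactivityTimerExponent &
			  MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER) >>
			 MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER;
	uint64_t base_us = 1;

	while (n--)
		base_us *= 10;
	return (uint64_t)pm->SASSlumberTimeout * base_us;
}

The partial timeouts decode the same way with the matching _PARTIAL mask and shift.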
+ */ +#ifndef MPI2_SAS_IOUNIT5_PHY_MAX +#define MPI2_SAS_IOUNIT5_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 NumPhys; /*0x08 */ + U8 Reserved1;/*0x09 */ + U16 Reserved2;/*0x0A */ + U32 Reserved3;/*0x0C */ + MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS + SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX];/*0x10 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_5, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5, + Mpi2SasIOUnitPage5_t, *pMpi2SasIOUnitPage5_t; + +#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01) + + +/*SAS IO Unit Page 6 */ + +typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS { + U8 CurrentStatus; /*0x00 */ + U8 CurrentModulation; /*0x01 */ + U8 CurrentUtilization; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 Reserved2; /*0x04 */ +} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, + *PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, + Mpi2SasIOUnit6PortWidthModGroupStatus_t, + *pMpi2SasIOUnit6PortWidthModGroupStatus_t; + +/*defines for CurrentStatus field */ +#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00) +#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01) +#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02) +#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03) +#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04) +#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05) +#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06) +#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07) + +/*defines for CurrentModulation field */ +#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00) +#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01) +#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02) +#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumGroups at runtime. + */ +#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX +#define MPI2_SAS_IOUNIT6_GROUP_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ + U8 NumGroups; /*0x10 */ + U8 Reserved3; /*0x11 */ + U16 Reserved4; /*0x12 */ + MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS + PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /*0x14 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_6, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6, + Mpi2SasIOUnitPage6_t, *pMpi2SasIOUnitPage6_t; + +#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00) + + +/*SAS IO Unit Page 7 */ + +typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS { + U8 Flags; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U8 Threshold75Pct; /*0x04 */ + U8 Threshold50Pct; /*0x05 */ + U8 Threshold25Pct; /*0x06 */ + U8 Reserved3; /*0x07 */ +} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, + *PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, + Mpi2SasIOUnit7PortWidthModGroupSettings_t, + *pMpi2SasIOUnit7PortWidthModGroupSettings_t; + +/*defines for Flags field */ +#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01) + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumGroups at runtime. 
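The CurrentModulation codes in SAS IO Unit Page 6 above step through quarter-width settings, so decoding them is a four-way switch. A minimal sketch, assuming mpi2.h and mpi2_cnfg.h are included; the function name is hypothetical:

#include "mpi2.h"
#include "mpi2_cnfg.h"

/* Translate a port width modulation group's CurrentModulation code
 * into a percentage; -1 for codes outside the defined range. */
static int pwm_group_percent(U8 current_modulation)
{
	switch (current_modulation) {
	case MPI2_SASIOUNIT6_MODULATION_25_PERCENT:
		return 25;
	case MPI2_SASIOUNIT6_MODULATION_50_PERCENT:
		return 50;
	case MPI2_SASIOUNIT6_MODULATION_75_PERCENT:
		return 75;
	case MPI2_SASIOUNIT6_MODULATION_100_PERCENT:
		return 100;
	default:
		return -1;
	}
}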
+ */ +#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX +#define MPI2_SAS_IOUNIT7_GROUP_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 SamplingInterval; /*0x08 */ + U8 WindowLength; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Reserved2; /*0x0C */ + U32 Reserved3; /*0x10 */ + U8 NumGroups; /*0x14 */ + U8 Reserved4; /*0x15 */ + U16 Reserved5; /*0x16 */ + MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS + PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX];/*0x18 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_7, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7, + Mpi2SasIOUnitPage7_t, *pMpi2SasIOUnitPage7_t; + +#define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00) + + +/*SAS IO Unit Page 8 */ + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U32 + PowerManagementCapabilities; /*0x0C */ + U8 + TxRxSleepStatus; /*0x10 */ + U8 + Reserved2; /*0x11 */ + U16 + Reserved3; /*0x12 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_8, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8, + Mpi2SasIOUnitPage8_t, *pMpi2SasIOUnitPage8_t; + +#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00) + +/*defines for PowerManagementCapabilities field */ +#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000) +#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800) +#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400) +#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200) +#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100) +#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001) + +/*defines for TxRxSleepStatus field */ +#define MPI25_SASIOUNIT8_TXRXSLEEP_UNSUPPORTED (0x00) +#define MPI25_SASIOUNIT8_TXRXSLEEP_DISENGAGED (0x01) +#define MPI25_SASIOUNIT8_TXRXSLEEP_ACTIVE (0x02) +#define MPI25_SASIOUNIT8_TXRXSLEEP_SHUTDOWN (0x03) + + + +/*SAS IO Unit Page 16 */ + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U64 + TimeStamp; /*0x08 */ + U32 + Reserved1; /*0x10 */ + U32 + Reserved2; /*0x14 */ + U32 + FastPathPendedRequests; /*0x18 */ + U32 + FastPathUnPendedRequests; /*0x1C */ + U32 + FastPathHostRequestStarts; /*0x20 */ + U32 + FastPathFirmwareRequestStarts; /*0x24 */ + U32 + FastPathHostCompletions; /*0x28 */ + U32 + FastPathFirmwareCompletions; /*0x2C */ + U32 + NonFastPathRequestStarts; /*0x30 */ + U32 + NonFastPathHostCompletions; /*0x34 */ +} MPI2_CONFIG_PAGE_SASIOUNIT16, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT16, + Mpi2SasIOUnitPage16_t, *pMpi2SasIOUnitPage16_t; + +#define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00) + + +/**************************************************************************** +* SAS Expander Config Pages +****************************************************************************/ + +/*SAS Expander Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U8 + PhysicalPort; /*0x08 */ + U8 + ReportGenLength; /*0x09 */ + U16 + EnclosureHandle; /*0x0A */ + U64 + SASAddress; /*0x0C */ + U32 + DiscoveryStatus; /*0x14 */ + U16 + DevHandle; /*0x18 */ + U16 + ParentDevHandle; /*0x1A */ + U16 + ExpanderChangeCount; /*0x1C */ + U16 + ExpanderRouteIndexes; /*0x1E */ + U8 + NumPhys; /*0x20 */ + U8 + SASLevel;
/*0x21 */ + U16 + Flags; /*0x22 */ + U16 + STPBusInactivityTimeLimit; /*0x24 */ + U16 + STPMaxConnectTimeLimit; /*0x26 */ + U16 + STP_SMP_NexusLossTime; /*0x28 */ + U16 + MaxNumRoutedSasAddresses; /*0x2A */ + U64 + ActiveZoneManagerSASAddress;/*0x2C */ + U16 + ZoneLockInactivityLimit; /*0x34 */ + U16 + Reserved1; /*0x36 */ + U8 + TimeToReducedFunc; /*0x38 */ + U8 + InitialTimeToReducedFunc; /*0x39 */ + U8 + MaxReducedFuncTime; /*0x3A */ + U8 + Reserved2; /*0x3B */ +} MPI2_CONFIG_PAGE_EXPANDER_0, + *PTR_MPI2_CONFIG_PAGE_EXPANDER_0, + Mpi2ExpanderPage0_t, *pMpi2ExpanderPage0_t; + +#define MPI2_SASEXPANDER0_PAGEVERSION (0x06) + +/*values for SAS Expander Page 0 DiscoveryStatus field */ +#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_SAS_EXPANDER0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_SAS_EXPANDER0_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_SAS_EXPANDER0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_SAS_EXPANDER0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_SAS_EXPANDER0_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_SAS_EXPANDER0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_SAS_EXPANDER0_DS_TABLE_LINK (0x00000400) +#define MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_SAS_EXPANDER0_DS_INDEX_NOT_EXIST (0x00000040) +#define MPI2_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT (0x00000010) +#define MPI2_SAS_EXPANDER0_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001) + +/*values for SAS Expander Page 0 Flags field */ +#define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000) +#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000) +#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800) +#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400) +#define MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT (0x0200) +#define MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING (0x0100) +#define MPI2_SAS_EXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080) +#define MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010) +#define MPI2_SAS_EXPANDER0_FLAGS_OTHERS_CONFIG (0x0004) +#define MPI2_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002) +#define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001) + + +/*SAS Expander Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U8 + PhysicalPort; /*0x08 */ + U8 + Reserved1; /*0x09 */ + U16 + Reserved2; /*0x0A */ + U8 + NumPhys; /*0x0C */ + U8 + Phy; /*0x0D */ + U16 + NumTableEntriesProgrammed; /*0x0E */ + U8 + ProgrammedLinkRate; /*0x10 */ + U8 + HwLinkRate; /*0x11 */ + U16 + AttachedDevHandle; /*0x12 */ + U32 + PhyInfo; /*0x14 */ + U32 + AttachedDeviceInfo; /*0x18 */ + U16 + ExpanderDevHandle; /*0x1C */ + U8 + ChangeCount; /*0x1E */ + U8 + NegotiatedLinkRate; /*0x1F */ + U8 + PhyIdentifier; /*0x20 */ + U8 + AttachedPhyIdentifier; /*0x21 */ + U8 + Reserved3; /*0x22 */ + U8 + DiscoveryInfo; /*0x23 */ + U32 + AttachedPhyInfo; /*0x24 */ + U8 + ZoneGroup; /*0x28 */ + U8 + SelfConfigStatus; /*0x29 */ + U16 + Reserved4; /*0x2A */ +} 
MPI2_CONFIG_PAGE_EXPANDER_1, + *PTR_MPI2_CONFIG_PAGE_EXPANDER_1, + Mpi2ExpanderPage1_t, *pMpi2ExpanderPage1_t; + +#define MPI2_SASEXPANDER1_PAGEVERSION (0x02) + +/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ + +/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ + +/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */ + +/*see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines + *used for the AttachedDeviceInfo field */ + +/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/*values for SAS Expander Page 1 DiscoveryInfo field */ +#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04) +#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02) +#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01) + +/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ + + +/**************************************************************************** +* SAS Device Config Pages +****************************************************************************/ + +/*SAS Device Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U16 + Slot; /*0x08 */ + U16 + EnclosureHandle; /*0x0A */ + U64 + SASAddress; /*0x0C */ + U16 + ParentDevHandle; /*0x14 */ + U8 + PhyNum; /*0x16 */ + U8 + AccessStatus; /*0x17 */ + U16 + DevHandle; /*0x18 */ + U8 + AttachedPhyIdentifier; /*0x1A */ + U8 + ZoneGroup; /*0x1B */ + U32 + DeviceInfo; /*0x1C */ + U16 + Flags; /*0x20 */ + U8 + PhysicalPort; /*0x22 */ + U8 + MaxPortConnections; /*0x23 */ + U64 + DeviceName; /*0x24 */ + U8 + PortGroups; /*0x2C */ + U8 + DmaGroup; /*0x2D */ + U8 + ControlGroup; /*0x2E */ + U8 + Reserved1; /*0x2F */ + U32 + Reserved2; /*0x30 */ + U32 + Reserved3; /*0x34 */ +} MPI2_CONFIG_PAGE_SAS_DEV_0, + *PTR_MPI2_CONFIG_PAGE_SAS_DEV_0, + Mpi2SasDevicePage0_t, + *pMpi2SasDevicePage0_t; + +#define MPI2_SASDEVICE0_PAGEVERSION (0x08) + +/*values for SAS Device Page 0 AccessStatus field */ +#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04) +#define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05) +#define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06) +#define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07) +/*specific values for SATA Init failures */ +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F) + +/*see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ + +/*values for SAS Device Page 0 Flags field */ +#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000) +#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH (0x4000) +#define MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000) +#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000) +#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800) 
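A note on the AccessStatus values above before the remaining SAS Device Page 0 Flags bits continue below: the SATA initialization failure reasons occupy their own 0x10 to 0x1F sub-range, so host code can classify them with a range check. A hedged sketch, with an invented function name and mpi2.h/mpi2_cnfg.h assumed to be included:

#include "mpi2.h"
#include "mpi2_cnfg.h"

/* Nonzero when AccessStatus reports a SATA initialization failure,
 * either the generic code or one of the specific SIF_ reasons. */
static int access_status_is_sata_init_failure(U8 access_status)
{
	return access_status == MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED ||
	       (access_status >= MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN &&
		access_status <= MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX);
}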
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200) +#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED (0x0080) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) +#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008) +#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) + + +/*SAS Device Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U64 + SASAddress; /*0x0C */ + U32 + Reserved2; /*0x14 */ + U16 + DevHandle; /*0x18 */ + U16 + Reserved3; /*0x1A */ + U8 + InitialRegDeviceFIS[20];/*0x1C */ +} MPI2_CONFIG_PAGE_SAS_DEV_1, + *PTR_MPI2_CONFIG_PAGE_SAS_DEV_1, + Mpi2SasDevicePage1_t, + *pMpi2SasDevicePage1_t; + +#define MPI2_SASDEVICE1_PAGEVERSION (0x01) + + +/**************************************************************************** +* SAS PHY Config Pages +****************************************************************************/ + +/*SAS PHY Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U16 + OwnerDevHandle; /*0x08 */ + U16 + Reserved1; /*0x0A */ + U16 + AttachedDevHandle; /*0x0C */ + U8 + AttachedPhyIdentifier; /*0x0E */ + U8 + Reserved2; /*0x0F */ + U32 + AttachedPhyInfo; /*0x10 */ + U8 + ProgrammedLinkRate; /*0x14 */ + U8 + HwLinkRate; /*0x15 */ + U8 + ChangeCount; /*0x16 */ + U8 + Flags; /*0x17 */ + U32 + PhyInfo; /*0x18 */ + U8 + NegotiatedLinkRate; /*0x1C */ + U8 + Reserved3; /*0x1D */ + U16 + Reserved4; /*0x1E */ +} MPI2_CONFIG_PAGE_SAS_PHY_0, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_0, + Mpi2SasPhyPage0_t, *pMpi2SasPhyPage0_t; + +#define MPI2_SASPHY0_PAGEVERSION (0x03) + +/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ + +/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ + +/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ + +/*values for SAS PHY Page 0 Flags field */ +#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01) + +/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */ + +/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + + +/*SAS PHY Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U32 + InvalidDwordCount; /*0x0C */ + U32 + RunningDisparityErrorCount; /*0x10 */ + U32 + LossDwordSynchCount; /*0x14 */ + U32 + PhyResetProblemCount; /*0x18 */ +} MPI2_CONFIG_PAGE_SAS_PHY_1, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_1, + Mpi2SasPhyPage1_t, *pMpi2SasPhyPage1_t; + +#define MPI2_SASPHY1_PAGEVERSION (0x01) + + +/*SAS PHY Page 2 */ + +typedef struct _MPI2_SASPHY2_PHY_EVENT { + U8 PhyEventCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 PhyEventInfo; /*0x04 */ +} MPI2_SASPHY2_PHY_EVENT, *PTR_MPI2_SASPHY2_PHY_EVENT, + Mpi2SasPhy2PhyEvent_t, *pMpi2SasPhy2PhyEvent_t; + +/*use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */ + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhyEvents at runtime. 
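The four SAS PHY Page 1 counters above are free-running 32-bit error counters, so monitoring code compares snapshots rather than absolute values; unsigned subtraction keeps the deltas correct across counter wrap. An illustrative sketch only (invented name, headers assumed on the include path, endian conversion omitted):

#include <stdint.h>
#include "mpi2.h"
#include "mpi2_cnfg.h"

/* Nonzero if any PHY error counter advanced between two snapshots of
 * SAS PHY Page 1. */
static int phy_errors_advanced(const MPI2_CONFIG_PAGE_SAS_PHY_1 *prev,
			       const MPI2_CONFIG_PAGE_SAS_PHY_1 *cur)
{
	uint32_t delta = (cur->InvalidDwordCount - prev->InvalidDwordCount) |
			 (cur->RunningDisparityErrorCount -
			  prev->RunningDisparityErrorCount) |
			 (cur->LossDwordSynchCount - prev->LossDwordSynchCount) |
			 (cur->PhyResetProblemCount - prev->PhyResetProblemCount);

	return delta != 0;
}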
+ */ +#ifndef MPI2_SASPHY2_PHY_EVENT_MAX +#define MPI2_SASPHY2_PHY_EVENT_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U8 + NumPhyEvents; /*0x0C */ + U8 + Reserved2; /*0x0D */ + U16 + Reserved3; /*0x0E */ + MPI2_SASPHY2_PHY_EVENT + PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /*0x10 */ +} MPI2_CONFIG_PAGE_SAS_PHY_2, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_2, + Mpi2SasPhyPage2_t, + *pMpi2SasPhyPage2_t; + +#define MPI2_SASPHY2_PAGEVERSION (0x00) + + +/*SAS PHY Page 3 */ + +typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG { + U8 PhyEventCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U8 CounterType; /*0x04 */ + U8 ThresholdWindow; /*0x05 */ + U8 TimeUnits; /*0x06 */ + U8 Reserved3; /*0x07 */ + U32 EventThreshold; /*0x08 */ + U16 ThresholdFlags; /*0x0C */ + U16 Reserved4; /*0x0E */ +} MPI2_SASPHY3_PHY_EVENT_CONFIG, + *PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG, + Mpi2SasPhy3PhyEventConfig_t, + *pMpi2SasPhy3PhyEventConfig_t; + +/*values for PhyEventCode field */ +#define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00) +#define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01) +#define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02) +#define MPI2_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03) +#define MPI2_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04) +#define MPI2_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05) +#define MPI2_SASPHY3_EVENT_CODE_RX_ERROR (0x06) +#define MPI2_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20) +#define MPI2_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21) +#define MPI2_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22) +#define MPI2_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23) +#define MPI2_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26) +#define MPI2_SASPHY3_EVENT_CODE_TX_BREAK (0x27) +#define MPI2_SASPHY3_EVENT_CODE_RX_BREAK (0x28) +#define MPI2_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29) +#define MPI2_SASPHY3_EVENT_CODE_CONNECTION (0x2A) +#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2B) +#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2C) +#define MPI2_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2D) +#define MPI2_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2E) +#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40) +#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41) +#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42) +#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43) +#define MPI2_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44) +#define MPI2_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45) +#define MPI2_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50) +#define MPI2_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51) +#define MPI2_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52) +#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60) +#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61) +#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63) +#define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0) +#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2) + +/*values for the CounterType field */ +#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00) +#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01) +#define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02) + +/*values for the TimeUnits field */ +#define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00) +#define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01) +#define 
MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02) +#define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03) + +/*values for the ThresholdFlags field */ +#define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002) +#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhyEvents at runtime. + */ +#ifndef MPI2_SASPHY3_PHY_EVENT_MAX +#define MPI2_SASPHY3_PHY_EVENT_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U8 + NumPhyEvents; /*0x0C */ + U8 + Reserved2; /*0x0D */ + U16 + Reserved3; /*0x0E */ + MPI2_SASPHY3_PHY_EVENT_CONFIG + PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /*0x10 */ +} MPI2_CONFIG_PAGE_SAS_PHY_3, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_3, + Mpi2SasPhyPage3_t, *pMpi2SasPhyPage3_t; + +#define MPI2_SASPHY3_PAGEVERSION (0x00) + + +/*SAS PHY Page 4 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U16 + Reserved1; /*0x08 */ + U8 + Reserved2; /*0x0A */ + U8 + Flags; /*0x0B */ + U8 + InitialFrame[28]; /*0x0C */ +} MPI2_CONFIG_PAGE_SAS_PHY_4, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_4, + Mpi2SasPhyPage4_t, *pMpi2SasPhyPage4_t; + +#define MPI2_SASPHY4_PAGEVERSION (0x00) + +/*values for the Flags field */ +#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02) +#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01) + + + + +/**************************************************************************** +* SAS Port Config Pages +****************************************************************************/ + +/*SAS Port Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U8 + PortNumber; /*0x08 */ + U8 + PhysicalPort; /*0x09 */ + U8 + PortWidth; /*0x0A */ + U8 + PhysicalPortWidth; /*0x0B */ + U8 + ZoneGroup; /*0x0C */ + U8 + Reserved1; /*0x0D */ + U16 + Reserved2; /*0x0E */ + U64 + SASAddress; /*0x10 */ + U32 + DeviceInfo; /*0x18 */ + U32 + Reserved3; /*0x1C */ + U32 + Reserved4; /*0x20 */ +} MPI2_CONFIG_PAGE_SAS_PORT_0, + *PTR_MPI2_CONFIG_PAGE_SAS_PORT_0, + Mpi2SasPortPage0_t, *pMpi2SasPortPage0_t; + +#define MPI2_SASPORT0_PAGEVERSION (0x00) + +/*see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */ + + +/**************************************************************************** +* SAS Enclosure Config Pages +****************************************************************************/ + +/*SAS Enclosure Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U64 + EnclosureLogicalID; /*0x0C */ + U16 + Flags; /*0x14 */ + U16 + EnclosureHandle; /*0x16 */ + U16 + NumSlots; /*0x18 */ + U16 + StartSlot; /*0x1A */ + U16 + Reserved2; /*0x1C */ + U16 + SEPDevHandle; /*0x1E */ + U32 + Reserved3; /*0x20 */ + U32 + Reserved4; /*0x24 */ +} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, + *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, + Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t; + +#define MPI2_SASENCLOSURE0_PAGEVERSION (0x03) + +/*values for SAS Enclosure Page 0 Flags field */ +#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) +#define 
MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005) + + +/**************************************************************************** +* Log Config Page +****************************************************************************/ + +/*Log Page 0 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumLogEntries at runtime. + */ +#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES +#define MPI2_LOG_0_NUM_LOG_ENTRIES (1) +#endif + +#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C) + +typedef struct _MPI2_LOG_0_ENTRY { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U16 LogSequence; /*0x0C */ + U16 LogEntryQualifier; /*0x0E */ + U8 VP_ID; /*0x10 */ + U8 VF_ID; /*0x11 */ + U16 Reserved2; /*0x12 */ + U8 + LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/*0x14 */ +} MPI2_LOG_0_ENTRY, *PTR_MPI2_LOG_0_ENTRY, + Mpi2Log0Entry_t, *pMpi2Log0Entry_t; + +/*values for Log Page 0 LogEntry LogEntryQualifier field */ +#define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000) +#define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001) +#define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE (0x0002) +#define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC (0x8000) +#define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC (0xFFFF) + +typedef struct _MPI2_CONFIG_PAGE_LOG_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ + U16 NumLogEntries;/*0x10 */ + U16 Reserved3; /*0x12 */ + MPI2_LOG_0_ENTRY + LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /*0x14 */ +} MPI2_CONFIG_PAGE_LOG_0, *PTR_MPI2_CONFIG_PAGE_LOG_0, + Mpi2LogPage0_t, *pMpi2LogPage0_t; + +#define MPI2_LOG_0_PAGEVERSION (0x02) + + +/**************************************************************************** +* RAID Config Page +****************************************************************************/ + +/*RAID Page 0 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumElements at runtime. 
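The recurring "leave this define set to one" note means the trailing array in these pages is declared with a nominal [1] bound and is really sized by the runtime count the IOC returns. Using Log Page 0 as the example, a sketch of the access pattern, assuming the caller has already fetched the whole page into a sufficiently large buffer (illustrative name, headers assumed on the include path):

#include <stdio.h>
#include "mpi2.h"
#include "mpi2_cnfg.h"

/* Walk the log entries using the runtime NumLogEntries count, not the
 * compile-time MPI2_LOG_0_NUM_LOG_ENTRIES placeholder. */
static void dump_log_page0(const MPI2_CONFIG_PAGE_LOG_0 *page)
{
	const MPI2_LOG_0_ENTRY *entry = page->LogEntry;
	unsigned int i;

	for (i = 0; i < page->NumLogEntries; i++)
		printf("log entry %u: qualifier 0x%04x, sequence %u\n",
		       i, (unsigned int)entry[i].LogEntryQualifier,
		       (unsigned int)entry[i].LogSequence);
}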
+ */ +#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS +#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1) +#endif + +typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT { + U16 ElementFlags; /*0x00 */ + U16 VolDevHandle; /*0x02 */ + U8 HotSparePool; /*0x04 */ + U8 PhysDiskNum; /*0x05 */ + U16 PhysDiskDevHandle; /*0x06 */ +} MPI2_RAIDCONFIG0_CONFIG_ELEMENT, + *PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT, + Mpi2RaidConfig0ConfigElement_t, + *pMpi2RaidConfig0ConfigElement_t; + +/*values for the ElementFlags field */ +#define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F) +#define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT (0x0000) +#define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001) +#define MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002) +#define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003) + + +typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 NumHotSpares; /*0x08 */ + U8 NumPhysDisks; /*0x09 */ + U8 NumVolumes; /*0x0A */ + U8 ConfigNum; /*0x0B */ + U32 Flags; /*0x0C */ + U8 ConfigGUID[24]; /*0x10 */ + U32 Reserved1; /*0x28 */ + U8 NumElements; /*0x2C */ + U8 Reserved2; /*0x2D */ + U16 Reserved3; /*0x2E */ + MPI2_RAIDCONFIG0_CONFIG_ELEMENT + ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /*0x30 */ +} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0, + *PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0, + Mpi2RaidConfigurationPage0_t, + *pMpi2RaidConfigurationPage0_t; + +#define MPI2_RAIDCONFIG0_PAGEVERSION (0x00) + +/*values for RAID Configuration Page 0 Flags field */ +#define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001) + + +/**************************************************************************** +* Driver Persistent Mapping Config Pages +****************************************************************************/ + +/*Driver Persistent Mapping Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY { + U64 PhysicalIdentifier; /*0x00 */ + U16 MappingInformation; /*0x08 */ + U16 DeviceIndex; /*0x0A */ + U32 PhysicalBitsMapping; /*0x0C */ + U32 Reserved1; /*0x10 */ +} MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY, + *PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY, + Mpi2DriverMap0Entry_t, *pMpi2DriverMap0Entry_t; + +typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /*0x08 */ +} MPI2_CONFIG_PAGE_DRIVER_MAPPING_0, + *PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0, + Mpi2DriverMappingPage0_t, *pMpi2DriverMappingPage0_t; + +#define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00) + +/*values for Driver Persistent Mapping Page 0 MappingInformation field */ +#define MPI2_DRVMAP0_MAPINFO_SLOT_MASK (0x07F0) +#define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT (4) +#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F) + + +/**************************************************************************** +* Ethernet Config Pages +****************************************************************************/ + +/*Ethernet Page 0 */ + +/*IP address (union of IPv4 and IPv6) */ +typedef union _MPI2_ETHERNET_IP_ADDR { + U32 IPv4Addr; + U32 IPv6Addr[4]; +} MPI2_ETHERNET_IP_ADDR, *PTR_MPI2_ETHERNET_IP_ADDR, + Mpi2EthernetIpAddr_t, *pMpi2EthernetIpAddr_t; + +#define MPI2_ETHERNET_HOST_NAME_LENGTH (32) + +typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 NumInterfaces; /*0x08 */ + U8 Reserved0; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Status; /*0x0C */ + U8 MediaState; /*0x10 */ + U8 Reserved2; /*0x11 */ + U16 Reserved3; /*0x12 */ + U8 MacAddress[6]; /*0x14 */ + U8 
Reserved4; /*0x1A */ + U8 Reserved5; /*0x1B */ + MPI2_ETHERNET_IP_ADDR IpAddress; /*0x1C */ + MPI2_ETHERNET_IP_ADDR SubnetMask; /*0x2C */ + MPI2_ETHERNET_IP_ADDR GatewayIpAddress;/*0x3C */ + MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /*0x4C */ + MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /*0x5C */ + MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /*0x6C */ + U8 + HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_0, + *PTR_MPI2_CONFIG_PAGE_ETHERNET_0, + Mpi2EthernetPage0_t, *pMpi2EthernetPage0_t; + +#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00) + +/*values for Ethernet Page 0 Status field */ +#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000) +#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000) +#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000) +#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100) +#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080) +#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040) +#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020) +#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010) +#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008) +#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004) +#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002) +#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001) + +/*values for Ethernet Page 0 MediaState field */ +#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80) +#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00) +#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80) + +#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07) +#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00) +#define MPI2_ETHPG0_MS_10MBIT (0x01) +#define MPI2_ETHPG0_MS_100MBIT (0x02) +#define MPI2_ETHPG0_MS_1GBIT (0x03) + + +/*Ethernet Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved0; /*0x08 */ + U32 + Flags; /*0x0C */ + U8 + MediaState; /*0x10 */ + U8 + Reserved1; /*0x11 */ + U16 + Reserved2; /*0x12 */ + U8 + MacAddress[6]; /*0x14 */ + U8 + Reserved3; /*0x1A */ + U8 + Reserved4; /*0x1B */ + MPI2_ETHERNET_IP_ADDR + StaticIpAddress; /*0x1C */ + MPI2_ETHERNET_IP_ADDR + StaticSubnetMask; /*0x2C */ + MPI2_ETHERNET_IP_ADDR + StaticGatewayIpAddress; /*0x3C */ + MPI2_ETHERNET_IP_ADDR + StaticDNS1IpAddress; /*0x4C */ + MPI2_ETHERNET_IP_ADDR + StaticDNS2IpAddress; /*0x5C */ + U32 + Reserved5; /*0x6C */ + U32 + Reserved6; /*0x70 */ + U32 + Reserved7; /*0x74 */ + U32 + Reserved8; /*0x78 */ + U8 + HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_1, + *PTR_MPI2_CONFIG_PAGE_ETHERNET_1, + Mpi2EthernetPage1_t, *pMpi2EthernetPage1_t; + +#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00) + +/*values for Ethernet Page 1 Flags field */ +#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100) +#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080) +#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040) +#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020) +#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010) +#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008) +#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004) +#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002) +#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001) + +/*values for Ethernet Page 1 MediaState field */ +#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80) +#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00) +#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80) + +#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07) +#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00) +#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01) +#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02) 
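A decode sketch for the Ethernet Page 0 MediaState byte above, separating the duplex bit from the connect-speed subfield (the last Ethernet Page 1 data-rate define continues below). Illustrative only; the function name is invented and mpi2.h/mpi2_cnfg.h are assumed to be included:

#include <stdio.h>
#include "mpi2.h"
#include "mpi2_cnfg.h"

static void print_eth_media_state(U8 media_state)
{
	static const char *const speed[] = {
		"not connected", "10 Mbit", "100 Mbit", "1 Gbit"
	};
	U8 s = media_state & MPI2_ETHPG0_MS_CONNECT_SPEED_MASK;

	printf("%s, %s duplex\n",
	       s <= MPI2_ETHPG0_MS_1GBIT ? speed[s] : "unknown speed",
	       (media_state & MPI2_ETHPG0_MS_DUPLEX_MASK) ==
			MPI2_ETHPG0_MS_FULL_DUPLEX ? "full" : "half");
}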
+#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03) + + +/**************************************************************************** +* Extended Manufacturing Config Pages +****************************************************************************/ + +/* + *Generic structure to use for product-specific extended manufacturing pages + *(currently Extended Manufacturing Page 40 through Extended Manufacturing + *Page 60). + */ + +typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + ProductSpecificInfo; /*0x08 */ +} MPI2_CONFIG_PAGE_EXT_MAN_PS, + *PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS, + Mpi2ExtManufacturingPagePS_t, + *pMpi2ExtManufacturingPagePS_t; + +/*PageVersion should be provided by product-specific code */ + +#endif diff --git a/addons/mpt3sas/src/3.10.108/mpi/mpi2_init.h b/addons/mpt3sas/src/3.10.108/mpi/mpi2_init.h new file mode 100644 index 00000000..a079e524 --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpi/mpi2_init.h @@ -0,0 +1,560 @@ +/* + * Copyright (c) 2000-2012 LSI Corporation. + * + * + * Name: mpi2_init.h + * Title: MPI SCSI initiator mode messages and structures + * Creation Date: June 23, 2006 + * + * mpi2_init.h Version: 02.00.14 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t. + * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines. + * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention. + * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY. + * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t. + * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO + * Control field Task Attribute flags. + * Moved LUN field defines to mpi2.h because they are + * common to many structures. + * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to + * Query Asynchronous Event. + * Defined two new bits in the SlotStatus field of the SCSI + * Enclosure Processor Request and Reply. + * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for + * both SCSI IO Error Reply and SCSI Task Management Reply. + * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. + * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. + * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it. + * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request. + * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define. + * 11-18-11 02.00.12 Incorporating additions for MPI v2.5. + * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command + * Priority to match SAM-4. + * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY. + * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_INIT_H +#define MPI2_INIT_H + +/***************************************************************************** +* +* SCSI Initiator Messages +* +*****************************************************************************/ + +/**************************************************************************** +* SCSI IO messages and associated structures +****************************************************************************/ + +typedef struct _MPI2_SCSI_IO_CDB_EEDP32 { + U8 CDB[20]; /*0x00 */ + U32 PrimaryReferenceTag; /*0x14 */ + U16 PrimaryApplicationTag; /*0x18 */ + U16 PrimaryApplicationTagMask; /*0x1A */ + U32 TransferLength; /*0x1C */ +} MPI2_SCSI_IO_CDB_EEDP32, *PTR_MPI2_SCSI_IO_CDB_EEDP32, + Mpi2ScsiIoCdbEedp32_t, *pMpi2ScsiIoCdbEedp32_t; + +/*MPI v2.0 CDB field */ +typedef union _MPI2_SCSI_IO_CDB_UNION { + U8 CDB32[32]; + MPI2_SCSI_IO_CDB_EEDP32 EEDP32; + MPI2_SGE_SIMPLE_UNION SGE; +} MPI2_SCSI_IO_CDB_UNION, *PTR_MPI2_SCSI_IO_CDB_UNION, + Mpi2ScsiIoCdb_t, *pMpi2ScsiIoCdb_t; + +/*MPI v2.0 SCSI IO Request Message */ +typedef struct _MPI2_SCSI_IO_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U32 SenseBufferLowAddress; /*0x0C */ + U16 SGLFlags; /*0x10 */ + U8 SenseBufferLength; /*0x12 */ + U8 Reserved4; /*0x13 */ + U8 SGLOffset0; /*0x14 */ + U8 SGLOffset1; /*0x15 */ + U8 SGLOffset2; /*0x16 */ + U8 SGLOffset3; /*0x17 */ + U32 SkipCount; /*0x18 */ + U32 DataLength; /*0x1C */ + U32 BidirectionalDataLength; /*0x20 */ + U16 IoFlags; /*0x24 */ + U16 EEDPFlags; /*0x26 */ + U32 EEDPBlockSize; /*0x28 */ + U32 SecondaryReferenceTag; /*0x2C */ + U16 SecondaryApplicationTag; /*0x30 */ + U16 ApplicationTagTranslationMask; /*0x32 */ + U8 LUN[8]; /*0x34 */ + U32 Control; /*0x3C */ + MPI2_SCSI_IO_CDB_UNION CDB; /*0x40 */ + +#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */ + MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion; +#endif + + MPI2_SGE_IO_UNION SGL; /*0x60 */ + +} MPI2_SCSI_IO_REQUEST, *PTR_MPI2_SCSI_IO_REQUEST, + Mpi2SCSIIORequest_t, *pMpi2SCSIIORequest_t; + +/*SCSI IO MsgFlags bits */ + +/*MsgFlags for SenseBufferAddressSpace */ +#define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C) +#define MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00) +#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04) +#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08) +#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C) + +/*SCSI IO SGLFlags bits */ + +/*base values for Data Location Address Space */ +#define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C) +#define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00) +#define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04) +#define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08) +#define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C) + +/*base values for Type */ +#define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02) + +/*shift values for each sub-field */ +#define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12) +#define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8) +#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4) +#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0) + +/*number of SGLOffset fields */ +#define MPI2_SCSIIO_NUM_SGLOFFSETS (4) + +/*SCSI IO IoFlags bits */ + +/*Large CDB Address 
Space */ +#define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000) +#define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000) +#define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000) +#define MPI2_SCSIIO_CDB_ADDR_IOCPLB (0x4000) +#define MPI2_SCSIIO_CDB_ADDR_IOCPLBNTA (0x6000) + +#define MPI2_SCSIIO_IOFLAGS_LARGE_CDB (0x1000) +#define MPI2_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI2_SCSIIO_IOFLAGS_MULTICAST (0x0400) +#define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200) +#define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) + +/*SCSI IO EEDPFlags bits */ + +#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG (0x2000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_APPTAG (0x1000) + +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) + +#define MPI2_SCSIIO_EEDPFLAGS_PASSTHRU_REFTAG (0x0008) + +#define MPI2_SCSIIO_EEDPFLAGS_MASK_OP (0x0007) +#define MPI2_SCSIIO_EEDPFLAGS_NOOP_OP (0x0000) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_OP (0x0001) +#define MPI2_SCSIIO_EEDPFLAGS_STRIP_OP (0x0002) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) +#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) +#define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007) + +/*SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */ + +/*SCSI IO Control bits */ +#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000) +#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26) + +#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000) +#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24) +#define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000) +#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000) +#define MPI2_SCSIIO_CONTROL_READ (0x02000000) +#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000) + +#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800) +#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11) +/*alternate name for the previous field; called Command Priority in SAM-4 */ +#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800) +#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11) + +#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700) +#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000) +#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100) +#define MPI2_SCSIIO_CONTROL_ORDEREDQ (0x00000200) +#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400) + +#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0) +#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000) +#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040) +#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080) + +/*MPI v2.5 CDB field */ +typedef union _MPI25_SCSI_IO_CDB_UNION { + U8 CDB32[32]; + MPI2_SCSI_IO_CDB_EEDP32 EEDP32; + MPI2_IEEE_SGE_SIMPLE64 SGE; +} MPI25_SCSI_IO_CDB_UNION, *PTR_MPI25_SCSI_IO_CDB_UNION, + Mpi25ScsiIoCdb_t, *pMpi25ScsiIoCdb_t; + +/*MPI v2.5 SCSI IO Request Message */ +typedef struct _MPI25_SCSI_IO_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U32 SenseBufferLowAddress; /*0x0C */ + U8 DMAFlags; /*0x10 */ + U8 Reserved5; /*0x11 */ + U8 SenseBufferLength; /*0x12 */ + U8 Reserved4; /*0x13 */ + U8 SGLOffset0; /*0x14 */ + U8 SGLOffset1; /*0x15 */ + U8 SGLOffset2; /*0x16 */ + U8 SGLOffset3; /*0x17 */ + U32 SkipCount; /*0x18 */ + U32 DataLength; /*0x1C */ + U32 BidirectionalDataLength; /*0x20 
*/ + U16 IoFlags; /*0x24 */ + U16 EEDPFlags; /*0x26 */ + U16 EEDPBlockSize; /*0x28 */ + U16 Reserved6; /*0x2A */ + U32 SecondaryReferenceTag; /*0x2C */ + U16 SecondaryApplicationTag; /*0x30 */ + U16 ApplicationTagTranslationMask; /*0x32 */ + U8 LUN[8]; /*0x34 */ + U32 Control; /*0x3C */ + MPI25_SCSI_IO_CDB_UNION CDB; /*0x40 */ + +#ifdef MPI25_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */ + MPI25_SCSI_IO_VENDOR_UNIQUE VendorRegion; +#endif + + MPI25_SGE_IO_UNION SGL; /*0x60 */ + +} MPI25_SCSI_IO_REQUEST, *PTR_MPI25_SCSI_IO_REQUEST, + Mpi25SCSIIORequest_t, *pMpi25SCSIIORequest_t; + +/*use MPI2_SCSIIO_MSGFLAGS_ defines for the MsgFlags field */ + +/*Defines for the DMAFlags field + * Each setting affects 4 SGLS, from SGL0 to SGL3. + * D = Data + * C = Cache DIF + * I = Interleaved + * H = Host DIF + */ +#define MPI25_SCSIIO_DMAFLAGS_OP_MASK (0x0F) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_D (0x00) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_C (0x01) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_I (0x02) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_C (0x03) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_I (0x04) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_I_I (0x05) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_C (0x06) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_I (0x07) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_I_I (0x08) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_I_I_I (0x09) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_D (0x0A) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_C (0x0B) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_I (0x0C) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_C (0x0D) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_I (0x0E) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_I_I (0x0F) + +/*number of SGLOffset fields */ +#define MPI25_SCSIIO_NUM_SGLOFFSETS (4) + +/*defines for the IoFlags field */ +#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000) +#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000) +#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000) + +#define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000) +#define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) + +/*MPI v2.5 defines for the EEDPFlags bits */ +/*use MPI2_SCSIIO_EEDPFLAGS_ defines for the other EEDPFlags bits */ +#define MPI25_SCSIIO_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0) +#define MPI25_SCSIIO_EEDPFLAGS_COMPATIBLE_MODE (0x0000) +#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040) +#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080) +#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0) + +#define MPI25_SCSIIO_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030) +#define MPI25_SCSIIO_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000) +#define MPI25_SCSIIO_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010) + +/*use MPI2_LUN_ defines from mpi2.h for the LUN field */ + +/*use MPI2_SCSIIO_CONTROL_ defines for the Control field */ + +/*NOTE: The SCSI IO Reply is nearly the same for MPI 2.0 and MPI 2.5, so + * MPI2_SCSI_IO_REPLY is used for both. 
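As an example of composing the Control dword that both request formats share, using the MPI2_SCSIIO_CONTROL_ defines earlier in this file. A hedged sketch, not driver code: the name is invented, mpi2.h/mpi2_init.h are assumed to be included, and conversion to little endian is left to the caller.

#include "mpi2.h"
#include "mpi2_init.h"

/* Build Control for a simple-queued read or write at a given command
 * priority (0-15).  MPI2_SCSIIO_CONTROL_SIMPLEQ is zero, so the OR is
 * kept only for documentation. */
static U32 build_scsiio_control(int is_write, U32 cmd_priority)
{
	U32 control = is_write ? MPI2_SCSIIO_CONTROL_WRITE
			       : MPI2_SCSIIO_CONTROL_READ;

	control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
	control |= (cmd_priority << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT) &
		   MPI2_SCSIIO_CONTROL_CMDPRI_MASK;
	return control;
}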
+ */ + +/*SCSI IO Error Reply Message */ +typedef struct _MPI2_SCSI_IO_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U8 SCSIStatus; /*0x0C */ + U8 SCSIState; /*0x0D */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 TransferCount; /*0x14 */ + U32 SenseCount; /*0x18 */ + U32 ResponseInfo; /*0x1C */ + U16 TaskTag; /*0x20 */ + U16 Reserved4; /*0x22 */ + U32 BidirectionalTransferCount; /*0x24 */ + U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/ + U32 Reserved6; /*0x2C */ +} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY, + Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t; + +/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */ + +#define MPI2_SCSI_STATUS_GOOD (0x00) +#define MPI2_SCSI_STATUS_CHECK_CONDITION (0x02) +#define MPI2_SCSI_STATUS_CONDITION_MET (0x04) +#define MPI2_SCSI_STATUS_BUSY (0x08) +#define MPI2_SCSI_STATUS_INTERMEDIATE (0x10) +#define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14) +#define MPI2_SCSI_STATUS_RESERVATION_CONFLICT (0x18) +#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /*obsolete */ +#define MPI2_SCSI_STATUS_TASK_SET_FULL (0x28) +#define MPI2_SCSI_STATUS_ACA_ACTIVE (0x30) +#define MPI2_SCSI_STATUS_TASK_ABORTED (0x40) + +/*SCSI IO Reply SCSIState flags */ + +#define MPI2_SCSI_STATE_RESPONSE_INFO_VALID (0x10) +#define MPI2_SCSI_STATE_TERMINATED (0x08) +#define MPI2_SCSI_STATE_NO_SCSI_STATUS (0x04) +#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02) +#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01) + +/*masks and shifts for the ResponseInfo field */ + +#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF) +#define MPI2_SCSI_RI_SHIFT_REASONCODE (0) + +#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF) + +/**************************************************************************** +* SCSI Task Management messages +****************************************************************************/ + +/*SCSI Task Management Request Message */ +typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U8 Reserved1; /*0x04 */ + U8 TaskType; /*0x05 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U8 LUN[8]; /*0x0C */ + U32 Reserved4[7]; /*0x14 */ + U16 TaskMID; /*0x30 */ + U16 Reserved5; /*0x32 */ +} MPI2_SCSI_TASK_MANAGE_REQUEST, + *PTR_MPI2_SCSI_TASK_MANAGE_REQUEST, + Mpi2SCSITaskManagementRequest_t, + *pMpi2SCSITaskManagementRequest_t; + +/*TaskType values */ + +#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01) +#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02) +#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03) +#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) +#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06) +#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07) +#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08) +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09) +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A) + +/*obsolete TaskType name */ +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION \ + (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT) + +/*MsgFlags bits */ + +#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18) +#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00) +#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08) +#define 
MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10) + +#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01) + +/*SCSI Task Management Reply Message */ +typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U8 ResponseCode; /*0x04 */ + U8 TaskType; /*0x05 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 TerminationCount; /*0x14 */ + U32 ResponseInfo; /*0x18 */ +} MPI2_SCSI_TASK_MANAGE_REPLY, + *PTR_MPI2_SCSI_TASK_MANAGE_REPLY, + Mpi2SCSITaskManagementReply_t, *pMpi2SCSIManagementReply_t; + +/*ResponseCode values */ + +#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00) +#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02) +#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04) +#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05) +#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08) +#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09) +#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A) +#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80) + +/*masks and shifts for the ResponseInfo field */ + +#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF) +#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0) +#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00) +#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8) +#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000) +#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16) +#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000) +#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24) + +/**************************************************************************** +* SCSI Enclosure Processor messages +****************************************************************************/ + +/*SCSI Enclosure Processor Request Message */ +typedef struct _MPI2_SEP_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U8 Action; /*0x04 */ + U8 Flags; /*0x05 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 SlotStatus; /*0x0C */ + U32 Reserved3; /*0x10 */ + U32 Reserved4; /*0x14 */ + U32 Reserved5; /*0x18 */ + U16 Slot; /*0x1C */ + U16 EnclosureHandle; /*0x1E */ +} MPI2_SEP_REQUEST, *PTR_MPI2_SEP_REQUEST, + Mpi2SepRequest_t, *pMpi2SepRequest_t; + +/*Action defines */ +#define MPI2_SEP_REQ_ACTION_WRITE_STATUS (0x00) +#define MPI2_SEP_REQ_ACTION_READ_STATUS (0x01) + +/*Flags defines */ +#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00) +#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01) + +/*SlotStatus defines */ +#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000) +#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) +#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200) +#define MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100) +#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080) +#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040) +#define MPI2_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) +#define MPI2_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) +#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004) +#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002) +#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001) + +/*SCSI Enclosure Processor Reply Message */ +typedef struct _MPI2_SEP_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U8 Action; /*0x04 */ + U8 
Flags; /*0x05 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 SlotStatus; /*0x14 */ + U32 Reserved4; /*0x18 */ + U16 Slot; /*0x1C */ + U16 EnclosureHandle; /*0x1E */ +} MPI2_SEP_REPLY, *PTR_MPI2_SEP_REPLY, + Mpi2SepReply_t, *pMpi2SepReply_t; + +/*SlotStatus defines */ +#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000) +#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) +#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200) +#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100) +#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080) +#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040) +#define MPI2_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) +#define MPI2_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) +#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004) +#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002) +#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001) + +#endif diff --git a/addons/mpt3sas/src/3.10.108/mpi/mpi2_ioc.h b/addons/mpt3sas/src/3.10.108/mpi/mpi2_ioc.h new file mode 100644 index 00000000..0de425d8 --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpi/mpi2_ioc.h @@ -0,0 +1,1665 @@ +/* + * Copyright (c) 2000-2012 LSI Corporation. + * + * + * Name: mpi2_ioc.h + * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages + * Creation Date: October 11, 2006 + * + * mpi2_ioc.h Version: 02.00.21 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to + * MaxTargets. + * Added TotalImageSize field to FWDownload Request. + * Added reserved words to FWUpload Request. + * 06-26-07 02.00.02 Added IR Configuration Change List Event. + * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit + * request and replaced it with + * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth. + * Replaced the MinReplyQueueDepth field of the IOCFacts + * reply with MaxReplyDescriptorPostQueueDepth. + * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum + * depth for the Reply Descriptor Post Queue. + * Added SASAddress field to Initiator Device Table + * Overflow Event data. + * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING + * for SAS Initiator Device Status Change Event data. + * Modified Reason Code defines for SAS Topology Change + * List Event data, including adding a bit for PHY Vacant + * status, and adding a mask for the Reason Code. + * Added define for + * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING. + * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID. + * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of + * the IOCFacts Reply. + * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. + * Moved MPI2_VERSION_UNION to mpi2.h. 
+ * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks + * instead of enables, and added SASBroadcastPrimitiveMasks + * field. + * Added Log Entry Added Event and related structure. + * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID. + * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET. + * Added MaxVolumes and MaxPersistentEntries fields to + * IOCFacts reply. + * Added ProtocolFlags and IOCCapabilities fields to + * MPI2_FW_IMAGE_HEADER. + * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT. + * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to + * a U16 (from a U32). + * Removed extra 's' from EventMasks name. + * 06-27-08 02.00.08 Fixed an offset in a comment. + * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST. + * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and + * renamed MinReplyFrameSize to ReplyFrameSize. + * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX. + * Added two new RAIDOperation values for Integrated RAID + * Operations Status Event data. + * Added four new IR Configuration Change List Event data + * ReasonCode values. + * Added two new ReasonCode defines for SAS Device Status + * Change Event data. + * Added three new DiscoveryStatus bits for the SAS + * Discovery event data. + * Added Multiplexing Status Change bit to the PhyStatus + * field of the SAS Topology Change List event data. + * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY. + * BootFlags are now product-specific. + * Added defines for the individual signature bytes + * for MPI2_INIT_IMAGE_FOOTER. + * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define. + * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR + * define. + * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE + * define. + * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define. + * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define. + * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define. + * Added two new reason codes for SAS Device Status Change + * Event. + * Added new event: SAS PHY Counter. + * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. + * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. + * Added new product id family for 2208. + * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST. + * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY. + * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY. + * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY. + * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define. + * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define. + * Added Host Based Discovery Phy Event data. + * Added defines for ProductID Product field + * (MPI2_FW_HEADER_PID_). + * Modified values for SAS ProductID Family + * (MPI2_FW_HEADER_PID_FAMILY_). + * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines. + * Added PowerManagementControl Request structures and + * defines. + * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete. + * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define. + * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC. + * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added + * SASNotifyPrimitiveMasks field to + * MPI2_EVENT_NOTIFICATION_REQUEST. + * Added Temperature Threshold Event. + * Added Host Message Event. + * Added Send Host Message request and reply. 
+ * 05-25-11 02.00.18 For Extended Image Header, added + * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and + * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines. + * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define. + * 08-24-11 02.00.19 Added PhysicalPort field to + * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure. + * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete. + * 11-18-11 02.00.20 Incorporating additions for MPI v2.5. + * 03-29-12 02.00.21 Added a product specific range to event values. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_IOC_H +#define MPI2_IOC_H + +/***************************************************************************** +* +* IOC Messages +* +*****************************************************************************/ + +/**************************************************************************** +* IOCInit message +****************************************************************************/ + +/*IOCInit Request message */ +typedef struct _MPI2_IOC_INIT_REQUEST { + U8 WhoInit; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 MsgVersion; /*0x0C */ + U16 HeaderVersion; /*0x0E */ + U32 Reserved5; /*0x10 */ + U16 Reserved6; /*0x14 */ + U8 Reserved7; /*0x16 */ + U8 HostMSIxVectors; /*0x17 */ + U16 Reserved8; /*0x18 */ + U16 SystemRequestFrameSize; /*0x1A */ + U16 ReplyDescriptorPostQueueDepth; /*0x1C */ + U16 ReplyFreeQueueDepth; /*0x1E */ + U32 SenseBufferAddressHigh; /*0x20 */ + U32 SystemReplyAddressHigh; /*0x24 */ + U64 SystemRequestFrameBaseAddress; /*0x28 */ + U64 ReplyDescriptorPostQueueAddress; /*0x30 */ + U64 ReplyFreeQueueAddress; /*0x38 */ + U64 TimeStamp; /*0x40 */ +} MPI2_IOC_INIT_REQUEST, *PTR_MPI2_IOC_INIT_REQUEST, + Mpi2IOCInitRequest_t, *pMpi2IOCInitRequest_t; + +/*WhoInit values */ +#define MPI2_WHOINIT_NOT_INITIALIZED (0x00) +#define MPI2_WHOINIT_SYSTEM_BIOS (0x01) +#define MPI2_WHOINIT_ROM_BIOS (0x02) +#define MPI2_WHOINIT_PCI_PEER (0x03) +#define MPI2_WHOINIT_HOST_DRIVER (0x04) +#define MPI2_WHOINIT_MANUFACTURER (0x05) + +/*MsgVersion */ +#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00) +#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8) +#define MPI2_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF) +#define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT (0) + +/*HeaderVersion */ +#define MPI2_IOCINIT_HDRVERSION_UNIT_MASK (0xFF00) +#define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT (8) +#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF) +#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0) + +/*minimum depth for the Reply Descriptor Post Queue */ +#define MPI2_RDPQ_DEPTH_MIN (16) + +/*IOCInit Reply message */ +typedef struct _MPI2_IOC_INIT_REPLY { + U8 WhoInit; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_IOC_INIT_REPLY, *PTR_MPI2_IOC_INIT_REPLY, + Mpi2IOCInitReply_t, *pMpi2IOCInitReply_t; + +/**************************************************************************** +* IOCFacts message +****************************************************************************/ + +/*IOCFacts Request message */ +typedef struct _MPI2_IOC_FACTS_REQUEST { + U16 Reserved1; 
/*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ +} MPI2_IOC_FACTS_REQUEST, *PTR_MPI2_IOC_FACTS_REQUEST, + Mpi2IOCFactsRequest_t, *pMpi2IOCFactsRequest_t; + +/*IOCFacts Reply message */ +typedef struct _MPI2_IOC_FACTS_REPLY { + U16 MsgVersion; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 HeaderVersion; /*0x04 */ + U8 IOCNumber; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U16 IOCExceptions; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 MaxChainDepth; /*0x14 */ + U8 WhoInit; /*0x15 */ + U8 NumberOfPorts; /*0x16 */ + U8 MaxMSIxVectors; /*0x17 */ + U16 RequestCredit; /*0x18 */ + U16 ProductID; /*0x1A */ + U32 IOCCapabilities; /*0x1C */ + MPI2_VERSION_UNION FWVersion; /*0x20 */ + U16 IOCRequestFrameSize; /*0x24 */ + U16 IOCMaxChainSegmentSize; /*0x26 */ + U16 MaxInitiators; /*0x28 */ + U16 MaxTargets; /*0x2A */ + U16 MaxSasExpanders; /*0x2C */ + U16 MaxEnclosures; /*0x2E */ + U16 ProtocolFlags; /*0x30 */ + U16 HighPriorityCredit; /*0x32 */ + U16 MaxReplyDescriptorPostQueueDepth; /*0x34 */ + U8 ReplyFrameSize; /*0x36 */ + U8 MaxVolumes; /*0x37 */ + U16 MaxDevHandle; /*0x38 */ + U16 MaxPersistentEntries; /*0x3A */ + U16 MinDevHandle; /*0x3C */ + U16 Reserved4; /*0x3E */ +} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY, + Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t; + +/*MsgVersion */ +#define MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00) +#define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8) +#define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF) +#define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT (0) + +/*HeaderVersion */ +#define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00) +#define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT (8) +#define MPI2_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF) +#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0) + +/*IOCExceptions */ +#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100) + +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_GOOD (0x0000) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_BACKUP (0x0020) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_RESTORED (0x0040) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_CORRUPT_BACKUP (0x0060) + +#define MPI2_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED (0x0010) +#define MPI2_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0008) +#define MPI2_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004) +#define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002) +#define MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001) + +/*defines for WhoInit field are after the IOCInit Request */ + +/*ProductID field uses MPI2_FW_HEADER_PID_ */ + +/*IOCCapabilities */ +#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000) +#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000) +#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) +#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000) +#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000) +#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000) +#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800) +#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100) +#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080) +#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040) +#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020) +#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010) +#define 
MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008) +#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) + +/*ProtocolFlags */ +#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001) +#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002) + +/**************************************************************************** +* PortFacts message +****************************************************************************/ + +/*PortFacts Request message */ +typedef struct _MPI2_PORT_FACTS_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 PortNumber; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ +} MPI2_PORT_FACTS_REQUEST, *PTR_MPI2_PORT_FACTS_REQUEST, + Mpi2PortFactsRequest_t, *pMpi2PortFactsRequest_t; + +/*PortFacts Reply message */ +typedef struct _MPI2_PORT_FACTS_REPLY { + U16 Reserved1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 PortNumber; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 Reserved5; /*0x14 */ + U8 PortType; /*0x15 */ + U16 Reserved6; /*0x16 */ + U16 MaxPostedCmdBuffers; /*0x18 */ + U16 Reserved7; /*0x1A */ +} MPI2_PORT_FACTS_REPLY, *PTR_MPI2_PORT_FACTS_REPLY, + Mpi2PortFactsReply_t, *pMpi2PortFactsReply_t; + +/*PortType values */ +#define MPI2_PORTFACTS_PORTTYPE_INACTIVE (0x00) +#define MPI2_PORTFACTS_PORTTYPE_FC (0x10) +#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20) +#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30) +#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31) + +/**************************************************************************** +* PortEnable message +****************************************************************************/ + +/*PortEnable Request message */ +typedef struct _MPI2_PORT_ENABLE_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U8 Reserved2; /*0x04 */ + U8 PortFlags; /*0x05 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ +} MPI2_PORT_ENABLE_REQUEST, *PTR_MPI2_PORT_ENABLE_REQUEST, + Mpi2PortEnableRequest_t, *pMpi2PortEnableRequest_t; + +/*PortEnable Reply message */ +typedef struct _MPI2_PORT_ENABLE_REPLY { + U16 Reserved1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U8 Reserved2; /*0x04 */ + U8 PortFlags; /*0x05 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_PORT_ENABLE_REPLY, *PTR_MPI2_PORT_ENABLE_REPLY, + Mpi2PortEnableReply_t, *pMpi2PortEnableReply_t; + +/**************************************************************************** +* EventNotification message +****************************************************************************/ + +/*EventNotification Request message */ +#define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4) + +typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + U32 
EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; /*0x14 */ + U16 SASBroadcastPrimitiveMasks; /*0x24 */ + U16 SASNotifyPrimitiveMasks; /*0x26 */ + U32 Reserved8; /*0x28 */ +} MPI2_EVENT_NOTIFICATION_REQUEST, + *PTR_MPI2_EVENT_NOTIFICATION_REQUEST, + Mpi2EventNotificationRequest_t, + *pMpi2EventNotificationRequest_t; + +/*EventNotification Reply message */ +typedef struct _MPI2_EVENT_NOTIFICATION_REPLY { + U16 EventDataLength; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 AckRequired; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U16 Event; /*0x14 */ + U16 Reserved4; /*0x16 */ + U32 EventContext; /*0x18 */ + U32 EventData[1]; /*0x1C */ +} MPI2_EVENT_NOTIFICATION_REPLY, *PTR_MPI2_EVENT_NOTIFICATION_REPLY, + Mpi2EventNotificationReply_t, + *pMpi2EventNotificationReply_t; + +/*AckRequired */ +#define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00) +#define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED (0x01) + +/*Event */ +#define MPI2_EVENT_LOG_DATA (0x0001) +#define MPI2_EVENT_STATE_CHANGE (0x0002) +#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005) +#define MPI2_EVENT_EVENT_CHANGE (0x000A) +#define MPI2_EVENT_TASK_SET_FULL (0x000E) /*obsolete */ +#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F) +#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014) +#define MPI2_EVENT_SAS_DISCOVERY (0x0016) +#define MPI2_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017) +#define MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018) +#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019) +#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C) +#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D) +#define MPI2_EVENT_IR_VOLUME (0x001E) +#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F) +#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) +#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021) +#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022) +#define MPI2_EVENT_GPIO_INTERRUPT (0x0023) +#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024) +#define MPI2_EVENT_SAS_QUIESCE (0x0025) +#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026) +#define MPI2_EVENT_TEMP_THRESHOLD (0x0027) +#define MPI2_EVENT_HOST_MESSAGE (0x0028) +#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029) +#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E) +#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F) + +/*Log Entry Added Event data */ + +/*the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */ +#define MPI2_EVENT_DATA_LOG_DATA_LENGTH (0x1C) + +typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U16 LogSequence; /*0x0C */ + U16 LogEntryQualifier; /*0x0E */ + U8 VP_ID; /*0x10 */ + U8 VF_ID; /*0x11 */ + U16 Reserved2; /*0x12 */ + U8 LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH]; /*0x14 */ +} MPI2_EVENT_DATA_LOG_ENTRY_ADDED, + *PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED, + Mpi2EventDataLogEntryAdded_t, + *pMpi2EventDataLogEntryAdded_t; + +/*GPIO Interrupt Event data */ + +typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT { + U8 GPIONum; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ +} MPI2_EVENT_DATA_GPIO_INTERRUPT, + *PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT, + Mpi2EventDataGpioInterrupt_t, + *pMpi2EventDataGpioInterrupt_t; + +/*Temperature Threshold Event data */ + +typedef struct _MPI2_EVENT_DATA_TEMPERATURE { + U16 Status; /*0x00 */ + U8 SensorNum; /*0x02 */ + U8 Reserved1; /*0x03 */ + U16 CurrentTemperature; /*0x04 */ + U16 Reserved2; /*0x06 */ + 
U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ +} MPI2_EVENT_DATA_TEMPERATURE, + *PTR_MPI2_EVENT_DATA_TEMPERATURE, + Mpi2EventDataTemperature_t, *pMpi2EventDataTemperature_t; + +/*Temperature Threshold Event data Status bits */ +#define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008) +#define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004) +#define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002) +#define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001) + +/*Host Message Event data */ + +typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE { + U8 SourceVF_ID; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Reserved3; /*0x04 */ + U32 HostData[1]; /*0x08 */ +} MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE, + Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t; + +/*Power Performance Change Event */ + +typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE { + U8 CurrentPowerMode; /*0x00 */ + U8 PreviousPowerMode; /*0x01 */ + U16 Reserved1; /*0x02 */ +} MPI2_EVENT_DATA_POWER_PERF_CHANGE, + *PTR_MPI2_EVENT_DATA_POWER_PERF_CHANGE, + Mpi2EventDataPowerPerfChange_t, + *pMpi2EventDataPowerPerfChange_t; + +/*defines for CurrentPowerMode and PreviousPowerMode fields */ +#define MPI2_EVENT_PM_INIT_MASK (0xC0) +#define MPI2_EVENT_PM_INIT_UNAVAILABLE (0x00) +#define MPI2_EVENT_PM_INIT_HOST (0x40) +#define MPI2_EVENT_PM_INIT_IO_UNIT (0x80) +#define MPI2_EVENT_PM_INIT_PCIE_DPA (0xC0) + +#define MPI2_EVENT_PM_MODE_MASK (0x07) +#define MPI2_EVENT_PM_MODE_UNAVAILABLE (0x00) +#define MPI2_EVENT_PM_MODE_UNKNOWN (0x01) +#define MPI2_EVENT_PM_MODE_FULL_POWER (0x04) +#define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05) +#define MPI2_EVENT_PM_MODE_STANDBY (0x06) + +/*Hard Reset Received Event data */ + +typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED { + U8 Reserved1; /*0x00 */ + U8 Port; /*0x01 */ + U16 Reserved2; /*0x02 */ +} MPI2_EVENT_DATA_HARD_RESET_RECEIVED, + *PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED, + Mpi2EventDataHardResetReceived_t, + *pMpi2EventDataHardResetReceived_t; + +/*Task Set Full Event data */ +/* this event is obsolete */ + +typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL { + U16 DevHandle; /*0x00 */ + U16 CurrentDepth; /*0x02 */ +} MPI2_EVENT_DATA_TASK_SET_FULL, *PTR_MPI2_EVENT_DATA_TASK_SET_FULL, + Mpi2EventDataTaskSetFull_t, *pMpi2EventDataTaskSetFull_t; + +/*SAS Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE { + U16 TaskTag; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U8 ASC; /*0x04 */ + U8 ASCQ; /*0x05 */ + U16 DevHandle; /*0x06 */ + U32 Reserved2; /*0x08 */ + U64 SASAddress; /*0x0C */ + U8 LUN[8]; /*0x14 */ +} MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, + *PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, + Mpi2EventDataSasDeviceStatusChange_t, + *pMpi2EventDataSasDeviceStatusChange_t; + +/*SAS Device Status Change Event data ReasonCode values */ +#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05) +#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07) +#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) +#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09) +#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) +#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) +#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) +#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE 
(0x10) +#define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12) + +/*Integrated RAID Operation Status Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS { + U16 VolDevHandle; /*0x00 */ + U16 Reserved1; /*0x02 */ + U8 RAIDOperation; /*0x04 */ + U8 PercentComplete; /*0x05 */ + U16 Reserved2; /*0x06 */ + U32 Resereved3; /*0x08 */ +} MPI2_EVENT_DATA_IR_OPERATION_STATUS, + *PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS, + Mpi2EventDataIrOperationStatus_t, + *pMpi2EventDataIrOperationStatus_t; + +/*Integrated RAID Operation Status Event data RAIDOperation values */ +#define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00) +#define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02) +#define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03) +#define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04) + +/*Integrated RAID Volume Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_VOLUME { + U16 VolDevHandle; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 NewValue; /*0x04 */ + U32 PreviousValue; /*0x08 */ +} MPI2_EVENT_DATA_IR_VOLUME, *PTR_MPI2_EVENT_DATA_IR_VOLUME, + Mpi2EventDataIrVolume_t, *pMpi2EventDataIrVolume_t; + +/*Integrated RAID Volume Event data ReasonCode values */ +#define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED (0x01) +#define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED (0x02) +#define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03) + +/*Integrated RAID Physical Disk Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK { + U16 Reserved1; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysDiskNum; /*0x03 */ + U16 PhysDiskDevHandle; /*0x04 */ + U16 Reserved2; /*0x06 */ + U16 Slot; /*0x08 */ + U16 EnclosureHandle; /*0x0A */ + U32 NewValue; /*0x0C */ + U32 PreviousValue; /*0x10 */ +} MPI2_EVENT_DATA_IR_PHYSICAL_DISK, + *PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK, + Mpi2EventDataIrPhysicalDisk_t, + *pMpi2EventDataIrPhysicalDisk_t; + +/*Integrated RAID Physical Disk Event data ReasonCode values */ +#define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED (0x01) +#define MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED (0x02) +#define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03) + +/*Integrated RAID Configuration Change List Event data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumElements at runtime. 
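+ * + *Illustrative sketch only (not part of the published header): a driver + *receiving this event data could walk the variable-length list as shown + *below, where handle_ir_element() is a hypothetical host callback: + * + *    Mpi2EventDataIrConfigChangeList_t *list = event_data; + *    U8 i; + *    for (i = 0; i < list->NumElements; i++) + *            handle_ir_element(&list->ConfigElement[i]);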
+ */ +#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT +#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1) +#endif + +typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT { + U16 ElementFlags; /*0x00 */ + U16 VolDevHandle; /*0x02 */ + U8 ReasonCode; /*0x04 */ + U8 PhysDiskNum; /*0x05 */ + U16 PhysDiskDevHandle; /*0x06 */ +} MPI2_EVENT_IR_CONFIG_ELEMENT, *PTR_MPI2_EVENT_IR_CONFIG_ELEMENT, + Mpi2EventIrConfigElement_t, *pMpi2EventIrConfigElement_t; + +/*IR Configuration Change List Event data ElementFlags values */ +#define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002) + +/*IR Configuration Change List Event data ReasonCode values */ +#define MPI2_EVENT_IR_CHANGE_RC_ADDED (0x01) +#define MPI2_EVENT_IR_CHANGE_RC_REMOVED (0x02) +#define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03) +#define MPI2_EVENT_IR_CHANGE_RC_HIDE (0x04) +#define MPI2_EVENT_IR_CHANGE_RC_UNHIDE (0x05) +#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06) +#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07) +#define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED (0x08) +#define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED (0x09) + +typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST { + U8 NumElements; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 Reserved2; /*0x02 */ + U8 ConfigNum; /*0x03 */ + U32 Flags; /*0x04 */ + MPI2_EVENT_IR_CONFIG_ELEMENT + ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT];/*0x08 */ +} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, + *PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, + Mpi2EventDataIrConfigChangeList_t, + *pMpi2EventDataIrConfigChangeList_t; + +/*IR Configuration Change List Event data Flags values */ +#define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001) + +/*SAS Discovery Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY { + U8 Flags; /*0x00 */ + U8 ReasonCode; /*0x01 */ + U8 PhysicalPort; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 DiscoveryStatus; /*0x04 */ +} MPI2_EVENT_DATA_SAS_DISCOVERY, + *PTR_MPI2_EVENT_DATA_SAS_DISCOVERY, + Mpi2EventDataSasDiscovery_t, *pMpi2EventDataSasDiscovery_t; + +/*SAS Discovery Event data Flags values */ +#define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE (0x02) +#define MPI2_EVENT_SAS_DISC_IN_PROGRESS (0x01) + +/*SAS Discovery Event data ReasonCode values */ +#define MPI2_EVENT_SAS_DISC_RC_STARTED (0x01) +#define MPI2_EVENT_SAS_DISC_RC_COMPLETED (0x02) + +/*SAS Discovery Event data DiscoveryStatus values */ +#define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_EVENT_SAS_DISC_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_EVENT_SAS_DISC_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_EVENT_SAS_DISC_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_EVENT_SAS_DISC_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define MPI2_EVENT_SAS_DISC_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_EVENT_SAS_DISC_DS_TABLE_LINK (0x00000400) +#define MPI2_EVENT_SAS_DISC_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_EVENT_SAS_DISC_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_EVENT_SAS_DISC_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_EVENT_SAS_DISC_DS_INDEX_NOT_EXIST (0x00000040) +#define 
MPI2_EVENT_SAS_DISC_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_EVENT_SAS_DISC_DS_SMP_TIMEOUT (0x00000010) +#define MPI2_EVENT_SAS_DISC_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED (0x00000001) + +/*SAS Broadcast Primitive Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE { + U8 PhyNum; /*0x00 */ + U8 Port; /*0x01 */ + U8 PortWidth; /*0x02 */ + U8 Primitive; /*0x03 */ +} MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, + *PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, + Mpi2EventDataSasBroadcastPrimitive_t, + *pMpi2EventDataSasBroadcastPrimitive_t; + +/*defines for the Primitive field */ +#define MPI2_EVENT_PRIMITIVE_CHANGE (0x01) +#define MPI2_EVENT_PRIMITIVE_SES (0x02) +#define MPI2_EVENT_PRIMITIVE_EXPANDER (0x03) +#define MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04) +#define MPI2_EVENT_PRIMITIVE_RESERVED3 (0x05) +#define MPI2_EVENT_PRIMITIVE_RESERVED4 (0x06) +#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07) +#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08) + +/*SAS Notify Primitive Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE { + U8 PhyNum; /*0x00 */ + U8 Port; /*0x01 */ + U8 Reserved1; /*0x02 */ + U8 Primitive; /*0x03 */ +} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, + *PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, + Mpi2EventDataSasNotifyPrimitive_t, + *pMpi2EventDataSasNotifyPrimitive_t; + +/*defines for the Primitive field */ +#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01) +#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02) +#define MPI2_EVENT_NOTIFY_RESERVED1 (0x03) +#define MPI2_EVENT_NOTIFY_RESERVED2 (0x04) + +/*SAS Initiator Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE { + U8 ReasonCode; /*0x00 */ + U8 PhysicalPort; /*0x01 */ + U16 DevHandle; /*0x02 */ + U64 SASAddress; /*0x04 */ +} MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, + *PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, + Mpi2EventDataSasInitDevStatusChange_t, + *pMpi2EventDataSasInitDevStatusChange_t; + +/*SAS Initiator Device Status Change event ReasonCode values */ +#define MPI2_EVENT_SAS_INIT_RC_ADDED (0x01) +#define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02) + +/*SAS Initiator Device Table Overflow Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW { + U16 MaxInit; /*0x00 */ + U16 CurrentInit; /*0x02 */ + U64 SASAddress; /*0x04 */ +} MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, + *PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, + Mpi2EventDataSasInitTableOverflow_t, + *pMpi2EventDataSasInitTableOverflow_t; + +/*SAS Topology Change List Event data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumEntries at runtime. 
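+ * + *Illustrative sketch only (not part of the published header): entry i + *describes PHY number StartPhyNum + i, and the current link rate is kept + *in the upper nibble of LinkRate, so a host driver might decode it as: + * + *    Mpi2EventDataSasTopologyChangeList_t *tcl = event_data; + *    U8 i, rate; + *    for (i = 0; i < tcl->NumEntries; i++) + *            rate = (tcl->PHY[i].LinkRate & + *                    MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >> + *                    MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;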
+ */ +#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT +#define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1) +#endif + +typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY { + U16 AttachedDevHandle; /*0x00 */ + U8 LinkRate; /*0x02 */ + U8 PhyStatus; /*0x03 */ +} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, *PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY, + Mpi2EventSasTopoPhyEntry_t, *pMpi2EventSasTopoPhyEntry_t; + +typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST { + U16 EnclosureHandle; /*0x00 */ + U16 ExpanderDevHandle; /*0x02 */ + U8 NumPhys; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U8 NumEntries; /*0x08 */ + U8 StartPhyNum; /*0x09 */ + U8 ExpStatus; /*0x0A */ + U8 PhysicalPort; /*0x0B */ + MPI2_EVENT_SAS_TOPO_PHY_ENTRY + PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /*0x0C */ +} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, + *PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, + Mpi2EventDataSasTopologyChangeList_t, + *pMpi2EventDataSasTopologyChangeList_t; + +/*values for the ExpStatus field */ +#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00) +#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01) +#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) +#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03) +#define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04) + +/*defines for the LinkRate field */ +#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xF0) +#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4) +#define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK (0x0F) +#define MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT (0) + +#define MPI2_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00) +#define MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01) +#define MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02) +#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03) +#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04) +#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05) +#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A) +#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B) + +/*values for the PhyStatus field */ +#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80) +#define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE (0x10) +/*values for the PhyStatus ReasonCode sub-field */ +#define MPI2_EVENT_SAS_TOPO_RC_MASK (0x0F) +#define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01) +#define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02) +#define MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03) +#define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04) +#define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05) + +/*SAS Enclosure Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE { + U16 EnclosureHandle; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U64 EnclosureLogicalID; /*0x04 */ + U16 NumSlots; /*0x0C */ + U16 StartSlot; /*0x0E */ + U32 PhyBits; /*0x10 */ +} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, + *PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, + Mpi2EventDataSasEnclDevStatusChange_t, + *pMpi2EventDataSasEnclDevStatusChange_t; + +/*SAS Enclosure Device Status Change event ReasonCode values */ +#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01) +#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02) + +/*SAS PHY Counter Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U8 PhyEventCode; /*0x0C */ + U8 PhyNum; /*0x0D */ + U16 Reserved2; /*0x0E */ + U32 PhyEventInfo; /*0x10 */ + U8 CounterType; /*0x14 */ + U8 ThresholdWindow; /*0x15 */ + 
U8 TimeUnits; /*0x16 */ + U8 Reserved3; /*0x17 */ + U32 EventThreshold; /*0x18 */ + U16 ThresholdFlags; /*0x1C */ + U16 Reserved4; /*0x1E */ +} MPI2_EVENT_DATA_SAS_PHY_COUNTER, + *PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER, + Mpi2EventDataSasPhyCounter_t, + *pMpi2EventDataSasPhyCounter_t; + +/*use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h + *for the PhyEventCode field */ + +/*use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h + *for the CounterType field */ + +/*use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h + *for the TimeUnits field */ + +/*use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h + *for the ThresholdFlags field */ + +/*SAS Quiesce Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE { + U8 ReasonCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Reserved3; /*0x04 */ +} MPI2_EVENT_DATA_SAS_QUIESCE, + *PTR_MPI2_EVENT_DATA_SAS_QUIESCE, + Mpi2EventDataSasQuiesce_t, *pMpi2EventDataSasQuiesce_t; + +/*SAS Quiesce Event data ReasonCode values */ +#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01) +#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02) + +/*Host Based Discovery Phy Event data */ + +typedef struct _MPI2_EVENT_HBD_PHY_SAS { + U8 Flags; /*0x00 */ + U8 NegotiatedLinkRate; /*0x01 */ + U8 PhyNum; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U32 Reserved1; /*0x04 */ + U8 InitialFrame[28]; /*0x08 */ +} MPI2_EVENT_HBD_PHY_SAS, *PTR_MPI2_EVENT_HBD_PHY_SAS, + Mpi2EventHbdPhySas_t, *pMpi2EventHbdPhySas_t; + +/*values for the Flags field */ +#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02) +#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01) + +/*use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h + *for the NegotiatedLinkRate field */ + +typedef union _MPI2_EVENT_HBD_DESCRIPTOR { + MPI2_EVENT_HBD_PHY_SAS Sas; +} MPI2_EVENT_HBD_DESCRIPTOR, *PTR_MPI2_EVENT_HBD_DESCRIPTOR, + Mpi2EventHbdDescriptor_t, *pMpi2EventHbdDescriptor_t; + +typedef struct _MPI2_EVENT_DATA_HBD_PHY { + U8 DescriptorType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Reserved3; /*0x04 */ + MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /*0x08 */ +} MPI2_EVENT_DATA_HBD_PHY, *PTR_MPI2_EVENT_DATA_HBD_PHY, + Mpi2EventDataHbdPhy_t, + *pMpi2EventDataMpi2EventDataHbdPhy_t; + +/*values for the DescriptorType field */ +#define MPI2_EVENT_HBD_DT_SAS (0x01) + +/**************************************************************************** +* EventAck message +****************************************************************************/ + +/*EventAck Request message */ +typedef struct _MPI2_EVENT_ACK_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Event; /*0x0C */ + U16 Reserved5; /*0x0E */ + U32 EventContext; /*0x10 */ +} MPI2_EVENT_ACK_REQUEST, *PTR_MPI2_EVENT_ACK_REQUEST, + Mpi2EventAckRequest_t, *pMpi2EventAckRequest_t; + +/*EventAck Reply message */ +typedef struct _MPI2_EVENT_ACK_REPLY { + U16 Reserved1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_EVENT_ACK_REPLY, *PTR_MPI2_EVENT_ACK_REPLY, + Mpi2EventAckReply_t, *pMpi2EventAckReply_t; + +/**************************************************************************** +* 
SendHostMessage message +****************************************************************************/ + +/*SendHostMessage Request message */ +typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST { + U16 HostDataLength; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U8 Reserved4; /*0x0C */ + U8 DestVF_ID; /*0x0D */ + U16 Reserved5; /*0x0E */ + U32 Reserved6; /*0x10 */ + U32 Reserved7; /*0x14 */ + U32 Reserved8; /*0x18 */ + U32 Reserved9; /*0x1C */ + U32 Reserved10; /*0x20 */ + U32 HostData[1]; /*0x24 */ +} MPI2_SEND_HOST_MESSAGE_REQUEST, + *PTR_MPI2_SEND_HOST_MESSAGE_REQUEST, + Mpi2SendHostMessageRequest_t, + *pMpi2SendHostMessageRequest_t; + +/*SendHostMessage Reply message */ +typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY { + U16 HostDataLength; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_SEND_HOST_MESSAGE_REPLY, *PTR_MPI2_SEND_HOST_MESSAGE_REPLY, + Mpi2SendHostMessageReply_t, *pMpi2SendHostMessageReply_t; + +/**************************************************************************** +* FWDownload message +****************************************************************************/ + +/*MPI v2.0 FWDownload Request message */ +typedef struct _MPI2_FW_DOWNLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 TotalImageSize; /*0x0C */ + U32 Reserved5; /*0x10 */ + MPI2_MPI_SGE_UNION SGL; /*0x14 */ +} MPI2_FW_DOWNLOAD_REQUEST, *PTR_MPI2_FW_DOWNLOAD_REQUEST, + Mpi2FWDownloadRequest, *pMpi2FWDownloadRequest; + +#define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01) + +#define MPI2_FW_DOWNLOAD_ITYPE_FW (0x01) +#define MPI2_FW_DOWNLOAD_ITYPE_BIOS (0x02) +#define MPI2_FW_DOWNLOAD_ITYPE_MANUFACTURING (0x06) +#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07) +#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08) +#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09) +#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A) +#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) +#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) + +/*MPI v2.0 FWDownload TransactionContext Element */ +typedef struct _MPI2_FW_DOWNLOAD_TCSGE { + U8 Reserved1; /*0x00 */ + U8 ContextSize; /*0x01 */ + U8 DetailsLength; /*0x02 */ + U8 Flags; /*0x03 */ + U32 Reserved2; /*0x04 */ + U32 ImageOffset; /*0x08 */ + U32 ImageSize; /*0x0C */ +} MPI2_FW_DOWNLOAD_TCSGE, *PTR_MPI2_FW_DOWNLOAD_TCSGE, + Mpi2FWDownloadTCSGE_t, *pMpi2FWDownloadTCSGE_t; + +/*MPI v2.5 FWDownload Request message */ +typedef struct _MPI25_FW_DOWNLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 TotalImageSize; /*0x0C */ + U32 Reserved5; /*0x10 */ + U32 Reserved6; /*0x14 */ + U32 ImageOffset; /*0x18 */ + U32 ImageSize; /*0x1C */ + MPI25_SGE_IO_UNION SGL; /*0x20 */ +} MPI25_FW_DOWNLOAD_REQUEST, *PTR_MPI25_FW_DOWNLOAD_REQUEST, + 
Mpi25FWDownloadRequest, *pMpi25FWDownloadRequest; + +/*FWDownload Reply message */ +typedef struct _MPI2_FW_DOWNLOAD_REPLY { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_FW_DOWNLOAD_REPLY, *PTR_MPI2_FW_DOWNLOAD_REPLY, + Mpi2FWDownloadReply_t, *pMpi2FWDownloadReply_t; + +/**************************************************************************** +* FWUpload message +****************************************************************************/ + +/*MPI v2.0 FWUpload Request message */ +typedef struct _MPI2_FW_UPLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + MPI2_MPI_SGE_UNION SGL; /*0x14 */ +} MPI2_FW_UPLOAD_REQUEST, *PTR_MPI2_FW_UPLOAD_REQUEST, + Mpi2FWUploadRequest_t, *pMpi2FWUploadRequest_t; + +#define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT (0x00) +#define MPI2_FW_UPLOAD_ITYPE_FW_FLASH (0x01) +#define MPI2_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02) +#define MPI2_FW_UPLOAD_ITYPE_FW_BACKUP (0x05) +#define MPI2_FW_UPLOAD_ITYPE_MANUFACTURING (0x06) +#define MPI2_FW_UPLOAD_ITYPE_CONFIG_1 (0x07) +#define MPI2_FW_UPLOAD_ITYPE_CONFIG_2 (0x08) +#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09) +#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A) +#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) + +/*MPI v2.0 FWUpload TransactionContext Element */ +typedef struct _MPI2_FW_UPLOAD_TCSGE { + U8 Reserved1; /*0x00 */ + U8 ContextSize; /*0x01 */ + U8 DetailsLength; /*0x02 */ + U8 Flags; /*0x03 */ + U32 Reserved2; /*0x04 */ + U32 ImageOffset; /*0x08 */ + U32 ImageSize; /*0x0C */ +} MPI2_FW_UPLOAD_TCSGE, *PTR_MPI2_FW_UPLOAD_TCSGE, + Mpi2FWUploadTCSGE_t, *pMpi2FWUploadTCSGE_t; + +/*MPI v2.5 FWUpload Request message */ +typedef struct _MPI25_FW_UPLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + U32 Reserved7; /*0x14 */ + U32 ImageOffset; /*0x18 */ + U32 ImageSize; /*0x1C */ + MPI25_SGE_IO_UNION SGL; /*0x20 */ +} MPI25_FW_UPLOAD_REQUEST, *PTR_MPI25_FW_UPLOAD_REQUEST, + Mpi25FWUploadRequest_t, *pMpi25FWUploadRequest_t; + +/*FWUpload Reply message */ +typedef struct _MPI2_FW_UPLOAD_REPLY { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 ActualImageSize; /*0x14 */ +} MPI2_FW_UPLOAD_REPLY, *PTR_MPI2_FW_UPLOAD_REPLY, + Mpi2FWUploadReply_t, *pMPi2FWUploadReply_t; + +/*FW Image Header */ +typedef struct _MPI2_FW_IMAGE_HEADER { + U32 Signature; /*0x00 */ + U32 Signature0; /*0x04 */ + U32 Signature1; /*0x08 */ + U32 Signature2; /*0x0C */ + MPI2_VERSION_UNION MPIVersion; /*0x10 */ + MPI2_VERSION_UNION FWVersion; /*0x14 
*/ + MPI2_VERSION_UNION NVDATAVersion; /*0x18 */ + MPI2_VERSION_UNION PackageVersion; /*0x1C */ + U16 VendorID; /*0x20 */ + U16 ProductID; /*0x22 */ + U16 ProtocolFlags; /*0x24 */ + U16 Reserved26; /*0x26 */ + U32 IOCCapabilities; /*0x28 */ + U32 ImageSize; /*0x2C */ + U32 NextImageHeaderOffset; /*0x30 */ + U32 Checksum; /*0x34 */ + U32 Reserved38; /*0x38 */ + U32 Reserved3C; /*0x3C */ + U32 Reserved40; /*0x40 */ + U32 Reserved44; /*0x44 */ + U32 Reserved48; /*0x48 */ + U32 Reserved4C; /*0x4C */ + U32 Reserved50; /*0x50 */ + U32 Reserved54; /*0x54 */ + U32 Reserved58; /*0x58 */ + U32 Reserved5C; /*0x5C */ + U32 Reserved60; /*0x60 */ + U32 FirmwareVersionNameWhat; /*0x64 */ + U8 FirmwareVersionName[32]; /*0x68 */ + U32 VendorNameWhat; /*0x88 */ + U8 VendorName[32]; /*0x8C */ + U32 PackageNameWhat; /*0xAC */ + U8 PackageName[32]; /*0xB0 */ + U32 ReservedD0; /*0xD0 */ + U32 ReservedD4; /*0xD4 */ + U32 ReservedD8; /*0xD8 */ + U32 ReservedDC; /*0xDC */ + U32 ReservedE0; /*0xE0 */ + U32 ReservedE4; /*0xE4 */ + U32 ReservedE8; /*0xE8 */ + U32 ReservedEC; /*0xEC */ + U32 ReservedF0; /*0xF0 */ + U32 ReservedF4; /*0xF4 */ + U32 ReservedF8; /*0xF8 */ + U32 ReservedFC; /*0xFC */ +} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER, + Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t; + +/*Signature field */ +#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00) +#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000) +#define MPI2_FW_HEADER_SIGNATURE (0xEA000000) + +/*Signature0 field */ +#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04) +#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A) + +/*Signature1 field */ +#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08) +#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5) + +/*Signature2 field */ +#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C) +#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA) + +/*defines for using the ProductID field */ +#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000) +#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000) + +#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00) +#define MPI2_FW_HEADER_PID_PROD_A (0x0000) +#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200) +#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700) + +#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF) +/*SAS ProductID Family bits */ +#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013) +#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014) +#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021) + +/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */ + +/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */ + +#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C) +#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30) +#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64) + +#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840) + +#define MPI2_FW_HEADER_SIZE (0x100) + +/*Extended Image Header */ +typedef struct _MPI2_EXT_IMAGE_HEADER { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Checksum; /*0x04 */ + U32 ImageSize; /*0x08 */ + U32 NextImageHeaderOffset; /*0x0C */ + U32 PackageVersion; /*0x10 */ + U32 Reserved3; /*0x14 */ + U32 Reserved4; /*0x18 */ + U32 Reserved5; /*0x1C */ + U8 IdentifyString[32]; /*0x20 */ +} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER, + Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t; + +/*useful offsets */ +#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00) +#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08) +#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C) + +#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40) + +/*defines for the ImageType field */ +#define 
MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) +#define MPI2_EXT_IMAGE_TYPE_FW (0x01) +#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03) +#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04) +#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05) +#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06) +#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07) +#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) +#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF) + +#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) + +/*FLASH Layout Extended Image Data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check RegionsPerLayout at runtime. + */ +#ifndef MPI2_FLASH_NUMBER_OF_REGIONS +#define MPI2_FLASH_NUMBER_OF_REGIONS (1) +#endif + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumberOfLayouts at runtime. + */ +#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS +#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1) +#endif + +typedef struct _MPI2_FLASH_REGION { + U8 RegionType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 RegionOffset; /*0x04 */ + U32 RegionSize; /*0x08 */ + U32 Reserved3; /*0x0C */ +} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION, + Mpi2FlashRegion_t, *pMpi2FlashRegion_t; + +typedef struct _MPI2_FLASH_LAYOUT { + U32 FlashSize; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U32 Reserved3; /*0x0C */ + MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */ +} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT, + Mpi2FlashLayout_t, *pMpi2FlashLayout_t; + +typedef struct _MPI2_FLASH_LAYOUT_DATA { + U8 ImageRevision; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 SizeOfRegion; /*0x02 */ + U8 Reserved2; /*0x03 */ + U16 NumberOfLayouts; /*0x04 */ + U16 RegionsPerLayout; /*0x06 */ + U16 MinimumSectorAlignment; /*0x08 */ + U16 Reserved3; /*0x0A */ + U32 Reserved4; /*0x0C */ + MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */ +} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA, + Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t; + +/*defines for the RegionType field */ +#define MPI2_FLASH_REGION_UNUSED (0x00) +#define MPI2_FLASH_REGION_FIRMWARE (0x01) +#define MPI2_FLASH_REGION_BIOS (0x02) +#define MPI2_FLASH_REGION_NVDATA (0x03) +#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05) +#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06) +#define MPI2_FLASH_REGION_CONFIG_1 (0x07) +#define MPI2_FLASH_REGION_CONFIG_2 (0x08) +#define MPI2_FLASH_REGION_MEGARAID (0x09) +#define MPI2_FLASH_REGION_INIT (0x0A) + +/*ImageRevision */ +#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00) + +/*Supported Devices Extended Image Data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumberOfDevices at runtime. 
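+ * + *Illustrative sketch only (not part of the published header): a flash + *utility could scan the variable-length list for a controller match, + *where vendor_id and device_id are caller-supplied PCI IDs and the + *DeviceIDMask field can widen the DeviceID comparison: + * + *    Mpi2SupportedDevicesData_t *data = image_data; + *    U8 i; + *    for (i = 0; i < data->NumberOfDevices; i++) + *            if (data->SupportedDevice[i].VendorID == vendor_id && + *                data->SupportedDevice[i].DeviceID == device_id) + *                    break;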
+ */ +#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES +#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1) +#endif + +typedef struct _MPI2_SUPPORTED_DEVICE { + U16 DeviceID; /*0x00 */ + U16 VendorID; /*0x02 */ + U16 DeviceIDMask; /*0x04 */ + U16 Reserved1; /*0x06 */ + U8 LowPCIRev; /*0x08 */ + U8 HighPCIRev; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 Reserved3; /*0x0C */ +} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE, + Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t; + +typedef struct _MPI2_SUPPORTED_DEVICES_DATA { + U8 ImageRevision; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 NumberOfDevices; /*0x02 */ + U8 Reserved2; /*0x03 */ + U32 Reserved3; /*0x04 */ + MPI2_SUPPORTED_DEVICE + SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */ +} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA, + Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t; + +/*ImageRevision */ +#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00) + +/*Init Extended Image Data */ + +typedef struct _MPI2_INIT_IMAGE_FOOTER { + U32 BootFlags; /*0x00 */ + U32 ImageSize; /*0x04 */ + U32 Signature0; /*0x08 */ + U32 Signature1; /*0x0C */ + U32 Signature2; /*0x10 */ + U32 ResetVector; /*0x14 */ +} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER, + Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t; + +/*defines for the BootFlags field */ +#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00) + +/*defines for the ImageSize field */ +#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04) + +/*defines for the Signature0 field */ +#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08) +#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA) + +/*defines for the Signature1 field */ +#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C) +#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5) + +/*defines for the Signature2 field */ +#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10) +#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A) + +/*Signature fields as individual bytes */ +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A) + +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5) + +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A) + +/*defines for the ResetVector field */ +#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14) + +/**************************************************************************** +* PowerManagementControl message +****************************************************************************/ + +/*PowerManagementControl Request message */ +typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST { + U8 Feature; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 Parameter1; /*0x0C */ + U8 Parameter2; /*0x0D */ + U8 Parameter3; /*0x0E */ + U8 Parameter4; /*0x0F */ + U32 Reserved5; /*0x10 */ + U32 Reserved6; /*0x14 */ +} MPI2_PWR_MGMT_CONTROL_REQUEST, *PTR_MPI2_PWR_MGMT_CONTROL_REQUEST, + Mpi2PwrMgmtControlRequest_t, *pMpi2PwrMgmtControlRequest_t; + +/*defines for the Feature field */ +#define 
MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01) +#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02) +#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /*obsolete */ +#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04) +#define MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE (0x05) +#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF) + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */ +/*Parameter1 contains a PHY number */ +/*Parameter2 indicates power condition action using these defines */ +#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01) +#define MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02) +#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03) +/*Parameter3 and Parameter4 are reserved */ + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION + * Feature */ +/*Parameter1 contains SAS port width modulation group number */ +/*Parameter2 indicates IOC action using these defines */ +#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01) +#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02) +#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03) +/*Parameter3 indicates desired modulation level using these defines */ +#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00) +#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01) +#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02) +#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03) +/*Parameter4 is reserved */ + +/*this next set (_PCIE_LINK) is obsolete */ +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */ +/*Parameter1 indicates desired PCIe link speed using these defines */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /*obsolete */ +/*Parameter2 indicates desired PCIe link width using these defines */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /*obsolete */ +/*Parameter3 and Parameter4 are reserved */ + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */ +/*Parameter1 indicates desired IOC hardware clock speed using these defines */ +#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01) +#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02) +#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04) +#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08) +/*Parameter2, Parameter3, and Parameter4 are reserved */ + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE Feature*/ +/*Parameter1 indicates host action regarding global power management mode */ +#define MPI2_PM_CONTROL_PARAM1_TAKE_CONTROL (0x01) +#define MPI2_PM_CONTROL_PARAM1_CHANGE_GLOBAL_MODE (0x02) +#define MPI2_PM_CONTROL_PARAM1_RELEASE_CONTROL (0x03) +/*Parameter2 indicates the requested global power management mode */ +#define MPI2_PM_CONTROL_PARAM2_FULL_PWR_PERF (0x01) +#define MPI2_PM_CONTROL_PARAM2_REDUCED_PWR_PERF (0x08) +#define MPI2_PM_CONTROL_PARAM2_STANDBY (0x40) +/*Parameter3 and Parameter4 are reserved */ + +/*PowerManagementControl Reply message */ +typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY { + U8 Feature; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; 
/*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_PWR_MGMT_CONTROL_REPLY, *PTR_MPI2_PWR_MGMT_CONTROL_REPLY, + Mpi2PwrMgmtControlReply_t, *pMpi2PwrMgmtControlReply_t; + +#endif diff --git a/addons/mpt3sas/src/3.10.108/mpi/mpi2_raid.h b/addons/mpt3sas/src/3.10.108/mpi/mpi2_raid.h new file mode 100644 index 00000000..d1d9866c --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpi/mpi2_raid.h @@ -0,0 +1,346 @@ +/* + * Copyright (c) 2000-2012 LSI Corporation. + * + * + * Name: mpi2_raid.h + * Title: MPI Integrated RAID messages and structures + * Creation Date: April 26, 2007 + * + * mpi2_raid.h Version: 02.00.08 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 08-31-07 02.00.01 Modifications to RAID Action request and reply, + * including the Actions and ActionData. + * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD. + * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that + * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT + * can be sized by the build environment. + * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of + * VolumeCreationFlags and marked the old one as obsolete. + * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. + * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with + * related structures and defines. + * Added product-specific range to RAID Action values. + * 11-18-11 02.00.07 Incorporating additions for MPI v2.5. + * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. 
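Stepping back to the PowerManagementControl defines for a moment: the Feature field selects which of the Parameter1..Parameter4 interpretations above applies. A sketch of one combination, requesting the SATA partial power condition on a PHY; the helper is hypothetical, and the function code MPI2_FUNCTION_PWR_MGMT_CONTROL comes from mpi2.h, which is not part of this hunk.

static void build_da_phy_power_cond(Mpi2PwrMgmtControlRequest_t *mpi_request,
	u8 phy_num)
{
	memset(mpi_request, 0, sizeof(*mpi_request));
	mpi_request->Function = MPI2_FUNCTION_PWR_MGMT_CONTROL;
	mpi_request->Feature = MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND;
	mpi_request->Parameter1 = phy_num;	/* which PHY to act on */
	mpi_request->Parameter2 = MPI2_PM_CONTROL_PARAM2_PARTIAL;
	/* Parameter3 and Parameter4 are reserved for this feature */
}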
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_RAID_H +#define MPI2_RAID_H + +/***************************************************************************** +* +* Integrated RAID Messages +* +*****************************************************************************/ + +/**************************************************************************** +* RAID Action messages +****************************************************************************/ + +/*ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */ +#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000) +#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001) + +/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for + *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ + +/*ActionDataWord defines for use with + *MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */ +#define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001) + +/*ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */ +typedef struct _MPI2_RAID_ACTION_RATE_DATA { + U8 RateToChange; /*0x00 */ + U8 RateOrMode; /*0x01 */ + U16 DataScrubDuration; /*0x02 */ +} MPI2_RAID_ACTION_RATE_DATA, *PTR_MPI2_RAID_ACTION_RATE_DATA, + Mpi2RaidActionRateData_t, *pMpi2RaidActionRateData_t; + +#define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00) +#define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01) +#define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02) + +/*ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */ +typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION { + U8 RAIDFunction; /*0x00 */ + U8 Flags; /*0x01 */ + U16 Reserved1; /*0x02 */ +} MPI2_RAID_ACTION_START_RAID_FUNCTION, + *PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION, + Mpi2RaidActionStartRaidFunction_t, + *pMpi2RaidActionStartRaidFunction_t; + +/*defines for the RAIDFunction field */ +#define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00) +#define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02) + +/*defines for the Flags field */ +#define MPI2_RAID_ACTION_START_NEW (0x00) +#define MPI2_RAID_ACTION_START_RESUME (0x01) + +/*ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */ +typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION { + U8 RAIDFunction; /*0x00 */ + U8 Flags; /*0x01 */ + U16 Reserved1; /*0x02 */ +} MPI2_RAID_ACTION_STOP_RAID_FUNCTION, + *PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION, + Mpi2RaidActionStopRaidFunction_t, + *pMpi2RaidActionStopRaidFunction_t; + +/*defines for the RAIDFunction field */ +#define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00) +#define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02) + +/*defines for the Flags field */ +#define MPI2_RAID_ACTION_STOP_ABORT (0x00) +#define MPI2_RAID_ACTION_STOP_PAUSE (0x01) + +/*ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */ +typedef struct _MPI2_RAID_ACTION_HOT_SPARE { + U8 HotSparePool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 DevHandle; /*0x02 */ +} MPI2_RAID_ACTION_HOT_SPARE, *PTR_MPI2_RAID_ACTION_HOT_SPARE, + Mpi2RaidActionHotSpare_t, *pMpi2RaidActionHotSpare_t; + +/*ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */ +typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE { + U8 Flags; /*0x00 */ + U8 DeviceFirmwareUpdateModeTimeout; /*0x01 */ + U16 Reserved1; /*0x02 */ +} MPI2_RAID_ACTION_FW_UPDATE_MODE, + *PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE, + Mpi2RaidActionFwUpdateMode_t, + *pMpi2RaidActionFwUpdateMode_t; + 
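Each of the small structures above is one possible payload for the 32-bit ActionDataWord of the RAID Action request defined a little further down. A hypothetical sketch preparing the SET_RAID_FUNCTION_RATE payload:

static MPI2_RAID_ACTION_RATE_DATA make_resync_rate(u8 rate)
{
	MPI2_RAID_ACTION_RATE_DATA rate_data;

	memset(&rate_data, 0, sizeof(rate_data));
	rate_data.RateToChange = MPI2_RAID_ACTION_SET_RATE_RESYNC;
	rate_data.RateOrMode = rate;
	/* DataScrubDuration only applies to the DATA_SCRUB variant */
	return rate_data;
}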
+/*ActionDataWord defines for use with + *MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */ +#define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00) +#define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01) + +typedef union _MPI2_RAID_ACTION_DATA { + U32 Word; + MPI2_RAID_ACTION_RATE_DATA Rates; + MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction; + MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction; + MPI2_RAID_ACTION_HOT_SPARE HotSpare; + MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode; +} MPI2_RAID_ACTION_DATA, *PTR_MPI2_RAID_ACTION_DATA, + Mpi2RaidActionData_t, *pMpi2RaidActionData_t; + +/*RAID Action Request Message */ +typedef struct _MPI2_RAID_ACTION_REQUEST { + U8 Action; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 VolDevHandle; /*0x04 */ + U8 PhysDiskNum; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 Reserved3; /*0x0C */ + MPI2_RAID_ACTION_DATA ActionDataWord; /*0x10 */ + MPI2_SGE_SIMPLE_UNION ActionDataSGE; /*0x14 */ +} MPI2_RAID_ACTION_REQUEST, *PTR_MPI2_RAID_ACTION_REQUEST, + Mpi2RaidActionRequest_t, *pMpi2RaidActionRequest_t; + +/*RAID Action request Action values */ + +#define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01) +#define MPI2_RAID_ACTION_CREATE_VOLUME (0x02) +#define MPI2_RAID_ACTION_DELETE_VOLUME (0x03) +#define MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES (0x04) +#define MPI2_RAID_ACTION_ENABLE_ALL_VOLUMES (0x05) +#define MPI2_RAID_ACTION_PHYSDISK_OFFLINE (0x0A) +#define MPI2_RAID_ACTION_PHYSDISK_ONLINE (0x0B) +#define MPI2_RAID_ACTION_FAIL_PHYSDISK (0x0F) +#define MPI2_RAID_ACTION_ACTIVATE_VOLUME (0x11) +#define MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15) +#define MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE (0x17) +#define MPI2_RAID_ACTION_SET_VOLUME_NAME (0x18) +#define MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE (0x19) +#define MPI2_RAID_ACTION_ENABLE_FAILED_VOLUME (0x1C) +#define MPI2_RAID_ACTION_CREATE_HOT_SPARE (0x1D) +#define MPI2_RAID_ACTION_DELETE_HOT_SPARE (0x1E) +#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20) +#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21) +#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22) +#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23) +#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24) +#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF) + +/*RAID Volume Creation Structure */ + +/* + *The following define can be customized for the targeted product. 
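Completing the picture from the rate-data sketch above, here is a full request for an action whose payload fits in the inline union: creating a hot spare from a device handle. Hypothetical helper; MPI2_FUNCTION_RAID_ACTION is defined in mpi2.h, and ActionDataSGE is left null because this action carries its data inline.

static void build_create_hot_spare(Mpi2RaidActionRequest_t *mpi_request,
	u8 hot_spare_pool, u16 dev_handle)
{
	memset(mpi_request, 0, sizeof(*mpi_request));
	mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
	mpi_request->Action = MPI2_RAID_ACTION_CREATE_HOT_SPARE;
	mpi_request->ActionDataWord.HotSpare.HotSparePool = hot_spare_pool;
	mpi_request->ActionDataWord.HotSpare.DevHandle = cpu_to_le16(dev_handle);
}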
+ */ +#ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS +#define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1) +#endif + +typedef struct _MPI2_RAID_VOLUME_PHYSDISK { + U8 RAIDSetNum; /*0x00 */ + U8 PhysDiskMap; /*0x01 */ + U16 PhysDiskDevHandle; /*0x02 */ +} MPI2_RAID_VOLUME_PHYSDISK, *PTR_MPI2_RAID_VOLUME_PHYSDISK, + Mpi2RaidVolumePhysDisk_t, *pMpi2RaidVolumePhysDisk_t; + +/*defines for the PhysDiskMap field */ +#define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01) +#define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02) + +typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT { + U8 NumPhysDisks; /*0x00 */ + U8 VolumeType; /*0x01 */ + U16 Reserved1; /*0x02 */ + U32 VolumeCreationFlags; /*0x04 */ + U32 VolumeSettings; /*0x08 */ + U8 Reserved2; /*0x0C */ + U8 ResyncRate; /*0x0D */ + U16 DataScrubDuration; /*0x0E */ + U64 VolumeMaxLBA; /*0x10 */ + U32 StripeSize; /*0x18 */ + U8 Name[16]; /*0x1C */ + MPI2_RAID_VOLUME_PHYSDISK + PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS]; /*0x2C */ +} MPI2_RAID_VOLUME_CREATION_STRUCT, + *PTR_MPI2_RAID_VOLUME_CREATION_STRUCT, + Mpi2RaidVolumeCreationStruct_t, + *pMpi2RaidVolumeCreationStruct_t; + +/*use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */ + +/*defines for the VolumeCreationFlags field */ +#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000) +#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004) +#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002) +#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001) +/*The following is an obsolete define. + *It must be shifted left 24 bits in order to set the proper bit. + */ +#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80) + +/*RAID Online Capacity Expansion Structure */ + +typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION { + U32 Flags; /*0x00 */ + U16 DevHandle0; /*0x04 */ + U16 Reserved1; /*0x06 */ + U16 DevHandle1; /*0x08 */ + U16 Reserved2; /*0x0A */ +} MPI2_RAID_ONLINE_CAPACITY_EXPANSION, + *PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION, + Mpi2RaidOnlineCapacityExpansion_t, + *pMpi2RaidOnlineCapacityExpansion_t; + +/*RAID Compatibility Input Structure */ + +typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT { + U16 SourceDevHandle; /*0x00 */ + U16 CandidateDevHandle; /*0x02 */ + U32 Flags; /*0x04 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ +} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, + *PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, + Mpi2RaidCompatibilityInputStruct_t, + *pMpi2RaidCompatibilityInputStruct_t; + +/*defines for RAID Compatibility Structure Flags field */ +#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002) +#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001) + +/*RAID Volume Indicator Structure */ + +typedef struct _MPI2_RAID_VOL_INDICATOR { + U64 TotalBlocks; /*0x00 */ + U64 BlocksRemaining; /*0x08 */ + U32 Flags; /*0x10 */ +} MPI2_RAID_VOL_INDICATOR, *PTR_MPI2_RAID_VOL_INDICATOR, + Mpi2RaidVolIndicator_t, *pMpi2RaidVolIndicator_t; + +/*defines for RAID Volume Indicator Flags field */ +#define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F) +#define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000) +#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001) +#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002) +#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003) +#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004) + +/*RAID Compatibility Result Structure */ + +typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT { + U8 State; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 GenericAttributes; /*0x04 */ + U32 
OEMSpecificAttributes; /*0x08 */ + U32 Reserved3; /*0x0C */ + U32 Reserved4; /*0x10 */ +} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, + *PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, + Mpi2RaidCompatibilityResultStruct_t, + *pMpi2RaidCompatibilityResultStruct_t; + +/*defines for RAID Compatibility Result Structure State field */ +#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00) +#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01) + +/*defines for RAID Compatibility Result Structure GenericAttributes field */ +#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010) + +#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C) +#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008) +#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004) + +#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003) +#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002) +#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001) + +/*RAID Action Reply ActionData union */ +typedef union _MPI2_RAID_ACTION_REPLY_DATA { + U32 Word[5]; + MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator; + U16 VolDevHandle; + U8 VolumeState; + U8 PhysDiskNum; + MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult; +} MPI2_RAID_ACTION_REPLY_DATA, *PTR_MPI2_RAID_ACTION_REPLY_DATA, + Mpi2RaidActionReplyData_t, *pMpi2RaidActionReplyData_t; + +/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for + *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ + +/*RAID Action Reply Message */ +typedef struct _MPI2_RAID_ACTION_REPLY { + U8 Action; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 VolDevHandle; /*0x04 */ + U8 PhysDiskNum; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + MPI2_RAID_ACTION_REPLY_DATA ActionData; /*0x14 */ +} MPI2_RAID_ACTION_REPLY, *PTR_MPI2_RAID_ACTION_REPLY, + Mpi2RaidActionReply_t, *pMpi2RaidActionReply_t; + +#endif diff --git a/addons/mpt3sas/src/3.10.108/mpi/mpi2_sas.h b/addons/mpt3sas/src/3.10.108/mpi/mpi2_sas.h new file mode 100644 index 00000000..b4e7084a --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpi/mpi2_sas.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2000-2012 LSI Corporation. + * + * + * Name: mpi2_sas.h + * Title: MPI Serial Attached SCSI structures and definitions + * Creation Date: February 9, 2007 + * + * mpi2_sas.h Version: 02.00.07 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit + * Control Request. + * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control + * Request. + * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST + * to MPI2_SGE_IO_UNION since it supports chained SGLs. + * 05-12-10 02.00.04 Modified some comments. + * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control. + * 11-18-11 02.00.06 Incorporating additions for MPI v2.5. 
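Looking back at the RAID Action reply data for a moment: the volume indicator's TotalBlocks/BlocksRemaining pair converts directly into a progress percentage, which is the usual consumer of that structure. A sketch, assuming plain 64-bit division is acceptable (32-bit kernel targets would use do_div()):

static u8 raid_vol_progress_percent(const Mpi2RaidVolIndicator_t *ind)
{
	u64 total = le64_to_cpu(ind->TotalBlocks);
	u64 remaining = le64_to_cpu(ind->BlocksRemaining);

	if (!total || remaining > total)
		return 0;
	return (u8)(((total - remaining) * 100) / total);
}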
+ * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA + * Passthrough Request message. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_SAS_H +#define MPI2_SAS_H + +/* + *Values for SASStatus. + */ +#define MPI2_SASSTATUS_SUCCESS (0x00) +#define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01) +#define MPI2_SASSTATUS_INVALID_FRAME (0x02) +#define MPI2_SASSTATUS_UTC_BAD_DEST (0x03) +#define MPI2_SASSTATUS_UTC_BREAK_RECEIVED (0x04) +#define MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05) +#define MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06) +#define MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07) +#define MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08) +#define MPI2_SASSTATUS_UTC_WRONG_DESTINATION (0x09) +#define MPI2_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A) +#define MPI2_SASSTATUS_LONG_INFORMATION_UNIT (0x0B) +#define MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C) +#define MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D) +#define MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E) +#define MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F) +#define MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10) +#define MPI2_SASSTATUS_DATA_OFFSET_ERROR (0x11) +#define MPI2_SASSTATUS_SDSF_NAK_RECEIVED (0x12) +#define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13) +#define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14) + +/* + *Values for the SAS DeviceInfo field used in SAS Device Status Change Event + *data and SAS Configuration pages. + */ +#define MPI2_SAS_DEVICE_INFO_SEP (0x00004000) +#define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000) +#define MPI2_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000) +#define MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800) +#define MPI2_SAS_DEVICE_INFO_SSP_TARGET (0x00000400) +#define MPI2_SAS_DEVICE_INFO_STP_TARGET (0x00000200) +#define MPI2_SAS_DEVICE_INFO_SMP_TARGET (0x00000100) +#define MPI2_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080) +#define MPI2_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040) +#define MPI2_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020) +#define MPI2_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010) +#define MPI2_SAS_DEVICE_INFO_SATA_HOST (0x00000008) + +#define MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007) +#define MPI2_SAS_DEVICE_INFO_NO_DEVICE (0x00000000) +#define MPI2_SAS_DEVICE_INFO_END_DEVICE (0x00000001) +#define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002) +#define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) + +/***************************************************************************** +* +* SAS Messages +* +*****************************************************************************/ + +/**************************************************************************** +* SMP Passthrough messages +****************************************************************************/ + +/*SMP Passthrough Request Message */ +typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST { + U8 PassthroughFlags; /*0x00 */ + U8 PhysicalPort; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 RequestDataLength; /*0x04 */ + U8 SGLFlags; /*0x06*//*MPI v2.0 only. 
Reserved on MPI v2.5*/ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Reserved2; /*0x0C */ + U64 SASAddress; /*0x10 */ + U32 Reserved3; /*0x18 */ + U32 Reserved4; /*0x1C */ + MPI2_SIMPLE_SGE_UNION SGL;/*0x20 */ +} MPI2_SMP_PASSTHROUGH_REQUEST, *PTR_MPI2_SMP_PASSTHROUGH_REQUEST, + Mpi2SmpPassthroughRequest_t, *pMpi2SmpPassthroughRequest_t; + +/*values for PassthroughFlags field */ +#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80) + +/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*SMP Passthrough Reply Message */ +typedef struct _MPI2_SMP_PASSTHROUGH_REPLY { + U8 PassthroughFlags; /*0x00 */ + U8 PhysicalPort; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 ResponseDataLength; /*0x04 */ + U8 SGLFlags; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U8 Reserved2; /*0x0C */ + U8 SASStatus; /*0x0D */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 Reserved3; /*0x14 */ + U8 ResponseData[4]; /*0x18 */ +} MPI2_SMP_PASSTHROUGH_REPLY, *PTR_MPI2_SMP_PASSTHROUGH_REPLY, + Mpi2SmpPassthroughReply_t, *pMpi2SmpPassthroughReply_t; + +/*values for PassthroughFlags field */ +#define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80) + +/*values for SASStatus field are at the top of this file */ + +/**************************************************************************** +* SATA Passthrough messages +****************************************************************************/ + +typedef union _MPI2_SATA_PT_SGE_UNION { + MPI2_SGE_SIMPLE_UNION MpiSimple; /*MPI v2.0 only */ + MPI2_SGE_CHAIN_UNION MpiChain; /*MPI v2.0 only */ + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; + MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; /*MPI v2.0 only */ + MPI25_IEEE_SGE_CHAIN64 IeeeChain64; /*MPI v2.5 only */ +} MPI2_SATA_PT_SGE_UNION, *PTR_MPI2_SATA_PT_SGE_UNION, + Mpi2SataPTSGEUnion_t, *pMpi2SataPTSGEUnion_t; + +/*SATA Passthrough Request Message */ +typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 PassthroughFlags; /*0x04 */ + U8 SGLFlags; /*0x06*//*MPI v2.0 only. 
Reserved on MPI v2.5*/ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Reserved2; /*0x0C */ + U32 Reserved3; /*0x10 */ + U32 Reserved4; /*0x14 */ + U32 DataLength; /*0x18 */ + U8 CommandFIS[20]; /*0x1C */ + MPI2_SATA_PT_SGE_UNION SGL;/*0x30*//*MPI v2.5: IEEE 64 elements only*/ +} MPI2_SATA_PASSTHROUGH_REQUEST, *PTR_MPI2_SATA_PASSTHROUGH_REQUEST, + Mpi2SataPassthroughRequest_t, + *pMpi2SataPassthroughRequest_t; + +/*values for PassthroughFlags field */ +#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100) +#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020) +#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010) +#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004) +#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002) +#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001) + +/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*SATA Passthrough Reply Message */ +typedef struct _MPI2_SATA_PASSTHROUGH_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 PassthroughFlags; /*0x04 */ + U8 SGLFlags; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U8 Reserved2; /*0x0C */ + U8 SASStatus; /*0x0D */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 StatusFIS[20]; /*0x14 */ + U32 StatusControlRegisters; /*0x28 */ + U32 TransferCount; /*0x2C */ +} MPI2_SATA_PASSTHROUGH_REPLY, *PTR_MPI2_SATA_PASSTHROUGH_REPLY, + Mpi2SataPassthroughReply_t, *pMpi2SataPassthroughReply_t; + +/*values for SASStatus field are at the top of this file */ + +/**************************************************************************** +* SAS IO Unit Control messages +****************************************************************************/ + +/*SAS IO Unit Control Request Message */ +typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST { + U8 Operation; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 DevHandle; /*0x04 */ + U8 IOCParameter; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U8 PhyNum; /*0x0E */ + U8 PrimFlags; /*0x0F */ + U32 Primitive; /*0x10 */ + U8 LookupMethod; /*0x14 */ + U8 Reserved5; /*0x15 */ + U16 SlotNumber; /*0x16 */ + U64 LookupAddress; /*0x18 */ + U32 IOCParameterValue; /*0x20 */ + U32 Reserved7; /*0x24 */ + U32 Reserved8; /*0x28 */ +} MPI2_SAS_IOUNIT_CONTROL_REQUEST, + *PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST, + Mpi2SasIoUnitControlRequest_t, + *pMpi2SasIoUnitControlRequest_t; + +/*values for the Operation field */ +#define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02) +#define MPI2_SAS_OP_PHY_LINK_RESET (0x06) +#define MPI2_SAS_OP_PHY_HARD_RESET (0x07) +#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08) +#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A) +#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B) +#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) +#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D) +#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E) +#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F) +#define MPI25_SAS_OP_ENABLE_FP_DEVICE (0x10) +#define MPI25_SAS_OP_DISABLE_FP_DEVICE (0x11) +#define MPI25_SAS_OP_ENABLE_FP_ALL (0x12) +#define MPI25_SAS_OP_DISABLE_FP_ALL (0x13) +#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14) +#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15) +#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80) + +/*values for the PrimFlags field */ +#define MPI2_SAS_PRIMFLAGS_SINGLE (0x08) +#define MPI2_SAS_PRIMFLAGS_TRIPLE (0x02) 
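Because CommandFIS above carries a raw 20-byte host-to-device register FIS, the SATA passthrough can issue any ATA command. A hypothetical sketch for a PIO-read IDENTIFY DEVICE (FIS byte 0 is the 0x27 register-FIS type, byte 1 sets the command-update bit, byte 2 is the ATA opcode); MPI2_FUNCTION_SATA_PASSTHROUGH comes from mpi2.h:

static void build_sata_identify(Mpi2SataPassthroughRequest_t *mpi_request,
	u16 dev_handle)
{
	memset(mpi_request, 0, sizeof(*mpi_request));
	mpi_request->Function = MPI2_FUNCTION_SATA_PASSTHROUGH;
	mpi_request->DevHandle = cpu_to_le16(dev_handle);
	mpi_request->PassthroughFlags =
		cpu_to_le16(MPI2_SATA_PT_REQ_PT_FLAGS_PIO |
			    MPI2_SATA_PT_REQ_PT_FLAGS_READ);
	mpi_request->DataLength = cpu_to_le32(512);
	mpi_request->CommandFIS[0] = 0x27;	/* host-to-device register FIS */
	mpi_request->CommandFIS[1] = 0x80;	/* C bit: command register update */
	mpi_request->CommandFIS[2] = 0xEC;	/* ATA IDENTIFY DEVICE */
}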
+#define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01) + +/*values for the LookupMethod field */ +#define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01) +#define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02) +#define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) + +/*SAS IO Unit Control Reply Message */ +typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY { + U8 Operation; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 DevHandle; /*0x04 */ + U8 IOCParameter; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_SAS_IOUNIT_CONTROL_REPLY, + *PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY, + Mpi2SasIoUnitControlReply_t, *pMpi2SasIoUnitControlReply_t; + +#endif diff --git a/addons/mpt3sas/src/3.10.108/mpi/mpi2_tool.h b/addons/mpt3sas/src/3.10.108/mpi/mpi2_tool.h new file mode 100644 index 00000000..71453d11 --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpi/mpi2_tool.h @@ -0,0 +1,437 @@ +/* + * Copyright (c) 2000-2012 LSI Corporation. + * + * + * Name: mpi2_tool.h + * Title: MPI diagnostic tool structures and definitions + * Creation Date: March 26, 2007 + * + * mpi2_tool.h Version: 02.00.09 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release + * structures and defines. + * 02-29-08 02.00.02 Modified various names to make them 32-character unique. + * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. + * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request + * and reply messages. + * Added MPI2_DIAG_BUF_TYPE_EXTENDED. + * Incremented MPI2_DIAG_BUF_TYPE_COUNT. + * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. + * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer + * Post Request. + * 05-25-11 02.00.07 Added Flags field and related defines to + * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. + * 11-18-11 02.00.08 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request + * message. 
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_TOOL_H +#define MPI2_TOOL_H + +/***************************************************************************** +* +* Toolbox Messages +* +*****************************************************************************/ + +/*defines for the Tools */ +#define MPI2_TOOLBOX_CLEAN_TOOL (0x00) +#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01) +#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02) +#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03) +#define MPI2_TOOLBOX_BEACON_TOOL (0x05) +#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06) + +/**************************************************************************** +* Toolbox reply +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_TOOLBOX_REPLY, *PTR_MPI2_TOOLBOX_REPLY, + Mpi2ToolboxReply_t, *pMpi2ToolboxReply_t; + +/**************************************************************************** +* Toolbox Clean Tool request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Flags; /*0x0C */ +} MPI2_TOOLBOX_CLEAN_REQUEST, *PTR_MPI2_TOOLBOX_CLEAN_REQUEST, + Mpi2ToolboxCleanRequest_t, *pMpi2ToolboxCleanRequest_t; + +/*values for the Flags field */ +#define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000) +#define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000) +#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) +#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000) +#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000) +#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000) +#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000) +#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004) +#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002) +#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001) + +/**************************************************************************** +* Toolbox Memory Move request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + MPI2_SGE_SIMPLE_UNION SGL; /*0x0C */ +} MPI2_TOOLBOX_MEM_MOVE_REQUEST, *PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST, + Mpi2ToolboxMemMoveRequest_t, *pMpi2ToolboxMemMoveRequest_t; + +/**************************************************************************** +* Toolbox Diagnostic Data Upload request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; 
/*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 SGLFlags; /*0x0C */ + U8 Reserved5; /*0x0D */ + U16 Reserved6; /*0x0E */ + U32 Flags; /*0x10 */ + U32 DataLength; /*0x14 */ + MPI2_SGE_SIMPLE_UNION SGL; /*0x18 */ +} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, + *PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, + Mpi2ToolboxDiagDataUploadRequest_t, + *pMpi2ToolboxDiagDataUploadRequest_t; + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER { + U32 DiagDataLength; /*00h */ + U8 FormatCode; /*04h */ + U8 Reserved1; /*05h */ + U16 Reserved2; /*06h */ +} MPI2_DIAG_DATA_UPLOAD_HEADER, *PTR_MPI2_DIAG_DATA_UPLOAD_HEADER, + Mpi2DiagDataUploadHeader_t, *pMpi2DiagDataUploadHeader_t; + +/**************************************************************************** +* Toolbox ISTWI Read Write Tool +****************************************************************************/ + +/*Toolbox ISTWI Read Write Tool request message */ +typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + U8 DevIndex; /*0x14 */ + U8 Action; /*0x15 */ + U8 SGLFlags; /*0x16 */ + U8 Flags; /*0x17 */ + U16 TxDataLength; /*0x18 */ + U16 RxDataLength; /*0x1A */ + U32 Reserved8; /*0x1C */ + U32 Reserved9; /*0x20 */ + U32 Reserved10; /*0x24 */ + U32 Reserved11; /*0x28 */ + U32 Reserved12; /*0x2C */ + MPI2_SGE_SIMPLE_UNION SGL; /*0x30 */ +} MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, + *PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, + Mpi2ToolboxIstwiReadWriteRequest_t, + *pMpi2ToolboxIstwiReadWriteRequest_t; + +/*values for the Action field */ +#define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01) +#define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02) +#define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03) +#define MPI2_TOOL_ISTWI_ACTION_RESERVE_BUS (0x10) +#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11) +#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12) + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*values for the Flags field */ +#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80) +#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07) + +/*Toolbox ISTWI Read Write Tool reply message */ +typedef struct _MPI2_TOOLBOX_ISTWI_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 DevIndex; /*0x14 */ + U8 Action; /*0x15 */ + U8 IstwiStatus; /*0x16 */ + U8 Reserved6; /*0x17 */ + U16 TxDataCount; /*0x18 */ + U16 RxDataCount; /*0x1A */ +} MPI2_TOOLBOX_ISTWI_REPLY, *PTR_MPI2_TOOLBOX_ISTWI_REPLY, + Mpi2ToolboxIstwiReply_t, *pMpi2ToolboxIstwiReply_t; + +/**************************************************************************** +* Toolbox Beacon Tool request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_BEACON_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; 
/*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 Reserved5; /*0x0C */ + U8 PhysicalPort; /*0x0D */ + U8 Reserved6; /*0x0E */ + U8 Flags; /*0x0F */ +} MPI2_TOOLBOX_BEACON_REQUEST, *PTR_MPI2_TOOLBOX_BEACON_REQUEST, + Mpi2ToolboxBeaconRequest_t, *pMpi2ToolboxBeaconRequest_t; + +/*values for the Flags field */ +#define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00) +#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01) + +/**************************************************************************** +* Toolbox Diagnostic CLI Tool +****************************************************************************/ + +#define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C) + +/*MPI v2.0 Toolbox Diagnostic CLI Tool request message */ +typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 SGLFlags; /*0x0C */ + U8 Reserved5; /*0x0D */ + U16 Reserved6; /*0x0E */ + U32 DataLength; /*0x10 */ + U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */ + MPI2_SGE_SIMPLE_UNION SGL; /*0x70 */ +} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + *PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + Mpi2ToolboxDiagnosticCliRequest_t, + *pMpi2ToolboxDiagnosticCliRequest_t; + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*MPI v2.5 Toolbox Diagnostic CLI Tool request message */ +typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 DataLength; /*0x10 */ + U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */ + MPI25_SGE_IO_UNION SGL; /*0x70 */ +} MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + *PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + Mpi25ToolboxDiagnosticCliRequest_t, + *pMpi25ToolboxDiagnosticCliRequest_t; + +/*Toolbox Diagnostic CLI Tool reply message */ +typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 ReturnedDataLength; /*0x14 */ +} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY, + *PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY, + Mpi2ToolboxDiagnosticCliReply_t, + *pMpi2ToolboxDiagnosticCliReply_t; + +/***************************************************************************** +* +* Diagnostic Buffer Messages +* +*****************************************************************************/ + +/**************************************************************************** +* Diagnostic Buffer Post request +****************************************************************************/ + +typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST { + U8 ExtendedType; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U64 BufferAddress; /*0x0C */ + U32 BufferLength; /*0x14 */ + U32 
Reserved5; /*0x18 */ + U32 Reserved6; /*0x1C */ + U32 Flags; /*0x20 */ + U32 ProductSpecific[23]; /*0x24 */ +} MPI2_DIAG_BUFFER_POST_REQUEST, *PTR_MPI2_DIAG_BUFFER_POST_REQUEST, + Mpi2DiagBufferPostRequest_t, *pMpi2DiagBufferPostRequest_t; + +/*values for the ExtendedType field */ +#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02) + +/*values for the BufferType field */ +#define MPI2_DIAG_BUF_TYPE_TRACE (0x00) +#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01) +#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02) +/*count of the number of buffer types */ +#define MPI2_DIAG_BUF_TYPE_COUNT (0x03) + +/*values for the Flags field */ +#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002) +#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001) + +/**************************************************************************** +* Diagnostic Buffer Post reply +****************************************************************************/ + +typedef struct _MPI2_DIAG_BUFFER_POST_REPLY { + U8 ExtendedType; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 TransferLength; /*0x14 */ +} MPI2_DIAG_BUFFER_POST_REPLY, *PTR_MPI2_DIAG_BUFFER_POST_REPLY, + Mpi2DiagBufferPostReply_t, *pMpi2DiagBufferPostReply_t; + +/**************************************************************************** +* Diagnostic Release request +****************************************************************************/ + +typedef struct _MPI2_DIAG_RELEASE_REQUEST { + U8 Reserved1; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ +} MPI2_DIAG_RELEASE_REQUEST, *PTR_MPI2_DIAG_RELEASE_REQUEST, + Mpi2DiagReleaseRequest_t, *pMpi2DiagReleaseRequest_t; + +/**************************************************************************** +* Diagnostic Buffer Post reply +****************************************************************************/ + +typedef struct _MPI2_DIAG_RELEASE_REPLY { + U8 Reserved1; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_DIAG_RELEASE_REPLY, *PTR_MPI2_DIAG_RELEASE_REPLY, + Mpi2DiagReleaseReply_t, *pMpi2DiagReleaseReply_t; + +#endif diff --git a/addons/mpt3sas/src/3.10.108/mpi/mpi2_type.h b/addons/mpt3sas/src/3.10.108/mpi/mpi2_type.h new file mode 100644 index 00000000..516f9595 --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpi/mpi2_type.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2000-2007 LSI Corporation. + * + * + * Name: mpi2_type.h + * Title: MPI basic type definitions + * Creation Date: August 16, 2006 + * + * mpi2_type.h Version: 02.00.00 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. 
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_TYPE_H +#define MPI2_TYPE_H + +/******************************************************************************* + * Define * if it hasn't already been defined. By default + * * is defined to be a near pointer. MPI2_POINTER can be defined as + * a far pointer by defining * as "far *" before this header file is + * included. + */ + +/* the basic types may have already been included by mpi_type.h */ +#ifndef MPI_TYPE_H +/***************************************************************************** +* +* Basic Types +* +*****************************************************************************/ + +typedef u8 U8; +typedef __le16 U16; +typedef __le32 U32; +typedef __le64 U64 __attribute__ ((aligned(4))); + +/***************************************************************************** +* +* Pointer Types +* +*****************************************************************************/ + +typedef U8 *PU8; +typedef U16 *PU16; +typedef U32 *PU32; +typedef U64 *PU64; + +#endif + +#endif diff --git a/addons/mpt3sas/src/3.10.108/mpt3sas_base.c b/addons/mpt3sas/src/3.10.108/mpt3sas_base.c new file mode 100644 index 00000000..18360032 --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpt3sas_base.c @@ -0,0 +1,4837 @@ +/* + * This is the Fusion MPT base driver providing common API layer interface + * for access to MPT (Message Passing Technology) firmware. + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c + * Copyright (C) 2012 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
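Note what mpi2_type.h just established: every multi-byte MPI field is declared with Linux's endian-annotated types, so the structures in these headers are little-endian on the wire regardless of host CPU, and every access goes through the leXX_to_cpu()/cpu_to_leXX() helpers. A one-line illustration of the pattern this driver uses throughout (hypothetical wrapper; MPI2DefaultReply_t and MPI2_IOCSTATUS_MASK come from mpi2.h):

static inline u16 mpi_ioc_status(const MPI2DefaultReply_t *mpi_reply)
{
	/* byte-swaps on big-endian hosts and strips the log-info flag */
	return le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
}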
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include "mpt3sas_base.h" + +static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS]; + + +#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */ + + /* maximum controller queue depth */ +#define MAX_HBA_QUEUE_DEPTH 30000 +#define MAX_CHAIN_DEPTH 100000 +static int max_queue_depth = -1; +module_param(max_queue_depth, int, 0); +MODULE_PARM_DESC(max_queue_depth, " max controller queue depth "); + +static int max_sgl_entries = -1; +module_param(max_sgl_entries, int, 0); +MODULE_PARM_DESC(max_sgl_entries, " max sg entries "); + +static int msix_disable = -1; +module_param(msix_disable, int, 0); +MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); + + +static int mpt3sas_fwfault_debug; +MODULE_PARM_DESC(mpt3sas_fwfault_debug, + " enable detection of firmware fault and halt firmware - (default=0)"); + + +/** + * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. + * + */ +static int +_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + struct MPT3SAS_ADAPTER *ioc; + + if (ret) + return ret; + + pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug); + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) + ioc->fwfault_debug = mpt3sas_fwfault_debug; + return 0; +} +module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug, + param_get_int, &mpt3sas_fwfault_debug, 0644); + +/** + * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc + * @arg: input argument, used to derive ioc + * + * Return 0 if controller is removed from pci subsystem. + * Return -1 for other case. + */ +static int mpt3sas_remove_dead_ioc_func(void *arg) +{ + struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg; + struct pci_dev *pdev; + + if ((ioc == NULL)) + return -1; + + pdev = ioc->pdev; + if ((pdev == NULL)) + return -1; + pci_stop_and_remove_bus_device(pdev); + return 0; +} + +/** + * _base_fault_reset_work - workq handling ioc fault conditions + * @work: input argument, used to derive ioc + * Context: sleep. + * + * Return nothing. 
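The fwfault_debug setter above is the standard shape for a writable module parameter that must fan out to every live adapter: param_set_int() parses and stores the value, then the custom setter walks the adapter list. Distilled to a generic sketch with hypothetical names (3.10-era struct kernel_param signature, as used above; assumes linux/module.h and linux/list.h):

struct example_adapter {
	struct list_head list;
	int debug;
};
static LIST_HEAD(example_adapter_list);
static int example_debug;

static int example_set_debug(const char *val, struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);	/* parse into example_debug */
	struct example_adapter *adapter;

	if (ret)
		return ret;
	list_for_each_entry(adapter, &example_adapter_list, list)
		adapter->debug = example_debug;	/* propagate to live devices */
	return 0;
}
module_param_call(example_debug, example_set_debug, param_get_int,
	&example_debug, 0644);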
+ */ +static void +_base_fault_reset_work(struct work_struct *work) +{ + struct MPT3SAS_ADAPTER *ioc = + container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work); + unsigned long flags; + u32 doorbell; + int rc; + struct task_struct *p; + + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->shost_recovery) + goto rearm_timer; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) { + pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n", + ioc->name); + + /* + * Call _scsih_flush_pending_cmds callback so that we flush all + * pending commands back to OS. This call is required to aovid + * deadlock at block layer. Dead IOC will fail to do diag reset, + * and this call is safe since dead ioc will never return any + * command back from HW. + */ + ioc->schedule_dead_ioc_flush_running_cmds(ioc); + /* + * Set remove_host flag early since kernel thread will + * take some time to execute. + */ + ioc->remove_host = 1; + /*Remove the Dead Host */ + p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc, + "mpt3sas_dead_ioc_%d", ioc->id); + if (IS_ERR(p)) + pr_err(MPT3SAS_FMT + "%s: Running mpt3sas_dead_ioc thread failed !!!!\n", + ioc->name, __func__); + else + pr_err(MPT3SAS_FMT + "%s: Running mpt3sas_dead_ioc thread success !!!!\n", + ioc->name, __func__); + return; /* don't rearm timer */ + } + + if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) { + rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name, + __func__, (rc == 0) ? "success" : "failed"); + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) + mpt3sas_base_fault_info(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + if (rc && (doorbell & MPI2_IOC_STATE_MASK) != + MPI2_IOC_STATE_OPERATIONAL) + return; /* don't rearm timer */ + } + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + rearm_timer: + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +/** + * mpt3sas_base_start_watchdog - start the fault_reset_work_q + * @ioc: per adapter object + * Context: sleep. + * + * Return nothing. + */ +void +mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->fault_reset_work_q) + return; + + /* initialize fault polling */ + + INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); + snprintf(ioc->fault_reset_work_q_name, + sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id); + ioc->fault_reset_work_q = + create_singlethread_workqueue(ioc->fault_reset_work_q_name); + if (!ioc->fault_reset_work_q) { + pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n", + ioc->name, __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +/** + * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q + * @ioc: per adapter object + * Context: sleep. + * + * Return nothing. 
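The watchdog pair above is the classic self-rearming delayed-work pattern: the handler re-queues itself only while the workqueue pointer is non-NULL, and teardown (next function) clears that pointer under the lock before cancelling, so no new work can slip in. The skeleton, with hypothetical names:

struct example_dev {
	spinlock_t lock;
	struct workqueue_struct *poll_wq;
	struct delayed_work poll_work;
};

static void example_poll_work(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, poll_work.work);
	unsigned long flags;

	/* ... inspect hardware state here ... */

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->poll_wq)	/* NULL once teardown has started */
		queue_delayed_work(dev->poll_wq, &dev->poll_work,
				   msecs_to_jiffies(1000));
	spin_unlock_irqrestore(&dev->lock, flags);
}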
+ */ +void +mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + wq = ioc->fault_reset_work_q; + ioc->fault_reset_work_q = NULL; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (wq) { + if (!cancel_delayed_work(&ioc->fault_reset_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +/** + * mpt3sas_base_fault_info - verbose translation of firmware FAULT code + * @ioc: per adapter object + * @fault_code: fault code + * + * Return nothing. + */ +void +mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code) +{ + pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n", + ioc->name, fault_code); +} + +/** + * mpt3sas_halt_firmware - halt's mpt controller firmware + * @ioc: per adapter object + * + * For debugging timeout related issues. Writing 0xCOFFEE00 + * to the doorbell register will halt controller firmware. With + * the purpose to stop both driver and firmware, the enduser can + * obtain a ring buffer from controller UART. + */ +void +mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc) +{ + u32 doorbell; + + if (!ioc->fwfault_debug) + return; + + dump_stack(); + + doorbell = readl(&ioc->chip->Doorbell); + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) + mpt3sas_base_fault_info(ioc , doorbell); + else { + writel(0xC0FFEE00, &ioc->chip->Doorbell); + pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n", + ioc->name); + } + + if (ioc->fwfault_debug == 2) + for (;;) + ; + else + panic("panic in %s\n", __func__); +} + +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING +/** + * _base_sas_ioc_info - verbose translation of the ioc status + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @request_hdr: request mf + * + * Return nothing. 
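Several functions above reduce the doorbell register to an IOC state with the same mask; the driver wraps that in mpt3sas_base_get_iocstate(), but the idiom is worth seeing in isolation. A hypothetical condensed form:

static int example_ioc_is_operational(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell = readl(&ioc->chip->Doorbell);

	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		/* in the FAULT state the low 16 bits carry the fault code */
		mpt3sas_base_fault_info(ioc,
			doorbell & MPI2_DOORBELL_DATA_MASK);
		return 0;
	}
	return (doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL;
}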
+ */ +static void +_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, + MPI2RequestHeader_t *request_hdr) +{ + u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & + MPI2_IOCSTATUS_MASK; + char *desc = NULL; + u16 frame_sz; + char *func_str = NULL; + + /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */ + if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || + request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION) + return; + + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + return; + + switch (ioc_status) { + +/**************************************************************************** +* Common IOCStatus values for all replies +****************************************************************************/ + + case MPI2_IOCSTATUS_INVALID_FUNCTION: + desc = "invalid function"; + break; + case MPI2_IOCSTATUS_BUSY: + desc = "busy"; + break; + case MPI2_IOCSTATUS_INVALID_SGL: + desc = "invalid sgl"; + break; + case MPI2_IOCSTATUS_INTERNAL_ERROR: + desc = "internal error"; + break; + case MPI2_IOCSTATUS_INVALID_VPID: + desc = "invalid vpid"; + break; + case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: + desc = "insufficient resources"; + break; + case MPI2_IOCSTATUS_INVALID_FIELD: + desc = "invalid field"; + break; + case MPI2_IOCSTATUS_INVALID_STATE: + desc = "invalid state"; + break; + case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED: + desc = "op state not supported"; + break; + +/**************************************************************************** +* Config IOCStatus values +****************************************************************************/ + + case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION: + desc = "config invalid action"; + break; + case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE: + desc = "config invalid type"; + break; + case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE: + desc = "config invalid page"; + break; + case MPI2_IOCSTATUS_CONFIG_INVALID_DATA: + desc = "config invalid data"; + break; + case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS: + desc = "config no defaults"; + break; + case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT: + desc = "config cant commit"; + break; + +/**************************************************************************** +* SCSI IO Reply +****************************************************************************/ + + case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: + case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: + case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: + case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: + case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: + case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: + break; + +/**************************************************************************** +* For use by SCSI Initiator and SCSI Target end-to-end data protection +****************************************************************************/ + + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + desc = "eedp guard error"; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + desc = "eedp ref tag error"; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + desc = "eedp app tag error"; + break; + +/**************************************************************************** +* SCSI Target values 
+****************************************************************************/ + + case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX: + desc = "target invalid io index"; + break; + case MPI2_IOCSTATUS_TARGET_ABORTED: + desc = "target aborted"; + break; + case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE: + desc = "target no conn retryable"; + break; + case MPI2_IOCSTATUS_TARGET_NO_CONNECTION: + desc = "target no connection"; + break; + case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH: + desc = "target xfer count mismatch"; + break; + case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR: + desc = "target data offset error"; + break; + case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA: + desc = "target too much write data"; + break; + case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT: + desc = "target iu too short"; + break; + case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT: + desc = "target ack nak timeout"; + break; + case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED: + desc = "target nak received"; + break; + +/**************************************************************************** +* Serial Attached SCSI values +****************************************************************************/ + + case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED: + desc = "smp request failed"; + break; + case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN: + desc = "smp data overrun"; + break; + +/**************************************************************************** +* Diagnostic Buffer Post / Diagnostic Release values +****************************************************************************/ + + case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED: + desc = "diagnostic released"; + break; + default: + break; + } + + if (!desc) + return; + + switch (request_hdr->Function) { + case MPI2_FUNCTION_CONFIG: + frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size; + func_str = "config_page"; + break; + case MPI2_FUNCTION_SCSI_TASK_MGMT: + frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t); + func_str = "task_mgmt"; + break; + case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: + frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t); + func_str = "sas_iounit_ctl"; + break; + case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: + frame_sz = sizeof(Mpi2SepRequest_t); + func_str = "enclosure"; + break; + case MPI2_FUNCTION_IOC_INIT: + frame_sz = sizeof(Mpi2IOCInitRequest_t); + func_str = "ioc_init"; + break; + case MPI2_FUNCTION_PORT_ENABLE: + frame_sz = sizeof(Mpi2PortEnableRequest_t); + func_str = "port_enable"; + break; + case MPI2_FUNCTION_SMP_PASSTHROUGH: + frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size; + func_str = "smp_passthru"; + break; + default: + frame_sz = 32; + func_str = "unknown"; + break; + } + + pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n", + ioc->name, desc, ioc_status, request_hdr, func_str); + + _debug_dump_mf(request_hdr, frame_sz/4); +} + +/** + * _base_display_event_data - verbose translation of firmware asyn events + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * + * Return nothing. 
+ */ +static void +_base_display_event_data(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventNotificationReply_t *mpi_reply) +{ + char *desc = NULL; + u16 event; + + if (!(ioc->logging_level & MPT_DEBUG_EVENTS)) + return; + + event = le16_to_cpu(mpi_reply->Event); + + switch (event) { + case MPI2_EVENT_LOG_DATA: + desc = "Log Data"; + break; + case MPI2_EVENT_STATE_CHANGE: + desc = "Status Change"; + break; + case MPI2_EVENT_HARD_RESET_RECEIVED: + desc = "Hard Reset Received"; + break; + case MPI2_EVENT_EVENT_CHANGE: + desc = "Event Change"; + break; + case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: + desc = "Device Status Change"; + break; + case MPI2_EVENT_IR_OPERATION_STATUS: + desc = "IR Operation Status"; + break; + case MPI2_EVENT_SAS_DISCOVERY: + { + Mpi2EventDataSasDiscovery_t *event_data = + (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData; + pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name, + (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ? + "start" : "stop"); + if (event_data->DiscoveryStatus) + pr_info("discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + pr_info("\n"); + return; + } + case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: + desc = "SAS Broadcast Primitive"; + break; + case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: + desc = "SAS Init Device Status Change"; + break; + case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW: + desc = "SAS Init Table Overflow"; + break; + case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + desc = "SAS Topology Change List"; + break; + case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + desc = "SAS Enclosure Device Status Change"; + break; + case MPI2_EVENT_IR_VOLUME: + desc = "IR Volume"; + break; + case MPI2_EVENT_IR_PHYSICAL_DISK: + desc = "IR Physical Disk"; + break; + case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: + desc = "IR Configuration Change List"; + break; + case MPI2_EVENT_LOG_ENTRY_ADDED: + desc = "Log Entry Added"; + break; + } + + if (!desc) + return; + + pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc); +} +#endif + +/** + * _base_sas_log_info - verbose translation of firmware log info + * @ioc: per adapter object + * @log_info: log info + * + * Return nothing. + */ +static void +_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info) +{ + union loginfo_type { + u32 loginfo; + struct { + u32 subcode:16; + u32 code:8; + u32 originator:4; + u32 bus_type:4; + } dw; + }; + union loginfo_type sas_loginfo; + char *originator_str = NULL; + + sas_loginfo.loginfo = log_info; + if (sas_loginfo.dw.bus_type != 3 /*SAS*/) + return; + + /* each nexus loss loginfo */ + if (log_info == 0x31170000) + return; + + /* eat the loginfos associated with task aborts */ + if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info == + 0x31140000 || log_info == 0x31130000)) + return; + + switch (sas_loginfo.dw.originator) { + case 0: + originator_str = "IOP"; + break; + case 1: + originator_str = "PL"; + break; + case 2: + originator_str = "IR"; + break; + } + + pr_warn(MPT3SAS_FMT + "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n", + ioc->name, log_info, + originator_str, sas_loginfo.dw.code, + sas_loginfo.dw.subcode); +} + +/** + * _base_display_reply_info - + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Return nothing. 
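+ *
+ * Note the loginfo layout decoded by _base_sas_log_info() above:
+ * bus_type in bits 31:28, originator in 27:24, code in 23:16 and
+ * subcode in 15:0. As a worked example (the value itself is
+ * illustrative), log_info 0x31120101 decodes to bus_type 3 (SAS),
+ * originator 1 (PL), code 0x12 and sub_code 0x0101.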
+ */ +static void +_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + u16 ioc_status; + u32 loginfo = 0; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (unlikely(!mpi_reply)) { + pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING + if ((ioc_status & MPI2_IOCSTATUS_MASK) && + (ioc->logging_level & MPT_DEBUG_REPLY)) { + _base_sas_ioc_info(ioc , mpi_reply, + mpt3sas_base_get_msg_frame(ioc, smid)); + } +#endif + if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { + loginfo = le32_to_cpu(mpi_reply->IOCLogInfo); + _base_sas_log_info(ioc, loginfo); + } + + if (ioc_status || loginfo) { + ioc_status &= MPI2_IOCSTATUS_MASK; + mpt3sas_trigger_mpi(ioc, ioc_status, loginfo); + } +} + +/** + * mpt3sas_base_done - base internal command completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK) + return 1; + + if (ioc->base_cmds.status == MPT3_CMD_NOT_USED) + return 1; + + ioc->base_cmds.status |= MPT3_CMD_COMPLETE; + if (mpi_reply) { + ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID; + memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); + } + ioc->base_cmds.status &= ~MPT3_CMD_PENDING; + + complete(&ioc->base_cmds.done); + return 1; +} + +/** + * _base_async_event - main callback handler for firmware asyn events + * @ioc: per adapter object + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
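+ *
+ * Events flagged with MPI2_EVENT_NOTIFICATION_ACK_REQUIRED must be
+ * acknowledged back to the firmware. A minimal sketch of the ack this
+ * routine builds (same fields as the code below, error handling
+ * omitted):
+ *
+ *	Mpi2EventAckRequest_t *ack = mpt3sas_base_get_msg_frame(ioc, smid);
+ *	memset(ack, 0, sizeof(*ack));
+ *	ack->Function = MPI2_FUNCTION_EVENT_ACK;
+ *	ack->Event = mpi_reply->Event;
+ *	ack->EventContext = mpi_reply->EventContext;
+ *	mpt3sas_base_put_smid_default(ioc, smid);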
+ */ +static u8 +_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) +{ + Mpi2EventNotificationReply_t *mpi_reply; + Mpi2EventAckRequest_t *ack_request; + u16 smid; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (!mpi_reply) + return 1; + if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION) + return 1; +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING + _base_display_event_data(ioc, mpi_reply); +#endif + if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED)) + goto out; + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + goto out; + } + + ack_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t)); + ack_request->Function = MPI2_FUNCTION_EVENT_ACK; + ack_request->Event = mpi_reply->Event; + ack_request->EventContext = mpi_reply->EventContext; + ack_request->VF_ID = 0; /* TODO */ + ack_request->VP_ID = 0; + mpt3sas_base_put_smid_default(ioc, smid); + + out: + + /* scsih callback handler */ + mpt3sas_scsih_event_callback(ioc, msix_index, reply); + + /* ctl callback handler */ + mpt3sas_ctl_event_callback(ioc, msix_index, reply); + + return 1; +} + +/** + * _base_get_cb_idx - obtain the callback index + * @ioc: per adapter object + * @smid: system request message index + * + * Return callback index. + */ +static u8 +_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + int i; + u8 cb_idx; + + if (smid < ioc->hi_priority_smid) { + i = smid - 1; + cb_idx = ioc->scsi_lookup[i].cb_idx; + } else if (smid < ioc->internal_smid) { + i = smid - ioc->hi_priority_smid; + cb_idx = ioc->hpr_lookup[i].cb_idx; + } else if (smid <= ioc->hba_queue_depth) { + i = smid - ioc->internal_smid; + cb_idx = ioc->internal_lookup[i].cb_idx; + } else + cb_idx = 0xFF; + return cb_idx; +} + +/** + * _base_mask_interrupts - disable interrupts + * @ioc: per adapter object + * + * Disabling ResetIRQ, Reply and Doorbell Interrupts + * + * Return nothing. + */ +static void +_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) +{ + u32 him_register; + + ioc->mask_interrupts = 1; + him_register = readl(&ioc->chip->HostInterruptMask); + him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK; + writel(him_register, &ioc->chip->HostInterruptMask); + readl(&ioc->chip->HostInterruptMask); +} + +/** + * _base_unmask_interrupts - enable interrupts + * @ioc: per adapter object + * + * Enabling only Reply Interrupts + * + * Return nothing. + */ +static void +_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc) +{ + u32 him_register; + + him_register = readl(&ioc->chip->HostInterruptMask); + him_register &= ~MPI2_HIM_RIM; + writel(him_register, &ioc->chip->HostInterruptMask); + ioc->mask_interrupts = 0; +} + +union reply_descriptor { + u64 word; + struct { + u32 low; + u32 high; + } u; +}; + +/** + * _base_interrupt - MPT adapter (IOC) specific interrupt handler. + * @irq: irq number (not used) + * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure + * @r: pt_regs pointer (not used) + * + * Return IRQ_HANDLE if processed, else IRQ_NONE. 
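+ *
+ * The reply descriptor post queue is consumed as a circular buffer:
+ * each handled descriptor is marked unused by overwriting its Words
+ * with ULLONG_MAX, and the host index wraps at the queue depth:
+ *
+ *	idx = (idx == ioc->reply_post_queue_depth - 1) ? 0 : idx + 1;
+ *
+ * The loop stops at the first descriptor whose ReplyFlags type is
+ * MPI2_RPY_DESCRIPT_FLAGS_UNUSED, after which the updated index is
+ * written back to the ReplyPostHostIndex register.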
+ */ +static irqreturn_t +_base_interrupt(int irq, void *bus_id) +{ + struct adapter_reply_queue *reply_q = bus_id; + union reply_descriptor rd; + u32 completed_cmds; + u8 request_desript_type; + u16 smid; + u8 cb_idx; + u32 reply; + u8 msix_index = reply_q->msix_index; + struct MPT3SAS_ADAPTER *ioc = reply_q->ioc; + Mpi2ReplyDescriptorsUnion_t *rpf; + u8 rc; + + if (ioc->mask_interrupts) + return IRQ_NONE; + + if (!atomic_add_unless(&reply_q->busy, 1, 1)) + return IRQ_NONE; + + rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index]; + request_desript_type = rpf->Default.ReplyFlags + & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; + if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) { + atomic_dec(&reply_q->busy); + return IRQ_NONE; + } + + completed_cmds = 0; + cb_idx = 0xFF; + do { + rd.word = le64_to_cpu(rpf->Words); + if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) + goto out; + reply = 0; + smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1); + if (request_desript_type == + MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS || + request_desript_type == + MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { + cb_idx = _base_get_cb_idx(ioc, smid); + if ((likely(cb_idx < MPT_MAX_CALLBACKS)) && + (likely(mpt_callbacks[cb_idx] != NULL))) { + rc = mpt_callbacks[cb_idx](ioc, smid, + msix_index, 0); + if (rc) + mpt3sas_base_free_smid(ioc, smid); + } + } else if (request_desript_type == + MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { + reply = le32_to_cpu( + rpf->AddressReply.ReplyFrameAddress); + if (reply > ioc->reply_dma_max_address || + reply < ioc->reply_dma_min_address) + reply = 0; + if (smid) { + cb_idx = _base_get_cb_idx(ioc, smid); + if ((likely(cb_idx < MPT_MAX_CALLBACKS)) && + (likely(mpt_callbacks[cb_idx] != NULL))) { + rc = mpt_callbacks[cb_idx](ioc, smid, + msix_index, reply); + if (reply) + _base_display_reply_info(ioc, + smid, msix_index, reply); + if (rc) + mpt3sas_base_free_smid(ioc, + smid); + } + } else { + _base_async_event(ioc, msix_index, reply); + } + + /* reply free queue handling */ + if (reply) { + ioc->reply_free_host_index = + (ioc->reply_free_host_index == + (ioc->reply_free_queue_depth - 1)) ? + 0 : ioc->reply_free_host_index + 1; + ioc->reply_free[ioc->reply_free_host_index] = + cpu_to_le32(reply); + wmb(); + writel(ioc->reply_free_host_index, + &ioc->chip->ReplyFreeHostIndex); + } + } + + rpf->Words = cpu_to_le64(ULLONG_MAX); + reply_q->reply_post_host_index = + (reply_q->reply_post_host_index == + (ioc->reply_post_queue_depth - 1)) ? 0 : + reply_q->reply_post_host_index + 1; + request_desript_type = + reply_q->reply_post_free[reply_q->reply_post_host_index]. 
+	    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+		completed_cmds++;
+		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+			goto out;
+		if (!reply_q->reply_post_host_index)
+			rpf = reply_q->reply_post_free;
+		else
+			rpf++;
+	} while (1);
+
+ out:
+
+	if (!completed_cmds) {
+		atomic_dec(&reply_q->busy);
+		return IRQ_NONE;
+	}
+
+	wmb();
+	writel(reply_q->reply_post_host_index | (msix_index <<
+	    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
+	atomic_dec(&reply_q->busy);
+	return IRQ_HANDLED;
+}
+
+/**
+ * _base_is_controller_msix_enabled - check whether the controller supports
+ *	multi-reply queues
+ * @ioc: per adapter object
+ *
+ */
+static inline int
+_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
+{
+	return (ioc->facts.IOCCapabilities &
+	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
+}
+
+/**
+ * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
+ * @ioc: per adapter object
+ * Context: ISR context
+ *
+ * Called when a Task Management request has completed. We want
+ * to flush the other reply queues so all the outstanding IO has been
+ * completed back to the OS before we process the TM completion.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
+{
+	struct adapter_reply_queue *reply_q;
+
+	/* If MSIX capability is turned off
+	 * then multi-queues are not enabled
+	 */
+	if (!_base_is_controller_msix_enabled(ioc))
+		return;
+
+	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+		if (ioc->shost_recovery)
+			return;
+		/* TMs are on msix_index == 0 */
+		if (reply_q->msix_index == 0)
+			continue;
+		_base_interrupt(reply_q->vector, (void *)reply_q);
+	}
+}
+
+/**
+ * mpt3sas_base_release_callback_handler - clear interrupt callback handler
+ * @cb_idx: callback index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_release_callback_handler(u8 cb_idx)
+{
+	mpt_callbacks[cb_idx] = NULL;
+}
+
+/**
+ * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
+ * @cb_func: callback function
+ *
+ * Returns cb_idx.
+ */
+u8
+mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
+{
+	u8 cb_idx;
+
+	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
+		if (mpt_callbacks[cb_idx] == NULL)
+			break;
+
+	mpt_callbacks[cb_idx] = cb_func;
+	return cb_idx;
+}
+
+/**
+ * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_initialize_callback_handler(void)
+{
+	u8 cb_idx;
+
+	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
+		mpt3sas_base_release_callback_handler(cb_idx);
+}
+
+
+/**
+ * _base_build_zero_len_sge - build zero length sg entry
+ * @ioc: per adapter object
+ * @paddr: virtual address for SGE
+ *
+ * Create a zero length scatter gather entry to ensure the IOC's hardware has
+ * something to use if the target device goes brain dead and tries
+ * to send data even when none is asked for.
+ *
+ * Return nothing.
+ */
+static void
+_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
+{
+	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
+	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
+	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
+	    MPI2_SGE_FLAGS_SHIFT);
+	ioc->base_add_sg_single(paddr, flags_length, -1);
+}
+
+/**
+ * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
+ * @paddr: virtual address for SGE
+ * @flags_length: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
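+ *
+ * The MPI SGE packs flags and length into a single 32 bit word with
+ * the flags in the top byte. A hypothetical 512 byte simple element
+ * would be built as
+ *
+ *	u32 fl = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT <<
+ *	    MPI2_SGE_FLAGS_SHIFT) | 512;
+ *	_base_add_sg_single_32(psge, fl, dma_addr);
+ *
+ * with the 32 bit addressing/system address flags OR'd in by this
+ * routine itself.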
+ */ +static void +_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr) +{ + Mpi2SGESimple32_t *sgel = paddr; + + flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING | + MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; + sgel->FlagsLength = cpu_to_le32(flags_length); + sgel->Address = cpu_to_le32(dma_addr); +} + + +/** + * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr. + * @paddr: virtual address for SGE + * @flags_length: SGE flags and data transfer length + * @dma_addr: Physical address + * + * Return nothing. + */ +static void +_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr) +{ + Mpi2SGESimple64_t *sgel = paddr; + + flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING | + MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; + sgel->FlagsLength = cpu_to_le32(flags_length); + sgel->Address = cpu_to_le64(dma_addr); +} + +/** + * _base_get_chain_buffer_tracker - obtain chain tracker + * @ioc: per adapter object + * @smid: smid associated to an IO request + * + * Returns chain tracker(from ioc->free_chain_list) + */ +static struct chain_tracker * +_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct chain_tracker *chain_req; + unsigned long flags; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->free_chain_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "chain buffers not available\n", ioc->name)); + return NULL; + } + chain_req = list_entry(ioc->free_chain_list.next, + struct chain_tracker, tracker_list); + list_del_init(&chain_req->tracker_list); + list_add_tail(&chain_req->tracker_list, + &ioc->scsi_lookup[smid - 1].chain_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return chain_req; +} + + +/** + * _base_build_sg - build generic sg + * @ioc: per adapter object + * @psge: virtual address for SGE + * @data_out_dma: physical address for WRITES + * @data_out_sz: data xfer size for WRITES + * @data_in_dma: physical address for READS + * @data_in_sz: data xfer size for READS + * + * Return nothing. 
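+ *
+ * The WRITE element is always placed first and the READ element,
+ * carrying the end-of-list flags, last. A hypothetical caller moving
+ * 512 bytes out and 256 bytes in would invoke
+ *
+ *	_base_build_sg(ioc, psge, data_out_dma, 512, data_in_dma, 256);
+ *
+ * and when both sizes are zero a zero length SGE is substituted so
+ * the firmware always has a valid element to walk.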
+ */ +static void +_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, + size_t data_in_sz) +{ + u32 sgl_flags; + + if (!data_out_sz && !data_in_sz) { + _base_build_zero_len_sge(ioc, psge); + return; + } + + if (data_out_sz && data_in_sz) { + /* WRITE sgel first */ + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_out_sz, data_out_dma); + + /* incr sgel */ + psge += ioc->sge_size; + + /* READ sgel last */ + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_in_sz, data_in_dma); + } else if (data_out_sz) /* WRITE */ { + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_out_sz, data_out_dma); + } else if (data_in_sz) /* READ */ { + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_in_sz, data_in_dma); + } +} + +/* IEEE format sgls */ + +/** + * _base_add_sg_single_ieee - add sg element for IEEE format + * @paddr: virtual address for SGE + * @flags: SGE flags + * @chain_offset: number of 128 byte elements from start of segment + * @length: data transfer length + * @dma_addr: Physical address + * + * Return nothing. + */ +static void +_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length, + dma_addr_t dma_addr) +{ + Mpi25IeeeSgeChain64_t *sgel = paddr; + + sgel->Flags = flags; + sgel->NextChainOffset = chain_offset; + sgel->Length = cpu_to_le32(length); + sgel->Address = cpu_to_le64(dma_addr); +} + +/** + * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format + * @ioc: per adapter object + * @paddr: virtual address for SGE + * + * Create a zero length scatter gather entry to insure the IOCs hardware has + * something to use if the target device goes brain dead and tries + * to send data even when none is asked for. + * + * Return nothing. + */ +static void +_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) +{ + u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST); + _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); +} + +/** + * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format + * @ioc: per adapter object + * @scmd: scsi command + * @smid: system request message index + * Context: none. + * + * The main routine that builds scatter gather table from a given + * scsi request sent via the .queuecommand main handler. 
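+ *
+ * The first (request_sz - offsetof(Mpi2SCSIIORequest_t, SGL)) /
+ * sge_size_ieee elements are placed in the request frame itself; once
+ * those are exhausted, the last main-frame slot is turned into a
+ * chain element pointing at a chain buffer, and chain buffers are
+ * linked until every mapped segment is emitted. For example, with a
+ * hypothetical 128 byte request frame, a 96 byte SGL offset and
+ * 16 byte IEEE SGEs, only two SGEs fit before chaining starts.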
+ * + * Returns 0 success, anything else error + */ +static int +_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd, u16 smid) +{ + Mpi2SCSIIORequest_t *mpi_request; + dma_addr_t chain_dma; + struct scatterlist *sg_scmd; + void *sg_local, *chain; + u32 chain_offset; + u32 chain_length; + int sges_left; + u32 sges_in_segment; + u8 simple_sgl_flags; + u8 simple_sgl_flags_last; + u8 chain_sgl_flags; + struct chain_tracker *chain_req; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + /* init scatter gather flags */ + simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + simple_sgl_flags_last = simple_sgl_flags | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST; + chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + + sg_scmd = scsi_sglist(scmd); + sges_left = scsi_dma_map(scmd); + if (!sges_left) { + sdev_printk(KERN_ERR, scmd->device, + "pci_map_sg failed: request for %d bytes!\n", + scsi_bufflen(scmd)); + return -ENOMEM; + } + + sg_local = &mpi_request->SGL; + sges_in_segment = (ioc->request_sz - + offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee; + if (sges_left <= sges_in_segment) + goto fill_in_last_segment; + + mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) + + (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee); + + /* fill in main message segment when there is a chain following */ + while (sges_in_segment > 1) { + _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + sges_in_segment--; + } + + /* initializing the pointers */ + chain_req = _base_get_chain_buffer_tracker(ioc, smid); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + do { + sges_in_segment = (sges_left <= + ioc->max_sges_in_chain_message) ? sges_left : + ioc->max_sges_in_chain_message; + chain_offset = (sges_left == sges_in_segment) ? 
+ 0 : sges_in_segment; + chain_length = sges_in_segment * ioc->sge_size_ieee; + if (chain_offset) + chain_length += ioc->sge_size_ieee; + _base_add_sg_single_ieee(sg_local, chain_sgl_flags, + chain_offset, chain_length, chain_dma); + + sg_local = chain; + if (!chain_offset) + goto fill_in_last_segment; + + /* fill in chain segments */ + while (sges_in_segment) { + _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + sges_in_segment--; + } + + chain_req = _base_get_chain_buffer_tracker(ioc, smid); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + } while (1); + + + fill_in_last_segment: + + /* fill the last segment */ + while (sges_left) { + if (sges_left == 1) + _base_add_sg_single_ieee(sg_local, + simple_sgl_flags_last, 0, sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + else + _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + } + + return 0; +} + +/** + * _base_build_sg_ieee - build generic sg for IEEE format + * @ioc: per adapter object + * @psge: virtual address for SGE + * @data_out_dma: physical address for WRITES + * @data_out_sz: data xfer size for WRITES + * @data_in_dma: physical address for READS + * @data_in_sz: data xfer size for READS + * + * Return nothing. + */ +static void +_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, + size_t data_in_sz) +{ + u8 sgl_flags; + + if (!data_out_sz && !data_in_sz) { + _base_build_zero_len_sge_ieee(ioc, psge); + return; + } + + if (data_out_sz && data_in_sz) { + /* WRITE sgel first */ + sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, + data_out_dma); + + /* incr sgel */ + psge += ioc->sge_size_ieee; + + /* READ sgel last */ + sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, + data_in_dma); + } else if (data_out_sz) /* WRITE */ { + sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, + data_out_dma); + } else if (data_in_sz) /* READ */ { + sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, + data_in_dma); + } +} + +#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10)) + +/** + * _base_config_dma_addressing - set dma addressing + * @ioc: per adapter object + * @pdev: PCI device struct + * + * Returns 0 for success, non-zero for failure. 
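+ *
+ * Selection order, summarized (the function body below is the
+ * authoritative logic):
+ *
+ *	64 bit dma_addr_t, required_mask > 32 bits, 64 bit masks accepted
+ *		-> _base_add_sg_single_64, sge_size = sizeof(Mpi2SGESimple64_t)
+ *	else 32 bit masks accepted
+ *		-> _base_add_sg_single_32, sge_size = sizeof(Mpi2SGESimple32_t)
+ *	else
+ *		-> -ENODEV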
+ */ +static int +_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) +{ + struct sysinfo s; + char *desc = NULL; + + if (sizeof(dma_addr_t) > 4) { + const uint64_t required_mask = + dma_get_required_mask(&pdev->dev); + if ((required_mask > DMA_BIT_MASK(32)) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + ioc->base_add_sg_single = &_base_add_sg_single_64; + ioc->sge_size = sizeof(Mpi2SGESimple64_t); + desc = "64"; + goto out; + } + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) + && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { + ioc->base_add_sg_single = &_base_add_sg_single_32; + ioc->sge_size = sizeof(Mpi2SGESimple32_t); + desc = "32"; + } else + return -ENODEV; + + out: + si_meminfo(&s); + pr_info(MPT3SAS_FMT + "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", + ioc->name, desc, convert_to_kb(s.totalram)); + + return 0; +} + +/** + * _base_check_enable_msix - checks MSIX capabable. + * @ioc: per adapter object + * + * Check to see if card is capable of MSIX, and set number + * of available msix vectors + */ +static int +_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc) +{ + int base; + u16 message_control; + + base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); + if (!base) { + dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n", + ioc->name)); + return -EINVAL; + } + + /* get msix vector count */ + + pci_read_config_word(ioc->pdev, base + 2, &message_control); + ioc->msix_vector_count = (message_control & 0x3FF) + 1; + if (ioc->msix_vector_count > 8) + ioc->msix_vector_count = 8; + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "msix is supported, vector_count(%d)\n", + ioc->name, ioc->msix_vector_count)); + return 0; +} + +/** + * _base_free_irq - free irq + * @ioc: per adapter object + * + * Freeing respective reply_queue from the list. + */ +static void +_base_free_irq(struct MPT3SAS_ADAPTER *ioc) +{ + struct adapter_reply_queue *reply_q, *next; + + if (list_empty(&ioc->reply_queue_list)) + return; + + list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { + list_del(&reply_q->list); + synchronize_irq(reply_q->vector); + free_irq(reply_q->vector, reply_q); + kfree(reply_q); + } +} + +/** + * _base_request_irq - request irq + * @ioc: per adapter object + * @index: msix index into vector table + * @vector: irq vector + * + * Inserting respective reply_queue into the list. 
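+ *
+ * The handler registered is always _base_interrupt (IRQF_SHARED).
+ * The queue name encodes the adapter id and, when MSI-X is enabled,
+ * the vector index: e.g. "mpt3sas0-msix2" for a hypothetical adapter
+ * 0 on vector 2, or plain "mpt3sas0" in the io_apic case.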
+ */
+static int
+_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
+{
+	struct adapter_reply_queue *reply_q;
+	int r;
+
+	reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
+	if (!reply_q) {
+		pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
+		    ioc->name, (int)sizeof(struct adapter_reply_queue));
+		return -ENOMEM;
+	}
+	reply_q->ioc = ioc;
+	reply_q->msix_index = index;
+	reply_q->vector = vector;
+	atomic_set(&reply_q->busy, 0);
+	if (ioc->msix_enable)
+		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
+		    MPT3SAS_DRIVER_NAME, ioc->id, index);
+	else
+		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
+		    MPT3SAS_DRIVER_NAME, ioc->id);
+	r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
+	    reply_q);
+	if (r) {
+		pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
+		    reply_q->name, vector);
+		kfree(reply_q);
+		return -EBUSY;
+	}
+
+	INIT_LIST_HEAD(&reply_q->list);
+	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
+	return 0;
+}
+
+/**
+ * _base_assign_reply_queues - assigning msix index for each cpu
+ * @ioc: per adapter object
+ *
+ * The end user would need to set the affinity via /proc/irq/#/smp_affinity
+ *
+ * It would be nice if we could call irq_set_affinity, however it is not
+ * an exported symbol
+ */
+static void
+_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
+{
+	struct adapter_reply_queue *reply_q;
+	int cpu_id;
+	int cpu_grouping, loop, grouping, grouping_mod;
+	int reply_queue;
+
+	if (!_base_is_controller_msix_enabled(ioc))
+		return;
+
+	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
+
+	/* NUMA Hardware bug workaround - drop to fewer reply queues */
+	if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) {
+		ioc->reply_queue_count = ioc->facts.MaxMSIxVectors;
+		reply_queue = 0;
+		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+			reply_q->msix_index = reply_queue;
+			if (++reply_queue == ioc->reply_queue_count)
+				reply_queue = 0;
+		}
+	}
+
+	/* when there are more cpus than available msix vectors,
+	 * then group cpus together on the same irq
+	 */
+	if (ioc->cpu_count > ioc->msix_vector_count) {
+		grouping = ioc->cpu_count / ioc->msix_vector_count;
+		grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
+		if (grouping < 2 || (grouping == 2 && !grouping_mod))
+			cpu_grouping = 2;
+		else if (grouping < 4 || (grouping == 4 && !grouping_mod))
+			cpu_grouping = 4;
+		else if (grouping < 8 || (grouping == 8 && !grouping_mod))
+			cpu_grouping = 8;
+		else
+			cpu_grouping = 16;
+	} else
+		cpu_grouping = 0;
+
+	loop = 0;
+	reply_q = list_entry(ioc->reply_queue_list.next,
+	    struct adapter_reply_queue, list);
+	for_each_online_cpu(cpu_id) {
+		if (!cpu_grouping) {
+			ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
+			reply_q = list_entry(reply_q->list.next,
+			    struct adapter_reply_queue, list);
+		} else {
+			if (loop < cpu_grouping) {
+				ioc->cpu_msix_table[cpu_id] =
+				    reply_q->msix_index;
+				loop++;
+			} else {
+				reply_q = list_entry(reply_q->list.next,
+				    struct adapter_reply_queue, list);
+				ioc->cpu_msix_table[cpu_id] =
+				    reply_q->msix_index;
+				loop = 1;
+			}
+		}
+	}
+}
+
+/**
+ * _base_disable_msix - disables msix
+ * @ioc: per adapter object
+ *
+ */
+static void
+_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+	if (!ioc->msix_enable)
+		return;
+	pci_disable_msix(ioc->pdev);
+	ioc->msix_enable = 0;
+}
+
+/**
+ * _base_enable_msix - enables msix, falling back to io_apic on failure
+ * @ioc: per adapter object
+ *
+ */
+static int
+_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+	struct msix_entry *entries,
*a; + int r; + int i; + u8 try_msix = 0; + + INIT_LIST_HEAD(&ioc->reply_queue_list); + + if (msix_disable == -1 || msix_disable == 0) + try_msix = 1; + + if (!try_msix) + goto try_ioapic; + + if (_base_check_enable_msix(ioc) != 0) + goto try_ioapic; + + ioc->reply_queue_count = min_t(int, ioc->cpu_count, + ioc->msix_vector_count); + + entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), + GFP_KERNEL); + if (!entries) { + dfailprintk(ioc, pr_info(MPT3SAS_FMT + "kcalloc failed @ at %s:%d/%s() !!!\n", + ioc->name, __FILE__, __LINE__, __func__)); + goto try_ioapic; + } + + for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) + a->entry = i; + + r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count); + if (r) { + dfailprintk(ioc, pr_info(MPT3SAS_FMT + "pci_enable_msix failed (r=%d) !!!\n", + ioc->name, r)); + kfree(entries); + goto try_ioapic; + } + + ioc->msix_enable = 1; + for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) { + r = _base_request_irq(ioc, i, a->vector); + if (r) { + _base_free_irq(ioc); + _base_disable_msix(ioc); + kfree(entries); + goto try_ioapic; + } + } + + kfree(entries); + return 0; + +/* failback to io_apic interrupt routing */ + try_ioapic: + + r = _base_request_irq(ioc, 0, ioc->pdev->irq); + + return r; +} + +/** + * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) + * @ioc: per adapter object + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + u32 memap_sz; + u32 pio_sz; + int i, r = 0; + u64 pio_chip = 0; + u64 chip_phys = 0; + struct adapter_reply_queue *reply_q; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", + ioc->name, __func__)); + + ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (pci_enable_device_mem(pdev)) { + pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n", + ioc->name); + return -ENODEV; + } + + + if (pci_request_selected_regions(pdev, ioc->bars, + MPT3SAS_DRIVER_NAME)) { + pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n", + ioc->name); + r = -ENODEV; + goto out_fail; + } + +/* AER (Advanced Error Reporting) hooks */ + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + + if (_base_config_dma_addressing(ioc, pdev) != 0) { + pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n", + ioc->name, pci_name(pdev)); + r = -ENODEV; + goto out_fail; + } + + for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) { + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + if (pio_sz) + continue; + pio_chip = (u64)pci_resource_start(pdev, i); + pio_sz = pci_resource_len(pdev, i); + } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { + if (memap_sz) + continue; + ioc->chip_phys = pci_resource_start(pdev, i); + chip_phys = (u64)ioc->chip_phys; + memap_sz = pci_resource_len(pdev, i); + ioc->chip = ioremap(ioc->chip_phys, memap_sz); + if (ioc->chip == NULL) { + pr_err(MPT3SAS_FMT "unable to map adapter memory!\n", + ioc->name); + r = -EINVAL; + goto out_fail; + } + } + } + + _base_mask_interrupts(ioc); + r = _base_enable_msix(ioc); + if (r) + goto out_fail; + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) + pr_info(MPT3SAS_FMT "%s: IRQ %d\n", + reply_q->name, ((ioc->msix_enable) ? 
"PCI-MSI-X enabled" : + "IO-APIC enabled"), reply_q->vector); + + pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n", + ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz); + pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n", + ioc->name, (unsigned long long)pio_chip, pio_sz); + + /* Save PCI configuration state for recovery from PCI AER/EEH errors */ + pci_save_state(pdev); + return 0; + + out_fail: + if (ioc->chip_phys) + iounmap(ioc->chip); + ioc->chip_phys = 0; + pci_release_selected_regions(ioc->pdev, ioc->bars); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + return r; +} + +/** + * mpt3sas_base_get_msg_frame - obtain request mf pointer + * @ioc: per adapter object + * @smid: system request message index(smid zero is invalid) + * + * Returns virt pointer to message frame. + */ +void * +mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->request + (smid * ioc->request_sz)); +} + +/** + * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr + * @ioc: per adapter object + * @smid: system request message index + * + * Returns virt pointer to sense buffer. + */ +void * +mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); +} + +/** + * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr + * @ioc: per adapter object + * @smid: system request message index + * + * Returns phys pointer to the low 32bit address of the sense buffer. + */ +__le32 +mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return cpu_to_le32(ioc->sense_dma + ((smid - 1) * + SCSI_SENSE_BUFFERSIZE)); +} + +/** + * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address + * @ioc: per adapter object + * @phys_addr: lower 32 physical addr of the reply + * + * Converts 32bit lower physical addr into a virt address. 
+ */ +void * +mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr) +{ + if (!phys_addr) + return NULL; + return ioc->reply + (phys_addr - (u32)ioc->reply_dma); +} + +/** + * mpt3sas_base_get_smid - obtain a free smid from internal queue + * @ioc: per adapter object + * @cb_idx: callback index + * + * Returns smid (zero is invalid) + */ +u16 +mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) +{ + unsigned long flags; + struct request_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->internal_free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + pr_err(MPT3SAS_FMT "%s: smid not available\n", + ioc->name, __func__); + return 0; + } + + request = list_entry(ioc->internal_free_list.next, + struct request_tracker, tracker_list); + request->cb_idx = cb_idx; + smid = request->smid; + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +/** + * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue + * @ioc: per adapter object + * @cb_idx: callback index + * @scmd: pointer to scsi command object + * + * Returns smid (zero is invalid) + */ +u16 +mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, + struct scsi_cmnd *scmd) +{ + unsigned long flags; + struct scsiio_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + pr_err(MPT3SAS_FMT "%s: smid not available\n", + ioc->name, __func__); + return 0; + } + + request = list_entry(ioc->free_list.next, + struct scsiio_tracker, tracker_list); + request->scmd = scmd; + request->cb_idx = cb_idx; + smid = request->smid; + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +/** + * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue + * @ioc: per adapter object + * @cb_idx: callback index + * + * Returns smid (zero is invalid) + */ +u16 +mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) +{ + unsigned long flags; + struct request_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return 0; + } + + request = list_entry(ioc->hpr_free_list.next, + struct request_tracker, tracker_list); + request->cb_idx = cb_idx; + smid = request->smid; + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +/** + * mpt3sas_base_free_smid - put smid back on free_list + * @ioc: per adapter object + * @smid: system request message index + * + * Return nothing. 
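+ *
+ * The smid space is split into three contiguous ranges, which is how
+ * this routine (like _base_get_cb_idx) picks the free list to return
+ * the tracker to:
+ *
+ *	1 .. hi_priority_smid - 1		scsiio queue
+ *	hi_priority_smid .. internal_smid - 1	hi-priority queue
+ *	internal_smid .. hba_queue_depth	internal queue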
+ */ +void +mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + unsigned long flags; + int i; + struct chain_tracker *chain_req, *next; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (smid < ioc->hi_priority_smid) { + /* scsiio queue */ + i = smid - 1; + if (!list_empty(&ioc->scsi_lookup[i].chain_list)) { + list_for_each_entry_safe(chain_req, next, + &ioc->scsi_lookup[i].chain_list, tracker_list) { + list_del_init(&chain_req->tracker_list); + list_add(&chain_req->tracker_list, + &ioc->free_chain_list); + } + } + ioc->scsi_lookup[i].cb_idx = 0xFF; + ioc->scsi_lookup[i].scmd = NULL; + list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + /* + * See _wait_for_commands_to_complete() call with regards + * to this code. + */ + if (ioc->shost_recovery && ioc->pending_io_count) { + if (ioc->pending_io_count == 1) + wake_up(&ioc->reset_wq); + ioc->pending_io_count--; + } + return; + } else if (smid < ioc->internal_smid) { + /* hi-priority */ + i = smid - ioc->hi_priority_smid; + ioc->hpr_lookup[i].cb_idx = 0xFF; + list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list); + } else if (smid <= ioc->hba_queue_depth) { + /* internal queue */ + i = smid - ioc->internal_smid; + ioc->internal_lookup[i].cb_idx = 0xFF; + list_add(&ioc->internal_lookup[i].tracker_list, + &ioc->internal_free_list); + } + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); +} + +/** + * _base_writeq - 64 bit write to MMIO + * @ioc: per adapter object + * @b: data payload + * @addr: address in MMIO space + * @writeq_lock: spin lock + * + * Glue for handling an atomic 64 bit word to MMIO. This special handling takes + * care of 32 bit environment where its not quarenteed to send the entire word + * in one transfer. + */ +#if defined(writeq) && defined(CONFIG_64BIT) +static inline void +_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) +{ + writeq(cpu_to_le64(b), addr); +} +#else +static inline void +_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) +{ + unsigned long flags; + __u64 data_out = cpu_to_le64(b); + + spin_lock_irqsave(writeq_lock, flags); + writel((u32)(data_out), addr); + writel((u32)(data_out >> 32), (addr + 4)); + spin_unlock_irqrestore(writeq_lock, flags); +} +#endif + +static inline u8 +_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc) +{ + return ioc->cpu_msix_table[raw_smp_processor_id()]; +} + +/** + * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle + * + * Return nothing. + */ +void +mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + + descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; + descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * mpt3sas_base_put_smid_fast_path - send fast path request to firmware + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle + * + * Return nothing. 
+ */ +void +mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + descriptor.SCSIIO.RequestFlags = + MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; + descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * mpt3sas_base_put_smid_hi_priority - send Task Managment request to firmware + * @ioc: per adapter object + * @smid: system request message index + * + * Return nothing. + */ +void +mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + descriptor.HighPriority.RequestFlags = + MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; + descriptor.HighPriority.MSIxIndex = 0; + descriptor.HighPriority.SMID = cpu_to_le16(smid); + descriptor.HighPriority.LMID = 0; + descriptor.HighPriority.Reserved1 = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * mpt3sas_base_put_smid_default - Default, primarily used for config pages + * @ioc: per adapter object + * @smid: system request message index + * + * Return nothing. + */ +void +mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; + descriptor.Default.MSIxIndex = _base_get_msix_index(ioc); + descriptor.Default.SMID = cpu_to_le16(smid); + descriptor.Default.LMID = 0; + descriptor.Default.DescriptorTypeDependent = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + + + +/** + * _base_display_ioc_capabilities - Disply IOC's capabilities. + * @ioc: per adapter object + * + * Return nothing. + */ +static void +_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) +{ + int i = 0; + char desc[16]; + u32 iounit_pg1_flags; + u32 bios_version; + + bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + strncpy(desc, ioc->manu_pg0.ChipName, 16); + pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\ + "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n", + ioc->name, desc, + (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, + (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, + (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, + ioc->facts.FWVersion.Word & 0x000000FF, + ioc->pdev->revision, + (bios_version & 0xFF000000) >> 24, + (bios_version & 0x00FF0000) >> 16, + (bios_version & 0x0000FF00) >> 8, + bios_version & 0x000000FF); + + pr_info(MPT3SAS_FMT "Protocol=(", ioc->name); + + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { + pr_info("Initiator"); + i++; + } + + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) { + pr_info("%sTarget", i ? "," : ""); + i++; + } + + i = 0; + pr_info("), "); + pr_info("Capabilities=("); + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { + pr_info("Raid"); + i++; + } + + if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { + pr_info("%sTLR", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) { + pr_info("%sMulticast", i ? 
"," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) { + pr_info("%sBIDI Target", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) { + pr_info("%sEEDP", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) { + pr_info("%sSnapshot Buffer", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) { + pr_info("%sDiag Trace Buffer", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) { + pr_info("%sDiag Extended Buffer", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { + pr_info("%sTask Set Full", i ? "," : ""); + i++; + } + + iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); + if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { + pr_info("%sNCQ", i ? "," : ""); + i++; + } + + pr_info(")\n"); +} + +/** + * mpt3sas_base_update_missing_delay - change the missing delay timers + * @ioc: per adapter object + * @device_missing_delay: amount of time till device is reported missing + * @io_missing_delay: interval IO is returned when there is a missing device + * + * Return nothing. + * + * Passed on the command line, this function will modify the device missing + * delay, as well as the io missing delay. This should be called at driver + * load time. + */ +void +mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, + u16 device_missing_delay, u8 io_missing_delay) +{ + u16 dmd, dmd_new, dmd_orignal; + u8 io_missing_delay_original; + u16 sz; + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2ConfigReply_t mpi_reply; + u8 num_phys = 0; + u16 ioc_status; + + mpt3sas_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) + return; + + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + /* device missing delay */ + dmd = sas_iounit_pg1->ReportDeviceMissingDelay; + if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) + dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; + else + dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; + dmd_orignal = dmd; + if (device_missing_delay > 0x7F) { + dmd = (device_missing_delay > 0x7F0) ? 
0x7F0 : + device_missing_delay; + dmd = dmd / 16; + dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16; + } else + dmd = device_missing_delay; + sas_iounit_pg1->ReportDeviceMissingDelay = dmd; + + /* io missing delay */ + io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay; + sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay; + + if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, + sz)) { + if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) + dmd_new = (dmd & + MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; + else + dmd_new = + dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; + pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n", + ioc->name, dmd_orignal, dmd_new); + pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n", + ioc->name, io_missing_delay_original, + io_missing_delay); + ioc->device_missing_delay = dmd_new; + ioc->io_missing_delay = io_missing_delay; + } + +out: + kfree(sas_iounit_pg1); +} +/** + * _base_static_config_pages - static start of day config pages + * @ioc: per adapter object + * + * Return nothing. + */ +static void +_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ConfigReply_t mpi_reply; + u32 iounit_pg1_flags; + + mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0); + if (ioc->ir_firmware) + mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply, + &ioc->manu_pg10); + + /* + * Ensure correct T10 PI operation if vendor left EEDPTagMode + * flag unset in NVDATA. + */ + mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11); + if (ioc->manu_pg11.EEDPTagMode == 0) { + pr_err("%s: overriding NVDATA EEDPTagMode setting\n", + ioc->name); + ioc->manu_pg11.EEDPTagMode &= ~0x3; + ioc->manu_pg11.EEDPTagMode |= 0x1; + mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply, + &ioc->manu_pg11); + } + + mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); + mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3); + mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); + mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); + mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); + _base_display_ioc_capabilities(ioc); + + /* + * Enable task_set_full handling in iounit_pg1 when the + * facts capabilities indicate that its supported. + */ + iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); + if ((ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING)) + iounit_pg1_flags &= + ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; + else + iounit_pg1_flags |= + MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; + ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); + mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); +} + +/** + * _base_release_memory_pools - release memory + * @ioc: per adapter object + * + * Free memory allocated from _base_allocate_memory_pools. + * + * Return nothing. 
+ */ +static void +_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) +{ + int i; + + dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + if (ioc->request) { + pci_free_consistent(ioc->pdev, ioc->request_dma_sz, + ioc->request, ioc->request_dma); + dexitprintk(ioc, pr_info(MPT3SAS_FMT + "request_pool(0x%p): free\n", + ioc->name, ioc->request)); + ioc->request = NULL; + } + + if (ioc->sense) { + pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); + if (ioc->sense_dma_pool) + pci_pool_destroy(ioc->sense_dma_pool); + dexitprintk(ioc, pr_info(MPT3SAS_FMT + "sense_pool(0x%p): free\n", + ioc->name, ioc->sense)); + ioc->sense = NULL; + } + + if (ioc->reply) { + pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); + if (ioc->reply_dma_pool) + pci_pool_destroy(ioc->reply_dma_pool); + dexitprintk(ioc, pr_info(MPT3SAS_FMT + "reply_pool(0x%p): free\n", + ioc->name, ioc->reply)); + ioc->reply = NULL; + } + + if (ioc->reply_free) { + pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, + ioc->reply_free_dma); + if (ioc->reply_free_dma_pool) + pci_pool_destroy(ioc->reply_free_dma_pool); + dexitprintk(ioc, pr_info(MPT3SAS_FMT + "reply_free_pool(0x%p): free\n", + ioc->name, ioc->reply_free)); + ioc->reply_free = NULL; + } + + if (ioc->reply_post_free) { + pci_pool_free(ioc->reply_post_free_dma_pool, + ioc->reply_post_free, ioc->reply_post_free_dma); + if (ioc->reply_post_free_dma_pool) + pci_pool_destroy(ioc->reply_post_free_dma_pool); + dexitprintk(ioc, pr_info(MPT3SAS_FMT + "reply_post_free_pool(0x%p): free\n", ioc->name, + ioc->reply_post_free)); + ioc->reply_post_free = NULL; + } + + if (ioc->config_page) { + dexitprintk(ioc, pr_info(MPT3SAS_FMT + "config_page(0x%p): free\n", ioc->name, + ioc->config_page)); + pci_free_consistent(ioc->pdev, ioc->config_page_sz, + ioc->config_page, ioc->config_page_dma); + } + + if (ioc->scsi_lookup) { + free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages); + ioc->scsi_lookup = NULL; + } + kfree(ioc->hpr_lookup); + kfree(ioc->internal_lookup); + if (ioc->chain_lookup) { + for (i = 0; i < ioc->chain_depth; i++) { + if (ioc->chain_lookup[i].chain_buffer) + pci_pool_free(ioc->chain_dma_pool, + ioc->chain_lookup[i].chain_buffer, + ioc->chain_lookup[i].chain_buffer_dma); + } + if (ioc->chain_dma_pool) + pci_pool_destroy(ioc->chain_dma_pool); + free_pages((ulong)ioc->chain_lookup, ioc->chain_pages); + ioc->chain_lookup = NULL; + } +} + +/** + * _base_allocate_memory_pools - allocate start of day memory pools + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 success, anything else error + */ +static int +_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + struct mpt3sas_facts *facts; + u16 max_sge_elements; + u16 chains_needed_per_io; + u32 sz, total_sz, reply_post_free_sz; + u32 retry_sz; + u16 max_request_credit; + unsigned short sg_tablesize; + u16 sge_size; + int i; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + + retry_sz = 0; + facts = &ioc->facts; + + /* command line tunables for max sgl entries */ + if (max_sgl_entries != -1) + sg_tablesize = max_sgl_entries; + else + sg_tablesize = MPT3SAS_SG_DEPTH; + + if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS) + sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS; + else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS) + sg_tablesize = MPT3SAS_MAX_PHYS_SEGMENTS; + ioc->shost->sg_tablesize = sg_tablesize; + + ioc->hi_priority_depth = facts->HighPriorityCredit; + ioc->internal_depth = 
ioc->hi_priority_depth + (5); + /* command line tunables for max controller queue depth */ + if (max_queue_depth != -1 && max_queue_depth != 0) { + max_request_credit = min_t(u16, max_queue_depth + + ioc->hi_priority_depth + ioc->internal_depth, + facts->RequestCredit); + if (max_request_credit > MAX_HBA_QUEUE_DEPTH) + max_request_credit = MAX_HBA_QUEUE_DEPTH; + } else + max_request_credit = min_t(u16, facts->RequestCredit, + MAX_HBA_QUEUE_DEPTH); + + ioc->hba_queue_depth = max_request_credit; + + /* request frame size */ + ioc->request_sz = facts->IOCRequestFrameSize * 4; + + /* reply frame size */ + ioc->reply_sz = facts->ReplyFrameSize * 4; + + /* calculate the max scatter element size */ + sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee); + + retry_allocation: + total_sz = 0; + /* calculate number of sg elements left over in the 1st frame */ + max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) - + sizeof(Mpi2SGEIOUnion_t)) + sge_size); + ioc->max_sges_in_main_message = max_sge_elements/sge_size; + + /* now do the same for a chain buffer */ + max_sge_elements = ioc->request_sz - sge_size; + ioc->max_sges_in_chain_message = max_sge_elements/sge_size; + + /* + * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE + */ + chains_needed_per_io = ((ioc->shost->sg_tablesize - + ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message) + + 1; + if (chains_needed_per_io > facts->MaxChainDepth) { + chains_needed_per_io = facts->MaxChainDepth; + ioc->shost->sg_tablesize = min_t(u16, + ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message + * chains_needed_per_io), ioc->shost->sg_tablesize); + } + ioc->chains_needed_per_io = chains_needed_per_io; + + /* reply free queue sizing - taking into account for 64 FW events */ + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; + + /* calculate reply descriptor post queue depth */ + ioc->reply_post_queue_depth = ioc->hba_queue_depth + + ioc->reply_free_queue_depth + 1 ; + /* align the reply post queue on the next 16 count boundary */ + if (ioc->reply_post_queue_depth % 16) + ioc->reply_post_queue_depth += 16 - + (ioc->reply_post_queue_depth % 16); + + + if (ioc->reply_post_queue_depth > + facts->MaxReplyDescriptorPostQueueDepth) { + ioc->reply_post_queue_depth = + facts->MaxReplyDescriptorPostQueueDepth - + (facts->MaxReplyDescriptorPostQueueDepth % 16); + ioc->hba_queue_depth = + ((ioc->reply_post_queue_depth - 64) / 2) - 1; + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; + } + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \ + "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), " + "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message, + ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize, + ioc->chains_needed_per_io)); + + ioc->scsiio_depth = ioc->hba_queue_depth - + ioc->hi_priority_depth - ioc->internal_depth; + + /* set the scsi host can_queue depth + * with some internal commands that could be outstanding + */ + ioc->shost->can_queue = ioc->scsiio_depth; + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "scsi host: can_queue depth (%d)\n", + ioc->name, ioc->shost->can_queue)); + + + /* contiguous pool for request and chains, 16 byte align, one extra " + * "frame for smid=0 + */ + ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth; + sz = ((ioc->scsiio_depth + 1) * ioc->request_sz); + + /* hi-priority queue */ + sz += (ioc->hi_priority_depth * ioc->request_sz); + + /* internal queue */ + sz += (ioc->internal_depth * ioc->request_sz); + + ioc->request_dma_sz = sz; + 
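/* + * Worked example with purely illustrative numbers (not taken from any + * particular controller): request_sz = 128, scsiio_depth = 900, + * hi_priority_depth = 8 and internal_depth = 13 give + * sz = (901 + 8 + 13) * 128 = 118016 bytes, about 115 kB; the "+ 1" + * frame is the one reserved for smid = 0. + */ + 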
ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma); + if (!ioc->request) { + pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \ + "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), " + "total(%d kB)\n", ioc->name, ioc->hba_queue_depth, + ioc->chains_needed_per_io, ioc->request_sz, sz/1024); + if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH) + goto out; + retry_sz += 64; + ioc->hba_queue_depth = max_request_credit - retry_sz; + goto retry_allocation; + } + + if (retry_sz) + pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \ + "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), " + "total(%d kb)\n", ioc->name, ioc->hba_queue_depth, + ioc->chains_needed_per_io, ioc->request_sz, sz/1024); + + /* hi-priority queue */ + ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) * + ioc->request_sz); + ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) * + ioc->request_sz); + + /* internal queue */ + ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth * + ioc->request_sz); + ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * + ioc->request_sz); + + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz, + (ioc->hba_queue_depth * ioc->request_sz)/1024)); + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n", + ioc->name, (unsigned long long) ioc->request_dma)); + total_sz += sz; + + sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker); + ioc->scsi_lookup_pages = get_order(sz); + ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages( + GFP_KERNEL, ioc->scsi_lookup_pages); + if (!ioc->scsi_lookup) { + pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n", + ioc->name, (int)sz); + goto out; + } + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n", + ioc->name, ioc->request, ioc->scsiio_depth)); + + ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH); + sz = ioc->chain_depth * sizeof(struct chain_tracker); + ioc->chain_pages = get_order(sz); + ioc->chain_lookup = (struct chain_tracker *)__get_free_pages( + GFP_KERNEL, ioc->chain_pages); + if (!ioc->chain_lookup) { + pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n", + ioc->name); + goto out; + } + ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev, + ioc->request_sz, 16, 0); + if (!ioc->chain_dma_pool) { + pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n", + ioc->name); + goto out; + } + for (i = 0; i < ioc->chain_depth; i++) { + ioc->chain_lookup[i].chain_buffer = pci_pool_alloc( + ioc->chain_dma_pool , GFP_KERNEL, + &ioc->chain_lookup[i].chain_buffer_dma); + if (!ioc->chain_lookup[i].chain_buffer) { + ioc->chain_depth = i; + goto chain_done; + } + total_sz += ioc->request_sz; + } + chain_done: + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->name, ioc->chain_depth, ioc->request_sz, + ((ioc->chain_depth * ioc->request_sz))/1024)); + + /* initialize hi-priority queue smid's */ + ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, + sizeof(struct request_tracker), GFP_KERNEL); + if (!ioc->hpr_lookup) { + pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n", + ioc->name); + goto out; + } + ioc->hi_priority_smid = ioc->scsiio_depth + 1; + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "hi_priority(0x%p): depth(%d), start smid(%d)\n", + ioc->name, ioc->hi_priority, + ioc->hi_priority_depth, 
ioc->hi_priority_smid)); + + /* initialize internal queue smid's */ + ioc->internal_lookup = kcalloc(ioc->internal_depth, + sizeof(struct request_tracker), GFP_KERNEL); + if (!ioc->internal_lookup) { + pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n", + ioc->name); + goto out; + } + ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth; + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "internal(0x%p): depth(%d), start smid(%d)\n", + ioc->name, ioc->internal, + ioc->internal_depth, ioc->internal_smid)); + + /* sense buffers, 4 byte align */ + sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; + ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4, + 0); + if (!ioc->sense_dma_pool) { + pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n", + ioc->name); + goto out; + } + ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL, + &ioc->sense_dma); + if (!ioc->sense) { + pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n", + ioc->name); + goto out; + } + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "sense pool(0x%p): depth(%d), element_size(%d), pool_size" + "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth, + SCSI_SENSE_BUFFERSIZE, sz/1024)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n", + ioc->name, (unsigned long long)ioc->sense_dma)); + total_sz += sz; + + /* reply pool, 4 byte align */ + sz = ioc->reply_free_queue_depth * ioc->reply_sz; + ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4, + 0); + if (!ioc->reply_dma_pool) { + pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n", + ioc->name); + goto out; + } + ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL, + &ioc->reply_dma); + if (!ioc->reply) { + pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n", + ioc->name); + goto out; + } + ioc->reply_dma_min_address = (u32)(ioc->reply_dma); + ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->name, ioc->reply, + ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n", + ioc->name, (unsigned long long)ioc->reply_dma)); + total_sz += sz; + + /* reply free queue, 16 byte align */ + sz = ioc->reply_free_queue_depth * 4; + ioc->reply_free_dma_pool = pci_pool_create("reply_free pool", + ioc->pdev, sz, 16, 0); + if (!ioc->reply_free_dma_pool) { + pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n", + ioc->name); + goto out; + } + ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL, + &ioc->reply_free_dma); + if (!ioc->reply_free) { + pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n", + ioc->name); + goto out; + } + memset(ioc->reply_free, 0, sz); + dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \ + "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name, + ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "reply_free_dma (0x%llx)\n", + ioc->name, (unsigned long long)ioc->reply_free_dma)); + total_sz += sz; + + /* reply post queue, 16 byte align */ + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + if (_base_is_controller_msix_enabled(ioc)) + sz = reply_post_free_sz * ioc->reply_queue_count; + else + sz = reply_post_free_sz; + ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool", + ioc->pdev, sz, 16, 0); + if (!ioc->reply_post_free_dma_pool) { + 
pr_err(MPT3SAS_FMT + "reply_post_free pool: pci_pool_create failed\n", + ioc->name); + goto out; + } + ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool , + GFP_KERNEL, &ioc->reply_post_free_dma); + if (!ioc->reply_post_free) { + pr_err(MPT3SAS_FMT + "reply_post_free pool: pci_pool_alloc failed\n", + ioc->name); + goto out; + } + memset(ioc->reply_post_free, 0, sz); + dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply post free pool" \ + "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n", + ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8, + sz/1024)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "reply_post_free_dma = (0x%llx)\n", + ioc->name, (unsigned long long) + ioc->reply_post_free_dma)); + total_sz += sz; + + ioc->config_page_sz = 512; + ioc->config_page = pci_alloc_consistent(ioc->pdev, + ioc->config_page_sz, &ioc->config_page_dma); + if (!ioc->config_page) { + pr_err(MPT3SAS_FMT + "config page: pci_pool_alloc failed\n", + ioc->name); + goto out; + } + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "config page(0x%p): size(%d)\n", + ioc->name, ioc->config_page, ioc->config_page_sz)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n", + ioc->name, (unsigned long long)ioc->config_page_dma)); + total_sz += ioc->config_page_sz; + + pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n", + ioc->name, total_sz/1024); + pr_info(MPT3SAS_FMT + "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n", + ioc->name, ioc->shost->can_queue, facts->RequestCredit); + pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n", + ioc->name, ioc->shost->sg_tablesize); + return 0; + + out: + return -ENOMEM; +} + +/** + * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter. + * @ioc: Pointer to MPT_ADAPTER structure + * @cooked: Request raw or cooked IOC state + * + * Returns all IOC Doorbell register bits if cooked==0, else just the + * Doorbell bits in MPI_IOC_STATE_MASK. + */ +u32 +mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked) +{ + u32 s, sc; + + s = readl(&ioc->chip->Doorbell); + sc = s & MPI2_IOC_STATE_MASK; + return cooked ? sc : s; +} + +/** + * _base_wait_on_iocstate - waiting on a particular ioc state + * @ioc_state: controller state { READY, OPERATIONAL, or RESET } + * @timeout: timeout in second + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout, + int sleep_flag) +{ + u32 count, cntdn; + u32 current_state; + + count = 0; + cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; + do { + current_state = mpt3sas_base_get_iocstate(ioc, 1); + if (current_state == ioc_state) + return 0; + if (count && current_state == MPI2_IOC_STATE_FAULT) + break; + if (sleep_flag == CAN_SLEEP) + usleep_range(1000, 1500); + else + udelay(500); + count++; + } while (--cntdn); + + return current_state; +} + +/** + * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by + * a write to the doorbell) + * @ioc: per adapter object + * @timeout: timeout in second + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + * + * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. + */ +static int +_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout, + int sleep_flag) +{ + u32 cntdn, count; + u32 int_status; + + count = 0; + cntdn = (sleep_flag == CAN_SLEEP) ? 
1000*timeout : 2000*timeout; + do { + int_status = readl(&ioc->chip->HostInterruptStatus); + if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { + dhsprintk(ioc, pr_info(MPT3SAS_FMT + "%s: successful count(%d), timeout(%d)\n", + ioc->name, __func__, count, timeout)); + return 0; + } + if (sleep_flag == CAN_SLEEP) + usleep_range(1000, 1500); + else + udelay(500); + count++; + } while (--cntdn); + + pr_err(MPT3SAS_FMT + "%s: failed due to timeout count(%d), int_status(%x)!\n", + ioc->name, __func__, count, int_status); + return -EFAULT; +} + +/** + * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell. + * @ioc: per adapter object + * @timeout: timeout in second + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + * + * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to + * doorbell. + */ +static int +_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout, + int sleep_flag) +{ + u32 cntdn, count; + u32 int_status; + u32 doorbell; + + count = 0; + cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; + do { + int_status = readl(&ioc->chip->HostInterruptStatus); + if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { + dhsprintk(ioc, pr_info(MPT3SAS_FMT + "%s: successful count(%d), timeout(%d)\n", + ioc->name, __func__, count, timeout)); + return 0; + } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { + doorbell = readl(&ioc->chip->Doorbell); + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_FAULT) { + mpt3sas_base_fault_info(ioc , doorbell); + return -EFAULT; + } + } else if (int_status == 0xFFFFFFFF) + goto out; + + if (sleep_flag == CAN_SLEEP) + usleep_range(1000, 1500); + else + udelay(500); + count++; + } while (--cntdn); + + out: + pr_err(MPT3SAS_FMT + "%s: failed due to timeout count(%d), int_status(%x)!\n", + ioc->name, __func__, count, int_status); + return -EFAULT; +} + +/** + * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use + * @ioc: per adapter object + * @timeout: timeout in second + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + * + */ +static int +_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout, + int sleep_flag) +{ + u32 cntdn, count; + u32 doorbell_reg; + + count = 0; + cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; + do { + doorbell_reg = readl(&ioc->chip->Doorbell); + if (!(doorbell_reg & MPI2_DOORBELL_USED)) { + dhsprintk(ioc, pr_info(MPT3SAS_FMT + "%s: successful count(%d), timeout(%d)\n", + ioc->name, __func__, count, timeout)); + return 0; + } + if (sleep_flag == CAN_SLEEP) + usleep_range(1000, 1500); + else + udelay(500); + count++; + } while (--cntdn); + + pr_err(MPT3SAS_FMT + "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n", + ioc->name, __func__, count, doorbell_reg); + return -EFAULT; +} + +/** + * _base_send_ioc_reset - send doorbell reset + * @ioc: per adapter object + * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET + * @timeout: timeout in second + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. 
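+ * + * The reset request travels over the doorbell register itself (the function + * code is shifted into place by MPI2_DOORBELL_FUNCTION_SHIFT), so no message + * frame or smid is consumed and the reset can be issued while the request + * queues are quiesced.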
+ */ +static int +_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout, + int sleep_flag) +{ + u32 ioc_state; + int r = 0; + + if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) { + pr_err(MPT3SAS_FMT "%s: unknown reset_type\n", + ioc->name, __func__); + return -EFAULT; + } + + if (!(ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY)) + return -EFAULT; + + pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name); + + writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT, + &ioc->chip->Doorbell); + if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) { + r = -EFAULT; + goto out; + } + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, + timeout, sleep_flag); + if (ioc_state) { + pr_err(MPT3SAS_FMT + "%s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + r = -EFAULT; + goto out; + } + out: + pr_info(MPT3SAS_FMT "message unit reset: %s\n", + ioc->name, ((r == 0) ? "SUCCESS" : "FAILED")); + return r; +} + +/** + * _base_handshake_req_reply_wait - send request thru doorbell interface + * @ioc: per adapter object + * @request_bytes: request length + * @request: pointer having request payload + * @reply_bytes: reply length + * @reply: pointer to reply payload + * @timeout: timeout in second + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, + u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag) +{ + MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; + int i; + u8 failed; + u16 dummy; + __le32 *mfp; + + /* make sure doorbell is not in use */ + if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { + pr_err(MPT3SAS_FMT + "doorbell is in use (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + + /* clear pending doorbell interrupts from previous state changes */ + if (readl(&ioc->chip->HostInterruptStatus) & + MPI2_HIS_IOC2SYS_DB_STATUS) + writel(0, &ioc->chip->HostInterruptStatus); + + /* send message to ioc */ + writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) | + ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)), + &ioc->chip->Doorbell); + + if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) { + pr_err(MPT3SAS_FMT + "doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + writel(0, &ioc->chip->HostInterruptStatus); + + if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) { + pr_err(MPT3SAS_FMT + "doorbell handshake ack failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + + /* send message 32-bits at a time */ + for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { + writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); + if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) + failed = 1; + } + + if (failed) { + pr_err(MPT3SAS_FMT + "doorbell handshake sending request failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + + /* now wait for the reply */ + if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) { + pr_err(MPT3SAS_FMT + "doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + + /* read the first two 16-bit words; they give the total length of the reply */ + reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) { + pr_err(MPT3SAS_FMT + "doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + reply[1] = 
le16_to_cpu(readl(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + + for (i = 2; i < default_reply->MsgLength * 2; i++) { + if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) { + pr_err(MPT3SAS_FMT + "doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + if (i >= reply_bytes/2) /* overflow case */ + dummy = readl(&ioc->chip->Doorbell); + else + reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + } + + _base_wait_for_doorbell_int(ioc, 5, sleep_flag); + if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) { + dhsprintk(ioc, pr_info(MPT3SAS_FMT + "doorbell is in use (line=%d)\n", ioc->name, __LINE__)); + } + writel(0, &ioc->chip->HostInterruptStatus); + + if (ioc->logging_level & MPT_DEBUG_INIT) { + mfp = (__le32 *)reply; + pr_info("\toffset:data\n"); + for (i = 0; i < reply_bytes/4; i++) + pr_info("\t[0x%02x]:%08x\n", i*4, + le32_to_cpu(mfp[i])); + } + return 0; +} + +/** + * mpt3sas_base_sas_iounit_control - send sas iounit control to FW + * @ioc: per adapter object + * @mpi_reply: the reply payload from FW + * @mpi_request: the request payload sent to FW + * + * The SAS IO Unit Control Request message allows the host to perform low-level + * operations, such as resets on the PHYs of the IO Unit, also allows the host + * to obtain the IOC assigned device handles for a device if it has other + * identifying information about the device, in addition allows the host to + * remove IOC resources associated with the device. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, + Mpi2SasIoUnitControlReply_t *mpi_reply, + Mpi2SasIoUnitControlRequest_t *mpi_request) +{ + u16 smid; + u32 ioc_state; + unsigned long timeleft; + u8 issue_reset; + int rc; + void *request; + u16 wait_state_count; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + mutex_lock(&ioc->base_cmds.mutex); + + if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: base_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->base_cmds.status = MPT3_CMD_PENDING; + request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)); + if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) + ioc->ioc_link_reset_in_progress = 1; + init_completion(&ioc->base_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, + msecs_to_jiffies(10000)); + if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) && + 
ioc->ioc_link_reset_in_progress) + ioc->ioc_link_reset_in_progress = 0; + if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SasIoUnitControlRequest_t)/4); + if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + else + issue_reset = 0; + goto issue_host_reset; + } + if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) + memcpy(mpi_reply, ioc->base_cmds.reply, + sizeof(Mpi2SasIoUnitControlReply_t)); + else + memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t)); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + goto out; + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + rc = -EFAULT; + out: + mutex_unlock(&ioc->base_cmds.mutex); + return rc; +} + +/** + * mpt3sas_base_scsi_enclosure_processor - sending request to sep device + * @ioc: per adapter object + * @mpi_reply: the reply payload from FW + * @mpi_request: the request payload sent to FW + * + * The SCSI Enclosure Processor request message causes the IOC to + * communicate with SES devices to control LED status signals. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, + Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request) +{ + u16 smid; + u32 ioc_state; + unsigned long timeleft; + u8 issue_reset; + int rc; + void *request; + u16 wait_state_count; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + mutex_lock(&ioc->base_cmds.mutex); + + if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: base_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, + __func__, wait_state_count); + } + + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->base_cmds.status = MPT3_CMD_PENDING; + request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t)); + init_completion(&ioc->base_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, + msecs_to_jiffies(10000)); + if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SepRequest_t)/4); + if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + else + issue_reset = 0; + goto issue_host_reset; + } + if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) + memcpy(mpi_reply, ioc->base_cmds.reply, + sizeof(Mpi2SepReply_t)); + else + memset(mpi_reply, 0, sizeof(Mpi2SepReply_t)); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + goto out; + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + rc = -EFAULT; + out: + mutex_unlock(&ioc->base_cmds.mutex); + return rc; +} + +/** + * 
_base_get_port_facts - obtain port facts reply and save in ioc + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag) +{ + Mpi2PortFactsRequest_t mpi_request; + Mpi2PortFactsReply_t mpi_reply; + struct mpt3sas_port_facts *pfacts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + mpi_reply_sz = sizeof(Mpi2PortFactsReply_t); + mpi_request_sz = sizeof(Mpi2PortFactsRequest_t); + memset(&mpi_request, 0, mpi_request_sz); + mpi_request.Function = MPI2_FUNCTION_PORT_FACTS; + mpi_request.PortNumber = port; + r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP); + + if (r != 0) { + pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + + pfacts = &ioc->pfacts[port]; + memset(pfacts, 0, sizeof(struct mpt3sas_port_facts)); + pfacts->PortNumber = mpi_reply.PortNumber; + pfacts->VP_ID = mpi_reply.VP_ID; + pfacts->VF_ID = mpi_reply.VF_ID; + pfacts->MaxPostedCmdBuffers = + le16_to_cpu(mpi_reply.MaxPostedCmdBuffers); + + return 0; +} + +/** + * _base_get_ioc_facts - obtain ioc facts reply and save in ioc + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + Mpi2IOCFactsRequest_t mpi_request; + Mpi2IOCFactsReply_t mpi_reply; + struct mpt3sas_facts *facts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); + mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); + memset(&mpi_request, 0, mpi_request_sz); + mpi_request.Function = MPI2_FUNCTION_IOC_FACTS; + r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP); + + if (r != 0) { + pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + + facts = &ioc->facts; + memset(facts, 0, sizeof(struct mpt3sas_facts)); + facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); + facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); + facts->VP_ID = mpi_reply.VP_ID; + facts->VF_ID = mpi_reply.VF_ID; + facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); + facts->MaxChainDepth = mpi_reply.MaxChainDepth; + facts->WhoInit = mpi_reply.WhoInit; + facts->NumberOfPorts = mpi_reply.NumberOfPorts; + facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; + facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); + facts->MaxReplyDescriptorPostQueueDepth = + le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); + facts->ProductID = le16_to_cpu(mpi_reply.ProductID); + facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); + if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) + ioc->ir_firmware = 1; + facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); + facts->IOCRequestFrameSize = + le16_to_cpu(mpi_reply.IOCRequestFrameSize); + facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); + facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); + ioc->shost->max_id = -1; + facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); + facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); + facts->ProtocolFlags = 
le16_to_cpu(mpi_reply.ProtocolFlags); + facts->HighPriorityCredit = + le16_to_cpu(mpi_reply.HighPriorityCredit); + facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; + facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); + + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "hba queue depth(%d), max chains per io(%d)\n", + ioc->name, facts->RequestCredit, + facts->MaxChainDepth)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "request frame size(%d), reply frame size(%d)\n", ioc->name, + facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4)); + return 0; +} + +/** + * _base_send_ioc_init - send ioc_init to firmware + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + Mpi2IOCInitRequest_t mpi_request; + Mpi2IOCInitReply_t mpi_reply; + int r; + struct timeval current_time; + u16 ioc_status; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t)); + mpi_request.Function = MPI2_FUNCTION_IOC_INIT; + mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; + mpi_request.VF_ID = 0; /* TODO */ + mpi_request.VP_ID = 0; + mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION); + mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); + + if (_base_is_controller_msix_enabled(ioc)) + mpi_request.HostMSIxVectors = ioc->reply_queue_count; + mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); + mpi_request.ReplyDescriptorPostQueueDepth = + cpu_to_le16(ioc->reply_post_queue_depth); + mpi_request.ReplyFreeQueueDepth = + cpu_to_le16(ioc->reply_free_queue_depth); + + mpi_request.SenseBufferAddressHigh = + cpu_to_le32((u64)ioc->sense_dma >> 32); + mpi_request.SystemReplyAddressHigh = + cpu_to_le32((u64)ioc->reply_dma >> 32); + mpi_request.SystemRequestFrameBaseAddress = + cpu_to_le64((u64)ioc->request_dma); + mpi_request.ReplyFreeQueueAddress = + cpu_to_le64((u64)ioc->reply_free_dma); + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64)ioc->reply_post_free_dma); + + + /* This time stamp specifies number of milliseconds + * since epoch ~ midnight January 1, 1970. + */ + do_gettimeofday(&current_time); + mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 + + (current_time.tv_usec / 1000)); + + if (ioc->logging_level & MPT_DEBUG_INIT) { + __le32 *mfp; + int i; + + mfp = (__le32 *)&mpi_request; + pr_info("\toffset:data\n"); + for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) + pr_info("\t[0x%02x]:%08x\n", i*4, + le32_to_cpu(mfp[i])); + } + + r = _base_handshake_req_reply_wait(ioc, + sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request, + sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10, + sleep_flag); + + if (r != 0) { + pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS || + mpi_reply.IOCLogInfo) { + pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__); + r = -EIO; + } + + return r; +} + +/** + * mpt3sas_port_enable_done - command completion routine for port enable + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
+ */ +u8 +mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + u16 ioc_status; + + if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED) + return 1; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (!mpi_reply) + return 1; + + if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE) + return 1; + + ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING; + ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE; + ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID; + memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + ioc->port_enable_failed = 1; + + if (ioc->is_driver_loading) { + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + mpt3sas_port_enable_complete(ioc); + return 1; + } else { + ioc->start_scan_failed = ioc_status; + ioc->start_scan = 0; + return 1; + } + } + complete(&ioc->port_enable_cmds.done); + return 1; +} + +/** + * _base_send_port_enable - send port_enable(discovery stuff) to firmware + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + Mpi2PortEnableRequest_t *mpi_request; + Mpi2PortEnableReply_t *mpi_reply; + unsigned long timeleft; + int r = 0; + u16 smid; + u16 ioc_status; + + pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name); + + if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { + pr_err(MPT3SAS_FMT "%s: internal command already in use\n", + ioc->name, __func__); + return -EAGAIN; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + return -EAGAIN; + } + + ioc->port_enable_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->port_enable_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); + mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; + + init_completion(&ioc->port_enable_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done, + 300*HZ); + if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2PortEnableRequest_t)/4); + if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) + r = -EFAULT; + else + r = -ETIME; + goto out; + } + + mpi_reply = ioc->port_enable_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n", + ioc->name, __func__, ioc_status); + r = -EFAULT; + goto out; + } + + out: + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ? + "SUCCESS" : "FAILED")); + return r; +} + +/** + * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply) + * @ioc: per adapter object + * + * Returns 0 for success, non-zero for failure. 
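+ * + * Unlike _base_send_port_enable(), this variant does not block; completion + * is reported asynchronously through mpt3sas_port_enable_done().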
+ */ +int +mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2PortEnableRequest_t *mpi_request; + u16 smid; + + pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name); + + if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { + pr_err(MPT3SAS_FMT "%s: internal command already in use\n", + ioc->name, __func__); + return -EAGAIN; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + return -EAGAIN; + } + + ioc->port_enable_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->port_enable_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); + mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; + + mpt3sas_base_put_smid_default(ioc, smid); + return 0; +} + +/** + * _base_determine_wait_on_discovery - disposition + * @ioc: per adapter object + * + * Decide whether to wait on discovery to complete. Used to either + * locate boot device, or report volumes ahead of physical devices. + * + * Returns 1 for wait, 0 for don't wait + */ +static int +_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc) +{ + /* We wait for discovery to complete if IR firmware is loaded. + * The sas topology events arrive before PD events, so we need time to + * turn on the bit in ioc->pd_handles to indicate PD. + * Also, it may be required to report Volumes ahead of physical + * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set. + */ + if (ioc->ir_firmware) + return 1; + + /* if no Bios, then we don't need to wait */ + if (!ioc->bios_pg3.BiosVersion) + return 0; + + /* Bios is present, so we drop down here. + * + * If there are any entries in the Bios Page 2, then we wait + * for discovery to complete. + */ + + /* Current Boot Device */ + if ((ioc->bios_pg2.CurrentBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK) == + MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && + /* Request Boot Device */ + (ioc->bios_pg2.ReqBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK) == + MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && + /* Alternate Request Boot Device */ + (ioc->bios_pg2.ReqAltBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK) == + MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED) + return 0; + + return 1; +} + +/** + * _base_unmask_events - turn on notification for this event + * @ioc: per adapter object + * @event: firmware event + * + * The mask is stored in ioc->event_masks. + */ +static void +_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event) +{ + u32 desired_event; + + if (event >= 128) + return; + + desired_event = (1 << (event % 32)); + + if (event < 32) + ioc->event_masks[0] &= ~desired_event; + else if (event < 64) + ioc->event_masks[1] &= ~desired_event; + else if (event < 96) + ioc->event_masks[2] &= ~desired_event; + else if (event < 128) + ioc->event_masks[3] &= ~desired_event; +} + +/** + * _base_event_notification - send event notification + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. 
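+ * + * The mask is four 32-bit words (MPI2_EVENT_NOTIFY_EVENTMASK_WORDS); a set + * bit masks an event, so events enabled via _base_unmask_events() have their + * bit cleared before the request is posted.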
+ */ +static int +_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + Mpi2EventNotificationRequest_t *mpi_request; + unsigned long timeleft; + u16 smid; + int r = 0; + int i; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + if (ioc->base_cmds.status & MPT3_CMD_PENDING) { + pr_err(MPT3SAS_FMT "%s: internal command already in use\n", + ioc->name, __func__); + return -EAGAIN; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + return -EAGAIN; + } + ioc->base_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t)); + mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + mpi_request->EventMasks[i] = + cpu_to_le32(ioc->event_masks[i]); + init_completion(&ioc->base_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); + if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2EventNotificationRequest_t)/4); + if (ioc->base_cmds.status & MPT3_CMD_RESET) + r = -EFAULT; + else + r = -ETIME; + } else + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n", + ioc->name, __func__)); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + return r; +} + +/** + * mpt3sas_base_validate_event_type - validating event types + * @ioc: per adapter object + * @event_type: event type bitmask requested by the application + * + * This will turn on firmware event notification when an application + * asks for that event. We don't mask events that are already enabled. + */ +void +mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type) +{ + int i, j; + u32 event_mask, desired_event; + u8 send_update_to_fw; + + for (i = 0, send_update_to_fw = 0; i < + MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) { + event_mask = ~event_type[i]; + desired_event = 1; + for (j = 0; j < 32; j++) { + if (!(event_mask & desired_event) && + (ioc->event_masks[i] & desired_event)) { + ioc->event_masks[i] &= ~desired_event; + send_update_to_fw = 1; + } + desired_event = (desired_event << 1); + } + } + + if (!send_update_to_fw) + return; + + mutex_lock(&ioc->base_cmds.mutex); + _base_event_notification(ioc, CAN_SLEEP); + mutex_unlock(&ioc->base_cmds.mutex); +} + +/** + * _base_diag_reset - the "big hammer" start of day reset + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. 
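+ * + * The host diagnostic register is write-protected until the magic key + * sequence (the flush key followed by the 1st through 6th keys) has been + * written to the WriteSequence register; only then can + * MPI2_DIAG_RESET_ADAPTER be set.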
+ */ +static int +_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + u32 host_diagnostic; + u32 ioc_state; + u32 count; + u32 hcb_size; + + pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name); + + drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n", + ioc->name)); + + count = 0; + do { + /* Write magic sequence to WriteSequence register + * Loop until in diagnostic mode + */ + drsprintk(ioc, pr_info(MPT3SAS_FMT + "write magic sequence\n", ioc->name)); + writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence); + + /* wait 100 msec */ + if (sleep_flag == CAN_SLEEP) + msleep(100); + else + mdelay(100); + + if (count++ > 20) + goto out; + + host_diagnostic = readl(&ioc->chip->HostDiagnostic); + drsprintk(ioc, pr_info(MPT3SAS_FMT + "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n", + ioc->name, count, host_diagnostic)); + + } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0); + + hcb_size = readl(&ioc->chip->HCBSize); + + drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n", + ioc->name)); + writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER, + &ioc->chip->HostDiagnostic); + + /* don't access any registers for 50 milliseconds */ + msleep(50); + + /* 300 second max wait */ + for (count = 0; count < 3000000 ; count++) { + + host_diagnostic = readl(&ioc->chip->HostDiagnostic); + + if (host_diagnostic == 0xFFFFFFFF) + goto out; + if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) + break; + + /* wait 1 msec */ + if (sleep_flag == CAN_SLEEP) + usleep_range(1000, 1500); + else + mdelay(1); + } + + if (host_diagnostic & MPI2_DIAG_HCB_MODE) { + + drsprintk(ioc, pr_info(MPT3SAS_FMT + "restart the adapter assuming the HCB Address points to good F/W\n", + ioc->name)); + host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK; + host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW; + writel(host_diagnostic, &ioc->chip->HostDiagnostic); + + drsprintk(ioc, pr_info(MPT3SAS_FMT + "re-enable the HCDW\n", ioc->name)); + writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE, + &ioc->chip->HCBSize); + } + + drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n", + ioc->name)); + writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET, + &ioc->chip->HostDiagnostic); + + drsprintk(ioc, pr_info(MPT3SAS_FMT + "disable writes to the diagnostic register\n", ioc->name)); + writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); + + drsprintk(ioc, pr_info(MPT3SAS_FMT + "Wait for FW to go to the READY state\n", ioc->name)); + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20, + sleep_flag); + if (ioc_state) { + pr_err(MPT3SAS_FMT + "%s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + goto out; + } + + pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name); + return 0; + + out: + pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name); + return -EFAULT; +} + +/** + * _base_make_ioc_ready - put controller in READY state + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * @type: FORCE_BIG_HAMMER or SOFT_RESET + * + * Returns 0 for success, non-zero for failure. 
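+ * + * Escalation order: ride out a transient RESET state, then try a message + * unit reset, and fall back to the diagnostic ("big hammer") reset when the + * IOC is in FAULT, the doorbell is busy, or FORCE_BIG_HAMMER was requested.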
+ */ +static int +_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag, + enum reset_type type) +{ + u32 ioc_state; + int rc; + int count; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + if (ioc->pci_error_recovery) + return 0; + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n", + ioc->name, __func__, ioc_state)); + + /* if in RESET state, it should move to READY state shortly */ + count = 0; + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) { + while ((ioc_state & MPI2_IOC_STATE_MASK) != + MPI2_IOC_STATE_READY) { + if (count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + return -EFAULT; + } + if (sleep_flag == CAN_SLEEP) + ssleep(1); + else + mdelay(1000); + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + } + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) + return 0; + + if (ioc_state & MPI2_DOORBELL_USED) { + dhsprintk(ioc, pr_info(MPT3SAS_FMT + "unexpected doorbell active!\n", + ioc->name)); + goto issue_diag_reset; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_base_fault_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + goto issue_diag_reset; + } + + if (type == FORCE_BIG_HAMMER) + goto issue_diag_reset; + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) + if (!(_base_send_ioc_reset(ioc, + MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) { + return 0; + } + + issue_diag_reset: + rc = _base_diag_reset(ioc, CAN_SLEEP); + return rc; +} + +/** + * _base_make_ioc_operational - put controller in OPERATIONAL state + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. 
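+ * + * Rebuilds the smid free lists, seeds the reply free and reply descriptor + * post queues, then issues IOC init, event notification and, unless the + * driver is still loading, port enable.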
+ */ +static int +_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + int r, i; + unsigned long flags; + u32 reply_address; + u16 smid; + struct _tr_list *delayed_tr, *delayed_tr_next; + struct adapter_reply_queue *reply_q; + long reply_post_free; + u32 reply_post_free_sz; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + /* clean the delayed target reset list */ + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_tr_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + + + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_tr_volume_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + + /* initialize the scsi lookup free list */ + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + INIT_LIST_HEAD(&ioc->free_list); + smid = 1; + for (i = 0; i < ioc->scsiio_depth; i++, smid++) { + INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list); + ioc->scsi_lookup[i].cb_idx = 0xFF; + ioc->scsi_lookup[i].smid = smid; + ioc->scsi_lookup[i].scmd = NULL; + list_add_tail(&ioc->scsi_lookup[i].tracker_list, + &ioc->free_list); + } + + /* hi-priority queue */ + INIT_LIST_HEAD(&ioc->hpr_free_list); + smid = ioc->hi_priority_smid; + for (i = 0; i < ioc->hi_priority_depth; i++, smid++) { + ioc->hpr_lookup[i].cb_idx = 0xFF; + ioc->hpr_lookup[i].smid = smid; + list_add_tail(&ioc->hpr_lookup[i].tracker_list, + &ioc->hpr_free_list); + } + + /* internal queue */ + INIT_LIST_HEAD(&ioc->internal_free_list); + smid = ioc->internal_smid; + for (i = 0; i < ioc->internal_depth; i++, smid++) { + ioc->internal_lookup[i].cb_idx = 0xFF; + ioc->internal_lookup[i].smid = smid; + list_add_tail(&ioc->internal_lookup[i].tracker_list, + &ioc->internal_free_list); + } + + /* chain pool */ + INIT_LIST_HEAD(&ioc->free_chain_list); + for (i = 0; i < ioc->chain_depth; i++) + list_add_tail(&ioc->chain_lookup[i].tracker_list, + &ioc->free_chain_list); + + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + /* initialize Reply Free Queue */ + for (i = 0, reply_address = (u32)ioc->reply_dma ; + i < ioc->reply_free_queue_depth ; i++, reply_address += + ioc->reply_sz) + ioc->reply_free[i] = cpu_to_le32(reply_address); + + /* initialize reply queues */ + if (ioc->is_driver_loading) + _base_assign_reply_queues(ioc); + + /* initialize Reply Post Free Queue */ + reply_post_free = (long)ioc->reply_post_free; + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + reply_q->reply_post_host_index = 0; + reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *) + reply_post_free; + for (i = 0; i < ioc->reply_post_queue_depth; i++) + reply_q->reply_post_free[i].Words = + cpu_to_le64(ULLONG_MAX); + if (!_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_free_queue; + reply_post_free += reply_post_free_sz; + } + skip_init_reply_post_free_queue: + + r = _base_send_ioc_init(ioc, sleep_flag); + if (r) + return r; + + /* initialize reply free host index */ + ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; + writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); + + /* initialize reply post host index */ + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT, + &ioc->chip->ReplyPostHostIndex); + if (!_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_host_index; + } + + skip_init_reply_post_host_index: + + 
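/* + * From this point the controller is brought live: unmask interrupts first, + * then tell the firmware which events to deliver. + */ + 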
_base_unmask_interrupts(ioc); + r = _base_event_notification(ioc, sleep_flag); + if (r) + return r; + + if (sleep_flag == CAN_SLEEP) + _base_static_config_pages(ioc); + + + if (ioc->is_driver_loading) { + ioc->wait_for_discovery_to_complete = + _base_determine_wait_on_discovery(ioc); + + return r; /* scan_start and scan_finished support */ + } + + r = _base_send_port_enable(ioc, sleep_flag); + if (r) + return r; + + return r; +} + +/** + * mpt3sas_base_free_resources - free controller resources + * @ioc: per adapter object + * + * Return nothing. + */ +void +mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + + dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + _base_mask_interrupts(ioc); + ioc->shost_recovery = 1; + _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); + ioc->shost_recovery = 0; + _base_free_irq(ioc); + _base_disable_msix(ioc); + if (ioc->chip_phys) + iounmap(ioc->chip); + ioc->chip_phys = 0; + pci_release_selected_regions(ioc->pdev, ioc->bars); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + return; +} + +/** + * mpt3sas_base_attach - attach controller instance + * @ioc: per adapter object + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) +{ + int r, i; + int cpu_id, last_cpu_id = 0; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + /* setup cpu_msix_table */ + ioc->cpu_count = num_online_cpus(); + for_each_online_cpu(cpu_id) + last_cpu_id = cpu_id; + ioc->cpu_msix_table_sz = last_cpu_id + 1; + ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); + ioc->reply_queue_count = 1; + if (!ioc->cpu_msix_table) { + dfailprintk(ioc, pr_info(MPT3SAS_FMT + "allocation for cpu_msix_table failed!!!\n", + ioc->name)); + r = -ENOMEM; + goto out_free_resources; + } + + r = mpt3sas_base_map_resources(ioc); + if (r) + goto out_free_resources; + + + pci_set_drvdata(ioc->pdev, ioc->shost); + r = _base_get_ioc_facts(ioc, CAN_SLEEP); + if (r) + goto out_free_resources; + + /* + * In SAS3.0, + * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and + * Target Status - all require the IEEE formatted scatter gather + * elements. + */ + + ioc->build_sg_scmd = &_base_build_sg_scmd_ieee; + ioc->build_sg = &_base_build_sg_ieee; + ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee; + ioc->mpi25 = 1; + ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t); + + /* + * These function pointers are for other requests that don't + * require IEEE scatter gather elements. + * + * For example Configuration Pages and SAS IOUNIT Control don't. 
+ */ + ioc->build_sg_mpi = &_base_build_sg; + ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge; + + r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); + if (r) + goto out_free_resources; + + ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts, + sizeof(struct mpt3sas_port_facts), GFP_KERNEL); + if (!ioc->pfacts) { + r = -ENOMEM; + goto out_free_resources; + } + + for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) { + r = _base_get_port_facts(ioc, i, CAN_SLEEP); + if (r) + goto out_free_resources; + } + + r = _base_allocate_memory_pools(ioc, CAN_SLEEP); + if (r) + goto out_free_resources; + + init_waitqueue_head(&ioc->reset_wq); + + /* allocate memory pd handle bitmask list */ + ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + ioc->pd_handles_sz++; + ioc->pd_handles = kzalloc(ioc->pd_handles_sz, + GFP_KERNEL); + if (!ioc->pd_handles) { + r = -ENOMEM; + goto out_free_resources; + } + ioc->blocking_handles = kzalloc(ioc->pd_handles_sz, + GFP_KERNEL); + if (!ioc->blocking_handles) { + r = -ENOMEM; + goto out_free_resources; + } + + ioc->fwfault_debug = mpt3sas_fwfault_debug; + + /* base internal command bits */ + mutex_init(&ioc->base_cmds.mutex); + ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + + /* port_enable command bits */ + ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + + /* transport internal command bits */ + ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->transport_cmds.mutex); + + /* scsih internal command bits */ + ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->scsih_cmds.mutex); + + /* task management internal command bits */ + ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->tm_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->tm_cmds.mutex); + + /* config page internal command bits */ + ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->config_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->config_cmds.mutex); + + /* ctl module internal command bits */ + ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->ctl_cmds.mutex); + + if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply || + !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply || + !ioc->config_cmds.reply || !ioc->ctl_cmds.reply || + !ioc->ctl_cmds.sense) { + r = -ENOMEM; + goto out_free_resources; + } + + for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + ioc->event_masks[i] = -1; + + /* here we enable the events we care about */ + _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY); + _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); + _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); + _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); + _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); + _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); + _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME); + _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); + _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); + _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); + + r = _base_make_ioc_operational(ioc, CAN_SLEEP); + if (r) + goto out_free_resources; + 
+ return 0; + + out_free_resources: + + ioc->remove_host = 1; + + mpt3sas_base_free_resources(ioc); + _base_release_memory_pools(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->pfacts); + ioc->ctl_cmds.reply = NULL; + ioc->base_cmds.reply = NULL; + ioc->tm_cmds.reply = NULL; + ioc->scsih_cmds.reply = NULL; + ioc->transport_cmds.reply = NULL; + ioc->config_cmds.reply = NULL; + ioc->pfacts = NULL; + return r; +} + + +/** + * mpt3sas_base_detach - remove controller instance + * @ioc: per adapter object + * + * Return nothing. + */ +void +mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc) +{ + dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_free_resources(ioc); + _base_release_memory_pools(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->pfacts); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); +} + +/** + * _base_reset_handler - reset callback handler (for base) + * @ioc: per adapter object + * @reset_phase: phase + * + * The handler for doing any required cleanup or initialization. + * + * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET, + * MPT3_IOC_DONE_RESET + * + * Return nothing. 
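+ *
+ * Each registered handler follows the same three-phase pattern; a
+ * condensed sketch of that pattern (the real handlers are
+ * mpt3sas_scsih_reset_handler and mpt3sas_ctl_reset_handler):
+ *
+ *	switch (reset_phase) {
+ *	case MPT3_IOC_PRE_RESET:   quiesce, stop new I/O;        break;
+ *	case MPT3_IOC_AFTER_RESET: fail or requeue pending cmds; break;
+ *	case MPT3_IOC_DONE_RESET:  resume normal operation;      break;
+ *	}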
+ */
+static void
+_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+	mpt3sas_scsih_reset_handler(ioc, reset_phase);
+	mpt3sas_ctl_reset_handler(ioc, reset_phase);
+	switch (reset_phase) {
+	case MPT3_IOC_PRE_RESET:
+		dtmprintk(ioc, pr_info(MPT3SAS_FMT
+			"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+		break;
+	case MPT3_IOC_AFTER_RESET:
+		dtmprintk(ioc, pr_info(MPT3SAS_FMT
+			"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+		if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
+			ioc->transport_cmds.status |= MPT3_CMD_RESET;
+			mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
+			complete(&ioc->transport_cmds.done);
+		}
+		if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+			ioc->base_cmds.status |= MPT3_CMD_RESET;
+			mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
+			complete(&ioc->base_cmds.done);
+		}
+		if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+			ioc->port_enable_failed = 1;
+			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+			mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
+			if (ioc->is_driver_loading) {
+				ioc->start_scan_failed =
+				    MPI2_IOCSTATUS_INTERNAL_ERROR;
+				ioc->start_scan = 0;
+				ioc->port_enable_cmds.status =
+				    MPT3_CMD_NOT_USED;
+			} else
+				complete(&ioc->port_enable_cmds.done);
+		}
+		if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
+			ioc->config_cmds.status |= MPT3_CMD_RESET;
+			mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
+			ioc->config_cmds.smid = USHRT_MAX;
+			complete(&ioc->config_cmds.done);
+		}
+		break;
+	case MPT3_IOC_DONE_RESET:
+		dtmprintk(ioc, pr_info(MPT3SAS_FMT
+			"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+		break;
+	}
+}
+
+/**
+ * _wait_for_commands_to_complete - wait for pending I/O to complete
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * This function waits (up to 10 seconds) for all pending commands to
+ * complete prior to putting the controller in reset.
+ */
+static void
+_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+	u32 ioc_state;
+	unsigned long flags;
+	u16 i;
+
+	ioc->pending_io_count = 0;
+	if (sleep_flag != CAN_SLEEP)
+		return;
+
+	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
+		return;
+
+	/* pending command count */
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	for (i = 0; i < ioc->scsiio_depth; i++)
+		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
+			ioc->pending_io_count++;
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+	if (!ioc->pending_io_count)
+		return;
+
+	/* wait for pending commands to complete */
+	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
+}
+
+/**
+ * mpt3sas_base_hard_reset_handler - reset controller
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ * @type: FORCE_BIG_HAMMER or SOFT_RESET
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+	enum reset_type type)
+{
+	int r;
+	unsigned long flags;
+	u32 ioc_state;
+	u8 is_fault = 0, is_trigger = 0;
+
+	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+	    __func__));
+
+	if (ioc->pci_error_recovery) {
+		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
+		    ioc->name, __func__);
+		r = 0;
+		goto out_unlocked;
+	}
+
+	if (mpt3sas_fwfault_debug)
+		mpt3sas_halt_firmware(ioc);
+
+	/* TODO - What we really should be doing is pulling
+	 * out all the code associated with NO_SLEEP; it's never used.
+ * That is legacy code from mpt fusion driver, ported over. + * I will leave this BUG_ON here for now till its been resolved. + */ + BUG_ON(sleep_flag == NO_SLEEP); + + /* wait for an active reset in progress to complete */ + if (!mutex_trylock(&ioc->reset_in_progress_mutex)) { + do { + ssleep(1); + } while (ioc->shost_recovery == 1); + dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, + __func__)); + return ioc->ioc_reset_in_progress_status; + } + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) && + (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED))) { + is_trigger = 1; + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) + is_fault = 1; + } + _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); + _wait_for_commands_to_complete(ioc, sleep_flag); + _base_mask_interrupts(ioc); + r = _base_make_ioc_ready(ioc, sleep_flag, type); + if (r) + goto out; + _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET); + + /* If this hard reset is called while port enable is active, then + * there is no reason to call make_ioc_operational + */ + if (ioc->is_driver_loading && ioc->port_enable_failed) { + ioc->remove_host = 1; + r = -EFAULT; + goto out; + } + r = _base_get_ioc_facts(ioc, CAN_SLEEP); + if (r) + goto out; + r = _base_make_ioc_operational(ioc, sleep_flag); + if (!r) + _base_reset_handler(ioc, MPT3_IOC_DONE_RESET); + + out: + dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n", + ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED"))); + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->ioc_reset_in_progress_status = r; + ioc->shost_recovery = 0; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + ioc->ioc_reset_count++; + mutex_unlock(&ioc->reset_in_progress_mutex); + + out_unlocked: + if ((r == 0) && is_trigger) { + if (is_fault) + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT); + else + mpt3sas_trigger_master(ioc, + MASTER_TRIGGER_ADAPTER_RESET); + } + dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, + __func__)); + return r; +} diff --git a/addons/mpt3sas/src/3.10.108/mpt3sas_base.h b/addons/mpt3sas/src/3.10.108/mpt3sas_base.h new file mode 100644 index 00000000..997e13f6 --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpt3sas_base.h @@ -0,0 +1,1151 @@ +/* + * This is the Fusion MPT base driver providing common API layer interface + * for access to MPT (Message Passing Technology) firmware. + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h + * Copyright (C) 2012 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_BASE_H_INCLUDED
+#define MPT3SAS_BASE_H_INCLUDED
+
+#include "mpi/mpi2_type.h"
+#include "mpi/mpi2.h"
+#include "mpi/mpi2_ioc.h"
+#include "mpi/mpi2_cnfg.h"
+#include "mpi/mpi2_init.h"
+#include "mpi/mpi2_raid.h"
+#include "mpi/mpi2_tool.h"
+#include "mpi/mpi2_sas.h"
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "mpt3sas_debug.h"
+#include "mpt3sas_trigger_diag.h"
+
+/* driver versioning info */
+#define MPT3SAS_DRIVER_NAME		"mpt3sas"
+#define MPT3SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
+#define MPT3SAS_DESCRIPTION	"LSI MPT Fusion SAS 3.0 Device Driver"
+#define MPT3SAS_DRIVER_VERSION		"01.100.01.00"
+#define MPT3SAS_MAJOR_VERSION		1
+#define MPT3SAS_MINOR_VERSION		100
+#define MPT3SAS_BUILD_VERSION		1
+#define MPT3SAS_RELEASE_VERSION	00
+
+/*
+ * Set MPT3SAS_SG_DEPTH value based on user input.
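+ *
+ * For example, building with CONFIG_SCSI_MPT3SAS_MAX_SGE=128 in the
+ * kernel .config makes MPT3SAS_SG_DEPTH 128 below; without that option
+ * it falls back to SCSI_MAX_SG_SEGMENTS.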
+ */
+#define MPT3SAS_MAX_PHYS_SEGMENTS	SCSI_MAX_SG_SEGMENTS
+#define MPT3SAS_MIN_PHYS_SEGMENTS	16
+#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE
+#define MPT3SAS_SG_DEPTH		CONFIG_SCSI_MPT3SAS_MAX_SGE
+#else
+#define MPT3SAS_SG_DEPTH		MPT3SAS_MAX_PHYS_SEGMENTS
+#endif
+
+
+/*
+ * Generic Defines
+ */
+#define MPT3SAS_SATA_QUEUE_DEPTH	32
+#define MPT3SAS_SAS_QUEUE_DEPTH		254
+#define MPT3SAS_RAID_QUEUE_DEPTH	128
+
+#define MPT_NAME_LENGTH			32	/* generic length of strings */
+#define MPT_STRING_LENGTH		64
+
+#define MPT_MAX_CALLBACKS		32
+
+
+#define CAN_SLEEP			1
+#define NO_SLEEP			0
+
+#define INTERNAL_CMDS_COUNT		10	/* reserved cmds */
+
+#define MPI3_HIM_MASK			0xFFFFFFFF /* mask every bit */
+
+#define MPT3SAS_INVALID_DEVICE_HANDLE	0xFFFF
+
+/*
+ * reset phases
+ */
+#define MPT3_IOC_PRE_RESET		1 /* prior to host reset */
+#define MPT3_IOC_AFTER_RESET		2 /* just after host reset */
+#define MPT3_IOC_DONE_RESET		3 /* links re-initialized */
+
+/*
+ * logging format
+ */
+#define MPT3SAS_FMT			"%s: "
+
+/*
+ * per target private data
+ */
+#define MPT_TARGET_FLAGS_RAID_COMPONENT	0x01
+#define MPT_TARGET_FLAGS_VOLUME		0x02
+#define MPT_TARGET_FLAGS_DELETED	0x04
+#define MPT_TARGET_FASTPATH_IO		0x08
+
+
+
+/*
+ * status bits for ioc->diag_buffer_status
+ */
+#define MPT3_DIAG_BUFFER_IS_REGISTERED	(0x01)
+#define MPT3_DIAG_BUFFER_IS_RELEASED	(0x02)
+#define MPT3_DIAG_BUFFER_IS_DIAG_RESET	(0x04)
+
+
+/* OEM Identifiers */
+#define MFG10_OEM_ID_INVALID		(0x00000000)
+#define MFG10_OEM_ID_DELL		(0x00000001)
+#define MFG10_OEM_ID_FSC		(0x00000002)
+#define MFG10_OEM_ID_SUN		(0x00000003)
+#define MFG10_OEM_ID_IBM		(0x00000004)
+
+/* GENERIC Flags 0 */
+#define MFG10_GF0_OCE_DISABLED		(0x00000001)
+#define MFG10_GF0_R1E_DRIVE_COUNT	(0x00000002)
+#define MFG10_GF0_R10_DISPLAY		(0x00000004)
+#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008)
+#define MFG10_GF0_SINGLE_DRIVE_R0	(0x00000010)
+
+/* OEM Specific Flags will come from OEM specific header files */
+struct Mpi2ManufacturingPage10_t {
+	MPI2_CONFIG_PAGE_HEADER	Header;		/* 00h */
+	U8	OEMIdentifier;			/* 04h */
+	U8	Reserved1;			/* 05h */
+	U16	Reserved2;			/* 08h */
+	U32	Reserved3;			/* 0Ch */
+	U32	GenericFlags0;			/* 10h */
+	U32	GenericFlags1;			/* 14h */
+	U32	Reserved4;			/* 18h */
+	U32	OEMSpecificFlags0;		/* 1Ch */
+	U32	OEMSpecificFlags1;		/* 20h */
+	U32	Reserved5[18];			/* 24h - 60h */
+};
+
+
+/* Miscellaneous options */
+struct Mpi2ManufacturingPage11_t {
+	MPI2_CONFIG_PAGE_HEADER	Header;		/* 00h */
+	__le32	Reserved1;			/* 04h */
+	u8	Reserved2;			/* 08h */
+	u8	EEDPTagMode;			/* 09h */
+	u8	Reserved3;			/* 0Ah */
+	u8	Reserved4;			/* 0Bh */
+	__le32	Reserved5[23];			/* 0Ch-60h */
+};
+
+/**
+ * struct MPT3SAS_TARGET - starget private hostdata
+ * @starget: starget object
+ * @sas_address: target sas address
+ * @handle: device handle
+ * @num_luns: number of luns
+ * @flags: MPT_TARGET_FLAGS_XXX flags
+ * @deleted: target flagged for deletion
+ * @tm_busy: target is busy with TM request.
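+ *
+ * This struct is stored in scsi_target->hostdata; a typical lookup,
+ * sketched here for illustration only, is:
+ *
+ *	struct MPT3SAS_TARGET *sas_target_priv_data = starget->hostdata;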
+ */ +struct MPT3SAS_TARGET { + struct scsi_target *starget; + u64 sas_address; + u16 handle; + int num_luns; + u32 flags; + u8 deleted; + u8 tm_busy; +}; + + +/* + * per device private data + */ +#define MPT_DEVICE_FLAGS_INIT 0x01 +#define MPT_DEVICE_TLR_ON 0x02 + +/** + * struct MPT3SAS_DEVICE - sdev private hostdata + * @sas_target: starget private hostdata + * @lun: lun number + * @flags: MPT_DEVICE_XXX flags + * @configured_lun: lun is configured + * @block: device is in SDEV_BLOCK state + * @tlr_snoop_check: flag used in determining whether to disable TLR + * @eedp_enable: eedp support enable bit + * @eedp_type: 0(type_1), 1(type_2), 2(type_3) + * @eedp_block_length: block size + * @ata_command_pending: SATL passthrough outstanding for device + */ +struct MPT3SAS_DEVICE { + struct MPT3SAS_TARGET *sas_target; + unsigned int lun; + u32 flags; + u8 configured_lun; + u8 block; + u8 tlr_snoop_check; + /* + * Bug workaround for SATL handling: the mpt2/3sas firmware + * doesn't return BUSY or TASK_SET_FULL for subsequent + * commands while a SATL pass through is in operation as the + * spec requires, it simply does nothing with them until the + * pass through completes, causing them possibly to timeout if + * the passthrough is a long executing command (like format or + * secure erase). This variable allows us to do the right + * thing while a SATL command is pending. + */ + unsigned long ata_command_pending; +}; + +#define MPT3_CMD_NOT_USED 0x8000 /* free */ +#define MPT3_CMD_COMPLETE 0x0001 /* completed */ +#define MPT3_CMD_PENDING 0x0002 /* pending */ +#define MPT3_CMD_REPLY_VALID 0x0004 /* reply is valid */ +#define MPT3_CMD_RESET 0x0008 /* host reset dropped the command */ + +/** + * struct _internal_cmd - internal commands struct + * @mutex: mutex + * @done: completion + * @reply: reply message pointer + * @sense: sense data + * @status: MPT3_CMD_XXX status + * @smid: system message id + */ +struct _internal_cmd { + struct mutex mutex; + struct completion done; + void *reply; + void *sense; + u16 status; + u16 smid; +}; + + + +/** + * struct _sas_device - attached device information + * @list: sas device list + * @starget: starget object + * @sas_address: device sas address + * @device_name: retrieved from the SAS IDENTIFY frame. 
@handle: device handle
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: enclosure handle
+ * @enclosure_logical_id: enclosure logical identifier
+ * @volume_handle: volume handle (valid when hidden raid member)
+ * @volume_wwid: volume unique identifier
+ * @device_info: bitfield provides detailed info about the device
+ * @id: target id
+ * @channel: target channel
+ * @slot: slot number
+ * @phy: phy identifier provided in sas device page 0
+ * @fast_path: fast path feature enable bit
+ * @responding: used in _scsih_sas_device_mark_responding
+ */
+struct _sas_device {
+	struct list_head list;
+	struct scsi_target *starget;
+	u64	sas_address;
+	u64	device_name;
+	u16	handle;
+	u64	sas_address_parent;
+	u16	enclosure_handle;
+	u64	enclosure_logical_id;
+	u16	volume_handle;
+	u64	volume_wwid;
+	u32	device_info;
+	int	id;
+	int	channel;
+	u16	slot;
+	u8	phy;
+	u8	responding;
+	u8	fast_path;
+};
+
+/**
+ * struct _raid_device - raid volume link list
+ * @list: sas device list
+ * @starget: starget object
+ * @sdev: scsi device struct (volumes are single lun)
+ * @wwid: unique identifier for the volume
+ * @handle: device handle
+ * @id: target id
+ * @channel: target channel
+ * @volume_type: the raid level
+ * @device_info: bitfield provides detailed info about the hidden components
+ * @num_pds: number of hidden raid components
+ * @responding: used in _scsih_raid_device_mark_responding
+ * @percent_complete: resync percent complete
+ */
+#define MPT_MAX_WARPDRIVE_PDS		8
+struct _raid_device {
+	struct list_head list;
+	struct scsi_target *starget;
+	struct scsi_device *sdev;
+	u64	wwid;
+	u16	handle;
+	int	id;
+	int	channel;
+	u8	volume_type;
+	u8	num_pds;
+	u8	responding;
+	u8	percent_complete;
+	u32	device_info;
+};
+
+/**
+ * struct _boot_device - boot device info
+ * @is_raid: flag to indicate whether this is a volume
+ * @device: holds pointer for either struct _sas_device or
+ *	struct _raid_device
+ */
+struct _boot_device {
+	u8 is_raid;
+	void *device;
+};
+
+/**
+ * struct _sas_port - wide/narrow sas port information
+ * @port_list: list of ports belonging to expander
+ * @num_phys: number of phys belonging to this port
+ * @remote_identify: attached device identification
+ * @rphy: sas transport rphy object
+ * @port: sas transport wide/narrow port object
+ * @phy_list: _sas_phy list objects belonging to this port
+ */
+struct _sas_port {
+	struct list_head port_list;
+	u8	num_phys;
+	struct sas_identify remote_identify;
+	struct sas_rphy *rphy;
+	struct sas_port *port;
+	struct list_head phy_list;
+};
+
+/**
+ * struct _sas_phy - phy information
+ * @port_siblings: list of phys belonging to a port
+ * @identify: phy identification
+ * @remote_identify: attached device identification
+ * @phy: sas transport phy object
+ * @phy_id: unique phy id
+ * @handle: device handle for this phy
+ * @attached_handle: device handle for attached device
+ * @phy_belongs_to_port: port has been created for this phy
+ */
+struct _sas_phy {
+	struct list_head port_siblings;
+	struct sas_identify identify;
+	struct sas_identify remote_identify;
+	struct sas_phy *phy;
+	u8	phy_id;
+	u16	handle;
+	u16	attached_handle;
+	u8	phy_belongs_to_port;
+};
+
+/**
+ * struct _sas_node - sas_host/expander information
+ * @list: list of expanders
+ * @parent_dev: parent device class
+ * @num_phys: number of phys belonging to this sas_host/expander
+ * @sas_address: sas address of this sas_host/expander
+ * @handle: handle for this sas_host/expander
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: handle of the enclosure this node is a member of
+ * @enclosure_logical_id: enclosure logical identifier
+ * @device_info: bitwise defining capabilities of this sas_host/expander
+ * @responding: used in _scsih_expander_device_mark_responding
+ * @phy: a list of phys that make up this sas_host/expander
+ * @sas_port_list: list of ports attached to this sas_host/expander
+ */
+struct _sas_node {
+	struct list_head list;
+	struct device *parent_dev;
+	u8	num_phys;
+	u64	sas_address;
+	u16	handle;
+	u64	sas_address_parent;
+	u16	enclosure_handle;
+	u64	enclosure_logical_id;
+	u8	responding;
+	struct	_sas_phy *phy;
+	struct list_head sas_port_list;
+};
+
+/**
+ * enum reset_type - reset state
+ * @FORCE_BIG_HAMMER: issue diagnostic reset
+ * @SOFT_RESET: issue message_unit_reset; if that fails, fall back to
+ *	the big hammer (diagnostic reset)
+ */
+enum reset_type {
+	FORCE_BIG_HAMMER,
+	SOFT_RESET,
+};
+
+/**
+ * struct chain_tracker - firmware chain tracker
+ * @chain_buffer: chain buffer
+ * @chain_buffer_dma: physical address
+ * @tracker_list: list of free request (ioc->free_chain_list)
+ */
+struct chain_tracker {
+	void *chain_buffer;
+	dma_addr_t chain_buffer_dma;
+	struct list_head tracker_list;
+};
+
+/**
+ * struct scsiio_tracker - scsi mf request tracker
+ * @smid: system message id
+ * @scmd: scsi request pointer
+ * @cb_idx: callback index
+ * @chain_list: list of chains associated with this request
+ * @tracker_list: list of free request (ioc->free_list)
+ */
+struct scsiio_tracker {
+	u16	smid;
+	struct scsi_cmnd *scmd;
+	u8	cb_idx;
+	struct list_head chain_list;
+	struct list_head tracker_list;
+};
+
+/**
+ * struct request_tracker - firmware request tracker
+ * @smid: system message id
+ * @cb_idx: callback index
+ * @tracker_list: list of free request (ioc->free_list)
+ */
+struct request_tracker {
+	u16	smid;
+	u8	cb_idx;
+	struct list_head tracker_list;
+};
+
+/**
+ * struct _tr_list - target reset list
+ * @list: list entry
+ * @handle: device handle
+ * @state: state machine
+ */
+struct _tr_list {
+	struct list_head list;
+	u16	handle;
+	u16	state;
+};
+
+
+/**
+ * struct adapter_reply_queue - the reply queue struct
+ * @ioc: per adapter object
+ * @msix_index: msix index into vector table
+ * @vector: irq vector
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @reply_post_free: reply post base virt address
+ * @name: the name registered to request_irq()
+ * @busy: isr is actively processing replies on another cpu
+ * @list: entry in ioc->reply_queue_list
+ */
+struct adapter_reply_queue {
+	struct MPT3SAS_ADAPTER	*ioc;
+	u8	msix_index;
+	unsigned int vector;
+	u32	reply_post_host_index;
+	Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+	char	name[MPT_NAME_LENGTH];
+	atomic_t busy;
+	struct list_head list;
+};
+
+typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
+
+/* SAS3.0 support */
+typedef int (*MPT_BUILD_SG_SCMD)(struct MPT3SAS_ADAPTER *ioc,
+	struct scsi_cmnd *scmd, u16 smid);
+typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
+	dma_addr_t data_out_dma, size_t data_out_sz,
+	dma_addr_t data_in_dma, size_t data_in_sz);
+typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
+	void *paddr);
+
+
+
+/* IOC Facts and Port Facts converted from little endian to cpu */
+union mpi3_version_union {
+	MPI2_VERSION_STRUCT Struct;
+	u32 Word;
+};
+
+struct mpt3sas_facts {
+	u16	MsgVersion;
+	u16	HeaderVersion;
+	u8	IOCNumber;
+	u8	VP_ID;
+	u8	VF_ID;
+	u16	IOCExceptions;
+	u16	IOCStatus;
+	u32	IOCLogInfo;
+	u8	MaxChainDepth;
+	u8	WhoInit;
+	u8	NumberOfPorts;
+	u8	MaxMSIxVectors;
+	u16	RequestCredit;
+	u16	ProductID;
+	u32	IOCCapabilities;
+	union mpi3_version_union FWVersion;
+	u16	IOCRequestFrameSize;
+	u16	Reserved3;
+	u16	MaxInitiators;
+	u16	MaxTargets;
+	u16	MaxSasExpanders;
+	u16	MaxEnclosures;
+	u16	ProtocolFlags;
+	u16	HighPriorityCredit;
+	u16	MaxReplyDescriptorPostQueueDepth;
+	u8	ReplyFrameSize;
+	u8	MaxVolumes;
+	u16	MaxDevHandle;
+	u16	MaxPersistentEntries;
+	u16	MinDevHandle;
+};
+
+struct mpt3sas_port_facts {
+	u8	PortNumber;
+	u8	VP_ID;
+	u8	VF_ID;
+	u8	PortType;
+	u16	MaxPostedCmdBuffers;
+};
+
+/**
+ * enum mutex_type - task management mutex type
+ * @TM_MUTEX_OFF: mutex is not required because the calling function is
+ *	already holding it
+ * @TM_MUTEX_ON: mutex is required
+ */
+enum mutex_type {
+	TM_MUTEX_OFF = 0,
+	TM_MUTEX_ON = 1,
+};
+
+typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
+/**
+ * struct MPT3SAS_ADAPTER - per adapter struct
+ * @list: ioc_list
+ * @shost: shost object
+ * @id: unique adapter id
+ * @cpu_count: number of online cpus
+ * @name: generic ioc string
+ * @tmp_string: tmp string used for logging
+ * @pdev: pci pdev object
+ * @chip: memory mapped register space
+ * @chip_phys: physical address prior to mapping
+ * @logging_level: see mpt3sas_debug.h
+ * @fwfault_debug: debugging FW timeouts
+ * @ir_firmware: IR firmware present
+ * @bars: bitmask of BARs that must be configured
+ * @mask_interrupts: ignore interrupt
+ * @fault_reset_work_q_name: fw fault work queue
+ * @fault_reset_work_q: ""
+ * @fault_reset_work: ""
+ * @firmware_event_name: fw event work queue
+ * @firmware_event_thread: ""
+ * @fw_event_lock:
+ * @fw_event_list: list of fw events
+ * @aen_event_read_flag: event log was read
+ * @broadcast_aen_busy: broadcast aen waiting to be serviced
+ * @shost_recovery: host reset in progress
+ * @ioc_reset_in_progress_lock:
+ * @ioc_link_reset_in_progress: phy/hard reset in progress
+ * @ignore_loginfos: ignore loginfos during task management
+ * @remove_host: flag for when driver unloads, to avoid sending dev resets
+ * @pci_error_recovery: flag to prevent ioc access until slot reset completes
+ * @wait_for_discovery_to_complete: flag set at driver load time when
+ *	waiting on reporting devices
+ * @is_driver_loading: flag set at driver load time
+ * @port_enable_failed: flag set when port enable has failed
+ * @start_scan: flag set from scan_start callback, cleared from _mpt3sas_fw_work
+ * @start_scan_failed: means port enable failed, returns the ioc_status
+ * @msix_enable: flag indicating msix is enabled
+ * @msix_vector_count: number of msix vectors
+ * @cpu_msix_table: table for mapping cpus to msix index
+ * @cpu_msix_table_sz: table size
+ * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
+ * @scsi_io_cb_idx: shost generated commands
+ * @tm_cb_idx: task management commands
+ * @scsih_cb_idx: scsih internal commands
+ * @transport_cb_idx: transport internal commands
+ * @ctl_cb_idx: ctl internal commands
+ * @base_cb_idx: base internal commands
+ * @config_cb_idx: config page internal commands
+ * @tm_tr_cb_idx: device removal target reset handshake
+ * @tm_tr_volume_cb_idx: volume removal target reset
+ * @base_cmds:
+ * @transport_cmds:
+ * @scsih_cmds:
+ * @tm_cmds:
+ * @ctl_cmds:
+ * @config_cmds:
+ * @base_add_sg_single: handler for either 32/64 bit sgl's
+ * @event_type: bits indicating which events to log
+ * @event_context: unique id for each logged event
+ * @event_log: event log pointer
+ * @event_masks: events that are masked
+ * @facts: static facts data
+ * @pfacts: static port facts data
+ * @manu_pg0: static manufacturing page 0
+ * @manu_pg10: static manufacturing page 10
+ * @manu_pg11: static manufacturing page 11
+ * @bios_pg2: static bios page 2
+ * @bios_pg3: static bios page 3
+ * @ioc_pg8: static ioc page 8
+ * @iounit_pg0: static iounit page 0
+ * @iounit_pg1: static iounit page 1
+ * @sas_hba: sas host object
+ * @sas_expander_list: expander object list
+ * @sas_node_lock:
+ * @sas_device_list: sas device object list
+ * @sas_device_init_list: sas device object list (used only at init time)
+ * @sas_device_lock:
+ * @io_missing_delay: time for IO completed by fw when PDR enabled
+ * @device_missing_delay: time for device missing by fw when PDR enabled
+ * @sas_id: used for setting volume target IDs
+ * @blocking_handles: bitmask used to identify which devices need blocking
+ * @pd_handles: bitmask for PD handles
+ * @pd_handles_sz: size of pd_handle bitmask
+ * @config_page_sz: config page size
+ * @config_page: reserve memory for config page payload
+ * @config_page_dma:
+ * @hba_queue_depth: hba request queue depth
+ * @sge_size: sg element size for either 32/64 bit
+ * @scsiio_depth: SCSI_IO queue depth
+ * @request_sz: per request frame size
+ * @request: pool of request frames
+ * @request_dma:
+ * @request_dma_sz:
+ * @scsi_lookup: firmware request tracker list
+ * @scsi_lookup_lock:
+ * @free_list: free list of request
+ * @pending_io_count:
+ * @reset_wq:
+ * @chain: pool of chains
+ * @chain_dma:
+ * @max_sges_in_main_message: number of sg elements in main message
+ * @max_sges_in_chain_message: number of sg elements per chain
+ * @chains_needed_per_io: max chains per io
+ * @chain_depth: total chains allocated
+ * @hi_priority_smid:
+ * @hi_priority:
+ * @hi_priority_dma:
+ * @hi_priority_depth:
+ * @hpr_lookup:
+ * @hpr_free_list:
+ * @internal_smid:
+ * @internal:
+ * @internal_dma:
+ * @internal_depth:
+ * @internal_lookup:
+ * @internal_free_list:
+ * @sense: pool of sense
+ * @sense_dma:
+ * @sense_dma_pool:
+ * @reply_depth: hba reply queue depth
+ * @reply_sz: per reply frame size
+ * @reply: pool of replies
+ * @reply_dma:
+ * @reply_dma_pool:
+ * @reply_free_queue_depth: reply free depth
+ * @reply_free: pool for reply free queue (32 bit addr)
+ * @reply_free_dma:
+ * @reply_free_dma_pool:
+ * @reply_free_host_index: tail index in pool to insert free replies
+ * @reply_post_queue_depth: reply post queue depth
+ * @reply_post_free: pool for reply post (64bit descriptor)
+ * @reply_post_free_dma:
+ * @reply_queue_count: number of reply queues
+ * @reply_queue_list: linked list containing the reply queue info
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @delayed_tr_list: target reset linked list
+ * @delayed_tr_volume_list: volume target reset linked list
+ */
+struct MPT3SAS_ADAPTER {
+	struct list_head list;
+	struct Scsi_Host *shost;
+	u8	id;
+	int	cpu_count;
+	char	name[MPT_NAME_LENGTH];
+	char	tmp_string[MPT_STRING_LENGTH];
+	struct pci_dev *pdev;
+	Mpi2SystemInterfaceRegs_t __iomem *chip;
+	resource_size_t chip_phys;
+	int	logging_level;
+	int	fwfault_debug;
+	u8	ir_firmware;
+	int	bars;
+	u8	mask_interrupts;
+
+	/* fw fault handler */
+	char	fault_reset_work_q_name[20];
+	struct workqueue_struct *fault_reset_work_q;
+	struct delayed_work fault_reset_work;
+
+	/* fw event handler */
+	char	firmware_event_name[20];
+	struct workqueue_struct	*firmware_event_thread;
+	spinlock_t fw_event_lock;
+	struct list_head fw_event_list;
+
+	/* misc flags */
+	int	aen_event_read_flag;
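+	/*
+	 * The fields below track broadcast AEN servicing and host reset
+	 * progress; shost_recovery is raised for the duration of a host
+	 * reset (see mpt3sas_base_hard_reset_handler) to gate new I/O.
+	 */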
+ u8 broadcast_aen_busy; + u16 broadcast_aen_pending; + u8 shost_recovery; + + struct mutex reset_in_progress_mutex; + spinlock_t ioc_reset_in_progress_lock; + u8 ioc_link_reset_in_progress; + u8 ioc_reset_in_progress_status; + + u8 ignore_loginfos; + u8 remove_host; + u8 pci_error_recovery; + u8 wait_for_discovery_to_complete; + u8 is_driver_loading; + u8 port_enable_failed; + u8 start_scan; + u16 start_scan_failed; + + u8 msix_enable; + u16 msix_vector_count; + u8 *cpu_msix_table; + u16 cpu_msix_table_sz; + u32 ioc_reset_count; + MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds; + + /* internal commands, callback index */ + u8 scsi_io_cb_idx; + u8 tm_cb_idx; + u8 transport_cb_idx; + u8 scsih_cb_idx; + u8 ctl_cb_idx; + u8 base_cb_idx; + u8 port_enable_cb_idx; + u8 config_cb_idx; + u8 tm_tr_cb_idx; + u8 tm_tr_volume_cb_idx; + u8 tm_sas_control_cb_idx; + struct _internal_cmd base_cmds; + struct _internal_cmd port_enable_cmds; + struct _internal_cmd transport_cmds; + struct _internal_cmd scsih_cmds; + struct _internal_cmd tm_cmds; + struct _internal_cmd ctl_cmds; + struct _internal_cmd config_cmds; + + MPT_ADD_SGE base_add_sg_single; + + /* function ptr for either IEEE or MPI sg elements */ + MPT_BUILD_SG_SCMD build_sg_scmd; + MPT_BUILD_SG build_sg; + MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge; + u8 mpi25; + u16 sge_size_ieee; + + /* function ptr for MPI sg elements only */ + MPT_BUILD_SG build_sg_mpi; + MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi; + + /* event log */ + u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; + u32 event_context; + void *event_log; + u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; + + /* static config pages */ + struct mpt3sas_facts facts; + struct mpt3sas_port_facts *pfacts; + Mpi2ManufacturingPage0_t manu_pg0; + struct Mpi2ManufacturingPage10_t manu_pg10; + struct Mpi2ManufacturingPage11_t manu_pg11; + Mpi2BiosPage2_t bios_pg2; + Mpi2BiosPage3_t bios_pg3; + Mpi2IOCPage8_t ioc_pg8; + Mpi2IOUnitPage0_t iounit_pg0; + Mpi2IOUnitPage1_t iounit_pg1; + + struct _boot_device req_boot_device; + struct _boot_device req_alt_boot_device; + struct _boot_device current_boot_device; + + /* sas hba, expander, and device list */ + struct _sas_node sas_hba; + struct list_head sas_expander_list; + spinlock_t sas_node_lock; + struct list_head sas_device_list; + struct list_head sas_device_init_list; + spinlock_t sas_device_lock; + struct list_head raid_device_list; + spinlock_t raid_device_lock; + u8 io_missing_delay; + u16 device_missing_delay; + int sas_id; + + void *blocking_handles; + void *pd_handles; + u16 pd_handles_sz; + + /* config page */ + u16 config_page_sz; + void *config_page; + dma_addr_t config_page_dma; + + /* scsiio request */ + u16 hba_queue_depth; + u16 sge_size; + u16 scsiio_depth; + u16 request_sz; + u8 *request; + dma_addr_t request_dma; + u32 request_dma_sz; + struct scsiio_tracker *scsi_lookup; + ulong scsi_lookup_pages; + spinlock_t scsi_lookup_lock; + struct list_head free_list; + int pending_io_count; + wait_queue_head_t reset_wq; + + /* chain */ + struct chain_tracker *chain_lookup; + struct list_head free_chain_list; + struct dma_pool *chain_dma_pool; + ulong chain_pages; + u16 max_sges_in_main_message; + u16 max_sges_in_chain_message; + u16 chains_needed_per_io; + u32 chain_depth; + + /* hi-priority queue */ + u16 hi_priority_smid; + u8 *hi_priority; + dma_addr_t hi_priority_dma; + u16 hi_priority_depth; + struct request_tracker *hpr_lookup; + struct list_head hpr_free_list; + + /* internal queue */ + u16 internal_smid; + u8 
*internal; + dma_addr_t internal_dma; + u16 internal_depth; + struct request_tracker *internal_lookup; + struct list_head internal_free_list; + + /* sense */ + u8 *sense; + dma_addr_t sense_dma; + struct dma_pool *sense_dma_pool; + + /* reply */ + u16 reply_sz; + u8 *reply; + dma_addr_t reply_dma; + u32 reply_dma_max_address; + u32 reply_dma_min_address; + struct dma_pool *reply_dma_pool; + + /* reply free queue */ + u16 reply_free_queue_depth; + __le32 *reply_free; + dma_addr_t reply_free_dma; + struct dma_pool *reply_free_dma_pool; + u32 reply_free_host_index; + + /* reply post queue */ + u16 reply_post_queue_depth; + Mpi2ReplyDescriptorsUnion_t *reply_post_free; + dma_addr_t reply_post_free_dma; + struct dma_pool *reply_post_free_dma_pool; + u8 reply_queue_count; + struct list_head reply_queue_list; + + struct list_head delayed_tr_list; + struct list_head delayed_tr_volume_list; + + /* diag buffer support */ + u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; + u32 diag_buffer_sz[MPI2_DIAG_BUF_TYPE_COUNT]; + dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT]; + u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT]; + u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT]; + u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23]; + u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT]; + u32 ring_buffer_offset; + u32 ring_buffer_sz; + spinlock_t diag_trigger_lock; + u8 diag_trigger_active; + struct SL_WH_MASTER_TRIGGER_T diag_trigger_master; + struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event; + struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi; + struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi; +}; + +typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); + + +/* base shared API */ +extern struct list_head mpt3sas_ioc_list; +void mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc); + +int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc); +int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc); +int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag, + enum reset_type type); + +void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid); +void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid); +__le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, + u16 smid); +void mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc); + +/* hi-priority queue */ +u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); +u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, + struct scsi_cmnd *scmd); + +u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); +void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid); +void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle); +void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle); +void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid); +void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid); +void mpt3sas_base_initialize_callback_handler(void); +u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func); +void mpt3sas_base_release_callback_handler(u8 cb_idx); + +u8 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +u8 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u8 
msix_index, u32 reply); +void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, + u32 phys_addr); + +u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked); + +void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code); +int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, + Mpi2SasIoUnitControlReply_t *mpi_reply, + Mpi2SasIoUnitControlRequest_t *mpi_request); +int mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, + Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request); + +void mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, + u32 *event_type); + +void mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc); + +void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, + u16 device_missing_delay, u8 io_missing_delay); + +int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); + + +/* scsih shared API */ +u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, + u32 reply); +void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase); + +int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, u16 smid_task, + ulong timeout, unsigned long serial_number, enum mutex_type m_type); +void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); +void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); +void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address); +void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address); + +struct _sas_node *mpt3sas_scsih_expander_find_by_handle( + struct MPT3SAS_ADAPTER *ioc, u16 handle); +struct _sas_node *mpt3sas_scsih_expander_find_by_sas_address( + struct MPT3SAS_ADAPTER *ioc, u64 sas_address); +struct _sas_device *mpt3sas_scsih_sas_device_find_by_sas_address( + struct MPT3SAS_ADAPTER *ioc, u64 sas_address); + +void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc); + +/* config shared API */ +u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +int mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, + u8 *num_phys); +int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page); +int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page, + u16 sz); +int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage10_t *config_page); + +int mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page); +int mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page); + +int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2BiosPage2_t *config_page); +int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2BiosPage3_t *config_page); +int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage0_t *config_page); +int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_sas_device_pg1(struct 
MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, + u16 sz); +int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage1_t *config_page); +int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage1_t *config_page); +int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz); +int mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz); +int mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOCPage8_t *config_page); +int mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage1_t *config_page, + u32 phy_number, u16 handle); +int mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number); +int mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number); +int mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, + u32 handle); +int mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 *num_pds); +int mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form, + u32 handle, u16 sz); +int mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, + u32 form, u32 form_specific); +int mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle, + u16 *volume_handle); +int mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, + u16 volume_handle, u64 *wwid); + +/* ctl shared API */ +extern struct device_attribute *mpt3sas_host_attrs[]; +extern struct device_attribute *mpt3sas_dev_attrs[]; +void mpt3sas_ctl_init(void); +void mpt3sas_ctl_exit(void); +u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +void mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase); +u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, + u8 msix_index, u32 reply); +void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventNotificationReply_t *mpi_reply); + +void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, + u8 bits_to_regsiter); +int mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, + u8 *issue_reset); + +/* transport shared API */ +u8 mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +struct _sas_port *mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, + u16 handle, u64 sas_address); +void mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + u64 
sas_address_parent); +int mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy + *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev); +int mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, + struct _sas_phy *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1, + struct device *parent_dev); +void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, u16 handle, u8 phy_number, u8 link_rate); +extern struct sas_function_template mpt3sas_transport_functions; +extern struct scsi_transport_template *mpt3sas_transport_template; +extern int scsi_internal_device_block(struct scsi_device *sdev); +extern int scsi_internal_device_unblock(struct scsi_device *sdev, + enum scsi_device_state new_state); +/* trigger data externs */ +void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data); +void mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data); +void mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, + u32 tigger_bitmask); +void mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event, + u16 log_entry_qualifier); +void mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, + u8 asc, u8 ascq); +void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, + u32 loginfo); +#endif /* MPT3SAS_BASE_H_INCLUDED */ diff --git a/addons/mpt3sas/src/3.10.108/mpt3sas_config.c b/addons/mpt3sas/src/3.10.108/mpt3sas_config.c new file mode 100644 index 00000000..4db0c7a1 --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpt3sas_config.c @@ -0,0 +1,1649 @@ +/* + * This module provides common API for accessing firmware configuration pages + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c + * Copyright (C) 2012 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include "mpt3sas_base.h"
+
+/* local definitions */
+
+/* Timeout for config page request (in seconds) */
+#define MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT 15
+
+/* Common sgl flags for READING a config page. */
+#define MPT3_CONFIG_COMMON_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
+	MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
+	| MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT)
+
+/* Common sgl flags for WRITING a config page. */
+#define MPT3_CONFIG_COMMON_WRITE_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
+	MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
+	| MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC) \
+	<< MPI2_SGE_FLAGS_SHIFT)
+
+/**
+ * struct config_request - obtain dma memory via routine
+ * @sz: size
+ * @page: virt pointer
+ * @page_dma: phys pointer
+ *
+ */
+struct config_request {
+	u16	sz;
+	void	*page;
+	dma_addr_t page_dma;
+};
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _config_display_some_debug - debug routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @calling_function_name: string passed from calling function
+ * @mpi_reply: reply message frame
+ * Context: none.
+ *
+ * Function for displaying debug info helpful when debugging issues
+ * in this module.
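+ *
+ * With MPT_DEBUG_CONFIG set in ioc->logging_level this emits a line of
+ * the form (values illustrative only):
+ *
+ *	mpt3sas0: config_request: sas_device(0), action(0), form(0x00000000), smid(5)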
+ */
+static void
+_config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+	char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
+{
+	Mpi2ConfigRequest_t *mpi_request;
+	char *desc = NULL;
+
+	if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+		return;
+
+	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+	switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) {
+	case MPI2_CONFIG_PAGETYPE_IO_UNIT:
+		desc = "io_unit";
+		break;
+	case MPI2_CONFIG_PAGETYPE_IOC:
+		desc = "ioc";
+		break;
+	case MPI2_CONFIG_PAGETYPE_BIOS:
+		desc = "bios";
+		break;
+	case MPI2_CONFIG_PAGETYPE_RAID_VOLUME:
+		desc = "raid_volume";
+		break;
+	case MPI2_CONFIG_PAGETYPE_MANUFACTURING:
+		desc = "manufacturing";
+		break;
+	case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK:
+		desc = "physdisk";
+		break;
+	case MPI2_CONFIG_PAGETYPE_EXTENDED:
+		switch (mpi_request->ExtPageType) {
+		case MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT:
+			desc = "sas_io_unit";
+			break;
+		case MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER:
+			desc = "sas_expander";
+			break;
+		case MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE:
+			desc = "sas_device";
+			break;
+		case MPI2_CONFIG_EXTPAGETYPE_SAS_PHY:
+			desc = "sas_phy";
+			break;
+		case MPI2_CONFIG_EXTPAGETYPE_LOG:
+			desc = "log";
+			break;
+		case MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE:
+			desc = "enclosure";
+			break;
+		case MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG:
+			desc = "raid_config";
+			break;
+		case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING:
+			desc = "driver_mapping";
+			break;
+		}
+		break;
+	}
+
+	if (!desc)
+		return;
+
+	pr_info(MPT3SAS_FMT
+	    "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
+	    ioc->name, calling_function_name, desc,
+	    mpi_request->Header.PageNumber, mpi_request->Action,
+	    le32_to_cpu(mpi_request->PageAddress), smid);
+
+	if (!mpi_reply)
+		return;
+
+	if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
+		pr_info(MPT3SAS_FMT
+		    "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+		    ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+		    le32_to_cpu(mpi_reply->IOCLogInfo));
+}
+#endif
+
+/**
+ * _config_alloc_config_dma_memory - obtain physical memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper for obtaining dma-able memory for config page request.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
+	struct config_request *mem)
+{
+	int r = 0;
+
+	if (mem->sz > ioc->config_page_sz) {
+		mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
+		    &mem->page_dma, GFP_KERNEL);
+		if (!mem->page) {
+			pr_err(MPT3SAS_FMT
+				"%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
+			    ioc->name, __func__, mem->sz);
+			r = -ENOMEM;
+		}
+	} else { /* use the preallocated config page when the request fits */
+		mem->page = ioc->config_page;
+		mem->page_dma = ioc->config_page_dma;
+	}
+	return r;
+}
+
+/**
+ * _config_free_config_dma_memory - wrapper to free the memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper to free dma-able memory when using
+ * _config_alloc_config_dma_memory.
+ *
+ * Return nothing.
+ */
+static void
+_config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
+	struct config_request *mem)
+{
+	if (mem->sz > ioc->config_page_sz)
+		dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page,
+		    mem->page_dma);
+}
+
+/**
+ * mpt3sas_config_done - config page completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame (lower 32bit addr)
+ * Context: none.
+ * + * The callback handler when using _config_request. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + if (ioc->config_cmds.status == MPT3_CMD_NOT_USED) + return 1; + if (ioc->config_cmds.smid != smid) + return 1; + ioc->config_cmds.status |= MPT3_CMD_COMPLETE; + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + ioc->config_cmds.status |= MPT3_CMD_REPLY_VALID; + memcpy(ioc->config_cmds.reply, mpi_reply, + mpi_reply->MsgLength*4); + } + ioc->config_cmds.status &= ~MPT3_CMD_PENDING; +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING + _config_display_some_debug(ioc, smid, "config_done", mpi_reply); +#endif + ioc->config_cmds.smid = USHRT_MAX; + complete(&ioc->config_cmds.done); + return 1; +} + +/** + * _config_request - main routine for sending config page requests + * @ioc: per adapter object + * @mpi_request: request message frame + * @mpi_reply: reply mf payload returned from firmware + * @timeout: timeout in seconds + * @config_page: contents of the config page + * @config_page_sz: size of config page + * Context: sleep + * + * A generic API for config page requests to firmware. + * + * The ioc->config_cmds.status flag should be MPT3_CMD_NOT_USED before calling + * this API. + * + * The callback index is set inside `ioc->config_cb_idx. + * + * Returns 0 for success, non-zero for failure. + */ +static int +_config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t + *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout, + void *config_page, u16 config_page_sz) +{ + u16 smid; + u32 ioc_state; + unsigned long timeleft; + Mpi2ConfigRequest_t *config_request; + int r; + u8 retry_count, issue_host_reset = 0; + u16 wait_state_count; + struct config_request mem; + u32 ioc_status = UINT_MAX; + + mutex_lock(&ioc->config_cmds.mutex); + if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: config_cmd in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->config_cmds.mutex); + return -EAGAIN; + } + + retry_count = 0; + memset(&mem, 0, sizeof(struct config_request)); + + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + + if (config_page) { + mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion; + mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber; + mpi_request->Header.PageType = mpi_reply->Header.PageType; + mpi_request->Header.PageLength = mpi_reply->Header.PageLength; + mpi_request->ExtPageLength = mpi_reply->ExtPageLength; + mpi_request->ExtPageType = mpi_reply->ExtPageType; + if (mpi_request->Header.PageLength) + mem.sz = mpi_request->Header.PageLength * 4; + else + mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; + r = _config_alloc_config_dma_memory(ioc, &mem); + if (r != 0) + goto out; + if (mpi_request->Action == + MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT || + mpi_request->Action == + MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) { + ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + MPT3_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz, + mem.page_dma); + memcpy(mem.page, config_page, min_t(u16, mem.sz, + config_page_sz)); + } else { + memset(config_page, 0, config_page_sz); + ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + MPT3_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma); + memset(mem.page, 0, min_t(u16, mem.sz, config_page_sz)); + } + } + + retry_config: + if (retry_count) { + if (retry_count > 2) { /* attempt 
only 2 retries */ + r = -EFAULT; + goto free_mem; + } + pr_info(MPT3SAS_FMT "%s: attempting retry (%d)\n", + ioc->name, __func__, retry_count); + } + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + ioc->config_cmds.status = MPT3_CMD_NOT_USED; + r = -EFAULT; + goto free_mem; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info(MPT3SAS_FMT "%s: ioc is operational\n", + ioc->name, __func__); + + smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + ioc->config_cmds.status = MPT3_CMD_NOT_USED; + r = -EAGAIN; + goto free_mem; + } + + r = 0; + memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t)); + ioc->config_cmds.status = MPT3_CMD_PENDING; + config_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->config_cmds.smid = smid; + memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t)); +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING + _config_display_some_debug(ioc, smid, "config_request", NULL); +#endif + init_completion(&ioc->config_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->config_cmds.done, + timeout*HZ); + if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2ConfigRequest_t)/4); + retry_count++; + if (ioc->config_cmds.smid == smid) + mpt3sas_base_free_smid(ioc, smid); + if ((ioc->shost_recovery) || (ioc->config_cmds.status & + MPT3_CMD_RESET) || ioc->pci_error_recovery) + goto retry_config; + issue_host_reset = 1; + r = -EFAULT; + goto free_mem; + } + + if (ioc->config_cmds.status & MPT3_CMD_REPLY_VALID) { + memcpy(mpi_reply, ioc->config_cmds.reply, + sizeof(Mpi2ConfigReply_t)); + + /* Reply Frame Sanity Checks to workaround FW issues */ + if ((mpi_request->Header.PageType & 0xF) != + (mpi_reply->Header.PageType & 0xF)) { + _debug_dump_mf(mpi_request, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->request_sz/4); + panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \ + " mpi_reply mismatch: Requested PageType(0x%02x)" \ + " Reply PageType(0x%02x)\n", \ + ioc->name, __func__, + (mpi_request->Header.PageType & 0xF), + (mpi_reply->Header.PageType & 0xF)); + } + + if (((mpi_request->Header.PageType & 0xF) == + MPI2_CONFIG_PAGETYPE_EXTENDED) && + mpi_request->ExtPageType != mpi_reply->ExtPageType) { + _debug_dump_mf(mpi_request, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->request_sz/4); + panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \ + " mpi_reply mismatch: Requested ExtPageType(0x%02x)" + " Reply ExtPageType(0x%02x)\n", + ioc->name, __func__, mpi_request->ExtPageType, + mpi_reply->ExtPageType); + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & MPI2_IOCSTATUS_MASK; + } + + if (retry_count) + pr_info(MPT3SAS_FMT "%s: retry (%d) completed!!\n", \ + ioc->name, __func__, retry_count); + + if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) && + config_page && mpi_request->Action == + MPI2_CONFIG_ACTION_PAGE_READ_CURRENT) { + u8 *p = (u8 *)mem.page; + + /* Config Page Sanity Checks to workaround FW issues */ + if (p) { + if 
((mpi_request->Header.PageType & 0xF) != + (p[3] & 0xF)) { + _debug_dump_mf(mpi_request, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_config(p, min_t(u16, mem.sz, + config_page_sz)/4); + panic(KERN_WARNING MPT3SAS_FMT + "%s: Firmware BUG:" \ + " config page mismatch:" + " Requested PageType(0x%02x)" + " Reply PageType(0x%02x)\n", + ioc->name, __func__, + (mpi_request->Header.PageType & 0xF), + (p[3] & 0xF)); + } + + if (((mpi_request->Header.PageType & 0xF) == + MPI2_CONFIG_PAGETYPE_EXTENDED) && + (mpi_request->ExtPageType != p[6])) { + _debug_dump_mf(mpi_request, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_config(p, min_t(u16, mem.sz, + config_page_sz)/4); + panic(KERN_WARNING MPT3SAS_FMT + "%s: Firmware BUG:" \ + " config page mismatch:" + " Requested ExtPageType(0x%02x)" + " Reply ExtPageType(0x%02x)\n", + ioc->name, __func__, + mpi_request->ExtPageType, p[6]); + } + } + memcpy(config_page, mem.page, min_t(u16, mem.sz, + config_page_sz)); + } + + free_mem: + if (config_page) + _config_free_config_dma_memory(ioc, &mem); + out: + ioc->config_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->config_cmds.mutex); + + if (issue_host_reset) + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg0 - obtain manufacturing page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
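+ *
+ * A minimal usage sketch (editor's illustration, not part of the original
+ * driver; assumes an initialized adapter object "ioc" and reads the page
+ * into a fixed-size stack buffer):
+ *
+ *    Mpi2ConfigReply_t reply;
+ *    Mpi2ManufacturingPage7_t pg7;
+ *
+ *    if (!mpt3sas_config_get_manufacturing_pg7(ioc, &reply, &pg7,
+ *        sizeof(pg7)))
+ *        pr_info("man pg7 version(%d)\n", pg7.Header.PageVersion);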
+ */ +int +mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 7; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING7_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sz); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg10 - obtain manufacturing page 10 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage10_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 10; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg11 - obtain manufacturing page 11 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
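+ *
+ * A minimal read-modify-write sketch (editor's illustration; pairs this
+ * getter with mpt3sas_config_set_manufacturing_pg11() below, "ioc" assumed
+ * initialized; any field edits would go between the two calls):
+ *
+ *    Mpi2ConfigReply_t reply;
+ *    struct Mpi2ManufacturingPage11_t pg11;
+ *
+ *    if (!mpt3sas_config_get_manufacturing_pg11(ioc, &reply, &pg11))
+ *        mpt3sas_config_set_manufacturing_pg11(ioc, &reply, &pg11);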
+ */ +int +mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_set_manufacturing_pg11 - set manufacturing page 11 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_bios_pg2 - obtain bios page 2 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
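+ *
+ * A minimal usage sketch (editor's illustration; "ioc" assumed
+ * initialized). The IOCStatus check mirrors the pattern used by the
+ * callers in this file:
+ *
+ *    Mpi2ConfigReply_t reply;
+ *    Mpi2BiosPage2_t pg2;
+ *    u16 ioc_status;
+ *
+ *    if (!mpt3sas_config_get_bios_pg2(ioc, &reply, &pg2)) {
+ *        ioc_status = le16_to_cpu(reply.IOCStatus) &
+ *            MPI2_IOCSTATUS_MASK;
+ *        if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
+ *            pr_info("bios pg2 read ok\n");
+ *    }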
+ */ +int +mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 2; + mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_bios_pg3 - obtain bios page 3 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2BiosPage3_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg0 - obtain iounit page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage0_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg1 - obtain iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
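+ *
+ * A minimal read-modify-write sketch (editor's illustration; pairs this
+ * getter with mpt3sas_config_set_iounit_pg1() below; the flag shown is a
+ * standard MPI2 definition, cited here as an assumption):
+ *
+ *    Mpi2ConfigReply_t reply;
+ *    Mpi2IOUnitPage1_t pg1;
+ *
+ *    if (!mpt3sas_config_get_iounit_pg1(ioc, &reply, &pg1)) {
+ *        pg1.Flags &= ~cpu_to_le32(
+ *            MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING);
+ *        mpt3sas_config_set_iounit_pg1(ioc, &reply, &pg1);
+ *    }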
+ */ +int +mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_set_iounit_pg1 - set iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 8; + mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_sas_device_pg0 - obtain sas device page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: device handle + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
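+ *
+ * A minimal usage sketch (editor's illustration; "ioc" assumed initialized
+ * and "handle" assumed to come from discovery; the FORM_HANDLE constant is
+ * the standard MPI2 page-address form for a specific device):
+ *
+ *    Mpi2ConfigReply_t reply;
+ *    Mpi2SasDevicePage0_t pg0;
+ *
+ *    if (!mpt3sas_config_get_sas_device_pg0(ioc, &reply, &pg0,
+ *        MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))
+ *        pr_info("sas_address(0x%016llx)\n", (unsigned long long)
+ *            le64_to_cpu(pg0.SASAddress));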
+ */ +int +mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page, + u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; + mpi_request.Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_sas_device_pg1 - obtain sas device page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: device handle + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page, + u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; + mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION; + mpi_request.Header.PageNumber = 1; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_number_hba_phys - obtain number of phys on the host + * @ioc: per adapter object + * @num_phys: pointer returned with the number of phys + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
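+ *
+ * A minimal usage sketch (editor's illustration): the count reported here
+ * is what sizes the buffers for the sas iounit page readers below:
+ *
+ *    u8 num_phys = 0;
+ *
+ *    if (!mpt3sas_config_get_number_hba_phys(ioc, &num_phys) && num_phys)
+ *        pr_info("host has %d phys\n", num_phys);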
+ */ +int +mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + u16 ioc_status; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t config_page; + + *num_phys = 0; + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(Mpi2SasIOUnitPage0_t)); + if (!r) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) + *num_phys = config_page.NumPhys; + } + out: + return r; +} + +/** + * mpt3sas_config_get_sas_iounit_pg0 - obtain sas iounit page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Calling function should call config_get_number_hba_phys prior to + * this function, so enough memory is allocated for config_page. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Calling function should call config_get_number_hba_phys prior to + * this function, so enough memory is allocated for config_page. + * + * Returns 0 for success, non-zero for failure. 
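+ *
+ * A minimal sizing sketch (editor's illustration) for the allocation this
+ * comment asks for; num_phys comes from
+ * mpt3sas_config_get_number_hba_phys(), and the PhyData element type is
+ * the standard MPI2 definition (an assumption here):
+ *
+ *    Mpi2ConfigReply_t reply;
+ *    Mpi2SasIOUnitPage1_t *pg1;
+ *    u16 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) +
+ *        (num_phys * sizeof(Mpi2SasIOUnit1PhyData_t));
+ *
+ *    pg1 = kzalloc(sz, GFP_KERNEL);
+ *    if (pg1 && !mpt3sas_config_get_sas_iounit_pg1(ioc, &reply, pg1, sz))
+ *        pr_info("sas iounit pg1 read ok\n");
+ *    kfree(pg1);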
+ */ +int +mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_set_sas_iounit_pg1 - send sas iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Calling function should call config_get_number_hba_phys prior to + * this function, so enough memory is allocated for config_page. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_expander_pg0 - obtain expander page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: expander handle + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
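+ *
+ * A minimal discovery-walk sketch (editor's illustration; the GET_NEXT_HNDL
+ * form and the 0xFFFF starting handle follow MPI2 convention, cited as an
+ * assumption): feed each returned DevHandle back, stopping once the
+ * firmware reports a non-success IOCStatus:
+ *
+ *    Mpi2ConfigReply_t reply;
+ *    Mpi2ExpanderPage0_t pg0;
+ *    u16 handle = 0xFFFF, ioc_status;
+ *
+ *    while (!mpt3sas_config_get_expander_pg0(ioc, &reply, &pg0,
+ *        MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle)) {
+ *        ioc_status = le16_to_cpu(reply.IOCStatus) &
+ *            MPI2_IOCSTATUS_MASK;
+ *        if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ *            break;
+ *        handle = le16_to_cpu(pg0.DevHandle);
+ *    }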
+ */ +int +mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_expander_pg1 - obtain expander page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @phy_number: phy number + * @handle: expander handle + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2ExpanderPage1_t *config_page, u32 phy_number, + u16 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM | + (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_enclosure_pg0 - obtain enclosure page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: expander handle + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
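+ *
+ * A minimal usage sketch (editor's illustration; "handle" assumed valid,
+ * the FORM_HANDLE constant is the standard MPI2 enclosure form):
+ *
+ *    Mpi2ConfigReply_t reply;
+ *    Mpi2SasEnclosurePage0_t pg0;
+ *
+ *    if (!mpt3sas_config_get_enclosure_pg0(ioc, &reply, &pg0,
+ *        MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, handle))
+ *        pr_info("enclosure_logical_id(0x%016llx)\n",
+ *            (unsigned long long)le64_to_cpu(pg0.EnclosureLogicalID));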
+ */ +int +mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_phy_pg0 - obtain phy page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @phy_number: phy number + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_phy_pg1 - obtain phy page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @phy_number: phy number + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
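+ *
+ * A minimal usage sketch (editor's illustration; the InvalidDwordCount
+ * field is a standard MPI2 phy error counter, cited as an assumption):
+ *
+ *    Mpi2ConfigReply_t reply;
+ *    Mpi2SasPhyPage1_t pg1;
+ *    u32 phy_number = 0;
+ *
+ *    if (!mpt3sas_config_get_phy_pg1(ioc, &reply, &pg1, phy_number))
+ *        pr_info("phy(%d), invalid_dword_count(%d)\n", phy_number,
+ *            le32_to_cpu(pg1.InvalidDwordCount));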
+ */ +int +mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_raid_volume_pg1 - obtain raid volume page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: volume handle + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, + u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_number_pds - obtain number of phys disk assigned to volume + * @ioc: per adapter object + * @handle: volume handle + * @num_pds: returns pds count + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
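+ *
+ * A minimal sizing sketch (editor's illustration): the count reported here
+ * sizes the variable-length volume page 0 fetched next; the PhysDisk
+ * element type is the standard MPI2 definition (an assumption):
+ *
+ *    u8 num_pds = 0;
+ *    u16 sz;
+ *
+ *    if (!mpt3sas_config_get_number_pds(ioc, handle, &num_pds) && num_pds)
+ *        sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) +
+ *            (num_pds * sizeof(Mpi2RaidVol0PhysDisk_t));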
+ */ +int +mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 *num_pds) +{ + Mpi2ConfigRequest_t mpi_request; + Mpi2RaidVolPage0_t config_page; + Mpi2ConfigReply_t mpi_reply; + int r; + u16 ioc_status; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + *num_pds = 0; + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(Mpi2RaidVolPage0_t)); + if (!r) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) + *num_pds = config_page.NumPhysDisks; + } + + out: + return r; +} + +/** + * mpt3sas_config_get_raid_volume_pg0 - obtain raid volume page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: volume handle + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form, + u32 handle, u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_phys_disk_pg0 - obtain phys disk page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_PHYSDISKNUM, PHYSDISKNUM, DEVHANDLE + * @form_specific: specific to the form + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
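+ *
+ * A minimal usage sketch (editor's illustration; "phys_disk_num" assumed
+ * to come from a volume page, the PHYSDISKNUM form constant is the
+ * standard MPI2 definition):
+ *
+ *    Mpi2ConfigReply_t reply;
+ *    Mpi2RaidPhysDiskPage0_t pd_pg0;
+ *
+ *    if (!mpt3sas_config_get_phys_disk_pg0(ioc, &reply, &pd_pg0,
+ *        MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, phys_disk_num))
+ *        pr_info("pd handle(0x%04x)\n", le16_to_cpu(pd_pg0.DevHandle));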
+ */
+int
+mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+    *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 form,
+    u32 form_specific)
+{
+    Mpi2ConfigRequest_t mpi_request;
+    int r;
+
+    memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+    mpi_request.Function = MPI2_FUNCTION_CONFIG;
+    mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+    mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
+    mpi_request.Header.PageNumber = 0;
+    mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION;
+    ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+    r = _config_request(ioc, &mpi_request, mpi_reply,
+        MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+    if (r)
+        goto out;
+
+    mpi_request.PageAddress = cpu_to_le32(form | form_specific);
+    mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+    r = _config_request(ioc, &mpi_request, mpi_reply,
+        MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+        sizeof(*config_page));
+ out:
+    return r;
+}
+
+/**
+ * mpt3sas_config_get_volume_handle - returns volume handle for given hidden
+ * raid components
+ * @ioc: per adapter object
+ * @pd_handle: phys disk handle
+ * @volume_handle: volume handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
+    u16 *volume_handle)
+{
+    Mpi2RaidConfigurationPage0_t *config_page = NULL;
+    Mpi2ConfigRequest_t mpi_request;
+    Mpi2ConfigReply_t mpi_reply;
+    int r, i, config_page_sz;
+    u16 ioc_status;
+    int config_num;
+    u16 element_type;
+    u16 phys_disk_dev_handle;
+
+    *volume_handle = 0;
+    memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+    mpi_request.Function = MPI2_FUNCTION_CONFIG;
+    mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+    mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+    mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG;
+    mpi_request.Header.PageVersion = MPI2_RAIDCONFIG0_PAGEVERSION;
+    mpi_request.Header.PageNumber = 0;
+    ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+    r = _config_request(ioc, &mpi_request, &mpi_reply,
+        MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+    if (r)
+        goto out;
+
+    mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+    config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
+    config_page = kmalloc(config_page_sz, GFP_KERNEL);
+    if (!config_page) {
+        r = -1;
+        goto out;
+    }
+
+    config_num = 0xff;
+    while (1) {
+        mpi_request.PageAddress = cpu_to_le32(config_num +
+            MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
+        r = _config_request(ioc, &mpi_request, &mpi_reply,
+            MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+            config_page_sz);
+        if (r)
+            goto out;
+        r = -1;
+        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+            MPI2_IOCSTATUS_MASK;
+        if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+            goto out;
+        for (i = 0; i < config_page->NumElements; i++) {
+            element_type = le16_to_cpu(config_page->
+                ConfigElement[i].ElementFlags) &
+                MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;
+            if (element_type ==
+                MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT ||
+                element_type ==
+                MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) {
+                phys_disk_dev_handle =
+                    le16_to_cpu(config_page->ConfigElement[i].
+ PhysDiskDevHandle); + if (phys_disk_dev_handle == pd_handle) { + *volume_handle = + le16_to_cpu(config_page-> + ConfigElement[i].VolDevHandle); + r = 0; + goto out; + } + } else if (element_type == + MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) { + *volume_handle = 0; + r = 0; + goto out; + } + } + config_num = config_page->ConfigNum; + } + out: + kfree(config_page); + return r; +} + +/** + * mpt3sas_config_get_volume_wwid - returns wwid given the volume handle + * @ioc: per adapter object + * @volume_handle: volume handle + * @wwid: volume wwid + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle, + u64 *wwid) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2RaidVolPage1_t raid_vol_pg1; + + *wwid = 0; + if (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &raid_vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, + volume_handle))) { + *wwid = le64_to_cpu(raid_vol_pg1.WWID); + return 0; + } else + return -1; +} diff --git a/addons/mpt3sas/src/3.10.108/mpt3sas_ctl.c b/addons/mpt3sas/src/3.10.108/mpt3sas_ctl.c new file mode 100644 index 00000000..0b402b6f --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpt3sas_ctl.c @@ -0,0 +1,3282 @@ +/* + * Management Module Support for MPT (Message Passing Technology) based + * controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c + * Copyright (C) 2012 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/compat.h>
+#include <linux/poll.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include "mpt3sas_base.h"
+#include "mpt3sas_ctl.h"
+
+
+static struct fasync_struct *async_queue;
+static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
+
+
+/**
+ * enum block_state - blocking state
+ * @NON_BLOCKING: non blocking
+ * @BLOCKING: blocking
+ *
+ * These states are for ioctls that need to wait for a response
+ * from firmware, so they probably require sleep.
+ */
+enum block_state {
+    NON_BLOCKING,
+    BLOCKING,
+};
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _ctl_sas_device_find_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for a sas_device based on its handle, then returns the
+ * sas_device object.
+ */
+static struct _sas_device *
+_ctl_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+    struct _sas_device *sas_device, *r;
+
+    r = NULL;
+    list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+        if (sas_device->handle != handle)
+            continue;
+        r = sas_device;
+        goto out;
+    }
+
+ out:
+    return r;
+}
+
+/**
+ * _ctl_display_some_debug - debug routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @calling_function_name: string pass from calling function
+ * @mpi_reply: reply message frame
+ * Context: none.
+ *
+ * Function for displaying debug info helpful when debugging issues
+ * in this module.
+ */
+static void
+_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+    char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
+{
+    Mpi2ConfigRequest_t *mpi_request;
+    char *desc = NULL;
+
+    if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
+        return;
+
+    mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+    switch (mpi_request->Function) {
+    case MPI2_FUNCTION_SCSI_IO_REQUEST:
+    {
+        Mpi2SCSIIORequest_t *scsi_request =
+            (Mpi2SCSIIORequest_t *)mpi_request;
+
+        snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+            "scsi_io, cmd(0x%02x), cdb_len(%d)",
+            scsi_request->CDB.CDB32[0],
+            le16_to_cpu(scsi_request->IoFlags) & 0xF);
+        desc = ioc->tmp_string;
+        break;
+    }
+    case MPI2_FUNCTION_SCSI_TASK_MGMT:
+        desc = "task_mgmt";
+        break;
+    case MPI2_FUNCTION_IOC_INIT:
+        desc = "ioc_init";
+        break;
+    case MPI2_FUNCTION_IOC_FACTS:
+        desc = "ioc_facts";
+        break;
+    case MPI2_FUNCTION_CONFIG:
+    {
+        Mpi2ConfigRequest_t *config_request =
+            (Mpi2ConfigRequest_t *)mpi_request;
+
+        snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+            "config, type(0x%02x), ext_type(0x%02x), number(%d)",
+            (config_request->Header.PageType &
+            MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
+            config_request->Header.PageNumber);
+        desc = ioc->tmp_string;
+        break;
+    }
+    case MPI2_FUNCTION_PORT_FACTS:
+        desc = "port_facts";
+        break;
+    case MPI2_FUNCTION_PORT_ENABLE:
+        desc = "port_enable";
+        break;
+    case MPI2_FUNCTION_EVENT_NOTIFICATION:
+        desc = "event_notification";
+        break;
+    case MPI2_FUNCTION_FW_DOWNLOAD:
+        desc = "fw_download";
+        break;
+    case MPI2_FUNCTION_FW_UPLOAD:
+        desc = "fw_upload";
+        break;
+    case MPI2_FUNCTION_RAID_ACTION:
+        desc = "raid_action";
+        break;
+    case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+    {
+        Mpi2SCSIIORequest_t *scsi_request =
+            (Mpi2SCSIIORequest_t *)mpi_request;
+
+        snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+            "raid_pass, cmd(0x%02x), cdb_len(%d)",
+            scsi_request->CDB.CDB32[0],
+            le16_to_cpu(scsi_request->IoFlags) & 0xF);
+        desc = ioc->tmp_string;
+
break; + } + case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: + desc = "sas_iounit_cntl"; + break; + case MPI2_FUNCTION_SATA_PASSTHROUGH: + desc = "sata_pass"; + break; + case MPI2_FUNCTION_DIAG_BUFFER_POST: + desc = "diag_buffer_post"; + break; + case MPI2_FUNCTION_DIAG_RELEASE: + desc = "diag_release"; + break; + case MPI2_FUNCTION_SMP_PASSTHROUGH: + desc = "smp_passthrough"; + break; + } + + if (!desc) + return; + + pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n", + ioc->name, calling_function_name, desc, smid); + + if (!mpi_reply) + return; + + if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) + pr_info(MPT3SAS_FMT + "\tiocstatus(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_request->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { + Mpi2SCSIIOReply_t *scsi_reply = + (Mpi2SCSIIOReply_t *)mpi_reply; + struct _sas_device *sas_device = NULL; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = _ctl_sas_device_find_by_handle(ioc, + le16_to_cpu(scsi_reply->DevHandle)); + if (sas_device) { + pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n", + ioc->name, (unsigned long long) + sas_device->sas_address, sas_device->phy); + pr_warn(MPT3SAS_FMT + "\tenclosure_logical_id(0x%016llx), slot(%d)\n", + ioc->name, (unsigned long long) + sas_device->enclosure_logical_id, sas_device->slot); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (scsi_reply->SCSIState || scsi_reply->SCSIStatus) + pr_info(MPT3SAS_FMT + "\tscsi_state(0x%02x), scsi_status" + "(0x%02x)\n", ioc->name, + scsi_reply->SCSIState, + scsi_reply->SCSIStatus); + } +} + +#endif + +/** + * mpt3sas_ctl_done - ctl module completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: none. + * + * The callback handler when using ioc->ctl_cb_idx. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
+ */
+u8
+mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+    u32 reply)
+{
+    MPI2DefaultReply_t *mpi_reply;
+    Mpi2SCSIIOReply_t *scsiio_reply;
+    const void *sense_data;
+    u32 sz;
+
+    if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
+        return 1;
+    if (ioc->ctl_cmds.smid != smid)
+        return 1;
+    ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
+    mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+    if (mpi_reply) {
+        memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+        ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
+        /* get sense data */
+        if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+            mpi_reply->Function ==
+            MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+            scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
+            if (scsiio_reply->SCSIState &
+                MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+                sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+                    le32_to_cpu(scsiio_reply->SenseCount));
+                sense_data = mpt3sas_base_get_sense_buffer(ioc,
+                    smid);
+                memcpy(ioc->ctl_cmds.sense, sense_data, sz);
+            }
+        }
+    }
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+    _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
+#endif
+    ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
+    complete(&ioc->ctl_cmds.done);
+    return 1;
+}
+
+/**
+ * _ctl_check_event_type - determines when an event needs logging
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * The bitmask in ioc->event_type[] indicates which events should
+ * be saved in the driver event_log. This bitmask is set by the application.
+ *
+ * Returns 1 when the event should be captured, or zero when there is no match.
+ */
+static int
+_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
+{
+    u16 i;
+    u32 desired_event;
+
+    if (event >= 128 || !event || !ioc->event_log)
+        return 0;
+
+    desired_event = (1 << (event % 32));
+    if (!desired_event)
+        desired_event = 1;
+    i = event / 32;
+    return desired_event & ioc->event_type[i];
+}
+
+/**
+ * mpt3sas_ctl_add_to_event_log - add event
+ * @ioc: per adapter object
+ * @mpi_reply: reply message frame
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
+    Mpi2EventNotificationReply_t *mpi_reply)
+{
+    struct MPT3_IOCTL_EVENTS *event_log;
+    u16 event;
+    int i;
+    u32 sz, event_data_sz;
+    u8 send_aen = 0;
+
+    if (!ioc->event_log)
+        return;
+
+    event = le16_to_cpu(mpi_reply->Event);
+
+    if (_ctl_check_event_type(ioc, event)) {
+
+        /* insert entry into circular event_log */
+        i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
+        event_log = ioc->event_log;
+        event_log[i].event = event;
+        event_log[i].context = ioc->event_context++;
+
+        event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
+        sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
+        memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
+        memcpy(event_log[i].data, mpi_reply->EventData, sz);
+        send_aen = 1;
+    }
+
+    /* This aen_event_read_flag flag is set until the
+     * application has read the event log.
+     * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
+     */
+    if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
+        (send_aen && !ioc->aen_event_read_flag)) {
+        ioc->aen_event_read_flag = 1;
+        wake_up_interruptible(&ctl_poll_wait);
+        if (async_queue)
+            kill_fasync(&async_queue, SIGIO, POLL_IN);
+    }
+}
+
+/**
+ * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt.
+ *
+ * This function merely adds a new work task into ioc->firmware_event_thread.
+ * The tasks are worked from _firmware_event_work in user context.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+    u32 reply)
+{
+    Mpi2EventNotificationReply_t *mpi_reply;
+
+    mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+    mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+    return 1;
+}
+
+/**
+ * _ctl_verify_adapter - validates ioc_number passed from application
+ * @ioc_number: ioc number as passed by the application
+ * @iocpp: The ioc pointer is returned in this.
+ *
+ * Return (-1) means error, else ioc_number.
+ */
+static int
+_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp)
+{
+    struct MPT3SAS_ADAPTER *ioc;
+
+    list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+        if (ioc->id != ioc_number)
+            continue;
+        *iocpp = ioc;
+        return ioc_number;
+    }
+    *iocpp = NULL;
+    return -1;
+}
+
+/**
+ * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
+ * MPT3_IOC_DONE_RESET
+ */
+void
+mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+    int i;
+    u8 issue_reset;
+
+    switch (reset_phase) {
+    case MPT3_IOC_PRE_RESET:
+        dtmprintk(ioc, pr_info(MPT3SAS_FMT
+            "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+        for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+            if (!(ioc->diag_buffer_status[i] &
+                MPT3_DIAG_BUFFER_IS_REGISTERED))
+                continue;
+            if ((ioc->diag_buffer_status[i] &
+                MPT3_DIAG_BUFFER_IS_RELEASED))
+                continue;
+            mpt3sas_send_diag_release(ioc, i, &issue_reset);
+        }
+        break;
+    case MPT3_IOC_AFTER_RESET:
+        dtmprintk(ioc, pr_info(MPT3SAS_FMT
+            "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+        if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
+            ioc->ctl_cmds.status |= MPT3_CMD_RESET;
+            mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
+            complete(&ioc->ctl_cmds.done);
+        }
+        break;
+    case MPT3_IOC_DONE_RESET:
+        dtmprintk(ioc, pr_info(MPT3SAS_FMT
+            "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+
+        for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+            if (!(ioc->diag_buffer_status[i] &
+                MPT3_DIAG_BUFFER_IS_REGISTERED))
+                continue;
+            if ((ioc->diag_buffer_status[i] &
+                MPT3_DIAG_BUFFER_IS_RELEASED))
+                continue;
+            ioc->diag_buffer_status[i] |=
+                MPT3_DIAG_BUFFER_IS_DIAG_RESET;
+        }
+        break;
+    }
+}
+
+/**
+ * _ctl_fasync - fasync handler
+ * @fd: file descriptor from the application
+ * @filep: file object
+ * @mode: fasync on/off
+ *
+ * Called when the application requests the fasync callback handler.
+ */
+static int
+_ctl_fasync(int fd, struct file *filep, int mode)
+{
+    return fasync_helper(fd, filep, mode, &async_queue);
+}
+
+/**
+ * _ctl_poll - poll handler
+ * @filep: file object
+ * @wait: poll table
+ *
+ */
+static unsigned int
+_ctl_poll(struct file *filep, poll_table *wait)
+{
+    struct MPT3SAS_ADAPTER *ioc;
+
+    poll_wait(filep, &ctl_poll_wait, wait);
+
+    list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+        if (ioc->aen_event_read_flag)
+            return POLLIN | POLLRDNORM;
+    }
+    return 0;
+}
+
+/**
+ * _ctl_set_task_mid - assign an active smid to tm request
+ * @ioc: per adapter object
+ * @karg - (struct mpt3_ioctl_command)
+ * @tm_request - pointer to mf from user space
+ *
+ * Returns 0 when an smid is found, else fails;
+ * on failure, the reply frame is filled.
+ */ +static int +_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg, + Mpi2SCSITaskManagementRequest_t *tm_request) +{ + u8 found = 0; + u16 i; + u16 handle; + struct scsi_cmnd *scmd; + struct MPT3SAS_DEVICE *priv_data; + unsigned long flags; + Mpi2SCSITaskManagementReply_t *tm_reply; + u32 sz; + u32 lun; + char *desc = NULL; + + if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + desc = "abort_task"; + else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + desc = "query_task"; + else + return 0; + + lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); + + handle = le16_to_cpu(tm_request->DevHandle); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + for (i = ioc->scsiio_depth; i && !found; i--) { + scmd = ioc->scsi_lookup[i - 1].scmd; + if (scmd == NULL || scmd->device == NULL || + scmd->device->hostdata == NULL) + continue; + if (lun != scmd->device->lun) + continue; + priv_data = scmd->device->hostdata; + if (priv_data->sas_target == NULL) + continue; + if (priv_data->sas_target->handle != handle) + continue; + tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid); + found = 1; + } + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + if (!found) { + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "%s: handle(0x%04x), lun(%d), no active mid!!\n", + ioc->name, + desc, le16_to_cpu(tm_request->DevHandle), lun)); + tm_reply = ioc->ctl_cmds.reply; + tm_reply->DevHandle = tm_request->DevHandle; + tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + tm_reply->TaskType = tm_request->TaskType; + tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4; + tm_reply->VP_ID = tm_request->VP_ID; + tm_reply->VF_ID = tm_request->VF_ID; + sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz); + if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply, + sz)) + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + return 1; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name, + desc, le16_to_cpu(tm_request->DevHandle), lun, + le16_to_cpu(tm_request->TaskMID))); + return 0; +} + +/** + * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode + * @ioc: per adapter object + * @karg - (struct mpt3_ioctl_command) + * @mf - pointer to mf in user space + */ +static long +_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, + void __user *mf) +{ + MPI2RequestHeader_t *mpi_request = NULL, *request; + MPI2DefaultReply_t *mpi_reply; + u32 ioc_state; + u16 ioc_status; + u16 smid; + unsigned long timeout, timeleft; + u8 issue_reset; + u32 sz; + void *psge; + void *data_out = NULL; + dma_addr_t data_out_dma = 0; + size_t data_out_sz = 0; + void *data_in = NULL; + dma_addr_t data_in_dma = 0; + size_t data_in_sz = 0; + long ret; + u16 wait_state_count; + + issue_reset = 0; + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", + ioc->name, __func__); + ret = -EAGAIN; + goto out; + } + + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + ret = -EFAULT; + goto out; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, + __func__, wait_state_count); + } + if (wait_state_count) + pr_info(MPT3SAS_FMT "%s: 
ioc is operational\n", + ioc->name, __func__); + + mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL); + if (!mpi_request) { + pr_err(MPT3SAS_FMT + "%s: failed obtaining a memory for mpi_request\n", + ioc->name, __func__); + ret = -ENOMEM; + goto out; + } + + /* Check for overflow and wraparound */ + if (karg.data_sge_offset * 4 > ioc->request_sz || + karg.data_sge_offset > (UINT_MAX / 4)) { + ret = -EINVAL; + goto out; + } + + /* copy in request message frame from user */ + if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, + __func__); + ret = -EFAULT; + goto out; + } + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + ret = -EAGAIN; + goto out; + } + } else { + + smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + ret = -EAGAIN; + goto out; + } + } + + ret = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + request = mpt3sas_base_get_msg_frame(ioc, smid); + memcpy(request, mpi_request, karg.data_sge_offset*4); + ioc->ctl_cmds.smid = smid; + data_out_sz = karg.data_out_size; + data_in_sz = karg.data_in_size; + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { + if (!le16_to_cpu(mpi_request->FunctionDependent1) || + le16_to_cpu(mpi_request->FunctionDependent1) > + ioc->facts.MaxDevHandle) { + ret = -EINVAL; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + /* obtain dma-able memory for data transfer */ + if (data_out_sz) /* WRITE */ { + data_out = pci_alloc_consistent(ioc->pdev, data_out_sz, + &data_out_dma); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + if (copy_from_user(data_out, karg.data_out_buf_ptr, + data_out_sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -EFAULT; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + if (data_in_sz) /* READ */ { + data_in = pci_alloc_consistent(ioc->pdev, data_in_sz, + &data_in_dma); + if (!data_in) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + psge = (void *)request + (karg.data_sge_offset*4); + + /* send command to firmware */ +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING + _ctl_display_some_debug(ioc, smid, "ctl_request", NULL); +#endif + + init_completion(&ioc->ctl_cmds.done); + switch (mpi_request->Function) { + case MPI2_FUNCTION_SCSI_IO_REQUEST: + case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: + { + Mpi2SCSIIORequest_t *scsiio_request = + (Mpi2SCSIIORequest_t *)request; + scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + scsiio_request->SenseBufferLowAddress = + mpt3sas_base_get_sense_buffer_dma(ioc, smid); + memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE); + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) + mpt3sas_base_put_smid_scsi_io(ioc, smid, + le16_to_cpu(mpi_request->FunctionDependent1)); + else + mpt3sas_base_put_smid_default(ioc, smid); + break; + } + case MPI2_FUNCTION_SCSI_TASK_MGMT: + { + 
Mpi2SCSITaskManagementRequest_t *tm_request = + (Mpi2SCSITaskManagementRequest_t *)request; + + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n", + ioc->name, + le16_to_cpu(tm_request->DevHandle), tm_request->TaskType)); + + if (tm_request->TaskType == + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || + tm_request->TaskType == + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { + if (_ctl_set_task_mid(ioc, &karg, tm_request)) { + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu( + tm_request->DevHandle)); + ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + mpt3sas_base_put_smid_hi_priority(ioc, smid); + break; + } + case MPI2_FUNCTION_SMP_PASSTHROUGH: + { + Mpi2SmpPassthroughRequest_t *smp_request = + (Mpi2SmpPassthroughRequest_t *)mpi_request; + u8 *data; + + /* ioc determines which port to use */ + smp_request->PhysicalPort = 0xFF; + if (smp_request->PassthroughFlags & + MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE) + data = (u8 *)&smp_request->SGL; + else { + if (unlikely(data_out == NULL)) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + data = data_out; + } + + if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) { + ioc->ioc_link_reset_in_progress = 1; + ioc->ignore_loginfos = 1; + } + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + mpt3sas_base_put_smid_default(ioc, smid); + break; + } + case MPI2_FUNCTION_SATA_PASSTHROUGH: + case MPI2_FUNCTION_FW_DOWNLOAD: + case MPI2_FUNCTION_FW_UPLOAD: + { + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + mpt3sas_base_put_smid_default(ioc, smid); + break; + } + case MPI2_FUNCTION_TOOLBOX: + { + Mpi2ToolboxCleanRequest_t *toolbox_request = + (Mpi2ToolboxCleanRequest_t *)mpi_request; + + if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) { + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + } else { + ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + } + mpt3sas_base_put_smid_default(ioc, smid); + break; + } + case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: + { + Mpi2SasIoUnitControlRequest_t *sasiounit_request = + (Mpi2SasIoUnitControlRequest_t *)mpi_request; + + if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET + || sasiounit_request->Operation == + MPI2_SAS_OP_PHY_LINK_RESET) { + ioc->ioc_link_reset_in_progress = 1; + ioc->ignore_loginfos = 1; + } + /* drop to default case for posting the request */ + } + default: + ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + mpt3sas_base_put_smid_default(ioc, smid); + break; + } + + if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT) + timeout = MPT3_IOCTL_DEFAULT_TIMEOUT; + else + timeout = karg.timeout; + timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, + timeout*HZ); + if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { + Mpi2SCSITaskManagementRequest_t *tm_request = + (Mpi2SCSITaskManagementRequest_t *)mpi_request; + mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu( + tm_request->DevHandle)); + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); + } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH || + mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) && + ioc->ioc_link_reset_in_progress) { + ioc->ioc_link_reset_in_progress = 0; + ioc->ignore_loginfos = 0; + } + if 
(!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, + __func__); + _debug_dump_mf(mpi_request, karg.data_sge_offset); + if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + mpi_reply = ioc->ctl_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING + if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT && + (ioc->logging_level & MPT_DEBUG_TM)) { + Mpi2SCSITaskManagementReply_t *tm_reply = + (Mpi2SCSITaskManagementReply_t *)mpi_reply; + + pr_info(MPT3SAS_FMT "TASK_MGMT: " \ + "IOCStatus(0x%04x), IOCLogInfo(0x%08x), " + "TerminationCount(0x%08x)\n", ioc->name, + le16_to_cpu(tm_reply->IOCStatus), + le32_to_cpu(tm_reply->IOCLogInfo), + le32_to_cpu(tm_reply->TerminationCount)); + } +#endif + /* copy out xdata to user */ + if (data_in_sz) { + if (copy_to_user(karg.data_in_buf_ptr, data_in, + data_in_sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + + /* copy out reply message frame to user */ + if (karg.max_reply_bytes) { + sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz); + if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply, + sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + + /* copy out sense to user */ + if (karg.max_sense_bytes && (mpi_request->Function == + MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { + sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE); + if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, + sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + + issue_host_reset: + if (issue_reset) { + ret = -ENODATA; + if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_request->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || + mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) { + pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n", + ioc->name, + le16_to_cpu(mpi_request->FunctionDependent1)); + mpt3sas_halt_firmware(ioc); + mpt3sas_scsih_issue_tm(ioc, + le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, + 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, + 0, TM_MUTEX_ON); + } else + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + } + + out: + + /* free memory associated with sg buffers */ + if (data_in) + pci_free_consistent(ioc->pdev, data_in_sz, data_in, + data_in_dma); + + if (data_out) + pci_free_consistent(ioc->pdev, data_out_sz, data_out, + data_out_dma); + + kfree(mpi_request); + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return ret; +} + +/** + * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + */ +static long +_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_iocinfo karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, + __func__)); + + memset(&karg, 0 , sizeof(karg)); + karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3; + if (ioc->pfacts) + karg.port_number = ioc->pfacts[0].PortNumber; + karg.hw_rev = ioc->pdev->revision; + karg.pci_id = ioc->pdev->device; + karg.subsystem_device = 
ioc->pdev->subsystem_device; + karg.subsystem_vendor = ioc->pdev->subsystem_vendor; + karg.pci_information.u.bits.bus = ioc->pdev->bus->number; + karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn); + karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn); + karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus); + karg.firmware_version = ioc->facts.FWVersion.Word; + strcpy(karg.driver_version, MPT3SAS_DRIVER_NAME); + strcat(karg.driver_version, "-"); + strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION); + karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +/** + * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + */ +static long +_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_eventquery karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, + __func__)); + + karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE; + memcpy(karg.event_types, ioc->event_type, + MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); + + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +/** + * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + */ +static long +_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_eventenable karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, + __func__)); + + memcpy(ioc->event_type, karg.event_types, + MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); + mpt3sas_base_validate_event_type(ioc, ioc->event_type); + + if (ioc->event_log) + return 0; + /* initialize event_log */ + ioc->event_context = 0; + ioc->aen_event_read_flag = 0; + ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE, + sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL); + if (!ioc->event_log) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENOMEM; + } + return 0; +} + +/** + * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + */ +static long +_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_eventreport karg; + u32 number_bytes, max_events, max; + struct mpt3_ioctl_eventreport __user *uarg = arg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, + __func__)); + + number_bytes = karg.hdr.max_data_size - + sizeof(struct mpt3_ioctl_header); + max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS); + max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events); + + /* If fewer than 1 event is requested, there must have + * been some type of error. 
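
_ctl_eventenable() above copies a per-class event mask from user space and allocates the circular event_log on first use; until an application arms that mask, _ctl_check_event_type() drops every event. A hedged sketch of arming it from user space, assuming the struct layout in the driver's mpt3sas_ctl.h:

    #include <string.h>
    #include <sys/ioctl.h>
    #include "mpt3sas_ctl.h" /* opcodes and ioctl structs from the driver tree */

    /* Ask ioc 0 to capture every event class; the handler memcpy()s
     * event_types straight into ioc->event_type[]. */
    static int enable_all_events(int fd)
    {
        struct mpt3_ioctl_eventenable karg;

        memset(&karg, 0, sizeof(karg));
        karg.hdr.ioc_number = 0;
        karg.hdr.max_data_size = sizeof(karg);
        memset(karg.event_types, 0xff, sizeof(karg.event_types));
        return ioctl(fd, MPT3EVENTENABLE, &karg);
    }
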
+ */ + if (!max || !ioc->event_log) + return -ENODATA; + + number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS); + if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + /* reset flag so SIGIO can restart */ + ioc->aen_event_read_flag = 0; + return 0; +} + +/** + * _ctl_do_reset - main handler for MPT3HARDRESET opcode + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + */ +static long +_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_diag_reset karg; + int retval; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + if (ioc->shost_recovery || ioc->pci_error_recovery || + ioc->is_driver_loading) + return -EAGAIN; + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, + __func__)); + + retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + pr_info(MPT3SAS_FMT "host reset: %s\n", + ioc->name, ((!retval) ? "SUCCESS" : "FAILED")); + return 0; +} + +/** + * _ctl_btdh_search_sas_device - searching for sas device + * @ioc: per adapter object + * @btdh: btdh ioctl payload + */ +static int +_ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_ioctl_btdh_mapping *btdh) +{ + struct _sas_device *sas_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->sas_device_list)) + return rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == sas_device->handle) { + btdh->bus = sas_device->channel; + btdh->id = sas_device->id; + rc = 1; + goto out; + } else if (btdh->bus == sas_device->channel && btdh->id == + sas_device->id && btdh->handle == 0xFFFF) { + btdh->handle = sas_device->handle; + rc = 1; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +/** + * _ctl_btdh_search_raid_device - searching for raid device + * @ioc: per adapter object + * @btdh: btdh ioctl payload + */ +static int +_ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_ioctl_btdh_mapping *btdh) +{ + struct _raid_device *raid_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->raid_device_list)) + return rc; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == raid_device->handle) { + btdh->bus = raid_device->channel; + btdh->id = raid_device->id; + rc = 1; + goto out; + } else if (btdh->bus == raid_device->channel && btdh->id == + raid_device->id && btdh->handle == 0xFFFF) { + btdh->handle = raid_device->handle; + rc = 1; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return rc; +} + +/** + * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + */ +static long +_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_btdh_mapping karg; + int rc; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + rc = 
_ctl_btdh_search_sas_device(ioc, &karg); + if (!rc) + _ctl_btdh_search_raid_device(ioc, &karg); + + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +/** + * _ctl_diag_capability - return diag buffer capability + * @ioc: per adapter object + * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED + * + * returns 1 when diag buffer support is enabled in firmware + */ +static u8 +_ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type) +{ + u8 rc = 0; + + switch (buffer_type) { + case MPI2_DIAG_BUF_TYPE_TRACE: + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) + rc = 1; + break; + case MPI2_DIAG_BUF_TYPE_SNAPSHOT: + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) + rc = 1; + break; + case MPI2_DIAG_BUF_TYPE_EXTENDED: + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) + rc = 1; + } + + return rc; +} + + +/** + * _ctl_diag_register_2 - wrapper for registering diag buffer support + * @ioc: per adapter object + * @diag_register: the diag_register struct passed in from user space + * + */ +static long +_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_diag_register *diag_register) +{ + int rc, i; + void *request_data = NULL; + dma_addr_t request_data_dma; + u32 request_data_sz = 0; + Mpi2DiagBufferPostRequest_t *mpi_request; + Mpi2DiagBufferPostReply_t *mpi_reply; + u8 buffer_type; + unsigned long timeleft; + u16 smid; + u16 ioc_status; + u32 ioc_state; + u8 issue_reset = 0; + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + buffer_type = diag_register->buffer_type; + if (!_ctl_diag_capability(ioc, buffer_type)) { + pr_err(MPT3SAS_FMT + "%s: doesn't have capability for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -EPERM; + } + + if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) { + pr_err(MPT3SAS_FMT + "%s: already has a registered buffer for buffer_type(0x%02x)\n", + ioc->name, __func__, + buffer_type); + return -EINVAL; + } + + if (diag_register->requested_buffer_size % 4) { + pr_err(MPT3SAS_FMT + "%s: the requested_buffer_size is not 4 byte aligned\n", + ioc->name, __func__); + return -EINVAL; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->ctl_cmds.smid = smid; + + request_data = ioc->diag_buffer[buffer_type]; + request_data_sz = diag_register->requested_buffer_size; + ioc->unique_id[buffer_type] = diag_register->unique_id; + ioc->diag_buffer_status[buffer_type] = 0; + memcpy(ioc->product_specific[buffer_type], + diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS); + ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags; + + if (request_data) { + request_data_dma = ioc->diag_buffer_dma[buffer_type]; + if 
(request_data_sz != ioc->diag_buffer_sz[buffer_type]) { + pci_free_consistent(ioc->pdev, + ioc->diag_buffer_sz[buffer_type], + request_data, request_data_dma); + request_data = NULL; + } + } + + if (request_data == NULL) { + ioc->diag_buffer_sz[buffer_type] = 0; + ioc->diag_buffer_dma[buffer_type] = 0; + request_data = pci_alloc_consistent( + ioc->pdev, request_data_sz, &request_data_dma); + if (request_data == NULL) { + pr_err(MPT3SAS_FMT "%s: failed allocating memory" \ + " for diag buffers, requested size(%d)\n", + ioc->name, __func__, request_data_sz); + mpt3sas_base_free_smid(ioc, smid); + return -ENOMEM; + } + ioc->diag_buffer[buffer_type] = request_data; + ioc->diag_buffer_sz[buffer_type] = request_data_sz; + ioc->diag_buffer_dma[buffer_type] = request_data_dma; + } + + mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; + mpi_request->BufferType = diag_register->buffer_type; + mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags); + mpi_request->BufferAddress = cpu_to_le64(request_data_dma); + mpi_request->BufferLength = cpu_to_le32(request_data_sz); + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n", + ioc->name, __func__, request_data, + (unsigned long long)request_data_dma, + le32_to_cpu(mpi_request->BufferLength))); + + for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) + mpi_request->ProductSpecific[i] = + cpu_to_le32(ioc->product_specific[buffer_type][i]); + + init_completion(&ioc->ctl_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, + MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); + + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, + __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4); + if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + /* process the completed Reply Message Frame */ + if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { + pr_err(MPT3SAS_FMT "%s: no reply message\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + + mpi_reply = ioc->ctl_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_REGISTERED; + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", + ioc->name, __func__)); + } else { + pr_info(MPT3SAS_FMT + "%s: ioc_status(0x%04x) log_info(0x%08x)\n", + ioc->name, __func__, + ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); + rc = -EFAULT; + } + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + + out: + + if (rc && request_data) + pci_free_consistent(ioc->pdev, request_data_sz, + request_data, request_data_dma); + + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return rc; +} + +/** + * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time + * @ioc: per adapter object + * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1 + * + * This is called when command line option diag_buffer_enable is enabled + * at driver load time. 
+ */ +void +mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register) +{ + struct mpt3_diag_register diag_register; + + memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); + + if (bits_to_register & 1) { + pr_info(MPT3SAS_FMT "registering trace buffer support\n", + ioc->name); + ioc->diag_trigger_master.MasterData = + (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + diag_register.unique_id = 0x7075900; + _ctl_diag_register_2(ioc, &diag_register); + } + + if (bits_to_register & 2) { + pr_info(MPT3SAS_FMT "registering snapshot buffer support\n", + ioc->name); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT; + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + diag_register.unique_id = 0x7075901; + _ctl_diag_register_2(ioc, &diag_register); + } + + if (bits_to_register & 4) { + pr_info(MPT3SAS_FMT "registering extended buffer support\n", + ioc->name); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED; + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + diag_register.unique_id = 0x7075901; + _ctl_diag_register_2(ioc, &diag_register); + } +} + +/** + * _ctl_diag_register - application register with driver + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + * + * This will allow the driver to setup any required buffers that will be + * needed by firmware to communicate with the driver. + */ +static long +_ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_register karg; + long rc; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + rc = _ctl_diag_register_2(ioc, &karg); + return rc; +} + +/** + * _ctl_diag_unregister - application unregister with driver + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + * + * This will allow the driver to cleanup any memory allocated for diag + * messages and to free up any resources. 
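
mpt3sas_enable_diag_buffer() above doubles as a reference for the fields an application must fill when registering a buffer itself through MPT3DIAGREGISTER. A minimal sketch; the hdr usage follows the other ioctls here, and the unique_id tag is a made-up value:

    #include <string.h>
    #include <sys/ioctl.h>
    #include "mpt3sas_ctl.h"

    static int register_trace_buffer(int fd)
    {
        struct mpt3_diag_register karg;

        memset(&karg, 0, sizeof(karg));
        karg.hdr.ioc_number = 0;
        karg.hdr.max_data_size = sizeof(karg);
        karg.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
        karg.requested_buffer_size = 2 * 1024 * 1024; /* must be 4-byte aligned */
        karg.unique_id = 0x12340001; /* hypothetical application tag */
        return ioctl(fd, MPT3DIAGREGISTER, &karg);
    }
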
+ */ +static long +_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_unregister karg; + void *request_data; + dma_addr_t request_data_dma; + u32 request_data_sz; + u8 buffer_type; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + buffer_type = karg.unique_id & 0x000000ff; + if (!_ctl_diag_capability(ioc, buffer_type)) { + pr_err(MPT3SAS_FMT + "%s: doesn't have capability for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -EPERM; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + pr_err(MPT3SAS_FMT + "%s: buffer_type(0x%02x) is not registered\n", + ioc->name, __func__, buffer_type); + return -EINVAL; + } + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + pr_err(MPT3SAS_FMT + "%s: buffer_type(0x%02x) has not been released\n", + ioc->name, __func__, buffer_type); + return -EINVAL; + } + + if (karg.unique_id != ioc->unique_id[buffer_type]) { + pr_err(MPT3SAS_FMT + "%s: unique_id(0x%08x) is not registered\n", + ioc->name, __func__, karg.unique_id); + return -EINVAL; + } + + request_data = ioc->diag_buffer[buffer_type]; + if (!request_data) { + pr_err(MPT3SAS_FMT + "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -ENOMEM; + } + + request_data_sz = ioc->diag_buffer_sz[buffer_type]; + request_data_dma = ioc->diag_buffer_dma[buffer_type]; + pci_free_consistent(ioc->pdev, request_data_sz, + request_data, request_data_dma); + ioc->diag_buffer[buffer_type] = NULL; + ioc->diag_buffer_status[buffer_type] = 0; + return 0; +} + +/** + * _ctl_diag_query - query relevant info associated with diag buffers + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + * + * The application will send only buffer_type and unique_id. Driver will + * inspect unique_id first, if valid, fill in all the info. If unique_id is + * 0x00, the driver will return info specified by Buffer Type. 
+ */ +static long +_ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_query karg; + void *request_data; + int i; + u8 buffer_type; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + karg.application_flags = 0; + buffer_type = karg.buffer_type; + + if (!_ctl_diag_capability(ioc, buffer_type)) { + pr_err(MPT3SAS_FMT + "%s: doesn't have capability for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -EPERM; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + pr_err(MPT3SAS_FMT + "%s: buffer_type(0x%02x) is not registered\n", + ioc->name, __func__, buffer_type); + return -EINVAL; + } + + if (karg.unique_id & 0xffffff00) { + if (karg.unique_id != ioc->unique_id[buffer_type]) { + pr_err(MPT3SAS_FMT + "%s: unique_id(0x%08x) is not registered\n", + ioc->name, __func__, karg.unique_id); + return -EINVAL; + } + } + + request_data = ioc->diag_buffer[buffer_type]; + if (!request_data) { + pr_err(MPT3SAS_FMT + "%s: doesn't have buffer for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -ENOMEM; + } + + if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED) + karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | + MPT3_APP_FLAGS_BUFFER_VALID); + else + karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | + MPT3_APP_FLAGS_BUFFER_VALID | + MPT3_APP_FLAGS_FW_BUFFER_ACCESS); + + for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) + karg.product_specific[i] = + ioc->product_specific[buffer_type][i]; + + karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type]; + karg.driver_added_buffer_size = 0; + karg.unique_id = ioc->unique_id[buffer_type]; + karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type]; + + if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) { + pr_err(MPT3SAS_FMT + "%s: unable to write mpt3_diag_query data @ %p\n", + ioc->name, __func__, arg); + return -EFAULT; + } + return 0; +} + +/** + * mpt3sas_send_diag_release - Diag Release Message + * @ioc: per adapter object + * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED + * @issue_reset - specifies whether host reset is required. 
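
_ctl_diag_query() above fills in application_flags and total_buffer_size, and treats a unique_id of 0x00 as a wildcard that selects by buffer type alone. A sketch of the matching MPT3DIAGQUERY call, again assuming the mpt3sas_ctl.h layout:

    #include <string.h>
    #include <sys/ioctl.h>
    #include "mpt3sas_ctl.h"

    static int query_trace_buffer(int fd, unsigned int *size)
    {
        struct mpt3_diag_query karg;

        memset(&karg, 0, sizeof(karg));
        karg.hdr.ioc_number = 0;
        karg.hdr.max_data_size = sizeof(karg);
        karg.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
        karg.unique_id = 0; /* 0x00 => look up by buffer_type */
        if (ioctl(fd, MPT3DIAGQUERY, &karg) != 0)
            return -1;
        *size = karg.total_buffer_size;
        return (karg.application_flags & MPT3_APP_FLAGS_BUFFER_VALID) ? 0 : -1;
    }
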
+ * + */ +int +mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, + u8 *issue_reset) +{ + Mpi2DiagReleaseRequest_t *mpi_request; + Mpi2DiagReleaseReply_t *mpi_reply; + u16 smid; + u16 ioc_status; + u32 ioc_state; + int rc; + unsigned long timeleft; + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + rc = 0; + *issue_reset = 0; + + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_RELEASED; + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "%s: skipping due to FAULT state\n", ioc->name, + __func__)); + rc = -EAGAIN; + goto out; + } + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->ctl_cmds.smid = smid; + + mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE; + mpi_request->BufferType = buffer_type; + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + + init_completion(&ioc->ctl_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, + MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); + + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, + __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2DiagReleaseRequest_t)/4); + if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) + *issue_reset = 1; + rc = -EFAULT; + goto out; + } + + /* process the completed Reply Message Frame */ + if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { + pr_err(MPT3SAS_FMT "%s: no reply message\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + + mpi_reply = ioc->ctl_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_RELEASED; + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", + ioc->name, __func__)); + } else { + pr_info(MPT3SAS_FMT + "%s: ioc_status(0x%04x) log_info(0x%08x)\n", + ioc->name, __func__, + ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); + rc = -EFAULT; + } + + out: + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return rc; +} + +/** + * _ctl_diag_release - request to send Diag Release Message to firmware + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + * + * This allows ownership of the specified buffer to be returned to the driver, + * allowing an application to read the buffer without fear that firmware is + * overwriting information in the buffer.
+ */ +static long +_ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_release karg; + void *request_data; + int rc; + u8 buffer_type; + u8 issue_reset = 0; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + buffer_type = karg.unique_id & 0x000000ff; + if (!_ctl_diag_capability(ioc, buffer_type)) { + pr_err(MPT3SAS_FMT + "%s: doesn't have capability for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -EPERM; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + pr_err(MPT3SAS_FMT + "%s: buffer_type(0x%02x) is not registered\n", + ioc->name, __func__, buffer_type); + return -EINVAL; + } + + if (karg.unique_id != ioc->unique_id[buffer_type]) { + pr_err(MPT3SAS_FMT + "%s: unique_id(0x%08x) is not registered\n", + ioc->name, __func__, karg.unique_id); + return -EINVAL; + } + + if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + pr_err(MPT3SAS_FMT + "%s: buffer_type(0x%02x) is already released\n", + ioc->name, __func__, + buffer_type); + return 0; + } + + request_data = ioc->diag_buffer[buffer_type]; + + if (!request_data) { + pr_err(MPT3SAS_FMT + "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -ENOMEM; + } + + /* buffers were released due to host reset */ + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DIAG_RESET)) { + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_RELEASED; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_DIAG_RESET; + pr_err(MPT3SAS_FMT + "%s: buffer_type(0x%02x) was released due to host reset\n", + ioc->name, __func__, buffer_type); + return 0; + } + + rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset); + + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + + return rc; +} + +/** + * _ctl_diag_read_buffer - request for copy of the diag buffer + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + */ +static long +_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_read_buffer karg; + struct mpt3_diag_read_buffer __user *uarg = arg; + void *request_data, *diag_data; + Mpi2DiagBufferPostRequest_t *mpi_request; + Mpi2DiagBufferPostReply_t *mpi_reply; + int rc, i; + u8 buffer_type; + unsigned long timeleft, request_size, copy_size; + u16 smid; + u16 ioc_status; + u8 issue_reset = 0; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + buffer_type = karg.unique_id & 0x000000ff; + if (!_ctl_diag_capability(ioc, buffer_type)) { + pr_err(MPT3SAS_FMT + "%s: doesn't have capability for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -EPERM; + } + + if (karg.unique_id != ioc->unique_id[buffer_type]) { + pr_err(MPT3SAS_FMT + "%s: unique_id(0x%08x) is not registered\n", + ioc->name, __func__, karg.unique_id); + return -EINVAL; + } + + request_data = ioc->diag_buffer[buffer_type]; + if (!request_data) { + pr_err(MPT3SAS_FMT + "%s: doesn't have buffer for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -ENOMEM; + } + + request_size =
ioc->diag_buffer_sz[buffer_type]; + + if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { + pr_err(MPT3SAS_FMT "%s: either the starting_offset " \ + "or bytes_to_read are not 4 byte aligned\n", ioc->name, + __func__); + return -EINVAL; + } + + if (karg.starting_offset > request_size) + return -EINVAL; + + diag_data = (void *)(request_data + karg.starting_offset); + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "%s: diag_buffer(%p), offset(%d), sz(%d)\n", + ioc->name, __func__, + diag_data, karg.starting_offset, karg.bytes_to_read)); + + /* Truncate data on requests that are too large */ + if ((diag_data + karg.bytes_to_read < diag_data) || + (diag_data + karg.bytes_to_read > request_data + request_size)) + copy_size = request_size - karg.starting_offset; + else + copy_size = karg.bytes_to_read; + + if (copy_to_user((void __user *)uarg->diagnostic_data, + diag_data, copy_size)) { + pr_err(MPT3SAS_FMT + "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n", + ioc->name, __func__, diag_data); + return -EFAULT; + } + + if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0) + return 0; + + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "%s: Reregister buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type)); + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "%s: buffer_type(0x%02x) is still registered\n", + ioc->name, __func__, buffer_type)); + return 0; + } + /* Get a free request frame and save the message context. + */ + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->ctl_cmds.smid = smid; + + mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; + mpi_request->BufferType = buffer_type; + mpi_request->BufferLength = + cpu_to_le32(ioc->diag_buffer_sz[buffer_type]); + mpi_request->BufferAddress = + cpu_to_le64(ioc->diag_buffer_dma[buffer_type]); + for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) + mpi_request->ProductSpecific[i] = + cpu_to_le32(ioc->product_specific[buffer_type][i]); + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + + init_completion(&ioc->ctl_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, + MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); + + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, + __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4); + if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + /* process the completed Reply Message Frame */ + if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { + pr_err(MPT3SAS_FMT "%s: no reply message\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + + mpi_reply = ioc->ctl_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_REGISTERED; + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", + ioc->name, __func__)); + } else { + pr_info(MPT3SAS_FMT + "%s: ioc_status(0x%04x) 
log_info(0x%08x)\n", + ioc->name, __func__, + ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); + rc = -EFAULT; + } + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + + out: + + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return rc; +} + + + +#ifdef CONFIG_COMPAT +/** + * _ctl_compat_mpt_command - convert 32bit pointers to 64bit. + * @ioc: per adapter object + * @cmd - ioctl opcode + * @arg - (struct mpt3_ioctl_command32) + * + * MPT3COMMAND32 - Handle 32bit applications running on 64bit os. + */ +static long +_ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd, + void __user *arg) +{ + struct mpt3_ioctl_command32 karg32; + struct mpt3_ioctl_command32 __user *uarg; + struct mpt3_ioctl_command karg; + + if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32)) + return -EINVAL; + + uarg = (struct mpt3_ioctl_command32 __user *) arg; + + if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + memset(&karg, 0, sizeof(struct mpt3_ioctl_command)); + karg.hdr.ioc_number = karg32.hdr.ioc_number; + karg.hdr.port_number = karg32.hdr.port_number; + karg.hdr.max_data_size = karg32.hdr.max_data_size; + karg.timeout = karg32.timeout; + karg.max_reply_bytes = karg32.max_reply_bytes; + karg.data_in_size = karg32.data_in_size; + karg.data_out_size = karg32.data_out_size; + karg.max_sense_bytes = karg32.max_sense_bytes; + karg.data_sge_offset = karg32.data_sge_offset; + karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); + karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); + karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr); + karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); + return _ctl_do_mpt_command(ioc, karg, &uarg->mf); +} +#endif + +/** + * _ctl_ioctl_main - main ioctl entry point + * @file - (struct file) + * @cmd - ioctl opcode + * @arg - + * compat - handles 32 bit applications in 64bit os + */ +static long +_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, + u8 compat) +{ + struct MPT3SAS_ADAPTER *ioc; + struct mpt3_ioctl_header ioctl_header; + enum block_state state; + long ret = -EINVAL; + + /* get IOCTL header */ + if (copy_from_user(&ioctl_header, (char __user *)arg, + sizeof(struct mpt3_ioctl_header))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc) + return -ENODEV; + + if (ioc->shost_recovery || ioc->pci_error_recovery || + ioc->is_driver_loading) + return -EAGAIN; + + state = (file->f_flags & O_NONBLOCK) ? 
NON_BLOCKING : BLOCKING; + if (state == NON_BLOCKING) { + if (!mutex_trylock(&ioc->ctl_cmds.mutex)) + return -EAGAIN; + } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) + return -ERESTARTSYS; + + + switch (cmd) { + case MPT3IOCINFO: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo)) + ret = _ctl_getiocinfo(ioc, arg); + break; +#ifdef CONFIG_COMPAT + case MPT3COMMAND32: +#endif + case MPT3COMMAND: + { + struct mpt3_ioctl_command __user *uarg; + struct mpt3_ioctl_command karg; + +#ifdef CONFIG_COMPAT + if (compat) { + ret = _ctl_compat_mpt_command(ioc, cmd, arg); + break; + } +#endif + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + ret = -EFAULT; + break; + } + + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) { + uarg = arg; + ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf); + } + break; + } + case MPT3EVENTQUERY: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery)) + ret = _ctl_eventquery(ioc, arg); + break; + case MPT3EVENTENABLE: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable)) + ret = _ctl_eventenable(ioc, arg); + break; + case MPT3EVENTREPORT: + ret = _ctl_eventreport(ioc, arg); + break; + case MPT3HARDRESET: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset)) + ret = _ctl_do_reset(ioc, arg); + break; + case MPT3BTDHMAPPING: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping)) + ret = _ctl_btdh_mapping(ioc, arg); + break; + case MPT3DIAGREGISTER: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register)) + ret = _ctl_diag_register(ioc, arg); + break; + case MPT3DIAGUNREGISTER: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister)) + ret = _ctl_diag_unregister(ioc, arg); + break; + case MPT3DIAGQUERY: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query)) + ret = _ctl_diag_query(ioc, arg); + break; + case MPT3DIAGRELEASE: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release)) + ret = _ctl_diag_release(ioc, arg); + break; + case MPT3DIAGREADBUFFER: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer)) + ret = _ctl_diag_read_buffer(ioc, arg); + break; + default: + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd)); + break; + } + + mutex_unlock(&ioc->ctl_cmds.mutex); + return ret; +} + +/** + * _ctl_ioctl - main ioctl entry point (unlocked) + * @file - (struct file) + * @cmd - ioctl opcode + * @arg - + */ +static long +_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0); + return ret; +} + +#ifdef CONFIG_COMPAT +/** + * _ctl_ioctl_compat - main ioctl entry point (compat) + * @file - + * @cmd - + * @arg - + * + * This routine handles 32 bit applications in 64bit os. + */ +static long +_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg) +{ + long ret; + + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1); + return ret; +} +#endif + +/* scsi host attributes */ +/** + * _ctl_version_fw_show - firmware version + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read-only' shost attribute. 
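
The shost attributes defined from here on are ordinary sysfs files, so no ioctl plumbing is needed to read them; they surface under the Scsi_Host directory. A small sketch (the host number and exact path are system-dependent):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static void print_fw_version(void)
    {
        char buf[64];
        ssize_t n;
        int fd = open("/sys/class/scsi_host/host0/version_fw", O_RDONLY);

        if (fd < 0)
            return;
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            printf("firmware: %s", buf); /* e.g. "12.00.00.00\n" */
        }
        close(fd);
    }
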
+ */ +static ssize_t +_ctl_version_fw_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", + (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, + (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, + (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, + ioc->facts.FWVersion.Word & 0x000000FF); +} +static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL); + +/** + * _ctl_version_bios_show - bios version + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_version_bios_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + + return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n", + (version & 0xFF000000) >> 24, + (version & 0x00FF0000) >> 16, + (version & 0x0000FF00) >> 8, + version & 0x000000FF); +} +static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL); + +/** + * _ctl_version_mpi_show - MPI (message passing interface) version + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%03x.%02x\n", + ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8); +} +static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL); + +/** + * _ctl_version_product_show - product name + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_version_product_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName); +} +static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL); + +/** + * _ctl_version_nvdata_persistent_show - nvdata persistent version + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_version_nvdata_persistent_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%08xh\n", + le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word)); +} +static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO, + _ctl_version_nvdata_persistent_show, NULL); + +/** + * _ctl_version_nvdata_default_show - nvdata default version + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read-only' shost attribute.
+ */ +static ssize_t +_ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute + *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%08xh\n", + le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word)); +} +static DEVICE_ATTR(version_nvdata_default, S_IRUGO, + _ctl_version_nvdata_default_show, NULL); + +/** + * _ctl_board_name_show - board name + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_board_name_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName); +} +static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL); + +/** + * _ctl_board_assembly_show - board assembly name + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly); +} +static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL); + +/** + * _ctl_board_tracer_show - board tracer number + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber); +} +static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL); + +/** + * _ctl_io_delay_show - io missing delay + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * This is used by the firmware implementation for debouncing device + * removal events. + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay); +} +static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL); + +/** + * _ctl_device_delay_show - device missing delay + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * This is used by the firmware implementation for debouncing device + * removal events. + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay); +} +static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL); + +/** + * _ctl_fw_queue_depth_show - global credits + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * This is the firmware queue depth limit. + * + * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
+}
+static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
+
+/**
+ * _ctl_host_sas_address_show - sas address
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the controller sas address
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+	    (unsigned long long)ioc->sas_hba.sas_address);
+}
+static DEVICE_ATTR(host_sas_address, S_IRUGO,
+	_ctl_host_sas_address_show, NULL);
+
+/**
+ * _ctl_logging_level_show - logging level
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
+}
+static ssize_t
+_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	int val = 0;
+
+	if (sscanf(buf, "%x", &val) != 1)
+		return -EINVAL;
+
+	ioc->logging_level = val;
+	pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
+	    ioc->logging_level);
+	return strlen(buf);
+}
+static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
+	_ctl_logging_level_store);
+
+/**
+ * _ctl_fwfault_debug_show - show/store fwfault_debug
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * mpt3sas_fwfault_debug is a command line option
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
+}
+static ssize_t
+_ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	int val = 0;
+
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+
+	ioc->fwfault_debug = val;
+	pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
+	    ioc->fwfault_debug);
+	return strlen(buf);
+}
+static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
+	_ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
+
+/**
+ * _ctl_ioc_reset_count_show - ioc reset count
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the count of adapter (IOC) resets
+ *
+ * A sysfs 'read-only' shost attribute.
+ */ +static ssize_t +_ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count); +} +static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL); + +/** + * _ctl_ioc_reply_queue_count_show - number of reply queues + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * This is number of reply queues + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_ioc_reply_queue_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + u8 reply_queue_count; + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + if ((ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable) + reply_queue_count = ioc->reply_queue_count; + else + reply_queue_count = 1; + + return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count); +} +static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show, + NULL); + +struct DIAG_BUFFER_START { + __le32 Size; + __le32 DiagVersion; + u8 BufferType; + u8 Reserved[3]; + __le32 Reserved1; + __le32 Reserved2; + __le32 Reserved3; +}; + +/** + * _ctl_host_trace_buffer_size_show - host buffer size (trace only) + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_host_trace_buffer_size_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + u32 size = 0; + struct DIAG_BUFFER_START *request_data; + + if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { + pr_err(MPT3SAS_FMT + "%s: host_trace_buffer is not registered\n", + ioc->name, __func__); + return 0; + } + + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + pr_err(MPT3SAS_FMT + "%s: host_trace_buffer is not registered\n", + ioc->name, __func__); + return 0; + } + + request_data = (struct DIAG_BUFFER_START *) + ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]; + if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 || + le32_to_cpu(request_data->DiagVersion) == 0x01000000 || + le32_to_cpu(request_data->DiagVersion) == 0x01010000) && + le32_to_cpu(request_data->Reserved3) == 0x4742444c) + size = le32_to_cpu(request_data->Size); + + ioc->ring_buffer_sz = size; + return snprintf(buf, PAGE_SIZE, "%d\n", size); +} +static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO, + _ctl_host_trace_buffer_size_show, NULL); + +/** + * _ctl_host_trace_buffer_show - firmware ring buffer (trace only) + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read/write' shost attribute. + * + * You will only be able to read 4k bytes of ring buffer at a time. + * In order to read beyond 4k bytes, you will have to write out the + * offset to the same attribute, it will move the pointer. 
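+ *
+ * Example: a userspace sketch (an illustration, not part of this
+ * driver) draining the whole trace buffer with that protocol; the
+ * sysfs path is an assumption:
+ *
+ *	#include <stdio.h>
+ *
+ *	#define ATTR "/sys/class/scsi_host/host0/host_trace_buffer"
+ *
+ *	int main(void)
+ *	{
+ *		char page[4096];
+ *		long off = 0;
+ *		size_t n;
+ *
+ *		for (;;) {
+ *			FILE *f = fopen(ATTR, "r+");
+ *
+ *			if (!f)
+ *				return 1;
+ *			fprintf(f, "%ld\n", off);
+ *			rewind(f);
+ *			n = fread(page, 1, sizeof(page), f);
+ *			fclose(f);
+ *			if (n == 0)
+ *				break;
+ *			fwrite(page, 1, n, stdout);
+ *			off += n;
+ *		}
+ *		return 0;
+ *	}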
+ */
+static ssize_t
+_ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	void *request_data;
+	u32 size;
+
+	if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
+		pr_err(MPT3SAS_FMT
+			"%s: host_trace_buffer is not registered\n",
+			ioc->name, __func__);
+		return 0;
+	}
+
+	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+		pr_err(MPT3SAS_FMT
+			"%s: host_trace_buffer is not registered\n",
+			ioc->name, __func__);
+		return 0;
+	}
+
+	if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
+		return 0;
+
+	size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
+	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+	request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset;
+	memcpy(buf, request_data, size);
+	return size;
+}
+
+static ssize_t
+_ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	int val = 0;
+
+	if (sscanf(buf, "%d", &val) != 1)
+		return -EINVAL;
+
+	ioc->ring_buffer_offset = val;
+	return strlen(buf);
+}
+static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
+	_ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
+
+
+/*****************************************/
+
+/**
+ * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ *
+ * This is a mechanism to post/release host_trace_buffers
+ */
+static ssize_t
+_ctl_host_trace_buffer_enable_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
+	    ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
+		return snprintf(buf, PAGE_SIZE, "off\n");
+	else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+	    MPT3_DIAG_BUFFER_IS_RELEASED))
+		return snprintf(buf, PAGE_SIZE, "release\n");
+	else
+		return snprintf(buf, PAGE_SIZE, "post\n");
+}
+
+static ssize_t
+_ctl_host_trace_buffer_enable_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	char str[10] = "";
+	struct mpt3_diag_register diag_register;
+	u8 issue_reset = 0;
+
+	/* don't allow post/release to occur while recovery is active */
+	if (ioc->shost_recovery || ioc->remove_host ||
+	    ioc->pci_error_recovery || ioc->is_driver_loading)
+		return -EBUSY;
+
+	if (sscanf(buf, "%9s", str) != 1)
+		return -EINVAL;
+
+	if (!strcmp(str, "post")) {
+		/* exit out if host buffers are already posted */
+		if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
+		    (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+		    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
+		    ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+		    MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
+			goto out;
+		memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
+		pr_info(MPT3SAS_FMT "posting host trace buffers\n",
+		    ioc->name);
+		diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+		diag_register.requested_buffer_size = (1024 * 1024);
+		diag_register.unique_id = 0x7075900;
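+		/*
+		 * The 1 MiB size and the fixed unique_id above are the
+		 * driver's built-in defaults for the internally posted
+		 * trace buffer; clearing the status word below lets
+		 * _ctl_diag_register_2() treat this as a fresh
+		 * registration.
+		 */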
ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0; + _ctl_diag_register_2(ioc, &diag_register); + } else if (!strcmp(str, "release")) { + /* exit out if host buffers are already released */ + if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) + goto out; + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) + goto out; + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + goto out; + pr_info(MPT3SAS_FMT "releasing host trace buffer\n", + ioc->name); + mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, + &issue_reset); + } + + out: + return strlen(buf); +} +static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR, + _ctl_host_trace_buffer_enable_show, + _ctl_host_trace_buffer_enable_store); + +/*********** diagnostic trigger suppport *********************************/ + +/** + * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +_ctl_diag_trigger_master_show(struct device *cdev, + struct device_attribute *attr, char *buf) + +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + unsigned long flags; + ssize_t rc; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + rc = sizeof(struct SL_WH_MASTER_TRIGGER_T); + memcpy(buf, &ioc->diag_trigger_master, rc); + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return rc; +} + +/** + * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +_ctl_diag_trigger_master_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) + +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + unsigned long flags; + ssize_t rc; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count); + memset(&ioc->diag_trigger_master, 0, + sizeof(struct SL_WH_MASTER_TRIGGER_T)); + memcpy(&ioc->diag_trigger_master, buf, rc); + ioc->diag_trigger_master.MasterData |= + (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return rc; +} +static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR, + _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store); + + +/** + * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +_ctl_diag_trigger_event_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + unsigned long flags; + ssize_t rc; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T); + memcpy(buf, &ioc->diag_trigger_event, rc); + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return rc; +} + +/** + * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read/write' shost attribute. 
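+ *
+ * As in the other trigger stores that follow, the incoming buffer is
+ * truncated to the structure size and ValidEntries is clamped to
+ * NUM_VALID_ENTRIES, so a short or oversized write cannot overrun the
+ * embedded trigger table.  Note also that the master-trigger store
+ * above always folds MASTER_TRIGGER_FW_FAULT and
+ * MASTER_TRIGGER_ADAPTER_RESET back in; combining them with '+' is
+ * safe only because they are distinct single bits, though '|' would
+ * be the conventional spelling.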
+ */
+static ssize_t
+_ctl_diag_trigger_event_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	unsigned long flags;
+	ssize_t sz;
+
+	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+	sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
+	memset(&ioc->diag_trigger_event, 0,
+	    sizeof(struct SL_WH_EVENT_TRIGGERS_T));
+	memcpy(&ioc->diag_trigger_event, buf, sz);
+	if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
+		ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
+	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+	return sz;
+}
+static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
+	_ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);
+
+
+/**
+ * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_scsi_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	unsigned long flags;
+	ssize_t rc;
+
+	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+	rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
+	memcpy(buf, &ioc->diag_trigger_scsi, rc);
+	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+	return rc;
+}
+
+/**
+ * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_scsi_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	unsigned long flags;
+	ssize_t sz;
+
+	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+	sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
+	memset(&ioc->diag_trigger_scsi, 0,
+	    sizeof(struct SL_WH_SCSI_TRIGGERS_T));
+	memcpy(&ioc->diag_trigger_scsi, buf, sz);
+	if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
+		ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
+	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+	return sz;
+}
+static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
+	_ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);
+
+
+/**
+ * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_mpi_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	unsigned long flags;
+	ssize_t rc;
+
+	spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+	rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
+	memcpy(buf, &ioc->diag_trigger_mpi, rc);
+	spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+	return rc;
+}
+
+/**
+ * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */ +static ssize_t +_ctl_diag_trigger_mpi_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + unsigned long flags; + ssize_t sz; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count); + memset(&ioc->diag_trigger_mpi, 0, + sizeof(ioc->diag_trigger_mpi)); + memcpy(&ioc->diag_trigger_mpi, buf, sz); + if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES) + ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES; + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return sz; +} + +static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR, + _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store); + +/*********** diagnostic trigger suppport *** END ****************************/ + + + +/*****************************************/ + +struct device_attribute *mpt3sas_host_attrs[] = { + &dev_attr_version_fw, + &dev_attr_version_bios, + &dev_attr_version_mpi, + &dev_attr_version_product, + &dev_attr_version_nvdata_persistent, + &dev_attr_version_nvdata_default, + &dev_attr_board_name, + &dev_attr_board_assembly, + &dev_attr_board_tracer, + &dev_attr_io_delay, + &dev_attr_device_delay, + &dev_attr_logging_level, + &dev_attr_fwfault_debug, + &dev_attr_fw_queue_depth, + &dev_attr_host_sas_address, + &dev_attr_ioc_reset_count, + &dev_attr_host_trace_buffer_size, + &dev_attr_host_trace_buffer, + &dev_attr_host_trace_buffer_enable, + &dev_attr_reply_queue_count, + &dev_attr_diag_trigger_master, + &dev_attr_diag_trigger_event, + &dev_attr_diag_trigger_scsi, + &dev_attr_diag_trigger_mpi, + NULL, +}; + +/* device attributes */ + +/** + * _ctl_device_sas_address_show - sas address + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * This is the sas address for the target + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", + (unsigned long long)sas_device_priv_data->sas_target->sas_address); +} +static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL); + +/** + * _ctl_device_handle_show - device handle + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * This is the firmware assigned device handle + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_device_handle_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf(buf, PAGE_SIZE, "0x%04x\n", + sas_device_priv_data->sas_target->handle); +} +static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL); + +struct device_attribute *mpt3sas_dev_attrs[] = { + &dev_attr_sas_address, + &dev_attr_sas_device_handle, + NULL, +}; + +static const struct file_operations ctl_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = _ctl_ioctl, + .poll = _ctl_poll, + .fasync = _ctl_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = _ctl_ioctl_compat, +#endif +}; + +static struct miscdevice ctl_dev = { + .minor = MPT3SAS_MINOR, + .name = MPT3SAS_DEV_NAME, + .fops = &ctl_fops, +}; + +/** + * mpt3sas_ctl_init - main entry point for ctl. 
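+ *
+ * Registering the miscdevice below creates /dev/mpt3ctl (char major
+ * 10, minor MPT3SAS_MINOR), the node user applications open to issue
+ * the MPT3* ioctls declared in mpt3sas_ctl.h.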
+ * + */ +void +mpt3sas_ctl_init(void) +{ + async_queue = NULL; + if (misc_register(&ctl_dev) < 0) + pr_err("%s can't register misc device [minor=%d]\n", + MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR); + + init_waitqueue_head(&ctl_poll_wait); +} + +/** + * mpt3sas_ctl_exit - exit point for ctl + * + */ +void +mpt3sas_ctl_exit(void) +{ + struct MPT3SAS_ADAPTER *ioc; + int i; + + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { + + /* free memory associated to diag buffers */ + for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { + if (!ioc->diag_buffer[i]) + continue; + if (!(ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + continue; + if ((ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + continue; + pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i], + ioc->diag_buffer[i], ioc->diag_buffer_dma[i]); + ioc->diag_buffer[i] = NULL; + ioc->diag_buffer_status[i] = 0; + } + + kfree(ioc->event_log); + } + misc_deregister(&ctl_dev); +} diff --git a/addons/mpt3sas/src/3.10.108/mpt3sas_ctl.h b/addons/mpt3sas/src/3.10.108/mpt3sas_ctl.h new file mode 100644 index 00000000..bd89f4f0 --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpt3sas_ctl.h @@ -0,0 +1,418 @@ +/* + * Management Module Support for MPT (Message Passing Technology) based + * controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h + * Copyright (C) 2012 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#ifndef MPT3SAS_CTL_H_INCLUDED +#define MPT3SAS_CTL_H_INCLUDED + +#ifdef __KERNEL__ +#include +#endif + + +#ifndef MPT3SAS_MINOR +#define MPT3SAS_MINOR (MPT_MINOR + 2) +#endif +#define MPT3SAS_DEV_NAME "mpt3ctl" +#define MPT3_MAGIC_NUMBER 'L' +#define MPT3_IOCTL_DEFAULT_TIMEOUT (10) /* in seconds */ + +/** + * IOCTL opcodes + */ +#define MPT3IOCINFO _IOWR(MPT3_MAGIC_NUMBER, 17, \ + struct mpt3_ioctl_iocinfo) +#define MPT3COMMAND _IOWR(MPT3_MAGIC_NUMBER, 20, \ + struct mpt3_ioctl_command) +#ifdef CONFIG_COMPAT +#define MPT3COMMAND32 _IOWR(MPT3_MAGIC_NUMBER, 20, \ + struct mpt3_ioctl_command32) +#endif +#define MPT3EVENTQUERY _IOWR(MPT3_MAGIC_NUMBER, 21, \ + struct mpt3_ioctl_eventquery) +#define MPT3EVENTENABLE _IOWR(MPT3_MAGIC_NUMBER, 22, \ + struct mpt3_ioctl_eventenable) +#define MPT3EVENTREPORT _IOWR(MPT3_MAGIC_NUMBER, 23, \ + struct mpt3_ioctl_eventreport) +#define MPT3HARDRESET _IOWR(MPT3_MAGIC_NUMBER, 24, \ + struct mpt3_ioctl_diag_reset) +#define MPT3BTDHMAPPING _IOWR(MPT3_MAGIC_NUMBER, 31, \ + struct mpt3_ioctl_btdh_mapping) + +/* diag buffer support */ +#define MPT3DIAGREGISTER _IOWR(MPT3_MAGIC_NUMBER, 26, \ + struct mpt3_diag_register) +#define MPT3DIAGRELEASE _IOWR(MPT3_MAGIC_NUMBER, 27, \ + struct mpt3_diag_release) +#define MPT3DIAGUNREGISTER _IOWR(MPT3_MAGIC_NUMBER, 28, \ + struct mpt3_diag_unregister) +#define MPT3DIAGQUERY _IOWR(MPT3_MAGIC_NUMBER, 29, \ + struct mpt3_diag_query) +#define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \ + struct mpt3_diag_read_buffer) + +/** + * struct mpt3_ioctl_header - main header structure + * @ioc_number - IOC unit number + * @port_number - IOC port number + * @max_data_size - maximum number bytes to transfer on read + */ +struct mpt3_ioctl_header { + uint32_t ioc_number; + uint32_t port_number; + uint32_t max_data_size; +}; + +/** + * struct mpt3_ioctl_diag_reset - diagnostic reset + * @hdr - generic header + */ +struct mpt3_ioctl_diag_reset { + struct mpt3_ioctl_header hdr; +}; + + +/** + * struct mpt3_ioctl_pci_info - pci device info + * @device - pci device id + * @function - pci function id + * @bus - pci bus id + * @segment_id - pci segment id + */ +struct mpt3_ioctl_pci_info { + union { + struct { + uint32_t device:5; + uint32_t function:3; + uint32_t bus:24; + } bits; + uint32_t word; + } u; + uint32_t segment_id; +}; + + +#define MPT2_IOCTL_INTERFACE_SCSI (0x00) +#define MPT2_IOCTL_INTERFACE_FC (0x01) +#define MPT2_IOCTL_INTERFACE_FC_IP (0x02) +#define MPT2_IOCTL_INTERFACE_SAS (0x03) +#define MPT2_IOCTL_INTERFACE_SAS2 (0x04) +#define MPT3_IOCTL_INTERFACE_SAS3 (0x06) +#define MPT2_IOCTL_VERSION_LENGTH (32) + +/** + * struct mpt3_ioctl_iocinfo - generic controller info + * @hdr - generic header + * @adapter_type - type of adapter (spi, fc, sas) + * @port_number - port number + * @pci_id - PCI Id + * @hw_rev - hardware revision + * @sub_system_device - PCI subsystem Device ID + * @sub_system_vendor - PCI subsystem Vendor ID + * @rsvd0 - reserved + * @firmware_version - firmware version + * @bios_version - BIOS version + * @driver_version - driver version - 32 ASCII characters + * @rsvd1 - reserved + * @scsi_id - scsi id of adapter 0 + * @rsvd2 - reserved + * @pci_information - pci info (2nd revision) + */ +struct mpt3_ioctl_iocinfo { + struct mpt3_ioctl_header hdr; + uint32_t adapter_type; + uint32_t port_number; + uint32_t pci_id; + uint32_t hw_rev; + uint32_t subsystem_device; + uint32_t subsystem_vendor; + uint32_t rsvd0; + uint32_t firmware_version; + uint32_t bios_version; + uint8_t 
driver_version[MPT2_IOCTL_VERSION_LENGTH]; + uint8_t rsvd1; + uint8_t scsi_id; + uint16_t rsvd2; + struct mpt3_ioctl_pci_info pci_information; +}; + + +/* number of event log entries */ +#define MPT3SAS_CTL_EVENT_LOG_SIZE (50) + +/** + * struct mpt3_ioctl_eventquery - query event count and type + * @hdr - generic header + * @event_entries - number of events returned by get_event_report + * @rsvd - reserved + * @event_types - type of events currently being captured + */ +struct mpt3_ioctl_eventquery { + struct mpt3_ioctl_header hdr; + uint16_t event_entries; + uint16_t rsvd; + uint32_t event_types[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; +}; + +/** + * struct mpt3_ioctl_eventenable - enable/disable event capturing + * @hdr - generic header + * @event_types - toggle off/on type of events to be captured + */ +struct mpt3_ioctl_eventenable { + struct mpt3_ioctl_header hdr; + uint32_t event_types[4]; +}; + +#define MPT3_EVENT_DATA_SIZE (192) +/** + * struct MPT3_IOCTL_EVENTS - + * @event - the event that was reported + * @context - unique value for each event assigned by driver + * @data - event data returned in fw reply message + */ +struct MPT3_IOCTL_EVENTS { + uint32_t event; + uint32_t context; + uint8_t data[MPT3_EVENT_DATA_SIZE]; +}; + +/** + * struct mpt3_ioctl_eventreport - returing event log + * @hdr - generic header + * @event_data - (see struct MPT3_IOCTL_EVENTS) + */ +struct mpt3_ioctl_eventreport { + struct mpt3_ioctl_header hdr; + struct MPT3_IOCTL_EVENTS event_data[1]; +}; + +/** + * struct mpt3_ioctl_command - generic mpt firmware passthru ioctl + * @hdr - generic header + * @timeout - command timeout in seconds. (if zero then use driver default + * value). + * @reply_frame_buf_ptr - reply location + * @data_in_buf_ptr - destination for read + * @data_out_buf_ptr - data source for write + * @sense_data_ptr - sense data location + * @max_reply_bytes - maximum number of reply bytes to be sent to app. + * @data_in_size - number bytes for data transfer in (read) + * @data_out_size - number bytes for data transfer out (write) + * @max_sense_bytes - maximum number of bytes for auto sense buffers + * @data_sge_offset - offset in words from the start of the request message to + * the first SGL + * @mf[1]; + */ +struct mpt3_ioctl_command { + struct mpt3_ioctl_header hdr; + uint32_t timeout; + void __user *reply_frame_buf_ptr; + void __user *data_in_buf_ptr; + void __user *data_out_buf_ptr; + void __user *sense_data_ptr; + uint32_t max_reply_bytes; + uint32_t data_in_size; + uint32_t data_out_size; + uint32_t max_sense_bytes; + uint32_t data_sge_offset; + uint8_t mf[1]; +}; + +#ifdef CONFIG_COMPAT +struct mpt3_ioctl_command32 { + struct mpt3_ioctl_header hdr; + uint32_t timeout; + uint32_t reply_frame_buf_ptr; + uint32_t data_in_buf_ptr; + uint32_t data_out_buf_ptr; + uint32_t sense_data_ptr; + uint32_t max_reply_bytes; + uint32_t data_in_size; + uint32_t data_out_size; + uint32_t max_sense_bytes; + uint32_t data_sge_offset; + uint8_t mf[1]; +}; +#endif + +/** + * struct mpt3_ioctl_btdh_mapping - mapping info + * @hdr - generic header + * @id - target device identification number + * @bus - SCSI bus number that the target device exists on + * @handle - device handle for the target device + * @rsvd - reserved + * + * To obtain a bus/id the application sets + * handle to valid handle, and bus/id to 0xFFFF. + * + * To obtain the device handle the application sets + * bus/id valid value, and the handle to 0xFFFF. 
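+ *
+ * Example: a userspace sketch (an illustration, assuming this header
+ * and the MPI2 headers it depends on are available to the caller)
+ * resolving a firmware handle to a bus/target id; the handle value
+ * here is made up:
+ *
+ *	struct mpt3_ioctl_btdh_mapping map;
+ *	int fd = open("/dev/mpt3ctl", O_RDWR);
+ *
+ *	memset(&map, 0, sizeof(map));
+ *	map.hdr.ioc_number = 0;
+ *	map.handle = 0x0009;
+ *	map.bus = 0xFFFF;
+ *	map.id = 0xFFFF;
+ *	if (fd >= 0 && ioctl(fd, MPT3BTDHMAPPING, &map) == 0)
+ *		printf("bus %u id %u\n", map.bus, map.id);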
+ */ +struct mpt3_ioctl_btdh_mapping { + struct mpt3_ioctl_header hdr; + uint32_t id; + uint32_t bus; + uint16_t handle; + uint16_t rsvd; +}; + + + +/* application flags for mpt3_diag_register, mpt3_diag_query */ +#define MPT3_APP_FLAGS_APP_OWNED (0x0001) +#define MPT3_APP_FLAGS_BUFFER_VALID (0x0002) +#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004) + +/* flags for mpt3_diag_read_buffer */ +#define MPT3_FLAGS_REREGISTER (0x0001) + +#define MPT3_PRODUCT_SPECIFIC_DWORDS 23 + +/** + * struct mpt3_diag_register - application register with driver + * @hdr - generic header + * @reserved - + * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED + * @application_flags - misc flags + * @diagnostic_flags - specifies flags affecting command processing + * @product_specific - product specific information + * @requested_buffer_size - buffers size in bytes + * @unique_id - tag specified by application that is used to signal ownership + * of the buffer. + * + * This will allow the driver to setup any required buffers that will be + * needed by firmware to communicate with the driver. + */ +struct mpt3_diag_register { + struct mpt3_ioctl_header hdr; + uint8_t reserved; + uint8_t buffer_type; + uint16_t application_flags; + uint32_t diagnostic_flags; + uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS]; + uint32_t requested_buffer_size; + uint32_t unique_id; +}; + +/** + * struct mpt3_diag_unregister - application unregister with driver + * @hdr - generic header + * @unique_id - tag uniquely identifies the buffer to be unregistered + * + * This will allow the driver to cleanup any memory allocated for diag + * messages and to free up any resources. + */ +struct mpt3_diag_unregister { + struct mpt3_ioctl_header hdr; + uint32_t unique_id; +}; + +/** + * struct mpt3_diag_query - query relevant info associated with diag buffers + * @hdr - generic header + * @reserved - + * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED + * @application_flags - misc flags + * @diagnostic_flags - specifies flags affecting command processing + * @product_specific - product specific information + * @total_buffer_size - diag buffer size in bytes + * @driver_added_buffer_size - size of extra space appended to end of buffer + * @unique_id - unique id associated with this buffer. + * + * The application will send only buffer_type and unique_id. Driver will + * inspect unique_id first, if valid, fill in all the info. If unique_id is + * 0x00, the driver will return info specified by Buffer Type. + */ +struct mpt3_diag_query { + struct mpt3_ioctl_header hdr; + uint8_t reserved; + uint8_t buffer_type; + uint16_t application_flags; + uint32_t diagnostic_flags; + uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS]; + uint32_t total_buffer_size; + uint32_t driver_added_buffer_size; + uint32_t unique_id; +}; + +/** + * struct mpt3_diag_release - request to send Diag Release Message to firmware + * @hdr - generic header + * @unique_id - tag uniquely identifies the buffer to be released + * + * This allows ownership of the specified buffer to returned to the driver, + * allowing an application to read the buffer without fear that firmware is + * overwritting information in the buffer. 
+ */ +struct mpt3_diag_release { + struct mpt3_ioctl_header hdr; + uint32_t unique_id; +}; + +/** + * struct mpt3_diag_read_buffer - request for copy of the diag buffer + * @hdr - generic header + * @status - + * @reserved - + * @flags - misc flags + * @starting_offset - starting offset within drivers buffer where to start + * reading data at into the specified application buffer + * @bytes_to_read - number of bytes to copy from the drivers buffer into the + * application buffer starting at starting_offset. + * @unique_id - unique id associated with this buffer. + * @diagnostic_data - data payload + */ +struct mpt3_diag_read_buffer { + struct mpt3_ioctl_header hdr; + uint8_t status; + uint8_t reserved; + uint16_t flags; + uint32_t starting_offset; + uint32_t bytes_to_read; + uint32_t unique_id; + uint32_t diagnostic_data[1]; +}; + +#endif /* MPT3SAS_CTL_H_INCLUDED */ diff --git a/addons/mpt3sas/src/3.10.108/mpt3sas_debug.h b/addons/mpt3sas/src/3.10.108/mpt3sas_debug.h new file mode 100644 index 00000000..35405e70 --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpt3sas_debug.h @@ -0,0 +1,219 @@ +/* + * Logging Support for MPT (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c + * Copyright (C) 2012 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#ifndef MPT3SAS_DEBUG_H_INCLUDED +#define MPT3SAS_DEBUG_H_INCLUDED + +#define MPT_DEBUG 0x00000001 +#define MPT_DEBUG_MSG_FRAME 0x00000002 +#define MPT_DEBUG_SG 0x00000004 +#define MPT_DEBUG_EVENTS 0x00000008 +#define MPT_DEBUG_EVENT_WORK_TASK 0x00000010 +#define MPT_DEBUG_INIT 0x00000020 +#define MPT_DEBUG_EXIT 0x00000040 +#define MPT_DEBUG_FAIL 0x00000080 +#define MPT_DEBUG_TM 0x00000100 +#define MPT_DEBUG_REPLY 0x00000200 +#define MPT_DEBUG_HANDSHAKE 0x00000400 +#define MPT_DEBUG_CONFIG 0x00000800 +#define MPT_DEBUG_DL 0x00001000 +#define MPT_DEBUG_RESET 0x00002000 +#define MPT_DEBUG_SCSI 0x00004000 +#define MPT_DEBUG_IOCTL 0x00008000 +#define MPT_DEBUG_SAS 0x00020000 +#define MPT_DEBUG_TRANSPORT 0x00040000 +#define MPT_DEBUG_TASK_SET_FULL 0x00080000 + +#define MPT_DEBUG_TRIGGER_DIAG 0x00200000 + + +/* + * CONFIG_SCSI_MPT3SAS_LOGGING - enabled in Kconfig + */ + +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING +#define MPT_CHECK_LOGGING(IOC, CMD, BITS) \ +{ \ + if (IOC->logging_level & BITS) \ + CMD; \ +} +#else +#define MPT_CHECK_LOGGING(IOC, CMD, BITS) +#endif /* CONFIG_SCSI_MPT3SAS_LOGGING */ + + +/* + * debug macros + */ + +#define dprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG) + +#define dsgprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SG) + +#define devtprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENTS) + +#define dewtprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENT_WORK_TASK) + +#define dinitprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_INIT) + +#define dexitprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EXIT) + +#define dfailprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_FAIL) + +#define dtmprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TM) + +#define dreplyprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_REPLY) + +#define dhsprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_HANDSHAKE) + +#define dcprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CONFIG) + +#define ddlprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_DL) + +#define drsprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_RESET) + +#define dsprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SCSI) + +#define dctlprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_IOCTL) + +#define dsasprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS) + +#define dsastransport(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE) + +#define dmfprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_MSG_FRAME) + +#define dtsfprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TASK_SET_FULL) + +#define dtransportprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRANSPORT) + +#define dTriggerDiagPrintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRIGGER_DIAG) + + + +/* inline functions for dumping debug data*/ +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING +/** + * _debug_dump_mf - print message frame contents + * @mpi_request: pointer to message frame + * @sz: number of dwords + */ +static inline void +_debug_dump_mf(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + pr_info("mf:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} +/** + * _debug_dump_reply - print message frame contents + * @mpi_request: pointer to message frame + * @sz: number of dwords + */ +static inline void +_debug_dump_reply(void 
*mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + pr_info("reply:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} +/** + * _debug_dump_config - print config page contents + * @mpi_request: pointer to message frame + * @sz: number of dwords + */ +static inline void +_debug_dump_config(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + pr_info("config:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} +#else +#define _debug_dump_mf(mpi_request, sz) +#define _debug_dump_reply(mpi_request, sz) +#define _debug_dump_config(mpi_request, sz) +#endif /* CONFIG_SCSI_MPT3SAS_LOGGING */ + +#endif /* MPT3SAS_DEBUG_H_INCLUDED */ diff --git a/addons/mpt3sas/src/3.10.108/mpt3sas_scsih.c b/addons/mpt3sas/src/3.10.108/mpt3sas_scsih.c new file mode 100644 index 00000000..1d6e1155 --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpt3sas_scsih.c @@ -0,0 +1,8198 @@ +/* + * Scsi Host Layer for MPT (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c + * Copyright (C) 2012 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mpt3sas_base.h" + +MODULE_AUTHOR(MPT3SAS_AUTHOR); +MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION); +MODULE_LICENSE("GPL"); +MODULE_VERSION(MPT3SAS_DRIVER_VERSION); + +#define RAID_CHANNEL 1 +/* forward proto's */ +static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander); +static void _firmware_event_work(struct work_struct *work); + +static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device); +static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 retry_count, u8 is_pd); + +static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid); + +static void _scsih_scan_start(struct Scsi_Host *shost); +static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time); + +/* global parameters */ +LIST_HEAD(mpt3sas_ioc_list); + +/* local parameters */ +static u8 scsi_io_cb_idx = -1; +static u8 tm_cb_idx = -1; +static u8 ctl_cb_idx = -1; +static u8 base_cb_idx = -1; +static u8 port_enable_cb_idx = -1; +static u8 transport_cb_idx = -1; +static u8 scsih_cb_idx = -1; +static u8 config_cb_idx = -1; +static int mpt_ids; + +static u8 tm_tr_cb_idx = -1 ; +static u8 tm_tr_volume_cb_idx = -1 ; +static u8 tm_sas_control_cb_idx = -1; + +/* command line options */ +static u32 logging_level; +MODULE_PARM_DESC(logging_level, + " bits for enabling additional logging info (default=0)"); + + +static ushort max_sectors = 0xFFFF; +module_param(max_sectors, ushort, 0); +MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767"); + + +static int missing_delay[2] = {-1, -1}; +module_param_array(missing_delay, int, NULL, 0); +MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay"); + +/* scsi-mid layer global parmeter is max_report_luns, which is 511 */ +#define MPT3SAS_MAX_LUN (16895) +static int max_lun = MPT3SAS_MAX_LUN; +module_param(max_lun, int, 0); +MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); + + + + +/* diag_buffer_enable is bitwise + * bit 0 set = TRACE + * bit 1 set = SNAPSHOT + * bit 2 set = EXTENDED + * + * Either bit can be set, or both + */ +static int diag_buffer_enable = -1; +module_param(diag_buffer_enable, int, 0); +MODULE_PARM_DESC(diag_buffer_enable, + " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); +static int disable_discovery = -1; +module_param(disable_discovery, int, 0); +MODULE_PARM_DESC(disable_discovery, " disable discovery "); + + +/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ +static int prot_mask = -1; +module_param(prot_mask, int, 0); +MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 "); + + +/* raid transport support */ + +static struct raid_template *mpt3sas_raid_template; + + +/** + * struct sense_info - common structure for obtaining sense keys + * @skey: sense key + * @asc: additional sense code + * @ascq: additional sense code qualifier + */ +struct sense_info { + u8 skey; + u8 asc; + u8 ascq; +}; + +#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB) +#define MPT3SAS_TURN_ON_FAULT_LED (0xFFFC) +#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD) +#define MPT3SAS_ABRT_TASK_SET (0xFFFE) +#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) +/** + * struct fw_event_work - firmware event struct + * @list: link list framework + * @work: work object (ioc->fault_reset_work_q) + * @cancel_pending_work: flag set during reset 
handling + * @ioc: per adapter object + * @device_handle: device handle + * @VF_ID: virtual function id + * @VP_ID: virtual port id + * @ignore: flag meaning this event has been marked to ignore + * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h + * @event_data: reply event data payload follows + * + * This object stored on ioc->fw_event_list. + */ +struct fw_event_work { + struct list_head list; + struct work_struct work; + u8 cancel_pending_work; + struct delayed_work delayed_work; + + struct MPT3SAS_ADAPTER *ioc; + u16 device_handle; + u8 VF_ID; + u8 VP_ID; + u8 ignore; + u16 event; + void *event_data; +}; + +/* raid transport support */ +static struct raid_template *mpt3sas_raid_template; + +/** + * struct _scsi_io_transfer - scsi io transfer + * @handle: sas device handle (assigned by firmware) + * @is_raid: flag set for hidden raid components + * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE, + * @data_length: data transfer length + * @data_dma: dma pointer to data + * @sense: sense data + * @lun: lun number + * @cdb_length: cdb length + * @cdb: cdb contents + * @timeout: timeout for this command + * @VF_ID: virtual function id + * @VP_ID: virtual port id + * @valid_reply: flag set for reply message + * @sense_length: sense length + * @ioc_status: ioc status + * @scsi_state: scsi state + * @scsi_status: scsi staus + * @log_info: log information + * @transfer_length: data length transfer when there is a reply message + * + * Used for sending internal scsi commands to devices within this module. + * Refer to _scsi_send_scsi_io(). + */ +struct _scsi_io_transfer { + u16 handle; + u8 is_raid; + enum dma_data_direction dir; + u32 data_length; + dma_addr_t data_dma; + u8 sense[SCSI_SENSE_BUFFERSIZE]; + u32 lun; + u8 cdb_length; + u8 cdb[32]; + u8 timeout; + u8 VF_ID; + u8 VP_ID; + u8 valid_reply; + /* the following bits are only valid when 'valid_reply = 1' */ + u32 sense_length; + u16 ioc_status; + u8 scsi_state; + u8 scsi_status; + u32 log_info; + u32 transfer_length; +}; + +/* + * The pci device ids are defined in mpi/mpi2_cnfg.h. + */ +static DEFINE_PCI_DEVICE_TABLE(scsih_pci_table) = { + /* Fury ~ 3004 and 3008 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008, + PCI_ANY_ID, PCI_ANY_ID }, + /* Invader ~ 3108 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6, + PCI_ANY_ID, PCI_ANY_ID }, + {0} /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(pci, scsih_pci_table); + +/** + * _scsih_set_debug_level - global setting of ioc->logging_level. + * + * Note: The logging levels are defined in mpt3sas_debug.h. 
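+ *
+ * Because the parameter is registered with mode 0644 below, the level
+ * can also be changed at runtime by writing a hex value such as 0x8000
+ * (MPT_DEBUG_IOCTL) to /sys/module/mpt3sas/parameters/logging_level,
+ * which invokes this handler for every registered adapter (the path
+ * assumes the module is loaded under the name mpt3sas).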
+ */ +static int +_scsih_set_debug_level(const char *val, struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + struct MPT3SAS_ADAPTER *ioc; + + if (ret) + return ret; + + pr_info("setting logging_level(0x%08x)\n", logging_level); + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) + ioc->logging_level = logging_level; + return 0; +} +module_param_call(logging_level, _scsih_set_debug_level, param_get_int, + &logging_level, 0644); + +/** + * _scsih_srch_boot_sas_address - search based on sas_address + * @sas_address: sas address + * @boot_device: boot device object from bios page 2 + * + * Returns 1 when there's a match, 0 means no match. + */ +static inline int +_scsih_srch_boot_sas_address(u64 sas_address, + Mpi2BootDeviceSasWwid_t *boot_device) +{ + return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0; +} + +/** + * _scsih_srch_boot_device_name - search based on device name + * @device_name: device name specified in INDENTIFY fram + * @boot_device: boot device object from bios page 2 + * + * Returns 1 when there's a match, 0 means no match. + */ +static inline int +_scsih_srch_boot_device_name(u64 device_name, + Mpi2BootDeviceDeviceName_t *boot_device) +{ + return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0; +} + +/** + * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot + * @enclosure_logical_id: enclosure logical id + * @slot_number: slot number + * @boot_device: boot device object from bios page 2 + * + * Returns 1 when there's a match, 0 means no match. + */ +static inline int +_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number, + Mpi2BootDeviceEnclosureSlot_t *boot_device) +{ + return (enclosure_logical_id == le64_to_cpu(boot_device-> + EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device-> + SlotNumber)) ? 1 : 0; +} + +/** + * _scsih_is_boot_device - search for matching boot device. + * @sas_address: sas address + * @device_name: device name specified in INDENTIFY fram + * @enclosure_logical_id: enclosure logical id + * @slot_number: slot number + * @form: specifies boot device form + * @boot_device: boot device object from bios page 2 + * + * Returns 1 when there's a match, 0 means no match. 
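+ *
+ * The comparison is made against whichever addressing form BIOS page 2
+ * selected: SAS WWID, enclosure/slot, or device name; a zero key for
+ * the selected form never matches.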
+ */ +static int +_scsih_is_boot_device(u64 sas_address, u64 device_name, + u64 enclosure_logical_id, u16 slot, u8 form, + Mpi2BiosPage2BootDevice_t *boot_device) +{ + int rc = 0; + + switch (form) { + case MPI2_BIOSPAGE2_FORM_SAS_WWID: + if (!sas_address) + break; + rc = _scsih_srch_boot_sas_address( + sas_address, &boot_device->SasWwid); + break; + case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT: + if (!enclosure_logical_id) + break; + rc = _scsih_srch_boot_encl_slot( + enclosure_logical_id, + slot, &boot_device->EnclosureSlot); + break; + case MPI2_BIOSPAGE2_FORM_DEVICE_NAME: + if (!device_name) + break; + rc = _scsih_srch_boot_device_name( + device_name, &boot_device->DeviceName); + break; + case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED: + break; + } + + return rc; +} + +/** + * _scsih_get_sas_address - set the sas_address for given device handle + * @handle: device handle + * @sas_address: sas address + * + * Returns 0 success, non-zero when failure + */ +static int +_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u64 *sas_address) +{ + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2ConfigReply_t mpi_reply; + u32 ioc_status; + + *sas_address = 0; + + if (handle <= ioc->sas_hba.num_phys) { + *sas_address = ioc->sas_hba.sas_address; + return 0; + } + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return -ENXIO; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + *sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + return 0; + } + + /* we hit this becuase the given parent handle doesn't exist */ + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + return -ENXIO; + + /* else error case */ + pr_err(MPT3SAS_FMT + "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n", + ioc->name, handle, ioc_status, + __FILE__, __LINE__, __func__); + return -EIO; +} + +/** + * _scsih_determine_boot_device - determine boot device. + * @ioc: per adapter object + * @device: either sas_device or raid_device object + * @is_raid: [flag] 1 = raid object, 0 = sas object + * + * Determines whether this device should be first reported device to + * to scsi-ml or sas transport, this purpose is for persistent boot device. + * There are primary, alternate, and current entries in bios page 2. The order + * priority is primary, alternate, then current. This routine saves + * the corresponding device object and is_raid flag in the ioc object. + * The saved data to be used later in _scsih_probe_boot_devices(). 
+ */ +static void +_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, + void *device, u8 is_raid) +{ + struct _sas_device *sas_device; + struct _raid_device *raid_device; + u64 sas_address; + u64 device_name; + u64 enclosure_logical_id; + u16 slot; + + /* only process this function when driver loads */ + if (!ioc->is_driver_loading) + return; + + /* no Bios, return immediately */ + if (!ioc->bios_pg3.BiosVersion) + return; + + if (!is_raid) { + sas_device = device; + sas_address = sas_device->sas_address; + device_name = sas_device->device_name; + enclosure_logical_id = sas_device->enclosure_logical_id; + slot = sas_device->slot; + } else { + raid_device = device; + sas_address = raid_device->wwid; + device_name = 0; + enclosure_logical_id = 0; + slot = 0; + } + + if (!ioc->req_boot_device.device) { + if (_scsih_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedBootDevice)) { + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "%s: req_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->req_boot_device.device = device; + ioc->req_boot_device.is_raid = is_raid; + } + } + + if (!ioc->req_alt_boot_device.device) { + if (_scsih_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqAltBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedAltBootDevice)) { + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "%s: req_alt_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->req_alt_boot_device.device = device; + ioc->req_alt_boot_device.is_raid = is_raid; + } + } + + if (!ioc->current_boot_device.device) { + if (_scsih_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.CurrentBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.CurrentBootDevice)) { + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "%s: current_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->current_boot_device.device = device; + ioc->current_boot_device.is_raid = is_raid; + } + } +} + +/** + * mpt3sas_scsih_sas_device_find_by_sas_address - sas device search + * @ioc: per adapter object + * @sas_address: sas address + * Context: Calling function should acquire ioc->sas_device_lock + * + * This searches for sas_device based on sas_address, then return sas_device + * object. + */ +struct _sas_device * +mpt3sas_scsih_sas_device_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address) +{ + struct _sas_device *sas_device; + + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->sas_address == sas_address) + return sas_device; + + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->sas_address == sas_address) + return sas_device; + + return NULL; +} + +/** + * _scsih_sas_device_find_by_handle - sas device search + * @ioc: per adapter object + * @handle: sas device handle (assigned by firmware) + * Context: Calling function should acquire ioc->sas_device_lock + * + * This searches for sas_device based on sas_address, then return sas_device + * object. 
+ */ +static struct _sas_device * +_scsih_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_device *sas_device; + + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->handle == handle) + return sas_device; + + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->handle == handle) + return sas_device; + + return NULL; +} + +/** + * _scsih_sas_device_remove - remove sas_device from list. + * @ioc: per adapter object + * @sas_device: the sas_device object + * Context: This function will acquire ioc->sas_device_lock. + * + * Removing object and freeing associated memory from the ioc->sas_device_list. + */ +static void +_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + unsigned long flags; + + if (!sas_device) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_del(&sas_device->list); + kfree(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +/** + * _scsih_device_remove_by_handle - removing device object by handle + * @ioc: per adapter object + * @handle: device handle + * + * Return nothing. + */ +static void +_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_device *sas_device; + unsigned long flags; + + if (ioc->shost_recovery) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = _scsih_sas_device_find_by_handle(ioc, handle); + if (sas_device) + list_del(&sas_device->list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) + _scsih_remove_device(ioc, sas_device); +} + +/** + * mpt3sas_device_remove_by_sas_address - removing device object by sas address + * @ioc: per adapter object + * @sas_address: device sas_address + * + * Return nothing. + */ +void +mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address) +{ + struct _sas_device *sas_device; + unsigned long flags; + + if (ioc->shost_recovery) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + sas_address); + if (sas_device) + list_del(&sas_device->list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) + _scsih_remove_device(ioc, sas_device); +} + +/** + * _scsih_sas_device_add - insert sas_device to the list. + * @ioc: per adapter object + * @sas_device: the sas_device object + * Context: This function will acquire ioc->sas_device_lock. + * + * Adding new object to the ioc->sas_device_list. 
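+ * + * Note (editor's addition): if mpt3sas_transport_port_add() fails, or no + * starget was attached, the object is freed here through + * _scsih_sas_device_remove(), so callers must not touch sas_device after + * this routine returns.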
+ */ +static void +_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + unsigned long flags; + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address)); + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_add_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (!mpt3sas_transport_port_add(ioc, sas_device->handle, + sas_device->sas_address_parent)) { + _scsih_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + /* + * When async scanning is enabled, it is not possible to remove + * devices while scanning is turned on due to an oops in + * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start() + */ + if (!ioc->is_driver_loading) + mpt3sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent); + _scsih_sas_device_remove(ioc, sas_device); + } +} + +/** + * _scsih_sas_device_init_add - insert sas_device to the list. + * @ioc: per adapter object + * @sas_device: the sas_device object + * Context: This function will acquire ioc->sas_device_lock. + * + * Adding new object at driver load time to the ioc->sas_device_init_list. + */ +static void +_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + unsigned long flags; + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, + __func__, sas_device->handle, + (unsigned long long)sas_device->sas_address)); + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_add_tail(&sas_device->list, &ioc->sas_device_init_list); + _scsih_determine_boot_device(ioc, sas_device, 0); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +/** + * _scsih_raid_device_find_by_id - raid device search + * @ioc: per adapter object + * @id: sas device target id + * @channel: sas device channel + * Context: Calling function should acquire ioc->raid_device_lock + * + * This searches for raid_device based on target id, then returns the + * raid_device object. + */ +static struct _raid_device * +_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel) +{ + struct _raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->id == id && raid_device->channel == channel) { + r = raid_device; + goto out; + } + } + + out: + return r; +} + +/** + * _scsih_raid_device_find_by_handle - raid device search + * @ioc: per adapter object + * @handle: sas device handle (assigned by firmware) + * Context: Calling function should acquire ioc->raid_device_lock + * + * This searches for raid_device based on handle, then returns the + * raid_device object. + */ +static struct _raid_device * +_scsih_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->handle != handle) + continue; + r = raid_device; + goto out; + } + + out: + return r; +} + +/** + * _scsih_raid_device_find_by_wwid - raid device search + * @ioc: per adapter object + * @wwid: wwid of the raid volume + * Context: Calling function should acquire ioc->raid_device_lock + * + * This searches for raid_device based on wwid, then returns the raid_device + * object.
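+ * + * Editor's sketch (assumed locking, matching the other raid lookups in this + * file): + * + *	spin_lock_irqsave(&ioc->raid_device_lock, flags); + *	raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid); + *	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);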
+ */ +static struct _raid_device * +_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid) +{ + struct _raid_device *raid_device, *r; + + r = NULL; + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->wwid != wwid) + continue; + r = raid_device; + goto out; + } + + out: + return r; +} + +/** + * _scsih_raid_device_add - add raid_device object + * @ioc: per adapter object + * @raid_device: raid_device object + * + * This is added to the raid_device_list linked list. + */ +static void +_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc, + struct _raid_device *raid_device) +{ + unsigned long flags; + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__, + raid_device->handle, (unsigned long long)raid_device->wwid)); + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_add_tail(&raid_device->list, &ioc->raid_device_list); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +/** + * _scsih_raid_device_remove - delete raid_device object + * @ioc: per adapter object + * @raid_device: raid_device object + * + */ +static void +_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc, + struct _raid_device *raid_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_del(&raid_device->list); + kfree(raid_device); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +/** + * mpt3sas_scsih_expander_find_by_handle - expander device search + * @ioc: per adapter object + * @handle: expander handle (assigned by firmware) + * Context: Calling function should acquire ioc->sas_node_lock + * + * This searches for expander device based on handle, then returns the + * sas_node object. + */ +struct _sas_node * +mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_node *sas_expander, *r; + + r = NULL; + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->handle != handle) + continue; + r = sas_expander; + goto out; + } + out: + return r; +} + +/** + * mpt3sas_scsih_expander_find_by_sas_address - expander device search + * @ioc: per adapter object + * @sas_address: sas address + * Context: Calling function should acquire ioc->sas_node_lock. + * + * This searches for expander device based on sas_address, then returns the + * sas_node object. + */ +struct _sas_node * +mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address) +{ + struct _sas_node *sas_expander, *r; + + r = NULL; + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->sas_address != sas_address) + continue; + r = sas_expander; + goto out; + } + out: + return r; +} + +/** + * _scsih_expander_node_add - insert expander device to the list. + * @ioc: per adapter object + * @sas_expander: the sas_node object + * Context: This function will acquire ioc->sas_node_lock. + * + * Adding new object to the ioc->sas_expander_list. + * + * Return nothing. + */ +static void +_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_add_tail(&sas_expander->list, &ioc->sas_expander_list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +} + +/** + * _scsih_is_end_device - determines if device is an end device + * @device_info: bitfield providing information about the device. + * Context: none + * + * Returns 1 if end device.
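+ * + * For example (editor's note), a SATA disk reporting + * MPI2_SAS_DEVICE_INFO_END_DEVICE | MPI2_SAS_DEVICE_INFO_SATA_DEVICE yields + * 1, while an expander, which lacks the END_DEVICE bit, yields 0.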
+ */ +static int +_scsih_is_end_device(u32 device_info) +{ + if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE && + ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) | + (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) | + (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE))) + return 1; + else + return 0; +} + +/** + * _scsih_scsi_lookup_get - returns scmd entry + * @ioc: per adapter object + * @smid: system request message index + * + * Returns the smid stored scmd pointer. + */ +static struct scsi_cmnd * +_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return ioc->scsi_lookup[smid - 1].scmd; +} + +/** + * _scsih_scsi_lookup_get_clear - returns scmd entry + * @ioc: per adapter object + * @smid: system request message index + * + * Returns the smid stored scmd pointer. + * Then will clear the stored scmd pointer from the lookup table. + */ +static inline struct scsi_cmnd * +_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + unsigned long flags; + struct scsi_cmnd *scmd; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + scmd = ioc->scsi_lookup[smid - 1].scmd; + ioc->scsi_lookup[smid - 1].scmd = NULL; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + return scmd; +} + +/** + * _scsih_scsi_lookup_find_by_scmd - scmd lookup + * @ioc: per adapter object + * @smid: system request message index + * @scmd: pointer to scsi command object + * Context: This function will acquire ioc->scsi_lookup_lock. + * + * This will search for a scmd pointer in the scsi_lookup array, + * returning the relevant smid. A returned value of zero means invalid. + */ +static u16 +_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd + *scmd) +{ + u16 smid; + unsigned long flags; + int i; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + smid = 0; + for (i = 0; i < ioc->scsiio_depth; i++) { + if (ioc->scsi_lookup[i].scmd == scmd) { + smid = ioc->scsi_lookup[i].smid; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +/** + * _scsih_scsi_lookup_find_by_target - search for matching channel:id + * @ioc: per adapter object + * @id: target id + * @channel: channel + * Context: This function will acquire ioc->scsi_lookup_lock. + * + * This will search for a matching channel:id in the scsi_lookup array, + * returning 1 if found. + */ +static u8 +_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id, + int channel) +{ + u8 found; + unsigned long flags; + int i; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + found = 0; + for (i = 0 ; i < ioc->scsiio_depth; i++) { + if (ioc->scsi_lookup[i].scmd && + (ioc->scsi_lookup[i].scmd->device->id == id && + ioc->scsi_lookup[i].scmd->device->channel == channel)) { + found = 1; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return found; +} + +/** + * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun + * @ioc: per adapter object + * @id: target id + * @lun: lun number + * @channel: channel + * Context: This function will acquire ioc->scsi_lookup_lock. + * + * This will search for a matching channel:id:lun in the scsi_lookup array, + * returning 1 if found.
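+ * + * Editor's note: mpt3sas_scsih_issue_tm() below uses this after a logical + * unit reset to judge the result: + * + *	if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel)) + *		rc = FAILED; + *	else + *		rc = SUCCESS;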
+ */ +static u8 +_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id, + unsigned int lun, int channel) +{ + u8 found; + unsigned long flags; + int i; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + found = 0; + for (i = 0 ; i < ioc->scsiio_depth; i++) { + if (ioc->scsi_lookup[i].scmd && + (ioc->scsi_lookup[i].scmd->device->id == id && + ioc->scsi_lookup[i].scmd->device->channel == channel && + ioc->scsi_lookup[i].scmd->device->lun == lun)) { + found = 1; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return found; +} + + +static void +_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + int max_depth; + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + unsigned long flags; + + max_depth = shost->can_queue; + + /* limit max device queue for SATA to 32 */ + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + goto not_sata; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + goto not_sata; + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) + goto not_sata; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + sas_device_priv_data->sas_target->sas_address); + if (sas_device && sas_device->device_info & + MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + max_depth = MPT3SAS_SATA_QUEUE_DEPTH; + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + not_sata: + + if (!sdev->tagged_supported) + max_depth = 1; + if (qdepth > max_depth) + qdepth = max_depth; + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth); +} + +/** + * _scsih_change_queue_depth - setting device queue depth + * @sdev: scsi device struct + * @qdepth: requested queue depth + * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP + * (see include/scsi/scsi_host.h for definition) + * + * Returns queue depth. + */ +static int +_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason) +{ + if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP) + _scsih_adjust_queue_depth(sdev, qdepth); + else if (reason == SCSI_QDEPTH_QFULL) + scsi_track_queue_full(sdev, qdepth); + else + return -EOPNOTSUPP; + + if (sdev->inquiry_len > 7) + sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " \ + "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n", + sdev->queue_depth, sdev->tagged_supported, sdev->simple_tags, + sdev->ordered_tags, sdev->scsi_level, + (sdev->inquiry[7] & 2) >> 1); + + return sdev->queue_depth; +} + +/** + * _scsih_change_queue_type - changing device queue tag type + * @sdev: scsi device struct + * @tag_type: requested tag type + * + * Returns queue tag type. + */ +static int +_scsih_change_queue_type(struct scsi_device *sdev, int tag_type) +{ + if (sdev->tagged_supported) { + scsi_set_tag_type(sdev, tag_type); + if (tag_type) + scsi_activate_tcq(sdev, sdev->queue_depth); + else + scsi_deactivate_tcq(sdev, sdev->queue_depth); + } else + tag_type = 0; + + return tag_type; +} + + +/** + * _scsih_target_alloc - target add routine + * @starget: scsi target struct + * + * Returns 0 if ok. Any other return is assumed to be an error and + * the device is ignored. 
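+ * + * Editor's note: the MPT3SAS_TARGET private data allocated here is kept in + * starget->hostdata and is freed again in _scsih_target_destroy().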
+ */ +static int +_scsih_target_alloc(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + unsigned long flags; + struct sas_rphy *rphy; + + sas_target_priv_data = kzalloc(sizeof(struct scsi_target), GFP_KERNEL); + if (!sas_target_priv_data) + return -ENOMEM; + + starget->hostdata = sas_target_priv_data; + sas_target_priv_data->starget = starget; + sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; + + /* RAID volumes */ + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, starget->id, + starget->channel); + if (raid_device) { + sas_target_priv_data->handle = raid_device->handle; + sas_target_priv_data->sas_address = raid_device->wwid; + sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; + raid_device->starget = starget; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return 0; + } + + /* sas/sata devices */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + rphy = dev_to_rphy(starget->dev.parent); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + rphy->identify.sas_address); + + if (sas_device) { + sas_target_priv_data->handle = sas_device->handle; + sas_target_priv_data->sas_address = sas_device->sas_address; + sas_device->starget = starget; + sas_device->id = starget->id; + sas_device->channel = starget->channel; + if (test_bit(sas_device->handle, ioc->pd_handles)) + sas_target_priv_data->flags |= + MPT_TARGET_FLAGS_RAID_COMPONENT; + if (sas_device->fast_path) + sas_target_priv_data->flags |= MPT_TARGET_FASTPATH_IO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + return 0; +} + +/** + * _scsih_target_destroy - target destroy routine + * @starget: scsi target struct + * + * Returns nothing. + */ +static void +_scsih_target_destroy(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + unsigned long flags; + struct sas_rphy *rphy; + + sas_target_priv_data = starget->hostdata; + if (!sas_target_priv_data) + return; + + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, starget->id, + starget->channel); + if (raid_device) { + raid_device->starget = NULL; + raid_device->sdev = NULL; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + goto out; + } + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + rphy = dev_to_rphy(starget->dev.parent); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + rphy->identify.sas_address); + if (sas_device && (sas_device->starget == starget) && + (sas_device->id == starget->id) && + (sas_device->channel == starget->channel)) + sas_device->starget = NULL; + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + out: + kfree(sas_target_priv_data); + starget->hostdata = NULL; +} + +/** + * _scsih_slave_alloc - device add routine + * @sdev: scsi device struct + * + * Returns 0 if ok. Any other return is assumed to be an error and + * the device is ignored. 
+ */ +static int +_scsih_slave_alloc(struct scsi_device *sdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_target *starget; + struct _raid_device *raid_device; + struct _sas_device *sas_device; + unsigned long flags; + + sas_device_priv_data = kzalloc(sizeof(struct scsi_device), GFP_KERNEL); + if (!sas_device_priv_data) + return -ENOMEM; + + sas_device_priv_data->lun = sdev->lun; + sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT; + + starget = scsi_target(sdev); + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->num_luns++; + sas_device_priv_data->sas_target = sas_target_priv_data; + sdev->hostdata = sas_device_priv_data; + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT)) + sdev->no_uld_attach = 1; + + shost = dev_to_shost(&starget->dev); + ioc = shost_priv(shost); + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, + starget->id, starget->channel); + if (raid_device) + raid_device->sdev = sdev; /* raid is single lun */ + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } + + if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + sas_target_priv_data->sas_address); + if (sas_device && (sas_device->starget == NULL)) { + sdev_printk(KERN_INFO, sdev, + "%s : sas_device->starget set to starget @ %d\n", + __func__, __LINE__); + sas_device->starget = starget; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + + return 0; +} + +/** + * _scsih_slave_destroy - device destroy routine + * @sdev: scsi device struct + * + * Returns nothing. 
+ */ +static void +_scsih_slave_destroy(struct scsi_device *sdev) +{ + struct MPT3SAS_TARGET *sas_target_priv_data; + struct scsi_target *starget; + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + struct _sas_device *sas_device; + unsigned long flags; + + if (!sdev->hostdata) + return; + + starget = scsi_target(sdev); + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->num_luns--; + + shost = dev_to_shost(&starget->dev); + ioc = shost_priv(shost); + + if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + sas_target_priv_data->sas_address); + if (sas_device && !sas_target_priv_data->num_luns) + sas_device->starget = NULL; + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + + kfree(sdev->hostdata); + sdev->hostdata = NULL; +} + +/** + * _scsih_display_sata_capabilities - sata capabilities + * @ioc: per adapter object + * @handle: device handle + * @sdev: scsi device struct + */ +static void +_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc, + u16 handle, struct scsi_device *sdev) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + u32 ioc_status; + u16 flags; + u32 device_info; + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + flags = le16_to_cpu(sas_device_pg0.Flags); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + + sdev_printk(KERN_INFO, sdev, + "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), " + "sw_preserve(%s)\n", + (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" : + "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n", + (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n"); +} + +/* + * raid transport support - + * Enabled for SLES11 and newer; in older kernels the driver will panic when + * unloading the driver followed by a load - I believe that the subroutine + * raid_class_release() is not cleaning up properly. + */ + +/** + * _scsih_is_raid - return boolean indicating device is raid volume + * @dev: the device struct object + */ +static int +_scsih_is_raid(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + + return (sdev->channel == RAID_CHANNEL) ?
1 : 0; +} + +/** + * _scsih_get_resync - get raid volume resync percent complete + * @dev: the device struct object + */ +static void +_scsih_get_resync(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); + struct _raid_device *raid_device; + unsigned long flags; + Mpi2RaidVolPage0_t vol_pg0; + Mpi2ConfigReply_t mpi_reply; + u32 volume_status_flags; + u8 percent_complete; + u16 handle; + + percent_complete = 0; + handle = 0; + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, + sdev->channel); + if (raid_device) { + handle = raid_device->handle; + percent_complete = raid_device->percent_complete; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (!handle) + goto out; + + if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, + MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + sizeof(Mpi2RaidVolPage0_t))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + percent_complete = 0; + goto out; + } + + volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags); + if (!(volume_status_flags & + MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS)) + percent_complete = 0; + + out: + raid_set_resync(mpt3sas_raid_template, dev, percent_complete); +} + +/** + * _scsih_get_state - get raid volume state + * @dev: the device struct object + */ +static void +_scsih_get_state(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host); + struct _raid_device *raid_device; + unsigned long flags; + Mpi2RaidVolPage0_t vol_pg0; + Mpi2ConfigReply_t mpi_reply; + u32 volstate; + enum raid_state state = RAID_STATE_UNKNOWN; + u16 handle = 0; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id, + sdev->channel); + if (raid_device) + handle = raid_device->handle; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (!raid_device) + goto out; + + if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0, + MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + sizeof(Mpi2RaidVolPage0_t))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags); + if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) { + state = RAID_STATE_RESYNCING; + goto out; + } + + switch (vol_pg0.VolumeState) { + case MPI2_RAID_VOL_STATE_OPTIMAL: + case MPI2_RAID_VOL_STATE_ONLINE: + state = RAID_STATE_ACTIVE; + break; + case MPI2_RAID_VOL_STATE_DEGRADED: + state = RAID_STATE_DEGRADED; + break; + case MPI2_RAID_VOL_STATE_FAILED: + case MPI2_RAID_VOL_STATE_MISSING: + state = RAID_STATE_OFFLINE; + break; + } + out: + raid_set_state(mpt3sas_raid_template, dev, state); +} + +/** + * _scsih_set_level - set raid level + * @sdev: scsi device struct + * @volume_type: volume type + */ +static void +_scsih_set_level(struct scsi_device *sdev, u8 volume_type) +{ + enum raid_level level = RAID_LEVEL_UNKNOWN; + + switch (volume_type) { + case MPI2_RAID_VOL_TYPE_RAID0: + level = RAID_LEVEL_0; + break; + case MPI2_RAID_VOL_TYPE_RAID10: + level = RAID_LEVEL_10; + break; + case MPI2_RAID_VOL_TYPE_RAID1E: + level = RAID_LEVEL_1E; + break; + case MPI2_RAID_VOL_TYPE_RAID1: + level = RAID_LEVEL_1; + break; + } + + raid_set_level(mpt3sas_raid_template, &sdev->sdev_gendev, level); +} + + +/** + *
_scsih_get_volume_capabilities - volume capabilities + * @ioc: per adapter object + * @raid_device: the raid_device object + * + * Returns 0 for success, else 1 + */ +static int +_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc, + struct _raid_device *raid_device) +{ + Mpi2RaidVolPage0_t *vol_pg0; + Mpi2RaidPhysDiskPage0_t pd_pg0; + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 sz; + u8 num_pds; + + if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle, + &num_pds)) || !num_pds) { + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + + raid_device->num_pds = num_pds; + sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds * + sizeof(Mpi2RaidVol0PhysDisk_t)); + vol_pg0 = kzalloc(sz, GFP_KERNEL); + if (!vol_pg0) { + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + + if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0, + MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) { + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, + __func__)); + kfree(vol_pg0); + return 1; + } + + raid_device->volume_type = vol_pg0->VolumeType; + + /* figure out what the underlying devices are by + * obtaining the device_info bits for the 1st device + */ + if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, + vol_pg0->PhysDisk[0].PhysDiskNum))) { + if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + le16_to_cpu(pd_pg0.DevHandle)))) { + raid_device->device_info = + le32_to_cpu(sas_device_pg0.DeviceInfo); + } + } + + kfree(vol_pg0); + return 0; +} + + + +/** + * _scsih_enable_tlr - setting TLR flags + * @ioc: per adapter object + * @sdev: scsi device struct + * + * Enabling Transaction Layer Retries for tape devices when + * vpd page 0x90 is present + * + */ +static void +_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev) +{ + + /* only for TAPE */ + if (sdev->type != TYPE_TAPE) + return; + + if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)) + return; + + sas_enable_tlr(sdev); + sdev_printk(KERN_INFO, sdev, "TLR %s\n", + sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled"); + return; + +} + +/** + * _scsih_slave_configure - device configure routine. + * @sdev: scsi device struct + * + * Returns 0 if ok. Any other return is assumed to be an error and + * the device is ignored.
+ */ +static int +_scsih_slave_configure(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + unsigned long flags; + int qdepth; + u8 ssp_target = 0; + char *ds = ""; + char *r_level = ""; + u16 handle, volume_handle = 0; + u64 volume_wwid = 0; + + qdepth = 1; + sas_device_priv_data = sdev->hostdata; + sas_device_priv_data->configured_lun = 1; + sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT; + sas_target_priv_data = sas_device_priv_data->sas_target; + handle = sas_target_priv_data->handle; + + /* raid volume handling */ + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) { + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (!raid_device) { + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, + __LINE__, __func__)); + return 1; + } + + if (_scsih_get_volume_capabilities(ioc, raid_device)) { + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, + __LINE__, __func__)); + return 1; + } + + + /* RAID Queue Depth Support + * IS volume = underlying qdepth of drive type, either + * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH + * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH) + */ + if (raid_device->device_info & + MPI2_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + ds = "SSP"; + } else { + qdepth = MPT3SAS_SATA_QUEUE_DEPTH; + if (raid_device->device_info & + MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + else + ds = "STP"; + } + + switch (raid_device->volume_type) { + case MPI2_RAID_VOL_TYPE_RAID0: + r_level = "RAID0"; + break; + case MPI2_RAID_VOL_TYPE_RAID1E: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + if (ioc->manu_pg10.OEMIdentifier && + (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & + MFG10_GF0_R10_DISPLAY) && + !(raid_device->num_pds % 2)) + r_level = "RAID10"; + else + r_level = "RAID1E"; + break; + case MPI2_RAID_VOL_TYPE_RAID1: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + r_level = "RAID1"; + break; + case MPI2_RAID_VOL_TYPE_RAID10: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + r_level = "RAID10"; + break; + case MPI2_RAID_VOL_TYPE_UNKNOWN: + default: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + r_level = "RAIDX"; + break; + } + + sdev_printk(KERN_INFO, sdev, + "%s: handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n", + r_level, raid_device->handle, + (unsigned long long)raid_device->wwid, + raid_device->num_pds, ds); + + + _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); + +/* raid transport support */ + _scsih_set_level(sdev, raid_device->volume_type); + return 0; + } + + /* non-raid handling */ + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { + if (mpt3sas_config_get_volume_handle(ioc, handle, + &volume_handle)) { + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + return 1; + } + if (volume_handle && mpt3sas_config_get_volume_wwid(ioc, + volume_handle, &volume_wwid)) { + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + return 1; + } + } + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + 
sas_device_priv_data->sas_target->sas_address); + if (!sas_device) { + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + + sas_device->volume_handle = volume_handle; + sas_device->volume_wwid = volume_wwid; + if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + ssp_target = 1; + ds = "SSP"; + } else { + qdepth = MPT3SAS_SATA_QUEUE_DEPTH; + if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) + ds = "STP"; + else if (sas_device->device_info & + MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + } + + sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \ + "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", + ds, handle, (unsigned long long)sas_device->sas_address, + sas_device->phy, (unsigned long long)sas_device->device_name); + sdev_printk(KERN_INFO, sdev, + "%s: enclosure_logical_id(0x%016llx), slot(%d)\n", + ds, (unsigned long long) + sas_device->enclosure_logical_id, sas_device->slot); + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (!ssp_target) + _scsih_display_sata_capabilities(ioc, handle, sdev); + + + _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT); + + if (ssp_target) { + sas_read_port_mode_page(sdev); + _scsih_enable_tlr(ioc, sdev); + } + + return 0; +} + +/** + * _scsih_bios_param - fetch head, sector, cylinder info for a disk + * @sdev: scsi device struct + * @bdev: pointer to block device context + * @capacity: device size (in 512 byte sectors) + * @params: three element array to place output: + * params[0] number of heads (max 255) + * params[1] number of sectors (max 63) + * params[2] number of cylinders + * + * Return nothing. + */ +static int +_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int params[]) +{ + int heads; + int sectors; + sector_t cylinders; + ulong dummy; + + heads = 64; + sectors = 32; + + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + + /* + * Handle extended translation size for logical drives + * > 1Gb + */ + if ((ulong)capacity >= 0x200000) { + heads = 255; + sectors = 63; + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + } + + /* return result */ + params[0] = heads; + params[1] = sectors; + params[2] = cylinders; + + return 0; +} + +/** + * _scsih_response_code - translation of device response code + * @ioc: per adapter object + * @response_code: response code returned by the device + * + * Return nothing. 
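+ * + * Editor's note: within this file the helper is reached from + * mpt3sas_scsih_issue_tm() when MPT_DEBUG_TM logging is enabled: + * + *	if (ioc->logging_level & MPT_DEBUG_TM) + *		_scsih_response_code(ioc, mpi_reply->ResponseCode);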
+ */ +static void +_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code) +{ + char *desc; + + switch (response_code) { + case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE: + desc = "task management request completed"; + break; + case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME: + desc = "invalid frame"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED: + desc = "task management request not supported"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_FAILED: + desc = "task management request failed"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED: + desc = "task management request succeeded"; + break; + case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN: + desc = "invalid lun"; + break; + case 0xA: + desc = "overlapped tag attempted"; + break; + case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC: + desc = "task queued, however not sent to target"; + break; + default: + desc = "unknown"; + break; + } + pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n", + ioc->name, response_code, desc); +} + +/** + * _scsih_tm_done - tm completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: none. + * + * The callback handler when using scsih_issue_tm. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +static u8 +_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED) + return 1; + if (ioc->tm_cmds.smid != smid) + return 1; + mpt3sas_base_flush_reply_queues(ioc); + ioc->tm_cmds.status |= MPT3_CMD_COMPLETE; + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); + ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID; + } + ioc->tm_cmds.status &= ~MPT3_CMD_PENDING; + complete(&ioc->tm_cmds.done); + return 1; +} + +/** + * mpt3sas_scsih_set_tm_flag - set per target tm_busy + * @ioc: per adapter object + * @handle: device handle + * + * During task management requests, we need to freeze the device queue. + */ +void +mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + u8 skip = 0; + + shost_for_each_device(sdev, ioc->shost) { + if (skip) + continue; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle == handle) { + sas_device_priv_data->sas_target->tm_busy = 1; + skip = 1; + ioc->ignore_loginfos = 1; + } + } +} + +/** + * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy + * @ioc: per adapter object + * @handle: device handle + * + * During task management requests, we need to freeze the device queue.
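+ * + * Editor's sketch of the pairing with mpt3sas_scsih_set_tm_flag(), as done + * by mpt3sas_scsih_issue_tm() below: + * + *	mpt3sas_scsih_set_tm_flag(ioc, handle); + *	(issue the task management request and wait for its completion) + *	mpt3sas_scsih_clear_tm_flag(ioc, handle);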
+ */ +void +mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + u8 skip = 0; + + shost_for_each_device(sdev, ioc->shost) { + if (skip) + continue; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle == handle) { + sas_device_priv_data->sas_target->tm_busy = 0; + skip = 1; + ioc->ignore_loginfos = 0; + } + } +} + +/** + * mpt3sas_scsih_issue_tm - main routine for sending tm requests + * @ioc: per adapter struct + * @device_handle: device handle + * @channel: the channel assigned by the OS + * @id: the id assigned by the OS + * @lun: lun number + * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) + * @smid_task: smid assigned to the task + * @timeout: timeout in seconds + * @serial_number: the serial_number from scmd + * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF + * Context: user + * + * A generic API for sending task management requests to firmware. + * + * The callback index is set inside `ioc->tm_cb_idx`. + * + * Return SUCCESS or FAILED. + */ +int +mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, + uint id, uint lun, u8 type, u16 smid_task, ulong timeout, + unsigned long serial_number, enum mutex_type m_type) +{ + Mpi2SCSITaskManagementRequest_t *mpi_request; + Mpi2SCSITaskManagementReply_t *mpi_reply; + u16 smid = 0; + u32 ioc_state; + unsigned long timeleft; + struct scsiio_tracker *scsi_lookup = NULL; + int rc; + + if (m_type == TM_MUTEX_ON) + mutex_lock(&ioc->tm_cmds.mutex); + if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) { + pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n", + __func__, ioc->name); + rc = FAILED; + goto err_out; + } + + if (ioc->shost_recovery || ioc->remove_host || + ioc->pci_error_recovery) { + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + rc = FAILED; + goto err_out; + } + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + if (ioc_state & MPI2_DOORBELL_USED) { + dhsprintk(ioc, pr_info(MPT3SAS_FMT + "unexpected doorbell active!\n", ioc->name)); + rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + rc = (!rc) ? SUCCESS : FAILED; + goto err_out; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_base_fault_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + rc = (!rc) ? 
SUCCESS : FAILED; + goto err_out; + } + + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = FAILED; + goto err_out; + } + + if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + scsi_lookup = &ioc->scsi_lookup[smid_task - 1]; + + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n", + ioc->name, handle, type, smid_task)); + ioc->tm_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->tm_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); + memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = type; + mpi_request->TaskMID = cpu_to_le16(smid_task); + int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); + mpt3sas_scsih_set_tm_flag(ioc, handle); + init_completion(&ioc->tm_cmds.done); + mpt3sas_base_put_smid_hi_priority(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); + if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SCSITaskManagementRequest_t)/4); + if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) { + rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + rc = (!rc) ? SUCCESS : FAILED; + ioc->tm_cmds.status = MPT3_CMD_NOT_USED; + mpt3sas_scsih_clear_tm_flag(ioc, handle); + goto err_out; + } + } + + if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) { + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); + mpi_reply = ioc->tm_cmds.reply; + dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \ + "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + if (ioc->logging_level & MPT_DEBUG_TM) { + _scsih_response_code(ioc, mpi_reply->ResponseCode); + if (mpi_reply->IOCStatus) + _debug_dump_mf(mpi_request, + sizeof(Mpi2SCSITaskManagementRequest_t)/4); + } + } + + switch (type) { + case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + rc = SUCCESS; + if (scsi_lookup->scmd == NULL) + break; + rc = FAILED; + break; + + case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + if (_scsih_scsi_lookup_find_by_target(ioc, id, channel)) + rc = FAILED; + else + rc = SUCCESS; + break; + case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel)) + rc = FAILED; + else + rc = SUCCESS; + break; + case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: + rc = SUCCESS; + break; + default: + rc = FAILED; + break; + } + + mpt3sas_scsih_clear_tm_flag(ioc, handle); + ioc->tm_cmds.status = MPT3_CMD_NOT_USED; + if (m_type == TM_MUTEX_ON) + mutex_unlock(&ioc->tm_cmds.mutex); + + return rc; + + err_out: + if (m_type == TM_MUTEX_ON) + mutex_unlock(&ioc->tm_cmds.mutex); + return rc; +} + +/** + * _scsih_tm_display_info - displays info about the device + * @ioc: per adapter struct + * @scmd: pointer to scsi command object + * + * Called by task management callback handlers. 
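+ * + * Editor's note: the callers are the error-handler entry points in this + * file - _scsih_abort(), _scsih_dev_reset() and _scsih_target_reset().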
+ */ +static void +_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) +{ + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *priv_target = starget->hostdata; + struct _sas_device *sas_device = NULL; + unsigned long flags; + char *device_str = NULL; + + if (!priv_target) + return; + device_str = "volume"; + + scsi_print_command(scmd); + if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { + starget_printk(KERN_INFO, starget, + "%s handle(0x%04x), %s wwid(0x%016llx)\n", + device_str, priv_target->handle, + device_str, (unsigned long long)priv_target->sas_address); + } else { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + priv_target->sas_address); + if (sas_device) { + if (priv_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + starget_printk(KERN_INFO, starget, + "volume handle(0x%04x), " + "volume wwid(0x%016llx)\n", + sas_device->volume_handle, + (unsigned long long)sas_device->volume_wwid); + } + starget_printk(KERN_INFO, starget, + "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", + sas_device->handle, + (unsigned long long)sas_device->sas_address, + sas_device->phy); + starget_printk(KERN_INFO, starget, + "enclosure_logical_id(0x%016llx), slot(%d)\n", + (unsigned long long)sas_device->enclosure_logical_id, + sas_device->slot); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } +} + +/** + * _scsih_abort - eh threads main abort routine + * @scmd: pointer to scsi command object + * + * Returns SUCCESS if command aborted else FAILED + */ +static int +_scsih_abort(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + u16 smid; + u16 handle; + int r; + + sdev_printk(KERN_INFO, scmd->device, + "attempting task abort! scmd(%p)\n", scmd); + _scsih_tm_display_info(ioc, scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! scmd(%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* search for the command */ + smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd); + if (!smid) { + scmd->result = DID_RESET << 16; + r = SUCCESS; + goto out; + } + + /* for hidden raid components and volumes this is not supported */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT || + sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + + mpt3sas_halt_firmware(ioc); + + handle = sas_device_priv_data->sas_target->handle; + r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, + scmd->device->id, scmd->device->lun, + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, + scmd->serial_number, TM_MUTEX_ON); + + out: + sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", + ((r == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); + return r; +} + +/** + * _scsih_dev_reset - eh threads main device reset routine + * @scmd: pointer to scsi command object + * + * Returns SUCCESS if command aborted else FAILED + */ +static int +_scsih_dev_reset(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct _sas_device *sas_device; + unsigned long flags; + u16 handle; + int r; + + sdev_printk(KERN_INFO, scmd->device, + "attempting device reset! scmd(%p)\n", scmd); + _scsih_tm_display_info(ioc, scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! scmd(%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* for hidden raid components obtain the volume_handle */ + handle = 0; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = _scsih_sas_device_find_by_handle(ioc, + sas_device_priv_data->sas_target->handle); + if (sas_device) + handle = sas_device->volume_handle; + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } else + handle = sas_device_priv_data->sas_target->handle; + + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + + r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, + scmd->device->id, scmd->device->lun, + MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0, + TM_MUTEX_ON); + + out: + sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + return r; +} + +/** + * _scsih_target_reset - eh threads main target reset routine + * @scmd: pointer to scsi command object + * + * Returns SUCCESS if command aborted else FAILED + */ +static int +_scsih_target_reset(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct _sas_device *sas_device; + unsigned long flags; + u16 handle; + int r; + struct scsi_target *starget = scmd->device->sdev_target; + + starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n", + scmd); + _scsih_tm_display_info(ioc, scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n", + scmd); + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* for hidden raid components obtain the volume_handle */ + handle = 0; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = _scsih_sas_device_find_by_handle(ioc, + sas_device_priv_data->sas_target->handle); + if (sas_device) + handle = sas_device->volume_handle; + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } else + handle = sas_device_priv_data->sas_target->handle; + + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + + r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, + scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, + 30, 0, TM_MUTEX_ON); + + out: + starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", + ((r == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); + return r; +} + + +/** + * _scsih_host_reset - eh threads main host reset routine + * @scmd: pointer to scsi command object + * + * Returns SUCCESS if command aborted else FAILED + */ +static int +_scsih_host_reset(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + int r, retval; + + pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n", + ioc->name, scmd); + scsi_print_command(scmd); + + retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + r = (retval < 0) ? FAILED : SUCCESS; + pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n", + ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + + return r; +} + +/** + * _scsih_fw_event_add - insert and queue up fw_event + * @ioc: per adapter object + * @fw_event: object describing the event + * Context: This function will acquire ioc->fw_event_lock. + * + * This adds the firmware event object into link list, then queues it up to + * be processed from user context. + * + * Return nothing. + */ +static void +_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) +{ + unsigned long flags; + + if (ioc->firmware_event_thread == NULL) + return; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + INIT_LIST_HEAD(&fw_event->list); + list_add_tail(&fw_event->list, &ioc->fw_event_list); + INIT_WORK(&fw_event->work, _firmware_event_work); + queue_work(ioc->firmware_event_thread, &fw_event->work); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +/** + * _scsih_fw_event_free - delete fw_event + * @ioc: per adapter object + * @fw_event: object describing the event + * Context: This function will acquire ioc->fw_event_lock. + * + * This removes firmware event object from link list, frees associated memory. + * + * Return nothing. + */ +static void +_scsih_fw_event_free(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work + *fw_event) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + list_del(&fw_event->list); + kfree(fw_event->event_data); + kfree(fw_event); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + + + /** + * mpt3sas_send_trigger_data_event - send event for processing trigger data + * @ioc: per adapter object + * @event_data: trigger event data + * + * Return nothing. + */ +void +mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) +{ + struct fw_event_work *fw_event; + + if (ioc->is_driver_loading) + return; + fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); + if (!fw_event) + return; + fw_event->event_data = kzalloc(sizeof(*event_data), GFP_ATOMIC); + if (!fw_event->event_data) + return; + fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG; + fw_event->ioc = ioc; + memcpy(fw_event->event_data, event_data, sizeof(*event_data)); + _scsih_fw_event_add(ioc, fw_event); +} + +/** + * _scsih_error_recovery_delete_devices - remove devices not responding + * @ioc: per adapter object + * + * Return nothing. + */ +static void +_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc) +{ + struct fw_event_work *fw_event; + + if (ioc->is_driver_loading) + return; + fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); + if (!fw_event) + return; + fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); +} + +/** + * mpt3sas_port_enable_complete - port enable completed (fake event) + * @ioc: per adapter object + * + * Return nothing. 
+ */ +void +mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc) +{ + struct fw_event_work *fw_event; + + fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); + if (!fw_event) + return; + fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); +} + +/** + * _scsih_fw_event_cleanup_queue - cleanup event queue + * @ioc: per adapter object + * + * Walk the firmware event queue, either killing timers, or waiting + * for outstanding events to complete + * + * Return nothing. + */ +static void +_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) +{ + struct fw_event_work *fw_event, *next; + + if (list_empty(&ioc->fw_event_list) || + !ioc->firmware_event_thread || in_interrupt()) + return; + + list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) { + if (cancel_delayed_work(&fw_event->delayed_work)) { + _scsih_fw_event_free(ioc, fw_event); + continue; + } + fw_event->cancel_pending_work = 1; + } +} + +/** + * _scsih_ublock_io_all_device - unblock every device + * @ioc: per adapter object + * + * change the device state from block to running + */ +static void +_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (!sas_device_priv_data->block) + continue; + + sas_device_priv_data->block = 0; + dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, + "device_running, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle)); + scsi_internal_device_unblock(sdev, SDEV_RUNNING); + } +} + + +/** + * _scsih_ublock_io_device - unblock a device + * @ioc: per adapter object + * @sas_address: sas address + * + * change the device state from SDEV_BLOCK back to running + */ +static void +_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->sas_address + != sas_address) + continue; + if (sas_device_priv_data->block) { + sas_device_priv_data->block = 0; + scsi_internal_device_unblock(sdev, SDEV_RUNNING); + } + } +} + +/** + * _scsih_block_io_all_device - set the device state to SDEV_BLOCK + * @ioc: per adapter object + * + * During device pull we need to appropriately set the sdev state. + */ +static void +_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->block) + continue; + sas_device_priv_data->block = 1; + scsi_internal_device_block(sdev); + sdev_printk(KERN_INFO, sdev, "device_blocked, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + } +} + +/** + * _scsih_block_io_device - set the device state to SDEV_BLOCK + * @ioc: per adapter object + * @handle: device handle + * + * During device pull we need to appropriately set the sdev state.
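+ * + * Editor's note: the block is requested from + * _scsih_block_io_to_children_attached_directly() below on a + * MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING reason code, and is undone by + * _scsih_ublock_io_device(), e.g. from _scsih_tm_tr_send() during the + * device removal handshake.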
+ */ +static void +_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle != handle) + continue; + if (sas_device_priv_data->block) + continue; + sas_device_priv_data->block = 1; + scsi_internal_device_block(sdev); + sdev_printk(KERN_INFO, sdev, + "device_blocked, handle(0x%04x)\n", handle); + } +} + +/** + * _scsih_block_io_to_children_attached_to_ex + * @ioc: per adapter object + * @sas_expander: the expander object + * + * This routine sets the sdev state to SDEV_BLOCK for all devices + * attached to this expander. It is called when the expander is + * pulled. + */ +static void +_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander) +{ + struct _sas_port *mpt3sas_port; + struct _sas_device *sas_device; + struct _sas_node *expander_sibling; + unsigned long flags; + + if (!sas_expander) + return; + + list_for_each_entry(mpt3sas_port, + &sas_expander->sas_port_list, port_list) { + if (mpt3sas_port->remote_identify.device_type == + SAS_END_DEVICE) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = + mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + mpt3sas_port->remote_identify.sas_address); + if (sas_device) + set_bit(sas_device->handle, + ioc->blocking_handles); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + } + + list_for_each_entry(mpt3sas_port, + &sas_expander->sas_port_list, port_list) { + + if (mpt3sas_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + mpt3sas_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) { + expander_sibling = + mpt3sas_scsih_expander_find_by_sas_address( + ioc, mpt3sas_port->remote_identify.sas_address); + _scsih_block_io_to_children_attached_to_ex(ioc, + expander_sibling); + } + } +} + +/** + * _scsih_block_io_to_children_attached_directly + * @ioc: per adapter object + * @event_data: topology change event data + * + * This routine sets the sdev state to SDEV_BLOCK for all devices + * directly attached during a device pull. + */ +static void +_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasTopologyChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_RC_MASK; + if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) + _scsih_block_io_device(ioc, handle); + } +} + +/** + * _scsih_tm_tr_send - send task management request + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt time. + * + * This code is to initiate the device removal handshake protocol + * with controller firmware. This function will issue a target reset + * using the high priority request queue. It will send a sas iounit + * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion. + * + * This is designed to send multiple task management requests at the same + * time to the fifo. If the fifo is full, we will append the request + * and process it in a future completion.
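When an entire expander goes away, blocking each child inline would mean doing SCSI state changes while holding ioc->sas_device_lock, so the recursive walk above only records the affected handles in the ioc->blocking_handles bitmap; a separate loop drains it afterwards (see _scsih_check_topo_delete_events further down). A bounds-safe sketch of that set-then-drain idiom, with illustrative names and sizes:

#include <linux/bitmap.h>

#define DEMO_MAX_HANDLE 1024
static DECLARE_BITMAP(demo_blocking, DEMO_MAX_HANDLE);

/* Under the lock: cheap, only records the handle. */
static void demo_mark_blocking(u16 handle)
{
    set_bit(handle, demo_blocking);
}

/* Outside the lock: performs the heavier per-device blocking. */
static void demo_drain_blocking(void (*block_one)(u16 handle))
{
    unsigned long handle;

    for (;;) {
        handle = find_first_bit(demo_blocking, DEMO_MAX_HANDLE);
        if (handle >= DEMO_MAX_HANDLE)
            break;              /* bitmap empty */
        clear_bit(handle, demo_blocking);
        block_one(handle);
    }
}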
+ */ +static void +_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + Mpi2SCSITaskManagementRequest_t *mpi_request; + u16 smid; + struct _sas_device *sas_device; + struct MPT3SAS_TARGET *sas_target_priv_data = NULL; + u64 sas_address = 0; + unsigned long flags; + struct _tr_list *delayed_tr; + u32 ioc_state; + + if (ioc->remove_host) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host has been removed: handle(0x%04x)\n", + __func__, ioc->name, handle)); + return; + } else if (ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host in pci error recovery: handle(0x%04x)\n", + __func__, ioc->name, + handle)); + return; + } + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host is not operational: handle(0x%04x)\n", + __func__, ioc->name, + handle)); + return; + } + + /* if PD, then return */ + if (test_bit(handle, ioc->pd_handles)) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = _scsih_sas_device_find_by_handle(ioc, handle); + if (sas_device && sas_device->starget && + sas_device->starget->hostdata) { + sas_target_priv_data = sas_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + sas_address = sas_device->sas_address; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (sas_target_priv_data) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, + (unsigned long long)sas_address)); + _scsih_ublock_io_device(ioc, sas_address); + sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; + } + + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); + if (!smid) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + if (!delayed_tr) + return; + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + return; + } + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, smid, + ioc->tm_tr_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + mpt3sas_base_put_smid_hi_priority(ioc, smid); + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL); +} + +/** + * _scsih_tm_tr_complete - + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt time. + * + * This is the target reset completion routine. + * This code is part of the code to initiate the device removal + * handshake protocol with controller firmware. + * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE) + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
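_scsih_tm_tr_send() above degrades gracefully when the high-priority smid pool runs dry: the handle is parked on ioc->delayed_tr_list and replayed later from a completion (see _scsih_check_for_pending_tm below). Its skeleton, with hypothetical helpers that are only declared here:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_tr {
    struct list_head list;
    u16 handle;
};
static LIST_HEAD(demo_delayed_tr);

static u16 demo_try_get_slot(void);                 /* hypothetical smid allocator */
static void demo_issue_reset(u16 smid, u16 handle); /* hypothetical send path */

static void demo_tr_send(u16 handle)
{
    u16 smid = demo_try_get_slot();

    if (!smid) {
        struct demo_tr *tr = kzalloc(sizeof(*tr), GFP_ATOMIC);

        if (!tr)
            return;             /* best effort: dropped on OOM */
        tr->handle = handle;
        list_add_tail(&tr->list, &demo_delayed_tr);
        return;                 /* a later completion replays this entry */
    }
    demo_issue_reset(smid, handle);
}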
+ */ +static u8 +_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + u16 handle; + Mpi2SCSITaskManagementRequest_t *mpi_request_tm; + Mpi2SCSITaskManagementReply_t *mpi_reply = + mpt3sas_base_get_reply_virt_addr(ioc, reply); + Mpi2SasIoUnitControlRequest_t *mpi_request; + u16 smid_sas_ctrl; + u32 ioc_state; + + if (ioc->remove_host) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host has been removed\n", __func__, ioc->name)); + return 1; + } else if (ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host in pci error recovery\n", __func__, + ioc->name)); + return 1; + } + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host is not operational\n", __func__, ioc->name)); + return 1; + } + if (unlikely(!mpi_reply)) { + pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 1; + } + mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); + handle = le16_to_cpu(mpi_request_tm->DevHandle); + if (handle != le16_to_cpu(mpi_reply->DevHandle)) { + dewtprintk(ioc, pr_err(MPT3SAS_FMT + "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", + ioc->name, handle, + le16_to_cpu(mpi_reply->DevHandle), smid)); + return 0; + } + + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), " + "loginfo(0x%08x), completed(%d)\n", ioc->name, + handle, smid, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + + smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx); + if (!smid_sas_ctrl) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + return 1; + } + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, smid_sas_ctrl, + ioc->tm_sas_control_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl); + memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; + mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; + mpi_request->DevHandle = mpi_request_tm->DevHandle; + mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl); + + return _scsih_check_for_pending_tm(ioc, smid); +} + + +/** + * _scsih_sas_control_complete - completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt time. + * + * This is the sas iounit control completion routine. + * This code is part of the code to initiate the device removal + * handshake protocol with controller firmware. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
+ */ +static u8 +_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + Mpi2SasIoUnitControlReply_t *mpi_reply = + mpt3sas_base_get_reply_virt_addr(ioc, reply); + + if (likely(mpi_reply)) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "sc_complete:handle(0x%04x), (open) " + "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo))); + } else { + pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + } + return 1; +} + +/** + * _scsih_tm_tr_volume_send - send target reset request for volumes + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt time. + * + * This is designed to send multiple task management requests at the same + * time to the fifo. If the fifo is full, we will append the request + * and process it in a future completion. + */ +static void +_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + Mpi2SCSITaskManagementRequest_t *mpi_request; + u16 smid; + struct _tr_list *delayed_tr; + + if (ioc->shost_recovery || ioc->remove_host || + ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host reset in progress!\n", + __func__, ioc->name)); + return; + } + + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx); + if (!smid) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + if (!delayed_tr) + return; + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list); + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + return; + } + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, smid, + ioc->tm_tr_volume_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + mpt3sas_base_put_smid_hi_priority(ioc, smid); +} + +/** + * _scsih_tm_volume_tr_complete - target reset completion + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt time. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function.
+ */ +static u8 +_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + u16 handle; + Mpi2SCSITaskManagementRequest_t *mpi_request_tm; + Mpi2SCSITaskManagementReply_t *mpi_reply = + mpt3sas_base_get_reply_virt_addr(ioc, reply); + + if (ioc->shost_recovery || ioc->remove_host || + ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host reset in progress!\n", + __func__, ioc->name)); + return 1; + } + if (unlikely(!mpi_reply)) { + pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 1; + } + + mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); + handle = le16_to_cpu(mpi_request_tm->DevHandle); + if (handle != le16_to_cpu(mpi_reply->DevHandle)) { + dewtprintk(ioc, pr_err(MPT3SAS_FMT + "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", + ioc->name, handle, + le16_to_cpu(mpi_reply->DevHandle), smid)); + return 0; + } + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), " + "loginfo(0x%08x), completed(%d)\n", ioc->name, + handle, smid, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + + return _scsih_check_for_pending_tm(ioc, smid); +} + + +/** + * _scsih_check_for_pending_tm - check for pending task management + * @ioc: per adapter object + * @smid: system request message index + * + * This will check the delayed target reset lists and feed the + * next request. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +static u8 +_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct _tr_list *delayed_tr; + + if (!list_empty(&ioc->delayed_tr_volume_list)) { + delayed_tr = list_entry(ioc->delayed_tr_volume_list.next, + struct _tr_list, list); + mpt3sas_base_free_smid(ioc, smid); + _scsih_tm_tr_volume_send(ioc, delayed_tr->handle); + list_del(&delayed_tr->list); + kfree(delayed_tr); + return 0; + } + + if (!list_empty(&ioc->delayed_tr_list)) { + delayed_tr = list_entry(ioc->delayed_tr_list.next, + struct _tr_list, list); + mpt3sas_base_free_smid(ioc, smid); + _scsih_tm_tr_send(ioc, delayed_tr->handle); + list_del(&delayed_tr->list); + kfree(delayed_tr); + return 0; + } + + return 1; +} + +/** + * _scsih_check_topo_delete_events - sanity check on topo events + * @ioc: per adapter object + * @event_data: the event data payload + * + * This routine was added to better handle cable breaks. + * + * This handles the case where the driver receives multiple expander + * add and delete events in a single shot. When there is a delete event + * the routine will void any pending add events waiting in the event queue. + * + * Return nothing.
+ */ +static void +_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasTopologyChangeList_t *event_data) +{ + struct fw_event_work *fw_event; + Mpi2EventDataSasTopologyChangeList_t *local_event_data; + u16 expander_handle; + struct _sas_node *sas_expander; + unsigned long flags; + int i, reason_code; + u16 handle; + + for (i = 0 ; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_RC_MASK; + if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING) + _scsih_tm_tr_send(ioc, handle); + } + + expander_handle = le16_to_cpu(event_data->ExpanderDevHandle); + if (expander_handle < ioc->sas_hba.num_phys) { + _scsih_block_io_to_children_attached_directly(ioc, event_data); + return; + } + if (event_data->ExpStatus == + MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) { + /* put expander attached devices into blocking state */ + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc, + expander_handle); + _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + do { + handle = find_first_bit(ioc->blocking_handles, + ioc->facts.MaxDevHandle); + if (handle < ioc->facts.MaxDevHandle) + _scsih_block_io_device(ioc, handle); + } while (test_and_clear_bit(handle, ioc->blocking_handles)); + } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING) + _scsih_block_io_to_children_attached_directly(ioc, event_data); + + if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) + return; + + /* mark ignore flag for pending events */ + spin_lock_irqsave(&ioc->fw_event_lock, flags); + list_for_each_entry(fw_event, &ioc->fw_event_list, list) { + if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || + fw_event->ignore) + continue; + local_event_data = fw_event->event_data; + if (local_event_data->ExpStatus == + MPI2_EVENT_SAS_TOPO_ES_ADDED || + local_event_data->ExpStatus == + MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { + if (le16_to_cpu(local_event_data->ExpanderDevHandle) == + expander_handle) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "setting ignoring flag\n", ioc->name)); + fw_event->ignore = 1; + } + } + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +/** + * _scsih_set_volume_delete_flag - setting volume delete flag + * @ioc: per adapter object + * @handle: device handle + * + * This returns nothing. + */ +static void +_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _raid_device *raid_device; + struct MPT3SAS_TARGET *sas_target_priv_data; + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_handle(ioc, handle); + if (raid_device && raid_device->starget && + raid_device->starget->hostdata) { + sas_target_priv_data = + raid_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "setting delete flag: handle(0x%04x), " + "wwid(0x%016llx)\n", ioc->name, handle, + (unsigned long long) raid_device->wwid)); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +/** + * _scsih_set_volume_handle_for_tr - set handle for target reset to volume + * @handle: input handle + * @a: handle for volume a + * @b: handle for volume b + * + * IR firmware only supports two raid volumes. 
The purpose of this + * routine is to store the volume handle in either a or b; the handle is + * stored only when it is non-zero and when a or b has not been set before. + */ +static void +_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b) +{ + if (!handle || handle == *a || handle == *b) + return; + if (!*a) + *a = handle; + else if (!*b) + *b = handle; +} + +/** + * _scsih_check_ir_config_unhide_events - check for UNHIDE events + * @ioc: per adapter object + * @event_data: the event data payload + * Context: interrupt time. + * + * This routine will send target reset to volume, followed by target + * resets to the PDs. This is called when a PD has been removed, or a + * volume has been deleted or removed. When the target reset is sent + * to volume, the PD target resets need to be queued to start upon + * completion of the volume target reset. + * + * Return nothing. + */ +static void +_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrConfigChangeList_t *event_data) +{ + Mpi2EventIrConfigElement_t *element; + int i; + u16 handle, volume_handle, a, b; + struct _tr_list *delayed_tr; + + a = 0; + b = 0; + + /* Volume Resets for Deleted or Removed */ + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) + continue; + if (element->ReasonCode == + MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED || + element->ReasonCode == + MPI2_EVENT_IR_CHANGE_RC_REMOVED) { + volume_handle = le16_to_cpu(element->VolDevHandle); + _scsih_set_volume_delete_flag(ioc, volume_handle); + _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); + } + } + + /* Volume Resets for UNHIDE events */ + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) + continue; + if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) { + volume_handle = le16_to_cpu(element->VolDevHandle); + _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); + } + } + + if (a) + _scsih_tm_tr_volume_send(ioc, a); + if (b) + _scsih_tm_tr_volume_send(ioc, b); + + /* PD target resets */ + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE) + continue; + handle = le16_to_cpu(element->PhysDiskDevHandle); + volume_handle = le16_to_cpu(element->VolDevHandle); + clear_bit(handle, ioc->pd_handles); + if (!volume_handle) + _scsih_tm_tr_send(ioc, handle); + else if (volume_handle == a || volume_handle == b) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + BUG_ON(!delayed_tr); + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name, + handle)); + } else + _scsih_tm_tr_send(ioc, handle); + } +} + + +/** + * _scsih_check_volume_delete_events - set delete flag for volumes + * @ioc: per adapter object + * @event_data: the event data payload + * Context: interrupt time. + * + * This will handle the case when the cable connected to the entire volume + * is pulled. We will take care of setting the deleted flag so normal IO + * will not be sent. + * + * Return nothing.
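Because IR firmware exposes at most two RAID volumes, the helper above needs only two scalar slots rather than a list. Its logic can be exercised standalone (plain C, outside the driver):

#include <assert.h>
#include <stdint.h>

/* Standalone copy of the two-slot logic in _scsih_set_volume_handle_for_tr. */
static void set_volume_handle_for_tr(uint16_t handle, uint16_t *a, uint16_t *b)
{
    if (!handle || handle == *a || handle == *b)
        return;        /* zero or already recorded: nothing to do */
    if (!*a)
        *a = handle;
    else if (!*b)
        *b = handle;   /* a third distinct handle is silently ignored */
}

int main(void)
{
    uint16_t a = 0, b = 0;

    set_volume_handle_for_tr(0x11, &a, &b);
    set_volume_handle_for_tr(0x11, &a, &b);    /* duplicate: ignored */
    set_volume_handle_for_tr(0x22, &a, &b);
    assert(a == 0x11 && b == 0x22);
    return 0;
}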
+ */ +static void +_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrVolume_t *event_data) +{ + u32 state; + + if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) + return; + state = le32_to_cpu(event_data->NewValue); + if (state == MPI2_RAID_VOL_STATE_MISSING || state == + MPI2_RAID_VOL_STATE_FAILED) + _scsih_set_volume_delete_flag(ioc, + le16_to_cpu(event_data->VolDevHandle)); +} + +static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) +{ + struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; + + if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) + return 0; + + if (pending) + return test_and_set_bit(0, &priv->ata_command_pending); + + clear_bit(0, &priv->ata_command_pending); + return 0; +} + +/** + * _scsih_flush_running_cmds - completing outstanding commands. + * @ioc: per adapter object + * + * Flushes out all pending scmd commands following host reset; + * each dropped IO is completed back to the midlayer with an error. + * + * Return nothing. + */ +static void +_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) +{ + struct scsi_cmnd *scmd; + u16 smid; + u16 count = 0; + + for (smid = 1; smid <= ioc->scsiio_depth; smid++) { + scmd = _scsih_scsi_lookup_get_clear(ioc, smid); + if (!scmd) + continue; + count++; + _scsih_set_satl_pending(scmd, false); + mpt3sas_base_free_smid(ioc, smid); + scsi_dma_unmap(scmd); + if (ioc->pci_error_recovery) + scmd->result = DID_NO_CONNECT << 16; + else + scmd->result = DID_RESET << 16; + scmd->scsi_done(scmd); + } + dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n", + ioc->name, count)); +} + +/** + * _scsih_setup_eedp - setup MPI request for EEDP transfer + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * @mpi_request: pointer to the SCSI_IO request message frame + * + * Supports protection types 1, 2 and 3.
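The ata_command_pending bit set in _scsih_set_satl_pending() above is the driver's lockless SATL guard: at most one ATA_12/ATA_16 passthrough per device may be in flight, and the queuecommand path loops on test_and_set_bit() until it either wins the bit or observes a pending command and returns busy. The core handshake, with an illustrative device struct:

struct demo_dev {
    unsigned long ata_pending;    /* bit 0: an ATA command is in flight */
};

/* Returns false if another ATA passthrough already owns the device. */
static bool demo_satl_claim(struct demo_dev *dev)
{
    return !test_and_set_bit(0, &dev->ata_pending);
}

/* Called from the completion and flush paths. */
static void demo_satl_release(struct demo_dev *dev)
{
    clear_bit(0, &dev->ata_pending);
}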
+ * + * Returns nothing + */ +static void +_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + Mpi2SCSIIORequest_t *mpi_request) +{ + u16 eedp_flags; + unsigned char prot_op = scsi_get_prot_op(scmd); + unsigned char prot_type = scsi_get_prot_type(scmd); + Mpi25SCSIIORequest_t *mpi_request_3v = + (Mpi25SCSIIORequest_t *)mpi_request; + + if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL) + return; + + if (prot_op == SCSI_PROT_READ_STRIP) + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; + else if (prot_op == SCSI_PROT_WRITE_INSERT) + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; + else + return; + + switch (prot_type) { + case SCSI_PROT_DIF_TYPE1: + case SCSI_PROT_DIF_TYPE2: + + /* + * enable ref/guard checking + * auto increment ref tag + */ + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + mpi_request->CDB.EEDP32.PrimaryReferenceTag = + cpu_to_be32(scsi_get_lba(scmd)); + break; + + case SCSI_PROT_DIF_TYPE3: + + /* + * enable guard checking + */ + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + + break; + } + + mpi_request_3v->EEDPBlockSize = + cpu_to_le16(scmd->device->sector_size); + mpi_request->EEDPFlags = cpu_to_le16(eedp_flags); +} + +/** + * _scsih_eedp_error_handling - return sense code for EEDP errors + * @scmd: pointer to scsi command object + * @ioc_status: ioc status + * + * Returns nothing + */ +static void +_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) +{ + u8 ascq; + + switch (ioc_status) { + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + ascq = 0x01; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + ascq = 0x02; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + ascq = 0x03; + break; + default: + ascq = 0x00; + break; + } + scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10, + ascq); + scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) | + SAM_STAT_CHECK_CONDITION; +} + +/** + * _scsih_qcmd_lck - main scsi request entry point + * @scmd: pointer to scsi command object + * @done: function pointer to be invoked on completion + * + * The callback index is set inside `ioc->scsi_io_cb_idx`. + * + * Returns 0 on success. If there's a failure, return either: + * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or + * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full + */ +static int +_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + Mpi2SCSIIORequest_t *mpi_request; + u32 mpi_control; + u16 smid; + u16 handle; + +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING + if (ioc->logging_level & MPT_DEBUG_SCSI) + scsi_print_command(scmd); +#endif + + scmd->scsi_done = done; + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + return 0; + } + + if (ioc->pci_error_recovery || ioc->remove_host) { + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + return 0; + } + + /* + * Bug work around for firmware SATL handling. 
The loop + * is based on atomic operations and ensures consistency + * since we're lockless at this point + */ + do { + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { + scmd->result = SAM_STAT_BUSY; + scmd->scsi_done(scmd); + return 0; + } + } while (_scsih_set_satl_pending(scmd, true)); + + sas_target_priv_data = sas_device_priv_data->sas_target; + + /* invalid device handle */ + handle = sas_target_priv_data->handle; + if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) { + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + return 0; + } + + + /* host recovery or link resets sent via IOCTLs */ + if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) + return SCSI_MLQUEUE_HOST_BUSY; + + /* device has been deleted */ + else if (sas_target_priv_data->deleted) { + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + return 0; + /* device busy with task managment */ + } else if (sas_target_priv_data->tm_busy || + sas_device_priv_data->block) + return SCSI_MLQUEUE_DEVICE_BUSY; + + if (scmd->sc_data_direction == DMA_FROM_DEVICE) + mpi_control = MPI2_SCSIIO_CONTROL_READ; + else if (scmd->sc_data_direction == DMA_TO_DEVICE) + mpi_control = MPI2_SCSIIO_CONTROL_WRITE; + else + mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; + + /* set tags */ + if (!(sas_device_priv_data->flags & MPT_DEVICE_FLAGS_INIT)) { + if (scmd->device->tagged_supported) { + if (scmd->device->ordered_tags) + mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ; + else + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; + } else + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; + } else + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; + + if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && + scmd->cmd_len != 32) + mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; + + smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + goto out; + } + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); + _scsih_setup_eedp(ioc, scmd, mpi_request); + + if (scmd->cmd_len == 32) + mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; + mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) + mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; + else + mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); + mpi_request->Control = cpu_to_le32(mpi_control); + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len); + mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; + mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + mpi_request->SenseBufferLowAddress = + mpt3sas_base_get_sense_buffer_dma(ioc, smid); + mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4; + int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) + mpi_request->LUN); + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + + if (mpi_request->DataLength) { + if (ioc->build_sg_scmd(ioc, scmd, smid)) { + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } else + ioc->build_zero_len_sge(ioc, &mpi_request->SGL); + + if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) { + if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | + MPI25_SCSIIO_IOFLAGS_FAST_PATH); + 
mpt3sas_base_put_smid_fast_path(ioc, smid, handle); + } else + mpt3sas_base_put_smid_scsi_io(ioc, smid, handle); + } else + mpt3sas_base_put_smid_default(ioc, smid); + return 0; + + out: + return SCSI_MLQUEUE_HOST_BUSY; +} +static DEF_SCSI_QCMD(_scsih_qcmd) + + +/** + * _scsih_normalize_sense - normalize descriptor and fixed format sense data + * @sense_buffer: sense data returned by target + * @data: normalized skey/asc/ascq + * + * Return nothing. + */ +static void +_scsih_normalize_sense(char *sense_buffer, struct sense_info *data) +{ + if ((sense_buffer[0] & 0x7F) >= 0x72) { + /* descriptor format */ + data->skey = sense_buffer[1] & 0x0F; + data->asc = sense_buffer[2]; + data->ascq = sense_buffer[3]; + } else { + /* fixed format */ + data->skey = sense_buffer[2] & 0x0F; + data->asc = sense_buffer[12]; + data->ascq = sense_buffer[13]; + } +} + +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING +/** + * _scsih_scsi_ioc_info - translated non-succesfull SCSI_IO request + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * @mpi_reply: reply mf payload returned from firmware + * + * scsi_status - SCSI Status code returned from target device + * scsi_state - state info associated with SCSI_IO determined by ioc + * ioc_status - ioc supplied status info + * + * Return nothing. + */ +static void +_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + Mpi2SCSIIOReply_t *mpi_reply, u16 smid) +{ + u32 response_info; + u8 *response_bytes; + u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & + MPI2_IOCSTATUS_MASK; + u8 scsi_state = mpi_reply->SCSIState; + u8 scsi_status = mpi_reply->SCSIStatus; + char *desc_ioc_state = NULL; + char *desc_scsi_status = NULL; + char *desc_scsi_state = ioc->tmp_string; + u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + struct _sas_device *sas_device = NULL; + unsigned long flags; + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *priv_target = starget->hostdata; + char *device_str = NULL; + + if (!priv_target) + return; + device_str = "volume"; + + if (log_info == 0x31170000) + return; + + switch (ioc_status) { + case MPI2_IOCSTATUS_SUCCESS: + desc_ioc_state = "success"; + break; + case MPI2_IOCSTATUS_INVALID_FUNCTION: + desc_ioc_state = "invalid function"; + break; + case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: + desc_ioc_state = "scsi recovered error"; + break; + case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + desc_ioc_state = "scsi invalid dev handle"; + break; + case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + desc_ioc_state = "scsi device not there"; + break; + case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: + desc_ioc_state = "scsi data overrun"; + break; + case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: + desc_ioc_state = "scsi data underrun"; + break; + case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: + desc_ioc_state = "scsi io data error"; + break; + case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: + desc_ioc_state = "scsi protocol error"; + break; + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: + desc_ioc_state = "scsi task terminated"; + break; + case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + desc_ioc_state = "scsi residual mismatch"; + break; + case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + desc_ioc_state = "scsi task mgmt failed"; + break; + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: + desc_ioc_state = "scsi ioc terminated"; + break; + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: + desc_ioc_state = "scsi ext terminated"; + break; + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + desc_ioc_state = "eedp guard error"; + break; + case 
MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + desc_ioc_state = "eedp ref tag error"; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + desc_ioc_state = "eedp app tag error"; + break; + default: + desc_ioc_state = "unknown"; + break; + } + + switch (scsi_status) { + case MPI2_SCSI_STATUS_GOOD: + desc_scsi_status = "good"; + break; + case MPI2_SCSI_STATUS_CHECK_CONDITION: + desc_scsi_status = "check condition"; + break; + case MPI2_SCSI_STATUS_CONDITION_MET: + desc_scsi_status = "condition met"; + break; + case MPI2_SCSI_STATUS_BUSY: + desc_scsi_status = "busy"; + break; + case MPI2_SCSI_STATUS_INTERMEDIATE: + desc_scsi_status = "intermediate"; + break; + case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET: + desc_scsi_status = "intermediate condmet"; + break; + case MPI2_SCSI_STATUS_RESERVATION_CONFLICT: + desc_scsi_status = "reservation conflict"; + break; + case MPI2_SCSI_STATUS_COMMAND_TERMINATED: + desc_scsi_status = "command terminated"; + break; + case MPI2_SCSI_STATUS_TASK_SET_FULL: + desc_scsi_status = "task set full"; + break; + case MPI2_SCSI_STATUS_ACA_ACTIVE: + desc_scsi_status = "aca active"; + break; + case MPI2_SCSI_STATUS_TASK_ABORTED: + desc_scsi_status = "task aborted"; + break; + default: + desc_scsi_status = "unknown"; + break; + } + + desc_scsi_state[0] = '\0'; + if (!scsi_state) + desc_scsi_state = " "; + if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) + strcat(desc_scsi_state, "response info "); + if (scsi_state & MPI2_SCSI_STATE_TERMINATED) + strcat(desc_scsi_state, "state terminated "); + if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) + strcat(desc_scsi_state, "no status "); + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED) + strcat(desc_scsi_state, "autosense failed "); + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) + strcat(desc_scsi_state, "autosense valid "); + + scsi_print_command(scmd); + + if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { + pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name, + device_str, (unsigned long long)priv_target->sas_address); + } else { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + priv_target->sas_address); + if (sas_device) { + pr_warn(MPT3SAS_FMT + "\tsas_address(0x%016llx), phy(%d)\n", + ioc->name, (unsigned long long) + sas_device->sas_address, sas_device->phy); + pr_warn(MPT3SAS_FMT + "\tenclosure_logical_id(0x%016llx), slot(%d)\n", + ioc->name, (unsigned long long) + sas_device->enclosure_logical_id, sas_device->slot); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + + pr_warn(MPT3SAS_FMT + "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n", + ioc->name, le16_to_cpu(mpi_reply->DevHandle), + desc_ioc_state, ioc_status, smid); + pr_warn(MPT3SAS_FMT + "\trequest_len(%d), underflow(%d), resid(%d)\n", + ioc->name, scsi_bufflen(scmd), scmd->underflow, + scsi_get_resid(scmd)); + pr_warn(MPT3SAS_FMT + "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->TaskTag), + le32_to_cpu(mpi_reply->TransferCount), scmd->result); + pr_warn(MPT3SAS_FMT + "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n", + ioc->name, desc_scsi_status, + scsi_status, desc_scsi_state, scsi_state); + + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + _scsih_normalize_sense(scmd->sense_buffer, &data); + pr_warn(MPT3SAS_FMT + "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n", + ioc->name, data.skey, + data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount)); + } + + if (scsi_state & 
MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { + response_info = le32_to_cpu(mpi_reply->ResponseInfo); + response_bytes = (u8 *)&response_info; + _scsih_response_code(ioc, response_bytes[0]); + } +} +#endif + +/** + * _scsih_turn_on_fault_led - illuminate Fault LED + * @ioc: per adapter object + * @handle: device handle + * Context: process + * + * Return nothing. + */ +static void +_scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + Mpi2SepReply_t mpi_reply; + Mpi2SepRequest_t mpi_request; + + memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); + mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = + cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); + mpi_request.DevHandle = cpu_to_le16(handle); + mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; + if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + return; + } +} + +/** + * _scsih_send_event_to_turn_on_fault_led - fire delayed event + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt. + * + * Return nothing. + */ +static void +_scsih_send_event_to_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct fw_event_work *fw_event; + + fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); + if (!fw_event) + return; + fw_event->event = MPT3SAS_TURN_ON_FAULT_LED; + fw_event->device_handle = handle; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); +} + +/** + * _scsih_smart_predicted_fault - process smart errors + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt. + * + * Return nothing. 
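The sense normalization in _scsih_normalize_sense() above hinges on a single byte: response codes 0x72/0x73 mean descriptor-format sense, anything lower is fixed format. The same extraction can be run standalone (plain C):

#include <stdio.h>

struct sense_info_demo { unsigned char skey, asc, ascq; };

/* Standalone copy of the descriptor/fixed sense-format normalization. */
static void normalize_sense(const unsigned char *sb, struct sense_info_demo *d)
{
    if ((sb[0] & 0x7F) >= 0x72) {       /* descriptor format */
        d->skey = sb[1] & 0x0F;
        d->asc  = sb[2];
        d->ascq = sb[3];
    } else {                            /* fixed format */
        d->skey = sb[2] & 0x0F;
        d->asc  = sb[12];
        d->ascq = sb[13];
    }
}

int main(void)
{
    /* fixed-format CHECK CONDITION: ILLEGAL REQUEST, asc/ascq 0x20/0x00 */
    unsigned char fixed[18] = { 0x70, 0, 0x05, [12] = 0x20, [13] = 0x00 };
    struct sense_info_demo d;

    normalize_sense(fixed, &d);
    printf("skey=%#x asc=%#x ascq=%#x\n", d.skey, d.asc, d.ascq);
    return 0;
}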
+ */ +static void +_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct scsi_target *starget; + struct MPT3SAS_TARGET *sas_target_priv_data; + Mpi2EventNotificationReply_t *event_reply; + Mpi2EventDataSasDeviceStatusChange_t *event_data; + struct _sas_device *sas_device; + ssize_t sz; + unsigned long flags; + + /* only handle non-raid devices */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = _scsih_sas_device_find_by_handle(ioc, handle); + if (!sas_device) { + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return; + } + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) || + ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) { + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return; + } + starget_printk(KERN_WARNING, starget, "predicted fault\n"); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) + _scsih_send_event_to_turn_on_fault_led(ioc, handle); + + /* insert into event log */ + sz = offsetof(Mpi2EventNotificationReply_t, EventData) + + sizeof(Mpi2EventDataSasDeviceStatusChange_t); + event_reply = kzalloc(sz, GFP_KERNEL); + if (!event_reply) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; + event_reply->Event = + cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); + event_reply->MsgLength = sz/4; + event_reply->EventDataLength = + cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4); + event_data = (Mpi2EventDataSasDeviceStatusChange_t *) + event_reply->EventData; + event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA; + event_data->ASC = 0x5D; + event_data->DevHandle = cpu_to_le16(handle); + event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address); + mpt3sas_ctl_add_to_event_log(ioc, event_reply); + kfree(event_reply); +} + +/** + * _scsih_io_done - scsi request callback + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when using _scsih_qcmd. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
+ */ +static u8 +_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) +{ + Mpi2SCSIIORequest_t *mpi_request; + Mpi2SCSIIOReply_t *mpi_reply; + struct scsi_cmnd *scmd; + u16 ioc_status; + u32 xfer_cnt; + u8 scsi_state; + u8 scsi_status; + u32 log_info; + struct MPT3SAS_DEVICE *sas_device_priv_data; + u32 response_code = 0; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + scmd = _scsih_scsi_lookup_get_clear(ioc, smid); + if (scmd == NULL) + return 1; + + _scsih_set_satl_pending(scmd, false); + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + if (mpi_reply == NULL) { + scmd->result = DID_OK << 16; + goto out; + } + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + sas_device_priv_data->sas_target->deleted) { + scmd->result = DID_NO_CONNECT << 16; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + + /* turning off TLR */ + scsi_state = mpi_reply->SCSIState; + if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) + response_code = + le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; + if (!sas_device_priv_data->tlr_snoop_check) { + sas_device_priv_data->tlr_snoop_check++; + if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) && + response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) + sas_device_priv_data->flags &= + ~MPT_DEVICE_TLR_ON; + } + + xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); + if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) + log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + else + log_info = 0; + ioc_status &= MPI2_IOCSTATUS_MASK; + scsi_status = mpi_reply->SCSIStatus; + + if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 && + (scsi_status == MPI2_SCSI_STATUS_BUSY || + scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT || + scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) { + ioc_status = MPI2_IOCSTATUS_SUCCESS; + } + + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + const void *sense_data = mpt3sas_base_get_sense_buffer(ioc, + smid); + u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, + le32_to_cpu(mpi_reply->SenseCount)); + memcpy(scmd->sense_buffer, sense_data, sz); + _scsih_normalize_sense(scmd->sense_buffer, &data); + /* failure prediction threshold exceeded */ + if (data.asc == 0x5D) + _scsih_smart_predicted_fault(ioc, + le16_to_cpu(mpi_reply->DevHandle)); + mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq); + } + + switch (ioc_status) { + case MPI2_IOCSTATUS_BUSY: + case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: + scmd->result = SAM_STAT_BUSY; + break; + + case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + scmd->result = DID_NO_CONNECT << 16; + break; + + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: + if (sas_device_priv_data->block) { + scmd->result = DID_TRANSPORT_DISRUPTED << 16; + goto out; + } + if (log_info == 0x31110630) { + if (scmd->retries > 2) { + scmd->result = DID_NO_CONNECT << 16; + scsi_device_set_state(scmd->device, + SDEV_OFFLINE); + } else { + scmd->result = DID_SOFT_ERROR << 16; + scmd->device->expecting_cc_ua = 1; + } + break; + } + scmd->result = DID_SOFT_ERROR << 16; + break; + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: + scmd->result = DID_RESET << 16; + break; + + case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt)) + scmd->result = DID_SOFT_ERROR << 16; + else + scmd->result = (DID_OK << 16) | scsi_status; + break; + + case 
MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: + scmd->result = (DID_OK << 16) | scsi_status; + + if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)) + break; + + if (xfer_cnt < scmd->underflow) { + if (scsi_status == SAM_STAT_BUSY) + scmd->result = SAM_STAT_BUSY; + else + scmd->result = DID_SOFT_ERROR << 16; + } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | + MPI2_SCSI_STATE_NO_SCSI_STATUS)) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) { + mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID; + mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION; + scmd->result = (DRIVER_SENSE << 24) | + SAM_STAT_CHECK_CONDITION; + scmd->sense_buffer[0] = 0x70; + scmd->sense_buffer[2] = ILLEGAL_REQUEST; + scmd->sense_buffer[12] = 0x20; + scmd->sense_buffer[13] = 0; + } + break; + + case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: + scsi_set_resid(scmd, 0); + /* fall through */ + case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: + case MPI2_IOCSTATUS_SUCCESS: + scmd->result = (DID_OK << 16) | scsi_status; + if (response_code == + MPI2_SCSITASKMGMT_RSP_INVALID_FRAME || + (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | + MPI2_SCSI_STATE_NO_SCSI_STATUS))) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + break; + + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + _scsih_eedp_error_handling(scmd, ioc_status); + break; + + case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case MPI2_IOCSTATUS_INVALID_FUNCTION: + case MPI2_IOCSTATUS_INVALID_SGL: + case MPI2_IOCSTATUS_INTERNAL_ERROR: + case MPI2_IOCSTATUS_INVALID_FIELD: + case MPI2_IOCSTATUS_INVALID_STATE: + case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: + case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + default: + scmd->result = DID_SOFT_ERROR << 16; + break; + + } + +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING + if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY)) + _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid); +#endif + + out: + + scsi_dma_unmap(scmd); + + scmd->scsi_done(scmd); + return 1; +} + +/** + * _scsih_sas_host_refresh - refreshing sas host object contents + * @ioc: per adapter object + * Context: user + * + * During port enable, the fw will send topology events for every device. + * It's possible that the handles may change from the previous setting, so + * this code keeps the handles updated if they change. + * + * Return nothing.
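The DATA_UNDERRUN branch above is the subtlest of the completion cases: a short transfer only becomes an error when it undercuts the command's stated underflow and no autosense arrived. Reduced to a standalone predicate (plain C, modelling just the underflow comparison; the terminated and no-status states are handled separately in the driver):

#include <stdbool.h>
#include <stdint.h>

/* Simplified rendering of the underrun decision in _scsih_io_done(). */
static bool underrun_is_error(uint32_t xfer_cnt, uint32_t underflow,
    bool autosense_valid)
{
    if (autosense_valid)
        return false;   /* sense data already explains the short transfer */
    return xfer_cnt < underflow;
}

In the driver the error result is further split into SAM_STAT_BUSY versus DID_SOFT_ERROR depending on the returned SCSI status.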
+ */ +static void +_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc) +{ + u16 sz; + u16 ioc_status; + int i; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + u16 attached_handle; + u8 link_rate; + + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "updating handles for sas_host(0x%016llx)\n", + ioc->name, (unsigned long long)ioc->sas_hba.sas_address)); + + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys + * sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz)) != 0) + goto out; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < ioc->sas_hba.num_phys ; i++) { + link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4; + if (i == 0) + ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> + PhyData[0].ControllerDevHandle); + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i]. + AttachedDevHandle); + if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) + link_rate = MPI2_SAS_NEG_LINK_RATE_1_5; + mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address, + attached_handle, i, link_rate); + } + out: + kfree(sas_iounit_pg0); +} + +/** + * _scsih_sas_host_add - create sas host object + * @ioc: per adapter object + * + * Creating host side data object, stored in ioc->sas_hba + * + * Return nothing. + */ +static void +_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) +{ + int i; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2SasPhyPage0_t phy_pg0; + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2SasEnclosurePage0_t enclosure_pg0; + u16 ioc_status; + u16 sz; + u8 device_missing_delay; + + mpt3sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys); + if (!ioc->sas_hba.num_phys) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + /* sas_iounit page 0 */ + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + /* sas_iounit page 1 */ + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; 
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + ioc->io_missing_delay = + sas_iounit_pg1->IODeviceMissingDelay; + device_missing_delay = + sas_iounit_pg1->ReportDeviceMissingDelay; + if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) + ioc->device_missing_delay = (device_missing_delay & + MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; + else + ioc->device_missing_delay = device_missing_delay & + MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; + + ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; + ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys, + sizeof(struct _sas_phy), GFP_KERNEL); + if (!ioc->sas_hba.phy) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + for (i = 0; i < ioc->sas_hba.num_phys ; i++) { + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + if (i == 0) + ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> + PhyData[0].ControllerDevHandle); + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + ioc->sas_hba.phy[i].phy_id = i; + mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], + phy_pg0, ioc->sas_hba.parent_dev); + } + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->sas_hba.enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + pr_info(MPT3SAS_FMT + "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + ioc->name, ioc->sas_hba.handle, + (unsigned long long) ioc->sas_hba.sas_address, + ioc->sas_hba.num_phys) ; + + if (ioc->sas_hba.enclosure_handle) { + if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, + &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, + ioc->sas_hba.enclosure_handle))) + ioc->sas_hba.enclosure_logical_id = + le64_to_cpu(enclosure_pg0.EnclosureLogicalID); + } + + out: + kfree(sas_iounit_pg1); + kfree(sas_iounit_pg0); +} + +/** + * _scsih_expander_add - creating expander object + * @ioc: per adapter object + * @handle: expander handle + * + * Creating expander object, stored in ioc->sas_expander_list. + * + * Return 0 for success, else error. 
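_scsih_sas_host_refresh() and _scsih_sas_host_add() above size their config-page buffers with the offsetof-plus-array idiom, because the MPI SAS IO unit pages end in a per-phy trailing array. The idiom in miniature (plain C, illustrative struct names):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Miniature of the offsetof() sizing idiom for trailing-array pages. */
struct demo_phy_data { uint16_t attached_handle; uint8_t link_rate; };
struct demo_page { uint8_t num_phys; struct demo_phy_data phy_data[]; };

static struct demo_page *demo_page_alloc(unsigned int num_phys)
{
    size_t sz = offsetof(struct demo_page, phy_data) +
                num_phys * sizeof(struct demo_phy_data);

    return calloc(1, sz);    /* kzalloc() in the driver */
}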
+ */ +static int +_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_node *sas_expander; + Mpi2ConfigReply_t mpi_reply; + Mpi2ExpanderPage0_t expander_pg0; + Mpi2ExpanderPage1_t expander_pg1; + Mpi2SasEnclosurePage0_t enclosure_pg0; + u32 ioc_status; + u16 parent_handle; + u64 sas_address, sas_address_parent = 0; + int i; + unsigned long flags; + struct _sas_port *mpt3sas_port = NULL; + + int rc = 0; + + if (!handle) + return -1; + + if (ioc->shost_recovery || ioc->pci_error_recovery) + return -1; + + if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, + MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + + /* handle out of order topology events */ + parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); + if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) + != 0) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + if (sas_address_parent != ioc->sas_hba.sas_address) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address_parent); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (!sas_expander) { + rc = _scsih_expander_add(ioc, parent_handle); + if (rc != 0) + return rc; + } + } + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_address = le64_to_cpu(expander_pg0.SASAddress); + sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (sas_expander) + return 0; + + sas_expander = kzalloc(sizeof(struct _sas_node), + GFP_KERNEL); + if (!sas_expander) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + + sas_expander->handle = handle; + sas_expander->num_phys = expander_pg0.NumPhys; + sas_expander->sas_address_parent = sas_address_parent; + sas_expander->sas_address = sas_address; + + pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \ + " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name, + handle, parent_handle, (unsigned long long) + sas_expander->sas_address, sas_expander->num_phys); + + if (!sas_expander->num_phys) + goto out_fail; + sas_expander->phy = kcalloc(sas_expander->num_phys, + sizeof(struct _sas_phy), GFP_KERNEL); + if (!sas_expander->phy) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + + INIT_LIST_HEAD(&sas_expander->sas_port_list); + mpt3sas_port = mpt3sas_transport_port_add(ioc, handle, + sas_address_parent); + if (!mpt3sas_port) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->parent_dev = &mpt3sas_port->rphy->dev; + + for (i = 0 ; i < sas_expander->num_phys ; i++) { + if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, + &expander_pg1, i, handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->phy[i].handle = handle; + sas_expander->phy[i].phy_id = i; + + if ((mpt3sas_transport_add_expander_phy(ioc, + 
&sas_expander->phy[i], expander_pg1, + sas_expander->parent_dev))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + } + + if (sas_expander->enclosure_handle) { + if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, + &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, + sas_expander->enclosure_handle))) + sas_expander->enclosure_logical_id = + le64_to_cpu(enclosure_pg0.EnclosureLogicalID); + } + + _scsih_expander_node_add(ioc, sas_expander); + return 0; + + out_fail: + + if (mpt3sas_port) + mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, + sas_address_parent); + kfree(sas_expander); + return rc; +} + +/** + * mpt3sas_expander_remove - removing expander object + * @ioc: per adapter object + * @sas_address: expander sas_address + * + * Return nothing. + */ +void +mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) +{ + struct _sas_node *sas_expander; + unsigned long flags; + + if (ioc->shost_recovery) + return; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address); + if (sas_expander) + list_del(&sas_expander->list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (sas_expander) + _scsih_expander_node_remove(ioc, sas_expander); +} + +/** + * _scsih_done - internal SCSI_IO callback handler. + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when sending internal generated SCSI_IO. + * The callback index passed is `ioc->scsih_cb_idx` + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
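+ *
+ * The reply frame is copied into ioc->scsih_cmds.reply and the command
+ * status moves from PENDING to COMPLETE before ioc->scsih_cmds.done is
+ * completed, so the waiter (e.g. _scsih_ir_fastpath) sees a valid reply.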
+ */
+static u8
+_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->scsih_cmds.smid != smid)
+ return 1;
+ ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ memcpy(ioc->scsih_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->scsih_cmds.done);
+ return 1;
+}
+
+
+
+
+#define MPT3_MAX_LUNS (255)
+
+
+/**
+ * _scsih_check_access_status - check access flags
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: sas device handle
+ * @access_status: errors returned during discovery of the device
+ *
+ * Return 0 for success, else failure
+ */
+static u8
+_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle, u8 access_status)
+{
+ u8 rc = 1;
+ char *desc = NULL;
+
+ switch (access_status) {
+ case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
+ rc = 0;
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
+ desc = "sata capability failed";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
+ desc = "sata affiliation conflict";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
+ desc = "route not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
+ desc = "smp error not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
+ desc = "device blocked";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
+ desc = "sata initialization failed";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+
+ if (!rc)
+ return 0;
+
+ pr_err(MPT3SAS_FMT
+ "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+ ioc->name, desc, (unsigned long long)sas_address, handle);
+ return rc;
+}
+
+/**
+ * _scsih_check_device - checking device responsiveness
+ * @ioc: per adapter object
+ * @parent_sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
+ * @phy_number: phy number
+ * @link_rate: new link rate
+ *
+ * Returns nothing.
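+ *
+ * Re-reads SAS device page 0 for @handle; if the firmware handle changed
+ * across the event, the cached handle is refreshed, and I/O is unblocked
+ * only when the device is still present and passes the access checks.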
+ */ +static void +_scsih_check_device(struct MPT3SAS_ADAPTER *ioc, + u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + struct _sas_device *sas_device; + u32 ioc_status; + unsigned long flags; + u64 sas_address; + struct scsi_target *starget; + struct MPT3SAS_TARGET *sas_target_priv_data; + u32 device_info; + + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) + return; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + return; + + /* wide port handling ~ we need only handle device once for the phy that + * is matched in sas device page zero + */ + if (phy_number != sas_device_pg0.PhyNum) + return; + + /* check if this is end device */ + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if (!(_scsih_is_end_device(device_info))) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + sas_address); + + if (!sas_device) { + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return; + } + + if (unlikely(sas_device->handle != handle)) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + starget_printk(KERN_INFO, starget, + "handle changed from(0x%04x) to (0x%04x)!!!\n", + sas_device->handle, handle); + sas_target_priv_data->handle = handle; + sas_device->handle = handle; + } + + /* check if device is present */ + if (!(le16_to_cpu(sas_device_pg0.Flags) & + MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { + pr_err(MPT3SAS_FMT + "device is not present handle(0x%04x), flags!!!\n", + ioc->name, handle); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return; + } + + /* check if there were any issues with discovery */ + if (_scsih_check_access_status(ioc, sas_address, handle, + sas_device_pg0.AccessStatus)) { + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return; + } + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + _scsih_ublock_io_device(ioc, sas_address); + +} + +/** + * _scsih_add_device - creating sas device object + * @ioc: per adapter object + * @handle: sas device handle + * @phy_num: phy number end device attached to + * @is_pd: is this hidden raid component + * + * Creating end device object, stored in ioc->sas_device_list. + * + * Returns 0 for success, non-zero for failure. 
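+ *
+ * While initial discovery is still running (wait_for_discovery_to_complete)
+ * the device is parked on the init list via _scsih_sas_device_init_add();
+ * otherwise it is registered immediately through _scsih_sas_device_add().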
+ */
+static int
+_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
+ u8 is_pd)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ u64 sas_address;
+ u32 device_info;
+ unsigned long flags;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ return -1;
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ pr_err(MPT3SAS_FMT "device is not present handle(0x%04x)!!!\n",
+ ioc->name, handle);
+ return -1;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus))
+ return -1;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_device)
+ return -1;
+
+ sas_device = kzalloc(sizeof(struct _sas_device),
+ GFP_KERNEL);
+ if (!sas_device) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ sas_device->handle = handle;
+ if (_scsih_get_sas_address(ioc,
+ le16_to_cpu(sas_device_pg0.ParentDevHandle),
+ &sas_device->sas_address_parent) != 0)
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0.EnclosureHandle);
+ sas_device->slot =
+ le16_to_cpu(sas_device_pg0.Slot);
+ sas_device->device_info = device_info;
+ sas_device->sas_address = sas_address;
+ sas_device->phy = sas_device_pg0.PhyNum;
+ sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
+ MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+
+ /* get enclosure_logical_id */
+ if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
+ ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ sas_device->enclosure_handle)))
+ sas_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+
+ /* get device name */
+ sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+
+ if (ioc->wait_for_discovery_to_complete)
+ _scsih_sas_device_init_add(ioc, sas_device);
+ else
+ _scsih_sas_device_add(ioc, sas_device);
+
+ return 0;
+}
+
+/**
+ * _scsih_remove_device - removing sas device object
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ *
+ * Return nothing.
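+ *
+ * Marks the scsi_target private data as deleted, unblocks any pending
+ * I/O, tears down the transport port and finally frees the _sas_device.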
+ */ +static void +_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + struct MPT3SAS_TARGET *sas_target_priv_data; + + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, + sas_device->handle, (unsigned long long) + sas_device->sas_address)); + + if (sas_device->starget && sas_device->starget->hostdata) { + sas_target_priv_data = sas_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + _scsih_ublock_io_device(ioc, sas_device->sas_address); + sas_target_priv_data->handle = + MPT3SAS_INVALID_DEVICE_HANDLE; + } + mpt3sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent); + + pr_info(MPT3SAS_FMT + "removing handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, sas_device->handle, + (unsigned long long) sas_device->sas_address); + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, + sas_device->handle, (unsigned long long) + sas_device->sas_address)); + + kfree(sas_device); +} + +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING +/** + * _scsih_sas_topology_change_event_debug - debug for topology event + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. + */ +static void +_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasTopologyChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + u8 phy_number; + char *status_str = NULL; + u8 link_rate, prev_link_rate; + + switch (event_data->ExpStatus) { + case MPI2_EVENT_SAS_TOPO_ES_ADDED: + status_str = "add"; + break; + case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING: + status_str = "remove"; + break; + case MPI2_EVENT_SAS_TOPO_ES_RESPONDING: + case 0: + status_str = "responding"; + break; + case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: + status_str = "remove delay"; + break; + default: + status_str = "unknown status"; + break; + } + pr_info(MPT3SAS_FMT "sas topology change: (%s)\n", + ioc->name, status_str); + pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \ + "start_phy(%02d), count(%d)\n", + le16_to_cpu(event_data->ExpanderDevHandle), + le16_to_cpu(event_data->EnclosureHandle), + event_data->StartPhyNum, event_data->NumEntries); + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + phy_number = event_data->StartPhyNum + i; + reason_code = event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_RC_MASK; + switch (reason_code) { + case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: + status_str = "target add"; + break; + case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: + status_str = "target remove"; + break; + case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: + status_str = "delay target remove"; + break; + case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: + status_str = "link rate change"; + break; + case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: + status_str = "target responding"; + break; + default: + status_str = "unknown"; + break; + } + link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = event_data->PHY[i].LinkRate & 0xF; + pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \ + " link rate: new(0x%02x), old(0x%02x)\n", phy_number, + handle, status_str, link_rate, prev_link_rate); + + } +} +#endif + +/** + * _scsih_sas_topology_change_event - handle topology changes + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. 
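+ *
+ * Walks the per-phy entries of the event: link-rate changes refresh the
+ * transport links, target-add entries create end devices, and
+ * target-not-responding entries remove them by handle.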
+ *
+ */
+static int
+_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ int i;
+ u16 parent_handle, handle;
+ u16 reason_code;
+ u8 phy_number, max_phys;
+ struct _sas_node *sas_expander;
+ u64 sas_address;
+ unsigned long flags;
+ u8 link_rate, prev_link_rate;
+ Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_topology_change_event_debug(ioc, event_data);
+#endif
+
+ if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
+ return 0;
+
+ if (!ioc->sas_hba.num_phys)
+ _scsih_sas_host_add(ioc);
+ else
+ _scsih_sas_host_refresh(ioc);
+
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "ignoring expander event\n", ioc->name));
+ return 0;
+ }
+
+ parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+
+ /* handle expander add */
+ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
+ if (_scsih_expander_add(ioc, parent_handle) != 0)
+ return 0;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
+ parent_handle);
+ if (sas_expander) {
+ sas_address = sas_expander->sas_address;
+ max_phys = sas_expander->num_phys;
+ } else if (parent_handle < ioc->sas_hba.num_phys) {
+ sas_address = ioc->sas_hba.sas_address;
+ max_phys = ioc->sas_hba.num_phys;
+ } else {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle siblings events */
+ for (i = 0; i < event_data->NumEntries; i++) {
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "ignoring expander event\n", ioc->name));
+ return 0;
+ }
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return 0;
+ phy_number = event_data->StartPhyNum + i;
+ if (phy_number >= max_phys)
+ continue;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if ((event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
+ MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
+ continue;
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ link_rate = event_data->PHY[i].LinkRate >> 4;
+ prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
+ switch (reason_code) {
+ case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+
+ if (ioc->shost_recovery)
+ break;
+
+ if (link_rate == prev_link_rate)
+ break;
+
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, phy_number, link_rate);
+
+ if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ break;
+
+ _scsih_check_device(ioc, sas_address, handle,
+ phy_number, link_rate);
+
+ /* fall through - also try to add the device */
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+
+ if (ioc->shost_recovery)
+ break;
+
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, phy_number, link_rate);
+
+ _scsih_add_device(ioc, handle, phy_number, 0);
+
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+
+ _scsih_device_remove_by_handle(ioc, handle);
+ break;
+ }
+ }
+
+ /* handle expander removal */
+ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
+ sas_expander)
+ mpt3sas_expander_remove(ioc, sas_address);
+
+ return 0;
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_device_status_change_event_debug - debug for device event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */ +static void +_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasDeviceStatusChange_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->ReasonCode) { + case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA: + reason_str = "smart data"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: + reason_str = "unsupported device discovered"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: + reason_str = "internal device reset"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: + reason_str = "internal task abort"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: + reason_str = "internal task abort set"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: + reason_str = "internal clear task set"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: + reason_str = "internal query task"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE: + reason_str = "sata init failure"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET: + reason_str = "internal device reset complete"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: + reason_str = "internal task abort complete"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: + reason_str = "internal async notification"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality complete"; + break; + default: + reason_str = "unknown reason"; + break; + } + pr_info(MPT3SAS_FMT "device status change: (%s)\n" + "\thandle(0x%04x), sas address(0x%016llx), tag(%d)", + ioc->name, reason_str, le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->SASAddress), + le16_to_cpu(event_data->TaskTag)); + if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA) + pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name, + event_data->ASC, event_data->ASCQ); + pr_info("\n"); +} +#endif + +/** + * _scsih_sas_device_status_change_event - handle device status change + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + struct MPT3SAS_TARGET *target_priv_data; + struct _sas_device *sas_device; + u64 sas_address; + unsigned long flags; + Mpi2EventDataSasDeviceStatusChange_t *event_data = + fw_event->event_data; + +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + _scsih_sas_device_status_change_event_debug(ioc, + event_data); +#endif + + /* In MPI Revision K (0xC), the internal device reset complete was + * implemented, so avoid setting tm_busy flag for older firmware. 
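+ * The header revision is carried in the upper byte of
+ * ioc->facts.HeaderVersion, hence the ">> 8" check below.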
+ */ + if ((ioc->facts.HeaderVersion >> 8) < 0xC) + return; + + if (event_data->ReasonCode != + MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && + event_data->ReasonCode != + MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(event_data->SASAddress); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + sas_address); + + if (!sas_device || !sas_device->starget) { + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return; + } + + target_priv_data = sas_device->starget->hostdata; + if (!target_priv_data) { + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return; + } + + if (event_data->ReasonCode == + MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) + target_priv_data->tm_busy = 1; + else + target_priv_data->tm_busy = 0; + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING +/** + * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure + * event + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasEnclDevStatusChange_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->ReasonCode) { + case MPI2_EVENT_SAS_ENCL_RC_ADDED: + reason_str = "enclosure add"; + break; + case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING: + reason_str = "enclosure remove"; + break; + default: + reason_str = "unknown reason"; + break; + } + + pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n" + "\thandle(0x%04x), enclosure logical id(0x%016llx)" + " number slots(%d)\n", ioc->name, reason_str, + le16_to_cpu(event_data->EnclosureHandle), + (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID), + le16_to_cpu(event_data->StartSlot)); +} +#endif + +/** + * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + _scsih_sas_enclosure_dev_status_change_event_debug(ioc, + fw_event->event_data); +#endif +} + +/** + * _scsih_sas_broadcast_primitive_event - handle broadcast events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. 
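+ *
+ * For every outstanding smid a QUERY_TASK is issued; commands no longer
+ * owned by the IOC are aborted with ABORT_TASK, and the whole pass is
+ * retried a bounded number of times (see broadcast_aen_retry).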
+ */ +static void +_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + struct scsi_cmnd *scmd; + struct scsi_device *sdev; + u16 smid, handle; + u32 lun; + struct MPT3SAS_DEVICE *sas_device_priv_data; + u32 termination_count; + u32 query_count; + Mpi2SCSITaskManagementReply_t *mpi_reply; + Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data; + u16 ioc_status; + unsigned long flags; + int r; + u8 max_retries = 0; + u8 task_abort_retries; + + mutex_lock(&ioc->tm_cmds.mutex); + pr_info(MPT3SAS_FMT + "%s: enter: phy number(%d), width(%d)\n", + ioc->name, __func__, event_data->PhyNum, + event_data->PortWidth); + + _scsih_block_io_all_device(ioc); + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + mpi_reply = ioc->tm_cmds.reply; + broadcast_aen_retry: + + /* sanity checks for retrying this loop */ + if (max_retries++ == 5) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n", + ioc->name, __func__)); + goto out; + } else if (max_retries > 1) + dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n", + ioc->name, __func__, max_retries - 1)); + + termination_count = 0; + query_count = 0; + for (smid = 1; smid <= ioc->scsiio_depth; smid++) { + if (ioc->shost_recovery) + goto out; + scmd = _scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + sdev = scmd->device; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) + continue; + /* skip hidden raid components */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) + continue; + /* skip volumes */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_VOLUME) + continue; + + handle = sas_device_priv_data->sas_target->handle; + lun = sas_device_priv_data->lun; + query_count++; + + if (ioc->shost_recovery) + goto out; + + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0, + TM_MUTEX_OFF); + if (r == FAILED) { + sdev_printk(KERN_WARNING, sdev, + "mpt3sas_scsih_issue_tm: FAILED when sending " + "QUERY_TASK: scmd(%p)\n", scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + sdev_printk(KERN_WARNING, sdev, + "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n", + ioc_status, scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + + /* see if IO is still owned by IOC and target */ + if (mpi_reply->ResponseCode == + MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || + mpi_reply->ResponseCode == + MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) { + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + continue; + } + task_abort_retries = 0; + tm_retry: + if (task_abort_retries++ == 60) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: ABORT_TASK: giving up\n", ioc->name, + __func__)); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + + if (ioc->shost_recovery) + goto out_no_lock; + + r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, + sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, + scmd->serial_number, TM_MUTEX_OFF); + if (r == FAILED) { + sdev_printk(KERN_WARNING, sdev, + "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " + "scmd(%p)\n", scmd); + goto tm_retry; + } + + if (task_abort_retries > 1) + sdev_printk(KERN_WARNING, sdev, + 
"mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):" + " scmd(%p)\n", + task_abort_retries - 1, scmd); + + termination_count += le32_to_cpu(mpi_reply->TerminationCount); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + } + + if (ioc->broadcast_aen_pending) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: loop back due to pending AEN\n", + ioc->name, __func__)); + ioc->broadcast_aen_pending = 0; + goto broadcast_aen_retry; + } + + out: + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + out_no_lock: + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s - exit, query_count = %d termination_count = %d\n", + ioc->name, __func__, query_count, termination_count)); + + ioc->broadcast_aen_busy = 0; + if (!ioc->shost_recovery) + _scsih_ublock_io_all_device(ioc); + mutex_unlock(&ioc->tm_cmds.mutex); +} + +/** + * _scsih_sas_discovery_event - handle discovery events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi2EventDataSasDiscovery_t *event_data = fw_event->event_data; + +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { + pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name, + (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ? + "start" : "stop"); + if (event_data->DiscoveryStatus) + pr_info("discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + pr_info("\n"); + } +#endif + + if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED && + !ioc->sas_hba.num_phys) { + if (disable_discovery > 0 && ioc->shost_recovery) { + /* Wait for the reset to complete */ + while (ioc->shost_recovery) + ssleep(1); + } + _scsih_sas_host_add(ioc); + } +} + +/** + * _scsih_ir_fastpath - turn on fastpath for IR physdisk + * @ioc: per adapter object + * @handle: device handle for physical disk + * @phys_disk_num: physical disk number + * + * Return 0 for success, else failure. 
+ */
+static int
+_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
+{
+ Mpi2RaidActionRequest_t *mpi_request;
+ Mpi2RaidActionReply_t *mpi_reply;
+ u16 smid;
+ u8 issue_reset = 0;
+ int rc = 0;
+ u16 ioc_status;
+ u32 log_info;
+
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
+
+ mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
+ mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
+ mpi_request->PhysDiskNum = phys_disk_num;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
+ "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
+ handle, phys_disk_num));
+
+ init_completion(&ioc->scsih_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->scsih_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+ log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ else
+ log_info = 0;
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "IR RAID_ACTION: failed: ioc_status(0x%04x), "
+ "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
+ log_info));
+ rc = -EFAULT;
+ } else
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "IR RAID_ACTION: completed successfully\n",
+ ioc->name));
+ }
+
+ out:
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ return rc;
+}
+
+/**
+ * _scsih_reprobe_lun - reprobing lun
+ * @sdev: scsi device struct
+ * @no_uld_attach: sdev->no_uld_attach flag setting
+ *
+ **/
+static void
+_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
+{
+ int rc;
+ sdev->no_uld_attach = no_uld_attach ? 1 : 0;
+ sdev_printk(KERN_INFO, sdev, "%s raid component\n",
+ sdev->no_uld_attach ? "hiding" : "exposing");
+ rc = scsi_device_reprobe(sdev);
+}
+
+/**
+ * _scsih_sas_volume_add - add new volume
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
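+ *
+ * During initial discovery the volume is only recorded (and considered
+ * as a boot device candidate); otherwise scsi_add_device() is called
+ * right away and the entry is dropped again if that call fails.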
+ */ +static void +_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventIrConfigElement_t *element) +{ + struct _raid_device *raid_device; + unsigned long flags; + u64 wwid; + u16 handle = le16_to_cpu(element->VolDevHandle); + int rc; + + mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); + if (!wwid) { + pr_err(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (raid_device) + return; + + raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); + if (!raid_device) { + pr_err(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + + raid_device->id = ioc->sas_id++; + raid_device->channel = RAID_CHANNEL; + raid_device->handle = handle; + raid_device->wwid = wwid; + _scsih_raid_device_add(ioc, raid_device); + if (!ioc->wait_for_discovery_to_complete) { + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + } else { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + _scsih_determine_boot_device(ioc, raid_device, 1); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } +} + +/** + * _scsih_sas_volume_delete - delete volume + * @ioc: per adapter object + * @handle: volume device handle + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _raid_device *raid_device; + unsigned long flags; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct scsi_target *starget = NULL; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_handle(ioc, handle); + if (raid_device) { + if (raid_device->starget) { + starget = raid_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->deleted = 1; + } + pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, raid_device->handle, + (unsigned long long) raid_device->wwid); + list_del(&raid_device->list); + kfree(raid_device); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (starget) + scsi_remove_target(&starget->dev); +} + +/** + * _scsih_sas_pd_expose - expose pd component to /dev/sdX + * @ioc: per adapter object + * @element: IR config element data + * Context: user. + * + * Return nothing. 
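+ *
+ * Clears the pd_handles bit and the RAID_COMPONENT target flag, then
+ * reprobes each LUN so the upper-level driver attaches to the disk again.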
+ */
+static void
+_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ sas_device->volume_handle = 0;
+ sas_device->volume_wwid = 0;
+ clear_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags &=
+ ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device)
+ return;
+
+ /* exposing raid component */
+ if (starget)
+ starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
+}
+
+/**
+ * _scsih_sas_pd_hide - hide pd component from /dev/sdX
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+ u16 volume_handle = 0;
+ u64 volume_wwid = 0;
+
+ mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
+ if (volume_handle)
+ mpt3sas_config_get_volume_wwid(ioc, volume_handle,
+ &volume_wwid);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ set_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_RAID_COMPONENT;
+ sas_device->volume_handle = volume_handle;
+ sas_device->volume_wwid = volume_wwid;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device)
+ return;
+
+ /* hiding raid component */
+ _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ if (starget)
+ starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
+}
+
+/**
+ * _scsih_sas_pd_delete - delete pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+ _scsih_device_remove_by_handle(ioc, handle);
+}
+
+/**
+ * _scsih_sas_pd_add - add pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
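+ *
+ * If the physical disk is already known, only fastpath is (re)enabled;
+ * otherwise the parent links are updated and the device is added as a
+ * hidden raid component (is_pd = 1).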
+ */ +static void +_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventIrConfigElement_t *element) +{ + struct _sas_device *sas_device; + unsigned long flags; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + u32 ioc_status; + u64 sas_address; + u16 parent_handle; + + set_bit(handle, ioc->pd_handles); + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = _scsih_sas_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) { + _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); + return; + } + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) + mpt3sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); + + _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); + _scsih_add_device(ioc, handle, 0, 1); +} + +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING +/** + * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrConfigChangeList_t *event_data) +{ + Mpi2EventIrConfigElement_t *element; + u8 element_type; + int i; + char *reason_str = NULL, *element_str = NULL; + + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + + pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n", + ioc->name, (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
+ "foreign" : "native", event_data->NumElements); + for (i = 0; i < event_data->NumElements; i++, element++) { + switch (element->ReasonCode) { + case MPI2_EVENT_IR_CHANGE_RC_ADDED: + reason_str = "add"; + break; + case MPI2_EVENT_IR_CHANGE_RC_REMOVED: + reason_str = "remove"; + break; + case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE: + reason_str = "no change"; + break; + case MPI2_EVENT_IR_CHANGE_RC_HIDE: + reason_str = "hide"; + break; + case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: + reason_str = "unhide"; + break; + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + reason_str = "volume_created"; + break; + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + reason_str = "volume_deleted"; + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: + reason_str = "pd_created"; + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: + reason_str = "pd_deleted"; + break; + default: + reason_str = "unknown reason"; + break; + } + element_type = le16_to_cpu(element->ElementFlags) & + MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; + switch (element_type) { + case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT: + element_str = "volume"; + break; + case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT: + element_str = "phys disk"; + break; + case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT: + element_str = "hot spare"; + break; + default: + element_str = "unknown element"; + break; + } + pr_info("\t(%s:%s), vol handle(0x%04x), " \ + "pd handle(0x%04x), pd num(0x%02x)\n", element_str, + reason_str, le16_to_cpu(element->VolDevHandle), + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } +} +#endif + +/** + * _scsih_sas_ir_config_change_event - handle ir configuration change events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi2EventIrConfigElement_t *element; + int i; + u8 foreign_config; + Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data; + +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + _scsih_sas_ir_config_change_event_debug(ioc, event_data); + +#endif + + foreign_config = (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
1 : 0; + + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + if (ioc->shost_recovery) { + + for (i = 0; i < event_data->NumElements; i++, element++) { + if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE) + _scsih_ir_fastpath(ioc, + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } + return; + } + for (i = 0; i < event_data->NumElements; i++, element++) { + + switch (element->ReasonCode) { + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + case MPI2_EVENT_IR_CHANGE_RC_ADDED: + if (!foreign_config) + _scsih_sas_volume_add(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + case MPI2_EVENT_IR_CHANGE_RC_REMOVED: + if (!foreign_config) + _scsih_sas_volume_delete(ioc, + le16_to_cpu(element->VolDevHandle)); + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: + _scsih_sas_pd_hide(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: + _scsih_sas_pd_expose(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_HIDE: + _scsih_sas_pd_add(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: + _scsih_sas_pd_delete(ioc, element); + break; + } + } +} + +/** + * _scsih_sas_ir_volume_event - IR volume event + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + u64 wwid; + unsigned long flags; + struct _raid_device *raid_device; + u16 handle; + u32 state; + int rc; + Mpi2EventDataIrVolume_t *event_data = fw_event->event_data; + + if (ioc->shost_recovery) + return; + + if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) + return; + + handle = le16_to_cpu(event_data->VolDevHandle); + state = le32_to_cpu(event_data->NewValue); + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", + ioc->name, __func__, handle, + le32_to_cpu(event_data->PreviousValue), state)); + switch (state) { + case MPI2_RAID_VOL_STATE_MISSING: + case MPI2_RAID_VOL_STATE_FAILED: + _scsih_sas_volume_delete(ioc, handle); + break; + + case MPI2_RAID_VOL_STATE_ONLINE: + case MPI2_RAID_VOL_STATE_DEGRADED: + case MPI2_RAID_VOL_STATE_OPTIMAL: + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (raid_device) + break; + + mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); + if (!wwid) { + pr_err(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + break; + } + + raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); + if (!raid_device) { + pr_err(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + break; + } + + raid_device->id = ioc->sas_id++; + raid_device->channel = RAID_CHANNEL; + raid_device->handle = handle; + raid_device->wwid = wwid; + _scsih_raid_device_add(ioc, raid_device); + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + break; + + case MPI2_RAID_VOL_STATE_INITIALIZING: + default: + break; + } +} + +/** + * _scsih_sas_ir_physical_disk_event - PD event + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. 
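+ *
+ * Only MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED is handled: states that
+ * leave the disk usable set the pd_handles bit and add the device as a
+ * hidden raid component; offline states are ignored.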
+ */ +static void +_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + u16 handle, parent_handle; + u32 state; + struct _sas_device *sas_device; + unsigned long flags; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + u32 ioc_status; + Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data; + u64 sas_address; + + if (ioc->shost_recovery) + return; + + if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) + return; + + handle = le16_to_cpu(event_data->PhysDiskDevHandle); + state = le32_to_cpu(event_data->NewValue); + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", + ioc->name, __func__, handle, + le32_to_cpu(event_data->PreviousValue), state)); + switch (state) { + case MPI2_RAID_PD_STATE_ONLINE: + case MPI2_RAID_PD_STATE_DEGRADED: + case MPI2_RAID_PD_STATE_REBUILDING: + case MPI2_RAID_PD_STATE_OPTIMAL: + case MPI2_RAID_PD_STATE_HOT_SPARE: + + set_bit(handle, ioc->pd_handles); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = _scsih_sas_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (sas_device) + return; + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) + mpt3sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); + + _scsih_add_device(ioc, handle, 0, 1); + + break; + + case MPI2_RAID_PD_STATE_OFFLINE: + case MPI2_RAID_PD_STATE_NOT_CONFIGURED: + case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: + default: + break; + } +} + +#ifdef CONFIG_SCSI_MPT3SAS_LOGGING +/** + * _scsih_sas_ir_operation_status_event_debug - debug for IR op event + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrOperationStatus_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->RAIDOperation) { + case MPI2_EVENT_IR_RAIDOP_RESYNC: + reason_str = "resync"; + break; + case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: + reason_str = "online capacity expansion"; + break; + case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: + reason_str = "consistency check"; + break; + case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT: + reason_str = "background init"; + break; + case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: + reason_str = "make data consistent"; + break; + } + + if (!reason_str) + return; + + pr_info(MPT3SAS_FMT "raid operational status: (%s)" \ + "\thandle(0x%04x), percent complete(%d)\n", + ioc->name, reason_str, + le16_to_cpu(event_data->VolDevHandle), + event_data->PercentComplete); +} +#endif + +/** + * _scsih_sas_ir_operation_status_event - handle RAID operation events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. 
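+ *
+ * Only resync progress is tracked here: the volume's percent_complete
+ * is cached for use by the raid transport layer.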
+ */
+static void
+_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
+ static struct _raid_device *raid_device;
+ unsigned long flags;
+ u16 handle;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_ir_operation_status_event_debug(ioc,
+ event_data);
+#endif
+
+ /* code added for raid transport support */
+ if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ handle = le16_to_cpu(event_data->VolDevHandle);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device)
+ raid_device->percent_complete =
+ event_data->PercentComplete;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_prep_device_scan - initialize parameters prior to device scan
+ * @ioc: per adapter object
+ *
+ * Set the deleted flag prior to device scan. If the device is found during
+ * the scan, then we clear the deleted flag.
+ */
+static void
+_scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (sas_device_priv_data && sas_device_priv_data->sas_target)
+ sas_device_priv_data->sas_target->deleted = 1;
+ }
+}
+
+/**
+ * _scsih_mark_responding_sas_device - mark a sas_device as responding
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @slot: enclosure slot id
+ * @handle: device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponsive_sas_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 slot, u16 handle)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+ struct scsi_target *starget;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->sas_address == sas_address &&
+ sas_device->slot == slot) {
+ sas_device->responding = 1;
+ starget = sas_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ if (starget)
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_addr(0x%016llx), "
+ "enclosure logical id(0x%016llx), "
+ "slot(%d)\n", handle,
+ (unsigned long long)sas_device->sas_address,
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->handle == handle)
+ goto out;
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ sas_device->handle);
+ sas_device->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_sas_devices -
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
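+ *
+ * Iterates SAS device page 0 with the GET_NEXT_HANDLE form (starting at
+ * 0xFFFF) and marks every end device still reported as responding.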
+ */ +static void +_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + u16 handle; + u32 device_info; + + pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name); + + if (list_empty(&ioc->sas_device_list)) + goto out; + + handle = 0xFFFF; + while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + break; + handle = le16_to_cpu(sas_device_pg0.DevHandle); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if (!(_scsih_is_end_device(device_info))) + continue; + _scsih_mark_responding_sas_device(ioc, + le64_to_cpu(sas_device_pg0.SASAddress), + le16_to_cpu(sas_device_pg0.Slot), handle); + } + + out: + pr_info(MPT3SAS_FMT "search for end-devices: complete\n", + ioc->name); +} + +/** + * _scsih_mark_responding_raid_device - mark a raid_device as responding + * @ioc: per adapter object + * @wwid: world wide identifier for raid volume + * @handle: device handle + * + * After host reset, find out whether devices are still responding. + * Used in _scsih_remove_unresponsive_raid_devices. + * + * Return nothing. + */ +static void +_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid, + u16 handle) +{ + struct MPT3SAS_TARGET *sas_target_priv_data; + struct scsi_target *starget; + struct _raid_device *raid_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->wwid == wwid && raid_device->starget) { + starget = raid_device->starget; + if (starget && starget->hostdata) { + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->deleted = 0; + } else + sas_target_priv_data = NULL; + raid_device->responding = 1; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + starget_printk(KERN_INFO, raid_device->starget, + "handle(0x%04x), wwid(0x%016llx)\n", handle, + (unsigned long long)raid_device->wwid); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + if (raid_device->handle == handle) { + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + return; + } + pr_info("\thandle changed from(0x%04x)!!!\n", + raid_device->handle); + raid_device->handle = handle; + if (sas_target_priv_data) + sas_target_priv_data->handle = handle; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return; + } + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +/** + * _scsih_search_responding_raid_devices - + * @ioc: per adapter object + * + * After host reset, find out whether devices are still responding. + * If not remove. + * + * Return nothing. 
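+ *
+ * Walks raid volume page 1 with the GET_NEXT_HANDLE form, marks volumes
+ * in the online/optimal/degraded states as responding, and rebuilds the
+ * pd_handles bitmap from physical disk page 0.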
+ */
+static void
+_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2RaidVolPage1_t volume_pg1;
+ Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 handle;
+ u8 phys_disk_num;
+
+ if (!ioc->ir_firmware)
+ return;
+
+ pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
+ ioc->name);
+
+ if (list_empty(&ioc->raid_device_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ break;
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
+
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
+ _scsih_mark_responding_raid_device(ioc,
+ le64_to_cpu(volume_pg1.WWID), handle);
+ }
+
+ /* refresh the pd_handles */
+ phys_disk_num = 0xFF;
+ memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
+ while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+ phys_disk_num))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ break;
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ set_bit(handle, ioc->pd_handles);
+ }
+ out:
+ pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
+ ioc->name);
+}
+
+/**
+ * _scsih_mark_responding_expander - mark an expander as responding
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: expander device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponsive_expanders.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle)
+{
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->sas_address != sas_address)
+ continue;
+ sas_expander->responding = 1;
+ if (sas_expander->handle == handle)
+ goto out;
+ pr_info("\texpander(0x%016llx): handle changed" \
+ " from(0x%04x) to (0x%04x)!!!\n",
+ (unsigned long long)sas_expander->sas_address,
+ sas_expander->handle, handle);
+ sas_expander->handle = handle;
+ for (i = 0 ; i < sas_expander->num_phys ; i++)
+ sas_expander->phy[i].handle = handle;
+ goto out;
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_expanders -
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not remove.
+ *
+ * Return nothing.
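+ *
+ * Iterates expander page 0 with the GET_NEXT_HNDL form and marks every
+ * expander that still answers as responding.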
+ */ +static void +_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ExpanderPage0_t expander_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + u64 sas_address; + u16 handle; + + pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name); + + if (list_empty(&ioc->sas_expander_list)) + goto out; + + handle = 0xFFFF; + while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, + MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + break; + + handle = le16_to_cpu(expander_pg0.DevHandle); + sas_address = le64_to_cpu(expander_pg0.SASAddress); + pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n", + handle, + (unsigned long long)sas_address); + _scsih_mark_responding_expander(ioc, sas_address, handle); + } + + out: + pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name); +} + +/** + * _scsih_remove_unresponding_sas_devices - removing unresponding devices + * @ioc: per adapter object + * + * Return nothing. + */ +static void +_scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc) +{ + struct _sas_device *sas_device, *sas_device_next; + struct _sas_node *sas_expander, *sas_expander_next; + struct _raid_device *raid_device, *raid_device_next; + struct list_head tmp_list; + unsigned long flags; + + pr_info(MPT3SAS_FMT "removing unresponding devices: start\n", + ioc->name); + + /* removing unresponding end devices */ + pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n", + ioc->name); + list_for_each_entry_safe(sas_device, sas_device_next, + &ioc->sas_device_list, list) { + if (!sas_device->responding) + mpt3sas_device_remove_by_sas_address(ioc, + sas_device->sas_address); + else + sas_device->responding = 0; + } + + /* removing unresponding volumes */ + if (ioc->ir_firmware) { + pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n", + ioc->name); + list_for_each_entry_safe(raid_device, raid_device_next, + &ioc->raid_device_list, list) { + if (!raid_device->responding) + _scsih_sas_volume_delete(ioc, + raid_device->handle); + else + raid_device->responding = 0; + } + } + + /* removing unresponding expanders */ + pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n", + ioc->name); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + INIT_LIST_HEAD(&tmp_list); + list_for_each_entry_safe(sas_expander, sas_expander_next, + &ioc->sas_expander_list, list) { + if (!sas_expander->responding) + list_move_tail(&sas_expander->list, &tmp_list); + else + sas_expander->responding = 0; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list, + list) { + list_del(&sas_expander->list); + _scsih_expander_node_remove(ioc, sas_expander); + } + + pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n", + ioc->name); + + /* unblock devices */ + _scsih_ublock_io_all_device(ioc); +} + +static void +_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander, u16 handle) +{ + Mpi2ExpanderPage1_t expander_pg1; + Mpi2ConfigReply_t mpi_reply; + int i; + + for (i = 0 ; i < sas_expander->num_phys ; i++) { + if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, + &expander_pg1, i, handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + mpt3sas_transport_update_links(ioc, sas_expander->sas_address, + 
le16_to_cpu(expander_pg1.AttachedDevHandle), i, + expander_pg1.NegotiatedLinkRate >> 4); + } +} + +/** + * _scsih_scan_for_devices_after_reset - scan for devices after host reset + * @ioc: per adapter object + * + * Return nothing. + */ +static void +_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ExpanderPage0_t expander_pg0; + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2RaidVolPage1_t volume_pg1; + Mpi2RaidVolPage0_t volume_pg0; + Mpi2RaidPhysDiskPage0_t pd_pg0; + Mpi2EventIrConfigElement_t element; + Mpi2ConfigReply_t mpi_reply; + u8 phys_disk_num; + u16 ioc_status; + u16 handle, parent_handle; + u64 sas_address; + struct _sas_device *sas_device; + struct _sas_node *expander_device; + static struct _raid_device *raid_device; + u8 retry_count; + unsigned long flags; + + pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name); + + _scsih_sas_host_refresh(ioc); + + pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name); + + /* expanders */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, + MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \ + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(expander_pg0.DevHandle); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + expander_device = mpt3sas_scsih_expander_find_by_sas_address( + ioc, le64_to_cpu(expander_pg0.SASAddress)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (expander_device) + _scsih_refresh_expander_links(ioc, expander_device, + handle); + else { + pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \ + "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, + handle, (unsigned long long) + le64_to_cpu(expander_pg0.SASAddress)); + _scsih_expander_add(ioc, handle); + pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \ + "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, + handle, (unsigned long long) + le64_to_cpu(expander_pg0.SASAddress)); + } + } + + pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n", + ioc->name); + + if (!ioc->ir_firmware) + goto skip_to_sas; + + pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name); + + /* phys disk */ + phys_disk_num = 0xFF; + while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, + phys_disk_num))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\ + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + phys_disk_num = pd_pg0.PhysDiskNum; + handle = le16_to_cpu(pd_pg0.DevHandle); + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = _scsih_sas_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) + continue; + if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + handle) != 0) + continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \ + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + 
le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, + &sas_address)) { + pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \ + " handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + mpt3sas_transport_update_links(ioc, sas_address, + handle, sas_device_pg0.PhyNum, + MPI2_SAS_NEG_LINK_RATE_1_5); + set_bit(handle, ioc->pd_handles); + retry_count = 0; + /* This will retry adding the end device. + * _scsih_add_device() will decide on retries and + * return "1" when it should be retried + */ + while (_scsih_add_device(ioc, handle, retry_count++, + 1)) { + ssleep(1); + } + pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \ + " handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + } + } + + pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n", + ioc->name); + + pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name); + + /* volumes */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \ + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(volume_pg1.DevHandle); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_wwid(ioc, + le64_to_cpu(volume_pg1.WWID)); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (raid_device) + continue; + if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, + &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + sizeof(Mpi2RaidVolPage0_t))) + continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \ + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || + volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || + volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { + memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t)); + element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; + element.VolDevHandle = volume_pg1.DevHandle; + pr_info(MPT3SAS_FMT + "\tBEFORE adding volume: handle (0x%04x)\n", + ioc->name, volume_pg1.DevHandle); + _scsih_sas_volume_add(ioc, &element); + pr_info(MPT3SAS_FMT + "\tAFTER adding volume: handle (0x%04x)\n", + ioc->name, volume_pg1.DevHandle); + } + } + + pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n", + ioc->name); + + skip_to_sas: + + pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n", + ioc->name); + + /* sas devices */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\ + " ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = 
le16_to_cpu(sas_device_pg0.DevHandle); + if (!(_scsih_is_end_device( + le32_to_cpu(sas_device_pg0.DeviceInfo)))) + continue; + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + le64_to_cpu(sas_device_pg0.SASAddress)); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) + continue; + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { + pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \ + "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, + handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + mpt3sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); + retry_count = 0; + /* This will retry adding the end device. + * _scsih_add_device() will decide on retries and + * return "1" when it should be retried + */ + while (_scsih_add_device(ioc, handle, retry_count++, + 0)) { + ssleep(1); + } + pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \ + "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, + handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + } + } + pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n", + ioc->name); + + pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name); +} +/** + * mpt3sas_scsih_reset_handler - reset callback handler (for scsih) + * @ioc: per adapter object + * @reset_phase: phase + * + * The handler for doing any required cleanup or initialization. + * + * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET, + * MPT3_IOC_DONE_RESET + * + * Return nothing. + */ +void +mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) +{ + switch (reset_phase) { + case MPT3_IOC_PRE_RESET: + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__)); + break; + case MPT3_IOC_AFTER_RESET: + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__)); + if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) { + ioc->scsih_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid); + complete(&ioc->scsih_cmds.done); + } + if (ioc->tm_cmds.status & MPT3_CMD_PENDING) { + ioc->tm_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid); + complete(&ioc->tm_cmds.done); + } + + _scsih_fw_event_cleanup_queue(ioc); + _scsih_flush_running_cmds(ioc); + break; + case MPT3_IOC_DONE_RESET: + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); + if ((!ioc->is_driver_loading) && !(disable_discovery > 0 && + !ioc->sas_hba.num_phys)) { + _scsih_prep_device_scan(ioc); + _scsih_search_responding_sas_devices(ioc); + _scsih_search_responding_raid_devices(ioc); + _scsih_search_responding_expanders(ioc); + _scsih_error_recovery_delete_devices(ioc); + } + break; + } +} + +/** + * _mpt3sas_fw_work - delayed task for processing firmware events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. 
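+ *
+ * Aside (editorial annotation, not part of the driver): in the reset
+ * handler above, the MPT3_IOC_AFTER_RESET phase fails any internal
+ * command still in flight by completing it with a flag in place of a
+ * reply:
+ *
+ *     if (cmds->status & MPT3_CMD_PENDING) {
+ *             cmds->status |= MPT3_CMD_RESET;
+ *             mpt3sas_base_free_smid(ioc, cmds->smid);
+ *             complete(&cmds->done);          (wake the waiter early)
+ *     }
+ *
+ * The sleeping issuer then observes MPT3_CMD_RESET rather than
+ * MPT3_CMD_COMPLETE and can bail out instead of waiting for a timeout.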
+ */ +static void +_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) +{ + /* the queue is being flushed so ignore this event */ + if (ioc->remove_host || fw_event->cancel_pending_work || + ioc->pci_error_recovery) { + _scsih_fw_event_free(ioc, fw_event); + return; + } + + switch (fw_event->event) { + case MPT3SAS_PROCESS_TRIGGER_DIAG: + mpt3sas_process_trigger_data(ioc, fw_event->event_data); + break; + case MPT3SAS_REMOVE_UNRESPONDING_DEVICES: + while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery) + ssleep(1); + _scsih_remove_unresponding_sas_devices(ioc); + _scsih_scan_for_devices_after_reset(ioc); + break; + case MPT3SAS_PORT_ENABLE_COMPLETE: + ioc->start_scan = 0; + if (missing_delay[0] != -1 && missing_delay[1] != -1) + mpt3sas_base_update_missing_delay(ioc, missing_delay[0], + missing_delay[1]); + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "port enable: complete from worker thread\n", + ioc->name)); + break; + case MPT3SAS_TURN_ON_FAULT_LED: + _scsih_turn_on_fault_led(ioc, fw_event->device_handle); + break; + case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + _scsih_sas_topology_change_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: + _scsih_sas_device_status_change_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_DISCOVERY: + _scsih_sas_discovery_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: + _scsih_sas_broadcast_primitive_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + _scsih_sas_enclosure_dev_status_change_event(ioc, + fw_event); + break; + case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: + _scsih_sas_ir_config_change_event(ioc, fw_event); + break; + case MPI2_EVENT_IR_VOLUME: + _scsih_sas_ir_volume_event(ioc, fw_event); + break; + case MPI2_EVENT_IR_PHYSICAL_DISK: + _scsih_sas_ir_physical_disk_event(ioc, fw_event); + break; + case MPI2_EVENT_IR_OPERATION_STATUS: + _scsih_sas_ir_operation_status_event(ioc, fw_event); + break; + } + _scsih_fw_event_free(ioc, fw_event); +} + +/** + * _firmware_event_work + * @ioc: per adapter object + * @work: The fw_event_work object + * Context: user. + * + * wrappers for the work thread handling firmware events + * + * Return nothing. + */ + +static void +_firmware_event_work(struct work_struct *work) +{ + struct fw_event_work *fw_event = container_of(work, + struct fw_event_work, work); + + _mpt3sas_fw_work(fw_event->ioc, fw_event); +} + +/** + * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time) + * @ioc: per adapter object + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt. + * + * This function merely adds a new work task into ioc->firmware_event_thread. + * The tasks are worked from _firmware_event_work in user context. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
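+ *
+ * Aside (editorial annotation, not part of the driver):
+ * _firmware_event_work above relies on embedding a work_struct inside
+ * fw_event_work so the workqueue callback can recover the outer object:
+ *
+ *     struct fw_event_work {
+ *             struct work_struct work;        (embedded, not a pointer)
+ *             ...
+ *     };
+ *
+ *     struct fw_event_work *fw_event =
+ *             container_of(work, struct fw_event_work, work);
+ *
+ * container_of() subtracts the offset of .work from the pointer the
+ * workqueue hands back, yielding the fw_event_work that was queued.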
+ */ +u8 +mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, + u32 reply) +{ + struct fw_event_work *fw_event; + Mpi2EventNotificationReply_t *mpi_reply; + u16 event; + u16 sz; + + /* events turned off due to host reset or driver unloading */ + if (ioc->remove_host || ioc->pci_error_recovery) + return 1; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + + if (unlikely(!mpi_reply)) { + pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 1; + } + + event = le16_to_cpu(mpi_reply->Event); + + if (event != MPI2_EVENT_LOG_ENTRY_ADDED) + mpt3sas_trigger_event(ioc, event, 0); + + switch (event) { + /* handle these */ + case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: + { + Mpi2EventDataSasBroadcastPrimitive_t *baen_data = + (Mpi2EventDataSasBroadcastPrimitive_t *) + mpi_reply->EventData; + + if (baen_data->Primitive != + MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) + return 1; + + if (ioc->broadcast_aen_busy) { + ioc->broadcast_aen_pending++; + return 1; + } else + ioc->broadcast_aen_busy = 1; + break; + } + + case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + _scsih_check_topo_delete_events(ioc, + (Mpi2EventDataSasTopologyChangeList_t *) + mpi_reply->EventData); + break; + case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: + _scsih_check_ir_config_unhide_events(ioc, + (Mpi2EventDataIrConfigChangeList_t *) + mpi_reply->EventData); + break; + case MPI2_EVENT_IR_VOLUME: + _scsih_check_volume_delete_events(ioc, + (Mpi2EventDataIrVolume_t *) + mpi_reply->EventData); + break; + + case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: + case MPI2_EVENT_IR_OPERATION_STATUS: + case MPI2_EVENT_SAS_DISCOVERY: + case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + case MPI2_EVENT_IR_PHYSICAL_DISK: + break; + + default: /* ignore the rest */ + return 1; + } + + fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC); + if (!fw_event) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 1; + } + sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; + fw_event->event_data = kzalloc(sz, GFP_ATOMIC); + if (!fw_event->event_data) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + kfree(fw_event); + return 1; + } + + memcpy(fw_event->event_data, mpi_reply->EventData, sz); + fw_event->ioc = ioc; + fw_event->VF_ID = mpi_reply->VF_ID; + fw_event->VP_ID = mpi_reply->VP_ID; + fw_event->event = event; + _scsih_fw_event_add(ioc, fw_event); + return 1; +} + +/* shost template */ +static struct scsi_host_template scsih_driver_template = { + .module = THIS_MODULE, + .name = "Fusion MPT SAS Host", + .proc_name = MPT3SAS_DRIVER_NAME, + .queuecommand = _scsih_qcmd, + .target_alloc = _scsih_target_alloc, + .slave_alloc = _scsih_slave_alloc, + .slave_configure = _scsih_slave_configure, + .target_destroy = _scsih_target_destroy, + .slave_destroy = _scsih_slave_destroy, + .scan_finished = _scsih_scan_finished, + .scan_start = _scsih_scan_start, + .change_queue_depth = _scsih_change_queue_depth, + .change_queue_type = _scsih_change_queue_type, + .eh_abort_handler = _scsih_abort, + .eh_device_reset_handler = _scsih_dev_reset, + .eh_target_reset_handler = _scsih_target_reset, + .eh_host_reset_handler = _scsih_host_reset, + .bios_param = _scsih_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = MPT3SAS_SG_DEPTH, + .max_sectors = 32767, + .cmd_per_lun = 7, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = mpt3sas_host_attrs, + .sdev_attrs = mpt3sas_dev_attrs, +}; + 
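+/*
+ * Aside (editorial annotation, not part of the driver): this template is
+ * handed to scsi_host_alloc() in _scsih_probe() below together with the
+ * size of the driver-private soft state, and shost_priv() then returns
+ * that trailing per-host area. In essence:
+ *
+ *     shost = scsi_host_alloc(&scsih_driver_template,
+ *         sizeof(struct MPT3SAS_ADAPTER));
+ *     ioc = shost_priv(shost);        (i.e. (void *)shost->hostdata)
+ *
+ * so every callback above can go from a Scsi_Host straight to its
+ * MPT3SAS_ADAPTER without a global lookup table.
+ */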
+/** + * _scsih_expander_node_remove - removing expander device from list. + * @ioc: per adapter object + * @sas_expander: the sas_device object + * Context: Calling function should acquire ioc->sas_node_lock. + * + * Removing object and freeing associated memory from the + * ioc->sas_expander_list. + * + * Return nothing. + */ +static void +_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander) +{ + struct _sas_port *mpt3sas_port, *next; + + /* remove sibling ports attached to this expander */ + list_for_each_entry_safe(mpt3sas_port, next, + &sas_expander->sas_port_list, port_list) { + if (ioc->shost_recovery) + return; + if (mpt3sas_port->remote_identify.device_type == + SAS_END_DEVICE) + mpt3sas_device_remove_by_sas_address(ioc, + mpt3sas_port->remote_identify.sas_address); + else if (mpt3sas_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + mpt3sas_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) + mpt3sas_expander_remove(ioc, + mpt3sas_port->remote_identify.sas_address); + } + + mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, + sas_expander->sas_address_parent); + + pr_info(MPT3SAS_FMT + "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, + sas_expander->handle, (unsigned long long) + sas_expander->sas_address); + + kfree(sas_expander->phy); + kfree(sas_expander); +} + +/** + * _scsih_ir_shutdown - IR shutdown notification + * @ioc: per adapter object + * + * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that + * the host system is shutting down. + * + * Return nothing. + */ +static void +_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2RaidActionRequest_t *mpi_request; + Mpi2RaidActionReply_t *mpi_reply; + u16 smid; + + /* is IR firmware build loaded ? */ + if (!ioc->ir_firmware) + return; + + /* are there any volumes ? */ + if (list_empty(&ioc->raid_device_list)) + return; + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n", + ioc->name, __func__); + goto out; + } + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); + + mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; + mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; + + pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name); + init_completion(&ioc->scsih_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); + + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + goto out; + } + + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + pr_info(MPT3SAS_FMT + "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + } + + out: + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); +} + +/** + * _scsih_remove - detach and remove add host + * @pdev: PCI device struct + * + * Routine called when unloading the driver. + * Return nothing. 
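+ *
+ * Aside (editorial annotation, not part of the driver):
+ * _scsih_ir_shutdown above is the canonical shape of an internal
+ * (non-scsi-ml) request in this driver:
+ *
+ *     mutex_lock(&cmds->mutex);               (one user at a time)
+ *     cmds->status = MPT3_CMD_PENDING;
+ *     smid = mpt3sas_base_get_smid(ioc, cb_idx);
+ *     ... build the message frame for smid ...
+ *     init_completion(&cmds->done);           (re-arm before posting)
+ *     mpt3sas_base_put_smid_default(ioc, smid);
+ *     wait_for_completion_timeout(&cmds->done, 10*HZ);
+ *     if (!(cmds->status & MPT3_CMD_COMPLETE))
+ *             ... timed out ...
+ *
+ * The matching reply callback sets MPT3_CMD_COMPLETE and calls
+ * complete() to wake the waiter.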
+ */ +static void _scsih_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct _sas_port *mpt3sas_port, *next_port; + struct _raid_device *raid_device, *next; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct workqueue_struct *wq; + unsigned long flags; + + ioc->remove_host = 1; + _scsih_fw_event_cleanup_queue(ioc); + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + wq = ioc->firmware_event_thread; + ioc->firmware_event_thread = NULL; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + + /* release all the volumes */ + _scsih_ir_shutdown(ioc); + list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, + list) { + if (raid_device->starget) { + sas_target_priv_data = + raid_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + scsi_remove_target(&raid_device->starget->dev); + } + pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, raid_device->handle, + (unsigned long long) raid_device->wwid); + _scsih_raid_device_remove(ioc, raid_device); + } + + /* free ports attached to the sas_host */ + list_for_each_entry_safe(mpt3sas_port, next_port, + &ioc->sas_hba.sas_port_list, port_list) { + if (mpt3sas_port->remote_identify.device_type == + SAS_END_DEVICE) + mpt3sas_device_remove_by_sas_address(ioc, + mpt3sas_port->remote_identify.sas_address); + else if (mpt3sas_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + mpt3sas_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) + mpt3sas_expander_remove(ioc, + mpt3sas_port->remote_identify.sas_address); + } + + /* free phys attached to the sas_host */ + if (ioc->sas_hba.num_phys) { + kfree(ioc->sas_hba.phy); + ioc->sas_hba.phy = NULL; + ioc->sas_hba.num_phys = 0; + } + + sas_remove_host(shost); + mpt3sas_base_detach(ioc); + list_del(&ioc->list); + scsi_remove_host(shost); + scsi_host_put(shost); +} + +/** + * _scsih_shutdown - routine called during system shutdown + * @pdev: PCI device struct + * + * Return nothing. + */ +static void +_scsih_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct workqueue_struct *wq; + unsigned long flags; + + ioc->remove_host = 1; + _scsih_fw_event_cleanup_queue(ioc); + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + wq = ioc->firmware_event_thread; + ioc->firmware_event_thread = NULL; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + + _scsih_ir_shutdown(ioc); + mpt3sas_base_detach(ioc); +} + + +/** + * _scsih_probe_boot_devices - reports 1st device + * @ioc: per adapter object + * + * If specified in bios page 2, this routine reports the 1st + * device to scsi-ml or the sas transport for persistent boot device + * purposes.
Please refer to function _scsih_determine_boot_device() + */ +static void +_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc) +{ + u8 is_raid; + void *device; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + u16 handle; + u64 sas_address_parent; + u64 sas_address; + unsigned long flags; + int rc; + + /* no Bios, return immediately */ + if (!ioc->bios_pg3.BiosVersion) + return; + + device = NULL; + is_raid = 0; + if (ioc->req_boot_device.device) { + device = ioc->req_boot_device.device; + is_raid = ioc->req_boot_device.is_raid; + } else if (ioc->req_alt_boot_device.device) { + device = ioc->req_alt_boot_device.device; + is_raid = ioc->req_alt_boot_device.is_raid; + } else if (ioc->current_boot_device.device) { + device = ioc->current_boot_device.device; + is_raid = ioc->current_boot_device.is_raid; + } + + if (!device) + return; + + if (is_raid) { + raid_device = device; + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + } else { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = device; + handle = sas_device->handle; + sas_address_parent = sas_device->sas_address_parent; + sas_address = sas_device->sas_address; + list_move_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (!mpt3sas_transport_port_add(ioc, handle, + sas_address_parent)) { + _scsih_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + if (!ioc->is_driver_loading) + mpt3sas_transport_port_remove(ioc, sas_address, + sas_address_parent); + _scsih_sas_device_remove(ioc, sas_device); + } + } +} + +/** + * _scsih_probe_raid - reporting raid volumes to scsi-ml + * @ioc: per adapter object + * + * Called during initial loading of the driver. + */ +static void +_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc) +{ + struct _raid_device *raid_device, *raid_next; + int rc; + + list_for_each_entry_safe(raid_device, raid_next, + &ioc->raid_device_list, list) { + if (raid_device->starget) + continue; + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + } +} + +/** + * _scsih_probe_sas - reporting sas devices to sas transport + * @ioc: per adapter object + * + * Called during initial loading of the driver. + */ +static void +_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc) +{ + struct _sas_device *sas_device, *next; + unsigned long flags; + + /* SAS Device List */ + list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list, + list) { + + if (!mpt3sas_transport_port_add(ioc, sas_device->handle, + sas_device->sas_address_parent)) { + list_del(&sas_device->list); + kfree(sas_device); + continue; + } else if (!sas_device->starget) { + /* + * When async scanning is enabled, it's not possible to + * remove devices while scanning is turned on due to an + * oops in scsi_sysfs_add_sdev()->add_device()-> + * sysfs_addrm_start() + */ + if (!ioc->is_driver_loading) + mpt3sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent); + list_del(&sas_device->list); + kfree(sas_device); + continue; + } + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_move_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } +} + +/** + * _scsih_probe_devices - probing for devices + * @ioc: per adapter object + * + * Called during initial loading of the driver.
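+ *
+ * Aside (editorial annotation, not part of the driver): _scsih_probe_sas
+ * above walks sas_device_init_list with list_for_each_entry_safe because
+ * entries are deleted or moved mid-walk; the _safe variant caches the
+ * next node first:
+ *
+ *     list_for_each_entry_safe(sas_device, next, &init_list, list) {
+ *             if (port_add_failed) {
+ *                     list_del(&sas_device->list);
+ *                     kfree(sas_device);      (a plain for_each would
+ *                                              now chase freed memory)
+ *             }
+ *     }
+ *
+ * Devices that survive are list_move_tail()ed onto the live
+ * sas_device_list under sas_device_lock.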
+ */ +static void +_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc) +{ + u16 volume_mapping_flags; + + if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) + return; /* return when IOC doesn't support initiator mode */ + + _scsih_probe_boot_devices(ioc); + + if (ioc->ir_firmware) { + volume_mapping_flags = + le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & + MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; + if (volume_mapping_flags == + MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { + _scsih_probe_raid(ioc); + _scsih_probe_sas(ioc); + } else { + _scsih_probe_sas(ioc); + _scsih_probe_raid(ioc); + } + } else + _scsih_probe_sas(ioc); +} + +/** + * _scsih_scan_start - scsi lld callback for .scan_start + * @shost: SCSI host pointer + * + * The shost has the ability to discover targets on its own instead + * of scanning the entire bus. In our implementation, we will kick off + * firmware discovery. + */ +static void +_scsih_scan_start(struct Scsi_Host *shost) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + int rc; + if (diag_buffer_enable != -1 && diag_buffer_enable != 0) + mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable); + + if (disable_discovery > 0) + return; + + ioc->start_scan = 1; + rc = mpt3sas_port_enable(ioc); + + if (rc != 0) + pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name); +} + +/** + * _scsih_scan_finished - scsi lld callback for .scan_finished + * @shost: SCSI host pointer + * @time: elapsed time of the scan in jiffies + * + * This function will be called periodically until it returns 1 with the + * scsi_host and the elapsed time of the scan in jiffies. In our + * implementation, we wait for firmware discovery to complete, then return 1. + */ +static int +_scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + if (disable_discovery > 0) { + ioc->is_driver_loading = 0; + ioc->wait_for_discovery_to_complete = 0; + return 1; + } + + if (time >= (300 * HZ)) { + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + pr_info(MPT3SAS_FMT + "port enable: FAILED with timeout (timeout=300s)\n", + ioc->name); + ioc->is_driver_loading = 0; + return 1; + } + + if (ioc->start_scan) + return 0; + + if (ioc->start_scan_failed) { + pr_info(MPT3SAS_FMT + "port enable: FAILED with (ioc_status=0x%08x)\n", + ioc->name, ioc->start_scan_failed); + ioc->is_driver_loading = 0; + ioc->wait_for_discovery_to_complete = 0; + ioc->remove_host = 1; + return 1; + } + + pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + + if (ioc->wait_for_discovery_to_complete) { + ioc->wait_for_discovery_to_complete = 0; + _scsih_probe_devices(ioc); + } + mpt3sas_base_start_watchdog(ioc); + ioc->is_driver_loading = 0; + return 1; +} + +/** + * _scsih_probe - attach and add scsi host + * @pdev: PCI device struct + * @id: pci device id + * + * Returns 0 success, anything else error.
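+ *
+ * Aside (editorial annotation, not part of the driver): the scsi
+ * midlayer drives the two callbacks above roughly like this (simplified
+ * from scsi_scan.c):
+ *
+ *     unsigned long start = jiffies;
+ *     shost->hostt->scan_start(shost);
+ *     while (!shost->hostt->scan_finished(shost, jiffies - start))
+ *             msleep(10);
+ *
+ * which is why _scsih_scan_finished() treats its argument as elapsed
+ * jiffies and gives up once it exceeds 300 * HZ (five minutes).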
+ */ +static int +_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct MPT3SAS_ADAPTER *ioc; + struct Scsi_Host *shost; + + shost = scsi_host_alloc(&scsih_driver_template, + sizeof(struct MPT3SAS_ADAPTER)); + if (!shost) + return -ENODEV; + + /* init local params */ + ioc = shost_priv(shost); + memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); + INIT_LIST_HEAD(&ioc->list); + list_add_tail(&ioc->list, &mpt3sas_ioc_list); + ioc->shost = shost; + ioc->id = mpt_ids++; + sprintf(ioc->name, "%s%d", MPT3SAS_DRIVER_NAME, ioc->id); + ioc->pdev = pdev; + ioc->scsi_io_cb_idx = scsi_io_cb_idx; + ioc->tm_cb_idx = tm_cb_idx; + ioc->ctl_cb_idx = ctl_cb_idx; + ioc->base_cb_idx = base_cb_idx; + ioc->port_enable_cb_idx = port_enable_cb_idx; + ioc->transport_cb_idx = transport_cb_idx; + ioc->scsih_cb_idx = scsih_cb_idx; + ioc->config_cb_idx = config_cb_idx; + ioc->tm_tr_cb_idx = tm_tr_cb_idx; + ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; + ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; + ioc->logging_level = logging_level; + ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; + /* misc semaphores and spin locks */ + mutex_init(&ioc->reset_in_progress_mutex); + spin_lock_init(&ioc->ioc_reset_in_progress_lock); + spin_lock_init(&ioc->scsi_lookup_lock); + spin_lock_init(&ioc->sas_device_lock); + spin_lock_init(&ioc->sas_node_lock); + spin_lock_init(&ioc->fw_event_lock); + spin_lock_init(&ioc->raid_device_lock); + spin_lock_init(&ioc->diag_trigger_lock); + + INIT_LIST_HEAD(&ioc->sas_device_list); + INIT_LIST_HEAD(&ioc->sas_device_init_list); + INIT_LIST_HEAD(&ioc->sas_expander_list); + INIT_LIST_HEAD(&ioc->fw_event_list); + INIT_LIST_HEAD(&ioc->raid_device_list); + INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); + INIT_LIST_HEAD(&ioc->delayed_tr_list); + INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); + + /* init shost parameters */ + shost->max_cmd_len = 32; + shost->max_lun = max_lun; + shost->transportt = mpt3sas_transport_template; + shost->unique_id = ioc->id; + + if (max_sectors != 0xFFFF) { + if (max_sectors < 64) { + shost->max_sectors = 64; + pr_warn(MPT3SAS_FMT "Invalid value %d passed " \ + "for max_sectors, range is 64 to 32767. Assigning " + "value of 64.\n", ioc->name, max_sectors); + } else if (max_sectors > 32767) { + shost->max_sectors = 32767; + pr_warn(MPT3SAS_FMT "Invalid value %d passed " \ + "for max_sectors, range is 64 to 32767. 
Assigning " + "default value of 32767.\n", ioc->name, + max_sectors); + } else { + shost->max_sectors = max_sectors & 0xFFFE; + pr_info(MPT3SAS_FMT + "The max_sectors value is set to %d\n", + ioc->name, shost->max_sectors); + } + } + + if ((scsi_add_host(shost, &pdev->dev))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + list_del(&ioc->list); + goto out_add_shost_fail; + } + + /* register EEDP capabilities with SCSI layer */ + if (prot_mask > 0) + scsi_host_set_prot(shost, prot_mask); + else + scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIF_TYPE2_PROTECTION + | SHOST_DIF_TYPE3_PROTECTION); + + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + + /* event thread */ + snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), + "fw_event%d", ioc->id); + ioc->firmware_event_thread = create_singlethread_workqueue( + ioc->firmware_event_name); + if (!ioc->firmware_event_thread) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_thread_fail; + } + + ioc->is_driver_loading = 1; + if ((mpt3sas_base_attach(ioc))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_attach_fail; + } + scsi_scan_host(shost); + return 0; + + out_attach_fail: + destroy_workqueue(ioc->firmware_event_thread); + out_thread_fail: + list_del(&ioc->list); + scsi_remove_host(shost); + out_add_shost_fail: + scsi_host_put(shost); + return -ENODEV; +} + +#ifdef CONFIG_PM +/** + * _scsih_suspend - power management suspend main entry point + * @pdev: PCI device struct + * @state: PM state change to (usually PCI_D3) + * + * Returns 0 success, anything else error. + */ +static int +_scsih_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + pci_power_t device_state; + + mpt3sas_base_stop_watchdog(ioc); + flush_scheduled_work(); + scsi_block_requests(shost); + device_state = pci_choose_state(pdev, state); + pr_info(MPT3SAS_FMT + "pdev=0x%p, slot=%s, entering operating state [D%d]\n", + ioc->name, pdev, pci_name(pdev), device_state); + + pci_save_state(pdev); + mpt3sas_base_free_resources(ioc); + pci_set_power_state(pdev, device_state); + return 0; +} + +/** + * _scsih_resume - power management resume main entry point + * @pdev: PCI device struct + * + * Returns 0 success, anything else error. + */ +static int +_scsih_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + pci_power_t device_state = pdev->current_state; + int r; + + pr_info(MPT3SAS_FMT + "pdev=0x%p, slot=%s, previous operating state [D%d]\n", + ioc->name, pdev, pci_name(pdev), device_state); + + pci_set_power_state(pdev, PCI_D0); + pci_enable_wake(pdev, PCI_D0, 0); + pci_restore_state(pdev); + ioc->pdev = pdev; + r = mpt3sas_base_map_resources(ioc); + if (r) + return r; + + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET); + scsi_unblock_requests(shost); + mpt3sas_base_start_watchdog(ioc); + return 0; +} +#endif /* CONFIG_PM */ + +/** + * _scsih_pci_error_detected - Called when a PCI error is detected. + * @pdev: PCI device struct + * @state: PCI channel state + * + * Description: Called when a PCI error is detected. 
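+ *
+ * Aside (editorial annotation, not part of the driver): the suspend and
+ * resume paths above are mirror images; the ordering is what matters:
+ *
+ *     suspend:  pci_save_state(pdev);
+ *               mpt3sas_base_free_resources(ioc);
+ *               pci_set_power_state(pdev, device_state);
+ *
+ *     resume:   pci_set_power_state(pdev, PCI_D0);
+ *               pci_restore_state(pdev);
+ *               mpt3sas_base_map_resources(ioc);
+ *               ... hard reset to re-initialize the firmware ...
+ *
+ * Config space must be saved while the device is still in D0, and must
+ * be restored before any MMIO is remapped and touched.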
+ * + * Return value: + * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT + */ +static pci_ers_result_t +_scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n", + ioc->name, state); + + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + /* Fatal error, prepare for slot reset */ + ioc->pci_error_recovery = 1; + scsi_block_requests(ioc->shost); + mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_free_resources(ioc); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + /* Permanent error, prepare for device removal */ + ioc->pci_error_recovery = 1; + mpt3sas_base_stop_watchdog(ioc); + _scsih_flush_running_cmds(ioc); + return PCI_ERS_RESULT_DISCONNECT; + } + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * _scsih_pci_slot_reset - Called when PCI slot has been reset. + * @pdev: PCI device struct + * + * Description: This routine is called by the pci error recovery + * code after the PCI slot has been reset, just before we + * should resume normal operations. + */ +static pci_ers_result_t +_scsih_pci_slot_reset(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + int rc; + + pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n", + ioc->name); + + ioc->pci_error_recovery = 0; + ioc->pdev = pdev; + pci_restore_state(pdev); + rc = mpt3sas_base_map_resources(ioc); + if (rc) + return PCI_ERS_RESULT_DISCONNECT; + + rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + + pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name, + (rc == 0) ? "success" : "failed"); + + if (!rc) + return PCI_ERS_RESULT_RECOVERED; + else + return PCI_ERS_RESULT_DISCONNECT; +} + +/** + * _scsih_pci_resume() - resume normal ops after PCI reset + * @pdev: pointer to PCI device + * + * Called when the error recovery driver tells us that its + * OK to resume normal operation. Use completion to allow + * halted scsi ops to resume. + */ +static void +_scsih_pci_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name); + + pci_cleanup_aer_uncorrect_error_status(pdev); + mpt3sas_base_start_watchdog(ioc); + scsi_unblock_requests(ioc->shost); +} + +/** + * _scsih_pci_mmio_enabled - Enable MMIO and dump debug registers + * @pdev: pointer to PCI device + */ +static pci_ers_result_t +_scsih_pci_mmio_enabled(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n", + ioc->name); + + /* TODO - dump whatever for debugging purposes */ + + /* Request a slot reset. 
*/ + return PCI_ERS_RESULT_NEED_RESET; +} + +/* raid transport support */ +static struct raid_function_template mpt3sas_raid_functions = { + .cookie = &scsih_driver_template, + .is_raid = _scsih_is_raid, + .get_resync = _scsih_get_resync, + .get_state = _scsih_get_state, +}; + +static struct pci_error_handlers _scsih_err_handler = { + .error_detected = _scsih_pci_error_detected, + .mmio_enabled = _scsih_pci_mmio_enabled, + .slot_reset = _scsih_pci_slot_reset, + .resume = _scsih_pci_resume, +}; + +static struct pci_driver scsih_driver = { + .name = MPT3SAS_DRIVER_NAME, + .id_table = scsih_pci_table, + .probe = _scsih_probe, + .remove = _scsih_remove, + .shutdown = _scsih_shutdown, + .err_handler = &_scsih_err_handler, +#ifdef CONFIG_PM + .suspend = _scsih_suspend, + .resume = _scsih_resume, +#endif +}; + + +/** + * _scsih_init - main entry point for this driver. + * + * Returns 0 success, anything else error. + */ +static int __init +_scsih_init(void) +{ + int error; + + mpt_ids = 0; + + pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME, + MPT3SAS_DRIVER_VERSION); + + mpt3sas_transport_template = + sas_attach_transport(&mpt3sas_transport_functions); + if (!mpt3sas_transport_template) + return -ENODEV; + +/* raid transport support */ + mpt3sas_raid_template = raid_class_attach(&mpt3sas_raid_functions); + if (!mpt3sas_raid_template) { + sas_release_transport(mpt3sas_transport_template); + return -ENODEV; + } + + mpt3sas_base_initialize_callback_handler(); + + /* queuecommand callback handler */ + scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done); + + /* task management callback handler */ + tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done); + + /* base internal commands callback handler */ + base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done); + port_enable_cb_idx = mpt3sas_base_register_callback_handler( + mpt3sas_port_enable_done); + + /* transport internal commands callback handler */ + transport_cb_idx = mpt3sas_base_register_callback_handler( + mpt3sas_transport_done); + + /* scsih internal commands callback handler */ + scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done); + + /* configuration page API internal commands callback handler */ + config_cb_idx = mpt3sas_base_register_callback_handler( + mpt3sas_config_done); + + /* ctl module callback handler */ + ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done); + + tm_tr_cb_idx = mpt3sas_base_register_callback_handler( + _scsih_tm_tr_complete); + + tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler( + _scsih_tm_volume_tr_complete); + + tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler( + _scsih_sas_control_complete); + + mpt3sas_ctl_init(); + + error = pci_register_driver(&scsih_driver); + if (error) { + /* raid transport support */ + raid_class_release(mpt3sas_raid_template); + sas_release_transport(mpt3sas_transport_template); + } + + return error; +} + +/** + * _scsih_exit - exit point for this driver (when it is a module). + * + * Returns 0 success, anything else error.
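+ *
+ * Aside (editorial annotation, not part of the driver): the
+ * register/release pairs in _scsih_init/_scsih_exit manage what is, in
+ * essence, a table of reply handlers indexed by a u8 carried in each
+ * request descriptor. Sketch based on the mpt3sas_base naming (the
+ * actual table lives in mpt3sas_base.c):
+ *
+ *     typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ *             u8 msix_index, u32 reply);
+ *     static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
+ *
+ *     ... registration hands out a free index, and the interrupt
+ *     path dispatches with ...
+ *     mpt_callbacks[cb_idx](ioc, smid, msix_index, reply);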
+ */ +static void __exit +_scsih_exit(void) +{ + pr_info("mpt3sas version %s unloading\n", + MPT3SAS_DRIVER_VERSION); + + mpt3sas_ctl_exit(); + + pci_unregister_driver(&scsih_driver); + + + mpt3sas_base_release_callback_handler(scsi_io_cb_idx); + mpt3sas_base_release_callback_handler(tm_cb_idx); + mpt3sas_base_release_callback_handler(base_cb_idx); + mpt3sas_base_release_callback_handler(port_enable_cb_idx); + mpt3sas_base_release_callback_handler(transport_cb_idx); + mpt3sas_base_release_callback_handler(scsih_cb_idx); + mpt3sas_base_release_callback_handler(config_cb_idx); + mpt3sas_base_release_callback_handler(ctl_cb_idx); + + mpt3sas_base_release_callback_handler(tm_tr_cb_idx); + mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx); + mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx); + +/* raid transport support */ + raid_class_release(mpt3sas_raid_template); + sas_release_transport(mpt3sas_transport_template); +} + +module_init(_scsih_init); +module_exit(_scsih_exit); diff --git a/addons/mpt3sas/src/3.10.108/mpt3sas_transport.c b/addons/mpt3sas/src/3.10.108/mpt3sas_transport.c new file mode 100644 index 00000000..87ca2b72 --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpt3sas_transport.c @@ -0,0 +1,2128 @@ +/* + * SAS Transport Layer for MPT (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c + * Copyright (C) 2012 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/sched.h> +#include <linux/workqueue.h> +#include <linux/delay.h> +#include <linux/pci.h> + +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_transport_sas.h> +#include <scsi/scsi_dbg.h> + +#include "mpt3sas_base.h" + +/** + * _transport_sas_node_find_by_sas_address - sas node search + * @ioc: per adapter object + * @sas_address: sas address of expander or sas host + * Context: Calling function should acquire ioc->sas_node_lock. + * + * Search for either hba phys or expander device based on sas_address, then + * returns the sas_node object. + */ +static struct _sas_node * +_transport_sas_node_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address) +{ + if (ioc->sas_hba.sas_address == sas_address) + return &ioc->sas_hba; + else + return mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address); +} + +/** + * _transport_convert_phy_link_rate - convert link rate + * @link_rate: link rate returned from mpt firmware + * + * Convert link_rate from mpi fusion into sas_transport form. + */ +static enum sas_linkrate +_transport_convert_phy_link_rate(u8 link_rate) +{ + enum sas_linkrate rc; + + switch (link_rate) { + case MPI2_SAS_NEG_LINK_RATE_1_5: + rc = SAS_LINK_RATE_1_5_GBPS; + break; + case MPI2_SAS_NEG_LINK_RATE_3_0: + rc = SAS_LINK_RATE_3_0_GBPS; + break; + case MPI2_SAS_NEG_LINK_RATE_6_0: + rc = SAS_LINK_RATE_6_0_GBPS; + break; + case MPI25_SAS_NEG_LINK_RATE_12_0: + rc = SAS_LINK_RATE_12_0_GBPS; + break; + case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED: + rc = SAS_PHY_DISABLED; + break; + case MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED: + rc = SAS_LINK_RATE_FAILED; + break; + case MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR: + rc = SAS_SATA_PORT_SELECTOR; + break; + case MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS: + rc = SAS_PHY_RESET_IN_PROGRESS; + break; + + default: + case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE: + case MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE: + rc = SAS_LINK_RATE_UNKNOWN; + break; + } + return rc; +} + +/** + * _transport_set_identify - set identify for phys and end devices + * @ioc: per adapter object + * @handle: device handle + * @identify: sas identify info + * + * Populates sas identify info. + * + * Returns 0 for success, non-zero for failure.
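+ *
+ * Aside (editorial annotation, not part of the driver): note how the
+ * switch in _transport_convert_phy_link_rate() above places "default:"
+ * among the case labels; C allows that, and all three labels share one
+ * body:
+ *
+ *     switch (link_rate) {
+ *     default:
+ *     case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
+ *     case MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
+ *             rc = SAS_LINK_RATE_UNKNOWN;
+ *             break;
+ *     }
+ *
+ * so any rate the driver does not recognize degrades safely to
+ * SAS_LINK_RATE_UNKNOWN instead of leaking a raw firmware value.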
+ */ +static int +_transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle, + struct sas_identify *identify) +{ + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2ConfigReply_t mpi_reply; + u32 device_info; + u32 ioc_status; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT + "handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n", + ioc->name, handle, ioc_status, + __FILE__, __LINE__, __func__); + return -EIO; + } + + memset(identify, 0, sizeof(struct sas_identify)); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + + /* sas_address */ + identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + + /* phy number of the parent device this device is linked to */ + identify->phy_identifier = sas_device_pg0.PhyNum; + + /* device_type */ + switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) { + case MPI2_SAS_DEVICE_INFO_NO_DEVICE: + identify->device_type = SAS_PHY_UNUSED; + break; + case MPI2_SAS_DEVICE_INFO_END_DEVICE: + identify->device_type = SAS_END_DEVICE; + break; + case MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER: + identify->device_type = SAS_EDGE_EXPANDER_DEVICE; + break; + case MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER: + identify->device_type = SAS_FANOUT_EXPANDER_DEVICE; + break; + } + + /* initiator_port_protocols */ + if (device_info & MPI2_SAS_DEVICE_INFO_SSP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & MPI2_SAS_DEVICE_INFO_STP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & MPI2_SAS_DEVICE_INFO_SMP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & MPI2_SAS_DEVICE_INFO_SATA_HOST) + identify->initiator_port_protocols |= SAS_PROTOCOL_SATA; + + /* target_port_protocols */ + if (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + identify->target_port_protocols |= SAS_PROTOCOL_SATA; + + return 0; +} + +/** + * mpt3sas_transport_done - internal transport layer callback handler. + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when sending internal generated transport cmds. + * The callback index passed is `ioc->transport_cb_idx` + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
+ */ +u8 +mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (ioc->transport_cmds.status == MPT3_CMD_NOT_USED) + return 1; + if (ioc->transport_cmds.smid != smid) + return 1; + ioc->transport_cmds.status |= MPT3_CMD_COMPLETE; + if (mpi_reply) { + memcpy(ioc->transport_cmds.reply, mpi_reply, + mpi_reply->MsgLength*4); + ioc->transport_cmds.status |= MPT3_CMD_REPLY_VALID; + } + ioc->transport_cmds.status &= ~MPT3_CMD_PENDING; + complete(&ioc->transport_cmds.done); + return 1; +} + +/* report manufacture request structure */ +struct rep_manu_request { + u8 smp_frame_type; + u8 function; + u8 reserved; + u8 request_length; +}; + +/* report manufacture reply structure */ +struct rep_manu_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x01 */ + u8 function_result; + u8 response_length; + u16 expander_change_count; + u8 reserved0[2]; + u8 sas_format; + u8 reserved2[3]; + u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; + u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; + u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN]; + u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN]; + u16 component_id; + u8 component_revision_id; + u8 reserved3; + u8 vendor_specific[8]; +}; + +/** + * transport_expander_report_manufacture - obtain SMP report_manufacture + * @ioc: per adapter object + * @sas_address: expander sas address + * @edev: the sas_expander_device object + * + * Fills in the sas_expander_device object when SMP port is created. + * + * Returns 0 for success, non-zero for failure. + */ +static int +_transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct sas_expander_device *edev) +{ + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + struct rep_manu_reply *manufacture_reply; + struct rep_manu_request *manufacture_request; + int rc; + u16 smid; + u32 ioc_state; + unsigned long timeleft; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + dma_addr_t data_in_dma; + size_t data_in_sz; + size_t data_out_sz; + u16 wait_state_count; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + + mutex_lock(&ioc->transport_cmds.mutex); + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info(MPT3SAS_FMT "%s: ioc is operational\n", + ioc->name, __func__); + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + data_out_sz = sizeof(struct rep_manu_request); + data_in_sz = sizeof(struct 
rep_manu_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz,
+ &data_out_dma);
+
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + sizeof(struct rep_manu_request);
+
+ manufacture_request = data_out;
+ manufacture_request->smp_frame_type = 0x40;
+ manufacture_request->function = 1;
+ manufacture_request->reserved = 0;
+ manufacture_request->request_length = 0;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->SASAddress = cpu_to_le64(sas_address);
+ mpi_request->RequestDataLength = cpu_to_le16(data_out_sz);
+ psge = &mpi_request->SGL;
+
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - send to sas_addr(0x%016llx)\n",
+ ioc->name, (unsigned long long)sas_address));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+ u8 *tmp;
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct rep_manu_reply))
+ goto out;
+
+ manufacture_reply = data_out + sizeof(struct rep_manu_request);
+ strncpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strncpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strncpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format & 1;
+ if (edev->level) {
+ strncpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
+ }
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz,
+ data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+
+/**
+ * _transport_delete_port - helper function to remove a port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ *
+ * Returns nothing.
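+ *
+ * Editorial sketch (a restatement of the function body below, not added
+ * driver logic): removal dispatches on the device type recorded in
+ * remote_identify when the port was created:
+ *
+ *     if (device_type == SAS_END_DEVICE)
+ *             mpt3sas_device_remove_by_sas_address(ioc, sas_address);
+ *     else    // edge or fanout expander
+ *             mpt3sas_expander_remove(ioc, sas_address);
+ *
+ * MPT_DEBUG_TRANSPORT logging is raised for the duration of the call so
+ * the cascaded phy/port teardown is visible in the kernel log.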
+ */
+static void
+_transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_port *mpt3sas_port)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+ enum sas_device_type device_type =
+ mpt3sas_port->remote_identify.device_type;
+
+ dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
+ "remove: sas_addr(0x%016llx)\n",
+ (unsigned long long) sas_address);
+
+ ioc->logging_level |= MPT_DEBUG_TRANSPORT;
+ if (device_type == SAS_END_DEVICE)
+ mpt3sas_device_remove_by_sas_address(ioc, sas_address);
+ else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ device_type == SAS_FANOUT_EXPANDER_DEVICE)
+ mpt3sas_expander_remove(ioc, sas_address);
+ ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
+}
+
+/**
+ * _transport_delete_phy - helper function to remove a single phy from a port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_delete_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_port *mpt3sas_port, struct _sas_phy *mpt3sas_phy)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long) sas_address, mpt3sas_phy->phy_id);
+
+ list_del(&mpt3sas_phy->port_siblings);
+ mpt3sas_port->num_phys--;
+ sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 0;
+}
+
+/**
+ * _transport_add_phy - helper function to add a single phy to a port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port,
+ struct _sas_phy *mpt3sas_phy)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long)
+ sas_address, mpt3sas_phy->phy_id);
+
+ list_add_tail(&mpt3sas_phy->port_siblings, &mpt3sas_port->phy_list);
+ mpt3sas_port->num_phys++;
+ sas_port_add_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 1;
+}
+
+/**
+ * _transport_add_phy_to_an_existing_port - add a new phy to an existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt3sas_phy: mpt3sas per phy object
+ * @sas_address: sas address of the device/expander where the phy needs to be
+ *	added
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy,
+ u64 sas_address)
+{
+ struct _sas_port *mpt3sas_port;
+ struct _sas_phy *phy_srch;
+
+ if (mpt3sas_phy->phy_belongs_to_port == 1)
+ return;
+
+ list_for_each_entry(mpt3sas_port, &sas_node->sas_port_list,
+ port_list) {
+ if (mpt3sas_port->remote_identify.sas_address !=
+ sas_address)
+ continue;
+ list_for_each_entry(phy_srch, &mpt3sas_port->phy_list,
+ port_siblings) {
+ if (phy_srch == mpt3sas_phy)
+ return;
+ }
+ _transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy);
+ return;
+ }
+
+}
+
+/**
+ * _transport_del_phy_from_an_existing_port - delete phy from existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
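+ *
+ * Editorial sketch of the decision made below (mirrors the function
+ * body, shown here only for orientation): deleting the last phy tears
+ * down the whole port:
+ *
+ *     if (mpt3sas_port->num_phys == 1)
+ *             _transport_delete_port(ioc, mpt3sas_port);
+ *     else
+ *             _transport_delete_phy(ioc, mpt3sas_port, mpt3sas_phy);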
+ */ +static void +_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy) +{ + struct _sas_port *mpt3sas_port, *next; + struct _sas_phy *phy_srch; + + if (mpt3sas_phy->phy_belongs_to_port == 0) + return; + + list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list, + port_list) { + list_for_each_entry(phy_srch, &mpt3sas_port->phy_list, + port_siblings) { + if (phy_srch != mpt3sas_phy) + continue; + + if (mpt3sas_port->num_phys == 1) + _transport_delete_port(ioc, mpt3sas_port); + else + _transport_delete_phy(ioc, mpt3sas_port, + mpt3sas_phy); + return; + } + } +} + +/** + * _transport_sanity_check - sanity check when adding a new port + * @ioc: per adapter object + * @sas_node: sas node object (either expander or sas host) + * @sas_address: sas address of device being added + * + * See the explanation above from _transport_delete_duplicate_port + */ +static void +_transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node, + u64 sas_address) +{ + int i; + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != sas_address) + continue; + if (sas_node->phy[i].phy_belongs_to_port == 1) + _transport_del_phy_from_an_existing_port(ioc, sas_node, + &sas_node->phy[i]); + } +} + +/** + * mpt3sas_transport_port_add - insert port to the list + * @ioc: per adapter object + * @handle: handle of attached device + * @sas_address: sas address of parent expander or sas host + * Context: This function will acquire ioc->sas_node_lock. + * + * Adding new port object to the sas_node->sas_port_list. + * + * Returns mpt3sas_port. + */ +struct _sas_port * +mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u64 sas_address) +{ + struct _sas_phy *mpt3sas_phy, *next; + struct _sas_port *mpt3sas_port; + unsigned long flags; + struct _sas_node *sas_node; + struct sas_rphy *rphy; + int i; + struct sas_port *port; + + mpt3sas_port = kzalloc(sizeof(struct _sas_port), + GFP_KERNEL); + if (!mpt3sas_port) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return NULL; + } + + INIT_LIST_HEAD(&mpt3sas_port->port_list); + INIT_LIST_HEAD(&mpt3sas_port->phy_list); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (!sas_node) { + pr_err(MPT3SAS_FMT + "%s: Could not find parent sas_address(0x%016llx)!\n", + ioc->name, __func__, (unsigned long long)sas_address); + goto out_fail; + } + + if ((_transport_set_identify(ioc, handle, + &mpt3sas_port->remote_identify))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + + if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + + _transport_sanity_check(ioc, sas_node, + mpt3sas_port->remote_identify.sas_address); + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != + mpt3sas_port->remote_identify.sas_address) + continue; + list_add_tail(&sas_node->phy[i].port_siblings, + &mpt3sas_port->phy_list); + mpt3sas_port->num_phys++; + } + + if (!mpt3sas_port->num_phys) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + + port = 
sas_port_alloc_num(sas_node->parent_dev); + if ((sas_port_add(port))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + + list_for_each_entry(mpt3sas_phy, &mpt3sas_port->phy_list, + port_siblings) { + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &port->dev, + "add: handle(0x%04x), sas_addr(0x%016llx), phy(%d)\n", + handle, (unsigned long long) + mpt3sas_port->remote_identify.sas_address, + mpt3sas_phy->phy_id); + sas_port_add_phy(port, mpt3sas_phy->phy); + mpt3sas_phy->phy_belongs_to_port = 1; + } + + mpt3sas_port->port = port; + if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) + rphy = sas_end_device_alloc(port); + else + rphy = sas_expander_alloc(port, + mpt3sas_port->remote_identify.device_type); + + rphy->identify = mpt3sas_port->remote_identify; + if ((sas_rphy_add(rphy))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + } + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &rphy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n", + handle, (unsigned long long) + mpt3sas_port->remote_identify.sas_address); + mpt3sas_port->rphy = rphy; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + /* fill in report manufacture */ + if (mpt3sas_port->remote_identify.device_type == + MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || + mpt3sas_port->remote_identify.device_type == + MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) + _transport_expander_report_manufacture(ioc, + mpt3sas_port->remote_identify.sas_address, + rphy_to_expander_device(rphy)); + return mpt3sas_port; + + out_fail: + list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list, + port_siblings) + list_del(&mpt3sas_phy->port_siblings); + kfree(mpt3sas_port); + return NULL; +} + +/** + * mpt3sas_transport_port_remove - remove port from the list + * @ioc: per adapter object + * @sas_address: sas address of attached device + * @sas_address_parent: sas address of parent expander or sas host + * Context: This function will acquire ioc->sas_node_lock. + * + * Removing object and freeing associated memory from the + * ioc->sas_port_list. + * + * Return nothing. 
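+ *
+ * Hedged usage sketch (the callers are expected to live elsewhere in
+ * the driver, e.g. its scsih layer; variable names here are
+ * illustrative only): add and remove are paired against the same
+ * parent sas address:
+ *
+ *     port = mpt3sas_transport_port_add(ioc, handle, parent_sas_address);
+ *     ...
+ *     mpt3sas_transport_port_remove(ioc, sas_address, parent_sas_address);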
+ */ +void +mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + u64 sas_address_parent) +{ + int i; + unsigned long flags; + struct _sas_port *mpt3sas_port, *next; + struct _sas_node *sas_node; + u8 found = 0; + struct _sas_phy *mpt3sas_phy, *next_phy; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = _transport_sas_node_find_by_sas_address(ioc, + sas_address_parent); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list, + port_list) { + if (mpt3sas_port->remote_identify.sas_address != sas_address) + continue; + found = 1; + list_del(&mpt3sas_port->port_list); + goto out; + } + out: + if (!found) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address == sas_address) + memset(&sas_node->phy[i].remote_identify, 0 , + sizeof(struct sas_identify)); + } + + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + list_for_each_entry_safe(mpt3sas_phy, next_phy, + &mpt3sas_port->phy_list, port_siblings) { + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &mpt3sas_port->port->dev, + "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long) + mpt3sas_port->remote_identify.sas_address, + mpt3sas_phy->phy_id); + mpt3sas_phy->phy_belongs_to_port = 0; + sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy); + list_del(&mpt3sas_phy->port_siblings); + } + sas_port_delete(mpt3sas_port->port); + kfree(mpt3sas_port); +} + +/** + * mpt3sas_transport_add_host_phy - report sas_host phy to transport + * @ioc: per adapter object + * @mpt3sas_phy: mpt3sas per phy object + * @phy_pg0: sas phy page 0 + * @parent_dev: parent device class object + * + * Returns 0 for success, non-zero for failure. 
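+ *
+ * Editorial note: the MPI link-rate bytes pack two values per byte,
+ * minimum rate in the low nibble and maximum rate in the high nibble,
+ * which is why the body below masks with MPI2_SAS_HWRATE_MIN_RATE_MASK
+ * (0x0F) and shifts by 4, roughly:
+ *
+ *     min = _transport_convert_phy_link_rate(rate_byte & 0x0F);
+ *     max = _transport_convert_phy_link_rate(rate_byte >> 4);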
+ */ +int +mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy + *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = mpt3sas_phy->phy_id; + + + INIT_LIST_HEAD(&mpt3sas_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + if ((_transport_set_identify(ioc, mpt3sas_phy->handle, + &mpt3sas_phy->identify))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = mpt3sas_phy->identify; + mpt3sas_phy->attached_handle = le16_to_cpu(phy_pg0.AttachedDevHandle); + if (mpt3sas_phy->attached_handle) + _transport_set_identify(ioc, mpt3sas_phy->attached_handle, + &mpt3sas_phy->remote_identify); + phy->identify.phy_identifier = mpt3sas_phy->phy_id; + phy->negotiated_linkrate = _transport_convert_phy_link_rate( + phy_pg0.NegotiatedLinkRate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + phy->minimum_linkrate_hw = _transport_convert_phy_link_rate( + phy_pg0.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK); + phy->maximum_linkrate_hw = _transport_convert_phy_link_rate( + phy_pg0.HwLinkRate >> 4); + phy->minimum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate >> 4); + + if ((sas_phy_add(phy))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &phy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + mpt3sas_phy->handle, (unsigned long long) + mpt3sas_phy->identify.sas_address, + mpt3sas_phy->attached_handle, + (unsigned long long) + mpt3sas_phy->remote_identify.sas_address); + mpt3sas_phy->phy = phy; + return 0; +} + + +/** + * mpt3sas_transport_add_expander_phy - report expander phy to transport + * @ioc: per adapter object + * @mpt3sas_phy: mpt3sas per phy object + * @expander_pg1: expander page 1 + * @parent_dev: parent device class object + * + * Returns 0 for success, non-zero for failure. 
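+ *
+ * Editorial note: this mirrors mpt3sas_transport_add_host_phy() above;
+ * the only material difference is that the attached handle and link
+ * rates are read from expander page 1 (AttachedDevHandle, HwLinkRate,
+ * ProgrammedLinkRate) instead of from sas phy page 0.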
+ */
+int
+mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mpt3sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
+ &mpt3sas_phy->identify))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mpt3sas_phy->identify;
+ mpt3sas_phy->attached_handle =
+ le16_to_cpu(expander_pg1.AttachedDevHandle);
+ if (mpt3sas_phy->attached_handle)
+ _transport_set_identify(ioc, mpt3sas_phy->attached_handle,
+ &mpt3sas_phy->remote_identify);
+ phy->identify.phy_identifier = mpt3sas_phy->phy_id;
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.NegotiatedLinkRate &
+ MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
+ expander_pg1.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
+ expander_pg1.HwLinkRate >> 4);
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.ProgrammedLinkRate >> 4);
+
+ if ((sas_phy_add(phy))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &phy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ mpt3sas_phy->handle, (unsigned long long)
+ mpt3sas_phy->identify.sas_address,
+ mpt3sas_phy->attached_handle,
+ (unsigned long long)
+ mpt3sas_phy->remote_identify.sas_address);
+ mpt3sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpt3sas_transport_update_links - refresh phy link changes
+ * @ioc: per adapter object
+ * @sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
+ * @phy_number: phy number
+ * @link_rate: new link rate
+ *
+ * Returns nothing.
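+ *
+ * Editorial sketch of the rule applied below: a phy is (re)attached to
+ * a port only when it reports a device handle and a negotiated rate of
+ * at least 1.5 Gb/s; otherwise its remote identify data is cleared:
+ *
+ *     if (handle && link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
+ *             _transport_add_phy_to_an_existing_port(...);
+ *     else
+ *             memset(&mpt3sas_phy->remote_identify, 0,
+ *                     sizeof(struct sas_identify));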
+ */ +void +mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, u16 handle, u8 phy_number, u8 link_rate) +{ + unsigned long flags; + struct _sas_node *sas_node; + struct _sas_phy *mpt3sas_phy; + + if (ioc->shost_recovery || ioc->pci_error_recovery) + return; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + + mpt3sas_phy = &sas_node->phy[phy_number]; + mpt3sas_phy->attached_handle = handle; + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) { + _transport_set_identify(ioc, handle, + &mpt3sas_phy->remote_identify); + _transport_add_phy_to_an_existing_port(ioc, sas_node, + mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address); + } else + memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct + sas_identify)); + + if (mpt3sas_phy->phy) + mpt3sas_phy->phy->negotiated_linkrate = + _transport_convert_phy_link_rate(link_rate); + + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev, + "refresh: parent sas_addr(0x%016llx),\n" + "\tlink_rate(0x%02x), phy(%d)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + (unsigned long long)sas_address, + link_rate, phy_number, handle, (unsigned long long) + mpt3sas_phy->remote_identify.sas_address); +} + +static inline void * +phy_to_ioc(struct sas_phy *phy) +{ + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + return shost_priv(shost); +} + +static inline void * +rphy_to_ioc(struct sas_rphy *rphy) +{ + struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); + return shost_priv(shost); +} + +/* report phy error log structure */ +struct phy_error_log_request { + u8 smp_frame_type; /* 0x40 */ + u8 function; /* 0x11 */ + u8 allocated_response_length; + u8 request_length; /* 02 */ + u8 reserved_1[5]; + u8 phy_identifier; + u8 reserved_2[2]; +}; + +/* report phy error log reply structure */ +struct phy_error_log_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x11 */ + u8 function_result; + u8 response_length; + __be16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 reserved_2[2]; + __be32 invalid_dword; + __be32 running_disparity_error; + __be32 loss_of_dword_sync; + __be32 phy_reset_problem; +}; + +/** + * _transport_get_expander_phy_error_log - return expander counters + * @ioc: per adapter object + * @phy: The sas phy object + * + * Returns 0 for success, non-zero for failure. 
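+ *
+ * Editorial note: the counters in struct phy_error_log_reply above are
+ * declared __be32 because SMP payloads are big-endian on the wire, so
+ * the body below converts each one before publishing it, e.g.:
+ *
+ *     phy->invalid_dword_count =
+ *             be32_to_cpu(phy_error_log_reply->invalid_dword);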
+ * + */ +static int +_transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, + struct sas_phy *phy) +{ + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + struct phy_error_log_request *phy_error_log_request; + struct phy_error_log_reply *phy_error_log_reply; + int rc; + u16 smid; + u32 ioc_state; + unsigned long timeleft; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + u16 wait_state_count; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + + mutex_lock(&ioc->transport_cmds.mutex); + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info(MPT3SAS_FMT "%s: ioc is operational\n", + ioc->name, __func__); + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + sz = sizeof(struct phy_error_log_request) + + sizeof(struct phy_error_log_reply); + data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + + rc = -EINVAL; + memset(data_out, 0, sz); + phy_error_log_request = data_out; + phy_error_log_request->smp_frame_type = 0x40; + phy_error_log_request->function = 0x11; + phy_error_log_request->request_length = 2; + phy_error_log_request->allocated_response_length = 0; + phy_error_log_request->phy_identifier = phy->number; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = 0xFF; + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); + mpi_request->RequestDataLength = + cpu_to_le16(sizeof(struct phy_error_log_request)); + psge = &mpi_request->SGL; + + ioc->build_sg(ioc, psge, data_out_dma, + sizeof(struct phy_error_log_request), + data_out_dma + sizeof(struct phy_error_log_request), + sizeof(struct phy_error_log_reply)); + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n", + ioc->name, (unsigned long long)phy->identify.sas_address, + phy->number)); + init_completion(&ioc->transport_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, + 10*HZ); + + if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SmpPassthroughRequest_t)/4); + if (!(ioc->transport_cmds.status & 
MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "phy_error_log - complete\n", ioc->name)); + + if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { + + mpi_reply = ioc->transport_cmds.reply; + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "phy_error_log - reply data transfer size(%d)\n", + ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength))); + + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct phy_error_log_reply)) + goto out; + + phy_error_log_reply = data_out + + sizeof(struct phy_error_log_request); + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "phy_error_log - function_result(%d)\n", + ioc->name, phy_error_log_reply->function_result)); + + phy->invalid_dword_count = + be32_to_cpu(phy_error_log_reply->invalid_dword); + phy->running_disparity_error_count = + be32_to_cpu(phy_error_log_reply->running_disparity_error); + phy->loss_of_dword_sync_count = + be32_to_cpu(phy_error_log_reply->loss_of_dword_sync); + phy->phy_reset_problem_count = + be32_to_cpu(phy_error_log_reply->phy_reset_problem); + rc = 0; + } else + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "phy_error_log - no reply\n", ioc->name)); + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + out: + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + if (data_out) + pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma); + + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + +/** + * _transport_get_linkerrors - return phy counters for both hba and expanders + * @phy: The sas phy object + * + * Returns 0 for success, non-zero for failure. + * + */ +static int +_transport_get_linkerrors(struct sas_phy *phy) +{ + struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy); + unsigned long flags; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasPhyPage1_t phy_pg1; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return _transport_get_expander_phy_error_log(ioc, phy); + + /* get hba phy error logs */ + if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1, + phy->number))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) + pr_info(MPT3SAS_FMT + "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n", + ioc->name, phy->number, + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); + + phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount); + phy->running_disparity_error_count = + le32_to_cpu(phy_pg1.RunningDisparityErrorCount); + phy->loss_of_dword_sync_count = + le32_to_cpu(phy_pg1.LossDwordSynchCount); + phy->phy_reset_problem_count = + le32_to_cpu(phy_pg1.PhyResetProblemCount); + return 0; +} + +/** + * _transport_get_enclosure_identifier - + * @phy: The sas phy object + * + * Obtain the enclosure logical id for an expander. + * Returns 0 for success, non-zero for failure. 
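+ *
+ * Hedged usage sketch (the sas transport class invokes this through
+ * the function template registered at the bottom of this file; names
+ * here are illustrative). Note that the parameter is actually a
+ * sas_rphy plus an output pointer:
+ *
+ *     u64 enclosure_id;
+ *
+ *     if (!_transport_get_enclosure_identifier(rphy, &enclosure_id))
+ *             // enclosure_id now holds sas_device->enclosure_logical_id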
+ */ +static int +_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) +{ + struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy); + struct _sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + rphy->identify.sas_address); + if (sas_device) { + *identifier = sas_device->enclosure_logical_id; + rc = 0; + } else { + *identifier = 0; + rc = -ENXIO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +/** + * _transport_get_bay_identifier - + * @phy: The sas phy object + * + * Returns the slot id for a device that resides inside an enclosure. + */ +static int +_transport_get_bay_identifier(struct sas_rphy *rphy) +{ + struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy); + struct _sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, + rphy->identify.sas_address); + if (sas_device) + rc = sas_device->slot; + else + rc = -ENXIO; + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +/* phy control request structure */ +struct phy_control_request { + u8 smp_frame_type; /* 0x40 */ + u8 function; /* 0x91 */ + u8 allocated_response_length; + u8 request_length; /* 0x09 */ + u16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 phy_operation; + u8 reserved_2[13]; + u64 attached_device_name; + u8 programmed_min_physical_link_rate; + u8 programmed_max_physical_link_rate; + u8 reserved_3[6]; +}; + +/* phy control reply structure */ +struct phy_control_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x11 */ + u8 function_result; + u8 response_length; +}; + +#define SMP_PHY_CONTROL_LINK_RESET (0x01) +#define SMP_PHY_CONTROL_HARD_RESET (0x02) +#define SMP_PHY_CONTROL_DISABLE (0x03) + +/** + * _transport_expander_phy_control - expander phy control + * @ioc: per adapter object + * @phy: The sas phy object + * + * Returns 0 for success, non-zero for failure. 
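+ *
+ * The phy_operation argument is one of the SMP_PHY_CONTROL_* opcodes
+ * defined above. Illustrative call, mirroring what
+ * _transport_phy_reset() further below does for expander phys:
+ *
+ *     _transport_expander_phy_control(ioc, phy,
+ *         hard_reset ? SMP_PHY_CONTROL_HARD_RESET :
+ *         SMP_PHY_CONTROL_LINK_RESET);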
+ *
+ */
+static int
+_transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_phy *phy, u8 phy_operation)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct phy_control_request *phy_control_request;
+ struct phy_control_reply *phy_control_reply;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u32 sgl_flags;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ u32 sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ sz = sizeof(struct phy_control_request) +
+ sizeof(struct phy_control_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_control_request = data_out;
+ phy_control_request->smp_frame_type = 0x40;
+ phy_control_request->function = 0x91;
+ phy_control_request->request_length = 9;
+ phy_control_request->allocated_response_length = 0;
+ phy_control_request->phy_identifier = phy->number;
+ phy_control_request->phy_operation = phy_operation;
+ phy_control_request->programmed_min_physical_link_rate =
+ phy->minimum_linkrate << 4;
+ phy_control_request->programmed_max_physical_link_rate =
+ phy->maximum_linkrate << 4;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->RequestDataLength =
+ cpu_to_le16(sizeof(struct phy_control_request));
+ psge = &mpi_request->SGL;
+
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_control_request), data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_control_reply), data_out_dma +
+ sizeof(struct phy_control_request));
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
+ ioc->name, (unsigned long long)phy->identify.sas_address,
+ phy->number, phy_operation));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct phy_control_reply))
+ goto out;
+
+ phy_control_reply = data_out +
+ sizeof(struct phy_control_request);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - function_result(%d)\n",
+ ioc->name, phy_control_reply->function_result));
+
+ rc = 0;
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _transport_phy_reset - reset a hba or expander attached phy
+ * @phy: The sas phy object
+ * @hard_reset: non-zero requests a hard reset, otherwise a link reset
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIoUnitControlReply_t mpi_reply;
+ Mpi2SasIoUnitControlRequest_t mpi_request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_expander_phy_control(ioc, phy,
+ (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
+ SMP_PHY_CONTROL_LINK_RESET);
+
+ /* handle hba phys */
+ memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request.Operation = hard_reset ?
+ MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET; + mpi_request.PhyNum = phy->number; + + if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) + pr_info(MPT3SAS_FMT + "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, phy->number, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); + + return 0; +} + +/** + * _transport_phy_enable - enable/disable phys + * @phy: The sas phy object + * @enable: enable phy when true + * + * Only support sas_host direct attached phys. + * Returns 0 for success, non-zero for failure. + */ +static int +_transport_phy_enable(struct sas_phy *phy, int enable) +{ + struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy); + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + u16 sz; + int rc = 0; + unsigned long flags; + int i, discovery_active; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + /* handle expander phys */ + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return _transport_expander_phy_control(ioc, phy, + (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET : + SMP_PHY_CONTROL_DISABLE); + + /* handle hba phys */ + + /* read sas_iounit page 0 */ + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + + /* unable to enable/disable phys when when discovery is active */ + for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) { + if (sas_iounit_pg0->PhyData[i].PortFlags & + MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) { + pr_err(MPT3SAS_FMT "discovery is active on " \ + "port = %d, phy = %d: unable to enable/disable " + "phys, try again later!\n", ioc->name, + sas_iounit_pg0->PhyData[i].Port, i); + discovery_active = 1; + } + } + + if (discovery_active) { + rc = -EAGAIN; + goto out; + } + + /* read sas_iounit page 1 */ + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != 
MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + + /* copy Port/PortFlags/PhyFlags from page 0 */ + for (i = 0; i < ioc->sas_hba.num_phys ; i++) { + sas_iounit_pg1->PhyData[i].Port = + sas_iounit_pg0->PhyData[i].Port; + sas_iounit_pg1->PhyData[i].PortFlags = + (sas_iounit_pg0->PhyData[i].PortFlags & + MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG); + sas_iounit_pg1->PhyData[i].PhyFlags = + (sas_iounit_pg0->PhyData[i].PhyFlags & + (MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED + + MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED)); + } + + if (enable) + sas_iounit_pg1->PhyData[phy->number].PhyFlags + &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + else + sas_iounit_pg1->PhyData[phy->number].PhyFlags + |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE; + + mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz); + + /* link reset */ + if (enable) + _transport_phy_reset(phy, 0); + + out: + kfree(sas_iounit_pg1); + kfree(sas_iounit_pg0); + return rc; +} + +/** + * _transport_phy_speed - set phy min/max link rates + * @phy: The sas phy object + * @rates: rates defined in sas_phy_linkrates + * + * Only support sas_host direct attached phys. + * Returns 0 for success, non-zero for failure. + */ +static int +_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates) +{ + struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy); + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2SasPhyPage0_t phy_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + u16 sz; + int i; + int rc = 0; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (!rates->minimum_linkrate) + rates->minimum_linkrate = phy->minimum_linkrate; + else if (rates->minimum_linkrate < phy->minimum_linkrate_hw) + rates->minimum_linkrate = phy->minimum_linkrate_hw; + + if (!rates->maximum_linkrate) + rates->maximum_linkrate = phy->maximum_linkrate; + else if (rates->maximum_linkrate > phy->maximum_linkrate_hw) + rates->maximum_linkrate = phy->maximum_linkrate_hw; + + /* handle expander phys */ + if (phy->identify.sas_address != ioc->sas_hba.sas_address) { + phy->minimum_linkrate = rates->minimum_linkrate; + phy->maximum_linkrate = rates->maximum_linkrate; + return _transport_expander_phy_control(ioc, phy, + SMP_PHY_CONTROL_LINK_RESET); + } + + /* handle hba phys */ + + /* sas_iounit page 1 */ + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENOMEM; + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -EIO; + goto out; + } + + for (i = 0; i < ioc->sas_hba.num_phys; i++) { + if (phy->number != i) { + sas_iounit_pg1->PhyData[i].MaxMinLinkRate = + (ioc->sas_hba.phy[i].phy->minimum_linkrate + + 
(ioc->sas_hba.phy[i].phy->maximum_linkrate << 4)); + } else { + sas_iounit_pg1->PhyData[i].MaxMinLinkRate = + (rates->minimum_linkrate + + (rates->maximum_linkrate << 4)); + } + } + + if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, + sz)) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -ENXIO; + goto out; + } + + /* link reset */ + _transport_phy_reset(phy, 0); + + /* read phy page 0, then update the rates in the sas transport phy */ + if (!mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + phy->number)) { + phy->minimum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate >> 4); + phy->negotiated_linkrate = _transport_convert_phy_link_rate( + phy_pg0.NegotiatedLinkRate & + MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + } + + out: + kfree(sas_iounit_pg1); + return rc; +} + +/** + * _transport_smp_handler - transport portal for smp passthru + * @shost: shost object + * @rphy: sas transport rphy object + * @req: + * + * This used primarily for smp_utils. + * Example: + * smp_rep_general /sys/class/bsg/expander-5:0 + */ +static int +_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy, + struct request *req) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + int rc, i; + u16 smid; + u32 ioc_state; + unsigned long timeleft; + void *psge; + u8 issue_reset = 0; + dma_addr_t dma_addr_in = 0; + dma_addr_t dma_addr_out = 0; + dma_addr_t pci_dma_in = 0; + dma_addr_t pci_dma_out = 0; + void *pci_addr_in = NULL; + void *pci_addr_out = NULL; + u16 wait_state_count; + struct request *rsp = req->next_rq; + struct bio_vec *bvec = NULL; + + if (!rsp) { + pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n", + ioc->name, __func__); + return -EINVAL; + } + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + + rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex); + if (rc) + return rc; + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name, + __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + /* Check if the request is split across multiple segments */ + if (req->bio->bi_vcnt > 1) { + u32 offset = 0; + + /* Allocate memory and copy the request */ + pci_addr_out = pci_alloc_consistent(ioc->pdev, + blk_rq_bytes(req), &pci_dma_out); + if (!pci_addr_out) { + pr_info(MPT3SAS_FMT "%s(): PCI Addr out = NULL\n", + ioc->name, __func__); + rc = -ENOMEM; + goto out; + } + + bio_for_each_segment(bvec, req->bio, i) { + memcpy(pci_addr_out + offset, + page_address(bvec->bv_page) + bvec->bv_offset, + bvec->bv_len); + offset += bvec->bv_len; + } + } else { + dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), + blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); + if (!dma_addr_out) { + pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n", + ioc->name, __func__); + rc = -ENOMEM; + goto free_pci; + } + } + + /* Check if the response needs to be populated across + * multiple segments */ + if (rsp->bio->bi_vcnt > 1) { + pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp), + &pci_dma_in); + if (!pci_addr_in) { + pr_info(MPT3SAS_FMT "%s(): PCI Addr in = NULL\n", + ioc->name, 
__func__); + rc = -ENOMEM; + goto unmap; + } + } else { + dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), + blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); + if (!dma_addr_in) { + pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n", + ioc->name, __func__); + rc = -ENOMEM; + goto unmap; + } + } + + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EFAULT; + goto unmap; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info(MPT3SAS_FMT "%s: ioc is operational\n", + ioc->name, __func__); + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto unmap; + } + + rc = 0; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = 0xFF; + mpi_request->SASAddress = (rphy) ? + cpu_to_le64(rphy->identify.sas_address) : + cpu_to_le64(ioc->sas_hba.sas_address); + mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); + psge = &mpi_request->SGL; + + if (req->bio->bi_vcnt > 1) + ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4), + pci_dma_in, (blk_rq_bytes(rsp) + 4)); + else + ioc->build_sg(ioc, psge, dma_addr_out, (blk_rq_bytes(req) - 4), + dma_addr_in, (blk_rq_bytes(rsp) + 4)); + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "%s - sending smp request\n", ioc->name, __func__)); + + init_completion(&ioc->transport_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, + 10*HZ); + + if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s : timeout\n", + __func__, ioc->name); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SmpPassthroughRequest_t)/4); + if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "%s - complete\n", ioc->name, __func__)); + + if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { + + mpi_reply = ioc->transport_cmds.reply; + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "%s - reply data transfer size(%d)\n", + ioc->name, __func__, + le16_to_cpu(mpi_reply->ResponseDataLength))); + + memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); + req->sense_len = sizeof(*mpi_reply); + req->resid_len = 0; + rsp->resid_len -= + le16_to_cpu(mpi_reply->ResponseDataLength); + + /* check if the resp needs to be copied from the allocated + * pci mem */ + if (rsp->bio->bi_vcnt > 1) { + u32 offset = 0; + u32 bytes_to_copy = + le16_to_cpu(mpi_reply->ResponseDataLength); + bio_for_each_segment(bvec, rsp->bio, i) { + if (bytes_to_copy <= bvec->bv_len) { + memcpy(page_address(bvec->bv_page) + + bvec->bv_offset, pci_addr_in + + offset, bytes_to_copy); + break; + } else { + memcpy(page_address(bvec->bv_page) + + bvec->bv_offset, pci_addr_in + + offset, bvec->bv_len); + bytes_to_copy -= bvec->bv_len; + } + offset += bvec->bv_len; + } + } + } else { + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "%s - no reply\n", ioc->name, __func__)); + rc 
= -ENXIO; + } + + issue_host_reset: + if (issue_reset) { + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + rc = -ETIMEDOUT; + } + + unmap: + if (dma_addr_out) + pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req), + PCI_DMA_BIDIRECTIONAL); + if (dma_addr_in) + pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp), + PCI_DMA_BIDIRECTIONAL); + + free_pci: + if (pci_addr_out) + pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out, + pci_dma_out); + + if (pci_addr_in) + pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in, + pci_dma_in); + + out: + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + +struct sas_function_template mpt3sas_transport_functions = { + .get_linkerrors = _transport_get_linkerrors, + .get_enclosure_identifier = _transport_get_enclosure_identifier, + .get_bay_identifier = _transport_get_bay_identifier, + .phy_reset = _transport_phy_reset, + .phy_enable = _transport_phy_enable, + .set_phy_speed = _transport_phy_speed, + .smp_handler = _transport_smp_handler, +}; + +struct scsi_transport_template *mpt3sas_transport_template; diff --git a/addons/mpt3sas/src/3.10.108/mpt3sas_trigger_diag.c b/addons/mpt3sas/src/3.10.108/mpt3sas_trigger_diag.c new file mode 100644 index 00000000..6f8d6213 --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpt3sas_trigger_diag.c @@ -0,0 +1,433 @@ +/* + * This module provides common API to set Diagnostic trigger for MPT + * (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c + * Copyright (C) 2012 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mpt3sas_base.h" + +/** + * _mpt3sas_raise_sigio - notifiy app + * @ioc: per adapter object + * @event_data: + */ +static void +_mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) +{ + Mpi2EventNotificationReply_t *mpi_reply; + u16 sz, event_data_sz; + unsigned long flags; + + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", + ioc->name, __func__)); + + sz = offsetof(Mpi2EventNotificationReply_t, EventData) + + sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4; + mpi_reply = kzalloc(sz, GFP_KERNEL); + if (!mpi_reply) + goto out; + mpi_reply->Event = cpu_to_le16(MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED); + event_data_sz = (sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4) / 4; + mpi_reply->EventDataLength = cpu_to_le16(event_data_sz); + memcpy(&mpi_reply->EventData, event_data, + sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT + "%s: add to driver event log\n", + ioc->name, __func__)); + mpt3sas_ctl_add_to_event_log(ioc, mpi_reply); + kfree(mpi_reply); + out: + + /* clearing the diag_trigger_active flag */ + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT + "%s: clearing diag_trigger_active flag\n", + ioc->name, __func__)); + ioc->diag_trigger_active = 0; + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, + __func__)); +} + +/** + * mpt3sas_process_trigger_data - process the event data for the trigger + * @ioc: per adapter object + * @event_data: + */ +void +mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) +{ + u8 issue_reset = 0; + + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", + ioc->name, __func__)); + + /* release the diag buffer trace */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT + "%s: release trace diag buffer\n", ioc->name, __func__)); + mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, + &issue_reset); + } + + _mpt3sas_raise_sigio(ioc, event_data); + + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, + __func__)); +} + +/** + * mpt3sas_trigger_master - Master trigger handler + * @ioc: per adapter object + * @trigger_bitmask: + * + */ +void +mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + unsigned long flags; + u8 found_match = 0; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + if 
(trigger_bitmask & MASTER_TRIGGER_FW_FAULT || + trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET) + goto by_pass_checks; + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + by_pass_checks: + + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT + "%s: enter - trigger_bitmask = 0x%08x\n", + ioc->name, __func__, trigger_bitmask)); + + /* don't send trigger if a trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + if (ioc->diag_trigger_master.MasterData & trigger_bitmask) { + found_match = 1; + ioc->diag_trigger_active = 1; + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT + "%s: setting diag_trigger_active flag\n", + ioc->name, __func__)); + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_MASTER; + event_data.u.master.MasterData = trigger_bitmask; + + if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT || + trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET) + _mpt3sas_raise_sigio(ioc, &event_data); + else + mpt3sas_send_trigger_data_event(ioc, &event_data); + + out: + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, + __func__)); +} + +/** + * mpt3sas_trigger_event - Event trigger handler + * @ioc: per adapter object + * @event: firmware event code + * @log_entry_qualifier: log entry qualifier (Log Entry Added events only) + * + */ +void +mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event, + u16 log_entry_qualifier) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + struct SL_WH_EVENT_TRIGGER_T *event_trigger; + int i; + unsigned long flags; + u8 found_match; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT + "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n", + ioc->name, __func__, event, log_entry_qualifier)); + + /* don't send trigger if a trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + event_trigger = ioc->diag_trigger_event.EventTriggerEntry; + for (i = 0 , found_match = 0; i < ioc->diag_trigger_event.ValidEntries + && !found_match; i++, event_trigger++) { + if (event_trigger->EventValue != event) + continue; + if (event == MPI2_EVENT_LOG_ENTRY_ADDED) { + if (event_trigger->LogEntryQualifier == + log_entry_qualifier) + found_match = 1; + continue; + } + found_match = 1; + ioc->diag_trigger_active = 1; + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT + "%s: setting diag_trigger_active flag\n", + 
ioc->name, __func__)); + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT + "%s: setting diag_trigger_active flag\n", + ioc->name, __func__)); + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_EVENT; + event_data.u.event.EventValue = event; + event_data.u.event.LogEntryQualifier = log_entry_qualifier; + mpt3sas_send_trigger_data_event(ioc, &event_data); + out: + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, + __func__)); +} + +/** + * mpt3sas_trigger_scsi - SCSI trigger handler + * @ioc: per adapter object + * @sense_key: SCSI sense key + * @asc: SCSI additional sense code + * @ascq: SCSI additional sense code qualifier + * + */ +void +mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc, + u8 ascq) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + struct SL_WH_SCSI_TRIGGER_T *scsi_trigger; + int i; + unsigned long flags; + u8 found_match; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT + "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n", + ioc->name, __func__, sense_key, asc, ascq)); + + /* don't send trigger if a trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + scsi_trigger = ioc->diag_trigger_scsi.SCSITriggerEntry; + for (i = 0 , found_match = 0; i < ioc->diag_trigger_scsi.ValidEntries + && !found_match; i++, scsi_trigger++) { + if (scsi_trigger->SenseKey != sense_key) + continue; + if (!(scsi_trigger->ASC == 0xFF || scsi_trigger->ASC == asc)) + continue; + if (!(scsi_trigger->ASCQ == 0xFF || scsi_trigger->ASCQ == ascq)) + continue; + found_match = 1; + ioc->diag_trigger_active = 1; + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT + "%s: setting diag_trigger_active flag\n", + ioc->name, __func__)); + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_SCSI; + event_data.u.scsi.SenseKey = sense_key; + event_data.u.scsi.ASC = asc; + event_data.u.scsi.ASCQ = ascq; + mpt3sas_send_trigger_data_event(ioc, &event_data); + out: + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, + __func__)); +} + +/** + * mpt3sas_trigger_mpi - MPI trigger handler + * @ioc: per adapter object + * @ioc_status: MPI IOCStatus value + * @loginfo: MPI IocLogInfo value + * + */ +void +mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo) +{ + struct SL_WH_TRIGGERS_EVENT_DATA_T event_data; + struct SL_WH_MPI_TRIGGER_T *mpi_trigger; + int i; + unsigned long flags; + u8 found_match; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + + /* check to see if trace buffers are currently registered */ + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + /* check to see 
if trace buffers are currently released */ + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return; + } + + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT + "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n", + ioc->name, __func__, ioc_status, loginfo)); + + /* don't send trigger if a trigger is currently active */ + if (ioc->diag_trigger_active) { + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + goto out; + } + + /* check for the trigger condition */ + mpi_trigger = ioc->diag_trigger_mpi.MPITriggerEntry; + for (i = 0 , found_match = 0; i < ioc->diag_trigger_mpi.ValidEntries + && !found_match; i++, mpi_trigger++) { + if (mpi_trigger->IOCStatus != ioc_status) + continue; + if (!(mpi_trigger->IocLogInfo == 0xFFFFFFFF || + mpi_trigger->IocLogInfo == loginfo)) + continue; + found_match = 1; + ioc->diag_trigger_active = 1; + } + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + + if (!found_match) + goto out; + + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT + "%s: setting diag_trigger_active flag\n", + ioc->name, __func__)); + memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T)); + event_data.trigger_type = MPT3SAS_TRIGGER_MPI; + event_data.u.mpi.IOCStatus = ioc_status; + event_data.u.mpi.IocLogInfo = loginfo; + mpt3sas_send_trigger_data_event(ioc, &event_data); + out: + dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, + __func__)); +} diff --git a/addons/mpt3sas/src/3.10.108/mpt3sas_trigger_diag.h b/addons/mpt3sas/src/3.10.108/mpt3sas_trigger_diag.h new file mode 100644 index 00000000..a10c3090 --- /dev/null +++ b/addons/mpt3sas/src/3.10.108/mpt3sas_trigger_diag.h @@ -0,0 +1,193 @@ +/* + * This is the Fusion MPT base driver providing common API layer interface + * to set Diagnostic triggers for MPT (Message Passing Technology) based + * controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h + * Copyright (C) 2012 LSI Corporation + * (mailto:DL-MPTFusionLinux@lsi.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + /* Diagnostic Trigger Configuration Data Structures */ + +#ifndef MPT3SAS_TRIGGER_DIAG_H_INCLUDED +#define MPT3SAS_TRIGGER_DIAG_H_INCLUDED + +/* limitation on number of entries */ +#define NUM_VALID_ENTRIES (20) + +/* trigger types */ +#define MPT3SAS_TRIGGER_MASTER (1) +#define MPT3SAS_TRIGGER_EVENT (2) +#define MPT3SAS_TRIGGER_SCSI (3) +#define MPT3SAS_TRIGGER_MPI (4) + +/* trigger names */ +#define MASTER_TRIGGER_FILE_NAME "diag_trigger_master" +#define EVENT_TRIGGERS_FILE_NAME "diag_trigger_event" +#define SCSI_TRIGGERS_FILE_NAME "diag_trigger_scsi" +#define MPI_TRIGGER_FILE_NAME "diag_trigger_mpi" + +/* master trigger bitmask */ +#define MASTER_TRIGGER_FW_FAULT (0x00000001) +#define MASTER_TRIGGER_ADAPTER_RESET (0x00000002) +#define MASTER_TRIGGER_TASK_MANAGMENT (0x00000004) +#define MASTER_TRIGGER_DEVICE_REMOVAL (0x00000008) + +/* fake firmware event for trigger */ +#define MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED (0x6E) + +/** + * MasterTrigger is a single U32 passed to/from sysfs. + * + * Bit Flags (enables) include: + * 1. FW Faults + * 2. Adapter Reset issued by driver + * 3. TMs + * 4. Device Remove Event sent by FW + */ + +struct SL_WH_MASTER_TRIGGER_T { + uint32_t MasterData; +}; + +/** + * struct SL_WH_EVENT_TRIGGER_T - Definition of an event trigger element + * @EventValue: Event Code to trigger on + * @LogEntryQualifier: Type of FW event that was logged (Log Entry Added Event only) + * + * Defines an event that should induce a DIAG_TRIGGER driver event if observed. + */ +struct SL_WH_EVENT_TRIGGER_T { + uint16_t EventValue; + uint16_t LogEntryQualifier; +}; + +/** + * struct SL_WH_EVENT_TRIGGERS_T - Structure passed to/from sysfs containing a + * list of Event Triggers to be monitored for. + * @ValidEntries: Number of _SL_WH_EVENT_TRIGGER_T structures contained in this + * structure. + * @EventTriggerEntry: List of Event trigger elements. + * + * This binary structure is transferred via sysfs to get/set Event Triggers + * in the Linux Driver. + */ + +struct SL_WH_EVENT_TRIGGERS_T { + uint32_t ValidEntries; + struct SL_WH_EVENT_TRIGGER_T EventTriggerEntry[NUM_VALID_ENTRIES]; +}; + +/** + * struct SL_WH_SCSI_TRIGGER_T - Definition of a SCSI trigger element + * @ASCQ: Additional Sense Code Qualifier. Can be specific or 0xFF for + * wildcard. + * @ASC: Additional Sense Code. Can be specific or 0xFF for wildcard + * @SenseKey: SCSI Sense Key + * + * Defines a sense key (single or many variants) that should induce a + * DIAG_TRIGGER driver event if observed. + */ +struct SL_WH_SCSI_TRIGGER_T { + U8 ASCQ; + U8 ASC; + U8 SenseKey; + U8 Reserved; +}; + +/** + * struct SL_WH_SCSI_TRIGGERS_T - Structure passed to/from sysfs containing a + * list of SCSI sense codes that should trigger a DIAG_SERVICE event when + * observed. 
+ * @ValidEntries: Number of _SL_WH_SCSI_TRIGGER_T structures contained in this + * structure. + * @SCSITriggerEntry: List of SCSI Sense Code trigger elements. + * + * This binary structure is transferred via sysfs to get/set SCSI Sense Code + * Triggers in the Linux Driver. + */ +struct SL_WH_SCSI_TRIGGERS_T { + uint32_t ValidEntries; + struct SL_WH_SCSI_TRIGGER_T SCSITriggerEntry[NUM_VALID_ENTRIES]; +}; + +/** + * struct SL_WH_MPI_TRIGGER_T - Definition of an MPI trigger element + * @IOCStatus: MPI IOCStatus + * @IocLogInfo: MPI IocLogInfo. Can be specific or 0xFFFFFFFF for wildcard + * + * Defines a MPI IOCStatus/IocLogInfo pair that should induce a DIAG_TRIGGER + * driver event if observed. + */ +struct SL_WH_MPI_TRIGGER_T { + uint16_t IOCStatus; + uint16_t Reserved; + uint32_t IocLogInfo; +}; + +/** + * struct SL_WH_MPI_TRIGGERS_T - Structure passed to/from sysfs containing a + * list of MPI IOCStatus/IocLogInfo pairs that should trigger a DIAG_SERVICE + * event when observed. + * @ValidEntries: Number of _SL_WH_MPI_TRIGGER_T structures contained in this + * structure. + * @MPITriggerEntry: List of MPI IOCStatus/IocLogInfo trigger elements. + * + * This binary structure is transferred via sysfs to get/set MPI Error Triggers + * in the Linux Driver. + */ +struct SL_WH_MPI_TRIGGERS_T { + uint32_t ValidEntries; + struct SL_WH_MPI_TRIGGER_T MPITriggerEntry[NUM_VALID_ENTRIES]; +}; + +/** + * struct SL_WH_TRIGGERS_EVENT_DATA_T - event data for trigger + * @trigger_type: trigger type (see MPT3SAS_TRIGGER_XXXX) + * @u: trigger condition that caused trigger to be sent + */ +struct SL_WH_TRIGGERS_EVENT_DATA_T { + uint32_t trigger_type; + union { + struct SL_WH_MASTER_TRIGGER_T master; + struct SL_WH_EVENT_TRIGGER_T event; + struct SL_WH_SCSI_TRIGGER_T scsi; + struct SL_WH_MPI_TRIGGER_T mpi; + } u; +}; +#endif /* MPT3SAS_TRIGGER_DIAG_H_INCLUDED */ diff --git a/addons/mpt3sas/src/4.4.180/Makefile b/addons/mpt3sas/src/4.4.180/Makefile new file mode 100644 index 00000000..4c824586 --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/Makefile @@ -0,0 +1,8 @@ +obj-m += mpt3sas.o +mpt3sas-y += mpt3sas_base.o \ + mpt3sas_config.o \ + mpt3sas_scsih.o \ + mpt3sas_transport.o \ + mpt3sas_ctl.o \ + mpt3sas_trigger_diag.o \ + mpt3sas_warpdrive.o diff --git a/addons/mpt3sas/src/4.4.180/mpi/mpi2.h b/addons/mpt3sas/src/4.4.180/mpi/mpi2.h new file mode 100644 index 00000000..ec27ad2d --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpi/mpi2.h @@ -0,0 +1,1176 @@ +/* + * Copyright (c) 2000-2014 LSI Corporation. + * + * + * Name: mpi2.h + * Title: MPI Message independent structures and definitions + * including System Interface Register Set and + * scatter/gather formats. + * Creation Date: June 21, 2006 + * + * mpi2.h Version: 02.00.35 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT. 
+ * Moved ReplyPostHostIndex register to offset 0x6C of the + * MPI2_SYSTEM_INTERFACE_REGS and modified the define for + * MPI2_REPLY_POST_HOST_INDEX_OFFSET. + * Added union of request descriptors. + * Added union of reply descriptors. + * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT. + * Added define for MPI2_VERSION_02_00. + * Fixed the size of the FunctionDependent5 field in the + * MPI2_DEFAULT_REPLY structure. + * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT. + * Removed the MPI-defined Fault Codes and extended the + * product specific codes up to 0xEFFF. + * Added a sixth key value for the WriteSequence register + * and changed the flush value to 0x0. + * Added message function codes for Diagnostic Buffer Post + * and Diagnostic Release. + * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED + * Moved MPI2_VERSION_UNION from mpi2_ioc.h. + * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT. + * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT. + * Added #defines for marking a reply descriptor as unused. + * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT. + * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT. + * Moved LUN field defines from mpi2_init.h. + * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT. + * In all request and reply descriptors, replaced VF_ID + * field with MSIxIndex field. + * Removed DevHandle field from + * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those + * bytes reserved. + * Added RAID Accelerator functionality. + * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT. + * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MSI-x index mask and shift for Reply Post Host + * Index register. + * Added function code for Host Based Discovery Action. + * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT. + * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL. + * Added defines for product-specific range of message + * function codes, 0xF0 to 0xFF. + * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT. + * Added alternative defines for the SGE Direction bit. + * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define. + * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI2_FUNCTION_SEND_HOST_MESSAGE. + * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT. + * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT. + * Incorporating additions for MPI v2.5. + * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT. + * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT. + * Added Hard Reset delay timings. + * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT. + * 07-26-12 02.00.27 Bumped MPI2_HEADER_VERSION_UNIT. + * 11-27-12 02.00.28 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-20-12 02.00.29 Bumped MPI2_HEADER_VERSION_UNIT. + * Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET. + * 04-09-13 02.00.30 Bumped MPI2_HEADER_VERSION_UNIT. + * 04-17-13 02.00.31 Bumped MPI2_HEADER_VERSION_UNIT. + * 08-19-13 02.00.32 Bumped MPI2_HEADER_VERSION_UNIT. + * 12-05-13 02.00.33 Bumped MPI2_HEADER_VERSION_UNIT. + * 01-08-14 02.00.34 Bumped MPI2_HEADER_VERSION_UNIT. + * 06-13-14 02.00.35 Bumped MPI2_HEADER_VERSION_UNIT. 
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_H +#define MPI2_H + +/***************************************************************************** +* +* MPI Version Definitions +* +*****************************************************************************/ + +#define MPI2_VERSION_MAJOR_MASK (0xFF00) +#define MPI2_VERSION_MAJOR_SHIFT (8) +#define MPI2_VERSION_MINOR_MASK (0x00FF) +#define MPI2_VERSION_MINOR_SHIFT (0) + +/*major version for all MPI v2.x */ +#define MPI2_VERSION_MAJOR (0x02) + +/*minor version for MPI v2.0 compatible products */ +#define MPI2_VERSION_MINOR (0x00) +#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI2_VERSION_MINOR) +#define MPI2_VERSION_02_00 (0x0200) + +/*minor version for MPI v2.5 compatible products */ +#define MPI25_VERSION_MINOR (0x05) +#define MPI25_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \ + MPI25_VERSION_MINOR) +#define MPI2_VERSION_02_05 (0x0205) + +/*Unit and Dev versioning for this MPI header set */ +#define MPI2_HEADER_VERSION_UNIT (0x23) +#define MPI2_HEADER_VERSION_DEV (0x00) +#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) +#define MPI2_HEADER_VERSION_UNIT_SHIFT (8) +#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF) +#define MPI2_HEADER_VERSION_DEV_SHIFT (0) +#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \ + MPI2_HEADER_VERSION_DEV) + +/***************************************************************************** +* +* IOC State Definitions +* +*****************************************************************************/ + +#define MPI2_IOC_STATE_RESET (0x00000000) +#define MPI2_IOC_STATE_READY (0x10000000) +#define MPI2_IOC_STATE_OPERATIONAL (0x20000000) +#define MPI2_IOC_STATE_FAULT (0x40000000) + +#define MPI2_IOC_STATE_MASK (0xF0000000) +#define MPI2_IOC_STATE_SHIFT (28) + +/*Fault state range for product specific codes */ +#define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000) +#define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF) + +/***************************************************************************** +* +* System Interface Register Definitions +* +*****************************************************************************/ + +typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS { + U32 Doorbell; /*0x00 */ + U32 WriteSequence; /*0x04 */ + U32 HostDiagnostic; /*0x08 */ + U32 Reserved1; /*0x0C */ + U32 DiagRWData; /*0x10 */ + U32 DiagRWAddressLow; /*0x14 */ + U32 DiagRWAddressHigh; /*0x18 */ + U32 Reserved2[5]; /*0x1C */ + U32 HostInterruptStatus; /*0x30 */ + U32 HostInterruptMask; /*0x34 */ + U32 DCRData; /*0x38 */ + U32 DCRAddress; /*0x3C */ + U32 Reserved3[2]; /*0x40 */ + U32 ReplyFreeHostIndex; /*0x48 */ + U32 Reserved4[8]; /*0x4C */ + U32 ReplyPostHostIndex; /*0x6C */ + U32 Reserved5; /*0x70 */ + U32 HCBSize; /*0x74 */ + U32 HCBAddressLow; /*0x78 */ + U32 HCBAddressHigh; /*0x7C */ + U32 Reserved6[16]; /*0x80 */ + U32 RequestDescriptorPostLow; /*0xC0 */ + U32 RequestDescriptorPostHigh; /*0xC4 */ + U32 Reserved7[14]; /*0xC8 */ +} MPI2_SYSTEM_INTERFACE_REGS, + *PTR_MPI2_SYSTEM_INTERFACE_REGS, + Mpi2SystemInterfaceRegs_t, + *pMpi2SystemInterfaceRegs_t; + +/* + *Defines for working with the Doorbell register. 
+ */ +#define MPI2_DOORBELL_OFFSET (0x00000000) + +/*IOC --> System values */ +#define MPI2_DOORBELL_USED (0x08000000) +#define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000) +#define MPI2_DOORBELL_WHO_INIT_SHIFT (24) +#define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF) +#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF) + +/*System --> IOC values */ +#define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000) +#define MPI2_DOORBELL_FUNCTION_SHIFT (24) +#define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000) +#define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16) + +/* + *Defines for the WriteSequence register + */ +#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004) +#define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F) +#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0) +#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF) +#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4) +#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB) +#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2) +#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7) +#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD) + +/* + *Defines for the HostDiagnostic register + */ +#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008) + +#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800) +#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000) +#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800) + +#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400) +#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200) +#define MPI2_DIAG_HCB_MODE (0x00000100) +#define MPI2_DIAG_DIAG_WRITE_ENABLE (0x00000080) +#define MPI2_DIAG_FLASH_BAD_SIG (0x00000040) +#define MPI2_DIAG_RESET_HISTORY (0x00000020) +#define MPI2_DIAG_DIAG_RW_ENABLE (0x00000010) +#define MPI2_DIAG_RESET_ADAPTER (0x00000004) +#define MPI2_DIAG_HOLD_IOC_RESET (0x00000002) + +/* + *Offsets for DiagRWData and address + */ +#define MPI2_DIAG_RW_DATA_OFFSET (0x00000010) +#define MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014) +#define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018) + +/* + *Defines for the HostInterruptStatus register + */ +#define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030) +#define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000) +#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS +#define MPI2_HIS_RESET_IRQ_STATUS (0x40000000) +#define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008) +#define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001) +#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS + +/* + *Defines for the HostInterruptMask register + */ +#define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034) +#define MPI2_HIM_RESET_IRQ_MASK (0x40000000) +#define MPI2_HIM_REPLY_INT_MASK (0x00000008) +#define MPI2_HIM_RIM MPI2_HIM_REPLY_INT_MASK +#define MPI2_HIM_IOC2SYS_DB_MASK (0x00000001) +#define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK + +/* + *Offsets for DCRData and address + */ +#define MPI2_DCR_DATA_OFFSET (0x00000038) +#define MPI2_DCR_ADDRESS_OFFSET (0x0000003C) + +/* + *Offset for the Reply Free Queue + */ +#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048) + +/* + *Defines for the Reply Descriptor Post Queue + */ +#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C) +#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF) +#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000) +#define MPI2_RPHI_MSIX_INDEX_SHIFT (24) +#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET (0x0000030C) /*MPI v2.5 only*/ + + +/* + *Defines for the HCBSize and address + */ +#define MPI2_HCB_SIZE_OFFSET (0x00000074) +#define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000) +#define MPI2_HCB_SIZE_HCB_ENABLE (0x00000001) + +#define MPI2_HCB_ADDRESS_LOW_OFFSET (0x00000078) +#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C) + +/* + *Offsets for the 
Request Queue + */ +#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0) +#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4) + +/*Hard Reset delay timings */ +#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000) +#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000) +#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000) + +/***************************************************************************** +* +* Message Descriptors +* +*****************************************************************************/ + +/*Request Descriptors */ + +/*Default Request Descriptor */ +typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 DescriptorTypeDependent; /*0x06 */ +} MPI2_DEFAULT_REQUEST_DESCRIPTOR, + *PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR, + Mpi2DefaultRequestDescriptor_t, + *pMpi2DefaultRequestDescriptor_t; + +/*defines for the RequestFlags field */ +#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E) +#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00) +#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02) +#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06) +#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08) +#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A) +#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C) + +#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01) + +/*High Priority Request Descriptor */ +typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 Reserved1; /*0x06 */ +} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, + *PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR, + Mpi2HighPriorityRequestDescriptor_t, + *pMpi2HighPriorityRequestDescriptor_t; + +/*SCSI IO Request Descriptor */ +typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 DevHandle; /*0x06 */ +} MPI2_SCSI_IO_REQUEST_DESCRIPTOR, + *PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR, + Mpi2SCSIIORequestDescriptor_t, + *pMpi2SCSIIORequestDescriptor_t; + +/*SCSI Target Request Descriptor */ +typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 IoIndex; /*0x06 */ +} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, + *PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR, + Mpi2SCSITargetRequestDescriptor_t, + *pMpi2SCSITargetRequestDescriptor_t; + +/*RAID Accelerator Request Descriptor */ +typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR { + U8 RequestFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 LMID; /*0x04 */ + U16 Reserved; /*0x06 */ +} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, + *PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR, + Mpi2RAIDAcceleratorRequestDescriptor_t, + *pMpi2RAIDAcceleratorRequestDescriptor_t; + +/*Fast Path SCSI IO Request Descriptor */ +typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR + MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, + *PTR_MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR, + Mpi25FastPathSCSIIORequestDescriptor_t, + *pMpi25FastPathSCSIIORequestDescriptor_t; + +/*union of Request Descriptors */ +typedef union _MPI2_REQUEST_DESCRIPTOR_UNION { + MPI2_DEFAULT_REQUEST_DESCRIPTOR Default; + MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority; + MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO; + MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget; + MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator; + 
MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO; + U64 Words; +} MPI2_REQUEST_DESCRIPTOR_UNION, + *PTR_MPI2_REQUEST_DESCRIPTOR_UNION, + Mpi2RequestDescriptorUnion_t, + *pMpi2RequestDescriptorUnion_t; + +/*Reply Descriptors */ + +/*Default Reply Descriptor */ +typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 DescriptorTypeDependent1; /*0x02 */ + U32 DescriptorTypeDependent2; /*0x04 */ +} MPI2_DEFAULT_REPLY_DESCRIPTOR, + *PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR, + Mpi2DefaultReplyDescriptor_t, + *pMpi2DefaultReplyDescriptor_t; + +/*defines for the ReplyFlags field */ +#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F) +#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00) +#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01) +#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02) +#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03) +#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05) +#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06) +#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F) + +/*values for marking a reply descriptor as unused */ +#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF) +#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF) + +/*Address Reply Descriptor */ +typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U32 ReplyFrameAddress; /*0x04 */ +} MPI2_ADDRESS_REPLY_DESCRIPTOR, + *PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR, + Mpi2AddressReplyDescriptor_t, + *pMpi2AddressReplyDescriptor_t; + +#define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00) + +/*SCSI IO Success Reply Descriptor */ +typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U16 TaskTag; /*0x04 */ + U16 Reserved1; /*0x06 */ +} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + Mpi2SCSIIOSuccessReplyDescriptor_t, + *pMpi2SCSIIOSuccessReplyDescriptor_t; + +/*TargetAssist Success Reply Descriptor */ +typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U8 SequenceNumber; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 IoIndex; /*0x06 */ +} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR, + Mpi2TargetAssistSuccessReplyDescriptor_t, + *pMpi2TargetAssistSuccessReplyDescriptor_t; + +/*Target Command Buffer Reply Descriptor */ +typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U8 VP_ID; /*0x02 */ + U8 Flags; /*0x03 */ + U16 InitiatorDevHandle; /*0x04 */ + U16 IoIndex; /*0x06 */ +} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, + *PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR, + Mpi2TargetCommandBufferReplyDescriptor_t, + *pMpi2TargetCommandBufferReplyDescriptor_t; + +/*defines for Flags field */ +#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F) + +/*RAID Accelerator Success Reply Descriptor */ +typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR { + U8 ReplyFlags; /*0x00 */ + U8 MSIxIndex; /*0x01 */ + U16 SMID; /*0x02 */ + U32 Reserved; /*0x04 */ +} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR, + Mpi2RAIDAcceleratorSuccessReplyDescriptor_t, + *pMpi2RAIDAcceleratorSuccessReplyDescriptor_t; + +/*Fast Path SCSI IO Success Reply Descriptor */ +typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR + 
MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + *PTR_MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR, + Mpi25FastPathSCSIIOSuccessReplyDescriptor_t, + *pMpi25FastPathSCSIIOSuccessReplyDescriptor_t; + +/*union of Reply Descriptors */ +typedef union _MPI2_REPLY_DESCRIPTORS_UNION { + MPI2_DEFAULT_REPLY_DESCRIPTOR Default; + MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply; + MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess; + MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess; + MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer; + MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess; + MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess; + U64 Words; +} MPI2_REPLY_DESCRIPTORS_UNION, + *PTR_MPI2_REPLY_DESCRIPTORS_UNION, + Mpi2ReplyDescriptorsUnion_t, + *pMpi2ReplyDescriptorsUnion_t; + +/***************************************************************************** +* +* Message Functions +* +*****************************************************************************/ + +#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00) +#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01) +#define MPI2_FUNCTION_IOC_INIT (0x02) +#define MPI2_FUNCTION_IOC_FACTS (0x03) +#define MPI2_FUNCTION_CONFIG (0x04) +#define MPI2_FUNCTION_PORT_FACTS (0x05) +#define MPI2_FUNCTION_PORT_ENABLE (0x06) +#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07) +#define MPI2_FUNCTION_EVENT_ACK (0x08) +#define MPI2_FUNCTION_FW_DOWNLOAD (0x09) +#define MPI2_FUNCTION_TARGET_ASSIST (0x0B) +#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C) +#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D) +#define MPI2_FUNCTION_FW_UPLOAD (0x12) +#define MPI2_FUNCTION_RAID_ACTION (0x15) +#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16) +#define MPI2_FUNCTION_TOOLBOX (0x17) +#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18) +#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A) +#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B) +#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C) +#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D) +#define MPI2_FUNCTION_DIAG_RELEASE (0x1E) +#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24) +#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25) +#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C) +#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F) +#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30) +#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31) +#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0) +#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF) + +/*Doorbell functions */ +#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40) +#define MPI2_FUNCTION_HANDSHAKE (0x42) + +/***************************************************************************** +* +* IOC Status Values +* +*****************************************************************************/ + +/*mask for IOCStatus status value */ +#define MPI2_IOCSTATUS_MASK (0x7FFF) + +/**************************************************************************** +* Common IOCStatus values for all replies +****************************************************************************/ + +#define MPI2_IOCSTATUS_SUCCESS (0x0000) +#define MPI2_IOCSTATUS_INVALID_FUNCTION (0x0001) +#define MPI2_IOCSTATUS_BUSY (0x0002) +#define MPI2_IOCSTATUS_INVALID_SGL (0x0003) +#define MPI2_IOCSTATUS_INTERNAL_ERROR (0x0004) +#define MPI2_IOCSTATUS_INVALID_VPID (0x0005) +#define MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006) +#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007) +#define MPI2_IOCSTATUS_INVALID_STATE (0x0008) +#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009) + 
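/*
 * Editor's illustrative sketch (not part of the upstream header): the
 * IOCStatus word returned in MPI2_DEFAULT_REPLY carries the status code in
 * its low 15 bits and a log-info flag in bit 15, so callers mask with
 * MPI2_IOCSTATUS_MASK before comparing against the codes above. The helper
 * name is hypothetical; U16 is assumed to come from the companion
 * mpi2_type.h basic-type definitions.
 */
static inline int example_mpi2_ioc_status_success(U16 ioc_status)
{
	/* strip the flag bit before the compare, or 0x8000 replies mismatch */
	return (ioc_status & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SUCCESS;
}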
+/**************************************************************************** +* Config IOCStatus values +****************************************************************************/ + +#define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020) +#define MPI2_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021) +#define MPI2_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022) +#define MPI2_IOCSTATUS_CONFIG_INVALID_DATA (0x0023) +#define MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024) +#define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025) + +/**************************************************************************** +* SCSI IO Reply +****************************************************************************/ + +#define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040) +#define MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042) +#define MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043) +#define MPI2_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044) +#define MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045) +#define MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046) +#define MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047) +#define MPI2_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048) +#define MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049) +#define MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A) +#define MPI2_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B) +#define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C) + +/**************************************************************************** +* For use by SCSI Initiator and SCSI Target end-to-end data protection +****************************************************************************/ + +#define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D) +#define MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E) +#define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F) + +/**************************************************************************** +* SCSI Target values +****************************************************************************/ + +#define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062) +#define MPI2_IOCSTATUS_TARGET_ABORTED (0x0063) +#define MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064) +#define MPI2_IOCSTATUS_TARGET_NO_CONNECTION (0x0065) +#define MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A) +#define MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D) +#define MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E) +#define MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F) +#define MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070) +#define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071) + +/**************************************************************************** +* Serial Attached SCSI values +****************************************************************************/ + +#define MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090) +#define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091) + +/**************************************************************************** +* Diagnostic Buffer Post / Diagnostic Release values +****************************************************************************/ + +#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0) + +/**************************************************************************** +* RAID Accelerator values +****************************************************************************/ + +#define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0) + +/**************************************************************************** +* IOCStatus flag to indicate that log info is available +****************************************************************************/ + +#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000) + 
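/*
 * Editor's illustrative sketch (not part of the upstream header): bit 15 of
 * IOCStatus only signals that the reply's IOCLogInfo field is valid, so a
 * driver typically gates any read of that field on the flag defined above.
 * The helper name is hypothetical; U16/U32 are assumed from mpi2_type.h.
 */
static inline U32 example_mpi2_reply_loginfo(U16 ioc_status, U32 ioc_loginfo)
{
	if (!(ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE))
		return 0; /* no log info supplied with this reply */
	return ioc_loginfo;
}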
+/**************************************************************************** +* IOCLogInfo Types +****************************************************************************/ + +#define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000) +#define MPI2_IOCLOGINFO_TYPE_SHIFT (28) +#define MPI2_IOCLOGINFO_TYPE_NONE (0x0) +#define MPI2_IOCLOGINFO_TYPE_SCSI (0x1) +#define MPI2_IOCLOGINFO_TYPE_FC (0x2) +#define MPI2_IOCLOGINFO_TYPE_SAS (0x3) +#define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4) +#define MPI2_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF) + +/***************************************************************************** +* +* Standard Message Structures +* +*****************************************************************************/ + +/**************************************************************************** +*Request Message Header for all request messages +****************************************************************************/ + +typedef struct _MPI2_REQUEST_HEADER { + U16 FunctionDependent1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 FunctionDependent2; /*0x04 */ + U8 FunctionDependent3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ +} MPI2_REQUEST_HEADER, *PTR_MPI2_REQUEST_HEADER, + MPI2RequestHeader_t, *pMPI2RequestHeader_t; + +/**************************************************************************** +* Default Reply +****************************************************************************/ + +typedef struct _MPI2_DEFAULT_REPLY { + U16 FunctionDependent1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 FunctionDependent2; /*0x04 */ + U8 FunctionDependent3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U16 FunctionDependent5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_DEFAULT_REPLY, *PTR_MPI2_DEFAULT_REPLY, + MPI2DefaultReply_t, *pMPI2DefaultReply_t; + +/*common version structure/union used in messages and configuration pages */ + +typedef struct _MPI2_VERSION_STRUCT { + U8 Dev; /*0x00 */ + U8 Unit; /*0x01 */ + U8 Minor; /*0x02 */ + U8 Major; /*0x03 */ +} MPI2_VERSION_STRUCT; + +typedef union _MPI2_VERSION_UNION { + MPI2_VERSION_STRUCT Struct; + U32 Word; +} MPI2_VERSION_UNION; + +/*LUN field defines, common to many structures */ +#define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF) +#define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000) +#define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF) +#define MPI2_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000) +#define MPI2_LUN_LEVEL_1_WORD (0xFF00) +#define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00) + +/***************************************************************************** +* +* Fusion-MPT MPI Scatter Gather Elements +* +*****************************************************************************/ + +/**************************************************************************** +* MPI Simple Element structures +****************************************************************************/ + +typedef struct _MPI2_SGE_SIMPLE32 { + U32 FlagsLength; + U32 Address; +} MPI2_SGE_SIMPLE32, *PTR_MPI2_SGE_SIMPLE32, + Mpi2SGESimple32_t, *pMpi2SGESimple32_t; + +typedef struct _MPI2_SGE_SIMPLE64 { + U32 FlagsLength; + U64 Address; +} MPI2_SGE_SIMPLE64, *PTR_MPI2_SGE_SIMPLE64, + Mpi2SGESimple64_t, *pMpi2SGESimple64_t; + +typedef struct _MPI2_SGE_SIMPLE_UNION { + U32 FlagsLength; + union { + U32 Address32; + U64 Address64; + } u; +} MPI2_SGE_SIMPLE_UNION, + 
*PTR_MPI2_SGE_SIMPLE_UNION, + Mpi2SGESimpleUnion_t, + *pMpi2SGESimpleUnion_t; + +/**************************************************************************** +* MPI Chain Element structures - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_CHAIN32 { + U16 Length; + U8 NextChainOffset; + U8 Flags; + U32 Address; +} MPI2_SGE_CHAIN32, *PTR_MPI2_SGE_CHAIN32, + Mpi2SGEChain32_t, *pMpi2SGEChain32_t; + +typedef struct _MPI2_SGE_CHAIN64 { + U16 Length; + U8 NextChainOffset; + U8 Flags; + U64 Address; +} MPI2_SGE_CHAIN64, *PTR_MPI2_SGE_CHAIN64, + Mpi2SGEChain64_t, *pMpi2SGEChain64_t; + +typedef struct _MPI2_SGE_CHAIN_UNION { + U16 Length; + U8 NextChainOffset; + U8 Flags; + union { + U32 Address32; + U64 Address64; + } u; +} MPI2_SGE_CHAIN_UNION, + *PTR_MPI2_SGE_CHAIN_UNION, + Mpi2SGEChainUnion_t, + *pMpi2SGEChainUnion_t; + +/**************************************************************************** +* MPI Transaction Context Element structures - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_TRANSACTION32 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[1]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION32, + *PTR_MPI2_SGE_TRANSACTION32, + Mpi2SGETransaction32_t, + *pMpi2SGETransaction32_t; + +typedef struct _MPI2_SGE_TRANSACTION64 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[2]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION64, + *PTR_MPI2_SGE_TRANSACTION64, + Mpi2SGETransaction64_t, + *pMpi2SGETransaction64_t; + +typedef struct _MPI2_SGE_TRANSACTION96 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[3]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION96, *PTR_MPI2_SGE_TRANSACTION96, + Mpi2SGETransaction96_t, *pMpi2SGETransaction96_t; + +typedef struct _MPI2_SGE_TRANSACTION128 { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + U32 TransactionContext[4]; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION128, *PTR_MPI2_SGE_TRANSACTION128, + Mpi2SGETransaction_t128, *pMpi2SGETransaction_t128; + +typedef struct _MPI2_SGE_TRANSACTION_UNION { + U8 Reserved; + U8 ContextSize; + U8 DetailsLength; + U8 Flags; + union { + U32 TransactionContext32[1]; + U32 TransactionContext64[2]; + U32 TransactionContext96[3]; + U32 TransactionContext128[4]; + } u; + U32 TransactionDetails[1]; +} MPI2_SGE_TRANSACTION_UNION, + *PTR_MPI2_SGE_TRANSACTION_UNION, + Mpi2SGETransactionUnion_t, + *pMpi2SGETransactionUnion_t; + +/**************************************************************************** +* MPI SGE union for IO SGL's - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_MPI_SGE_IO_UNION { + union { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_CHAIN_UNION Chain; + } u; +} MPI2_MPI_SGE_IO_UNION, *PTR_MPI2_MPI_SGE_IO_UNION, + Mpi2MpiSGEIOUnion_t, *pMpi2MpiSGEIOUnion_t; + +/**************************************************************************** +* MPI SGE union for SGL's with Simple and Transaction elements - for MPI v2.0 products only +****************************************************************************/ + +typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION { + union { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_TRANSACTION_UNION Transaction; + } u; +} MPI2_SGE_TRANS_SIMPLE_UNION, + 
*PTR_MPI2_SGE_TRANS_SIMPLE_UNION, + Mpi2SGETransSimpleUnion_t, + *pMpi2SGETransSimpleUnion_t; + +/**************************************************************************** +* All MPI SGE types union +****************************************************************************/ + +typedef struct _MPI2_MPI_SGE_UNION { + union { + MPI2_SGE_SIMPLE_UNION Simple; + MPI2_SGE_CHAIN_UNION Chain; + MPI2_SGE_TRANSACTION_UNION Transaction; + } u; +} MPI2_MPI_SGE_UNION, *PTR_MPI2_MPI_SGE_UNION, + Mpi2MpiSgeUnion_t, *pMpi2MpiSgeUnion_t; + +/**************************************************************************** +* MPI SGE field definition and masks +****************************************************************************/ + +/*Flags field bit definitions */ + +#define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80) +#define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40) +#define MPI2_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30) +#define MPI2_SGE_FLAGS_LOCAL_ADDRESS (0x08) +#define MPI2_SGE_FLAGS_DIRECTION (0x04) +#define MPI2_SGE_FLAGS_ADDRESS_SIZE (0x02) +#define MPI2_SGE_FLAGS_END_OF_LIST (0x01) + +#define MPI2_SGE_FLAGS_SHIFT (24) + +#define MPI2_SGE_LENGTH_MASK (0x00FFFFFF) +#define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF) + +/*Element Type */ + +#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00) +#define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10) +#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30) +#define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30) + +/*Address location */ + +#define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00) + +/*Direction */ + +#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00) +#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04) + +#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST) +#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC) + +/*Address Size */ + +#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00) +#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02) + +/*Context Size */ + +#define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00) +#define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02) +#define MPI2_SGE_FLAGS_96_BIT_CONTEXT (0x04) +#define MPI2_SGE_FLAGS_128_BIT_CONTEXT (0x06) + +#define MPI2_SGE_CHAIN_OFFSET_MASK (0x00FF0000) +#define MPI2_SGE_CHAIN_OFFSET_SHIFT (16) + +/**************************************************************************** +* MPI SGE operation Macros +****************************************************************************/ + +/*SIMPLE FlagsLength manipulations... */ +#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT) +#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> \ + MPI2_SGE_FLAGS_SHIFT) +#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK) +#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK) + +#define MPI2_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_SGE_SET_FLAGS(f) | \ + MPI2_SGE_LENGTH(l)) + +#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength) +#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength) +#define MPI2_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \ + MPI2_SGE_SET_FLAGS_LENGTH(f, l)) + +/*CAUTION - The following are READ-MODIFY-WRITE! 
*/ +#define MPI2_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \ + MPI2_SGE_SET_FLAGS(f)) +#define MPI2_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \ + MPI2_SGE_LENGTH(l)) + +#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> \ + MPI2_SGE_CHAIN_OFFSET_SHIFT) + +/***************************************************************************** +* +* Fusion-MPT IEEE Scatter Gather Elements +* +*****************************************************************************/ + +/**************************************************************************** +* IEEE Simple Element structures +****************************************************************************/ + +/*MPI2_IEEE_SGE_SIMPLE32 is for MPI v2.0 products only */ +typedef struct _MPI2_IEEE_SGE_SIMPLE32 { + U32 Address; + U32 FlagsLength; +} MPI2_IEEE_SGE_SIMPLE32, *PTR_MPI2_IEEE_SGE_SIMPLE32, + Mpi2IeeeSgeSimple32_t, *pMpi2IeeeSgeSimple32_t; + +typedef struct _MPI2_IEEE_SGE_SIMPLE64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 Reserved2; + U8 Flags; +} MPI2_IEEE_SGE_SIMPLE64, *PTR_MPI2_IEEE_SGE_SIMPLE64, + Mpi2IeeeSgeSimple64_t, *pMpi2IeeeSgeSimple64_t; + +typedef union _MPI2_IEEE_SGE_SIMPLE_UNION { + MPI2_IEEE_SGE_SIMPLE32 Simple32; + MPI2_IEEE_SGE_SIMPLE64 Simple64; +} MPI2_IEEE_SGE_SIMPLE_UNION, + *PTR_MPI2_IEEE_SGE_SIMPLE_UNION, + Mpi2IeeeSgeSimpleUnion_t, + *pMpi2IeeeSgeSimpleUnion_t; + +/**************************************************************************** +* IEEE Chain Element structures +****************************************************************************/ + +/*MPI2_IEEE_SGE_CHAIN32 is for MPI v2.0 products only */ +typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32; + +/*MPI2_IEEE_SGE_CHAIN64 is for MPI v2.0 products only */ +typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64; + +typedef union _MPI2_IEEE_SGE_CHAIN_UNION { + MPI2_IEEE_SGE_CHAIN32 Chain32; + MPI2_IEEE_SGE_CHAIN64 Chain64; +} MPI2_IEEE_SGE_CHAIN_UNION, + *PTR_MPI2_IEEE_SGE_CHAIN_UNION, + Mpi2IeeeSgeChainUnion_t, + *pMpi2IeeeSgeChainUnion_t; + +/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 products only */ +typedef struct _MPI25_IEEE_SGE_CHAIN64 { + U64 Address; + U32 Length; + U16 Reserved1; + U8 NextChainOffset; + U8 Flags; +} MPI25_IEEE_SGE_CHAIN64, + *PTR_MPI25_IEEE_SGE_CHAIN64, + Mpi25IeeeSgeChain64_t, + *pMpi25IeeeSgeChain64_t; + +/**************************************************************************** +* All IEEE SGE types union +****************************************************************************/ + +/*MPI2_IEEE_SGE_UNION is for MPI v2.0 products only */ +typedef struct _MPI2_IEEE_SGE_UNION { + union { + MPI2_IEEE_SGE_SIMPLE_UNION Simple; + MPI2_IEEE_SGE_CHAIN_UNION Chain; + } u; +} MPI2_IEEE_SGE_UNION, *PTR_MPI2_IEEE_SGE_UNION, + Mpi2IeeeSgeUnion_t, *pMpi2IeeeSgeUnion_t; + +/**************************************************************************** +* IEEE SGE union for IO SGL's +****************************************************************************/ + +typedef union _MPI25_SGE_IO_UNION { + MPI2_IEEE_SGE_SIMPLE64 IeeeSimple; + MPI25_IEEE_SGE_CHAIN64 IeeeChain; +} MPI25_SGE_IO_UNION, *PTR_MPI25_SGE_IO_UNION, + Mpi25SGEIOUnion_t, *pMpi25SGEIOUnion_t; + +/**************************************************************************** +* IEEE SGE field definitions and masks +****************************************************************************/ + +/*Flags field bit definitions */ + +#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80) +#define MPI25_IEEE_SGE_FLAGS_END_OF_LIST (0x40) + +#define 
MPI2_IEEE32_SGE_FLAGS_SHIFT (24) + +#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF) + +/*Element Type */ + +#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00) +#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80) + +/*Data Location Address Space */ + +#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03) +#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00) +#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01) +#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02) +#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03) +#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03) +#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \ + (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) + +/**************************************************************************** +* IEEE SGE operation Macros +****************************************************************************/ + +/*SIMPLE FlagsLength manipulations... */ +#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT) +#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) \ + >> MPI2_IEEE32_SGE_FLAGS_SHIFT) +#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK) + +#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) |\ + MPI2_IEEE32_SGE_LENGTH(l)) + +#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) \ + MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength) +#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) \ + MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength) +#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \ + MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l)) + +/*CAUTION - The following are READ-MODIFY-WRITE! */ +#define MPI2_IEEE32_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \ + MPI2_IEEE32_SGE_SET_FLAGS(f)) +#define MPI2_IEEE32_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \ + MPI2_IEEE32_SGE_LENGTH(l)) + +/***************************************************************************** +* +* Fusion-MPT MPI/IEEE Scatter Gather Unions +* +*****************************************************************************/ + +typedef union _MPI2_SIMPLE_SGE_UNION { + MPI2_SGE_SIMPLE_UNION MpiSimple; + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; +} MPI2_SIMPLE_SGE_UNION, *PTR_MPI2_SIMPLE_SGE_UNION, + Mpi2SimpleSgeUntion_t, *pMpi2SimpleSgeUntion_t; + +typedef union _MPI2_SGE_IO_UNION { + MPI2_SGE_SIMPLE_UNION MpiSimple; + MPI2_SGE_CHAIN_UNION MpiChain; + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; + MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; +} MPI2_SGE_IO_UNION, *PTR_MPI2_SGE_IO_UNION, + Mpi2SGEIOUnion_t, *pMpi2SGEIOUnion_t; + +/**************************************************************************** +* +* Values for SGLFlags field, used in many request messages with an SGL +* +****************************************************************************/ + +/*values for MPI SGL Data Location Address Space subfield */ +#define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C) +#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00) +#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04) +#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08) +#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C) +/*values for SGL Type subfield */ +#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03) +#define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00) +#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01) +#define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02) + +#endif diff --git a/addons/mpt3sas/src/4.4.180/mpi/mpi2_cnfg.h b/addons/mpt3sas/src/4.4.180/mpi/mpi2_cnfg.h new file mode 100644 index 00000000..581fdb37 --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpi/mpi2_cnfg.h @@ -0,0 +1,3380 @@ +/* + * Copyright (c) 2000-2014 LSI 
Corporation. + * + * + * Name: mpi2_cnfg.h + * Title: MPI Configuration messages and pages + * Creation Date: November 10, 2006 + * + * mpi2_cnfg.h Version: 02.00.29 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags. + * Added Manufacturing Page 11. + * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE + * define. + * 06-26-07 02.00.02 Adding generic structure for product-specific + * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS. + * Rework of BIOS Page 2 configuration page. + * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the + * forms. + * Added configuration pages IOC Page 8 and Driver + * Persistent Mapping Page 0. + * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated + * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1, + * RAID Physical Disk Pages 0 and 1, RAID Configuration + * Page 0). + * Added new value for AccessStatus field of SAS Device + * Page 0 (_SATA_NEEDS_INITIALIZATION). + * 10-31-07 02.00.04 Added missing SEPDevHandle field to + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for + * NVDATA. + * Modified IOC Page 7 to use masks and added field for + * SASBroadcastPrimitiveMasks. + * Added MPI2_CONFIG_PAGE_BIOS_4. + * Added MPI2_CONFIG_PAGE_LOG_0. + * 02-29-08 02.00.06 Modified various names to make them 32-character unique. + * Added SAS Device IDs. + * Updated Integrated RAID configuration pages including + * Manufacturing Page 4, IOC Page 6, and RAID Configuration + * Page 0. + * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA. + * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION. + * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING. + * Added missing MaxNumRoutedSasAddresses field to + * MPI2_CONFIG_PAGE_EXPANDER_0. + * Added SAS Port Page 0. + * Modified structure layout for + * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0. + * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use + * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array. + * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF + * to 0x000000FF. + * Added two new values for the Physical Disk Coercion Size + * bits in the Flags field of Manufacturing Page 4. + * Added product-specific Manufacturing pages 16 to 31. + * Modified Flags bits for controlling write cache on SATA + * drives in IO Unit Page 1. + * Added new bit to AdditionalControlFlags of SAS IO Unit + * Page 1 to control Invalid Topology Correction. + * Added additional defines for RAID Volume Page 0 + * VolumeStatusFlags field. + * Modified meaning of RAID Volume Page 0 VolumeSettings + * define for auto-configure of hot-swap drives. + * Added SupportedPhysDisks field to RAID Volume Page 1 and + * added related defines. + * Added PhysDiskAttributes field (and related defines) to + * RAID Physical Disk Page 0. + * Added MPI2_SAS_PHYINFO_PHY_VACANT define. + * Added three new DiscoveryStatus bits for SAS IO Unit + * Page 0 and SAS Expander Page 0. 
+ * Removed multiplexing information from SAS IO Unit pages. + * Added BootDeviceWaitTime field to SAS IO Unit Page 4. + * Removed Zone Address Resolved bit from PhyInfo and from + * Expander Page 0 Flags field. + * Added two new AccessStatus values to SAS Device Page 0 + * for indicating routing problems. Added 3 reserved words + * to this page. + * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3. + * Inserted missing reserved field into structure for IOC + * Page 6. + * Added more pending task bits to RAID Volume Page 0 + * VolumeStatusFlags defines. + * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define. + * Added a new DiscoveryStatus bit for SAS IO Unit Page 0 + * and SAS Expander Page 0 to flag a downstream initiator + * when in simplified routing mode. + * Removed SATA Init Failure defines for DiscoveryStatus + * fields of SAS IO Unit Page 0 and SAS Expander Page 0. + * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define. + * Added PortGroups, DmaGroup, and ControlGroup fields to + * SAS Device Page 0. + * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO + * Unit Page 6. + * Added expander reduced functionality data to SAS + * Expander Page 0. + * Added SAS PHY Page 2 and SAS PHY Page 3. + * 07-30-09 02.00.12 Added IO Unit Page 7. + * Added new device ids. + * Added SAS IO Unit Page 5. + * Added partial and slumber power management capable flags + * to SAS Device Page 0 Flags field. + * Added PhyInfo defines for power condition. + * Added Ethernet configuration pages. + * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY. + * Added SAS PHY Page 4 structure and defines. + * 02-10-10 02.00.14 Modified the comments for the configuration page + * structures that contain an array of data. The host + * should use the "count" field in the page data (e.g. the + * NumPhys field) to determine the number of valid elements + * in the array. + * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines. + * Added PowerManagementCapabilities to IO Unit Page 7. + * Added PortWidthModGroup field to + * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines. + * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines. + * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT + * define. + * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define. + * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define. + * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing) + * defines. + * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to + * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for + * the Pinout field. + * Added BoardTemperature and BoardTemperatureUnits fields + * to MPI2_CONFIG_PAGE_IO_UNIT_7. + * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define + * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure. + * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST. + * Added IO Unit Page 8, IO Unit Page 9, + * and IO Unit Page 10. + * Added SASNotifyPrimitiveMasks field to + * MPI2_CONFIG_PAGE_IOC_7. + * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec). + * 05-25-11 02.00.20 Cleaned up a few comments. + * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities + * for PCIe link as obsolete. + * Added SpinupFlags field containing a Disable Spin-up bit + * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO + * Unit Page 4. 
+ * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT. + * Added UEFIVersion field to BIOS Page 1 and defined new + * BiosOptions bits. + * Incorporating additions for MPI v2.5. + * 11-27-12 02.00.23 Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER. + * Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID. + * 12-20-12 02.00.24 Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as + * obsolete for MPI v2.5 and later. + * Added some defines for 12G SAS speeds. + * 04-09-13 02.00.25 Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK. + * Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to + * match the specification. + * 08-19-13 02.00.26 Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for + * future use. + * 12-05-13 02.00.27 Added MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL for + * MPI2_CONFIG_PAGE_MAN_7. + * Added EnclosureLevel and ConnectorName fields to + * MPI2_CONFIG_PAGE_SAS_DEV_0. + * Added MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID for + * MPI2_CONFIG_PAGE_SAS_DEV_0. + * Added EnclosureLevel field to + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * Added MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID for + * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0. + * 01-08-14 02.00.28 Added more defines for the BiosOptions field of + * MPI2_CONFIG_PAGE_BIOS_1. + * 06-13-14 02.00.29 Added SSUTimeout field to MPI2_CONFIG_PAGE_BIOS_1, and + * more defines for the BiosOptions field. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_CNFG_H +#define MPI2_CNFG_H + +/***************************************************************************** +* Configuration Page Header and defines +*****************************************************************************/ + +/*Config Page Header */ +typedef struct _MPI2_CONFIG_PAGE_HEADER { + U8 PageVersion; /*0x00 */ + U8 PageLength; /*0x01 */ + U8 PageNumber; /*0x02 */ + U8 PageType; /*0x03 */ +} MPI2_CONFIG_PAGE_HEADER, *PTR_MPI2_CONFIG_PAGE_HEADER, + Mpi2ConfigPageHeader_t, *pMpi2ConfigPageHeader_t; + +typedef union _MPI2_CONFIG_PAGE_HEADER_UNION { + MPI2_CONFIG_PAGE_HEADER Struct; + U8 Bytes[4]; + U16 Word16[2]; + U32 Word32; +} MPI2_CONFIG_PAGE_HEADER_UNION, *PTR_MPI2_CONFIG_PAGE_HEADER_UNION, + Mpi2ConfigPageHeaderUnion, *pMpi2ConfigPageHeaderUnion; + +/*Extended Config Page Header */ +typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER { + U8 PageVersion; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 PageNumber; /*0x02 */ + U8 PageType; /*0x03 */ + U16 ExtPageLength; /*0x04 */ + U8 ExtPageType; /*0x06 */ + U8 Reserved2; /*0x07 */ +} MPI2_CONFIG_EXTENDED_PAGE_HEADER, + *PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER, + Mpi2ConfigExtendedPageHeader_t, + *pMpi2ConfigExtendedPageHeader_t; + +typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION { + MPI2_CONFIG_PAGE_HEADER Struct; + MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext; + U8 Bytes[8]; + U16 Word16[4]; + U32 Word32[2]; +} MPI2_CONFIG_EXT_PAGE_HEADER_UNION, + *PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION, + Mpi2ConfigPageExtendedHeaderUnion, + *pMpi2ConfigPageExtendedHeaderUnion; + + +/*PageType field values */ +#define MPI2_CONFIG_PAGEATTR_READ_ONLY (0x00) +#define MPI2_CONFIG_PAGEATTR_CHANGEABLE (0x10) +#define MPI2_CONFIG_PAGEATTR_PERSISTENT (0x20) +#define MPI2_CONFIG_PAGEATTR_MASK (0xF0) + +#define MPI2_CONFIG_PAGETYPE_IO_UNIT (0x00) +#define MPI2_CONFIG_PAGETYPE_IOC (0x01) +#define MPI2_CONFIG_PAGETYPE_BIOS (0x02) +#define MPI2_CONFIG_PAGETYPE_RAID_VOLUME (0x08) +#define MPI2_CONFIG_PAGETYPE_MANUFACTURING (0x09) +#define MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A) +#define MPI2_CONFIG_PAGETYPE_EXTENDED
(0x0F) +#define MPI2_CONFIG_PAGETYPE_MASK (0x0F) + +#define MPI2_CONFIG_TYPENUM_MASK (0x0FFF) + + +/*ExtPageType field values */ +#define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_PHY (0x13) +#define MPI2_CONFIG_EXTPAGETYPE_LOG (0x14) +#define MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15) +#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16) +#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17) +#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18) +#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19) +#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A) + + +/***************************************************************************** +* PageAddress defines +*****************************************************************************/ + +/*RAID Volume PageAddress format */ +#define MPI2_RAID_VOLUME_PGAD_FORM_MASK (0xF0000000) +#define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000) + +#define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK (0x0000FFFF) + + +/*RAID Physical Disk PageAddress format */ +#define MPI2_PHYSDISK_PGAD_FORM_MASK (0xF0000000) +#define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000) +#define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000) +#define MPI2_PHYSDISK_PGAD_FORM_DEVHANDLE (0x20000000) + +#define MPI2_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF) +#define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK (0x0000FFFF) + + +/*SAS Expander PageAddress format */ +#define MPI2_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000) +#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000) + +#define MPI2_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000FFFF) +#define MPI2_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00FF0000) +#define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16) + + +/*SAS Device PageAddress format */ +#define MPI2_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000) + +#define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF) + + +/*SAS PHY PageAddress format */ +#define MPI2_SAS_PHY_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000) +#define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX (0x10000000) + +#define MPI2_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF) +#define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK (0x0000FFFF) + + +/*SAS Port PageAddress format */ +#define MPI2_SASPORT_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000) +#define MPI2_SASPORT_PGAD_FORM_PORT_NUM (0x10000000) + +#define MPI2_SASPORT_PGAD_PORTNUMBER_MASK (0x00000FFF) + + +/*SAS Enclosure PageAddress format */ +#define MPI2_SAS_ENCLOS_PGAD_FORM_MASK (0xF0000000) +#define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000) +#define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000) + +#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF) + + +/*RAID Configuration PageAddress format */ +#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000) +#define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000) +#define MPI2_RAID_PGAD_FORM_CONFIGNUM (0x10000000) +#define MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG (0x20000000) + +#define MPI2_RAID_PGAD_CONFIGNUM_MASK (0x000000FF) + + +/*Driver Persistent Mapping PageAddress format */ +#define MPI2_DPM_PGAD_FORM_MASK (0xF0000000) +#define MPI2_DPM_PGAD_FORM_ENTRY_RANGE 
(0x00000000) + +#define MPI2_DPM_PGAD_ENTRY_COUNT_MASK (0x0FFF0000) +#define MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT (16) +#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF) + + +/*Ethernet PageAddress format */ +#define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000) +#define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000) + +#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF) + + + +/**************************************************************************** +* Configuration messages +****************************************************************************/ + +/*Configuration Request Message */ +typedef struct _MPI2_CONFIG_REQUEST { + U8 Action; /*0x00 */ + U8 SGLFlags; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 ExtPageLength; /*0x04 */ + U8 ExtPageType; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U8 Reserved2; /*0x0C */ + U8 ProxyVF_ID; /*0x0D */ + U16 Reserved4; /*0x0E */ + U32 Reserved3; /*0x10 */ + MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */ + U32 PageAddress; /*0x18 */ + MPI2_SGE_IO_UNION PageBufferSGE; /*0x1C */ +} MPI2_CONFIG_REQUEST, *PTR_MPI2_CONFIG_REQUEST, + Mpi2ConfigRequest_t, *pMpi2ConfigRequest_t; + +/*values for the Action field */ +#define MPI2_CONFIG_ACTION_PAGE_HEADER (0x00) +#define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT (0x01) +#define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02) +#define MPI2_CONFIG_ACTION_PAGE_DEFAULT (0x03) +#define MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04) +#define MPI2_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05) +#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06) +#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07) + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + + +/*Config Reply Message */ +typedef struct _MPI2_CONFIG_REPLY { + U8 Action; /*0x00 */ + U8 SGLFlags; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 ExtPageLength; /*0x04 */ + U8 ExtPageType; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U16 Reserved2; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */ +} MPI2_CONFIG_REPLY, *PTR_MPI2_CONFIG_REPLY, + Mpi2ConfigReply_t, *pMpi2ConfigReply_t; + + + +/***************************************************************************** +* +* C o n f i g u r a t i o n P a g e s +* +*****************************************************************************/ + +/**************************************************************************** +* Manufacturing Config pages +****************************************************************************/ + +#define MPI2_MFGPAGE_VENDORID_LSI (0x1000) + +/*MPI v2.0 SAS products */ +#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070) +#define MPI2_MFGPAGE_DEVID_SAS2008 (0x0072) +#define MPI2_MFGPAGE_DEVID_SAS2108_1 (0x0074) +#define MPI2_MFGPAGE_DEVID_SAS2108_2 (0x0076) +#define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077) +#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064) +#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065) + +#define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E) + +#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080) +#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081) +#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082) +#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083) +#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084) +#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085) +#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086) +#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087) +#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E) + +/*MPI v2.5 
SAS products */ +#define MPI25_MFGPAGE_DEVID_SAS3004 (0x0096) +#define MPI25_MFGPAGE_DEVID_SAS3008 (0x0097) +#define MPI25_MFGPAGE_DEVID_SAS3108_1 (0x0090) +#define MPI25_MFGPAGE_DEVID_SAS3108_2 (0x0091) +#define MPI25_MFGPAGE_DEVID_SAS3108_5 (0x0094) +#define MPI25_MFGPAGE_DEVID_SAS3108_6 (0x0095) + + + + +/*Manufacturing Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 ChipName[16]; /*0x04 */ + U8 ChipRevision[8]; /*0x14 */ + U8 BoardName[16]; /*0x1C */ + U8 BoardAssembly[16]; /*0x2C */ + U8 BoardTracerNumber[16]; /*0x3C */ +} MPI2_CONFIG_PAGE_MAN_0, + *PTR_MPI2_CONFIG_PAGE_MAN_0, + Mpi2ManufacturingPage0_t, + *pMpi2ManufacturingPage0_t; + +#define MPI2_MANUFACTURING0_PAGEVERSION (0x00) + + +/*Manufacturing Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 VPD[256]; /*0x04 */ +} MPI2_CONFIG_PAGE_MAN_1, + *PTR_MPI2_CONFIG_PAGE_MAN_1, + Mpi2ManufacturingPage1_t, + *pMpi2ManufacturingPage1_t; + +#define MPI2_MANUFACTURING1_PAGEVERSION (0x00) + + +typedef struct _MPI2_CHIP_REVISION_ID { + U16 DeviceID; /*0x00 */ + U8 PCIRevisionID; /*0x02 */ + U8 Reserved; /*0x03 */ +} MPI2_CHIP_REVISION_ID, *PTR_MPI2_CHIP_REVISION_ID, + Mpi2ChipRevisionId_t, *pMpi2ChipRevisionId_t; + + +/*Manufacturing Page 2 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check Header.PageLength at runtime. + */ +#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS +#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_2 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + MPI2_CHIP_REVISION_ID ChipId; /*0x04 */ + U32 + HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/*0x08 */ +} MPI2_CONFIG_PAGE_MAN_2, + *PTR_MPI2_CONFIG_PAGE_MAN_2, + Mpi2ManufacturingPage2_t, + *pMpi2ManufacturingPage2_t; + +#define MPI2_MANUFACTURING2_PAGEVERSION (0x00) + + +/*Manufacturing Page 3 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check Header.PageLength at runtime. 
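 *
 *Illustrative sketch (editor's note, not part of the original header):
 *PageLength gives the page size in 32-bit words, header included, so the
 *true number of Info words can be derived once the page has been read into
 *a large-enough buffer. fetch_page() and use_word() are hypothetical:
 *
 *  Mpi2ManufacturingPage3_t *page = fetch_page();
 *  int info_words = page->Header.PageLength - 2;  // Header + ChipId = 2 words
 *  for (int i = 0; i < info_words; i++)
 *      use_word(page->Info[i]);                    // may exceed declared [1]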
+ */ +#ifndef MPI2_MAN_PAGE_3_INFO_WORDS +#define MPI2_MAN_PAGE_3_INFO_WORDS (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_3 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + MPI2_CHIP_REVISION_ID ChipId; /*0x04 */ + U32 + Info[MPI2_MAN_PAGE_3_INFO_WORDS];/*0x08 */ +} MPI2_CONFIG_PAGE_MAN_3, + *PTR_MPI2_CONFIG_PAGE_MAN_3, + Mpi2ManufacturingPage3_t, + *pMpi2ManufacturingPage3_t; + +#define MPI2_MANUFACTURING3_PAGEVERSION (0x00) + + +/*Manufacturing Page 4 */ + +typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS { + U8 PowerSaveFlags; /*0x00 */ + U8 InternalOperationsSleepTime; /*0x01 */ + U8 InternalOperationsRunTime; /*0x02 */ + U8 HostIdleTime; /*0x03 */ +} MPI2_MANPAGE4_PWR_SAVE_SETTINGS, + *PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS, + Mpi2ManPage4PwrSaveSettings_t, + *pMpi2ManPage4PwrSaveSettings_t; + +/*defines for the PowerSaveFlags field */ +#define MPI2_MANPAGE4_MASK_POWERSAVE_MODE (0x03) +#define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED (0x00) +#define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE (0x01) +#define MPI2_MANPAGE4_FULL_POWERSAVE_MODE (0x02) + +typedef struct _MPI2_CONFIG_PAGE_MAN_4 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Flags; /*0x08 */ + U8 InquirySize; /*0x0C */ + U8 Reserved2; /*0x0D */ + U16 Reserved3; /*0x0E */ + U8 InquiryData[56]; /*0x10 */ + U32 RAID0VolumeSettings; /*0x48 */ + U32 RAID1EVolumeSettings; /*0x4C */ + U32 RAID1VolumeSettings; /*0x50 */ + U32 RAID10VolumeSettings; /*0x54 */ + U32 Reserved4; /*0x58 */ + U32 Reserved5; /*0x5C */ + MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /*0x60 */ + U8 MaxOCEDisks; /*0x64 */ + U8 ResyncRate; /*0x65 */ + U16 DataScrubDuration; /*0x66 */ + U8 MaxHotSpares; /*0x68 */ + U8 MaxPhysDisksPerVol; /*0x69 */ + U8 MaxPhysDisks; /*0x6A */ + U8 MaxVolumes; /*0x6B */ +} MPI2_CONFIG_PAGE_MAN_4, + *PTR_MPI2_CONFIG_PAGE_MAN_4, + Mpi2ManufacturingPage4_t, + *pMpi2ManufacturingPage4_t; + +#define MPI2_MANUFACTURING4_PAGEVERSION (0x0A) + +/*Manufacturing Page 4 Flags field */ +#define MPI2_MANPAGE4_METADATA_SIZE_MASK (0x00030000) +#define MPI2_MANPAGE4_METADATA_512MB (0x00000000) + +#define MPI2_MANPAGE4_MIX_SSD_SAS_SATA (0x00008000) +#define MPI2_MANPAGE4_MIX_SSD_AND_NON_SSD (0x00004000) +#define MPI2_MANPAGE4_HIDE_PHYSDISK_NON_IR (0x00002000) + +#define MPI2_MANPAGE4_MASK_PHYSDISK_COERCION (0x00001C00) +#define MPI2_MANPAGE4_PHYSDISK_COERCION_1GB (0x00000000) +#define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION (0x00000400) +#define MPI2_MANPAGE4_PHYSDISK_ADAPTIVE_COERCION (0x00000800) +#define MPI2_MANPAGE4_PHYSDISK_ZERO_COERCION (0x00000C00) + +#define MPI2_MANPAGE4_MASK_BAD_BLOCK_MARKING (0x00000300) +#define MPI2_MANPAGE4_DEFAULT_BAD_BLOCK_MARKING (0x00000000) +#define MPI2_MANPAGE4_TABLE_BAD_BLOCK_MARKING (0x00000100) +#define MPI2_MANPAGE4_WRITE_LONG_BAD_BLOCK_MARKING (0x00000200) + +#define MPI2_MANPAGE4_FORCE_OFFLINE_FAILOVER (0x00000080) +#define MPI2_MANPAGE4_RAID10_DISABLE (0x00000040) +#define MPI2_MANPAGE4_RAID1E_DISABLE (0x00000020) +#define MPI2_MANPAGE4_RAID1_DISABLE (0x00000010) +#define MPI2_MANPAGE4_RAID0_DISABLE (0x00000008) +#define MPI2_MANPAGE4_IR_MODEPAGE8_DISABLE (0x00000004) +#define MPI2_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x00000002) +#define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA (0x00000001) + + +/*Manufacturing Page 5 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. 
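 *
 *Sketch (editor's note, hypothetical helpers): the runtime NumPhys field,
 *not the compile-time array bound, limits the walk:
 *
 *  Mpi2ManufacturingPage5_t *page = fetch_page();
 *  for (int i = 0; i < page->NumPhys; i++)        // not PHY_ENTRIES
 *      report_wwid(page->Phy[i].WWID, page->Phy[i].DeviceName);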
+ */ +#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES +#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1) +#endif + +typedef struct _MPI2_MANUFACTURING5_ENTRY { + U64 WWID; /*0x00 */ + U64 DeviceName; /*0x08 */ +} MPI2_MANUFACTURING5_ENTRY, + *PTR_MPI2_MANUFACTURING5_ENTRY, + Mpi2Manufacturing5Entry_t, + *pMpi2Manufacturing5Entry_t; + +typedef struct _MPI2_CONFIG_PAGE_MAN_5 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumPhys; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ + MPI2_MANUFACTURING5_ENTRY + Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_MAN_5, + *PTR_MPI2_CONFIG_PAGE_MAN_5, + Mpi2ManufacturingPage5_t, + *pMpi2ManufacturingPage5_t; + +#define MPI2_MANUFACTURING5_PAGEVERSION (0x03) + + +/*Manufacturing Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_6 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 ProductSpecificInfo;/*0x04 */ +} MPI2_CONFIG_PAGE_MAN_6, + *PTR_MPI2_CONFIG_PAGE_MAN_6, + Mpi2ManufacturingPage6_t, + *pMpi2ManufacturingPage6_t; + +#define MPI2_MANUFACTURING6_PAGEVERSION (0x00) + + +/*Manufacturing Page 7 */ + +typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO { + U32 Pinout; /*0x00 */ + U8 Connector[16]; /*0x04 */ + U8 Location; /*0x14 */ + U8 ReceptacleID; /*0x15 */ + U16 Slot; /*0x16 */ + U32 Reserved2; /*0x18 */ +} MPI2_MANPAGE7_CONNECTOR_INFO, + *PTR_MPI2_MANPAGE7_CONNECTOR_INFO, + Mpi2ManPage7ConnectorInfo_t, + *pMpi2ManPage7ConnectorInfo_t; + +/*defines for the Pinout field */ +#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00) +#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8) + +#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF) +#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00) +#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01) +#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02) +#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03) +#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04) +#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05) +#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06) +#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07) +#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08) +#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B) +#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C) +#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D) + +/*defines for the Location field */ +#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01) +#define MPI2_MANPAGE7_LOCATION_INTERNAL (0x02) +#define MPI2_MANPAGE7_LOCATION_EXTERNAL (0x04) +#define MPI2_MANPAGE7_LOCATION_SWITCHABLE (0x08) +#define MPI2_MANPAGE7_LOCATION_AUTO (0x10) +#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20) +#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime.
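 *
 *Sketch (editor's note, not part of the original header) of how the Pinout
 *masks above split each valid connector entry into lane count and connector
 *type; fetch_page() and print_connector() are hypothetical:
 *
 *  Mpi2ManufacturingPage7_t *page = fetch_page();
 *  for (int i = 0; i < page->NumPhys; i++) {
 *      U32 pinout = page->ConnectorInfo[i].Pinout;
 *      U8 lane = (U8)((pinout & MPI2_MANPAGE7_PINOUT_LANE_MASK) >>
 *                     MPI2_MANPAGE7_PINOUT_LANE_SHIFT);
 *      U8 type = (U8)(pinout & MPI2_MANPAGE7_PINOUT_TYPE_MASK);
 *      print_connector(lane, type, page->ConnectorInfo[i].Location);
 *  }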
+ */ +#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX +#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_MAN_7 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U32 Flags; /*0x0C */ + U8 EnclosureName[16]; /*0x10 */ + U8 NumPhys; /*0x20 */ + U8 Reserved3; /*0x21 */ + U16 Reserved4; /*0x22 */ + MPI2_MANPAGE7_CONNECTOR_INFO + ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /*0x24 */ +} MPI2_CONFIG_PAGE_MAN_7, + *PTR_MPI2_CONFIG_PAGE_MAN_7, + Mpi2ManufacturingPage7_t, + *pMpi2ManufacturingPage7_t; + +#define MPI2_MANUFACTURING7_PAGEVERSION (0x01) + +/*defines for the Flags field */ +#define MPI2_MANPAGE7_FLAG_BASE_ENCLOSURE_LEVEL (0x00000008) +#define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) +#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) + + +/* + *Generic structure to use for product-specific manufacturing pages + *(currently Manufacturing Page 8 through Manufacturing Page 31). + */ + +typedef struct _MPI2_CONFIG_PAGE_MAN_PS { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 ProductSpecificInfo;/*0x04 */ +} MPI2_CONFIG_PAGE_MAN_PS, + *PTR_MPI2_CONFIG_PAGE_MAN_PS, + Mpi2ManufacturingPagePS_t, + *pMpi2ManufacturingPagePS_t; + +#define MPI2_MANUFACTURING8_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING9_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING10_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING11_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING12_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING13_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING14_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING15_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING16_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING17_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING18_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING19_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING20_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING21_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING22_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING23_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING24_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING25_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING26_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING27_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING28_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING29_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING30_PAGEVERSION (0x00) +#define MPI2_MANUFACTURING31_PAGEVERSION (0x00) + + +/**************************************************************************** +* IO Unit Config Pages +****************************************************************************/ + +/*IO Unit Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U64 UniqueValue; /*0x04 */ + MPI2_VERSION_UNION NvdataVersionDefault; /*0x0C */ + MPI2_VERSION_UNION NvdataVersionPersistent; /*0x0E */ +} MPI2_CONFIG_PAGE_IO_UNIT_0, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_0, + Mpi2IOUnitPage0_t, *pMpi2IOUnitPage0_t; + +#define MPI2_IOUNITPAGE0_PAGEVERSION (0x02) + + +/*IO Unit Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Flags; /*0x04 */ +} MPI2_CONFIG_PAGE_IO_UNIT_1, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_1, + Mpi2IOUnitPage1_t, *pMpi2IOUnitPage1_t; + +#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04) + +/*IO Unit Page 1 Flags defines */ +#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000) +#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000) +#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000) +#define
MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800) +#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600) +#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9) +#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000) +#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200) +#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400) +#define MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100) +#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040) +#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020) +#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004) + + +/*IO Unit Page 3 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for GPIOCount at runtime. + */ +#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX +#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 GPIOCount; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U16 + GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/*0x08 */ +} MPI2_CONFIG_PAGE_IO_UNIT_3, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_3, + Mpi2IOUnitPage3_t, *pMpi2IOUnitPage3_t; + +#define MPI2_IOUNITPAGE3_PAGEVERSION (0x01) + +/*defines for IO Unit Page 3 GPIOVal field */ +#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFFFC) +#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2) +#define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF (0x0000) +#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001) + + +/*IO Unit Page 5 */ + +/* + *Upper layer code (drivers, utilities, etc.) should leave this define set to + *one and check the value returned for NumDmaEngines at runtime. + */ +#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES +#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U64 + RaidAcceleratorBufferBaseAddress; /*0x04 */ + U64 + RaidAcceleratorBufferSize; /*0x0C */ + U64 + RaidAcceleratorControlBaseAddress; /*0x14 */ + U8 RAControlSize; /*0x1C */ + U8 NumDmaEngines; /*0x1D */ + U8 RAMinControlSize; /*0x1E */ + U8 RAMaxControlSize; /*0x1F */ + U32 Reserved1; /*0x20 */ + U32 Reserved2; /*0x24 */ + U32 Reserved3; /*0x28 */ + U32 + DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /*0x2C */ +} MPI2_CONFIG_PAGE_IO_UNIT_5, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_5, + Mpi2IOUnitPage5_t, *pMpi2IOUnitPage5_t; + +#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00) + +/*defines for IO Unit Page 5 DmaEngineCapabilities field */ +#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFFFF0000) +#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (16) + +#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008) +#define MPI2_IOUNITPAGE5_DMA_CAP_PARITY_GENERATION (0x0004) +#define MPI2_IOUNITPAGE5_DMA_CAP_HASHING (0x0002) +#define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION (0x0001) + + +/*IO Unit Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 Flags; /*0x04 */ + U8 RAHostControlSize; /*0x06 */ + U8 Reserved0; /*0x07 */ + U64 + RaidAcceleratorHostControlBaseAddress; /*0x08 */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ + U32 Reserved3; /*0x18 */ +} MPI2_CONFIG_PAGE_IO_UNIT_6, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_6, + Mpi2IOUnitPage6_t, *pMpi2IOUnitPage6_t; + +#define MPI2_IOUNITPAGE6_PAGEVERSION (0x00) + +/*defines for IO Unit Page 6 Flags field */ +#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001) + + +/*IO Unit Page 7 */ + +typedef struct 
_MPI2_CONFIG_PAGE_IO_UNIT_7 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 CurrentPowerMode; /*0x04 */ + U8 PreviousPowerMode; /*0x05 */ + U8 PCIeWidth; /*0x06 */ + U8 PCIeSpeed; /*0x07 */ + U32 ProcessorState; /*0x08 */ + U32 + PowerManagementCapabilities; /*0x0C */ + U16 IOCTemperature; /*0x10 */ + U8 + IOCTemperatureUnits; /*0x12 */ + U8 IOCSpeed; /*0x13 */ + U16 BoardTemperature; /*0x14 */ + U8 + BoardTemperatureUnits; /*0x16 */ + U8 Reserved3; /*0x17 */ + U32 Reserved4; /* 0x18 */ + U32 Reserved5; /* 0x1C */ + U32 Reserved6; /* 0x20 */ + U32 Reserved7; /* 0x24 */ +} MPI2_CONFIG_PAGE_IO_UNIT_7, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7, + Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t; + +#define MPI2_IOUNITPAGE7_PAGEVERSION (0x04) + +/*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */ +#define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0) +#define MPI25_IOUNITPAGE7_PM_INIT_UNAVAILABLE (0x00) +#define MPI25_IOUNITPAGE7_PM_INIT_HOST (0x40) +#define MPI25_IOUNITPAGE7_PM_INIT_IO_UNIT (0x80) +#define MPI25_IOUNITPAGE7_PM_INIT_PCIE_DPA (0xC0) + +#define MPI25_IOUNITPAGE7_PM_MODE_MASK (0x07) +#define MPI25_IOUNITPAGE7_PM_MODE_UNAVAILABLE (0x00) +#define MPI25_IOUNITPAGE7_PM_MODE_UNKNOWN (0x01) +#define MPI25_IOUNITPAGE7_PM_MODE_FULL_POWER (0x04) +#define MPI25_IOUNITPAGE7_PM_MODE_REDUCED_POWER (0x05) +#define MPI25_IOUNITPAGE7_PM_MODE_STANDBY (0x06) + + +/*defines for IO Unit Page 7 PCIeWidth field */ +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04) +#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08) + +/*defines for IO Unit Page 7 PCIeSpeed field */ +#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01) +#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02) + +/*defines for IO Unit Page 7 ProcessorState field */ +#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F) +#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0) + +#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01) +#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02) + +/*defines for IO Unit Page 7 PowerManagementCapabilities field */ +#define MPI25_IOUNITPAGE7_PMCAP_DPA_FULL_PWR_MODE (0x00400000) +#define MPI25_IOUNITPAGE7_PMCAP_DPA_REDUCED_PWR_MODE (0x00200000) +#define MPI25_IOUNITPAGE7_PMCAP_DPA_STANDBY_MODE (0x00100000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_FULL_PWR_MODE (0x00040000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_REDUCED_PWR_MODE (0x00020000) +#define MPI25_IOUNITPAGE7_PMCAP_HOST_STANDBY_MODE (0x00010000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_FULL_PWR_MODE (0x00004000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_REDUCED_PWR_MODE (0x00002000) +#define MPI25_IOUNITPAGE7_PMCAP_IO_STANDBY_MODE (0x00001000) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_12_5_PCT_IOCSPEED (0x00000400) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_25_0_PCT_IOCSPEED (0x00000200) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_50_0_PCT_IOCSPEED (0x00000100) +#define MPI25_IOUNITPAGE7_PMCAP_IO_12_5_PCT_IOCSPEED (0x00000040) +#define MPI25_IOUNITPAGE7_PMCAP_IO_25_0_PCT_IOCSPEED (0x00000020) +#define MPI25_IOUNITPAGE7_PMCAP_IO_50_0_PCT_IOCSPEED (0x00000010) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_WIDTH_CHANGE_PCIE (0x00000008) +#define MPI2_IOUNITPAGE7_PMCAP_HOST_SPEED_CHANGE_PCIE (0x00000004) +#define MPI25_IOUNITPAGE7_PMCAP_IO_WIDTH_CHANGE_PCIE (0x00000002) +#define MPI25_IOUNITPAGE7_PMCAP_IO_SPEED_CHANGE_PCIE (0x00000001) + +/*obsolete names for the PowerManagementCapabilities bits (above) */ +#define 
MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400) +#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200) +#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100) +#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /*obsolete */ +#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /*obsolete */ + + +/*defines for IO Unit Page 7 IOCTemperatureUnits field */ +#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01) +#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02) + +/*defines for IO Unit Page 7 IOCSpeed field */ +#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01) +#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02) +#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04) +#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08) + +/*defines for IO Unit Page 7 BoardTemperatureUnits field */ +#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00) +#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01) +#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02) + + +/*IO Unit Page 8 */ + +#define MPI2_IOUNIT8_NUM_THRESHOLDS (4) + +typedef struct _MPI2_IOUNIT8_SENSOR { + U16 Flags; /*0x00 */ + U16 Reserved1; /*0x02 */ + U16 + Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /*0x04 */ + U32 Reserved2; /*0x0C */ + U32 Reserved3; /*0x10 */ + U32 Reserved4; /*0x14 */ +} MPI2_IOUNIT8_SENSOR, *PTR_MPI2_IOUNIT8_SENSOR, + Mpi2IOUnit8Sensor_t, *pMpi2IOUnit8Sensor_t; + +/*defines for IO Unit Page 8 Sensor Flags field */ +#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002) +#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumSensors at runtime. + */ +#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES +#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U8 NumSensors; /*0x0C */ + U8 PollingInterval; /*0x0D */ + U16 Reserved3; /*0x0E */ + MPI2_IOUNIT8_SENSOR + Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_8, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_8, + Mpi2IOUnitPage8_t, *pMpi2IOUnitPage8_t; + +#define MPI2_IOUNITPAGE8_PAGEVERSION (0x00) + + +/*IO Unit Page 9 */ + +typedef struct _MPI2_IOUNIT9_SENSOR { + U16 CurrentTemperature; /*0x00 */ + U16 Reserved1; /*0x02 */ + U8 Flags; /*0x04 */ + U8 Reserved2; /*0x05 */ + U16 Reserved3; /*0x06 */ + U32 Reserved4; /*0x08 */ + U32 Reserved5; /*0x0C */ +} MPI2_IOUNIT9_SENSOR, *PTR_MPI2_IOUNIT9_SENSOR, + Mpi2IOUnit9Sensor_t, *pMpi2IOUnit9Sensor_t; + +/*defines for IO Unit Page 9 Sensor Flags field */ +#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumSensors at runtime. 
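 *
 *Sketch (editor's note, hypothetical helpers): a reading is meaningful only
 *while its valid bit is set:
 *
 *  Mpi2IOUnitPage9_t *page = fetch_page();
 *  for (int i = 0; i < page->NumSensors; i++)
 *      if (page->Sensor[i].Flags & MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID)
 *          log_temp(i, page->Sensor[i].CurrentTemperature);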
+ */ +#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES +#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U8 NumSensors; /*0x0C */ + U8 Reserved4; /*0x0D */ + U16 Reserved3; /*0x0E */ + MPI2_IOUNIT9_SENSOR + Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_9, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_9, + Mpi2IOUnitPage9_t, *pMpi2IOUnitPage9_t; + +#define MPI2_IOUNITPAGE9_PAGEVERSION (0x00) + + +/*IO Unit Page 10 */ + +typedef struct _MPI2_IOUNIT10_FUNCTION { + U8 CreditPercent; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ +} MPI2_IOUNIT10_FUNCTION, + *PTR_MPI2_IOUNIT10_FUNCTION, + Mpi2IOUnit10Function_t, + *pMpi2IOUnit10Function_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumFunctions at runtime. + */ +#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES +#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumFunctions; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ + MPI2_IOUNIT10_FUNCTION + Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/*0x10 */ +} MPI2_CONFIG_PAGE_IO_UNIT_10, + *PTR_MPI2_CONFIG_PAGE_IO_UNIT_10, + Mpi2IOUnitPage10_t, *pMpi2IOUnitPage10_t; + +#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01) + + + +/**************************************************************************** +* IOC Config Pages +****************************************************************************/ + +/*IOC Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U16 VendorID; /*0x0C */ + U16 DeviceID; /*0x0E */ + U8 RevisionID; /*0x10 */ + U8 Reserved3; /*0x11 */ + U16 Reserved4; /*0x12 */ + U32 ClassCode; /*0x14 */ + U16 SubsystemVendorID; /*0x18 */ + U16 SubsystemID; /*0x1A */ +} MPI2_CONFIG_PAGE_IOC_0, + *PTR_MPI2_CONFIG_PAGE_IOC_0, + Mpi2IOCPage0_t, *pMpi2IOCPage0_t; + +#define MPI2_IOCPAGE0_PAGEVERSION (0x02) + + +/*IOC Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Flags; /*0x04 */ + U32 CoalescingTimeout; /*0x08 */ + U8 CoalescingDepth; /*0x0C */ + U8 PCISlotNum; /*0x0D */ + U8 PCIBusNum; /*0x0E */ + U8 PCIDomainSegment; /*0x0F */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ +} MPI2_CONFIG_PAGE_IOC_1, + *PTR_MPI2_CONFIG_PAGE_IOC_1, + Mpi2IOCPage1_t, *pMpi2IOCPage1_t; + +#define MPI2_IOCPAGE1_PAGEVERSION (0x05) + +/*defines for IOC Page 1 Flags field */ +#define MPI2_IOCPAGE1_REPLY_COALESCING (0x00000001) + +#define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF) +#define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN (0xFF) +#define MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN (0xFF) + +/*IOC Page 6 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_6 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 + CapabilitiesFlags; /*0x04 */ + U8 MaxDrivesRAID0; /*0x08 */ + U8 MaxDrivesRAID1; /*0x09 */ + U8 + MaxDrivesRAID1E; /*0x0A */ + U8 + MaxDrivesRAID10; /*0x0B */ + U8 MinDrivesRAID0; /*0x0C */ + U8 MinDrivesRAID1; /*0x0D */ + U8 + MinDrivesRAID1E; /*0x0E */ + U8 + MinDrivesRAID10; /*0x0F */ + U32 Reserved1; /*0x10 */ + U8 + MaxGlobalHotSpares; /*0x14 */ + U8 MaxPhysDisks; /*0x15 */ + U8 MaxVolumes; /*0x16 */ + U8 MaxConfigs; /*0x17 */ + U8 MaxOCEDisks; /*0x18 */ + U8 
Reserved2; /*0x19 */ + U16 Reserved3; /*0x1A */ + U32 + SupportedStripeSizeMapRAID0; /*0x1C */ + U32 + SupportedStripeSizeMapRAID1E; /*0x20 */ + U32 + SupportedStripeSizeMapRAID10; /*0x24 */ + U32 Reserved4; /*0x28 */ + U32 Reserved5; /*0x2C */ + U16 + DefaultMetadataSize; /*0x30 */ + U16 Reserved6; /*0x32 */ + U16 + MaxBadBlockTableEntries; /*0x34 */ + U16 Reserved7; /*0x36 */ + U32 + IRNvsramVersion; /*0x38 */ +} MPI2_CONFIG_PAGE_IOC_6, + *PTR_MPI2_CONFIG_PAGE_IOC_6, + Mpi2IOCPage6_t, *pMpi2IOCPage6_t; + +#define MPI2_IOCPAGE6_PAGEVERSION (0x05) + +/*defines for IOC Page 6 CapabilitiesFlags */ +#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004) +#define MPI2_IOCPAGE6_CAP_FLAGS_RAID0_SUPPORT (0x00000002) +#define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001) + + +/*IOC Page 7 */ + +#define MPI2_IOCPAGE7_EVENTMASK_WORDS (4) + +typedef struct _MPI2_CONFIG_PAGE_IOC_7 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 + EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/*0x08 */ + U16 SASBroadcastPrimitiveMasks; /*0x18 */ + U16 SASNotifyPrimitiveMasks; /*0x1A */ + U32 Reserved3; /*0x1C */ +} MPI2_CONFIG_PAGE_IOC_7, + *PTR_MPI2_CONFIG_PAGE_IOC_7, + Mpi2IOCPage7_t, *pMpi2IOCPage7_t; + +#define MPI2_IOCPAGE7_PAGEVERSION (0x02) + + +/*IOC Page 8 */ + +typedef struct _MPI2_CONFIG_PAGE_IOC_8 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumDevsPerEnclosure; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U16 MaxPersistentEntries; /*0x08 */ + U16 MaxNumPhysicalMappedIDs; /*0x0A */ + U16 Flags; /*0x0C */ + U16 Reserved3; /*0x0E */ + U16 IRVolumeMappingFlags; /*0x10 */ + U16 Reserved4; /*0x12 */ + U32 Reserved5; /*0x14 */ +} MPI2_CONFIG_PAGE_IOC_8, + *PTR_MPI2_CONFIG_PAGE_IOC_8, + Mpi2IOCPage8_t, *pMpi2IOCPage8_t; + +#define MPI2_IOCPAGE8_PAGEVERSION (0x00) + +/*defines for IOC Page 8 Flags field */ +#define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1 (0x00000020) +#define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0 (0x00000010) + +#define MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE (0x0000000E) +#define MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING (0x00000000) +#define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING (0x00000002) + +#define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING (0x00000001) +#define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING (0x00000000) + +/*defines for IOC Page 8 IRVolumeMappingFlags */ +#define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003) +#define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000) +#define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING (0x00000001) + + +/**************************************************************************** +* BIOS Config Pages +****************************************************************************/ + +/*BIOS Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_BIOS_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 BiosOptions; /*0x04 */ + U32 IOCSettings; /*0x08 */ + U8 SSUTimeout; /*0x0C */ + U8 Reserved1; /*0x0D */ + U16 Reserved2; /*0x0E */ + U32 DeviceSettings; /*0x10 */ + U16 NumberOfDevices; /*0x14 */ + U16 UEFIVersion; /*0x16 */ + U16 IOTimeoutBlockDevicesNonRM; /*0x18 */ + U16 IOTimeoutSequential; /*0x1A */ + U16 IOTimeoutOther; /*0x1C */ + U16 IOTimeoutBlockDevicesRM; /*0x1E */ +} MPI2_CONFIG_PAGE_BIOS_1, + *PTR_MPI2_CONFIG_PAGE_BIOS_1, + Mpi2BiosPage1_t, *pMpi2BiosPage1_t; + +#define 
MPI2_BIOSPAGE1_PAGEVERSION (0x07) + +/*values for BIOS Page 1 BiosOptions field */ +#define MPI2_BIOSPAGE1_OPTIONS_PNS_MASK (0x00003800) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_PBDHL (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_ENCSLOSURE (0x00000800) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_LWWID (0x00001000) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_PSENS (0x00001800) +#define MPI2_BIOSPAGE1_OPTIONS_PNS_ESPHY (0x00002000) + +#define MPI2_BIOSPAGE1_OPTIONS_X86_DISABLE_BIOS (0x00000400) + +#define MPI2_BIOSPAGE1_OPTIONS_MASK_REGISTRATION_UEFI_BSD (0x00000300) +#define MPI2_BIOSPAGE1_OPTIONS_USE_BIT0_REGISTRATION_UEFI_BSD (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_FULL_REGISTRATION_UEFI_BSD (0x00000100) +#define MPI2_BIOSPAGE1_OPTIONS_ADAPTER_REGISTRATION_UEFI_BSD (0x00000200) +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_REGISTRATION_UEFI_BSD (0x00000300) + +#define MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID (0x000000F0) +#define MPI2_BIOSPAGE1_OPTIONS_LSI_OEM_ID (0x00000000) + +#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006) +#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000) +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002) +#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004) + +#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001) + +/*values for BIOS Page 1 IOCSettings field */ +#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000) +#define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000) + +#define MPI2_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0) +#define MPI2_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040) +#define MPI2_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080) + +#define MPI2_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030) +#define MPI2_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000) +#define MPI2_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010) +#define MPI2_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020) +#define MPI2_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030) + +#define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008) + +/*values for BIOS Page 1 DeviceSettings field */ +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002) +#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001) + +/*defines for BIOS Page 1 UEFIVersion field */ +#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00) +#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8) +#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF) +#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0) + + + +/*BIOS Page 2 */ + +typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER { + U32 Reserved1; /*0x00 */ + U32 Reserved2; /*0x04 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ + U32 Reserved5; /*0x10 */ + U32 Reserved6; /*0x14 */ +} MPI2_BOOT_DEVICE_ADAPTER_ORDER, + *PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER, + Mpi2BootDeviceAdapterOrder_t, + *pMpi2BootDeviceAdapterOrder_t; + +typedef struct _MPI2_BOOT_DEVICE_SAS_WWID { + U64 SASAddress; /*0x00 */ + U8 LUN[8]; /*0x08 */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ +} MPI2_BOOT_DEVICE_SAS_WWID, + *PTR_MPI2_BOOT_DEVICE_SAS_WWID, + Mpi2BootDeviceSasWwid_t, + *pMpi2BootDeviceSasWwid_t; + +typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT { + U64 EnclosureLogicalID; /*0x00 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ + U16 SlotNumber; 
/*0x10 */ + U16 Reserved3; /*0x12 */ + U32 Reserved4; /*0x14 */ +} MPI2_BOOT_DEVICE_ENCLOSURE_SLOT, + *PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT, + Mpi2BootDeviceEnclosureSlot_t, + *pMpi2BootDeviceEnclosureSlot_t; + +typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME { + U64 DeviceName; /*0x00 */ + U8 LUN[8]; /*0x08 */ + U32 Reserved1; /*0x10 */ + U32 Reserved2; /*0x14 */ +} MPI2_BOOT_DEVICE_DEVICE_NAME, + *PTR_MPI2_BOOT_DEVICE_DEVICE_NAME, + Mpi2BootDeviceDeviceName_t, + *pMpi2BootDeviceDeviceName_t; + +typedef union _MPI2_BIOSPAGE2_BOOT_DEVICE { + MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder; + MPI2_BOOT_DEVICE_SAS_WWID SasWwid; + MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot; + MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName; +} MPI2_BIOSPAGE2_BOOT_DEVICE, + *PTR_MPI2_BIOSPAGE2_BOOT_DEVICE, + Mpi2BiosPage2BootDevice_t, + *pMpi2BiosPage2BootDevice_t; + +typedef struct _MPI2_CONFIG_PAGE_BIOS_2 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U32 Reserved3; /*0x0C */ + U32 Reserved4; /*0x10 */ + U32 Reserved5; /*0x14 */ + U32 Reserved6; /*0x18 */ + U8 ReqBootDeviceForm; /*0x1C */ + U8 Reserved7; /*0x1D */ + U16 Reserved8; /*0x1E */ + MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /*0x20 */ + U8 ReqAltBootDeviceForm; /*0x38 */ + U8 Reserved9; /*0x39 */ + U16 Reserved10; /*0x3A */ + MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /*0x3C */ + U8 CurrentBootDeviceForm; /*0x54 */ + U8 Reserved11; /*0x55 */ + U16 Reserved12; /*0x56 */ + MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /*0x58 */ +} MPI2_CONFIG_PAGE_BIOS_2, *PTR_MPI2_CONFIG_PAGE_BIOS_2, + Mpi2BiosPage2_t, *pMpi2BiosPage2_t; + +#define MPI2_BIOSPAGE2_PAGEVERSION (0x04) + +/*values for BIOS Page 2 BootDeviceForm fields */ +#define MPI2_BIOSPAGE2_FORM_MASK (0x0F) +#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00) +#define MPI2_BIOSPAGE2_FORM_SAS_WWID (0x05) +#define MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06) +#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME (0x07) + + +/*BIOS Page 3 */ + +typedef struct _MPI2_ADAPTER_INFO { + U8 PciBusNumber; /*0x00 */ + U8 PciDeviceAndFunctionNumber; /*0x01 */ + U16 AdapterFlags; /*0x02 */ +} MPI2_ADAPTER_INFO, *PTR_MPI2_ADAPTER_INFO, + Mpi2AdapterInfo_t, *pMpi2AdapterInfo_t; + +#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001) +#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002) + +typedef struct _MPI2_CONFIG_PAGE_BIOS_3 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U32 GlobalFlags; /*0x04 */ + U32 BiosVersion; /*0x08 */ + MPI2_ADAPTER_INFO AdapterOrder[4]; /*0x0C */ + U32 Reserved1; /*0x1C */ +} MPI2_CONFIG_PAGE_BIOS_3, + *PTR_MPI2_CONFIG_PAGE_BIOS_3, + Mpi2BiosPage3_t, *pMpi2BiosPage3_t; + +#define MPI2_BIOSPAGE3_PAGEVERSION (0x00) + +/*values for BIOS Page 3 GlobalFlags */ +#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002) +#define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE (0x00000004) +#define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE (0x00000010) + +#define MPI2_BIOSPAGE3_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0) +#define MPI2_BIOSPAGE3_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000) +#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DISPLAY (0x00000020) +#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040) + + +/*BIOS Page 4 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime.
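 *
 *Sketch (editor's note, not part of the original header) of the two-step
 *read this note implies: probe for NumPhys first, then size the real buffer
 *from it. read_page() is hypothetical and NumPhys >= 1 is assumed:
 *
 *  MPI2_CONFIG_PAGE_BIOS_4 probe;
 *  read_page(&probe, sizeof(probe));              // learns NumPhys
 *  size_t bytes = sizeof(MPI2_CONFIG_PAGE_BIOS_4) +
 *                 (probe.NumPhys - 1) * sizeof(MPI2_BIOS4_ENTRY);
 *  MPI2_CONFIG_PAGE_BIOS_4 *page = malloc(bytes);
 *  read_page(page, bytes);                        // all Phy[] entries fit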
+ */ +#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES +#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1) +#endif + +typedef struct _MPI2_BIOS4_ENTRY { + U64 ReassignmentWWID; /*0x00 */ + U64 ReassignmentDeviceName; /*0x08 */ +} MPI2_BIOS4_ENTRY, *PTR_MPI2_BIOS4_ENTRY, + Mpi2MBios4Entry_t, *pMpi2Bios4Entry_t; + +typedef struct _MPI2_CONFIG_PAGE_BIOS_4 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumPhys; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + MPI2_BIOS4_ENTRY + Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /*0x08 */ +} MPI2_CONFIG_PAGE_BIOS_4, *PTR_MPI2_CONFIG_PAGE_BIOS_4, + Mpi2BiosPage4_t, *pMpi2BiosPage4_t; + +#define MPI2_BIOSPAGE4_PAGEVERSION (0x01) + + +/**************************************************************************** +* RAID Volume Config Pages +****************************************************************************/ + +/*RAID Volume Page 0 */ + +typedef struct _MPI2_RAIDVOL0_PHYS_DISK { + U8 RAIDSetNum; /*0x00 */ + U8 PhysDiskMap; /*0x01 */ + U8 PhysDiskNum; /*0x02 */ + U8 Reserved; /*0x03 */ +} MPI2_RAIDVOL0_PHYS_DISK, *PTR_MPI2_RAIDVOL0_PHYS_DISK, + Mpi2RaidVol0PhysDisk_t, *pMpi2RaidVol0PhysDisk_t; + +/*defines for the PhysDiskMap field */ +#define MPI2_RAIDVOL0_PHYSDISK_PRIMARY (0x01) +#define MPI2_RAIDVOL0_PHYSDISK_SECONDARY (0x02) + +typedef struct _MPI2_RAIDVOL0_SETTINGS { + U16 Settings; /*0x00 */ + U8 HotSparePool; /*0x02 */ + U8 Reserved; /*0x03 */ +} MPI2_RAIDVOL0_SETTINGS, *PTR_MPI2_RAIDVOL0_SETTINGS, + Mpi2RaidVol0Settings_t, + *pMpi2RaidVol0Settings_t; + +/*RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */ +#define MPI2_RAID_HOT_SPARE_POOL_0 (0x01) +#define MPI2_RAID_HOT_SPARE_POOL_1 (0x02) +#define MPI2_RAID_HOT_SPARE_POOL_2 (0x04) +#define MPI2_RAID_HOT_SPARE_POOL_3 (0x08) +#define MPI2_RAID_HOT_SPARE_POOL_4 (0x10) +#define MPI2_RAID_HOT_SPARE_POOL_5 (0x20) +#define MPI2_RAID_HOT_SPARE_POOL_6 (0x40) +#define MPI2_RAID_HOT_SPARE_POOL_7 (0x80) + +/*RAID Volume Page 0 VolumeSettings defines */ +#define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0008) +#define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004) + +#define MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING (0x0003) +#define MPI2_RAIDVOL0_SETTING_UNCHANGED (0x0000) +#define MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING (0x0001) +#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhysDisks at runtime.
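 *
 *Sketch (editor's note, hypothetical helpers): member disks are walked with
 *the runtime count, and volume health comes from the VolumeState values
 *defined below:
 *
 *  Mpi2RaidVolPage0_t *vol = fetch_page();
 *  int degraded = (vol->VolumeState == MPI2_RAID_VOL_STATE_DEGRADED);
 *  for (int i = 0; i < vol->NumPhysDisks; i++)
 *      report_disk(vol->PhysDisk[i].PhysDiskNum, degraded);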
+ */ +#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX +#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x04 */ + U8 VolumeState; /*0x06 */ + U8 VolumeType; /*0x07 */ + U32 VolumeStatusFlags; /*0x08 */ + MPI2_RAIDVOL0_SETTINGS VolumeSettings; /*0x0C */ + U64 MaxLBA; /*0x10 */ + U32 StripeSize; /*0x18 */ + U16 BlockSize; /*0x1C */ + U16 Reserved1; /*0x1E */ + U8 SupportedPhysDisks;/*0x20 */ + U8 ResyncRate; /*0x21 */ + U16 DataScrubDuration; /*0x22 */ + U8 NumPhysDisks; /*0x24 */ + U8 Reserved2; /*0x25 */ + U8 Reserved3; /*0x26 */ + U8 InactiveStatus; /*0x27 */ + MPI2_RAIDVOL0_PHYS_DISK + PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /*0x28 */ +} MPI2_CONFIG_PAGE_RAID_VOL_0, + *PTR_MPI2_CONFIG_PAGE_RAID_VOL_0, + Mpi2RaidVolPage0_t, *pMpi2RaidVolPage0_t; + +#define MPI2_RAIDVOLPAGE0_PAGEVERSION (0x0A) + +/*values for RAID VolumeState */ +#define MPI2_RAID_VOL_STATE_MISSING (0x00) +#define MPI2_RAID_VOL_STATE_FAILED (0x01) +#define MPI2_RAID_VOL_STATE_INITIALIZING (0x02) +#define MPI2_RAID_VOL_STATE_ONLINE (0x03) +#define MPI2_RAID_VOL_STATE_DEGRADED (0x04) +#define MPI2_RAID_VOL_STATE_OPTIMAL (0x05) + +/*values for RAID VolumeType */ +#define MPI2_RAID_VOL_TYPE_RAID0 (0x00) +#define MPI2_RAID_VOL_TYPE_RAID1E (0x01) +#define MPI2_RAID_VOL_TYPE_RAID1 (0x02) +#define MPI2_RAID_VOL_TYPE_RAID10 (0x05) +#define MPI2_RAID_VOL_TYPE_UNKNOWN (0xFF) + +/*values for RAID Volume Page 0 VolumeStatusFlags field */ +#define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC (0x02000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING (0x01000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING (0x00800000) +#define MPI2_RAIDVOL0_STATUS_FLAG_USER_CONSIST_PENDING (0x00400000) +#define MPI2_RAIDVOL0_STATUS_FLAG_MAKE_DATA_CONSISTENT (0x00200000) +#define MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB (0x00100000) +#define MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK (0x00080000) +#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000) +#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000) +#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000) +#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080) +#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040) +#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020) +#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000) +#define MPI2_RAIDVOL0_STATUS_FLAG_1E_ADJACENT_MIRROR (0x00000010) +#define MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x00000008) +#define MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x00000004) +#define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED (0x00000002) +#define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED (0x00000001) + +/*values for RAID Volume Page 0 SupportedPhysDisks field */ +#define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS (0x08) +#define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS (0x04) +#define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL (0x02) +#define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL (0x01) + +/*values for RAID Volume Page 0 InactiveStatus field */ +#define MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00) +#define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01) +#define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02) +#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE (0x03) +#define MPI2_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE (0x04) +#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE (0x05) +#define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06) + + +/*RAID Volume Page 1 */ + +typedef struct 
_MPI2_CONFIG_PAGE_RAID_VOL_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x04 */ + U16 Reserved0; /*0x06 */ + U8 GUID[24]; /*0x08 */ + U8 Name[16]; /*0x20 */ + U64 WWID; /*0x30 */ + U32 Reserved1; /*0x38 */ + U32 Reserved2; /*0x3C */ +} MPI2_CONFIG_PAGE_RAID_VOL_1, + *PTR_MPI2_CONFIG_PAGE_RAID_VOL_1, + Mpi2RaidVolPage1_t, *pMpi2RaidVolPage1_t; + +#define MPI2_RAIDVOLPAGE1_PAGEVERSION (0x03) + + +/**************************************************************************** +* RAID Physical Disk Config Pages +****************************************************************************/ + +/*RAID Physical Disk Page 0 */ + +typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS { + U16 Reserved1; /*0x00 */ + U8 HotSparePool; /*0x02 */ + U8 Reserved2; /*0x03 */ +} MPI2_RAIDPHYSDISK0_SETTINGS, + *PTR_MPI2_RAIDPHYSDISK0_SETTINGS, + Mpi2RaidPhysDisk0Settings_t, + *pMpi2RaidPhysDisk0Settings_t; + +/*use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */ + +typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA { + U8 VendorID[8]; /*0x00 */ + U8 ProductID[16]; /*0x08 */ + U8 ProductRevLevel[4]; /*0x18 */ + U8 SerialNum[32]; /*0x1C */ +} MPI2_RAIDPHYSDISK0_INQUIRY_DATA, + *PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA, + Mpi2RaidPhysDisk0InquiryData_t, + *pMpi2RaidPhysDisk0InquiryData_t; + +typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U16 DevHandle; /*0x04 */ + U8 Reserved1; /*0x06 */ + U8 PhysDiskNum; /*0x07 */ + MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /*0x08 */ + U32 Reserved2; /*0x0C */ + MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /*0x10 */ + U32 Reserved3; /*0x4C */ + U8 PhysDiskState; /*0x50 */ + U8 OfflineReason; /*0x51 */ + U8 IncompatibleReason; /*0x52 */ + U8 PhysDiskAttributes; /*0x53 */ + U32 PhysDiskStatusFlags;/*0x54 */ + U64 DeviceMaxLBA; /*0x58 */ + U64 HostMaxLBA; /*0x60 */ + U64 CoercedMaxLBA; /*0x68 */ + U16 BlockSize; /*0x70 */ + U16 Reserved5; /*0x72 */ + U32 Reserved6; /*0x74 */ +} MPI2_CONFIG_PAGE_RD_PDISK_0, + *PTR_MPI2_CONFIG_PAGE_RD_PDISK_0, + Mpi2RaidPhysDiskPage0_t, + *pMpi2RaidPhysDiskPage0_t; + +#define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION (0x05) + +/*PhysDiskState defines */ +#define MPI2_RAID_PD_STATE_NOT_CONFIGURED (0x00) +#define MPI2_RAID_PD_STATE_NOT_COMPATIBLE (0x01) +#define MPI2_RAID_PD_STATE_OFFLINE (0x02) +#define MPI2_RAID_PD_STATE_ONLINE (0x03) +#define MPI2_RAID_PD_STATE_HOT_SPARE (0x04) +#define MPI2_RAID_PD_STATE_DEGRADED (0x05) +#define MPI2_RAID_PD_STATE_REBUILDING (0x06) +#define MPI2_RAID_PD_STATE_OPTIMAL (0x07) + +/*OfflineReason defines */ +#define MPI2_PHYSDISK0_ONLINE (0x00) +#define MPI2_PHYSDISK0_OFFLINE_MISSING (0x01) +#define MPI2_PHYSDISK0_OFFLINE_FAILED (0x03) +#define MPI2_PHYSDISK0_OFFLINE_INITIALIZING (0x04) +#define MPI2_PHYSDISK0_OFFLINE_REQUESTED (0x05) +#define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED (0x06) +#define MPI2_PHYSDISK0_OFFLINE_OTHER (0xFF) + +/*IncompatibleReason defines */ +#define MPI2_PHYSDISK0_COMPATIBLE (0x00) +#define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL (0x01) +#define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE (0x02) +#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03) +#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04) +#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05) +#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06) +#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF) + +/*PhysDiskAttributes defines */ +#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C) +#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08) +#define 
MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04) + +#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03) +#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02) +#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01) + +/*PhysDiskStatusFlags defines */ +#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED (0x00000040) +#define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET (0x00000020) +#define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED (0x00000010) +#define MPI2_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00000000) +#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x00000008) +#define MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x00000004) +#define MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED (0x00000002) +#define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x00000001) + + +/*RAID Physical Disk Page 1 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhysDiskPaths at runtime. + */ +#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX +#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1) +#endif + +typedef struct _MPI2_RAIDPHYSDISK1_PATH { + U16 DevHandle; /*0x00 */ + U16 Reserved1; /*0x02 */ + U64 WWID; /*0x04 */ + U64 OwnerWWID; /*0x0C */ + U8 OwnerIdentifier; /*0x14 */ + U8 Reserved2; /*0x15 */ + U16 Flags; /*0x16 */ +} MPI2_RAIDPHYSDISK1_PATH, *PTR_MPI2_RAIDPHYSDISK1_PATH, + Mpi2RaidPhysDisk1Path_t, + *pMpi2RaidPhysDisk1Path_t; + +/*RAID Physical Disk Page 1 Physical Disk Path Flags field defines */ +#define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY (0x0004) +#define MPI2_RAID_PHYSDISK1_FLAG_BROKEN (0x0002) +#define MPI2_RAID_PHYSDISK1_FLAG_INVALID (0x0001) + +typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 { + MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */ + U8 NumPhysDiskPaths; /*0x04 */ + U8 PhysDiskNum; /*0x05 */ + U16 Reserved1; /*0x06 */ + U32 Reserved2; /*0x08 */ + MPI2_RAIDPHYSDISK1_PATH + PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/*0x0C */ +} MPI2_CONFIG_PAGE_RD_PDISK_1, + *PTR_MPI2_CONFIG_PAGE_RD_PDISK_1, + Mpi2RaidPhysDiskPage1_t, + *pMpi2RaidPhysDiskPage1_t; + +#define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION (0x02) + + +/**************************************************************************** +* values for fields used by several types of SAS Config Pages +****************************************************************************/ + +/*values for NegotiatedLinkRates fields */ +#define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL (0xF0) +#define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL (4) +#define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F) +/*link rates used for Negotiated Physical and Logical Link Rate */ +#define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00) +#define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01) +#define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02) +#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03) +#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04) +#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05) +#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06) +#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08) +#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09) +#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A) +#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B) + + +/*values for AttachedPhyInfo fields */ +#define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040) +#define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020) +#define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010) + +#define MPI2_SAS_APHYINFO_REASON_MASK (0x0000000F) +#define MPI2_SAS_APHYINFO_REASON_UNKNOWN (0x00000000) +#define MPI2_SAS_APHYINFO_REASON_POWER_ON (0x00000001) +#define 
MPI2_SAS_APHYINFO_REASON_HARD_RESET (0x00000002) +#define MPI2_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003) +#define MPI2_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004) +#define MPI2_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005) +#define MPI2_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006) +#define MPI2_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007) +#define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008) + + +/*values for PhyInfo fields */ +#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000) + +#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000) +#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27) +#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000) +#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000) +#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000) + +#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000) +#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000) +#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000) +#define MPI2_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000) +#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS (0x00200000) +#define MPI2_SAS_PHYINFO_ZONING_ENABLED (0x00100000) + +#define MPI2_SAS_PHYINFO_REASON_MASK (0x000F0000) +#define MPI2_SAS_PHYINFO_REASON_UNKNOWN (0x00000000) +#define MPI2_SAS_PHYINFO_REASON_POWER_ON (0x00010000) +#define MPI2_SAS_PHYINFO_REASON_HARD_RESET (0x00020000) +#define MPI2_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000) +#define MPI2_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000) +#define MPI2_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000) +#define MPI2_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000) +#define MPI2_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000) +#define MPI2_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000) + +#define MPI2_SAS_PHYINFO_MULTIPLEXING_SUPPORTED (0x00008000) +#define MPI2_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000) +#define MPI2_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000) +#define MPI2_SAS_PHYINFO_VIRTUAL_PHY (0x00001000) + +#define MPI2_SAS_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00) +#define MPI2_SAS_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8) + +#define MPI2_SAS_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0) +#define MPI2_SAS_PHYINFO_DIRECT_ROUTING (0x00000000) +#define MPI2_SAS_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010) +#define MPI2_SAS_PHYINFO_TABLE_ROUTING (0x00000020) + + +/*values for SAS ProgrammedLinkRate fields */ +#define MPI2_SAS_PRATE_MAX_RATE_MASK (0xF0) +#define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00) +#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80) +#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90) +#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0) +#define MPI25_SAS_PRATE_MAX_RATE_12_0 (0xB0) +#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F) +#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00) +#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08) +#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09) +#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A) +#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B) + + +/*values for SAS HwLinkRate fields */ +#define MPI2_SAS_HWRATE_MAX_RATE_MASK (0xF0) +#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80) +#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90) +#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0) +#define MPI25_SAS_HWRATE_MAX_RATE_12_0 (0xB0) +#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F) +#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08) +#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09) +#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A) +#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B) + + + +/**************************************************************************** +* SAS IO Unit 
Config Pages +****************************************************************************/ + +/*SAS IO Unit Page 0 */ + +typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA { + U8 Port; /*0x00 */ + U8 PortFlags; /*0x01 */ + U8 PhyFlags; /*0x02 */ + U8 NegotiatedLinkRate; /*0x03 */ + U32 ControllerPhyDeviceInfo;/*0x04 */ + U16 AttachedDevHandle; /*0x08 */ + U16 ControllerDevHandle; /*0x0A */ + U32 DiscoveryStatus; /*0x0C */ + U32 Reserved; /*0x10 */ +} MPI2_SAS_IO_UNIT0_PHY_DATA, + *PTR_MPI2_SAS_IO_UNIT0_PHY_DATA, + Mpi2SasIOUnit0PhyData_t, + *pMpi2SasIOUnit0PhyData_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT0_PHY_MAX +#define MPI2_SAS_IOUNIT0_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1;/*0x08 */ + U8 NumPhys; /*0x0C */ + U8 Reserved2;/*0x0D */ + U16 Reserved3;/*0x0E */ + MPI2_SAS_IO_UNIT0_PHY_DATA + PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /*0x10 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_0, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0, + Mpi2SasIOUnitPage0_t, *pMpi2SasIOUnitPage0_t; + +#define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05) + +/*values for SAS IO Unit Page 0 PortFlags */ +#define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08) +#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01) + +/*values for SAS IO Unit Page 0 PhyFlags */ +#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10) +#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08) + +/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/*see mpi2_sas.h for values for + *SAS IO Unit Page 0 ControllerPhyDeviceInfo values */ + +/*values for SAS IO Unit Page 0 DiscoveryStatus */ +#define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_SASIOUNIT0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_SASIOUNIT0_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_SASIOUNIT0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_SASIOUNIT0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_SASIOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_SASIOUNIT0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define MPI2_SASIOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_SASIOUNIT0_DS_TABLE_LINK (0x00000400) +#define MPI2_SASIOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_SASIOUNIT0_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_SASIOUNIT0_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_SASIOUNIT0_DS_INDEX_NOT_EXIST (0x00000040) +#define MPI2_SASIOUNIT0_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_SASIOUNIT0_DS_SMP_TIMEOUT (0x00000010) +#define MPI2_SASIOUNIT0_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_SASIOUNIT0_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_SASIOUNIT0_DS_LOOP_DETECTED (0x00000001) + + +/*SAS IO Unit Page 1 */ + +typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA { + U8 Port; /*0x00 */ + U8 PortFlags; /*0x01 */ + U8 PhyFlags; /*0x02 */ + U8 MaxMinLinkRate; /*0x03 */ + U32 ControllerPhyDeviceInfo; /*0x04 */ + U16 MaxTargetPortConnectTime; /*0x08 */ + U16 Reserved1; /*0x0A */ +} MPI2_SAS_IO_UNIT1_PHY_DATA, + *PTR_MPI2_SAS_IO_UNIT1_PHY_DATA, + Mpi2SasIOUnit1PhyData_t, + *pMpi2SasIOUnit1PhyData_t; + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. 
+ */ +#ifndef MPI2_SAS_IOUNIT1_PHY_MAX +#define MPI2_SAS_IOUNIT1_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U16 + ControlFlags; /*0x08 */ + U16 + SASNarrowMaxQueueDepth; /*0x0A */ + U16 + AdditionalControlFlags; /*0x0C */ + U16 + SASWideMaxQueueDepth; /*0x0E */ + U8 + NumPhys; /*0x10 */ + U8 + SATAMaxQDepth; /*0x11 */ + U8 + ReportDeviceMissingDelay; /*0x12 */ + U8 + IODeviceMissingDelay; /*0x13 */ + MPI2_SAS_IO_UNIT1_PHY_DATA + PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /*0x14 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_1, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1, + Mpi2SasIOUnitPage1_t, *pMpi2SasIOUnitPage1_t; + +#define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09) + +/*values for SAS IO Unit Page 1 ControlFlags */ +#define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX (0x4000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000) +#define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000) + +#define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600) +#define MPI2_SASIOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x0) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x1) +#define MPI2_SASIOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x2) + +#define MPI2_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080) +#define MPI2_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040) +#define MPI2_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020) +#define MPI2_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010) +#define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008) +#define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004) +#define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002) +#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001) + +/*values for SAS IO Unit Page 1 AdditionalControlFlags */ +#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080) +#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040) +#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020) +#define MPI2_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010) +#define MPI2_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008) +#define MPI2_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004) +#define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002) +#define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001) + +/*defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */ +#define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F) +#define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80) + +/*values for SAS IO Unit Page 1 PortFlags */ +#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01) + +/*values for SAS IO Unit Page 1 PhyFlags */ +#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10) +#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08) + +/*values for SAS IO Unit Page 1 MaxMinLinkRate */ +#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0) +#define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80) +#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90) +#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0) +#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0) +#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F) +#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08) +#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09) +#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A) +#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B) + +/*see mpi2_sas.h for values for + *SAS IO Unit Page 1 ControllerPhyDeviceInfo values */ + + +/*SAS IO Unit Page 4 */ + +typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP 
{ + U8 MaxTargetSpinup; /*0x00 */ + U8 SpinupDelay; /*0x01 */ + U8 SpinupFlags; /*0x02 */ + U8 Reserved1; /*0x03 */ +} MPI2_SAS_IOUNIT4_SPINUP_GROUP, + *PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP, + Mpi2SasIOUnit4SpinupGroup_t, + *pMpi2SasIOUnit4SpinupGroup_t; +/*defines for SAS IO Unit Page 4 SpinupFlags */ +#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01) + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. + */ +#ifndef MPI2_SAS_IOUNIT4_PHY_MAX +#define MPI2_SAS_IOUNIT4_PHY_MAX (4) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header;/*0x00 */ + MPI2_SAS_IOUNIT4_SPINUP_GROUP + SpinupGroupParameters[4]; /*0x08 */ + U32 + Reserved1; /*0x18 */ + U32 + Reserved2; /*0x1C */ + U32 + Reserved3; /*0x20 */ + U8 + BootDeviceWaitTime; /*0x24 */ + U8 + Reserved4; /*0x25 */ + U16 + Reserved5; /*0x26 */ + U8 + NumPhys; /*0x28 */ + U8 + PEInitialSpinupDelay; /*0x29 */ + U8 + PEReplyDelay; /*0x2A */ + U8 + Flags; /*0x2B */ + U8 + PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /*0x2C */ +} MPI2_CONFIG_PAGE_SASIOUNIT_4, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4, + Mpi2SasIOUnitPage4_t, *pMpi2SasIOUnitPage4_t; + +#define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02) + +/*defines for Flags field */ +#define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01) + +/*defines for PHY field */ +#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03) + + +/*SAS IO Unit Page 5 */ + +typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS { + U8 ControlFlags; /*0x00 */ + U8 PortWidthModGroup; /*0x01 */ + U16 InactivityTimerExponent; /*0x02 */ + U8 SATAPartialTimeout; /*0x04 */ + U8 Reserved2; /*0x05 */ + U8 SATASlumberTimeout; /*0x06 */ + U8 Reserved3; /*0x07 */ + U8 SASPartialTimeout; /*0x08 */ + U8 Reserved4; /*0x09 */ + U8 SASSlumberTimeout; /*0x0A */ + U8 Reserved5; /*0x0B */ +} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, + *PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS, + Mpi2SasIOUnit5PhyPmSettings_t, + *pMpi2SasIOUnit5PhyPmSettings_t; + +/*defines for ControlFlags field */ +#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08) +#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04) +#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02) +#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01) + +/*defines for PortWidthModeGroup field */ +#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF) + +/*defines for InactivityTimerExponent field */ +#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12) +#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8) +#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4) +#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007) +#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0) + +#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7) +#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6) +#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5) +#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4) +#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3) +#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2) +#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1) +#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhys at runtime. 
+ */ +#ifndef MPI2_SAS_IOUNIT5_PHY_MAX +#define MPI2_SAS_IOUNIT5_PHY_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 NumPhys; /*0x08 */ + U8 Reserved1;/*0x09 */ + U16 Reserved2;/*0x0A */ + U32 Reserved3;/*0x0C */ + MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS + SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX];/*0x10 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_5, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5, + Mpi2SasIOUnitPage5_t, *pMpi2SasIOUnitPage5_t; + +#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01) + + +/*SAS IO Unit Page 6 */ + +typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS { + U8 CurrentStatus; /*0x00 */ + U8 CurrentModulation; /*0x01 */ + U8 CurrentUtilization; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 Reserved2; /*0x04 */ +} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, + *PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS, + Mpi2SasIOUnit6PortWidthModGroupStatus_t, + *pMpi2SasIOUnit6PortWidthModGroupStatus_t; + +/*defines for CurrentStatus field */ +#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00) +#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01) +#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02) +#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03) +#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04) +#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05) +#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06) +#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07) + +/*defines for CurrentModulation field */ +#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00) +#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01) +#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02) +#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumGroups at runtime. + */ +#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX +#define MPI2_SAS_IOUNIT6_GROUP_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ + U8 NumGroups; /*0x10 */ + U8 Reserved3; /*0x11 */ + U16 Reserved4; /*0x12 */ + MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS + PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /*0x14 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_6, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6, + Mpi2SasIOUnitPage6_t, *pMpi2SasIOUnitPage6_t; + +#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00) + + +/*SAS IO Unit Page 7 */ + +typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS { + U8 Flags; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U8 Threshold75Pct; /*0x04 */ + U8 Threshold50Pct; /*0x05 */ + U8 Threshold25Pct; /*0x06 */ + U8 Reserved3; /*0x07 */ +} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, + *PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS, + Mpi2SasIOUnit7PortWidthModGroupSettings_t, + *pMpi2SasIOUnit7PortWidthModGroupSettings_t; + +/*defines for Flags field */ +#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01) + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumGroups at runtime. 
+ */ +#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX +#define MPI2_SAS_IOUNIT7_GROUP_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 SamplingInterval; /*0x08 */ + U8 WindowLength; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Reserved2; /*0x0C */ + U32 Reserved3; /*0x10 */ + U8 NumGroups; /*0x14 */ + U8 Reserved4; /*0x15 */ + U16 Reserved5; /*0x16 */ + MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS + PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX];/*0x18 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_7, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7, + Mpi2SasIOUnitPage7_t, *pMpi2SasIOUnitPage7_t; + +#define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00) + + +/*SAS IO Unit Page 8 */ + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U32 + PowerManagementCapabilities; /*0x0C */ + U8 + TxRxSleepStatus; /*0x10 */ + U8 + Reserved2; /*0x11 */ + U16 + Reserved3; /*0x12 */ +} MPI2_CONFIG_PAGE_SASIOUNIT_8, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8, + Mpi2SasIOUnitPage8_t, *pMpi2SasIOUnitPage8_t; + +#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00) + +/*defines for PowerManagementCapabilities field */ +#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000) +#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800) +#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400) +#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200) +#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100) +#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002) +#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001) + +/*defines for TxRxSleepStatus field */ +#define MPI25_SASIOUNIT8_TXRXSLEEP_UNSUPPORTED (0x00) +#define MPI25_SASIOUNIT8_TXRXSLEEP_DISENGAGED (0x01) +#define MPI25_SASIOUNIT8_TXRXSLEEP_ACTIVE (0x02) +#define MPI25_SASIOUNIT8_TXRXSLEEP_SHUTDOWN (0x03) + + + +/*SAS IO Unit Page 16 */ + +typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U64 + TimeStamp; /*0x08 */ + U32 + Reserved1; /*0x10 */ + U32 + Reserved2; /*0x14 */ + U32 + FastPathPendedRequests; /*0x18 */ + U32 + FastPathUnPendedRequests; /*0x1C */ + U32 + FastPathHostRequestStarts; /*0x20 */ + U32 + FastPathFirmwareRequestStarts; /*0x24 */ + U32 + FastPathHostCompletions; /*0x28 */ + U32 + FastPathFirmwareCompletions; /*0x2C */ + U32 + NonFastPathRequestStarts; /*0x30 */ + U32 + NonFastPathHostCompletions; /*0x34 */ +} MPI2_CONFIG_PAGE_SASIOUNIT16, + *PTR_MPI2_CONFIG_PAGE_SASIOUNIT16, + Mpi2SasIOUnitPage16_t, *pMpi2SasIOUnitPage16_t; + +#define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00) + + +/**************************************************************************** +* SAS Expander Config Pages +****************************************************************************/ + +/*SAS Expander Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U8 + PhysicalPort; /*0x08 */ + U8 + ReportGenLength; /*0x09 */ + U16 + EnclosureHandle; /*0x0A */ + U64 + SASAddress; /*0x0C */ + U32 + DiscoveryStatus; /*0x14 */ + U16 + DevHandle; /*0x18 */ + U16 + ParentDevHandle; /*0x1A */ + U16 + ExpanderChangeCount; /*0x1C */ + U16 + ExpanderRouteIndexes; /*0x1E */ + U8 + NumPhys; /*0x20 */ + U8 + SASLevel;
/*0x21 */ + U16 + Flags; /*0x22 */ + U16 + STPBusInactivityTimeLimit; /*0x24 */ + U16 + STPMaxConnectTimeLimit; /*0x26 */ + U16 + STP_SMP_NexusLossTime; /*0x28 */ + U16 + MaxNumRoutedSasAddresses; /*0x2A */ + U64 + ActiveZoneManagerSASAddress;/*0x2C */ + U16 + ZoneLockInactivityLimit; /*0x34 */ + U16 + Reserved1; /*0x36 */ + U8 + TimeToReducedFunc; /*0x38 */ + U8 + InitialTimeToReducedFunc; /*0x39 */ + U8 + MaxReducedFuncTime; /*0x3A */ + U8 + Reserved2; /*0x3B */ +} MPI2_CONFIG_PAGE_EXPANDER_0, + *PTR_MPI2_CONFIG_PAGE_EXPANDER_0, + Mpi2ExpanderPage0_t, *pMpi2ExpanderPage0_t; + +#define MPI2_SASEXPANDER0_PAGEVERSION (0x06) + +/*values for SAS Expander Page 0 DiscoveryStatus field */ +#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_SAS_EXPANDER0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_SAS_EXPANDER0_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_SAS_EXPANDER0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_SAS_EXPANDER0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_SAS_EXPANDER0_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_SAS_EXPANDER0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_SAS_EXPANDER0_DS_TABLE_LINK (0x00000400) +#define MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_SAS_EXPANDER0_DS_INDEX_NOT_EXIST (0x00000040) +#define MPI2_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT (0x00000010) +#define MPI2_SAS_EXPANDER0_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001) + +/*values for SAS Expander Page 0 Flags field */ +#define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000) +#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000) +#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800) +#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400) +#define MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT (0x0200) +#define MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING (0x0100) +#define MPI2_SAS_EXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080) +#define MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010) +#define MPI2_SAS_EXPANDER0_FLAGS_OTHERS_CONFIG (0x0004) +#define MPI2_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002) +#define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001) + + +/*SAS Expander Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U8 + PhysicalPort; /*0x08 */ + U8 + Reserved1; /*0x09 */ + U16 + Reserved2; /*0x0A */ + U8 + NumPhys; /*0x0C */ + U8 + Phy; /*0x0D */ + U16 + NumTableEntriesProgrammed; /*0x0E */ + U8 + ProgrammedLinkRate; /*0x10 */ + U8 + HwLinkRate; /*0x11 */ + U16 + AttachedDevHandle; /*0x12 */ + U32 + PhyInfo; /*0x14 */ + U32 + AttachedDeviceInfo; /*0x18 */ + U16 + ExpanderDevHandle; /*0x1C */ + U8 + ChangeCount; /*0x1E */ + U8 + NegotiatedLinkRate; /*0x1F */ + U8 + PhyIdentifier; /*0x20 */ + U8 + AttachedPhyIdentifier; /*0x21 */ + U8 + Reserved3; /*0x22 */ + U8 + DiscoveryInfo; /*0x23 */ + U32 + AttachedPhyInfo; /*0x24 */ + U8 + ZoneGroup; /*0x28 */ + U8 + SelfConfigStatus; /*0x29 */ + U16 + Reserved4; /*0x2A */ +} 
MPI2_CONFIG_PAGE_EXPANDER_1, + *PTR_MPI2_CONFIG_PAGE_EXPANDER_1, + Mpi2ExpanderPage1_t, *pMpi2ExpanderPage1_t; + +#define MPI2_SASEXPANDER1_PAGEVERSION (0x02) + +/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ + +/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ + +/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */ + +/*see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines + *used for the AttachedDeviceInfo field */ + +/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + +/*values for SAS Expander Page 1 DiscoveryInfo field */ +#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04) +#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02) +#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01) + +/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ + + +/**************************************************************************** +* SAS Device Config Pages +****************************************************************************/ + +/*SAS Device Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U16 + Slot; /*0x08 */ + U16 + EnclosureHandle; /*0x0A */ + U64 + SASAddress; /*0x0C */ + U16 + ParentDevHandle; /*0x14 */ + U8 + PhyNum; /*0x16 */ + U8 + AccessStatus; /*0x17 */ + U16 + DevHandle; /*0x18 */ + U8 + AttachedPhyIdentifier; /*0x1A */ + U8 + ZoneGroup; /*0x1B */ + U32 + DeviceInfo; /*0x1C */ + U16 + Flags; /*0x20 */ + U8 + PhysicalPort; /*0x22 */ + U8 + MaxPortConnections; /*0x23 */ + U64 + DeviceName; /*0x24 */ + U8 + PortGroups; /*0x2C */ + U8 + DmaGroup; /*0x2D */ + U8 + ControlGroup; /*0x2E */ + U8 + EnclosureLevel; /*0x2F */ + U8 + ConnectorName[4]; /*0x30 */ + U32 + Reserved3; /*0x34 */ +} MPI2_CONFIG_PAGE_SAS_DEV_0, + *PTR_MPI2_CONFIG_PAGE_SAS_DEV_0, + Mpi2SasDevicePage0_t, + *pMpi2SasDevicePage0_t; + +#define MPI2_SASDEVICE0_PAGEVERSION (0x09) + +/*values for SAS Device Page 0 AccessStatus field */ +#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03) +#define MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04) +#define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05) +#define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06) +#define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07) +/*specific values for SATA Init failures */ +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19) +#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F) + +/*see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */ + +/*values for SAS Device Page 0 Flags field */ +#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000) +#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH (0x4000) +#define MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000) +#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000) +#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE
(0x0800) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200) +#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED (0x0080) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020) +#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010) +#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008) +#define MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID (0x0002) +#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001) + + +/*SAS Device Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U64 + SASAddress; /*0x0C */ + U32 + Reserved2; /*0x14 */ + U16 + DevHandle; /*0x18 */ + U16 + Reserved3; /*0x1A */ + U8 + InitialRegDeviceFIS[20];/*0x1C */ +} MPI2_CONFIG_PAGE_SAS_DEV_1, + *PTR_MPI2_CONFIG_PAGE_SAS_DEV_1, + Mpi2SasDevicePage1_t, + *pMpi2SasDevicePage1_t; + +#define MPI2_SASDEVICE1_PAGEVERSION (0x01) + + +/**************************************************************************** +* SAS PHY Config Pages +****************************************************************************/ + +/*SAS PHY Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U16 + OwnerDevHandle; /*0x08 */ + U16 + Reserved1; /*0x0A */ + U16 + AttachedDevHandle; /*0x0C */ + U8 + AttachedPhyIdentifier; /*0x0E */ + U8 + Reserved2; /*0x0F */ + U32 + AttachedPhyInfo; /*0x10 */ + U8 + ProgrammedLinkRate; /*0x14 */ + U8 + HwLinkRate; /*0x15 */ + U8 + ChangeCount; /*0x16 */ + U8 + Flags; /*0x17 */ + U32 + PhyInfo; /*0x18 */ + U8 + NegotiatedLinkRate; /*0x1C */ + U8 + Reserved3; /*0x1D */ + U16 + Reserved4; /*0x1E */ +} MPI2_CONFIG_PAGE_SAS_PHY_0, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_0, + Mpi2SasPhyPage0_t, *pMpi2SasPhyPage0_t; + +#define MPI2_SASPHY0_PAGEVERSION (0x03) + +/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */ + +/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */ + +/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */ + +/*values for SAS PHY Page 0 Flags field */ +#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01) + +/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */ + +/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */ + + +/*SAS PHY Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U32 + InvalidDwordCount; /*0x0C */ + U32 + RunningDisparityErrorCount; /*0x10 */ + U32 + LossDwordSynchCount; /*0x14 */ + U32 + PhyResetProblemCount; /*0x18 */ +} MPI2_CONFIG_PAGE_SAS_PHY_1, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_1, + Mpi2SasPhyPage1_t, *pMpi2SasPhyPage1_t; + +#define MPI2_SASPHY1_PAGEVERSION (0x01) + + +/*SAS PHY Page 2 */ + +typedef struct _MPI2_SASPHY2_PHY_EVENT { + U8 PhyEventCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 PhyEventInfo; /*0x04 */ +} MPI2_SASPHY2_PHY_EVENT, *PTR_MPI2_SASPHY2_PHY_EVENT, + Mpi2SasPhy2PhyEvent_t, *pMpi2SasPhy2PhyEvent_t; + +/*use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */ + + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhyEvents at runtime. 
+ */ +#ifndef MPI2_SASPHY2_PHY_EVENT_MAX +#define MPI2_SASPHY2_PHY_EVENT_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U8 + NumPhyEvents; /*0x0C */ + U8 + Reserved2; /*0x0D */ + U16 + Reserved3; /*0x0E */ + MPI2_SASPHY2_PHY_EVENT + PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /*0x10 */ +} MPI2_CONFIG_PAGE_SAS_PHY_2, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_2, + Mpi2SasPhyPage2_t, + *pMpi2SasPhyPage2_t; + +#define MPI2_SASPHY2_PAGEVERSION (0x00) + + +/*SAS PHY Page 3 */ + +typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG { + U8 PhyEventCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U8 CounterType; /*0x04 */ + U8 ThresholdWindow; /*0x05 */ + U8 TimeUnits; /*0x06 */ + U8 Reserved3; /*0x07 */ + U32 EventThreshold; /*0x08 */ + U16 ThresholdFlags; /*0x0C */ + U16 Reserved4; /*0x0E */ +} MPI2_SASPHY3_PHY_EVENT_CONFIG, + *PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG, + Mpi2SasPhy3PhyEventConfig_t, + *pMpi2SasPhy3PhyEventConfig_t; + +/*values for PhyEventCode field */ +#define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00) +#define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01) +#define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02) +#define MPI2_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03) +#define MPI2_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04) +#define MPI2_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05) +#define MPI2_SASPHY3_EVENT_CODE_RX_ERROR (0x06) +#define MPI2_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20) +#define MPI2_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21) +#define MPI2_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22) +#define MPI2_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23) +#define MPI2_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26) +#define MPI2_SASPHY3_EVENT_CODE_TX_BREAK (0x27) +#define MPI2_SASPHY3_EVENT_CODE_RX_BREAK (0x28) +#define MPI2_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29) +#define MPI2_SASPHY3_EVENT_CODE_CONNECTION (0x2A) +#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2B) +#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2C) +#define MPI2_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2D) +#define MPI2_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2E) +#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40) +#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41) +#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42) +#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43) +#define MPI2_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44) +#define MPI2_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45) +#define MPI2_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50) +#define MPI2_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51) +#define MPI2_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52) +#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60) +#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61) +#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63) +#define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0) +#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1) +#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2) + +/*values for the CounterType field */ +#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00) +#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01) +#define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02) + +/*values for the TimeUnits field */ +#define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00) +#define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01) +#define 
MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02) +#define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03) + +/*values for the ThresholdFlags field */ +#define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002) +#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001) + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumPhyEvents at runtime. + */ +#ifndef MPI2_SASPHY3_PHY_EVENT_MAX +#define MPI2_SASPHY3_PHY_EVENT_MAX (1) +#endif + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U8 + NumPhyEvents; /*0x0C */ + U8 + Reserved2; /*0x0D */ + U16 + Reserved3; /*0x0E */ + MPI2_SASPHY3_PHY_EVENT_CONFIG + PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /*0x10 */ +} MPI2_CONFIG_PAGE_SAS_PHY_3, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_3, + Mpi2SasPhyPage3_t, *pMpi2SasPhyPage3_t; + +#define MPI2_SASPHY3_PAGEVERSION (0x00) + + +/*SAS PHY Page 4 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U16 + Reserved1; /*0x08 */ + U8 + Reserved2; /*0x0A */ + U8 + Flags; /*0x0B */ + U8 + InitialFrame[28]; /*0x0C */ +} MPI2_CONFIG_PAGE_SAS_PHY_4, + *PTR_MPI2_CONFIG_PAGE_SAS_PHY_4, + Mpi2SasPhyPage4_t, *pMpi2SasPhyPage4_t; + +#define MPI2_SASPHY4_PAGEVERSION (0x00) + +/*values for the Flags field */ +#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02) +#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01) + + + + +/**************************************************************************** +* SAS Port Config Pages +****************************************************************************/ + +/*SAS Port Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U8 + PortNumber; /*0x08 */ + U8 + PhysicalPort; /*0x09 */ + U8 + PortWidth; /*0x0A */ + U8 + PhysicalPortWidth; /*0x0B */ + U8 + ZoneGroup; /*0x0C */ + U8 + Reserved1; /*0x0D */ + U16 + Reserved2; /*0x0E */ + U64 + SASAddress; /*0x10 */ + U32 + DeviceInfo; /*0x18 */ + U32 + Reserved3; /*0x1C */ + U32 + Reserved4; /*0x20 */ +} MPI2_CONFIG_PAGE_SAS_PORT_0, + *PTR_MPI2_CONFIG_PAGE_SAS_PORT_0, + Mpi2SasPortPage0_t, *pMpi2SasPortPage0_t; + +#define MPI2_SASPORT0_PAGEVERSION (0x00) + +/*see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */ + + +/**************************************************************************** +* SAS Enclosure Config Pages +****************************************************************************/ + +/*SAS Enclosure Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved1; /*0x08 */ + U64 + EnclosureLogicalID; /*0x0C */ + U16 + Flags; /*0x14 */ + U16 + EnclosureHandle; /*0x16 */ + U16 + NumSlots; /*0x18 */ + U16 + StartSlot; /*0x1A */ + U8 + Reserved2; /*0x1C */ + U8 + EnclosureLevel; /*0x1D */ + U16 + SEPDevHandle; /*0x1E */ + U32 + Reserved3; /*0x20 */ + U32 + Reserved4; /*0x24 */ +} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, + *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0, + Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t; + +#define MPI2_SASENCLOSURE0_PAGEVERSION (0x04) + +/*values for SAS Enclosure Page 0 Flags field */ +#define MPI2_SAS_ENCLS0_FLAGS_ENCL_LEVEL_VALID (0x0010) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003) 
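The recurring note in these headers — leave each *_MAX define at one and use the NumX count the firmware actually returns — implies a two-step fetch for every variable-length config page. A minimal host-side sketch of that pattern follows, using the SAS PHY Page 2 types defined above; it assumes this header is included, and read_config_page() is a hypothetical stand-in for the real firmware transport, not an API from this driver.

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical transport helper: reads up to len bytes of the page. */
extern int read_config_page(void *buf, size_t len);

Mpi2SasPhyPage2_t *fetch_sas_phy_page2(void)
{
	Mpi2SasPhyPage2_t fixed, *page;
	size_t size;

	/* First pass: read the fixed part to learn NumPhyEvents. */
	if (read_config_page(&fixed, sizeof(fixed)) != 0)
		return NULL;

	/* Size the buffer from the runtime count, not the 1-entry array. */
	size = offsetof(Mpi2SasPhyPage2_t, PhyEvent) +
	       fixed.NumPhyEvents * sizeof(MPI2_SASPHY2_PHY_EVENT);

	page = malloc(size);
	if (page == NULL)
		return NULL;

	/* Second pass: read the whole page, PhyEvent[] included. */
	if (read_config_page(page, size) != 0) {
		free(page);
		return NULL;
	}
	return page;
}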
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004) +#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005) + + +/**************************************************************************** +* Log Config Page +****************************************************************************/ + +/*Log Page 0 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumLogEntries at runtime. + */ +#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES +#define MPI2_LOG_0_NUM_LOG_ENTRIES (1) +#endif + +#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C) + +typedef struct _MPI2_LOG_0_ENTRY { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U16 LogSequence; /*0x0C */ + U16 LogEntryQualifier; /*0x0E */ + U8 VP_ID; /*0x10 */ + U8 VF_ID; /*0x11 */ + U16 Reserved2; /*0x12 */ + U8 + LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/*0x14 */ +} MPI2_LOG_0_ENTRY, *PTR_MPI2_LOG_0_ENTRY, + Mpi2Log0Entry_t, *pMpi2Log0Entry_t; + +/*values for Log Page 0 LogEntry LogEntryQualifier field */ +#define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000) +#define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001) +#define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE (0x0002) +#define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC (0x8000) +#define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC (0xFFFF) + +typedef struct _MPI2_CONFIG_PAGE_LOG_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ + U16 NumLogEntries;/*0x10 */ + U16 Reserved3; /*0x12 */ + MPI2_LOG_0_ENTRY + LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /*0x14 */ +} MPI2_CONFIG_PAGE_LOG_0, *PTR_MPI2_CONFIG_PAGE_LOG_0, + Mpi2LogPage0_t, *pMpi2LogPage0_t; + +#define MPI2_LOG_0_PAGEVERSION (0x02) + + +/**************************************************************************** +* RAID Config Page +****************************************************************************/ + +/*RAID Page 0 */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check the value returned for NumElements at runtime. 
+ */ +#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS +#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1) +#endif + +typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT { + U16 ElementFlags; /*0x00 */ + U16 VolDevHandle; /*0x02 */ + U8 HotSparePool; /*0x04 */ + U8 PhysDiskNum; /*0x05 */ + U16 PhysDiskDevHandle; /*0x06 */ +} MPI2_RAIDCONFIG0_CONFIG_ELEMENT, + *PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT, + Mpi2RaidConfig0ConfigElement_t, + *pMpi2RaidConfig0ConfigElement_t; + +/*values for the ElementFlags field */ +#define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F) +#define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT (0x0000) +#define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001) +#define MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002) +#define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003) + + +typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 NumHotSpares; /*0x08 */ + U8 NumPhysDisks; /*0x09 */ + U8 NumVolumes; /*0x0A */ + U8 ConfigNum; /*0x0B */ + U32 Flags; /*0x0C */ + U8 ConfigGUID[24]; /*0x10 */ + U32 Reserved1; /*0x28 */ + U8 NumElements; /*0x2C */ + U8 Reserved2; /*0x2D */ + U16 Reserved3; /*0x2E */ + MPI2_RAIDCONFIG0_CONFIG_ELEMENT + ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /*0x30 */ +} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0, + *PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0, + Mpi2RaidConfigurationPage0_t, + *pMpi2RaidConfigurationPage0_t; + +#define MPI2_RAIDCONFIG0_PAGEVERSION (0x00) + +/*values for RAID Configuration Page 0 Flags field */ +#define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001) + + +/**************************************************************************** +* Driver Persistent Mapping Config Pages +****************************************************************************/ + +/*Driver Persistent Mapping Page 0 */ + +typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY { + U64 PhysicalIdentifier; /*0x00 */ + U16 MappingInformation; /*0x08 */ + U16 DeviceIndex; /*0x0A */ + U32 PhysicalBitsMapping; /*0x0C */ + U32 Reserved1; /*0x10 */ +} MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY, + *PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY, + Mpi2DriverMap0Entry_t, *pMpi2DriverMap0Entry_t; + +typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /*0x08 */ +} MPI2_CONFIG_PAGE_DRIVER_MAPPING_0, + *PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0, + Mpi2DriverMappingPage0_t, *pMpi2DriverMappingPage0_t; + +#define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00) + +/*values for Driver Persistent Mapping Page 0 MappingInformation field */ +#define MPI2_DRVMAP0_MAPINFO_SLOT_MASK (0x07F0) +#define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT (4) +#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F) + + +/**************************************************************************** +* Ethernet Config Pages +****************************************************************************/ + +/*Ethernet Page 0 */ + +/*IP address (union of IPv4 and IPv6) */ +typedef union _MPI2_ETHERNET_IP_ADDR { + U32 IPv4Addr; + U32 IPv6Addr[4]; +} MPI2_ETHERNET_IP_ADDR, *PTR_MPI2_ETHERNET_IP_ADDR, + Mpi2EthernetIpAddr_t, *pMpi2EthernetIpAddr_t; + +#define MPI2_ETHERNET_HOST_NAME_LENGTH (32) + +typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */ + U8 NumInterfaces; /*0x08 */ + U8 Reserved0; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Status; /*0x0C */ + U8 MediaState; /*0x10 */ + U8 Reserved2; /*0x11 */ + U16 Reserved3; /*0x12 */ + U8 MacAddress[6]; /*0x14 */ + U8 
Reserved4; /*0x1A */ + U8 Reserved5; /*0x1B */ + MPI2_ETHERNET_IP_ADDR IpAddress; /*0x1C */ + MPI2_ETHERNET_IP_ADDR SubnetMask; /*0x2C */ + MPI2_ETHERNET_IP_ADDR GatewayIpAddress;/*0x3C */ + MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /*0x4C */ + MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /*0x5C */ + MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /*0x6C */ + U8 + HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_0, + *PTR_MPI2_CONFIG_PAGE_ETHERNET_0, + Mpi2EthernetPage0_t, *pMpi2EthernetPage0_t; + +#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00) + +/*values for Ethernet Page 0 Status field */ +#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000) +#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000) +#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000) +#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100) +#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080) +#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040) +#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020) +#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010) +#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008) +#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004) +#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002) +#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001) + +/*values for Ethernet Page 0 MediaState field */ +#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80) +#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00) +#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80) + +#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07) +#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00) +#define MPI2_ETHPG0_MS_10MBIT (0x01) +#define MPI2_ETHPG0_MS_100MBIT (0x02) +#define MPI2_ETHPG0_MS_1GBIT (0x03) + + +/*Ethernet Page 1 */ + +typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + Reserved0; /*0x08 */ + U32 + Flags; /*0x0C */ + U8 + MediaState; /*0x10 */ + U8 + Reserved1; /*0x11 */ + U16 + Reserved2; /*0x12 */ + U8 + MacAddress[6]; /*0x14 */ + U8 + Reserved3; /*0x1A */ + U8 + Reserved4; /*0x1B */ + MPI2_ETHERNET_IP_ADDR + StaticIpAddress; /*0x1C */ + MPI2_ETHERNET_IP_ADDR + StaticSubnetMask; /*0x2C */ + MPI2_ETHERNET_IP_ADDR + StaticGatewayIpAddress; /*0x3C */ + MPI2_ETHERNET_IP_ADDR + StaticDNS1IpAddress; /*0x4C */ + MPI2_ETHERNET_IP_ADDR + StaticDNS2IpAddress; /*0x5C */ + U32 + Reserved5; /*0x6C */ + U32 + Reserved6; /*0x70 */ + U32 + Reserved7; /*0x74 */ + U32 + Reserved8; /*0x78 */ + U8 + HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */ +} MPI2_CONFIG_PAGE_ETHERNET_1, + *PTR_MPI2_CONFIG_PAGE_ETHERNET_1, + Mpi2EthernetPage1_t, *pMpi2EthernetPage1_t; + +#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00) + +/*values for Ethernet Page 1 Flags field */ +#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100) +#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080) +#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040) +#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020) +#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010) +#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008) +#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004) +#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002) +#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001) + +/*values for Ethernet Page 1 MediaState field */ +#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80) +#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00) +#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80) + +#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07) +#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00) +#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01) +#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02) 
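Because the Ethernet page MediaState byte packs duplex and connect speed into one field, decoding it takes one mask per sub-field. A small illustrative helper (hypothetical, built only from the Ethernet Page 0 defines above):

/* Hypothetical decode of an Ethernet Page 0 MediaState byte. */
static const char *eth_media_state_str(U8 media_state)
{
	int full = (media_state & MPI2_ETHPG0_MS_DUPLEX_MASK) ==
		   MPI2_ETHPG0_MS_FULL_DUPLEX;

	switch (media_state & MPI2_ETHPG0_MS_CONNECT_SPEED_MASK) {
	case MPI2_ETHPG0_MS_NOT_CONNECTED:
		return "not connected";
	case MPI2_ETHPG0_MS_10MBIT:
		return full ? "10 Mb/s full duplex" : "10 Mb/s half duplex";
	case MPI2_ETHPG0_MS_100MBIT:
		return full ? "100 Mb/s full duplex" : "100 Mb/s half duplex";
	case MPI2_ETHPG0_MS_1GBIT:
		return full ? "1 Gb/s full duplex" : "1 Gb/s half duplex";
	default:
		return "unknown";
	}
}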
+#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03) + + +/**************************************************************************** +* Extended Manufacturing Config Pages +****************************************************************************/ + +/* + *Generic structure to use for product-specific extended manufacturing pages + *(currently Extended Manufacturing Page 40 through Extended Manufacturing + *Page 60). + */ + +typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS { + MPI2_CONFIG_EXTENDED_PAGE_HEADER + Header; /*0x00 */ + U32 + ProductSpecificInfo; /*0x08 */ +} MPI2_CONFIG_PAGE_EXT_MAN_PS, + *PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS, + Mpi2ExtManufacturingPagePS_t, + *pMpi2ExtManufacturingPagePS_t; + +/*PageVersion should be provided by product-specific code */ + +#endif diff --git a/addons/mpt3sas/src/4.4.180/mpi/mpi2_init.h b/addons/mpt3sas/src/4.4.180/mpi/mpi2_init.h new file mode 100644 index 00000000..068c98ef --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpi/mpi2_init.h @@ -0,0 +1,562 @@ +/* + * Copyright (c) 2000-2014 LSI Corporation. + * + * + * Name: mpi2_init.h + * Title: MPI SCSI initiator mode messages and structures + * Creation Date: June 23, 2006 + * + * mpi2_init.h Version: 02.00.15 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t. + * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines. + * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention. + * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY. + * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t. + * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO + * Control field Task Attribute flags. + * Moved LUN field defines to mpi2.h because they are + * common to many structures. + * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to + * Query Asynchronous Event. + * Defined two new bits in the SlotStatus field of the SCSI + * Enclosure Processor Request and Reply. + * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for + * both SCSI IO Error Reply and SCSI Task Management Reply. + * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY. + * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define. + * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it. + * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request. + * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define. + * 11-18-11 02.00.12 Incorporating additions for MPI v2.5. + * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command + * Priority to match SAM-4. + * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY. + * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION. + * 04-09-13 02.00.15 Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY, + * replacing the Reserved4 field.
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_INIT_H +#define MPI2_INIT_H + +/***************************************************************************** +* +* SCSI Initiator Messages +* +*****************************************************************************/ + +/**************************************************************************** +* SCSI IO messages and associated structures +****************************************************************************/ + +typedef struct _MPI2_SCSI_IO_CDB_EEDP32 { + U8 CDB[20]; /*0x00 */ + U32 PrimaryReferenceTag; /*0x14 */ + U16 PrimaryApplicationTag; /*0x18 */ + U16 PrimaryApplicationTagMask; /*0x1A */ + U32 TransferLength; /*0x1C */ +} MPI2_SCSI_IO_CDB_EEDP32, *PTR_MPI2_SCSI_IO_CDB_EEDP32, + Mpi2ScsiIoCdbEedp32_t, *pMpi2ScsiIoCdbEedp32_t; + +/*MPI v2.0 CDB field */ +typedef union _MPI2_SCSI_IO_CDB_UNION { + U8 CDB32[32]; + MPI2_SCSI_IO_CDB_EEDP32 EEDP32; + MPI2_SGE_SIMPLE_UNION SGE; +} MPI2_SCSI_IO_CDB_UNION, *PTR_MPI2_SCSI_IO_CDB_UNION, + Mpi2ScsiIoCdb_t, *pMpi2ScsiIoCdb_t; + +/*MPI v2.0 SCSI IO Request Message */ +typedef struct _MPI2_SCSI_IO_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U32 SenseBufferLowAddress; /*0x0C */ + U16 SGLFlags; /*0x10 */ + U8 SenseBufferLength; /*0x12 */ + U8 Reserved4; /*0x13 */ + U8 SGLOffset0; /*0x14 */ + U8 SGLOffset1; /*0x15 */ + U8 SGLOffset2; /*0x16 */ + U8 SGLOffset3; /*0x17 */ + U32 SkipCount; /*0x18 */ + U32 DataLength; /*0x1C */ + U32 BidirectionalDataLength; /*0x20 */ + U16 IoFlags; /*0x24 */ + U16 EEDPFlags; /*0x26 */ + U32 EEDPBlockSize; /*0x28 */ + U32 SecondaryReferenceTag; /*0x2C */ + U16 SecondaryApplicationTag; /*0x30 */ + U16 ApplicationTagTranslationMask; /*0x32 */ + U8 LUN[8]; /*0x34 */ + U32 Control; /*0x3C */ + MPI2_SCSI_IO_CDB_UNION CDB; /*0x40 */ + +#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */ + MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion; +#endif + + MPI2_SGE_IO_UNION SGL; /*0x60 */ + +} MPI2_SCSI_IO_REQUEST, *PTR_MPI2_SCSI_IO_REQUEST, + Mpi2SCSIIORequest_t, *pMpi2SCSIIORequest_t; + +/*SCSI IO MsgFlags bits */ + +/*MsgFlags for SenseBufferAddressSpace */ +#define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C) +#define MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00) +#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04) +#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08) +#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C) + +/*SCSI IO SGLFlags bits */ + +/*base values for Data Location Address Space */ +#define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C) +#define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00) +#define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04) +#define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08) +#define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C) + +/*base values for Type */ +#define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01) +#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02) + +/*shift values for each sub-field */ +#define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12) +#define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8) +#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4) +#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0) + +/*number of SGLOffset fields */ +#define MPI2_SCSIIO_NUM_SGLOFFSETS (4) + +/*SCSI IO IoFlags bits */ + +/*Large CDB Address 
Space */ +#define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000) +#define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000) +#define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000) +#define MPI2_SCSIIO_CDB_ADDR_IOCPLB (0x4000) +#define MPI2_SCSIIO_CDB_ADDR_IOCPLBNTA (0x6000) + +#define MPI2_SCSIIO_IOFLAGS_LARGE_CDB (0x1000) +#define MPI2_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI2_SCSIIO_IOFLAGS_MULTICAST (0x0400) +#define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200) +#define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) + +/*SCSI IO EEDPFlags bits */ + +#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG (0x2000) +#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_APPTAG (0x1000) + +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100) + +#define MPI2_SCSIIO_EEDPFLAGS_PASSTHRU_REFTAG (0x0008) + +#define MPI2_SCSIIO_EEDPFLAGS_MASK_OP (0x0007) +#define MPI2_SCSIIO_EEDPFLAGS_NOOP_OP (0x0000) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_OP (0x0001) +#define MPI2_SCSIIO_EEDPFLAGS_STRIP_OP (0x0002) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003) +#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004) +#define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006) +#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007) + +/*SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */ + +/*SCSI IO Control bits */ +#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000) +#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26) + +#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000) +#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24) +#define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000) +#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000) +#define MPI2_SCSIIO_CONTROL_READ (0x02000000) +#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000) + +#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800) +#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11) +/*alternate name for the previous field; called Command Priority in SAM-4 */ +#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800) +#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11) + +#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700) +#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000) +#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100) +#define MPI2_SCSIIO_CONTROL_ORDEREDQ (0x00000200) +#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400) + +#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0) +#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000) +#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040) +#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080) + +/*MPI v2.5 CDB field */ +typedef union _MPI25_SCSI_IO_CDB_UNION { + U8 CDB32[32]; + MPI2_SCSI_IO_CDB_EEDP32 EEDP32; + MPI2_IEEE_SGE_SIMPLE64 SGE; +} MPI25_SCSI_IO_CDB_UNION, *PTR_MPI25_SCSI_IO_CDB_UNION, + Mpi25ScsiIoCdb_t, *pMpi25ScsiIoCdb_t; + +/*MPI v2.5 SCSI IO Request Message */ +typedef struct _MPI25_SCSI_IO_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U32 SenseBufferLowAddress; /*0x0C */ + U8 DMAFlags; /*0x10 */ + U8 Reserved5; /*0x11 */ + U8 SenseBufferLength; /*0x12 */ + U8 Reserved4; /*0x13 */ + U8 SGLOffset0; /*0x14 */ + U8 SGLOffset1; /*0x15 */ + U8 SGLOffset2; /*0x16 */ + U8 SGLOffset3; /*0x17 */ + U32 SkipCount; /*0x18 */ + U32 DataLength; /*0x1C */ + U32 BidirectionalDataLength; /*0x20 
*/ + U16 IoFlags; /*0x24 */ + U16 EEDPFlags; /*0x26 */ + U16 EEDPBlockSize; /*0x28 */ + U16 Reserved6; /*0x2A */ + U32 SecondaryReferenceTag; /*0x2C */ + U16 SecondaryApplicationTag; /*0x30 */ + U16 ApplicationTagTranslationMask; /*0x32 */ + U8 LUN[8]; /*0x34 */ + U32 Control; /*0x3C */ + MPI25_SCSI_IO_CDB_UNION CDB; /*0x40 */ + +#ifdef MPI25_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */ + MPI25_SCSI_IO_VENDOR_UNIQUE VendorRegion; +#endif + + MPI25_SGE_IO_UNION SGL; /*0x60 */ + +} MPI25_SCSI_IO_REQUEST, *PTR_MPI25_SCSI_IO_REQUEST, + Mpi25SCSIIORequest_t, *pMpi25SCSIIORequest_t; + +/*use MPI2_SCSIIO_MSGFLAGS_ defines for the MsgFlags field */ + +/*Defines for the DMAFlags field + * Each setting affects 4 SGLS, from SGL0 to SGL3. + * D = Data + * C = Cache DIF + * I = Interleaved + * H = Host DIF + */ +#define MPI25_SCSIIO_DMAFLAGS_OP_MASK (0x0F) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_D (0x00) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_C (0x01) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_I (0x02) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_C (0x03) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_I (0x04) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_I_I (0x05) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_C (0x06) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_I (0x07) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_I_I (0x08) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_I_I_I (0x09) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_D (0x0A) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_C (0x0B) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_I (0x0C) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_C (0x0D) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_I (0x0E) +#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_I_I (0x0F) + +/*number of SGLOffset fields */ +#define MPI25_SCSIIO_NUM_SGLOFFSETS (4) + +/*defines for the IoFlags field */ +#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000) +#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000) +#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000) + +#define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000) +#define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800) +#define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF) + +/*MPI v2.5 defines for the EEDPFlags bits */ +/*use MPI2_SCSIIO_EEDPFLAGS_ defines for the other EEDPFlags bits */ +#define MPI25_SCSIIO_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0) +#define MPI25_SCSIIO_EEDPFLAGS_COMPATIBLE_MODE (0x0000) +#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040) +#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080) +#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0) + +#define MPI25_SCSIIO_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030) +#define MPI25_SCSIIO_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000) +#define MPI25_SCSIIO_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010) + +/*use MPI2_LUN_ defines from mpi2.h for the LUN field */ + +/*use MPI2_SCSIIO_CONTROL_ defines for the Control field */ + +/*NOTE: The SCSI IO Reply is nearly the same for MPI 2.0 and MPI 2.5, so + * MPI2_SCSI_IO_REPLY is used for both. 
+ */ + +/*SCSI IO Error Reply Message */ +typedef struct _MPI2_SCSI_IO_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U8 SCSIStatus; /*0x0C */ + U8 SCSIState; /*0x0D */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 TransferCount; /*0x14 */ + U32 SenseCount; /*0x18 */ + U32 ResponseInfo; /*0x1C */ + U16 TaskTag; /*0x20 */ + U16 SCSIStatusQualifier; /* 0x22 */ + U32 BidirectionalTransferCount; /*0x24 */ + U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/ + U32 Reserved6; /*0x2C */ +} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY, + Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t; + +/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */ + +#define MPI2_SCSI_STATUS_GOOD (0x00) +#define MPI2_SCSI_STATUS_CHECK_CONDITION (0x02) +#define MPI2_SCSI_STATUS_CONDITION_MET (0x04) +#define MPI2_SCSI_STATUS_BUSY (0x08) +#define MPI2_SCSI_STATUS_INTERMEDIATE (0x10) +#define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14) +#define MPI2_SCSI_STATUS_RESERVATION_CONFLICT (0x18) +#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /*obsolete */ +#define MPI2_SCSI_STATUS_TASK_SET_FULL (0x28) +#define MPI2_SCSI_STATUS_ACA_ACTIVE (0x30) +#define MPI2_SCSI_STATUS_TASK_ABORTED (0x40) + +/*SCSI IO Reply SCSIState flags */ + +#define MPI2_SCSI_STATE_RESPONSE_INFO_VALID (0x10) +#define MPI2_SCSI_STATE_TERMINATED (0x08) +#define MPI2_SCSI_STATE_NO_SCSI_STATUS (0x04) +#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02) +#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01) + +/*masks and shifts for the ResponseInfo field */ + +#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF) +#define MPI2_SCSI_RI_SHIFT_REASONCODE (0) + +#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF) + +/**************************************************************************** +* SCSI Task Management messages +****************************************************************************/ + +/*SCSI Task Management Request Message */ +typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U8 Reserved1; /*0x04 */ + U8 TaskType; /*0x05 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U8 LUN[8]; /*0x0C */ + U32 Reserved4[7]; /*0x14 */ + U16 TaskMID; /*0x30 */ + U16 Reserved5; /*0x32 */ +} MPI2_SCSI_TASK_MANAGE_REQUEST, + *PTR_MPI2_SCSI_TASK_MANAGE_REQUEST, + Mpi2SCSITaskManagementRequest_t, + *pMpi2SCSITaskManagementRequest_t; + +/*TaskType values */ + +#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01) +#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02) +#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03) +#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05) +#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06) +#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07) +#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08) +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09) +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A) + +/*obsolete TaskType name */ +#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION \ + (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT) + +/*MsgFlags bits */ + +#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18) +#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00) +#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08) +#define 
MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10) + +#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01) + +/*SCSI Task Management Reply Message */ +typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U8 ResponseCode; /*0x04 */ + U8 TaskType; /*0x05 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 TerminationCount; /*0x14 */ + U32 ResponseInfo; /*0x18 */ +} MPI2_SCSI_TASK_MANAGE_REPLY, + *PTR_MPI2_SCSI_TASK_MANAGE_REPLY, + Mpi2SCSITaskManagementReply_t, *pMpi2SCSIManagementReply_t; + +/*ResponseCode values */ + +#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00) +#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02) +#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04) +#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05) +#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08) +#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09) +#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A) +#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80) + +/*masks and shifts for the ResponseInfo field */ + +#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF) +#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0) +#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00) +#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8) +#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000) +#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16) +#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000) +#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24) + +/**************************************************************************** +* SCSI Enclosure Processor messages +****************************************************************************/ + +/*SCSI Enclosure Processor Request Message */ +typedef struct _MPI2_SEP_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U8 Action; /*0x04 */ + U8 Flags; /*0x05 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 SlotStatus; /*0x0C */ + U32 Reserved3; /*0x10 */ + U32 Reserved4; /*0x14 */ + U32 Reserved5; /*0x18 */ + U16 Slot; /*0x1C */ + U16 EnclosureHandle; /*0x1E */ +} MPI2_SEP_REQUEST, *PTR_MPI2_SEP_REQUEST, + Mpi2SepRequest_t, *pMpi2SepRequest_t; + +/*Action defines */ +#define MPI2_SEP_REQ_ACTION_WRITE_STATUS (0x00) +#define MPI2_SEP_REQ_ACTION_READ_STATUS (0x01) + +/*Flags defines */ +#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00) +#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01) + +/*SlotStatus defines */ +#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000) +#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) +#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200) +#define MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100) +#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080) +#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040) +#define MPI2_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) +#define MPI2_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) +#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004) +#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002) +#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001) + +/*SCSI Enclosure Processor Reply Message */ +typedef struct _MPI2_SEP_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U8 Action; /*0x04 */ + U8 
Flags; /*0x05 */ + U8 Reserved1; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 SlotStatus; /*0x14 */ + U32 Reserved4; /*0x18 */ + U16 Slot; /*0x1C */ + U16 EnclosureHandle; /*0x1E */ +} MPI2_SEP_REPLY, *PTR_MPI2_SEP_REPLY, + Mpi2SepReply_t, *pMpi2SepReply_t; + +/*SlotStatus defines */ +#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000) +#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000) +#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200) +#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100) +#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080) +#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040) +#define MPI2_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010) +#define MPI2_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008) +#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004) +#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002) +#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001) + +#endif diff --git a/addons/mpt3sas/src/4.4.180/mpi/mpi2_ioc.h b/addons/mpt3sas/src/4.4.180/mpi/mpi2_ioc.h new file mode 100644 index 00000000..d7598cc4 --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpi/mpi2_ioc.h @@ -0,0 +1,1729 @@ +/* + * Copyright (c) 2000-2014 LSI Corporation. + * + * + * Name: mpi2_ioc.h + * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages + * Creation Date: October 11, 2006 + * + * mpi2_ioc.h Version: 02.00.24 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to + * MaxTargets. + * Added TotalImageSize field to FWDownload Request. + * Added reserved words to FWUpload Request. + * 06-26-07 02.00.02 Added IR Configuration Change List Event. + * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit + * request and replaced it with + * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth. + * Replaced the MinReplyQueueDepth field of the IOCFacts + * reply with MaxReplyDescriptorPostQueueDepth. + * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum + * depth for the Reply Descriptor Post Queue. + * Added SASAddress field to Initiator Device Table + * Overflow Event data. + * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING + * for SAS Initiator Device Status Change Event data. + * Modified Reason Code defines for SAS Topology Change + * List Event data, including adding a bit for PHY Vacant + * status, and adding a mask for the Reason Code. + * Added define for + * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING. + * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID. + * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of + * the IOCFacts Reply. + * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. + * Moved MPI2_VERSION_UNION to mpi2.h. 
+ * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks + * instead of enables, and added SASBroadcastPrimitiveMasks + * field. + * Added Log Entry Added Event and related structure. + * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID. + * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET. + * Added MaxVolumes and MaxPersistentEntries fields to + * IOCFacts reply. + * Added ProtocolFlags and IOCCapabilities fields to + * MPI2_FW_IMAGE_HEADER. + * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT. + * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to + * a U16 (from a U32). + * Removed extra 's' from EventMasks name. + * 06-27-08 02.00.08 Fixed an offset in a comment. + * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST. + * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and + * renamed MinReplyFrameSize to ReplyFrameSize. + * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX. + * Added two new RAIDOperation values for Integrated RAID + * Operations Status Event data. + * Added four new IR Configuration Change List Event data + * ReasonCode values. + * Added two new ReasonCode defines for SAS Device Status + * Change Event data. + * Added three new DiscoveryStatus bits for the SAS + * Discovery event data. + * Added Multiplexing Status Change bit to the PhyStatus + * field of the SAS Topology Change List event data. + * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY. + * BootFlags are now product-specific. + * Added defines for the individual signature bytes + * for MPI2_INIT_IMAGE_FOOTER. + * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define. + * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR + * define. + * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE + * define. + * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define. + * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define. + * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define. + * Added two new reason codes for SAS Device Status Change + * Event. + * Added new event: SAS PHY Counter. + * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure. + * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define. + * Added new product id family for 2208. + * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST. + * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY. + * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY. + * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY. + * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define. + * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define. + * Added Host Based Discovery Phy Event data. + * Added defines for ProductID Product field + * (MPI2_FW_HEADER_PID_). + * Modified values for SAS ProductID Family + * (MPI2_FW_HEADER_PID_FAMILY_). + * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines. + * Added PowerManagementControl Request structures and + * defines. + * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete. + * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define. + * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC. + * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added + * SASNotifyPrimitiveMasks field to + * MPI2_EVENT_NOTIFICATION_REQUEST. + * Added Temperature Threshold Event. + * Added Host Message Event. + * Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added + * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and + * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines. + * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define. + * 08-24-11 02.00.19 Added PhysicalPort field to + * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure. + * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete. + * 11-18-11 02.00.20 Incorporating additions for MPI v2.5. + * 03-29-12 02.00.21 Added a product specific range to event values. + * 07-26-12 02.00.22 Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE. + * Added ElapsedSeconds field to + * MPI2_EVENT_DATA_IR_OPERATION_STATUS. + * 08-19-13 02.00.23 For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE + * and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY. + * Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE. + * Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY. + * Added Encrypted Hash Extended Image. + * 12-05-13 02.00.24 Added MPI25_HASH_IMAGE_TYPE_BIOS. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_IOC_H +#define MPI2_IOC_H + +/***************************************************************************** +* +* IOC Messages +* +*****************************************************************************/ + +/**************************************************************************** +* IOCInit message +****************************************************************************/ + +/*IOCInit Request message */ +typedef struct _MPI2_IOC_INIT_REQUEST { + U8 WhoInit; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 MsgVersion; /*0x0C */ + U16 HeaderVersion; /*0x0E */ + U32 Reserved5; /*0x10 */ + U16 Reserved6; /*0x14 */ + U8 Reserved7; /*0x16 */ + U8 HostMSIxVectors; /*0x17 */ + U16 Reserved8; /*0x18 */ + U16 SystemRequestFrameSize; /*0x1A */ + U16 ReplyDescriptorPostQueueDepth; /*0x1C */ + U16 ReplyFreeQueueDepth; /*0x1E */ + U32 SenseBufferAddressHigh; /*0x20 */ + U32 SystemReplyAddressHigh; /*0x24 */ + U64 SystemRequestFrameBaseAddress; /*0x28 */ + U64 ReplyDescriptorPostQueueAddress; /*0x30 */ + U64 ReplyFreeQueueAddress; /*0x38 */ + U64 TimeStamp; /*0x40 */ +} MPI2_IOC_INIT_REQUEST, *PTR_MPI2_IOC_INIT_REQUEST, + Mpi2IOCInitRequest_t, *pMpi2IOCInitRequest_t; + +/*WhoInit values */ +#define MPI2_WHOINIT_NOT_INITIALIZED (0x00) +#define MPI2_WHOINIT_SYSTEM_BIOS (0x01) +#define MPI2_WHOINIT_ROM_BIOS (0x02) +#define MPI2_WHOINIT_PCI_PEER (0x03) +#define MPI2_WHOINIT_HOST_DRIVER (0x04) +#define MPI2_WHOINIT_MANUFACTURER (0x05) + +/* MsgFlags */ +#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE (0x01) + + +/*MsgVersion */ +#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00) +#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8) +#define MPI2_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF) +#define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT (0) + +/*HeaderVersion */ +#define MPI2_IOCINIT_HDRVERSION_UNIT_MASK (0xFF00) +#define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT (8) +#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF) +#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0) + +/*minimum depth for a Reply Descriptor Post Queue */ +#define MPI2_RDPQ_DEPTH_MIN (16) + +/* Reply Descriptor Post Queue Array Entry */ +typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY { + U64 RDPQBaseAddress; /* 0x00 */ + U32 Reserved1; /* 0x08 */ + U32 Reserved2; /* 0x0C */ +} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, 
+*PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, +Mpi2IOCInitRDPQArrayEntry, *pMpi2IOCInitRDPQArrayEntry; + + +/*IOCInit Reply message */ +typedef struct _MPI2_IOC_INIT_REPLY { + U8 WhoInit; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_IOC_INIT_REPLY, *PTR_MPI2_IOC_INIT_REPLY, + Mpi2IOCInitReply_t, *pMpi2IOCInitReply_t; + +/**************************************************************************** +* IOCFacts message +****************************************************************************/ + +/*IOCFacts Request message */ +typedef struct _MPI2_IOC_FACTS_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ +} MPI2_IOC_FACTS_REQUEST, *PTR_MPI2_IOC_FACTS_REQUEST, + Mpi2IOCFactsRequest_t, *pMpi2IOCFactsRequest_t; + +/*IOCFacts Reply message */ +typedef struct _MPI2_IOC_FACTS_REPLY { + U16 MsgVersion; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 HeaderVersion; /*0x04 */ + U8 IOCNumber; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U16 IOCExceptions; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 MaxChainDepth; /*0x14 */ + U8 WhoInit; /*0x15 */ + U8 NumberOfPorts; /*0x16 */ + U8 MaxMSIxVectors; /*0x17 */ + U16 RequestCredit; /*0x18 */ + U16 ProductID; /*0x1A */ + U32 IOCCapabilities; /*0x1C */ + MPI2_VERSION_UNION FWVersion; /*0x20 */ + U16 IOCRequestFrameSize; /*0x24 */ + U16 IOCMaxChainSegmentSize; /*0x26 */ + U16 MaxInitiators; /*0x28 */ + U16 MaxTargets; /*0x2A */ + U16 MaxSasExpanders; /*0x2C */ + U16 MaxEnclosures; /*0x2E */ + U16 ProtocolFlags; /*0x30 */ + U16 HighPriorityCredit; /*0x32 */ + U16 MaxReplyDescriptorPostQueueDepth; /*0x34 */ + U8 ReplyFrameSize; /*0x36 */ + U8 MaxVolumes; /*0x37 */ + U16 MaxDevHandle; /*0x38 */ + U16 MaxPersistentEntries; /*0x3A */ + U16 MinDevHandle; /*0x3C */ + U16 Reserved4; /*0x3E */ +} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY, + Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t; + +/*MsgVersion */ +#define MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00) +#define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8) +#define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF) +#define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT (0) + +/*HeaderVersion */ +#define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00) +#define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT (8) +#define MPI2_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF) +#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0) + +/*IOCExceptions */ +#define MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE (0x0200) +#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100) + +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_GOOD (0x0000) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_BACKUP (0x0020) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_RESTORED (0x0040) +#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_CORRUPT_BACKUP (0x0060) + +#define MPI2_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED (0x0010) +#define MPI2_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0008) +#define MPI2_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004) +#define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002) +#define 
MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001) + +/*defines for WhoInit field are after the IOCInit Request */ + +/*ProductID field uses MPI2_FW_HEADER_PID_ */ + +/*IOCCapabilities */ +#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) +#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000) +#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000) +#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000) +#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000) +#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000) +#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000) +#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800) +#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100) +#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080) +#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040) +#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020) +#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010) +#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008) +#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004) + +/*ProtocolFlags */ +#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001) +#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002) + +/**************************************************************************** +* PortFacts message +****************************************************************************/ + +/*PortFacts Request message */ +typedef struct _MPI2_PORT_FACTS_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 PortNumber; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ +} MPI2_PORT_FACTS_REQUEST, *PTR_MPI2_PORT_FACTS_REQUEST, + Mpi2PortFactsRequest_t, *pMpi2PortFactsRequest_t; + +/*PortFacts Reply message */ +typedef struct _MPI2_PORT_FACTS_REPLY { + U16 Reserved1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 PortNumber; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 Reserved5; /*0x14 */ + U8 PortType; /*0x15 */ + U16 Reserved6; /*0x16 */ + U16 MaxPostedCmdBuffers; /*0x18 */ + U16 Reserved7; /*0x1A */ +} MPI2_PORT_FACTS_REPLY, *PTR_MPI2_PORT_FACTS_REPLY, + Mpi2PortFactsReply_t, *pMpi2PortFactsReply_t; + +/*PortType values */ +#define MPI2_PORTFACTS_PORTTYPE_INACTIVE (0x00) +#define MPI2_PORTFACTS_PORTTYPE_FC (0x10) +#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20) +#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30) +#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31) + +/**************************************************************************** +* PortEnable message +****************************************************************************/ + +/*PortEnable Request message */ +typedef struct _MPI2_PORT_ENABLE_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U8 Reserved2; /*0x04 */ + U8 PortFlags; /*0x05 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ +} MPI2_PORT_ENABLE_REQUEST, *PTR_MPI2_PORT_ENABLE_REQUEST, + Mpi2PortEnableRequest_t, *pMpi2PortEnableRequest_t; + +/*PortEnable Reply message */ +typedef struct _MPI2_PORT_ENABLE_REPLY { + U16 Reserved1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U8 Reserved2; /*0x04 */ 
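The IOCFacts reply packs its MPI version into MsgVersion and advertises features as individual IOCCapabilities bits, so host code extracts both with the mask-and-shift defines above. A minimal sketch of that pattern follows (an editorial addition, not part of the vendored header); the sample values are hypothetical, and the literals mirror the MPI2_IOCFACTS_* defines so the snippet compiles on its own.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical values as they might arrive in an IOCFacts reply. */
	uint16_t msg_version  = 0x0205;     /* MPI v2.5 */
	uint32_t capabilities = 0x00001040; /* Integrated RAID + EEDP */

	/* MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00) >> _MAJOR_SHIFT (8) */
	unsigned major = (msg_version & 0xFF00) >> 8;
	/* MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF), shift 0 */
	unsigned minor = msg_version & 0x00FF;

	printf("IOC speaks MPI v%u.%u\n", major, minor);

	if (capabilities & 0x00001000) /* MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID */
		printf("Integrated RAID supported\n");
	if (capabilities & 0x00000040) /* MPI2_IOCFACTS_CAPABILITY_EEDP */
		printf("EEDP (protection information) supported\n");
	return 0;
}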
+ U8 PortFlags; /*0x05 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_PORT_ENABLE_REPLY, *PTR_MPI2_PORT_ENABLE_REPLY, + Mpi2PortEnableReply_t, *pMpi2PortEnableReply_t; + +/**************************************************************************** +* EventNotification message +****************************************************************************/ + +/*EventNotification Request message */ +#define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4) + +typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; /*0x14 */ + U16 SASBroadcastPrimitiveMasks; /*0x24 */ + U16 SASNotifyPrimitiveMasks; /*0x26 */ + U32 Reserved8; /*0x28 */ +} MPI2_EVENT_NOTIFICATION_REQUEST, + *PTR_MPI2_EVENT_NOTIFICATION_REQUEST, + Mpi2EventNotificationRequest_t, + *pMpi2EventNotificationRequest_t; + +/*EventNotification Reply message */ +typedef struct _MPI2_EVENT_NOTIFICATION_REPLY { + U16 EventDataLength; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 AckRequired; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U16 Event; /*0x14 */ + U16 Reserved4; /*0x16 */ + U32 EventContext; /*0x18 */ + U32 EventData[1]; /*0x1C */ +} MPI2_EVENT_NOTIFICATION_REPLY, *PTR_MPI2_EVENT_NOTIFICATION_REPLY, + Mpi2EventNotificationReply_t, + *pMpi2EventNotificationReply_t; + +/*AckRequired */ +#define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00) +#define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED (0x01) + +/*Event */ +#define MPI2_EVENT_LOG_DATA (0x0001) +#define MPI2_EVENT_STATE_CHANGE (0x0002) +#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005) +#define MPI2_EVENT_EVENT_CHANGE (0x000A) +#define MPI2_EVENT_TASK_SET_FULL (0x000E) /*obsolete */ +#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F) +#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014) +#define MPI2_EVENT_SAS_DISCOVERY (0x0016) +#define MPI2_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017) +#define MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018) +#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019) +#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C) +#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D) +#define MPI2_EVENT_IR_VOLUME (0x001E) +#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F) +#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020) +#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021) +#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022) +#define MPI2_EVENT_GPIO_INTERRUPT (0x0023) +#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024) +#define MPI2_EVENT_SAS_QUIESCE (0x0025) +#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026) +#define MPI2_EVENT_TEMP_THRESHOLD (0x0027) +#define MPI2_EVENT_HOST_MESSAGE (0x0028) +#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029) +#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E) +#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F) + +/*Log Entry Added Event data */ + +/*the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */ +#define MPI2_EVENT_DATA_LOG_DATA_LENGTH (0x1C) + +typedef struct 
_MPI2_EVENT_DATA_LOG_ENTRY_ADDED { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U16 LogSequence; /*0x0C */ + U16 LogEntryQualifier; /*0x0E */ + U8 VP_ID; /*0x10 */ + U8 VF_ID; /*0x11 */ + U16 Reserved2; /*0x12 */ + U8 LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH]; /*0x14 */ +} MPI2_EVENT_DATA_LOG_ENTRY_ADDED, + *PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED, + Mpi2EventDataLogEntryAdded_t, + *pMpi2EventDataLogEntryAdded_t; + +/*GPIO Interrupt Event data */ + +typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT { + U8 GPIONum; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ +} MPI2_EVENT_DATA_GPIO_INTERRUPT, + *PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT, + Mpi2EventDataGpioInterrupt_t, + *pMpi2EventDataGpioInterrupt_t; + +/*Temperature Threshold Event data */ + +typedef struct _MPI2_EVENT_DATA_TEMPERATURE { + U16 Status; /*0x00 */ + U8 SensorNum; /*0x02 */ + U8 Reserved1; /*0x03 */ + U16 CurrentTemperature; /*0x04 */ + U16 Reserved2; /*0x06 */ + U32 Reserved3; /*0x08 */ + U32 Reserved4; /*0x0C */ +} MPI2_EVENT_DATA_TEMPERATURE, + *PTR_MPI2_EVENT_DATA_TEMPERATURE, + Mpi2EventDataTemperature_t, *pMpi2EventDataTemperature_t; + +/*Temperature Threshold Event data Status bits */ +#define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008) +#define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004) +#define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002) +#define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001) + +/*Host Message Event data */ + +typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE { + U8 SourceVF_ID; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Reserved3; /*0x04 */ + U32 HostData[1]; /*0x08 */ +} MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE, + Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t; + +/*Power Performance Change Event */ + +typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE { + U8 CurrentPowerMode; /*0x00 */ + U8 PreviousPowerMode; /*0x01 */ + U16 Reserved1; /*0x02 */ +} MPI2_EVENT_DATA_POWER_PERF_CHANGE, + *PTR_MPI2_EVENT_DATA_POWER_PERF_CHANGE, + Mpi2EventDataPowerPerfChange_t, + *pMpi2EventDataPowerPerfChange_t; + +/*defines for CurrentPowerMode and PreviousPowerMode fields */ +#define MPI2_EVENT_PM_INIT_MASK (0xC0) +#define MPI2_EVENT_PM_INIT_UNAVAILABLE (0x00) +#define MPI2_EVENT_PM_INIT_HOST (0x40) +#define MPI2_EVENT_PM_INIT_IO_UNIT (0x80) +#define MPI2_EVENT_PM_INIT_PCIE_DPA (0xC0) + +#define MPI2_EVENT_PM_MODE_MASK (0x07) +#define MPI2_EVENT_PM_MODE_UNAVAILABLE (0x00) +#define MPI2_EVENT_PM_MODE_UNKNOWN (0x01) +#define MPI2_EVENT_PM_MODE_FULL_POWER (0x04) +#define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05) +#define MPI2_EVENT_PM_MODE_STANDBY (0x06) + +/*Hard Reset Received Event data */ + +typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED { + U8 Reserved1; /*0x00 */ + U8 Port; /*0x01 */ + U16 Reserved2; /*0x02 */ +} MPI2_EVENT_DATA_HARD_RESET_RECEIVED, + *PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED, + Mpi2EventDataHardResetReceived_t, + *pMpi2EventDataHardResetReceived_t; + +/*Task Set Full Event data */ +/* this event is obsolete */ + +typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL { + U16 DevHandle; /*0x00 */ + U16 CurrentDepth; /*0x02 */ +} MPI2_EVENT_DATA_TASK_SET_FULL, *PTR_MPI2_EVENT_DATA_TASK_SET_FULL, + Mpi2EventDataTaskSetFull_t, *pMpi2EventDataTaskSetFull_t; + +/*SAS Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE { + U16 TaskTag; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U8 ASC; /*0x04 */ + U8 ASCQ; /*0x05 */ + U16 DevHandle; /*0x06 */ + U32 Reserved2; /*0x08 */ + U64 
SASAddress; /*0x0C */ + U8 LUN[8]; /*0x14 */ +} MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, + *PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE, + Mpi2EventDataSasDeviceStatusChange_t, + *pMpi2EventDataSasDeviceStatusChange_t; + +/*SAS Device Status Change Event data ReasonCode values */ +#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05) +#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07) +#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08) +#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09) +#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B) +#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C) +#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F) +#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10) +#define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11) +#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12) + +/*Integrated RAID Operation Status Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS { + U16 VolDevHandle; /*0x00 */ + U16 Reserved1; /*0x02 */ + U8 RAIDOperation; /*0x04 */ + U8 PercentComplete; /*0x05 */ + U16 Reserved2; /*0x06 */ + U32 ElapsedSeconds; /*0x08 */ +} MPI2_EVENT_DATA_IR_OPERATION_STATUS, + *PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS, + Mpi2EventDataIrOperationStatus_t, + *pMpi2EventDataIrOperationStatus_t; + +/*Integrated RAID Operation Status Event data RAIDOperation values */ +#define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00) +#define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02) +#define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03) +#define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04) + +/*Integrated RAID Volume Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_VOLUME { + U16 VolDevHandle; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 NewValue; /*0x04 */ + U32 PreviousValue; /*0x08 */ +} MPI2_EVENT_DATA_IR_VOLUME, *PTR_MPI2_EVENT_DATA_IR_VOLUME, + Mpi2EventDataIrVolume_t, *pMpi2EventDataIrVolume_t; + +/*Integrated RAID Volume Event data ReasonCode values */ +#define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED (0x01) +#define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED (0x02) +#define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03) + +/*Integrated RAID Physical Disk Event data */ + +typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK { + U16 Reserved1; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysDiskNum; /*0x03 */ + U16 PhysDiskDevHandle; /*0x04 */ + U16 Reserved2; /*0x06 */ + U16 Slot; /*0x08 */ + U16 EnclosureHandle; /*0x0A */ + U32 NewValue; /*0x0C */ + U32 PreviousValue; /*0x10 */ +} MPI2_EVENT_DATA_IR_PHYSICAL_DISK, + *PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK, + Mpi2EventDataIrPhysicalDisk_t, + *pMpi2EventDataIrPhysicalDisk_t; + +/*Integrated RAID Physical Disk Event data ReasonCode values */ +#define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED (0x01) +#define MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED (0x02) +#define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03) + +/*Integrated RAID Configuration Change List Event data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumElements at runtime. 
+ */ +#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT +#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1) +#endif + +typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT { + U16 ElementFlags; /*0x00 */ + U16 VolDevHandle; /*0x02 */ + U8 ReasonCode; /*0x04 */ + U8 PhysDiskNum; /*0x05 */ + U16 PhysDiskDevHandle; /*0x06 */ +} MPI2_EVENT_IR_CONFIG_ELEMENT, *PTR_MPI2_EVENT_IR_CONFIG_ELEMENT, + Mpi2EventIrConfigElement_t, *pMpi2EventIrConfigElement_t; + +/*IR Configuration Change List Event data ElementFlags values */ +#define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001) +#define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002) + +/*IR Configuration Change List Event data ReasonCode values */ +#define MPI2_EVENT_IR_CHANGE_RC_ADDED (0x01) +#define MPI2_EVENT_IR_CHANGE_RC_REMOVED (0x02) +#define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03) +#define MPI2_EVENT_IR_CHANGE_RC_HIDE (0x04) +#define MPI2_EVENT_IR_CHANGE_RC_UNHIDE (0x05) +#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06) +#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07) +#define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED (0x08) +#define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED (0x09) + +typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST { + U8 NumElements; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 Reserved2; /*0x02 */ + U8 ConfigNum; /*0x03 */ + U32 Flags; /*0x04 */ + MPI2_EVENT_IR_CONFIG_ELEMENT + ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT];/*0x08 */ +} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, + *PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST, + Mpi2EventDataIrConfigChangeList_t, + *pMpi2EventDataIrConfigChangeList_t; + +/*IR Configuration Change List Event data Flags values */ +#define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001) + +/*SAS Discovery Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY { + U8 Flags; /*0x00 */ + U8 ReasonCode; /*0x01 */ + U8 PhysicalPort; /*0x02 */ + U8 Reserved1; /*0x03 */ + U32 DiscoveryStatus; /*0x04 */ +} MPI2_EVENT_DATA_SAS_DISCOVERY, + *PTR_MPI2_EVENT_DATA_SAS_DISCOVERY, + Mpi2EventDataSasDiscovery_t, *pMpi2EventDataSasDiscovery_t; + +/*SAS Discovery Event data Flags values */ +#define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE (0x02) +#define MPI2_EVENT_SAS_DISC_IN_PROGRESS (0x01) + +/*SAS Discovery Event data ReasonCode values */ +#define MPI2_EVENT_SAS_DISC_RC_STARTED (0x01) +#define MPI2_EVENT_SAS_DISC_RC_COMPLETED (0x02) + +/*SAS Discovery Event data DiscoveryStatus values */ +#define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED (0x80000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED (0x40000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED (0x20000000) +#define MPI2_EVENT_SAS_DISC_DS_MAX_TOPO_PHYS_EXCEED (0x10000000) +#define MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR (0x08000000) +#define MPI2_EVENT_SAS_DISC_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000) +#define MPI2_EVENT_SAS_DISC_DS_EXP_MULTI_SUBTRACTIVE (0x00004000) +#define MPI2_EVENT_SAS_DISC_DS_MULTI_PORT_DOMAIN (0x00002000) +#define MPI2_EVENT_SAS_DISC_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000) +#define MPI2_EVENT_SAS_DISC_DS_UNSUPPORTED_DEVICE (0x00000800) +#define MPI2_EVENT_SAS_DISC_DS_TABLE_LINK (0x00000400) +#define MPI2_EVENT_SAS_DISC_DS_SUBTRACTIVE_LINK (0x00000200) +#define MPI2_EVENT_SAS_DISC_DS_SMP_CRC_ERROR (0x00000100) +#define MPI2_EVENT_SAS_DISC_DS_SMP_FUNCTION_FAILED (0x00000080) +#define MPI2_EVENT_SAS_DISC_DS_INDEX_NOT_EXIST (0x00000040) +#define 
MPI2_EVENT_SAS_DISC_DS_OUT_ROUTE_ENTRIES (0x00000020) +#define MPI2_EVENT_SAS_DISC_DS_SMP_TIMEOUT (0x00000010) +#define MPI2_EVENT_SAS_DISC_DS_MULTIPLE_PORTS (0x00000004) +#define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE (0x00000002) +#define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED (0x00000001) + +/*SAS Broadcast Primitive Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE { + U8 PhyNum; /*0x00 */ + U8 Port; /*0x01 */ + U8 PortWidth; /*0x02 */ + U8 Primitive; /*0x03 */ +} MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, + *PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE, + Mpi2EventDataSasBroadcastPrimitive_t, + *pMpi2EventDataSasBroadcastPrimitive_t; + +/*defines for the Primitive field */ +#define MPI2_EVENT_PRIMITIVE_CHANGE (0x01) +#define MPI2_EVENT_PRIMITIVE_SES (0x02) +#define MPI2_EVENT_PRIMITIVE_EXPANDER (0x03) +#define MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04) +#define MPI2_EVENT_PRIMITIVE_RESERVED3 (0x05) +#define MPI2_EVENT_PRIMITIVE_RESERVED4 (0x06) +#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07) +#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08) + +/*SAS Notify Primitive Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE { + U8 PhyNum; /*0x00 */ + U8 Port; /*0x01 */ + U8 Reserved1; /*0x02 */ + U8 Primitive; /*0x03 */ +} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, + *PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE, + Mpi2EventDataSasNotifyPrimitive_t, + *pMpi2EventDataSasNotifyPrimitive_t; + +/*defines for the Primitive field */ +#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01) +#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02) +#define MPI2_EVENT_NOTIFY_RESERVED1 (0x03) +#define MPI2_EVENT_NOTIFY_RESERVED2 (0x04) + +/*SAS Initiator Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE { + U8 ReasonCode; /*0x00 */ + U8 PhysicalPort; /*0x01 */ + U16 DevHandle; /*0x02 */ + U64 SASAddress; /*0x04 */ +} MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, + *PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE, + Mpi2EventDataSasInitDevStatusChange_t, + *pMpi2EventDataSasInitDevStatusChange_t; + +/*SAS Initiator Device Status Change event ReasonCode values */ +#define MPI2_EVENT_SAS_INIT_RC_ADDED (0x01) +#define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02) + +/*SAS Initiator Device Table Overflow Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW { + U16 MaxInit; /*0x00 */ + U16 CurrentInit; /*0x02 */ + U64 SASAddress; /*0x04 */ +} MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, + *PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW, + Mpi2EventDataSasInitTableOverflow_t, + *pMpi2EventDataSasInitTableOverflow_t; + +/*SAS Topology Change List Event data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumEntries at runtime. 
+ */ +#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT +#define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1) +#endif + +typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY { + U16 AttachedDevHandle; /*0x00 */ + U8 LinkRate; /*0x02 */ + U8 PhyStatus; /*0x03 */ +} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, *PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY, + Mpi2EventSasTopoPhyEntry_t, *pMpi2EventSasTopoPhyEntry_t; + +typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST { + U16 EnclosureHandle; /*0x00 */ + U16 ExpanderDevHandle; /*0x02 */ + U8 NumPhys; /*0x04 */ + U8 Reserved1; /*0x05 */ + U16 Reserved2; /*0x06 */ + U8 NumEntries; /*0x08 */ + U8 StartPhyNum; /*0x09 */ + U8 ExpStatus; /*0x0A */ + U8 PhysicalPort; /*0x0B */ + MPI2_EVENT_SAS_TOPO_PHY_ENTRY + PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /*0x0C */ +} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, + *PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST, + Mpi2EventDataSasTopologyChangeList_t, + *pMpi2EventDataSasTopologyChangeList_t; + +/*values for the ExpStatus field */ +#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00) +#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01) +#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02) +#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03) +#define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04) + +/*defines for the LinkRate field */ +#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xF0) +#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4) +#define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK (0x0F) +#define MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT (0) + +#define MPI2_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00) +#define MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01) +#define MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02) +#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03) +#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04) +#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05) +#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09) +#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A) +#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B) + +/*values for the PhyStatus field */ +#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80) +#define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE (0x10) +/*values for the PhyStatus ReasonCode sub-field */ +#define MPI2_EVENT_SAS_TOPO_RC_MASK (0x0F) +#define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01) +#define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02) +#define MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03) +#define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04) +#define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05) + +/*SAS Enclosure Device Status Change Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE { + U16 EnclosureHandle; /*0x00 */ + U8 ReasonCode; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U64 EnclosureLogicalID; /*0x04 */ + U16 NumSlots; /*0x0C */ + U16 StartSlot; /*0x0E */ + U32 PhyBits; /*0x10 */ +} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, + *PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE, + Mpi2EventDataSasEnclDevStatusChange_t, + *pMpi2EventDataSasEnclDevStatusChange_t; + +/*SAS Enclosure Device Status Change event ReasonCode values */ +#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01) +#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02) + +/*SAS PHY Counter Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER { + U64 TimeStamp; /*0x00 */ + U32 Reserved1; /*0x08 */ + U8 PhyEventCode; /*0x0C */ + U8 PhyNum; /*0x0D */ + U16 Reserved2; /*0x0E */ + U32 PhyEventInfo; /*0x10 */ + U8 CounterType; /*0x14 */ + U8 ThresholdWindow; /*0x15 */ + 
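The SAS Topology Change List event above is the variable-length pattern the header comments describe: PHY[] is declared with a count of one, and host code walks NumEntries entries at runtime, decoding each LinkRate byte with the CURRENT/PREV mask-and-shift pair. A sketch of that loop follows (an editorial addition, not part of the vendored header); the function name is invented, and it assumes this header has already been included so the MPI2_EVENT_* names and the U8/U16 typedefs resolve.

#include <stdio.h>

/* Walk a received topology-change event; assumes mpi2_ioc.h is included. */
static void walk_sas_topo_change(
	const MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST *event_data)
{
	U8 i;

	for (i = 0; i < event_data->NumEntries; i++) {
		const MPI2_EVENT_SAS_TOPO_PHY_ENTRY *e = &event_data->PHY[i];
		U8 rate   = (e->LinkRate & MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK)
			    >> MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
		U8 reason = e->PhyStatus & MPI2_EVENT_SAS_TOPO_RC_MASK;

		if (e->PhyStatus & MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
			continue; /* no PHY behind this entry */

		if (reason == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED)
			printf("phy %u: handle 0x%04x attached, rate code 0x%x\n",
			       (unsigned)(event_data->StartPhyNum + i),
			       (unsigned)e->AttachedDevHandle, (unsigned)rate);
	}
}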
U8 TimeUnits; /*0x16 */ + U8 Reserved3; /*0x17 */ + U32 EventThreshold; /*0x18 */ + U16 ThresholdFlags; /*0x1C */ + U16 Reserved4; /*0x1E */ +} MPI2_EVENT_DATA_SAS_PHY_COUNTER, + *PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER, + Mpi2EventDataSasPhyCounter_t, + *pMpi2EventDataSasPhyCounter_t; + +/*use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h + *for the PhyEventCode field */ + +/*use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h + *for the CounterType field */ + +/*use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h + *for the TimeUnits field */ + +/*use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h + *for the ThresholdFlags field */ + +/*SAS Quiesce Event data */ + +typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE { + U8 ReasonCode; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Reserved3; /*0x04 */ +} MPI2_EVENT_DATA_SAS_QUIESCE, + *PTR_MPI2_EVENT_DATA_SAS_QUIESCE, + Mpi2EventDataSasQuiesce_t, *pMpi2EventDataSasQuiesce_t; + +/*SAS Quiesce Event data ReasonCode values */ +#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01) +#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02) + +/*Host Based Discovery Phy Event data */ + +typedef struct _MPI2_EVENT_HBD_PHY_SAS { + U8 Flags; /*0x00 */ + U8 NegotiatedLinkRate; /*0x01 */ + U8 PhyNum; /*0x02 */ + U8 PhysicalPort; /*0x03 */ + U32 Reserved1; /*0x04 */ + U8 InitialFrame[28]; /*0x08 */ +} MPI2_EVENT_HBD_PHY_SAS, *PTR_MPI2_EVENT_HBD_PHY_SAS, + Mpi2EventHbdPhySas_t, *pMpi2EventHbdPhySas_t; + +/*values for the Flags field */ +#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02) +#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01) + +/*use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h + *for the NegotiatedLinkRate field */ + +typedef union _MPI2_EVENT_HBD_DESCRIPTOR { + MPI2_EVENT_HBD_PHY_SAS Sas; +} MPI2_EVENT_HBD_DESCRIPTOR, *PTR_MPI2_EVENT_HBD_DESCRIPTOR, + Mpi2EventHbdDescriptor_t, *pMpi2EventHbdDescriptor_t; + +typedef struct _MPI2_EVENT_DATA_HBD_PHY { + U8 DescriptorType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Reserved3; /*0x04 */ + MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /*0x08 */ +} MPI2_EVENT_DATA_HBD_PHY, *PTR_MPI2_EVENT_DATA_HBD_PHY, + Mpi2EventDataHbdPhy_t, + *pMpi2EventDataMpi2EventDataHbdPhy_t; + +/*values for the DescriptorType field */ +#define MPI2_EVENT_HBD_DT_SAS (0x01) + +/**************************************************************************** +* EventAck message +****************************************************************************/ + +/*EventAck Request message */ +typedef struct _MPI2_EVENT_ACK_REQUEST { + U16 Reserved1; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Event; /*0x0C */ + U16 Reserved5; /*0x0E */ + U32 EventContext; /*0x10 */ +} MPI2_EVENT_ACK_REQUEST, *PTR_MPI2_EVENT_ACK_REQUEST, + Mpi2EventAckRequest_t, *pMpi2EventAckRequest_t; + +/*EventAck Reply message */ +typedef struct _MPI2_EVENT_ACK_REPLY { + U16 Reserved1; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_EVENT_ACK_REPLY, *PTR_MPI2_EVENT_ACK_REPLY, + Mpi2EventAckReply_t, *pMpi2EventAckReply_t; + +/**************************************************************************** +* 
SendHostMessage message +****************************************************************************/ + +/*SendHostMessage Request message */ +typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST { + U16 HostDataLength; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U8 Reserved4; /*0x0C */ + U8 DestVF_ID; /*0x0D */ + U16 Reserved5; /*0x0E */ + U32 Reserved6; /*0x10 */ + U32 Reserved7; /*0x14 */ + U32 Reserved8; /*0x18 */ + U32 Reserved9; /*0x1C */ + U32 Reserved10; /*0x20 */ + U32 HostData[1]; /*0x24 */ +} MPI2_SEND_HOST_MESSAGE_REQUEST, + *PTR_MPI2_SEND_HOST_MESSAGE_REQUEST, + Mpi2SendHostMessageRequest_t, + *pMpi2SendHostMessageRequest_t; + +/*SendHostMessage Reply message */ +typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY { + U16 HostDataLength; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved1; /*0x04 */ + U8 Reserved2; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_SEND_HOST_MESSAGE_REPLY, *PTR_MPI2_SEND_HOST_MESSAGE_REPLY, + Mpi2SendHostMessageReply_t, *pMpi2SendHostMessageReply_t; + +/**************************************************************************** +* FWDownload message +****************************************************************************/ + +/*MPI v2.0 FWDownload Request message */ +typedef struct _MPI2_FW_DOWNLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 TotalImageSize; /*0x0C */ + U32 Reserved5; /*0x10 */ + MPI2_MPI_SGE_UNION SGL; /*0x14 */ +} MPI2_FW_DOWNLOAD_REQUEST, *PTR_MPI2_FW_DOWNLOAD_REQUEST, + Mpi2FWDownloadRequest, *pMpi2FWDownloadRequest; + +#define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01) + +#define MPI2_FW_DOWNLOAD_ITYPE_FW (0x01) +#define MPI2_FW_DOWNLOAD_ITYPE_BIOS (0x02) +#define MPI2_FW_DOWNLOAD_ITYPE_MANUFACTURING (0x06) +#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07) +#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08) +#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09) +#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A) +#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) +#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY (0x0C) +#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) + +/*MPI v2.0 FWDownload TransactionContext Element */ +typedef struct _MPI2_FW_DOWNLOAD_TCSGE { + U8 Reserved1; /*0x00 */ + U8 ContextSize; /*0x01 */ + U8 DetailsLength; /*0x02 */ + U8 Flags; /*0x03 */ + U32 Reserved2; /*0x04 */ + U32 ImageOffset; /*0x08 */ + U32 ImageSize; /*0x0C */ +} MPI2_FW_DOWNLOAD_TCSGE, *PTR_MPI2_FW_DOWNLOAD_TCSGE, + Mpi2FWDownloadTCSGE_t, *pMpi2FWDownloadTCSGE_t; + +/*MPI v2.5 FWDownload Request message */ +typedef struct _MPI25_FW_DOWNLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 TotalImageSize; /*0x0C */ + U32 Reserved5; /*0x10 */ + U32 Reserved6; /*0x14 */ + U32 ImageOffset; /*0x18 */ + U32 ImageSize; /*0x1C */ + MPI25_SGE_IO_UNION SGL; /*0x20 */ +} 
MPI25_FW_DOWNLOAD_REQUEST, *PTR_MPI25_FW_DOWNLOAD_REQUEST, + Mpi25FWDownloadRequest, *pMpi25FWDownloadRequest; + +/*FWDownload Reply message */ +typedef struct _MPI2_FW_DOWNLOAD_REPLY { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_FW_DOWNLOAD_REPLY, *PTR_MPI2_FW_DOWNLOAD_REPLY, + Mpi2FWDownloadReply_t, *pMpi2FWDownloadReply_t; + +/**************************************************************************** +* FWUpload message +****************************************************************************/ + +/*MPI v2.0 FWUpload Request message */ +typedef struct _MPI2_FW_UPLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + MPI2_MPI_SGE_UNION SGL; /*0x14 */ +} MPI2_FW_UPLOAD_REQUEST, *PTR_MPI2_FW_UPLOAD_REQUEST, + Mpi2FWUploadRequest_t, *pMpi2FWUploadRequest_t; + +#define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT (0x00) +#define MPI2_FW_UPLOAD_ITYPE_FW_FLASH (0x01) +#define MPI2_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02) +#define MPI2_FW_UPLOAD_ITYPE_FW_BACKUP (0x05) +#define MPI2_FW_UPLOAD_ITYPE_MANUFACTURING (0x06) +#define MPI2_FW_UPLOAD_ITYPE_CONFIG_1 (0x07) +#define MPI2_FW_UPLOAD_ITYPE_CONFIG_2 (0x08) +#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09) +#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A) +#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B) + +/*MPI v2.0 FWUpload TransactionContext Element */ +typedef struct _MPI2_FW_UPLOAD_TCSGE { + U8 Reserved1; /*0x00 */ + U8 ContextSize; /*0x01 */ + U8 DetailsLength; /*0x02 */ + U8 Flags; /*0x03 */ + U32 Reserved2; /*0x04 */ + U32 ImageOffset; /*0x08 */ + U32 ImageSize; /*0x0C */ +} MPI2_FW_UPLOAD_TCSGE, *PTR_MPI2_FW_UPLOAD_TCSGE, + Mpi2FWUploadTCSGE_t, *pMpi2FWUploadTCSGE_t; + +/*MPI v2.5 FWUpload Request message */ +typedef struct _MPI25_FW_UPLOAD_REQUEST { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + U32 Reserved7; /*0x14 */ + U32 ImageOffset; /*0x18 */ + U32 ImageSize; /*0x1C */ + MPI25_SGE_IO_UNION SGL; /*0x20 */ +} MPI25_FW_UPLOAD_REQUEST, *PTR_MPI25_FW_UPLOAD_REQUEST, + Mpi25FWUploadRequest_t, *pMpi25FWUploadRequest_t; + +/*FWUpload Reply message */ +typedef struct _MPI2_FW_UPLOAD_REPLY { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 ActualImageSize; /*0x14 */ +} MPI2_FW_UPLOAD_REPLY, *PTR_MPI2_FW_UPLOAD_REPLY, + Mpi2FWUploadReply_t, *pMpi2FWUploadReply_t; + +/*FW Image Header */ +typedef struct _MPI2_FW_IMAGE_HEADER { + U32 Signature; /*0x00 */ + U32 Signature0; /*0x04 */ + U32 Signature1; /*0x08 */ + U32 Signature2; /*0x0C */ + MPI2_VERSION_UNION
MPIVersion; /*0x10 */ + MPI2_VERSION_UNION FWVersion; /*0x14 */ + MPI2_VERSION_UNION NVDATAVersion; /*0x18 */ + MPI2_VERSION_UNION PackageVersion; /*0x1C */ + U16 VendorID; /*0x20 */ + U16 ProductID; /*0x22 */ + U16 ProtocolFlags; /*0x24 */ + U16 Reserved26; /*0x26 */ + U32 IOCCapabilities; /*0x28 */ + U32 ImageSize; /*0x2C */ + U32 NextImageHeaderOffset; /*0x30 */ + U32 Checksum; /*0x34 */ + U32 Reserved38; /*0x38 */ + U32 Reserved3C; /*0x3C */ + U32 Reserved40; /*0x40 */ + U32 Reserved44; /*0x44 */ + U32 Reserved48; /*0x48 */ + U32 Reserved4C; /*0x4C */ + U32 Reserved50; /*0x50 */ + U32 Reserved54; /*0x54 */ + U32 Reserved58; /*0x58 */ + U32 Reserved5C; /*0x5C */ + U32 Reserved60; /*0x60 */ + U32 FirmwareVersionNameWhat; /*0x64 */ + U8 FirmwareVersionName[32]; /*0x68 */ + U32 VendorNameWhat; /*0x88 */ + U8 VendorName[32]; /*0x8C */ + U32 PackageNameWhat; /*0xAC */ + U8 PackageName[32]; /*0xB0 */ + U32 ReservedD0; /*0xD0 */ + U32 ReservedD4; /*0xD4 */ + U32 ReservedD8; /*0xD8 */ + U32 ReservedDC; /*0xDC */ + U32 ReservedE0; /*0xE0 */ + U32 ReservedE4; /*0xE4 */ + U32 ReservedE8; /*0xE8 */ + U32 ReservedEC; /*0xEC */ + U32 ReservedF0; /*0xF0 */ + U32 ReservedF4; /*0xF4 */ + U32 ReservedF8; /*0xF8 */ + U32 ReservedFC; /*0xFC */ +} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER, + Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t; + +/*Signature field */ +#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00) +#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000) +#define MPI2_FW_HEADER_SIGNATURE (0xEA000000) + +/*Signature0 field */ +#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04) +#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A) + +/*Signature1 field */ +#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08) +#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5) + +/*Signature2 field */ +#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C) +#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA) + +/*defines for using the ProductID field */ +#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000) +#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000) + +#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00) +#define MPI2_FW_HEADER_PID_PROD_A (0x0000) +#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200) +#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700) + +#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF) +/*SAS ProductID Family bits */ +#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013) +#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014) +#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021) + +/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */ + +/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */ + +#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C) +#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30) +#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64) + +#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840) + +#define MPI2_FW_HEADER_SIZE (0x100) + +/*Extended Image Header */ +typedef struct _MPI2_EXT_IMAGE_HEADER { + U8 ImageType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 Checksum; /*0x04 */ + U32 ImageSize; /*0x08 */ + U32 NextImageHeaderOffset; /*0x0C */ + U32 PackageVersion; /*0x10 */ + U32 Reserved3; /*0x14 */ + U32 Reserved4; /*0x18 */ + U32 Reserved5; /*0x1C */ + U8 IdentifyString[32]; /*0x20 */ +} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER, + Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t; + +/*useful offsets */ +#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00) +#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08) +#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C) + +#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
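+/*
+ * Editor's note: the sketch below is illustrative only and is not part of
+ * the original LSI header.  It shows how host code is expected to use
+ * NextImageHeaderOffset to walk from the FW image header through the chain
+ * of extended image headers: offsets are relative to the start of the
+ * flash image and a zero offset terminates the chain.  The helper name
+ * mpi2_find_ext_image, the buffer "image", and its length are hypothetical;
+ * le32_to_cpu() is the usual Linux accessor for the __le32-typed U32.
+ * A caller might use it as, e.g.,
+ * mpi2_find_ext_image(buf, len, MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT).
+ */
+static inline PTR_MPI2_EXT_IMAGE_HEADER
+mpi2_find_ext_image(u8 *image, u32 image_len, u8 wanted_type)
+{
+	PTR_MPI2_FW_IMAGE_HEADER fw = (PTR_MPI2_FW_IMAGE_HEADER)image;
+	u32 offset = le32_to_cpu(fw->NextImageHeaderOffset);
+
+	while (offset != 0 &&
+	       offset + MPI2_EXT_IMAGE_HEADER_SIZE <= image_len) {
+		PTR_MPI2_EXT_IMAGE_HEADER ext =
+			(PTR_MPI2_EXT_IMAGE_HEADER)(image + offset);
+
+		if (ext->ImageType == wanted_type)
+			return ext;
+		/* production code should also bound the number of hops */
+		offset = le32_to_cpu(ext->NextImageHeaderOffset);
+	}
+	return NULL;
+}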
+ +/*defines for the ImageType field */ +#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00) +#define MPI2_EXT_IMAGE_TYPE_FW (0x01) +#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03) +#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04) +#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05) +#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06) +#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07) +#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) +#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09) +#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF) + +#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) + +/*FLASH Layout Extended Image Data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check RegionsPerLayout at runtime. + */ +#ifndef MPI2_FLASH_NUMBER_OF_REGIONS +#define MPI2_FLASH_NUMBER_OF_REGIONS (1) +#endif + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumberOfLayouts at runtime. + */ +#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS +#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1) +#endif + +typedef struct _MPI2_FLASH_REGION { + U8 RegionType; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 RegionOffset; /*0x04 */ + U32 RegionSize; /*0x08 */ + U32 Reserved3; /*0x0C */ +} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION, + Mpi2FlashRegion_t, *pMpi2FlashRegion_t; + +typedef struct _MPI2_FLASH_LAYOUT { + U32 FlashSize; /*0x00 */ + U32 Reserved1; /*0x04 */ + U32 Reserved2; /*0x08 */ + U32 Reserved3; /*0x0C */ + MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */ +} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT, + Mpi2FlashLayout_t, *pMpi2FlashLayout_t; + +typedef struct _MPI2_FLASH_LAYOUT_DATA { + U8 ImageRevision; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 SizeOfRegion; /*0x02 */ + U8 Reserved2; /*0x03 */ + U16 NumberOfLayouts; /*0x04 */ + U16 RegionsPerLayout; /*0x06 */ + U16 MinimumSectorAlignment; /*0x08 */ + U16 Reserved3; /*0x0A */ + U32 Reserved4; /*0x0C */ + MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */ +} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA, + Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t; + +/*defines for the RegionType field */ +#define MPI2_FLASH_REGION_UNUSED (0x00) +#define MPI2_FLASH_REGION_FIRMWARE (0x01) +#define MPI2_FLASH_REGION_BIOS (0x02) +#define MPI2_FLASH_REGION_NVDATA (0x03) +#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05) +#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06) +#define MPI2_FLASH_REGION_CONFIG_1 (0x07) +#define MPI2_FLASH_REGION_CONFIG_2 (0x08) +#define MPI2_FLASH_REGION_MEGARAID (0x09) +#define MPI2_FLASH_REGION_INIT (0x0A) + +/*ImageRevision */ +#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00) + +/*Supported Devices Extended Image Data */ + +/* + *Host code (drivers, BIOS, utilities, etc.) should leave this define set to + *one and check NumberOfDevices at runtime. 
+ */ +#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES +#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1) +#endif + +typedef struct _MPI2_SUPPORTED_DEVICE { + U16 DeviceID; /*0x00 */ + U16 VendorID; /*0x02 */ + U16 DeviceIDMask; /*0x04 */ + U16 Reserved1; /*0x06 */ + U8 LowPCIRev; /*0x08 */ + U8 HighPCIRev; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 Reserved3; /*0x0C */ +} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE, + Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t; + +typedef struct _MPI2_SUPPORTED_DEVICES_DATA { + U8 ImageRevision; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 NumberOfDevices; /*0x02 */ + U8 Reserved2; /*0x03 */ + U32 Reserved3; /*0x04 */ + MPI2_SUPPORTED_DEVICE + SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */ +} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA, + Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t; + +/*ImageRevision */ +#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00) + +/*Init Extended Image Data */ + +typedef struct _MPI2_INIT_IMAGE_FOOTER { + U32 BootFlags; /*0x00 */ + U32 ImageSize; /*0x04 */ + U32 Signature0; /*0x08 */ + U32 Signature1; /*0x0C */ + U32 Signature2; /*0x10 */ + U32 ResetVector; /*0x14 */ +} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER, + Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t; + +/*defines for the BootFlags field */ +#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00) + +/*defines for the ImageSize field */ +#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04) + +/*defines for the Signature0 field */ +#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08) +#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA) + +/*defines for the Signature1 field */ +#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C) +#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5) + +/*defines for the Signature2 field */ +#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10) +#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A) + +/*Signature fields as individual bytes */ +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A) + +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5) + +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA) +#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A) + +/*defines for the ResetVector field */ +#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14) + + +/* Encrypted Hash Extended Image Data */ + +typedef struct _MPI25_ENCRYPTED_HASH_ENTRY { + U8 HashImageType; /* 0x00 */ + U8 HashAlgorithm; /* 0x01 */ + U8 EncryptionAlgorithm; /* 0x02 */ + U8 Reserved1; /* 0x03 */ + U32 Reserved2; /* 0x04 */ + U32 EncryptedHash[1]; /* 0x08 */ /* variable length */ +} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY, +Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t; + +/* values for HashImageType */ +#define MPI25_HASH_IMAGE_TYPE_UNUSED (0x00) +#define MPI25_HASH_IMAGE_TYPE_FIRMWARE (0x01) +#define MPI25_HASH_IMAGE_TYPE_BIOS (0x02) + +/* values for HashAlgorithm */ +#define MPI25_HASH_ALGORITHM_UNUSED (0x00) +#define MPI25_HASH_ALGORITHM_SHA256 (0x01) + +/* values for EncryptionAlgorithm */ +#define MPI25_ENCRYPTION_ALG_UNUSED (0x00) +#define MPI25_ENCRYPTION_ALG_RSA256 (0x01) + +typedef struct _MPI25_ENCRYPTED_HASH_DATA { + U8 ImageVersion; 
/* 0x00 */ + U8 NumHash; /* 0x01 */ + U16 Reserved1; /* 0x02 */ + U32 Reserved2; /* 0x04 */ + MPI25_ENCRYPTED_HASH_ENTRY EncryptedHashEntry[1]; /* 0x08 */ +} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA, +Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t; + + + +/**************************************************************************** +* PowerManagementControl message +****************************************************************************/ + +/*PowerManagementControl Request message */ +typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST { + U8 Feature; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 Parameter1; /*0x0C */ + U8 Parameter2; /*0x0D */ + U8 Parameter3; /*0x0E */ + U8 Parameter4; /*0x0F */ + U32 Reserved5; /*0x10 */ + U32 Reserved6; /*0x14 */ +} MPI2_PWR_MGMT_CONTROL_REQUEST, *PTR_MPI2_PWR_MGMT_CONTROL_REQUEST, + Mpi2PwrMgmtControlRequest_t, *pMpi2PwrMgmtControlRequest_t; + +/*defines for the Feature field */ +#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01) +#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02) +#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /*obsolete */ +#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04) +#define MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE (0x05) +#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF) + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */ +/*Parameter1 contains a PHY number */ +/*Parameter2 indicates power condition action using these defines */ +#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01) +#define MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02) +#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03) +/*Parameter3 and Parameter4 are reserved */ + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION + * Feature */ +/*Parameter1 contains SAS port width modulation group number */ +/*Parameter2 indicates IOC action using these defines */ +#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01) +#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02) +#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03) +/*Parameter3 indicates desired modulation level using these defines */ +#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00) +#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01) +#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02) +#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03) +/*Parameter4 is reserved */ + +/*this next set (_PCIE_LINK) is obsolete */ +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */ +/*Parameter1 indicates desired PCIe link speed using these defines */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /*obsolete */ +/*Parameter2 indicates desired PCIe link width using these defines */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /*obsolete */ +#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /*obsolete */ +/*Parameter3 and Parameter4 are reserved */ + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */ +/*Parameter1 indicates desired IOC hardware clock speed using these defines */ 
+#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01) +#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02) +#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04) +#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08) +/*Parameter2, Parameter3, and Parameter4 are reserved */ + +/*parameter usage for the MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE Feature*/ +/*Parameter1 indicates host action regarding global power management mode */ +#define MPI2_PM_CONTROL_PARAM1_TAKE_CONTROL (0x01) +#define MPI2_PM_CONTROL_PARAM1_CHANGE_GLOBAL_MODE (0x02) +#define MPI2_PM_CONTROL_PARAM1_RELEASE_CONTROL (0x03) +/*Parameter2 indicates the requested global power management mode */ +#define MPI2_PM_CONTROL_PARAM2_FULL_PWR_PERF (0x01) +#define MPI2_PM_CONTROL_PARAM2_REDUCED_PWR_PERF (0x08) +#define MPI2_PM_CONTROL_PARAM2_STANDBY (0x40) +/*Parameter3 and Parameter4 are reserved */ + +/*PowerManagementControl Reply message */ +typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY { + U8 Feature; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_PWR_MGMT_CONTROL_REPLY, *PTR_MPI2_PWR_MGMT_CONTROL_REPLY, + Mpi2PwrMgmtControlReply_t, *pMpi2PwrMgmtControlReply_t; + +#endif diff --git a/addons/mpt3sas/src/4.4.180/mpi/mpi2_raid.h b/addons/mpt3sas/src/4.4.180/mpi/mpi2_raid.h new file mode 100644 index 00000000..13d93ca0 --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpi/mpi2_raid.h @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2000-2014 LSI Corporation. + * + * + * Name: mpi2_raid.h + * Title: MPI Integrated RAID messages and structures + * Creation Date: April 26, 2007 + * + * mpi2_raid.h Version: 02.00.10 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 08-31-07 02.00.01 Modifications to RAID Action request and reply, + * including the Actions and ActionData. + * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD. + * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that + * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT + * can be sized by the build environment. + * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of + * VolumeCreationFlags and marked the old one as obsolete. + * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define. + * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with + * related structures and defines. + * Added product-specific range to RAID Action values. + * 11-18-11 02.00.07 Incorporating additions for MPI v2.5. + * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN. + * 07-26-12 02.00.09 Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR. + * Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define. + * 04-17-13 02.00.10 Added MPI25_RAID_ACTION_ADATA_ALLOW_PI. 
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_RAID_H +#define MPI2_RAID_H + +/***************************************************************************** +* +* Integrated RAID Messages +* +*****************************************************************************/ + +/**************************************************************************** +* RAID Action messages +****************************************************************************/ + +/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */ +#define MPI25_RAID_ACTION_ADATA_ALLOW_PI (0x80000000) + +/*ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */ +#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000) +#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001) + +/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for + *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ + +/*ActionDataWord defines for use with + *MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */ +#define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001) + +/*ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */ +typedef struct _MPI2_RAID_ACTION_RATE_DATA { + U8 RateToChange; /*0x00 */ + U8 RateOrMode; /*0x01 */ + U16 DataScrubDuration; /*0x02 */ +} MPI2_RAID_ACTION_RATE_DATA, *PTR_MPI2_RAID_ACTION_RATE_DATA, + Mpi2RaidActionRateData_t, *pMpi2RaidActionRateData_t; + +#define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00) +#define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01) +#define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02) + +/*ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */ +typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION { + U8 RAIDFunction; /*0x00 */ + U8 Flags; /*0x01 */ + U16 Reserved1; /*0x02 */ +} MPI2_RAID_ACTION_START_RAID_FUNCTION, + *PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION, + Mpi2RaidActionStartRaidFunction_t, + *pMpi2RaidActionStartRaidFunction_t; + +/*defines for the RAIDFunction field */ +#define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00) +#define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02) + +/*defines for the Flags field */ +#define MPI2_RAID_ACTION_START_NEW (0x00) +#define MPI2_RAID_ACTION_START_RESUME (0x01) + +/*ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */ +typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION { + U8 RAIDFunction; /*0x00 */ + U8 Flags; /*0x01 */ + U16 Reserved1; /*0x02 */ +} MPI2_RAID_ACTION_STOP_RAID_FUNCTION, + *PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION, + Mpi2RaidActionStopRaidFunction_t, + *pMpi2RaidActionStopRaidFunction_t; + +/*defines for the RAIDFunction field */ +#define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00) +#define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01) +#define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02) + +/*defines for the Flags field */ +#define MPI2_RAID_ACTION_STOP_ABORT (0x00) +#define MPI2_RAID_ACTION_STOP_PAUSE (0x01) + +/*ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */ +typedef struct _MPI2_RAID_ACTION_HOT_SPARE { + U8 HotSparePool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U16 DevHandle; /*0x02 */ +} MPI2_RAID_ACTION_HOT_SPARE, *PTR_MPI2_RAID_ACTION_HOT_SPARE, + Mpi2RaidActionHotSpare_t, *pMpi2RaidActionHotSpare_t; + +/*ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */ +typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE { + U8 Flags; /*0x00 */ + U8 DeviceFirmwareUpdateModeTimeout; /*0x01 */ + U16 Reserved1; /*0x02 */ +} 
MPI2_RAID_ACTION_FW_UPDATE_MODE, + *PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE, + Mpi2RaidActionFwUpdateMode_t, + *pMpi2RaidActionFwUpdateMode_t; + +/*ActionDataWord defines for use with + *MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */ +#define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00) +#define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01) + +typedef union _MPI2_RAID_ACTION_DATA { + U32 Word; + MPI2_RAID_ACTION_RATE_DATA Rates; + MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction; + MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction; + MPI2_RAID_ACTION_HOT_SPARE HotSpare; + MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode; +} MPI2_RAID_ACTION_DATA, *PTR_MPI2_RAID_ACTION_DATA, + Mpi2RaidActionData_t, *pMpi2RaidActionData_t; + +/*RAID Action Request Message */ +typedef struct _MPI2_RAID_ACTION_REQUEST { + U8 Action; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 VolDevHandle; /*0x04 */ + U8 PhysDiskNum; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U32 Reserved3; /*0x0C */ + MPI2_RAID_ACTION_DATA ActionDataWord; /*0x10 */ + MPI2_SGE_SIMPLE_UNION ActionDataSGE; /*0x14 */ +} MPI2_RAID_ACTION_REQUEST, *PTR_MPI2_RAID_ACTION_REQUEST, + Mpi2RaidActionRequest_t, *pMpi2RaidActionRequest_t; + +/*RAID Action request Action values */ + +#define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01) +#define MPI2_RAID_ACTION_CREATE_VOLUME (0x02) +#define MPI2_RAID_ACTION_DELETE_VOLUME (0x03) +#define MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES (0x04) +#define MPI2_RAID_ACTION_ENABLE_ALL_VOLUMES (0x05) +#define MPI2_RAID_ACTION_PHYSDISK_OFFLINE (0x0A) +#define MPI2_RAID_ACTION_PHYSDISK_ONLINE (0x0B) +#define MPI2_RAID_ACTION_FAIL_PHYSDISK (0x0F) +#define MPI2_RAID_ACTION_ACTIVATE_VOLUME (0x11) +#define MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15) +#define MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE (0x17) +#define MPI2_RAID_ACTION_SET_VOLUME_NAME (0x18) +#define MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE (0x19) +#define MPI2_RAID_ACTION_ENABLE_FAILED_VOLUME (0x1C) +#define MPI2_RAID_ACTION_CREATE_HOT_SPARE (0x1D) +#define MPI2_RAID_ACTION_DELETE_HOT_SPARE (0x1E) +#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20) +#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21) +#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22) +#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23) +#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24) +#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80) +#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF) + +/*RAID Volume Creation Structure */ + +/* + *The following define can be customized for the targeted product. 
+ */ +#ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS +#define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1) +#endif + +typedef struct _MPI2_RAID_VOLUME_PHYSDISK { + U8 RAIDSetNum; /*0x00 */ + U8 PhysDiskMap; /*0x01 */ + U16 PhysDiskDevHandle; /*0x02 */ +} MPI2_RAID_VOLUME_PHYSDISK, *PTR_MPI2_RAID_VOLUME_PHYSDISK, + Mpi2RaidVolumePhysDisk_t, *pMpi2RaidVolumePhysDisk_t; + +/*defines for the PhysDiskMap field */ +#define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01) +#define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02) + +typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT { + U8 NumPhysDisks; /*0x00 */ + U8 VolumeType; /*0x01 */ + U16 Reserved1; /*0x02 */ + U32 VolumeCreationFlags; /*0x04 */ + U32 VolumeSettings; /*0x08 */ + U8 Reserved2; /*0x0C */ + U8 ResyncRate; /*0x0D */ + U16 DataScrubDuration; /*0x0E */ + U64 VolumeMaxLBA; /*0x10 */ + U32 StripeSize; /*0x18 */ + U8 Name[16]; /*0x1C */ + MPI2_RAID_VOLUME_PHYSDISK + PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS]; /*0x2C */ +} MPI2_RAID_VOLUME_CREATION_STRUCT, + *PTR_MPI2_RAID_VOLUME_CREATION_STRUCT, + Mpi2RaidVolumeCreationStruct_t, + *pMpi2RaidVolumeCreationStruct_t; + +/*use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */ + +/*defines for the VolumeCreationFlags field */ +#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000) +#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004) +#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002) +#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001) +/*The following is an obsolete define. + *It must be shifted left 24 bits in order to set the proper bit. + */ +#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80) + +/*RAID Online Capacity Expansion Structure */ + +typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION { + U32 Flags; /*0x00 */ + U16 DevHandle0; /*0x04 */ + U16 Reserved1; /*0x06 */ + U16 DevHandle1; /*0x08 */ + U16 Reserved2; /*0x0A */ +} MPI2_RAID_ONLINE_CAPACITY_EXPANSION, + *PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION, + Mpi2RaidOnlineCapacityExpansion_t, + *pMpi2RaidOnlineCapacityExpansion_t; + +/*RAID Compatibility Input Structure */ + +typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT { + U16 SourceDevHandle; /*0x00 */ + U16 CandidateDevHandle; /*0x02 */ + U32 Flags; /*0x04 */ + U32 Reserved1; /*0x08 */ + U32 Reserved2; /*0x0C */ +} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, + *PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT, + Mpi2RaidCompatibilityInputStruct_t, + *pMpi2RaidCompatibilityInputStruct_t; + +/*defines for RAID Compatibility Structure Flags field */ +#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002) +#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001) + +/*RAID Volume Indicator Structure */ + +typedef struct _MPI2_RAID_VOL_INDICATOR { + U64 TotalBlocks; /*0x00 */ + U64 BlocksRemaining; /*0x08 */ + U32 Flags; /*0x10 */ + U32 ElapsedSeconds; /* 0x14 */ +} MPI2_RAID_VOL_INDICATOR, *PTR_MPI2_RAID_VOL_INDICATOR, + Mpi2RaidVolIndicator_t, *pMpi2RaidVolIndicator_t; + +/*defines for RAID Volume Indicator Flags field */ +#define MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID (0x80000000) +#define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F) +#define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000) +#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001) +#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002) +#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003) +#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004) + +/*RAID Compatibility Result Structure */ + +typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT { + U8 State; /*0x00 */ + U8 
Reserved1; /*0x01 */ + U16 Reserved2; /*0x02 */ + U32 GenericAttributes; /*0x04 */ + U32 OEMSpecificAttributes; /*0x08 */ + U32 Reserved3; /*0x0C */ + U32 Reserved4; /*0x10 */ +} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, + *PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT, + Mpi2RaidCompatibilityResultStruct_t, + *pMpi2RaidCompatibilityResultStruct_t; + +/*defines for RAID Compatibility Result Structure State field */ +#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00) +#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01) + +/*defines for RAID Compatibility Result Structure GenericAttributes field */ +#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010) + +#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C) +#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008) +#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004) + +#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003) +#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002) +#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001) + +/*RAID Action Reply ActionData union */ +typedef union _MPI2_RAID_ACTION_REPLY_DATA { + U32 Word[6]; + MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator; + U16 VolDevHandle; + U8 VolumeState; + U8 PhysDiskNum; + MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult; +} MPI2_RAID_ACTION_REPLY_DATA, *PTR_MPI2_RAID_ACTION_REPLY_DATA, + Mpi2RaidActionReplyData_t, *pMpi2RaidActionReplyData_t; + +/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for + *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */ + +/*RAID Action Reply Message */ +typedef struct _MPI2_RAID_ACTION_REPLY { + U8 Action; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 VolDevHandle; /*0x04 */ + U8 PhysDiskNum; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved2; /*0x0A */ + U16 Reserved3; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + MPI2_RAID_ACTION_REPLY_DATA ActionData; /*0x14 */ +} MPI2_RAID_ACTION_REPLY, *PTR_MPI2_RAID_ACTION_REPLY, + Mpi2RaidActionReply_t, *pMpi2RaidActionReply_t; + +#endif diff --git a/addons/mpt3sas/src/4.4.180/mpi/mpi2_sas.h b/addons/mpt3sas/src/4.4.180/mpi/mpi2_sas.h new file mode 100644 index 00000000..156e3054 --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpi/mpi2_sas.h @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2000-2014 LSI Corporation. + * + * + * Name: mpi2_sas.h + * Title: MPI Serial Attached SCSI structures and definitions + * Creation Date: February 9, 2007 + * + * mpi2_sas.h Version: 02.00.08 + * + * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 + * prefix are for use only on MPI v2.5 products, and must not be used + * with MPI v2.0 products. Unless otherwise noted, names beginning with + * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products. + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit + * Control Request. + * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control + * Request. + * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST + * to MPI2_SGE_IO_UNION since it supports chained SGLs. + * 05-12-10 02.00.04 Modified some comments. + * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control. 
+ * 11-18-11 02.00.06 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA + * Passthrough Request message. + * 08-19-13 02.00.08 Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete + * for anything newer than MPI v2.0. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_SAS_H +#define MPI2_SAS_H + +/* + *Values for SASStatus. + */ +#define MPI2_SASSTATUS_SUCCESS (0x00) +#define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01) +#define MPI2_SASSTATUS_INVALID_FRAME (0x02) +#define MPI2_SASSTATUS_UTC_BAD_DEST (0x03) +#define MPI2_SASSTATUS_UTC_BREAK_RECEIVED (0x04) +#define MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05) +#define MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06) +#define MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07) +#define MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08) +#define MPI2_SASSTATUS_UTC_WRONG_DESTINATION (0x09) +#define MPI2_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A) +#define MPI2_SASSTATUS_LONG_INFORMATION_UNIT (0x0B) +#define MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C) +#define MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D) +#define MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E) +#define MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F) +#define MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10) +#define MPI2_SASSTATUS_DATA_OFFSET_ERROR (0x11) +#define MPI2_SASSTATUS_SDSF_NAK_RECEIVED (0x12) +#define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13) +#define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14) + +/* + *Values for the SAS DeviceInfo field used in SAS Device Status Change Event + *data and SAS Configuration pages. + */ +#define MPI2_SAS_DEVICE_INFO_SEP (0x00004000) +#define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000) +#define MPI2_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000) +#define MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800) +#define MPI2_SAS_DEVICE_INFO_SSP_TARGET (0x00000400) +#define MPI2_SAS_DEVICE_INFO_STP_TARGET (0x00000200) +#define MPI2_SAS_DEVICE_INFO_SMP_TARGET (0x00000100) +#define MPI2_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080) +#define MPI2_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040) +#define MPI2_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020) +#define MPI2_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010) +#define MPI2_SAS_DEVICE_INFO_SATA_HOST (0x00000008) + +#define MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007) +#define MPI2_SAS_DEVICE_INFO_NO_DEVICE (0x00000000) +#define MPI2_SAS_DEVICE_INFO_END_DEVICE (0x00000001) +#define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002) +#define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003) + +/***************************************************************************** +* +* SAS Messages +* +*****************************************************************************/ + +/**************************************************************************** +* SMP Passthrough messages +****************************************************************************/ + +/*SMP Passthrough Request Message */ +typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST { + U8 PassthroughFlags; /*0x00 */ + U8 PhysicalPort; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 RequestDataLength; /*0x04 */ + U8 SGLFlags; /*0x06*//*MPI v2.0 only. 
Reserved on MPI v2.5*/ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Reserved2; /*0x0C */ + U64 SASAddress; /*0x10 */ + U32 Reserved3; /*0x18 */ + U32 Reserved4; /*0x1C */ + MPI2_SIMPLE_SGE_UNION SGL;/*0x20 */ +} MPI2_SMP_PASSTHROUGH_REQUEST, *PTR_MPI2_SMP_PASSTHROUGH_REQUEST, + Mpi2SmpPassthroughRequest_t, *pMpi2SmpPassthroughRequest_t; + +/*values for PassthroughFlags field */ +#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80) + +/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*SMP Passthrough Reply Message */ +typedef struct _MPI2_SMP_PASSTHROUGH_REPLY { + U8 PassthroughFlags; /*0x00 */ + U8 PhysicalPort; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 ResponseDataLength; /*0x04 */ + U8 SGLFlags; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U8 Reserved2; /*0x0C */ + U8 SASStatus; /*0x0D */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 Reserved3; /*0x14 */ + U8 ResponseData[4]; /*0x18 */ +} MPI2_SMP_PASSTHROUGH_REPLY, *PTR_MPI2_SMP_PASSTHROUGH_REPLY, + Mpi2SmpPassthroughReply_t, *pMpi2SmpPassthroughReply_t; + +/*values for PassthroughFlags field */ +#define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80) + +/*values for SASStatus field are at the top of this file */ + +/**************************************************************************** +* SATA Passthrough messages +****************************************************************************/ + +typedef union _MPI2_SATA_PT_SGE_UNION { + MPI2_SGE_SIMPLE_UNION MpiSimple; /*MPI v2.0 only */ + MPI2_SGE_CHAIN_UNION MpiChain; /*MPI v2.0 only */ + MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple; + MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; /*MPI v2.0 only */ + MPI25_IEEE_SGE_CHAIN64 IeeeChain64; /*MPI v2.5 only */ +} MPI2_SATA_PT_SGE_UNION, *PTR_MPI2_SATA_PT_SGE_UNION, + Mpi2SataPTSGEUnion_t, *pMpi2SataPTSGEUnion_t; + +/*SATA Passthrough Request Message */ +typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST { + U16 DevHandle; /*0x00 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 PassthroughFlags; /*0x04 */ + U8 SGLFlags; /*0x06*//*MPI v2.0 only. 
Reserved on MPI v2.5*/ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U32 Reserved2; /*0x0C */ + U32 Reserved3; /*0x10 */ + U32 Reserved4; /*0x14 */ + U32 DataLength; /*0x18 */ + U8 CommandFIS[20]; /*0x1C */ + MPI2_SATA_PT_SGE_UNION SGL;/*0x30*//*MPI v2.5: IEEE 64 elements only*/ +} MPI2_SATA_PASSTHROUGH_REQUEST, *PTR_MPI2_SATA_PASSTHROUGH_REQUEST, + Mpi2SataPassthroughRequest_t, + *pMpi2SataPassthroughRequest_t; + +/*values for PassthroughFlags field */ +#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100) +#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020) +#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010) +#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004) +#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002) +#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001) + +/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*SATA Passthrough Reply Message */ +typedef struct _MPI2_SATA_PASSTHROUGH_REPLY { + U16 DevHandle; /*0x00 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 PassthroughFlags; /*0x04 */ + U8 SGLFlags; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved1; /*0x0A */ + U8 Reserved2; /*0x0C */ + U8 SASStatus; /*0x0D */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 StatusFIS[20]; /*0x14 */ + U32 StatusControlRegisters; /*0x28 */ + U32 TransferCount; /*0x2C */ +} MPI2_SATA_PASSTHROUGH_REPLY, *PTR_MPI2_SATA_PASSTHROUGH_REPLY, + Mpi2SataPassthroughReply_t, *pMpi2SataPassthroughReply_t; + +/*values for SASStatus field are at the top of this file */ + +/**************************************************************************** +* SAS IO Unit Control messages +****************************************************************************/ + +/*SAS IO Unit Control Request Message */ +typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST { + U8 Operation; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 DevHandle; /*0x04 */ + U8 IOCParameter; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U8 PhyNum; /*0x0E */ + U8 PrimFlags; /*0x0F */ + U32 Primitive; /*0x10 */ + U8 LookupMethod; /*0x14 */ + U8 Reserved5; /*0x15 */ + U16 SlotNumber; /*0x16 */ + U64 LookupAddress; /*0x18 */ + U32 IOCParameterValue; /*0x20 */ + U32 Reserved7; /*0x24 */ + U32 Reserved8; /*0x28 */ +} MPI2_SAS_IOUNIT_CONTROL_REQUEST, + *PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST, + Mpi2SasIoUnitControlRequest_t, + *pMpi2SasIoUnitControlRequest_t; + +/*values for the Operation field */ +#define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02) +#define MPI2_SAS_OP_PHY_LINK_RESET (0x06) +#define MPI2_SAS_OP_PHY_HARD_RESET (0x07) +#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08) +#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A) +#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B) +#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) /* MPI v2.0 only */ +#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D) +#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E) +#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F) +#define MPI25_SAS_OP_ENABLE_FP_DEVICE (0x10) +#define MPI25_SAS_OP_DISABLE_FP_DEVICE (0x11) +#define MPI25_SAS_OP_ENABLE_FP_ALL (0x12) +#define MPI25_SAS_OP_DISABLE_FP_ALL (0x13) +#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14) +#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15) +#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80) + +/*values for the PrimFlags field */ +#define MPI2_SAS_PRIMFLAGS_SINGLE (0x08) +#define 
MPI2_SAS_PRIMFLAGS_TRIPLE (0x02) +#define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01) + +/*values for the LookupMethod field */ +#define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01) +#define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02) +#define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03) + +/*SAS IO Unit Control Reply Message */ +typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY { + U8 Operation; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 DevHandle; /*0x04 */ + U8 IOCParameter; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved3; /*0x0A */ + U16 Reserved4; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_SAS_IOUNIT_CONTROL_REPLY, + *PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY, + Mpi2SasIoUnitControlReply_t, *pMpi2SasIoUnitControlReply_t; + +#endif diff --git a/addons/mpt3sas/src/4.4.180/mpi/mpi2_tool.h b/addons/mpt3sas/src/4.4.180/mpi/mpi2_tool.h new file mode 100644 index 00000000..1629e5bc --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpi/mpi2_tool.h @@ -0,0 +1,482 @@ +/* + * Copyright (c) 2000-2014 LSI Corporation. + * + * + * Name: mpi2_tool.h + * Title: MPI diagnostic tool structures and definitions + * Creation Date: March 26, 2007 + * + * mpi2_tool.h Version: 02.00.12 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release + * structures and defines. + * 02-29-08 02.00.02 Modified various names to make them 32-character unique. + * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool. + * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request + * and reply messages. + * Added MPI2_DIAG_BUF_TYPE_EXTENDED. + * Incremented MPI2_DIAG_BUF_TYPE_COUNT. + * 05-12-10 02.00.05 Added Diagnostic Data Upload tool. + * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer + * Post Request. + * 05-25-11 02.00.07 Added Flags field and related defines to + * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST. + * 11-18-11 02.00.08 Incorporating additions for MPI v2.5. + * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request + * message. + * 07-26-12 02.00.10 Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that + * it uses MPI Chain SGE as well as MPI Simple SGE. + * 08-19-13 02.00.11 Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info. + * 01-08-14 02.00.12 Added MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC. 
+ * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_TOOL_H +#define MPI2_TOOL_H + +/***************************************************************************** +* +* Toolbox Messages +* +*****************************************************************************/ + +/*defines for the Tools */ +#define MPI2_TOOLBOX_CLEAN_TOOL (0x00) +#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01) +#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02) +#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03) +#define MPI2_TOOLBOX_BEACON_TOOL (0x05) +#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06) +#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL (0x07) + +/**************************************************************************** +* Toolbox reply +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_TOOLBOX_REPLY, *PTR_MPI2_TOOLBOX_REPLY, + Mpi2ToolboxReply_t, *pMpi2ToolboxReply_t; + +/**************************************************************************** +* Toolbox Clean Tool request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Flags; /*0x0C */ +} MPI2_TOOLBOX_CLEAN_REQUEST, *PTR_MPI2_TOOLBOX_CLEAN_REQUEST, + Mpi2ToolboxCleanRequest_t, *pMpi2ToolboxCleanRequest_t; + +/*values for the Flags field */ +#define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000) +#define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000) +#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000) +#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000) +#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000) +#define MPI2_TOOLBOX_CLEAN_BIT26_PRODUCT_SPECIFIC (0x04000000) +#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000) +#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000) +#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004) +#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002) +#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001) + +/**************************************************************************** +* Toolbox Memory Move request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + MPI2_SGE_SIMPLE_UNION SGL; /*0x0C */ +} MPI2_TOOLBOX_MEM_MOVE_REQUEST, *PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST, + Mpi2ToolboxMemMoveRequest_t, *pMpi2ToolboxMemMoveRequest_t; + +/**************************************************************************** +* Toolbox Diagnostic Data Upload request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 
ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 SGLFlags; /*0x0C */ + U8 Reserved5; /*0x0D */ + U16 Reserved6; /*0x0E */ + U32 Flags; /*0x10 */ + U32 DataLength; /*0x14 */ + MPI2_SGE_SIMPLE_UNION SGL; /*0x18 */ +} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, + *PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, + Mpi2ToolboxDiagDataUploadRequest_t, + *pMpi2ToolboxDiagDataUploadRequest_t; + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER { + U32 DiagDataLength; /*00h */ + U8 FormatCode; /*04h */ + U8 Reserved1; /*05h */ + U16 Reserved2; /*06h */ +} MPI2_DIAG_DATA_UPLOAD_HEADER, *PTR_MPI2_DIAG_DATA_UPLOAD_HEADER, + Mpi2DiagDataUploadHeader_t, *pMpi2DiagDataUploadHeader_t; + +/**************************************************************************** +* Toolbox ISTWI Read Write Tool +****************************************************************************/ + +/*Toolbox ISTWI Read Write Tool request message */ +typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 Reserved6; /*0x10 */ + U8 DevIndex; /*0x14 */ + U8 Action; /*0x15 */ + U8 SGLFlags; /*0x16 */ + U8 Flags; /*0x17 */ + U16 TxDataLength; /*0x18 */ + U16 RxDataLength; /*0x1A */ + U32 Reserved8; /*0x1C */ + U32 Reserved9; /*0x20 */ + U32 Reserved10; /*0x24 */ + U32 Reserved11; /*0x28 */ + U32 Reserved12; /*0x2C */ + MPI2_SGE_SIMPLE_UNION SGL; /*0x30 */ +} MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, + *PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST, + Mpi2ToolboxIstwiReadWriteRequest_t, + *pMpi2ToolboxIstwiReadWriteRequest_t; + +/*values for the Action field */ +#define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01) +#define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02) +#define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03) +#define MPI2_TOOL_ISTWI_ACTION_RESERVE_BUS (0x10) +#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11) +#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12) + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*values for the Flags field */ +#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80) +#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07) + +/*Toolbox ISTWI Read Write Tool reply message */ +typedef struct _MPI2_TOOLBOX_ISTWI_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U8 DevIndex; /*0x14 */ + U8 Action; /*0x15 */ + U8 IstwiStatus; /*0x16 */ + U8 Reserved6; /*0x17 */ + U16 TxDataCount; /*0x18 */ + U16 RxDataCount; /*0x1A */ +} MPI2_TOOLBOX_ISTWI_REPLY, *PTR_MPI2_TOOLBOX_ISTWI_REPLY, + Mpi2ToolboxIstwiReply_t, *pMpi2ToolboxIstwiReply_t; + +/**************************************************************************** +* Toolbox Beacon Tool request +****************************************************************************/ + +typedef struct _MPI2_TOOLBOX_BEACON_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + 
U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 Reserved5; /*0x0C */ + U8 PhysicalPort; /*0x0D */ + U8 Reserved6; /*0x0E */ + U8 Flags; /*0x0F */ +} MPI2_TOOLBOX_BEACON_REQUEST, *PTR_MPI2_TOOLBOX_BEACON_REQUEST, + Mpi2ToolboxBeaconRequest_t, *pMpi2ToolboxBeaconRequest_t; + +/*values for the Flags field */ +#define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00) +#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01) + +/**************************************************************************** +* Toolbox Diagnostic CLI Tool +****************************************************************************/ + +#define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C) + +/*MPI v2.0 Toolbox Diagnostic CLI Tool request message */ +typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U8 SGLFlags; /*0x0C */ + U8 Reserved5; /*0x0D */ + U16 Reserved6; /*0x0E */ + U32 DataLength; /*0x10 */ + U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */ + MPI2_MPI_SGE_IO_UNION SGL; /*0x70 */ +} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + *PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + Mpi2ToolboxDiagnosticCliRequest_t, + *pMpi2ToolboxDiagnosticCliRequest_t; + +/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */ + +/*MPI v2.5 Toolbox Diagnostic CLI Tool request message */ +typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U32 Reserved5; /*0x0C */ + U32 DataLength; /*0x10 */ + U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */ + MPI25_SGE_IO_UNION SGL; /* 0x70 */ +} MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + *PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST, + Mpi25ToolboxDiagnosticCliRequest_t, + *pMpi25ToolboxDiagnosticCliRequest_t; + +/*Toolbox Diagnostic CLI Tool reply message */ +typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY { + U8 Tool; /*0x00 */ + U8 Reserved1; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 ReturnedDataLength; /*0x14 */ +} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY, + *PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY, + Mpi2ToolboxDiagnosticCliReply_t, + *pMpi2ToolboxDiagnosticCliReply_t; + + +/**************************************************************************** +* Toolbox Console Text Display Tool +****************************************************************************/ + +/* Toolbox Console Text Display Tool request message */ +typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST { + U8 Tool; /* 0x00 */ + U8 Reserved1; /* 0x01 */ + U8 ChainOffset; /* 0x02 */ + U8 Function; /* 0x03 */ + U16 Reserved2; /* 0x04 */ + U8 Reserved3; /* 0x06 */ + U8 MsgFlags; /* 0x07 */ + U8 VP_ID; /* 0x08 */ + U8 VF_ID; /* 0x09 */ + U16 Reserved4; /* 0x0A */ + U8 Console; /* 0x0C */ + U8 Flags; /* 0x0D */ + U16 Reserved6; /* 0x0E */ + U8 
TextToDisplay[4]; /* 0x10 */ +} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, +*PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST, +Mpi2ToolboxTextDisplayRequest_t, +*pMpi2ToolboxTextDisplayRequest_t; + +/* defines for the Console field */ +#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK (0xF0) +#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT (0x00) +#define MPI2_TOOLBOX_CONSOLE_TYPE_UART (0x10) +#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET (0x20) + +#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK (0x0F) + +/* defines for the Flags field */ +#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP (0x01) + + + +/***************************************************************************** +* +* Diagnostic Buffer Messages +* +*****************************************************************************/ + +/**************************************************************************** +* Diagnostic Buffer Post request +****************************************************************************/ + +typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST { + U8 ExtendedType; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U64 BufferAddress; /*0x0C */ + U32 BufferLength; /*0x14 */ + U32 Reserved5; /*0x18 */ + U32 Reserved6; /*0x1C */ + U32 Flags; /*0x20 */ + U32 ProductSpecific[23]; /*0x24 */ +} MPI2_DIAG_BUFFER_POST_REQUEST, *PTR_MPI2_DIAG_BUFFER_POST_REQUEST, + Mpi2DiagBufferPostRequest_t, *pMpi2DiagBufferPostRequest_t; + +/*values for the ExtendedType field */ +#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02) + +/*values for the BufferType field */ +#define MPI2_DIAG_BUF_TYPE_TRACE (0x00) +#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01) +#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02) +/*count of the number of buffer types */ +#define MPI2_DIAG_BUF_TYPE_COUNT (0x03) + +/*values for the Flags field */ +#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002) +#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001) + +/**************************************************************************** +* Diagnostic Buffer Post reply +****************************************************************************/ + +typedef struct _MPI2_DIAG_BUFFER_POST_REPLY { + U8 ExtendedType; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ + U32 TransferLength; /*0x14 */ +} MPI2_DIAG_BUFFER_POST_REPLY, *PTR_MPI2_DIAG_BUFFER_POST_REPLY, + Mpi2DiagBufferPostReply_t, *pMpi2DiagBufferPostReply_t; + +/**************************************************************************** +* Diagnostic Release request +****************************************************************************/ + +typedef struct _MPI2_DIAG_RELEASE_REQUEST { + U8 Reserved1; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 ChainOffset; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ +} MPI2_DIAG_RELEASE_REQUEST, *PTR_MPI2_DIAG_RELEASE_REQUEST, + Mpi2DiagReleaseRequest_t, *pMpi2DiagReleaseRequest_t; + +/**************************************************************************** +* Diagnostic Release reply
+****************************************************************************/ + +typedef struct _MPI2_DIAG_RELEASE_REPLY { + U8 Reserved1; /*0x00 */ + U8 BufferType; /*0x01 */ + U8 MsgLength; /*0x02 */ + U8 Function; /*0x03 */ + U16 Reserved2; /*0x04 */ + U8 Reserved3; /*0x06 */ + U8 MsgFlags; /*0x07 */ + U8 VP_ID; /*0x08 */ + U8 VF_ID; /*0x09 */ + U16 Reserved4; /*0x0A */ + U16 Reserved5; /*0x0C */ + U16 IOCStatus; /*0x0E */ + U32 IOCLogInfo; /*0x10 */ +} MPI2_DIAG_RELEASE_REPLY, *PTR_MPI2_DIAG_RELEASE_REPLY, + Mpi2DiagReleaseReply_t, *pMpi2DiagReleaseReply_t; + +#endif diff --git a/addons/mpt3sas/src/4.4.180/mpi/mpi2_type.h b/addons/mpt3sas/src/4.4.180/mpi/mpi2_type.h new file mode 100644 index 00000000..99ab0936 --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpi/mpi2_type.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2000-2014 LSI Corporation. + * + * + * Name: mpi2_type.h + * Title: MPI basic type definitions + * Creation Date: August 16, 2006 + * + * mpi2_type.h Version: 02.00.00 + * + * Version History + * --------------- + * + * Date Version Description + * -------- -------- ------------------------------------------------------ + * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A. + * -------------------------------------------------------------------------- + */ + +#ifndef MPI2_TYPE_H +#define MPI2_TYPE_H + +/******************************************************************************* + * Define MPI2_POINTER if it hasn't already been defined. By default + * MPI2_POINTER is defined to be a near pointer. MPI2_POINTER can be defined as + * a far pointer by defining MPI2_POINTER as "far *" before this header file is + * included. + */ + +/* the basic types may have already been included by mpi_type.h */ +#ifndef MPI_TYPE_H +/***************************************************************************** +* +* Basic Types +* +*****************************************************************************/ + +typedef u8 U8; +typedef __le16 U16; +typedef __le32 U32; +typedef __le64 U64 __attribute__ ((aligned(4))); + +/***************************************************************************** +* +* Pointer Types +* +*****************************************************************************/ + +typedef U8 *PU8; +typedef U16 *PU16; +typedef U32 *PU32; +typedef U64 *PU64; + +#endif + +#endif diff --git a/addons/mpt3sas/src/4.4.180/mpt3sas_base.c b/addons/mpt3sas/src/4.4.180/mpt3sas_base.c new file mode 100644 index 00000000..9b536729 --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpt3sas_base.c @@ -0,0 +1,5634 @@ +/* + * This is the Fusion MPT base driver providing common API layer interface + * for access to MPT (Message Passing Technology) firmware. + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/kdev_t.h> +#include <linux/blkdev.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/time.h> +#include <linux/kthread.h> +#include <linux/aer.h> + + +#include "mpt3sas_base.h" + +static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS]; + + +#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */ + + /* maximum controller queue depth */ +#define MAX_HBA_QUEUE_DEPTH 30000 +#define MAX_CHAIN_DEPTH 100000 +static int max_queue_depth = -1; +module_param(max_queue_depth, int, 0); +MODULE_PARM_DESC(max_queue_depth, " max controller queue depth "); + +static int max_sgl_entries = -1; +module_param(max_sgl_entries, int, 0); +MODULE_PARM_DESC(max_sgl_entries, " max sg entries "); + +static int msix_disable = -1; +module_param(msix_disable, int, 0); +MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)"); + +static int max_msix_vectors = -1; +module_param(max_msix_vectors, int, 0); +MODULE_PARM_DESC(max_msix_vectors, + " max msix vectors"); + +static int mpt3sas_fwfault_debug; +MODULE_PARM_DESC(mpt3sas_fwfault_debug, + " enable detection of firmware fault and halt firmware - (default=0)"); + +static int +_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag); + +/** + * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug. + * + */ +static int +_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp) +{ + int ret = param_set_int(val, kp); + struct MPT3SAS_ADAPTER *ioc; + + if (ret) + return ret; + + /* global ioc spinlock to protect controller list on list operations */ + pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug); + spin_lock(&gioc_lock); + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) + ioc->fwfault_debug = mpt3sas_fwfault_debug; + spin_unlock(&gioc_lock); + return 0; +} +module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug, + param_get_int, &mpt3sas_fwfault_debug, 0644); + +/** + * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc + * @arg: input argument, used to derive ioc + * + * Return 0 if controller is removed from pci subsystem.
+ * Return -1 otherwise. + */ +static int mpt3sas_remove_dead_ioc_func(void *arg) +{ + struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg; + struct pci_dev *pdev; + + if (ioc == NULL) + return -1; + + pdev = ioc->pdev; + if (pdev == NULL) + return -1; + pci_stop_and_remove_bus_device_locked(pdev); + return 0; +} + +/** + * _base_fault_reset_work - workq handling ioc fault conditions + * @work: input argument, used to derive ioc + * Context: sleep. + * + * Return nothing. + */ +static void +_base_fault_reset_work(struct work_struct *work) +{ + struct MPT3SAS_ADAPTER *ioc = + container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work); + unsigned long flags; + u32 doorbell; + int rc; + struct task_struct *p; + + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->shost_recovery || ioc->pci_error_recovery) + goto rearm_timer; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) { + pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n", + ioc->name); + + /* EEH recovery may be able to resolve some PCI bus failures + * without removing the dead ioc function outright, so priority + * is given to EEH recovery here. If it does not resolve the + * issue, the mpt3sas driver declares the controller + * non-operational and removes the dead ioc function. + */ + if (ioc->non_operational_loop++ < 5) { + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, + flags); + goto rearm_timer; + } + + /* + * Call the _scsih_flush_pending_cmds callback so that all + * pending commands are flushed back to the OS. This call is + * required to avoid a deadlock at the block layer, and is + * safe since a dead ioc will never return any command back + * from the HW. + */ + ioc->schedule_dead_ioc_flush_running_cmds(ioc); + /* + * Set remove_host flag early since kernel thread will + * take some time to execute. + */ + ioc->remove_host = 1; + /* Remove the dead host */ + p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc, + "%s_dead_ioc_%d", ioc->driver_name, ioc->id); + if (IS_ERR(p)) + pr_err(MPT3SAS_FMT + "%s: Running mpt3sas_dead_ioc thread failed !!!!\n", + ioc->name, __func__); + else + pr_err(MPT3SAS_FMT + "%s: Running mpt3sas_dead_ioc thread success !!!!\n", + ioc->name, __func__); + return; /* don't rearm timer */ + } + + ioc->non_operational_loop = 0; + + if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) { + rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name, + __func__, (rc == 0) ? "success" : "failed"); + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) + mpt3sas_base_fault_info(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + if (rc && (doorbell & MPI2_IOC_STATE_MASK) != + MPI2_IOC_STATE_OPERATIONAL) + return; /* don't rearm timer */ + } + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + rearm_timer: + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +/** + * mpt3sas_base_start_watchdog - start the fault_reset_work_q + * @ioc: per adapter object + * Context: sleep. + * + * Return nothing.
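+ *
+ * Editor's note -- a minimal sketch of the self-rearming pattern used by
+ * the work item queued here (illustrative only, not driver code; `wq' and
+ * the 1000 ms period stand in for fault_reset_work_q and
+ * FAULT_POLLING_INTERVAL):
+ *
+ *	static void poll_fn(struct work_struct *work)
+ *	{
+ *		struct delayed_work *dwork = to_delayed_work(work);
+ *
+ *		// inspect hardware state here, then re-arm:
+ *		queue_delayed_work(wq, dwork, msecs_to_jiffies(1000));
+ *	}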
+ */ +void +mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned long flags; + + if (ioc->fault_reset_work_q) + return; + + /* initialize fault polling */ + + INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work); + snprintf(ioc->fault_reset_work_q_name, + sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status", + ioc->driver_name, ioc->id); + ioc->fault_reset_work_q = + create_singlethread_workqueue(ioc->fault_reset_work_q_name); + if (!ioc->fault_reset_work_q) { + pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n", + ioc->name, __func__, __LINE__); + return; + } + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + if (ioc->fault_reset_work_q) + queue_delayed_work(ioc->fault_reset_work_q, + &ioc->fault_reset_work, + msecs_to_jiffies(FAULT_POLLING_INTERVAL)); + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); +} + +/** + * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q + * @ioc: per adapter object + * Context: sleep. + * + * Return nothing. + */ +void +mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned long flags; + struct workqueue_struct *wq; + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + wq = ioc->fault_reset_work_q; + ioc->fault_reset_work_q = NULL; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + if (wq) { + if (!cancel_delayed_work_sync(&ioc->fault_reset_work)) + flush_workqueue(wq); + destroy_workqueue(wq); + } +} + +/** + * mpt3sas_base_fault_info - verbose translation of firmware FAULT code + * @ioc: per adapter object + * @fault_code: fault code + * + * Return nothing. + */ +void +mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code) +{ + pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n", + ioc->name, fault_code); +} + +/** + * mpt3sas_halt_firmware - halts the mpt controller firmware + * @ioc: per adapter object + * + * For debugging timeout related issues. Writing 0xC0FFEE00 + * to the doorbell register will halt controller firmware. With + * both the driver and firmware stopped, the end user can then + * obtain a ring buffer from the controller UART. + */ +void +mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc) +{ + u32 doorbell; + + if (!ioc->fwfault_debug) + return; + + dump_stack(); + + doorbell = readl(&ioc->chip->Doorbell); + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) + mpt3sas_base_fault_info(ioc, doorbell); + else { + writel(0xC0FFEE00, &ioc->chip->Doorbell); + pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n", + ioc->name); + } + + if (ioc->fwfault_debug == 2) + for (;;) + ; + else + panic("panic in %s\n", __func__); +} + +/** + * _base_sas_ioc_info - verbose translation of the ioc status + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @request_hdr: request mf + * + * Return nothing.
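+ *
+ * Editor's note: the frame_sz values computed in this function are byte
+ * counts, while _debug_dump_mf() takes a count of 32-bit dwords -- hence
+ * the frame_sz/4 at the call site (e.g. the 32-byte default frame is
+ * dumped as 8 dwords).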
+ */ +static void +_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply, + MPI2RequestHeader_t *request_hdr) +{ + u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & + MPI2_IOCSTATUS_MASK; + char *desc = NULL; + u16 frame_sz; + char *func_str = NULL; + + /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */ + if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || + request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION) + return; + + if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE) + return; + + switch (ioc_status) { + +/**************************************************************************** +* Common IOCStatus values for all replies +****************************************************************************/ + + case MPI2_IOCSTATUS_INVALID_FUNCTION: + desc = "invalid function"; + break; + case MPI2_IOCSTATUS_BUSY: + desc = "busy"; + break; + case MPI2_IOCSTATUS_INVALID_SGL: + desc = "invalid sgl"; + break; + case MPI2_IOCSTATUS_INTERNAL_ERROR: + desc = "internal error"; + break; + case MPI2_IOCSTATUS_INVALID_VPID: + desc = "invalid vpid"; + break; + case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: + desc = "insufficient resources"; + break; + case MPI2_IOCSTATUS_INVALID_FIELD: + desc = "invalid field"; + break; + case MPI2_IOCSTATUS_INVALID_STATE: + desc = "invalid state"; + break; + case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED: + desc = "op state not supported"; + break; + +/**************************************************************************** +* Config IOCStatus values +****************************************************************************/ + + case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION: + desc = "config invalid action"; + break; + case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE: + desc = "config invalid type"; + break; + case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE: + desc = "config invalid page"; + break; + case MPI2_IOCSTATUS_CONFIG_INVALID_DATA: + desc = "config invalid data"; + break; + case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS: + desc = "config no defaults"; + break; + case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT: + desc = "config cant commit"; + break; + +/**************************************************************************** +* SCSI IO Reply +****************************************************************************/ + + case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: + case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: + case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: + case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: + case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: + case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: + break; + +/**************************************************************************** +* For use by SCSI Initiator and SCSI Target end-to-end data protection +****************************************************************************/ + + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + desc = "eedp guard error"; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + desc = "eedp ref tag error"; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + desc = "eedp app tag error"; + break; + +/**************************************************************************** +* SCSI Target values 
+****************************************************************************/ + + case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX: + desc = "target invalid io index"; + break; + case MPI2_IOCSTATUS_TARGET_ABORTED: + desc = "target aborted"; + break; + case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE: + desc = "target no conn retryable"; + break; + case MPI2_IOCSTATUS_TARGET_NO_CONNECTION: + desc = "target no connection"; + break; + case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH: + desc = "target xfer count mismatch"; + break; + case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR: + desc = "target data offset error"; + break; + case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA: + desc = "target too much write data"; + break; + case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT: + desc = "target iu too short"; + break; + case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT: + desc = "target ack nak timeout"; + break; + case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED: + desc = "target nak received"; + break; + +/**************************************************************************** +* Serial Attached SCSI values +****************************************************************************/ + + case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED: + desc = "smp request failed"; + break; + case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN: + desc = "smp data overrun"; + break; + +/**************************************************************************** +* Diagnostic Buffer Post / Diagnostic Release values +****************************************************************************/ + + case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED: + desc = "diagnostic released"; + break; + default: + break; + } + + if (!desc) + return; + + switch (request_hdr->Function) { + case MPI2_FUNCTION_CONFIG: + frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size; + func_str = "config_page"; + break; + case MPI2_FUNCTION_SCSI_TASK_MGMT: + frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t); + func_str = "task_mgmt"; + break; + case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: + frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t); + func_str = "sas_iounit_ctl"; + break; + case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR: + frame_sz = sizeof(Mpi2SepRequest_t); + func_str = "enclosure"; + break; + case MPI2_FUNCTION_IOC_INIT: + frame_sz = sizeof(Mpi2IOCInitRequest_t); + func_str = "ioc_init"; + break; + case MPI2_FUNCTION_PORT_ENABLE: + frame_sz = sizeof(Mpi2PortEnableRequest_t); + func_str = "port_enable"; + break; + case MPI2_FUNCTION_SMP_PASSTHROUGH: + frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size; + func_str = "smp_passthru"; + break; + default: + frame_sz = 32; + func_str = "unknown"; + break; + } + + pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n", + ioc->name, desc, ioc_status, request_hdr, func_str); + + _debug_dump_mf(request_hdr, frame_sz/4); +} + +/** + * _base_display_event_data - verbose translation of firmware asyn events + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * + * Return nothing. 
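+ *
+ * Editor's note: these strings are printed only when MPT_DEBUG_EVENTS is
+ * set in ioc->logging_level (see the test at the top of the function).
+ * A hedged usage sketch -- the exact bit value lives in mpt3sas_debug.h:
+ *
+ *	modprobe mpt3sas logging_level=0x8	// assuming MPT_DEBUG_EVENTS is 0x8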
+ */ +static void +_base_display_event_data(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventNotificationReply_t *mpi_reply) +{ + char *desc = NULL; + u16 event; + + if (!(ioc->logging_level & MPT_DEBUG_EVENTS)) + return; + + event = le16_to_cpu(mpi_reply->Event); + + switch (event) { + case MPI2_EVENT_LOG_DATA: + desc = "Log Data"; + break; + case MPI2_EVENT_STATE_CHANGE: + desc = "Status Change"; + break; + case MPI2_EVENT_HARD_RESET_RECEIVED: + desc = "Hard Reset Received"; + break; + case MPI2_EVENT_EVENT_CHANGE: + desc = "Event Change"; + break; + case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: + desc = "Device Status Change"; + break; + case MPI2_EVENT_IR_OPERATION_STATUS: + if (!ioc->hide_ir_msg) + desc = "IR Operation Status"; + break; + case MPI2_EVENT_SAS_DISCOVERY: + { + Mpi2EventDataSasDiscovery_t *event_data = + (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData; + pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name, + (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ? + "start" : "stop"); + if (event_data->DiscoveryStatus) + pr_info("discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + pr_info("\n"); + return; + } + case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: + desc = "SAS Broadcast Primitive"; + break; + case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE: + desc = "SAS Init Device Status Change"; + break; + case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW: + desc = "SAS Init Table Overflow"; + break; + case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + desc = "SAS Topology Change List"; + break; + case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + desc = "SAS Enclosure Device Status Change"; + break; + case MPI2_EVENT_IR_VOLUME: + if (!ioc->hide_ir_msg) + desc = "IR Volume"; + break; + case MPI2_EVENT_IR_PHYSICAL_DISK: + if (!ioc->hide_ir_msg) + desc = "IR Physical Disk"; + break; + case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: + if (!ioc->hide_ir_msg) + desc = "IR Configuration Change List"; + break; + case MPI2_EVENT_LOG_ENTRY_ADDED: + if (!ioc->hide_ir_msg) + desc = "Log Entry Added"; + break; + case MPI2_EVENT_TEMP_THRESHOLD: + desc = "Temperature Threshold"; + break; + } + + if (!desc) + return; + + pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc); +} + +/** + * _base_sas_log_info - verbose translation of firmware log info + * @ioc: per adapter object + * @log_info: log info + * + * Return nothing. 
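+ *
+ * Editor's note -- a worked example of the bitfield decode done below:
+ * log_info 0x31170000 splits, from the top nibble down, into bus_type=0x3
+ * (SAS), originator=0x1 (PL), code=0x17 and subcode=0x0000, which is
+ * exactly the per-nexus-loss loginfo this function filters out early.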
+ */ +static void +_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info) +{ + union loginfo_type { + u32 loginfo; + struct { + u32 subcode:16; + u32 code:8; + u32 originator:4; + u32 bus_type:4; + } dw; + }; + union loginfo_type sas_loginfo; + char *originator_str = NULL; + + sas_loginfo.loginfo = log_info; + if (sas_loginfo.dw.bus_type != 3 /*SAS*/) + return; + + /* each nexus loss loginfo */ + if (log_info == 0x31170000) + return; + + /* eat the loginfos associated with task aborts */ + if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info == + 0x31140000 || log_info == 0x31130000)) + return; + + switch (sas_loginfo.dw.originator) { + case 0: + originator_str = "IOP"; + break; + case 1: + originator_str = "PL"; + break; + case 2: + if (!ioc->hide_ir_msg) + originator_str = "IR"; + else + originator_str = "WarpDrive"; + break; + } + + pr_warn(MPT3SAS_FMT + "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n", + ioc->name, log_info, + originator_str, sas_loginfo.dw.code, + sas_loginfo.dw.subcode); +} + +/** + * _base_display_reply_info - + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Return nothing. + */ +static void +_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + u16 ioc_status; + u32 loginfo = 0; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (unlikely(!mpi_reply)) { + pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + + if ((ioc_status & MPI2_IOCSTATUS_MASK) && + (ioc->logging_level & MPT_DEBUG_REPLY)) { + _base_sas_ioc_info(ioc , mpi_reply, + mpt3sas_base_get_msg_frame(ioc, smid)); + } + + if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) { + loginfo = le32_to_cpu(mpi_reply->IOCLogInfo); + _base_sas_log_info(ioc, loginfo); + } + + if (ioc_status || loginfo) { + ioc_status &= MPI2_IOCSTATUS_MASK; + mpt3sas_trigger_mpi(ioc, ioc_status, loginfo); + } +} + +/** + * mpt3sas_base_done - base internal command completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK) + return 1; + + if (ioc->base_cmds.status == MPT3_CMD_NOT_USED) + return 1; + + ioc->base_cmds.status |= MPT3_CMD_COMPLETE; + if (mpi_reply) { + ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID; + memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); + } + ioc->base_cmds.status &= ~MPT3_CMD_PENDING; + + complete(&ioc->base_cmds.done); + return 1; +} + +/** + * _base_async_event - main callback handler for firmware asyn events + * @ioc: per adapter object + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
+ */ +static u8 +_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) +{ + Mpi2EventNotificationReply_t *mpi_reply; + Mpi2EventAckRequest_t *ack_request; + u16 smid; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (!mpi_reply) + return 1; + if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION) + return 1; + + _base_display_event_data(ioc, mpi_reply); + + if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED)) + goto out; + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + goto out; + } + + ack_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t)); + ack_request->Function = MPI2_FUNCTION_EVENT_ACK; + ack_request->Event = mpi_reply->Event; + ack_request->EventContext = mpi_reply->EventContext; + ack_request->VF_ID = 0; /* TODO */ + ack_request->VP_ID = 0; + mpt3sas_base_put_smid_default(ioc, smid); + + out: + + /* scsih callback handler */ + mpt3sas_scsih_event_callback(ioc, msix_index, reply); + + /* ctl callback handler */ + mpt3sas_ctl_event_callback(ioc, msix_index, reply); + + return 1; +} + +/** + * _base_get_cb_idx - obtain the callback index + * @ioc: per adapter object + * @smid: system request message index + * + * Return callback index. + */ +static u8 +_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + int i; + u8 cb_idx; + + if (smid < ioc->hi_priority_smid) { + i = smid - 1; + cb_idx = ioc->scsi_lookup[i].cb_idx; + } else if (smid < ioc->internal_smid) { + i = smid - ioc->hi_priority_smid; + cb_idx = ioc->hpr_lookup[i].cb_idx; + } else if (smid <= ioc->hba_queue_depth) { + i = smid - ioc->internal_smid; + cb_idx = ioc->internal_lookup[i].cb_idx; + } else + cb_idx = 0xFF; + return cb_idx; +} + +/** + * _base_mask_interrupts - disable interrupts + * @ioc: per adapter object + * + * Disabling ResetIRQ, Reply and Doorbell Interrupts + * + * Return nothing. + */ +static void +_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc) +{ + u32 him_register; + + ioc->mask_interrupts = 1; + him_register = readl(&ioc->chip->HostInterruptMask); + him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK; + writel(him_register, &ioc->chip->HostInterruptMask); + readl(&ioc->chip->HostInterruptMask); +} + +/** + * _base_unmask_interrupts - enable interrupts + * @ioc: per adapter object + * + * Enabling only Reply Interrupts + * + * Return nothing. + */ +static void +_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc) +{ + u32 him_register; + + him_register = readl(&ioc->chip->HostInterruptMask); + him_register &= ~MPI2_HIM_RIM; + writel(him_register, &ioc->chip->HostInterruptMask); + ioc->mask_interrupts = 0; +} + +union reply_descriptor { + u64 word; + struct { + u32 low; + u32 high; + } u; +}; + +/** + * _base_interrupt - MPT adapter (IOC) specific interrupt handler. + * @irq: irq number (not used) + * @bus_id: bus identifier cookie == pointer to this vector's adapter_reply_queue + * + * Return IRQ_HANDLED if processed, else IRQ_NONE.
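+ *
+ * Editor's note: the loop below consumes a circular ring of reply
+ * descriptors. The wrap arithmetic used for both the reply-post and
+ * reply-free indexes is the usual
+ *
+ *	idx = (idx == depth - 1) ? 0 : idx + 1;
+ *
+ * and a descriptor of all ones (ULLONG_MAX) marks a slot as unused.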
+ */ +static irqreturn_t +_base_interrupt(int irq, void *bus_id) +{ + struct adapter_reply_queue *reply_q = bus_id; + union reply_descriptor rd; + u32 completed_cmds; + u8 request_desript_type; + u16 smid; + u8 cb_idx; + u32 reply; + u8 msix_index = reply_q->msix_index; + struct MPT3SAS_ADAPTER *ioc = reply_q->ioc; + Mpi2ReplyDescriptorsUnion_t *rpf; + u8 rc; + + if (ioc->mask_interrupts) + return IRQ_NONE; + + if (!atomic_add_unless(&reply_q->busy, 1, 1)) + return IRQ_NONE; + + rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index]; + request_desript_type = rpf->Default.ReplyFlags + & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; + if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) { + atomic_dec(&reply_q->busy); + return IRQ_NONE; + } + + completed_cmds = 0; + cb_idx = 0xFF; + do { + rd.word = le64_to_cpu(rpf->Words); + if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX) + goto out; + reply = 0; + smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1); + if (request_desript_type == + MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS || + request_desript_type == + MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) { + cb_idx = _base_get_cb_idx(ioc, smid); + if ((likely(cb_idx < MPT_MAX_CALLBACKS)) && + (likely(mpt_callbacks[cb_idx] != NULL))) { + rc = mpt_callbacks[cb_idx](ioc, smid, + msix_index, 0); + if (rc) + mpt3sas_base_free_smid(ioc, smid); + } + } else if (request_desript_type == + MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) { + reply = le32_to_cpu( + rpf->AddressReply.ReplyFrameAddress); + if (reply > ioc->reply_dma_max_address || + reply < ioc->reply_dma_min_address) + reply = 0; + if (smid) { + cb_idx = _base_get_cb_idx(ioc, smid); + if ((likely(cb_idx < MPT_MAX_CALLBACKS)) && + (likely(mpt_callbacks[cb_idx] != NULL))) { + rc = mpt_callbacks[cb_idx](ioc, smid, + msix_index, reply); + if (reply) + _base_display_reply_info(ioc, + smid, msix_index, reply); + if (rc) + mpt3sas_base_free_smid(ioc, + smid); + } + } else { + _base_async_event(ioc, msix_index, reply); + } + + /* reply free queue handling */ + if (reply) { + ioc->reply_free_host_index = + (ioc->reply_free_host_index == + (ioc->reply_free_queue_depth - 1)) ? + 0 : ioc->reply_free_host_index + 1; + ioc->reply_free[ioc->reply_free_host_index] = + cpu_to_le32(reply); + wmb(); + writel(ioc->reply_free_host_index, + &ioc->chip->ReplyFreeHostIndex); + } + } + + rpf->Words = cpu_to_le64(ULLONG_MAX); + reply_q->reply_post_host_index = + (reply_q->reply_post_host_index == + (ioc->reply_post_queue_depth - 1)) ? 0 : + reply_q->reply_post_host_index + 1; + request_desript_type = + reply_q->reply_post_free[reply_q->reply_post_host_index]. + Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; + completed_cmds++; + if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) + goto out; + if (!reply_q->reply_post_host_index) + rpf = reply_q->reply_post_free; + else + rpf++; + } while (1); + + out: + + if (!completed_cmds) { + atomic_dec(&reply_q->busy); + return IRQ_NONE; + } + + wmb(); + if (ioc->is_warpdrive) { + writel(reply_q->reply_post_host_index, + ioc->reply_post_host_index[msix_index]); + atomic_dec(&reply_q->busy); + return IRQ_HANDLED; + } + + /* Update Reply Post Host Index. + * For those HBA's which support combined reply queue feature + * 1. Get the correct Supplemental Reply Post Host Index Register. + * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host + * Index Register address bank i.e replyPostRegisterIndex[], + * 2. 
Then update this register with the new reply host index value + * in the ReplyPostIndex field and the MSIxIndex field with the + * msix_index value reduced to a value between 0 and 7, using a + * modulo 8 operation, since each Supplemental Reply Post Host + * Index Register supports 8 MSI-X vectors. + * + * For other HBA's just update the Reply Post Host Index register with + * new reply host index value in ReplyPostIndex Field and msix_index + * value in MSIxIndex field. + */ + if (ioc->msix96_vector) + writel(reply_q->reply_post_host_index | ((msix_index & 7) << + MPI2_RPHI_MSIX_INDEX_SHIFT), + ioc->replyPostRegisterIndex[msix_index/8]); + else + writel(reply_q->reply_post_host_index | (msix_index << + MPI2_RPHI_MSIX_INDEX_SHIFT), + &ioc->chip->ReplyPostHostIndex); + atomic_dec(&reply_q->busy); + return IRQ_HANDLED; +} + +/** + * _base_is_controller_msix_enabled - does the controller support multi-reply queues + * @ioc: per adapter object + * + */ +static inline int +_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc) +{ + return (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable; +} + +/** + * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues + * @ioc: per adapter object + * Context: ISR context + * + * Called when a Task Management request has completed. We want + * to flush the other reply queues so all the outstanding IO has been + * completed back to OS before we process the TM completion. + * + * Return nothing. + */ +void +mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc) +{ + struct adapter_reply_queue *reply_q; + + /* If MSIX capability is turned off + * then multi-queues are not enabled + */ + if (!_base_is_controller_msix_enabled(ioc)) + return; + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (ioc->shost_recovery) + return; + /* TMs are on msix_index == 0 */ + if (reply_q->msix_index == 0) + continue; + _base_interrupt(reply_q->vector, (void *)reply_q); + } +} + +/** + * mpt3sas_base_release_callback_handler - clear interrupt callback handler + * @cb_idx: callback index + * + * Return nothing. + */ +void +mpt3sas_base_release_callback_handler(u8 cb_idx) +{ + mpt_callbacks[cb_idx] = NULL; +} + +/** + * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler + * @cb_func: callback function + * + * Returns the assigned cb_idx. + */ +u8 +mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func) +{ + u8 cb_idx; + + for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--) + if (mpt_callbacks[cb_idx] == NULL) + break; + + mpt_callbacks[cb_idx] = cb_func; + return cb_idx; +} + +/** + * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler + * + * Return nothing. + */ +void +mpt3sas_base_initialize_callback_handler(void) +{ + u8 cb_idx; + + for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++) + mpt3sas_base_release_callback_handler(cb_idx); +} + + +/** + * _base_build_zero_len_sge - build zero length sg entry + * @ioc: per adapter object + * @paddr: virtual address for SGE + * + * Create a zero length scatter gather entry to ensure the IOC's hardware has + * something to use if the target device goes brain dead and tries + * to send data even when none is asked for. + * + * Return nothing.
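+ *
+ * Editor's note: in an MPI2 simple SGE the 32-bit FlagsLength word packs
+ * the flags into the top bits (hence the << MPI2_SGE_FLAGS_SHIFT) and the
+ * transfer length into the low bits, so callers simply OR together
+ * "flags | length" -- here with a length of zero.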
+ */ +static void +_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr) +{ + u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT | + MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST | + MPI2_SGE_FLAGS_SIMPLE_ELEMENT) << + MPI2_SGE_FLAGS_SHIFT); + ioc->base_add_sg_single(paddr, flags_length, -1); +} + +/** + * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr. + * @paddr: virtual address for SGE + * @flags_length: SGE flags and data transfer length + * @dma_addr: Physical address + * + * Return nothing. + */ +static void +_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr) +{ + Mpi2SGESimple32_t *sgel = paddr; + + flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING | + MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; + sgel->FlagsLength = cpu_to_le32(flags_length); + sgel->Address = cpu_to_le32(dma_addr); +} + + +/** + * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr. + * @paddr: virtual address for SGE + * @flags_length: SGE flags and data transfer length + * @dma_addr: Physical address + * + * Return nothing. + */ +static void +_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr) +{ + Mpi2SGESimple64_t *sgel = paddr; + + flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING | + MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT; + sgel->FlagsLength = cpu_to_le32(flags_length); + sgel->Address = cpu_to_le64(dma_addr); +} + +/** + * _base_get_chain_buffer_tracker - obtain chain tracker + * @ioc: per adapter object + * @smid: smid associated to an IO request + * + * Returns chain tracker(from ioc->free_chain_list) + */ +static struct chain_tracker * +_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct chain_tracker *chain_req; + unsigned long flags; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->free_chain_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "chain buffers not available\n", ioc->name)); + return NULL; + } + chain_req = list_entry(ioc->free_chain_list.next, + struct chain_tracker, tracker_list); + list_del_init(&chain_req->tracker_list); + list_add_tail(&chain_req->tracker_list, + &ioc->scsi_lookup[smid - 1].chain_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return chain_req; +} + + +/** + * _base_build_sg - build generic sg + * @ioc: per adapter object + * @psge: virtual address for SGE + * @data_out_dma: physical address for WRITES + * @data_out_sz: data xfer size for WRITES + * @data_in_dma: physical address for READS + * @data_in_sz: data xfer size for READS + * + * Return nothing. 
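+ *
+ * Editor's note -- a hedged usage sketch (buffer names invented for
+ * illustration): for a bidirectional passthrough carrying one outbound
+ * and one inbound buffer, a caller would issue
+ *
+ *	_base_build_sg(ioc, psge, out_dma, out_sz, in_dma, in_sz);
+ *
+ * and, as the body below shows, the WRITE element is always emitted
+ * before the READ element.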
+ */ +static void +_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, + size_t data_in_sz) +{ + u32 sgl_flags; + + if (!data_out_sz && !data_in_sz) { + _base_build_zero_len_sge(ioc, psge); + return; + } + + if (data_out_sz && data_in_sz) { + /* WRITE sgel first */ + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_out_sz, data_out_dma); + + /* incr sgel */ + psge += ioc->sge_size; + + /* READ sgel last */ + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_in_sz, data_in_dma); + } else if (data_out_sz) /* WRITE */ { + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_out_sz, data_out_dma); + } else if (data_in_sz) /* READ */ { + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + data_in_sz, data_in_dma); + } +} + +/* IEEE format sgls */ + +/** + * _base_add_sg_single_ieee - add sg element for IEEE format + * @paddr: virtual address for SGE + * @flags: SGE flags + * @chain_offset: number of 128 byte elements from start of segment + * @length: data transfer length + * @dma_addr: Physical address + * + * Return nothing. + */ +static void +_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length, + dma_addr_t dma_addr) +{ + Mpi25IeeeSgeChain64_t *sgel = paddr; + + sgel->Flags = flags; + sgel->NextChainOffset = chain_offset; + sgel->Length = cpu_to_le32(length); + sgel->Address = cpu_to_le64(dma_addr); +} + +/** + * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format + * @ioc: per adapter object + * @paddr: virtual address for SGE + * + * Create a zero length scatter gather entry to insure the IOCs hardware has + * something to use if the target device goes brain dead and tries + * to send data even when none is asked for. + * + * Return nothing. + */ +static void +_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr) +{ + u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST); + _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1); +} + +/** + * _base_build_sg_scmd - main sg creation routine + * @ioc: per adapter object + * @scmd: scsi command + * @smid: system request message index + * Context: none. + * + * The main routine that builds scatter gather table from a given + * scsi request sent via the .queuecommand main handler. 
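+ *
+ * Editor's note: in this MPI2 variant, ChainOffset and the chain
+ * element's offset are expressed in 32-bit dwords (hence the /4
+ * divisions below); the IEEE variant that follows counts whole SGE
+ * elements instead.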
+ * + * Returns 0 success, anything else error + */ +static int +_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd, u16 smid) +{ + Mpi2SCSIIORequest_t *mpi_request; + dma_addr_t chain_dma; + struct scatterlist *sg_scmd; + void *sg_local, *chain; + u32 chain_offset; + u32 chain_length; + u32 chain_flags; + int sges_left; + u32 sges_in_segment; + u32 sgl_flags; + u32 sgl_flags_last_element; + u32 sgl_flags_end_buffer; + struct chain_tracker *chain_req; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + /* init scatter gather flags */ + sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT; + if (scmd->sc_data_direction == DMA_TO_DEVICE) + sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC; + sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT) + << MPI2_SGE_FLAGS_SHIFT; + sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT | + MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST) + << MPI2_SGE_FLAGS_SHIFT; + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + + sg_scmd = scsi_sglist(scmd); + sges_left = scsi_dma_map(scmd); + if (sges_left < 0) { + sdev_printk(KERN_ERR, scmd->device, + "pci_map_sg failed: request for %d bytes!\n", + scsi_bufflen(scmd)); + return -ENOMEM; + } + + sg_local = &mpi_request->SGL; + sges_in_segment = ioc->max_sges_in_main_message; + if (sges_left <= sges_in_segment) + goto fill_in_last_segment; + + mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) + + (sges_in_segment * ioc->sge_size))/4; + + /* fill in main message segment when there is a chain following */ + while (sges_in_segment) { + if (sges_in_segment == 1) + ioc->base_add_sg_single(sg_local, + sgl_flags_last_element | sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + else + ioc->base_add_sg_single(sg_local, sgl_flags | + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size; + sges_left--; + sges_in_segment--; + } + + /* initializing the chain flags and pointers */ + chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT; + chain_req = _base_get_chain_buffer_tracker(ioc, smid); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + do { + sges_in_segment = (sges_left <= + ioc->max_sges_in_chain_message) ? sges_left : + ioc->max_sges_in_chain_message; + chain_offset = (sges_left == sges_in_segment) ? 
+ 0 : (sges_in_segment * ioc->sge_size)/4; + chain_length = sges_in_segment * ioc->sge_size; + if (chain_offset) { + chain_offset = chain_offset << + MPI2_SGE_CHAIN_OFFSET_SHIFT; + chain_length += ioc->sge_size; + } + ioc->base_add_sg_single(sg_local, chain_flags | chain_offset | + chain_length, chain_dma); + sg_local = chain; + if (!chain_offset) + goto fill_in_last_segment; + + /* fill in chain segments */ + while (sges_in_segment) { + if (sges_in_segment == 1) + ioc->base_add_sg_single(sg_local, + sgl_flags_last_element | + sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + else + ioc->base_add_sg_single(sg_local, sgl_flags | + sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size; + sges_left--; + sges_in_segment--; + } + + chain_req = _base_get_chain_buffer_tracker(ioc, smid); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + } while (1); + + + fill_in_last_segment: + + /* fill the last segment */ + while (sges_left) { + if (sges_left == 1) + ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer | + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + else + ioc->base_add_sg_single(sg_local, sgl_flags | + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size; + sges_left--; + } + + return 0; +} + +/** + * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format + * @ioc: per adapter object + * @scmd: scsi command + * @smid: system request message index + * Context: none. + * + * The main routine that builds scatter gather table from a given + * scsi request sent via the .queuecommand main handler. + * + * Returns 0 success, anything else error + */ +static int +_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd, u16 smid) +{ + Mpi2SCSIIORequest_t *mpi_request; + dma_addr_t chain_dma; + struct scatterlist *sg_scmd; + void *sg_local, *chain; + u32 chain_offset; + u32 chain_length; + int sges_left; + u32 sges_in_segment; + u8 simple_sgl_flags; + u8 simple_sgl_flags_last; + u8 chain_sgl_flags; + struct chain_tracker *chain_req; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + /* init scatter gather flags */ + simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + simple_sgl_flags_last = simple_sgl_flags | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST; + chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + + sg_scmd = scsi_sglist(scmd); + sges_left = scsi_dma_map(scmd); + if (sges_left < 0) { + sdev_printk(KERN_ERR, scmd->device, + "pci_map_sg failed: request for %d bytes!\n", + scsi_bufflen(scmd)); + return -ENOMEM; + } + + sg_local = &mpi_request->SGL; + sges_in_segment = (ioc->request_sz - + offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee; + if (sges_left <= sges_in_segment) + goto fill_in_last_segment; + + mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) + + (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee); + + /* fill in main message segment when there is a chain following */ + while (sges_in_segment > 1) { + _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + sges_in_segment--; + } + + /* initializing the pointers */ + chain_req = _base_get_chain_buffer_tracker(ioc, smid); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + 
chain_dma = chain_req->chain_buffer_dma; + do { + sges_in_segment = (sges_left <= + ioc->max_sges_in_chain_message) ? sges_left : + ioc->max_sges_in_chain_message; + chain_offset = (sges_left == sges_in_segment) ? + 0 : sges_in_segment; + chain_length = sges_in_segment * ioc->sge_size_ieee; + if (chain_offset) + chain_length += ioc->sge_size_ieee; + _base_add_sg_single_ieee(sg_local, chain_sgl_flags, + chain_offset, chain_length, chain_dma); + + sg_local = chain; + if (!chain_offset) + goto fill_in_last_segment; + + /* fill in chain segments */ + while (sges_in_segment) { + _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + sges_in_segment--; + } + + chain_req = _base_get_chain_buffer_tracker(ioc, smid); + if (!chain_req) + return -1; + chain = chain_req->chain_buffer; + chain_dma = chain_req->chain_buffer_dma; + } while (1); + + + fill_in_last_segment: + + /* fill the last segment */ + while (sges_left > 0) { + if (sges_left == 1) + _base_add_sg_single_ieee(sg_local, + simple_sgl_flags_last, 0, sg_dma_len(sg_scmd), + sg_dma_address(sg_scmd)); + else + _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0, + sg_dma_len(sg_scmd), sg_dma_address(sg_scmd)); + sg_scmd = sg_next(sg_scmd); + sg_local += ioc->sge_size_ieee; + sges_left--; + } + + return 0; +} + +/** + * _base_build_sg_ieee - build generic sg for IEEE format + * @ioc: per adapter object + * @psge: virtual address for SGE + * @data_out_dma: physical address for WRITES + * @data_out_sz: data xfer size for WRITES + * @data_in_dma: physical address for READS + * @data_in_sz: data xfer size for READS + * + * Return nothing. + */ +static void +_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma, + size_t data_in_sz) +{ + u8 sgl_flags; + + if (!data_out_sz && !data_in_sz) { + _base_build_zero_len_sge_ieee(ioc, psge); + return; + } + + if (data_out_sz && data_in_sz) { + /* WRITE sgel first */ + sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, + data_out_dma); + + /* incr sgel */ + psge += ioc->sge_size_ieee; + + /* READ sgel last */ + sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, + data_in_dma); + } else if (data_out_sz) /* WRITE */ { + sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz, + data_out_dma); + } else if (data_in_sz) /* READ */ { + sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | + MPI25_IEEE_SGE_FLAGS_END_OF_LIST | + MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR; + _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz, + data_in_dma); + } +} + +#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10)) + +/** + * _base_config_dma_addressing - set dma addressing + * @ioc: per adapter object + * @pdev: PCI device struct + * + * Returns 0 for success, non-zero for failure. 
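+ *
+ * Editor's note: the pci_set_dma_mask()/pci_set_consistent_dma_mask()
+ * pair used below predates the generic DMA API; a sketch of the modern
+ * equivalent (not a drop-in patch for this 4.4 tree) would be
+ *
+ *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
+ *		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));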
+ */ +static int +_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) +{ + struct sysinfo s; + u64 consistent_dma_mask; + + if (ioc->dma_mask) + consistent_dma_mask = DMA_BIT_MASK(64); + else + consistent_dma_mask = DMA_BIT_MASK(32); + + if (sizeof(dma_addr_t) > 4) { + const uint64_t required_mask = + dma_get_required_mask(&pdev->dev); + if ((required_mask > DMA_BIT_MASK(32)) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && + !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) { + ioc->base_add_sg_single = &_base_add_sg_single_64; + ioc->sge_size = sizeof(Mpi2SGESimple64_t); + ioc->dma_mask = 64; + goto out; + } + } + + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) + && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) { + ioc->base_add_sg_single = &_base_add_sg_single_32; + ioc->sge_size = sizeof(Mpi2SGESimple32_t); + ioc->dma_mask = 32; + } else + return -ENODEV; + + out: + si_meminfo(&s); + pr_info(MPT3SAS_FMT + "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n", + ioc->name, ioc->dma_mask, convert_to_kb(s.totalram)); + + return 0; +} + +static int +_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc, + struct pci_dev *pdev) +{ + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + return -ENODEV; + } + return 0; +} + +/** + * _base_check_enable_msix - checks MSIX capabable. + * @ioc: per adapter object + * + * Check to see if card is capable of MSIX, and set number + * of available msix vectors + */ +static int +_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc) +{ + int base; + u16 message_control; + + /* Check whether controller SAS2008 B0 controller, + * if it is SAS2008 B0 controller use IO-APIC instead of MSIX + */ + if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 && + ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) { + return -EINVAL; + } + + base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); + if (!base) { + dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n", + ioc->name)); + return -EINVAL; + } + + /* get msix vector count */ + /* NUMA_IO not supported for older controllers */ + if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 || + ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2) + ioc->msix_vector_count = 1; + else { + pci_read_config_word(ioc->pdev, base + 2, &message_control); + ioc->msix_vector_count = (message_control & 0x3FF) + 1; + } + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "msix is supported, vector_count(%d)\n", + ioc->name, ioc->msix_vector_count)); + return 0; +} + +/** + * _base_free_irq - free irq + * @ioc: per adapter object + * + * Freeing respective reply_queue from the list. 
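+ *
+ * Editor's note: the teardown order below matters -- the affinity hint
+ * is cleared and the vector quiesced via synchronize_irq() before
+ * free_irq(), so no handler can still be running when the reply_queue
+ * is kfree()d.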
+ */ +static void +_base_free_irq(struct MPT3SAS_ADAPTER *ioc) +{ + struct adapter_reply_queue *reply_q, *next; + + if (list_empty(&ioc->reply_queue_list)) + return; + + list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) { + list_del(&reply_q->list); + irq_set_affinity_hint(reply_q->vector, NULL); + free_cpumask_var(reply_q->affinity_hint); + synchronize_irq(reply_q->vector); + free_irq(reply_q->vector, reply_q); + kfree(reply_q); + } +} + +/** + * _base_request_irq - request irq + * @ioc: per adapter object + * @index: msix index into vector table + * @vector: irq vector + * + * Inserting respective reply_queue into the list. + */ +static int +_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector) +{ + struct adapter_reply_queue *reply_q; + int r; + + reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL); + if (!reply_q) { + pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n", + ioc->name, (int)sizeof(struct adapter_reply_queue)); + return -ENOMEM; + } + reply_q->ioc = ioc; + reply_q->msix_index = index; + reply_q->vector = vector; + + if (!alloc_cpumask_var(&reply_q->affinity_hint, GFP_KERNEL)) { + kfree(reply_q); /* don't leak the tracker on allocation failure */ + return -ENOMEM; + } + cpumask_clear(reply_q->affinity_hint); + + atomic_set(&reply_q->busy, 0); + if (ioc->msix_enable) + snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d", + ioc->driver_name, ioc->id, index); + else + snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d", + ioc->driver_name, ioc->id); + r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name, + reply_q); + if (r) { + pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n", + reply_q->name, vector); + free_cpumask_var(reply_q->affinity_hint); + kfree(reply_q); + return -EBUSY; + } + + INIT_LIST_HEAD(&reply_q->list); + list_add_tail(&reply_q->list, &ioc->reply_queue_list); + return 0; +} + +/** + * _base_assign_reply_queues - assigning msix index for each cpu + * @ioc: per adapter object + * + * The end user would need to set the affinity via /proc/irq/#/smp_affinity + * + * It would be nice if we could call irq_set_affinity; however, it is not + * an exported symbol + */ +static void +_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned int cpu, nr_cpus, nr_msix, index = 0; + struct adapter_reply_queue *reply_q; + + if (!_base_is_controller_msix_enabled(ioc)) + return; + + memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz); + + nr_cpus = num_online_cpus(); + nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count, + ioc->facts.MaxMSIxVectors); + if (!nr_msix) + return; + + cpu = cpumask_first(cpu_online_mask); + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + + unsigned int i, group = nr_cpus / nr_msix; + + if (cpu >= nr_cpus) + break; + + if (index < nr_cpus % nr_msix) + group++; + + for (i = 0 ; i < group ; i++) { + ioc->cpu_msix_table[cpu] = index; + cpumask_or(reply_q->affinity_hint, + reply_q->affinity_hint, get_cpu_mask(cpu)); + cpu = cpumask_next(cpu, cpu_online_mask); + } + + if (irq_set_affinity_hint(reply_q->vector, + reply_q->affinity_hint)) + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "error setting affinity hint for irq vector %d\n", + ioc->name, reply_q->vector)); + index++; + } +} + +/** + * _base_disable_msix - disables msix + * @ioc: per adapter object + * + */ +static void +_base_disable_msix(struct MPT3SAS_ADAPTER *ioc) +{ + if (!ioc->msix_enable) + return; + pci_disable_msix(ioc->pdev); + ioc->msix_enable = 0; +} + +/** + * _base_enable_msix - enables msix, falling back to io_apic + * @ioc: per adapter object + * + */ +static int +_base_enable_msix(struct MPT3SAS_ADAPTER 
*ioc) +{ + struct msix_entry *entries, *a; + int r; + int i; + u8 try_msix = 0; + + if (msix_disable == -1 || msix_disable == 0) + try_msix = 1; + + if (!try_msix) + goto try_ioapic; + + if (_base_check_enable_msix(ioc) != 0) + goto try_ioapic; + + ioc->reply_queue_count = min_t(int, ioc->cpu_count, + ioc->msix_vector_count); + + printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores" + ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count, + ioc->cpu_count, max_msix_vectors); + + if (!ioc->rdpq_array_enable && max_msix_vectors == -1) + max_msix_vectors = 8; + + if (max_msix_vectors > 0) { + ioc->reply_queue_count = min_t(int, max_msix_vectors, + ioc->reply_queue_count); + ioc->msix_vector_count = ioc->reply_queue_count; + } else if (max_msix_vectors == 0) + goto try_ioapic; + + entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), + GFP_KERNEL); + if (!entries) { + dfailprintk(ioc, pr_info(MPT3SAS_FMT + "kcalloc failed @ at %s:%d/%s() !!!\n", + ioc->name, __FILE__, __LINE__, __func__)); + goto try_ioapic; + } + + for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) + a->entry = i; + + r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count); + if (r) { + dfailprintk(ioc, pr_info(MPT3SAS_FMT + "pci_enable_msix_exact failed (r=%d) !!!\n", + ioc->name, r)); + kfree(entries); + goto try_ioapic; + } + + ioc->msix_enable = 1; + for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) { + r = _base_request_irq(ioc, i, a->vector); + if (r) { + _base_free_irq(ioc); + _base_disable_msix(ioc); + kfree(entries); + goto try_ioapic; + } + } + + kfree(entries); + return 0; + +/* failback to io_apic interrupt routing */ + try_ioapic: + + ioc->reply_queue_count = 1; + r = _base_request_irq(ioc, 0, ioc->pdev->irq); + + return r; +} + +/** + * mpt3sas_base_unmap_resources - free controller resources + * @ioc: per adapter object + */ +void +mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + + dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n", + ioc->name, __func__)); + + _base_free_irq(ioc); + _base_disable_msix(ioc); + + if (ioc->msix96_vector) { + kfree(ioc->replyPostRegisterIndex); + ioc->replyPostRegisterIndex = NULL; + } + + if (ioc->chip_phys) { + iounmap(ioc->chip); + ioc->chip_phys = 0; + } + + if (pci_is_enabled(pdev)) { + pci_release_selected_regions(ioc->pdev, ioc->bars); + pci_disable_pcie_error_reporting(pdev); + pci_disable_device(pdev); + } +} + +/** + * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) + * @ioc: per adapter object + * + * Returns 0 for success, non-zero for failure. 
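+ *
+ * Editor's note: _base_enable_msix above follows the classic
+ * "MSI-X or bust, then legacy IRQ" shape. A compact sketch of just
+ * that shape (illustrative only; the sketch_* name is made up):
+ */
+#if 0	/* illustrative sketch -- never compiled */
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+/* returns the vector count enabled, or 0 meaning "use pdev->irq" */
+static int sketch_enable_msix(struct pci_dev *pdev, int want)
+{
+	struct msix_entry *entries;
+	int i, r;
+
+	entries = kcalloc(want, sizeof(*entries), GFP_KERNEL);
+	if (!entries)
+		return 0;
+	for (i = 0; i < want; i++)
+		entries[i].entry = i;	/* request vectors 0..want-1 */
+	r = pci_enable_msix_exact(pdev, entries, want);
+	kfree(entries);	/* the table is only needed for the call itself */
+	return r ? 0 : want;
+}
+#endif
+/*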
+ */ +int +mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) +{ + struct pci_dev *pdev = ioc->pdev; + u32 memap_sz; + u32 pio_sz; + int i, r = 0; + u64 pio_chip = 0; + u64 chip_phys = 0; + struct adapter_reply_queue *reply_q; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", + ioc->name, __func__)); + + ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM); + if (pci_enable_device_mem(pdev)) { + pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n", + ioc->name); + ioc->bars = 0; + return -ENODEV; + } + + + if (pci_request_selected_regions(pdev, ioc->bars, + ioc->driver_name)) { + pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n", + ioc->name); + ioc->bars = 0; + r = -ENODEV; + goto out_fail; + } + +/* AER (Advanced Error Reporting) hooks */ + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); + + + if (_base_config_dma_addressing(ioc, pdev) != 0) { + pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n", + ioc->name, pci_name(pdev)); + r = -ENODEV; + goto out_fail; + } + + for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) && + (!memap_sz || !pio_sz); i++) { + if (pci_resource_flags(pdev, i) & IORESOURCE_IO) { + if (pio_sz) + continue; + pio_chip = (u64)pci_resource_start(pdev, i); + pio_sz = pci_resource_len(pdev, i); + } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { + if (memap_sz) + continue; + ioc->chip_phys = pci_resource_start(pdev, i); + chip_phys = (u64)ioc->chip_phys; + memap_sz = pci_resource_len(pdev, i); + ioc->chip = ioremap(ioc->chip_phys, memap_sz); + } + } + + if (ioc->chip == NULL) { + pr_err(MPT3SAS_FMT "unable to map adapter memory " + "or resource not found\n", ioc->name); + r = -EINVAL; + goto out_fail; + } + + _base_mask_interrupts(ioc); + + r = _base_get_ioc_facts(ioc, CAN_SLEEP); + if (r) + goto out_fail; + + if (!ioc->rdpq_array_enable_assigned) { + ioc->rdpq_array_enable = ioc->rdpq_array_capable; + ioc->rdpq_array_enable_assigned = 1; + } + + r = _base_enable_msix(ioc); + if (r) + goto out_fail; + + /* Use the Combined reply queue feature only for SAS3 C0 & higher + * revision HBAs and also only when reply queue count is greater than 8 + */ + if (ioc->msix96_vector && ioc->reply_queue_count > 8) { + /* Determine the Supplemental Reply Post Host Index Register + * addresses. The Supplemental Reply Post Host Index Registers + * start at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET, and + * each register is MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET + * bytes from the previous one. 
+ */ + ioc->replyPostRegisterIndex = kcalloc( + MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT, + sizeof(resource_size_t *), GFP_KERNEL); + if (!ioc->replyPostRegisterIndex) { + dfailprintk(ioc, printk(MPT3SAS_FMT + "allocation for reply Post Register Index failed!!!\n", + ioc->name)); + r = -ENOMEM; + goto out_fail; + } + + for (i = 0; i < MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT; i++) { + ioc->replyPostRegisterIndex[i] = (resource_size_t *) + ((u8 *)&ioc->chip->Doorbell + + MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET + + (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET)); + } + } else + ioc->msix96_vector = 0; + + if (ioc->is_warpdrive) { + ioc->reply_post_host_index[0] = (resource_size_t __iomem *) + &ioc->chip->ReplyPostHostIndex; + + for (i = 1; i < ioc->cpu_msix_table_sz; i++) + ioc->reply_post_host_index[i] = + (resource_size_t __iomem *) + ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) + * 4))); + } + + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) + pr_info(MPT3SAS_FMT "%s: IRQ %d\n", + reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : + "IO-APIC enabled"), reply_q->vector); + + pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n", + ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz); + pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n", + ioc->name, (unsigned long long)pio_chip, pio_sz); + + /* Save PCI configuration state for recovery from PCI AER/EEH errors */ + pci_save_state(pdev); + return 0; + + out_fail: + mpt3sas_base_unmap_resources(ioc); + return r; +} + +/** + * mpt3sas_base_get_msg_frame - obtain request mf pointer + * @ioc: per adapter object + * @smid: system request message index(smid zero is invalid) + * + * Returns virt pointer to message frame. + */ +void * +mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->request + (smid * ioc->request_sz)); +} + +/** + * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr + * @ioc: per adapter object + * @smid: system request message index + * + * Returns virt pointer to sense buffer. + */ +void * +mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE)); +} + +/** + * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr + * @ioc: per adapter object + * @smid: system request message index + * + * Returns phys pointer to the low 32bit address of the sense buffer. + */ +__le32 +mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + return cpu_to_le32(ioc->sense_dma + ((smid - 1) * + SCSI_SENSE_BUFFERSIZE)); +} + +/** + * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address + * @ioc: per adapter object + * @phys_addr: lower 32 physical addr of the reply + * + * Converts 32bit lower physical addr into a virt address. 
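+ *
+ * Editor's note: the accessors above are all the same base + index
+ * arithmetic over contiguous DMA pools. The layout rule, distilled
+ * (illustrative only): smid 0 is reserved, request frames are indexed
+ * by smid directly, per-command buffers by (smid - 1).
+ */
+#if 0	/* illustrative sketch -- never compiled */
+static void *sketch_msg_frame(void *pool, u16 smid, u16 frame_sz)
+{
+	return (u8 *)pool + (size_t)smid * frame_sz;	/* frame 0 = smid 0, reserved */
+}
+
+static void *sketch_sense_buf(void *pool, u16 smid, u16 buf_sz)
+{
+	return (u8 *)pool + (size_t)(smid - 1) * buf_sz;	/* zero-based */
+}
+#endif
+/*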
+ */ +void * +mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr) +{ + if (!phys_addr) + return NULL; + return ioc->reply + (phys_addr - (u32)ioc->reply_dma); +} + +static inline u8 +_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc) +{ + return ioc->cpu_msix_table[raw_smp_processor_id()]; +} + +/** + * mpt3sas_base_get_smid - obtain a free smid from internal queue + * @ioc: per adapter object + * @cb_idx: callback index + * + * Returns smid (zero is invalid) + */ +u16 +mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) +{ + unsigned long flags; + struct request_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->internal_free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + pr_err(MPT3SAS_FMT "%s: smid not available\n", + ioc->name, __func__); + return 0; + } + + request = list_entry(ioc->internal_free_list.next, + struct request_tracker, tracker_list); + request->cb_idx = cb_idx; + smid = request->smid; + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +/** + * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue + * @ioc: per adapter object + * @cb_idx: callback index + * @scmd: pointer to scsi command object + * + * Returns smid (zero is invalid) + */ +u16 +mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, + struct scsi_cmnd *scmd) +{ + unsigned long flags; + struct scsiio_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + pr_err(MPT3SAS_FMT "%s: smid not available\n", + ioc->name, __func__); + return 0; + } + + request = list_entry(ioc->free_list.next, + struct scsiio_tracker, tracker_list); + request->scmd = scmd; + request->cb_idx = cb_idx; + smid = request->smid; + request->msix_io = _base_get_msix_index(ioc); + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +/** + * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue + * @ioc: per adapter object + * @cb_idx: callback index + * + * Returns smid (zero is invalid) + */ +u16 +mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx) +{ + unsigned long flags; + struct request_tracker *request; + u16 smid; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (list_empty(&ioc->hpr_free_list)) { + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return 0; + } + + request = list_entry(ioc->hpr_free_list.next, + struct request_tracker, tracker_list); + request->cb_idx = cb_idx; + smid = request->smid; + list_del(&request->tracker_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + return smid; +} + +/** + * mpt3sas_base_free_smid - put smid back on free_list + * @ioc: per adapter object + * @smid: system request message index + * + * Return nothing. 
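+ *
+ * Editor's note: the three get_smid variants above are one pattern --
+ * pop a tracker off a free list under scsi_lookup_lock. Distilled
+ * (illustrative only, reusing the driver's struct request_tracker):
+ */
+#if 0	/* illustrative sketch -- never compiled */
+static u16 sketch_get_smid(spinlock_t *lock, struct list_head *free_list)
+{
+	struct request_tracker *t;
+	unsigned long flags;
+	u16 smid = 0;	/* zero stays the "invalid smid" sentinel */
+
+	spin_lock_irqsave(lock, flags);
+	if (!list_empty(free_list)) {
+		t = list_first_entry(free_list, struct request_tracker,
+		    tracker_list);
+		list_del(&t->tracker_list);
+		smid = t->smid;
+	}
+	spin_unlock_irqrestore(lock, flags);
+	return smid;
+}
+#endif
+/*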
+ */ +void +mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + unsigned long flags; + int i; + struct chain_tracker *chain_req, *next; + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + if (smid < ioc->hi_priority_smid) { + /* scsiio queue */ + i = smid - 1; + if (!list_empty(&ioc->scsi_lookup[i].chain_list)) { + list_for_each_entry_safe(chain_req, next, + &ioc->scsi_lookup[i].chain_list, tracker_list) { + list_del_init(&chain_req->tracker_list); + list_add(&chain_req->tracker_list, + &ioc->free_chain_list); + } + } + ioc->scsi_lookup[i].cb_idx = 0xFF; + ioc->scsi_lookup[i].scmd = NULL; + ioc->scsi_lookup[i].direct_io = 0; + list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list); + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + /* + * See _wait_for_commands_to_complete() call with regards + * to this code. + */ + if (ioc->shost_recovery && ioc->pending_io_count) { + if (ioc->pending_io_count == 1) + wake_up(&ioc->reset_wq); + ioc->pending_io_count--; + } + return; + } else if (smid < ioc->internal_smid) { + /* hi-priority */ + i = smid - ioc->hi_priority_smid; + ioc->hpr_lookup[i].cb_idx = 0xFF; + list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list); + } else if (smid <= ioc->hba_queue_depth) { + /* internal queue */ + i = smid - ioc->internal_smid; + ioc->internal_lookup[i].cb_idx = 0xFF; + list_add(&ioc->internal_lookup[i].tracker_list, + &ioc->internal_free_list); + } + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); +} + +/** + * _base_writeq - 64 bit write to MMIO + * @ioc: per adapter object + * @b: data payload + * @addr: address in MMIO space + * @writeq_lock: spin lock + * + * Glue for handling an atomic 64 bit word to MMIO. This special handling takes + * care of 32 bit environments where it is not guaranteed that the entire word + * is sent in one transfer. + */ +#if defined(writeq) && defined(CONFIG_64BIT) +static inline void +_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) +{ + writeq(cpu_to_le64(b), addr); +} +#else +static inline void +_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock) +{ + unsigned long flags; + __u64 data_out = cpu_to_le64(b); + + spin_lock_irqsave(writeq_lock, flags); + writel((u32)(data_out), addr); + writel((u32)(data_out >> 32), (addr + 4)); + spin_unlock_irqrestore(writeq_lock, flags); +} +#endif + +/** + * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle + * + * Return nothing. + */ +void +mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + + descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; + descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * mpt3sas_base_put_smid_fast_path - send fast path request to firmware + * @ioc: per adapter object + * @smid: system request message index + * @handle: device handle + * + * Return nothing. 
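+ *
+ * Editor's note: _base_writeq above exists because a request descriptor
+ * must hit the chip as one 64-bit word; where writeq is unavailable the
+ * two 32-bit halves are serialized under a lock. A distilled sketch
+ * (illustrative only):
+ */
+#if 0	/* illustrative sketch -- never compiled */
+static void sketch_write64(u64 v, void __iomem *addr, spinlock_t *lk)
+{
+	unsigned long flags;
+
+	/* two writel()s are not atomic; the lock keeps another CPU from
+	 * interleaving its own descriptor halves with ours
+	 */
+	spin_lock_irqsave(lk, flags);
+	writel(lower_32_bits(v), addr);
+	writel(upper_32_bits(v), addr + 4);
+	spin_unlock_irqrestore(lk, flags);
+}
+#endif
+/*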
+ */ +void +mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + descriptor.SCSIIO.RequestFlags = + MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO; + descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); + descriptor.SCSIIO.SMID = cpu_to_le16(smid); + descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); + descriptor.SCSIIO.LMID = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware + * @ioc: per adapter object + * @smid: system request message index + * @msix_task: the MSI-X index of the I/O in the case of a task abort, else 0 + * + * Return nothing. + */ +void +mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 msix_task) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + descriptor.HighPriority.RequestFlags = + MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; + descriptor.HighPriority.MSIxIndex = msix_task; + descriptor.HighPriority.SMID = cpu_to_le16(smid); + descriptor.HighPriority.LMID = 0; + descriptor.HighPriority.Reserved1 = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * mpt3sas_base_put_smid_default - Default, primarily used for config pages + * @ioc: per adapter object + * @smid: system request message index + * + * Return nothing. + */ +void +mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + Mpi2RequestDescriptorUnion_t descriptor; + u64 *request = (u64 *)&descriptor; + + descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; + descriptor.Default.MSIxIndex = _base_get_msix_index(ioc); + descriptor.Default.SMID = cpu_to_le16(smid); + descriptor.Default.LMID = 0; + descriptor.Default.DescriptorTypeDependent = 0; + _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow, + &ioc->scsi_lookup_lock); +} + +/** + * _base_display_OEMs_branding - Display branding string + * @ioc: per adapter object + * + * Return nothing. 
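+ *
+ * Editor's note: all the put_smid variants above funnel into the same
+ * 8-byte descriptor write; reinterpreting the union as a u64 only works
+ * because every descriptor flavor is exactly 64 bits. A compile-time
+ * check one could add (illustrative only, not in the driver):
+ */
+#if 0	/* illustrative sketch -- never compiled */
+static inline void sketch_descriptor_size_check(void)
+{
+	BUILD_BUG_ON(sizeof(Mpi2RequestDescriptorUnion_t) != sizeof(u64));
+}
+#endif
+/*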
+ */ +static void +_base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc) +{ + if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL) + return; + + switch (ioc->pdev->subsystem_vendor) { + case PCI_VENDOR_ID_INTEL: + switch (ioc->pdev->device) { + case MPI2_MFGPAGE_DEVID_SAS2008: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_INTEL_RMS2LL080_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS2LL080_BRANDING); + break; + case MPT2SAS_INTEL_RMS2LL040_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS2LL040_BRANDING); + break; + case MPT2SAS_INTEL_SSD910_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_SSD910_BRANDING); + break; + default: + pr_info(MPT3SAS_FMT + "Intel(R) Controller: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } + break; /* don't fall through to the next device case */ + case MPI2_MFGPAGE_DEVID_SAS2308_2: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_INTEL_RS25GB008_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RS25GB008_BRANDING); + break; + case MPT2SAS_INTEL_RMS25JB080_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS25JB080_BRANDING); + break; + case MPT2SAS_INTEL_RMS25JB040_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS25JB040_BRANDING); + break; + case MPT2SAS_INTEL_RMS25KB080_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS25KB080_BRANDING); + break; + case MPT2SAS_INTEL_RMS25KB040_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS25KB040_BRANDING); + break; + case MPT2SAS_INTEL_RMS25LB040_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS25LB040_BRANDING); + break; + case MPT2SAS_INTEL_RMS25LB080_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_INTEL_RMS25LB080_BRANDING); + break; + default: + pr_info(MPT3SAS_FMT + "Intel(R) Controller: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } + break; /* don't fall through to the next device case */ + case MPI25_MFGPAGE_DEVID_SAS3008: + switch (ioc->pdev->subsystem_device) { + case MPT3SAS_INTEL_RMS3JC080_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_INTEL_RMS3JC080_BRANDING); + break; + + case MPT3SAS_INTEL_RS3GC008_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_INTEL_RS3GC008_BRANDING); + break; + case MPT3SAS_INTEL_RS3FC044_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_INTEL_RS3FC044_BRANDING); + break; + case MPT3SAS_INTEL_RS3UC080_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_INTEL_RS3UC080_BRANDING); + break; + default: + pr_info(MPT3SAS_FMT + "Intel(R) Controller: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } + break; + default: + pr_info(MPT3SAS_FMT + "Intel(R) Controller: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } + break; + case PCI_VENDOR_ID_DELL: + switch (ioc->pdev->device) { + case MPI2_MFGPAGE_DEVID_SAS2008: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING); + break; + case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING); + break; + case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING); + break; + case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING); + break; + case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING); + break; + case MPT2SAS_DELL_PERC_H200_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_DELL_PERC_H200_BRANDING); + break; + case MPT2SAS_DELL_6GBPS_SAS_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_DELL_6GBPS_SAS_BRANDING); + break; + default: + pr_info(MPT3SAS_FMT + "Dell 6Gbps HBA: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } + break; + case MPI25_MFGPAGE_DEVID_SAS3008: + switch (ioc->pdev->subsystem_device) { + case MPT3SAS_DELL_12G_HBA_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_DELL_12G_HBA_BRANDING); + break; + default: + pr_info(MPT3SAS_FMT + "Dell 12Gbps HBA: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } + break; + default: + pr_info(MPT3SAS_FMT + "Dell HBA: Subsystem ID: 0x%X\n", ioc->name, + ioc->pdev->subsystem_device); + break; + } + break; + case PCI_VENDOR_ID_CISCO: + switch (ioc->pdev->device) { + case MPI25_MFGPAGE_DEVID_SAS3008: + switch (ioc->pdev->subsystem_device) { + case MPT3SAS_CISCO_12G_8E_HBA_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_CISCO_12G_8E_HBA_BRANDING); + break; + case MPT3SAS_CISCO_12G_8I_HBA_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_CISCO_12G_8I_HBA_BRANDING); + break; + case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); + break; + default: + pr_info(MPT3SAS_FMT + "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } + break; + case MPI25_MFGPAGE_DEVID_SAS3108_1: + switch (ioc->pdev->subsystem_device) { + case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING); + break; + case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING + ); + break; + default: + pr_info(MPT3SAS_FMT + "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } + break; + default: + pr_info(MPT3SAS_FMT + "Cisco SAS HBA: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } + break; + case MPT2SAS_HP_3PAR_SSVID: + switch (ioc->pdev->device) { + case MPI2_MFGPAGE_DEVID_SAS2004: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING); + break; + default: + pr_info(MPT3SAS_FMT + "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } + break; /* don't fall through to the next device case */ + case MPI2_MFGPAGE_DEVID_SAS2308_2: + switch (ioc->pdev->subsystem_device) { + case MPT2SAS_HP_2_4_INTERNAL_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_HP_2_4_INTERNAL_BRANDING); + break; + case MPT2SAS_HP_2_4_EXTERNAL_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_HP_2_4_EXTERNAL_BRANDING); + break; + case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING); + break; + case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID: + pr_info(MPT3SAS_FMT "%s\n", ioc->name, + MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING); + break; + default: + pr_info(MPT3SAS_FMT + "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } + break; /* don't fall through to the HP default */ + default: + pr_info(MPT3SAS_FMT + "HP SAS HBA: Subsystem ID: 0x%X\n", + ioc->name, ioc->pdev->subsystem_device); + break; + } + break; + 
default: + break; + } +} + +/** + * _base_display_ioc_capabilities - Display the IOC's capabilities. + * @ioc: per adapter object + * + * Return nothing. + */ +static void +_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc) +{ + int i = 0; + char desc[17]; + u32 iounit_pg1_flags; + u32 bios_version; + + bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + strncpy(desc, ioc->manu_pg0.ChipName, 16); + desc[16] = '\0'; /* ChipName is not guaranteed to be NUL terminated */ + pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\ + "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n", + ioc->name, desc, + (ioc->facts.FWVersion.Word & 0xFF000000) >> 24, + (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16, + (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8, + ioc->facts.FWVersion.Word & 0x000000FF, + ioc->pdev->revision, + (bios_version & 0xFF000000) >> 24, + (bios_version & 0x00FF0000) >> 16, + (bios_version & 0x0000FF00) >> 8, + bios_version & 0x000000FF); + + _base_display_OEMs_branding(ioc); + + pr_info(MPT3SAS_FMT "Protocol=(", ioc->name); + + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) { + pr_info("Initiator"); + i++; + } + + if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) { + pr_info("%sTarget", i ? "," : ""); + i++; + } + + i = 0; + pr_info("), "); + pr_info("Capabilities=("); + + if (!ioc->hide_ir_msg) { + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) { + pr_info("Raid"); + i++; + } + } + + if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) { + pr_info("%sTLR", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) { + pr_info("%sMulticast", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) { + pr_info("%sBIDI Target", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) { + pr_info("%sEEDP", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) { + pr_info("%sSnapshot Buffer", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) { + pr_info("%sDiag Trace Buffer", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) { + pr_info("%sDiag Extended Buffer", i ? "," : ""); + i++; + } + + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) { + pr_info("%sTask Set Full", i ? "," : ""); + i++; + } + + iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); + if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) { + pr_info("%sNCQ", i ? "," : ""); + i++; + } + + pr_info(")\n"); +} + +/** + * mpt3sas_base_update_missing_delay - change the missing delay timers + * @ioc: per adapter object + * @device_missing_delay: amount of time till device is reported missing + * @io_missing_delay: interval after which IO is returned when there is a missing device + * + * Return nothing. + * + * Passed on the command line, this function will modify the device missing + * delay, as well as the io missing delay. This should be called at driver + * load time. 
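+ *
+ * Editor's note: ReportDeviceMissingDelay is a 7-bit field; values
+ * above 0x7F are stored in units of 16 seconds with the UNIT_16 flag
+ * set, which is what the function below implements. The encoding,
+ * distilled (illustrative only):
+ */
+#if 0	/* illustrative sketch -- never compiled */
+static u16 sketch_encode_dmd(u16 seconds)
+{
+	if (seconds <= 0x7F)
+		return seconds;			/* fits in 1-second units */
+	if (seconds > 0x7F0)
+		seconds = 0x7F0;		/* cap: 127 * 16 seconds */
+	return (seconds / 16) | MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
+}
+#endif
+/*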
+ */ +void +mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, + u16 device_missing_delay, u8 io_missing_delay) +{ + u16 dmd, dmd_new, dmd_orignal; + u8 io_missing_delay_original; + u16 sz; + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2ConfigReply_t mpi_reply; + u8 num_phys = 0; + u16 ioc_status; + + mpt3sas_config_get_number_hba_phys(ioc, &num_phys); + if (!num_phys) + return; + + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + /* device missing delay */ + dmd = sas_iounit_pg1->ReportDeviceMissingDelay; + if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) + dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; + else + dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; + dmd_orignal = dmd; + if (device_missing_delay > 0x7F) { + dmd = (device_missing_delay > 0x7F0) ? 0x7F0 : + device_missing_delay; + dmd = dmd / 16; + dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16; + } else + dmd = device_missing_delay; + sas_iounit_pg1->ReportDeviceMissingDelay = dmd; + + /* io missing delay */ + io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay; + sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay; + + if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, + sz)) { + if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) + dmd_new = (dmd & + MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; + else + dmd_new = + dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; + pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n", + ioc->name, dmd_orignal, dmd_new); + pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n", + ioc->name, io_missing_delay_original, + io_missing_delay); + ioc->device_missing_delay = dmd_new; + ioc->io_missing_delay = io_missing_delay; + } + +out: + kfree(sas_iounit_pg1); +} +/** + * _base_static_config_pages - static start of day config pages + * @ioc: per adapter object + * + * Return nothing. + */ +static void +_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ConfigReply_t mpi_reply; + u32 iounit_pg1_flags; + + mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0); + if (ioc->ir_firmware) + mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply, + &ioc->manu_pg10); + + /* + * Ensure correct T10 PI operation if vendor left EEDPTagMode + * flag unset in NVDATA. 
+ */ + mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11); + if (ioc->manu_pg11.EEDPTagMode == 0) { + pr_err("%s: overriding NVDATA EEDPTagMode setting\n", + ioc->name); + ioc->manu_pg11.EEDPTagMode &= ~0x3; + ioc->manu_pg11.EEDPTagMode |= 0x1; + mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply, + &ioc->manu_pg11); + } + + mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2); + mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3); + mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8); + mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0); + mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); + mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8); + _base_display_ioc_capabilities(ioc); + + /* + * Enable task_set_full handling in iounit_pg1 when the + * facts capabilities indicate that its supported. + */ + iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags); + if ((ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING)) + iounit_pg1_flags &= + ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; + else + iounit_pg1_flags |= + MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING; + ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags); + mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1); + + if (ioc->iounit_pg8.NumSensors) + ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors; +} + +/** + * _base_release_memory_pools - release memory + * @ioc: per adapter object + * + * Free memory allocated from _base_allocate_memory_pools. + * + * Return nothing. + */ +static void +_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc) +{ + int i = 0; + struct reply_post_struct *rps; + + dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + if (ioc->request) { + pci_free_consistent(ioc->pdev, ioc->request_dma_sz, + ioc->request, ioc->request_dma); + dexitprintk(ioc, pr_info(MPT3SAS_FMT + "request_pool(0x%p): free\n", + ioc->name, ioc->request)); + ioc->request = NULL; + } + + if (ioc->sense) { + pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma); + if (ioc->sense_dma_pool) + pci_pool_destroy(ioc->sense_dma_pool); + dexitprintk(ioc, pr_info(MPT3SAS_FMT + "sense_pool(0x%p): free\n", + ioc->name, ioc->sense)); + ioc->sense = NULL; + } + + if (ioc->reply) { + pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); + if (ioc->reply_dma_pool) + pci_pool_destroy(ioc->reply_dma_pool); + dexitprintk(ioc, pr_info(MPT3SAS_FMT + "reply_pool(0x%p): free\n", + ioc->name, ioc->reply)); + ioc->reply = NULL; + } + + if (ioc->reply_free) { + pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free, + ioc->reply_free_dma); + if (ioc->reply_free_dma_pool) + pci_pool_destroy(ioc->reply_free_dma_pool); + dexitprintk(ioc, pr_info(MPT3SAS_FMT + "reply_free_pool(0x%p): free\n", + ioc->name, ioc->reply_free)); + ioc->reply_free = NULL; + } + + if (ioc->reply_post) { + do { + rps = &ioc->reply_post[i]; + if (rps->reply_post_free) { + pci_pool_free( + ioc->reply_post_free_dma_pool, + rps->reply_post_free, + rps->reply_post_free_dma); + dexitprintk(ioc, pr_info(MPT3SAS_FMT + "reply_post_free_pool(0x%p): free\n", + ioc->name, rps->reply_post_free)); + rps->reply_post_free = NULL; + } + } while (ioc->rdpq_array_enable && + (++i < ioc->reply_queue_count)); + + if (ioc->reply_post_free_dma_pool) + pci_pool_destroy(ioc->reply_post_free_dma_pool); + kfree(ioc->reply_post); + } + + if (ioc->config_page) { + dexitprintk(ioc, pr_info(MPT3SAS_FMT + "config_page(0x%p): free\n", 
ioc->name, + ioc->config_page)); + pci_free_consistent(ioc->pdev, ioc->config_page_sz, + ioc->config_page, ioc->config_page_dma); + } + + if (ioc->scsi_lookup) { + free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages); + ioc->scsi_lookup = NULL; + } + kfree(ioc->hpr_lookup); + kfree(ioc->internal_lookup); + if (ioc->chain_lookup) { + for (i = 0; i < ioc->chain_depth; i++) { + if (ioc->chain_lookup[i].chain_buffer) + pci_pool_free(ioc->chain_dma_pool, + ioc->chain_lookup[i].chain_buffer, + ioc->chain_lookup[i].chain_buffer_dma); + } + if (ioc->chain_dma_pool) + pci_pool_destroy(ioc->chain_dma_pool); + free_pages((ulong)ioc->chain_lookup, ioc->chain_pages); + ioc->chain_lookup = NULL; + } +} + +/** + * _base_allocate_memory_pools - allocate start of day memory pools + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 success, anything else error + */ +static int +_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + struct mpt3sas_facts *facts; + u16 max_sge_elements; + u16 chains_needed_per_io; + u32 sz, total_sz, reply_post_free_sz; + u32 retry_sz; + u16 max_request_credit; + unsigned short sg_tablesize; + u16 sge_size; + int i; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + + retry_sz = 0; + facts = &ioc->facts; + + /* command line tunables for max sgl entries */ + if (max_sgl_entries != -1) + sg_tablesize = max_sgl_entries; + else { + if (ioc->hba_mpi_version_belonged == MPI2_VERSION) + sg_tablesize = MPT2SAS_SG_DEPTH; + else + sg_tablesize = MPT3SAS_SG_DEPTH; + } + + if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS) + sg_tablesize = MPT_MIN_PHYS_SEGMENTS; + else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) { + sg_tablesize = min_t(unsigned short, sg_tablesize, + SCSI_MAX_SG_CHAIN_SEGMENTS); + pr_warn(MPT3SAS_FMT + "sg_tablesize(%u) is bigger than kernel" + " defined SCSI_MAX_SG_SEGMENTS(%u)\n", ioc->name, + sg_tablesize, MPT_MAX_PHYS_SEGMENTS); + } + ioc->shost->sg_tablesize = sg_tablesize; + + ioc->hi_priority_depth = facts->HighPriorityCredit; + ioc->internal_depth = ioc->hi_priority_depth + (5); + /* command line tunables for max controller queue depth */ + if (max_queue_depth != -1 && max_queue_depth != 0) { + max_request_credit = min_t(u16, max_queue_depth + + ioc->hi_priority_depth + ioc->internal_depth, + facts->RequestCredit); + if (max_request_credit > MAX_HBA_QUEUE_DEPTH) + max_request_credit = MAX_HBA_QUEUE_DEPTH; + } else + max_request_credit = min_t(u16, facts->RequestCredit, + MAX_HBA_QUEUE_DEPTH); + + ioc->hba_queue_depth = max_request_credit; + + /* request frame size */ + ioc->request_sz = facts->IOCRequestFrameSize * 4; + + /* reply frame size */ + ioc->reply_sz = facts->ReplyFrameSize * 4; + + /* calculate the max scatter element size */ + sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee); + + retry_allocation: + total_sz = 0; + /* calculate number of sg elements left over in the 1st frame */ + max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) - + sizeof(Mpi2SGEIOUnion_t)) + sge_size); + ioc->max_sges_in_main_message = max_sge_elements/sge_size; + + /* now do the same for a chain buffer */ + max_sge_elements = ioc->request_sz - sge_size; + ioc->max_sges_in_chain_message = max_sge_elements/sge_size; + + /* + * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE + */ + chains_needed_per_io = ((ioc->shost->sg_tablesize - + ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message) + + 1; + if (chains_needed_per_io > facts->MaxChainDepth) { + chains_needed_per_io = 
facts->MaxChainDepth; + ioc->shost->sg_tablesize = min_t(u16, + ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message + * chains_needed_per_io), ioc->shost->sg_tablesize); + } + ioc->chains_needed_per_io = chains_needed_per_io; + + /* reply free queue sizing - taking into account for 64 FW events */ + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; + + /* calculate reply descriptor post queue depth */ + ioc->reply_post_queue_depth = ioc->hba_queue_depth + + ioc->reply_free_queue_depth + 1 ; + /* align the reply post queue on the next 16 count boundary */ + if (ioc->reply_post_queue_depth % 16) + ioc->reply_post_queue_depth += 16 - + (ioc->reply_post_queue_depth % 16); + + + if (ioc->reply_post_queue_depth > + facts->MaxReplyDescriptorPostQueueDepth) { + ioc->reply_post_queue_depth = + facts->MaxReplyDescriptorPostQueueDepth - + (facts->MaxReplyDescriptorPostQueueDepth % 16); + ioc->hba_queue_depth = + ((ioc->reply_post_queue_depth - 64) / 2) - 1; + ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; + } + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \ + "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), " + "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message, + ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize, + ioc->chains_needed_per_io)); + + /* reply post queue, 16 byte align */ + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + + sz = reply_post_free_sz; + if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable) + sz *= ioc->reply_queue_count; + + ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ? + (ioc->reply_queue_count):1, + sizeof(struct reply_post_struct), GFP_KERNEL); + + if (!ioc->reply_post) { + pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n", + ioc->name); + goto out; + } + ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool", + ioc->pdev, sz, 16, 0); + if (!ioc->reply_post_free_dma_pool) { + pr_err(MPT3SAS_FMT + "reply_post_free pool: pci_pool_create failed\n", + ioc->name); + goto out; + } + i = 0; + do { + ioc->reply_post[i].reply_post_free = + pci_pool_alloc(ioc->reply_post_free_dma_pool, + GFP_KERNEL, + &ioc->reply_post[i].reply_post_free_dma); + if (!ioc->reply_post[i].reply_post_free) { + pr_err(MPT3SAS_FMT + "reply_post_free pool: pci_pool_alloc failed\n", + ioc->name); + goto out; + } + memset(ioc->reply_post[i].reply_post_free, 0, sz); + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "reply post free pool (0x%p): depth(%d)," + "element_size(%d), pool_size(%d kB)\n", ioc->name, + ioc->reply_post[i].reply_post_free, + ioc->reply_post_queue_depth, 8, sz/1024)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "reply_post_free_dma = (0x%llx)\n", ioc->name, + (unsigned long long) + ioc->reply_post[i].reply_post_free_dma)); + total_sz += sz; + } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count)); + + if (ioc->dma_mask == 64) { + if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) { + pr_warn(MPT3SAS_FMT + "no suitable consistent DMA mask for %s\n", + ioc->name, pci_name(ioc->pdev)); + goto out; + } + } + + ioc->scsiio_depth = ioc->hba_queue_depth - + ioc->hi_priority_depth - ioc->internal_depth; + + /* set the scsi host can_queue depth + * with some internal commands that could be outstanding + */ + ioc->shost->can_queue = ioc->scsiio_depth; + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "scsi host: can_queue depth (%d)\n", + ioc->name, ioc->shost->can_queue)); + + + /* contiguous pool for request and chains, 16 byte align, 
one extra " + * "frame for smid=0 + */ + ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth; + sz = ((ioc->scsiio_depth + 1) * ioc->request_sz); + + /* hi-priority queue */ + sz += (ioc->hi_priority_depth * ioc->request_sz); + + /* internal queue */ + sz += (ioc->internal_depth * ioc->request_sz); + + ioc->request_dma_sz = sz; + ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma); + if (!ioc->request) { + pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \ + "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), " + "total(%d kB)\n", ioc->name, ioc->hba_queue_depth, + ioc->chains_needed_per_io, ioc->request_sz, sz/1024); + if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH) + goto out; + retry_sz += 64; + ioc->hba_queue_depth = max_request_credit - retry_sz; + goto retry_allocation; + } + + if (retry_sz) + pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \ + "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), " + "total(%d kb)\n", ioc->name, ioc->hba_queue_depth, + ioc->chains_needed_per_io, ioc->request_sz, sz/1024); + + /* hi-priority queue */ + ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) * + ioc->request_sz); + ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) * + ioc->request_sz); + + /* internal queue */ + ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth * + ioc->request_sz); + ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * + ioc->request_sz); + + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz, + (ioc->hba_queue_depth * ioc->request_sz)/1024)); + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n", + ioc->name, (unsigned long long) ioc->request_dma)); + total_sz += sz; + + sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker); + ioc->scsi_lookup_pages = get_order(sz); + ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages( + GFP_KERNEL, ioc->scsi_lookup_pages); + if (!ioc->scsi_lookup) { + pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n", + ioc->name, (int)sz); + goto out; + } + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n", + ioc->name, ioc->request, ioc->scsiio_depth)); + + ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH); + sz = ioc->chain_depth * sizeof(struct chain_tracker); + ioc->chain_pages = get_order(sz); + ioc->chain_lookup = (struct chain_tracker *)__get_free_pages( + GFP_KERNEL, ioc->chain_pages); + if (!ioc->chain_lookup) { + pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n", + ioc->name); + goto out; + } + ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev, + ioc->request_sz, 16, 0); + if (!ioc->chain_dma_pool) { + pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n", + ioc->name); + goto out; + } + for (i = 0; i < ioc->chain_depth; i++) { + ioc->chain_lookup[i].chain_buffer = pci_pool_alloc( + ioc->chain_dma_pool , GFP_KERNEL, + &ioc->chain_lookup[i].chain_buffer_dma); + if (!ioc->chain_lookup[i].chain_buffer) { + ioc->chain_depth = i; + goto chain_done; + } + total_sz += ioc->request_sz; + } + chain_done: + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->name, ioc->chain_depth, ioc->request_sz, + ((ioc->chain_depth * ioc->request_sz))/1024)); + + /* initialize hi-priority queue smid's */ + ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth, + sizeof(struct 
request_tracker), GFP_KERNEL); + if (!ioc->hpr_lookup) { + pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n", + ioc->name); + goto out; + } + ioc->hi_priority_smid = ioc->scsiio_depth + 1; + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "hi_priority(0x%p): depth(%d), start smid(%d)\n", + ioc->name, ioc->hi_priority, + ioc->hi_priority_depth, ioc->hi_priority_smid)); + + /* initialize internal queue smid's */ + ioc->internal_lookup = kcalloc(ioc->internal_depth, + sizeof(struct request_tracker), GFP_KERNEL); + if (!ioc->internal_lookup) { + pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n", + ioc->name); + goto out; + } + ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth; + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "internal(0x%p): depth(%d), start smid(%d)\n", + ioc->name, ioc->internal, + ioc->internal_depth, ioc->internal_smid)); + + /* sense buffers, 4 byte align */ + sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE; + ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4, + 0); + if (!ioc->sense_dma_pool) { + pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n", + ioc->name); + goto out; + } + ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL, + &ioc->sense_dma); + if (!ioc->sense) { + pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n", + ioc->name); + goto out; + } + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "sense pool(0x%p): depth(%d), element_size(%d), pool_size" + "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth, + SCSI_SENSE_BUFFERSIZE, sz/1024)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n", + ioc->name, (unsigned long long)ioc->sense_dma)); + total_sz += sz; + + /* reply pool, 4 byte align */ + sz = ioc->reply_free_queue_depth * ioc->reply_sz; + ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4, + 0); + if (!ioc->reply_dma_pool) { + pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n", + ioc->name); + goto out; + } + ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL, + &ioc->reply_dma); + if (!ioc->reply) { + pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n", + ioc->name); + goto out; + } + ioc->reply_dma_min_address = (u32)(ioc->reply_dma); + ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz; + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->name, ioc->reply, + ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n", + ioc->name, (unsigned long long)ioc->reply_dma)); + total_sz += sz; + + /* reply free queue, 16 byte align */ + sz = ioc->reply_free_queue_depth * 4; + ioc->reply_free_dma_pool = pci_pool_create("reply_free pool", + ioc->pdev, sz, 16, 0); + if (!ioc->reply_free_dma_pool) { + pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n", + ioc->name); + goto out; + } + ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL, + &ioc->reply_free_dma); + if (!ioc->reply_free) { + pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n", + ioc->name); + goto out; + } + memset(ioc->reply_free, 0, sz); + dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \ + "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name, + ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "reply_free_dma (0x%llx)\n", + ioc->name, (unsigned long long)ioc->reply_free_dma)); + total_sz += sz; + + ioc->config_page_sz = 512; + ioc->config_page = pci_alloc_consistent(ioc->pdev, 
+ ioc->config_page_sz, &ioc->config_page_dma); + if (!ioc->config_page) { + pr_err(MPT3SAS_FMT + "config page: pci_pool_alloc failed\n", + ioc->name); + goto out; + } + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "config page(0x%p): size(%d)\n", + ioc->name, ioc->config_page, ioc->config_page_sz)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n", + ioc->name, (unsigned long long)ioc->config_page_dma)); + total_sz += ioc->config_page_sz; + + pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n", + ioc->name, total_sz/1024); + pr_info(MPT3SAS_FMT + "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n", + ioc->name, ioc->shost->can_queue, facts->RequestCredit); + pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n", + ioc->name, ioc->shost->sg_tablesize); + return 0; + + out: + return -ENOMEM; +} + +/** + * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter. + * @ioc: Pointer to MPT_ADAPTER structure + * @cooked: Request raw or cooked IOC state + * + * Returns all IOC Doorbell register bits if cooked==0, else just the + * Doorbell bits in MPI_IOC_STATE_MASK. + */ +u32 +mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked) +{ + u32 s, sc; + + s = readl(&ioc->chip->Doorbell); + sc = s & MPI2_IOC_STATE_MASK; + return cooked ? sc : s; +} + +/** + * _base_wait_on_iocstate - waiting on a particular ioc state + * @ioc_state: controller state { READY, OPERATIONAL, or RESET } + * @timeout: timeout in second + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout, + int sleep_flag) +{ + u32 count, cntdn; + u32 current_state; + + count = 0; + cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; + do { + current_state = mpt3sas_base_get_iocstate(ioc, 1); + if (current_state == ioc_state) + return 0; + if (count && current_state == MPI2_IOC_STATE_FAULT) + break; + if (sleep_flag == CAN_SLEEP) + usleep_range(1000, 1500); + else + udelay(500); + count++; + } while (--cntdn); + + return current_state; +} + +/** + * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by + * a write to the doorbell) + * @ioc: per adapter object + * @timeout: timeout in second + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + * + * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. + */ +static int +_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag); + +static int +_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout, + int sleep_flag) +{ + u32 cntdn, count; + u32 int_status; + + count = 0; + cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; + do { + int_status = readl(&ioc->chip->HostInterruptStatus); + if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { + dhsprintk(ioc, pr_info(MPT3SAS_FMT + "%s: successful count(%d), timeout(%d)\n", + ioc->name, __func__, count, timeout)); + return 0; + } + if (sleep_flag == CAN_SLEEP) + usleep_range(1000, 1500); + else + udelay(500); + count++; + } while (--cntdn); + + pr_err(MPT3SAS_FMT + "%s: failed due to timeout count(%d), int_status(%x)!\n", + ioc->name, __func__, count, int_status); + return -EFAULT; +} + +/** + * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell. + * @ioc: per adapter object + * @timeout: timeout in second + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. 
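+ *
+ * Editor's note: every waiter in this family -- ioc-state, doorbell
+ * interrupt, doorbell ack, doorbell-in-use -- is the same countdown
+ * poll: ~1 ms sleeps when sleeping is allowed, 500 us busy-waits when
+ * it is not. The shared skeleton (illustrative only):
+ */
+#if 0	/* illustrative sketch -- never compiled */
+static int sketch_poll_reg(void __iomem *reg, u32 mask, int timeout_s,
+	int can_sleep)
+{
+	/* iteration budget chosen so either branch spans ~timeout_s */
+	u32 cntdn = (can_sleep ? 1000 : 2000) * timeout_s;
+
+	do {
+		if (readl(reg) & mask)
+			return 0;
+		if (can_sleep)
+			usleep_range(1000, 1500);
+		else
+			udelay(500);
+	} while (--cntdn);
+	return -ETIMEDOUT;
+}
+#endif
+/*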
+ * + * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to + * doorbell. + */ +static int +_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout, + int sleep_flag) +{ + u32 cntdn, count; + u32 int_status; + u32 doorbell; + + count = 0; + cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; + do { + int_status = readl(&ioc->chip->HostInterruptStatus); + if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { + dhsprintk(ioc, pr_info(MPT3SAS_FMT + "%s: successful count(%d), timeout(%d)\n", + ioc->name, __func__, count, timeout)); + return 0; + } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { + doorbell = readl(&ioc->chip->Doorbell); + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_FAULT) { + mpt3sas_base_fault_info(ioc , doorbell); + return -EFAULT; + } + } else if (int_status == 0xFFFFFFFF) + goto out; + + if (sleep_flag == CAN_SLEEP) + usleep_range(1000, 1500); + else + udelay(500); + count++; + } while (--cntdn); + + out: + pr_err(MPT3SAS_FMT + "%s: failed due to timeout count(%d), int_status(%x)!\n", + ioc->name, __func__, count, int_status); + return -EFAULT; +} + +/** + * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use + * @ioc: per adapter object + * @timeout: timeout in second + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + * + */ +static int +_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout, + int sleep_flag) +{ + u32 cntdn, count; + u32 doorbell_reg; + + count = 0; + cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; + do { + doorbell_reg = readl(&ioc->chip->Doorbell); + if (!(doorbell_reg & MPI2_DOORBELL_USED)) { + dhsprintk(ioc, pr_info(MPT3SAS_FMT + "%s: successful count(%d), timeout(%d)\n", + ioc->name, __func__, count, timeout)); + return 0; + } + if (sleep_flag == CAN_SLEEP) + usleep_range(1000, 1500); + else + udelay(500); + count++; + } while (--cntdn); + + pr_err(MPT3SAS_FMT + "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n", + ioc->name, __func__, count, doorbell_reg); + return -EFAULT; +} + +/** + * _base_send_ioc_reset - send doorbell reset + * @ioc: per adapter object + * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET + * @timeout: timeout in second + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout, + int sleep_flag) +{ + u32 ioc_state; + int r = 0; + + if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) { + pr_err(MPT3SAS_FMT "%s: unknown reset_type\n", + ioc->name, __func__); + return -EFAULT; + } + + if (!(ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY)) + return -EFAULT; + + pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name); + + writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT, + &ioc->chip->Doorbell); + if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) { + r = -EFAULT; + goto out; + } + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, + timeout, sleep_flag); + if (ioc_state) { + pr_err(MPT3SAS_FMT + "%s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + r = -EFAULT; + goto out; + } + out: + pr_info(MPT3SAS_FMT "message unit reset: %s\n", + ioc->name, ((r == 0) ? 
"SUCCESS" : "FAILED")); + return r; +} + +/** + * _base_handshake_req_reply_wait - send request thru doorbell interface + * @ioc: per adapter object + * @request_bytes: request length + * @request: pointer having request payload + * @reply_bytes: reply length + * @reply: pointer to reply payload + * @timeout: timeout in second + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, + u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag) +{ + MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; + int i; + u8 failed; + u16 dummy; + __le32 *mfp; + + /* make sure doorbell is not in use */ + if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) { + pr_err(MPT3SAS_FMT + "doorbell is in use (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + + /* clear pending doorbell interrupts from previous state changes */ + if (readl(&ioc->chip->HostInterruptStatus) & + MPI2_HIS_IOC2SYS_DB_STATUS) + writel(0, &ioc->chip->HostInterruptStatus); + + /* send message to ioc */ + writel(((MPI2_FUNCTION_HANDSHAKE<chip->Doorbell); + + if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) { + pr_err(MPT3SAS_FMT + "doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + writel(0, &ioc->chip->HostInterruptStatus); + + if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) { + pr_err(MPT3SAS_FMT + "doorbell handshake ack failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + + /* send message 32-bits at a time */ + for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) { + writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell); + if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) + failed = 1; + } + + if (failed) { + pr_err(MPT3SAS_FMT + "doorbell handshake sending request failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + + /* now wait for the reply */ + if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) { + pr_err(MPT3SAS_FMT + "doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + + /* read the first two 16-bits, it gives the total length of the reply */ + reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) { + pr_err(MPT3SAS_FMT + "doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + + for (i = 2; i < default_reply->MsgLength * 2; i++) { + if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) { + pr_err(MPT3SAS_FMT + "doorbell handshake int failed (line=%d)\n", + ioc->name, __LINE__); + return -EFAULT; + } + if (i >= reply_bytes/2) /* overflow case */ + dummy = readl(&ioc->chip->Doorbell); + else + reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell) + & MPI2_DOORBELL_DATA_MASK); + writel(0, &ioc->chip->HostInterruptStatus); + } + + _base_wait_for_doorbell_int(ioc, 5, sleep_flag); + if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) { + dhsprintk(ioc, pr_info(MPT3SAS_FMT + "doorbell is in use (line=%d)\n", ioc->name, __LINE__)); + } + writel(0, &ioc->chip->HostInterruptStatus); + + if (ioc->logging_level & MPT_DEBUG_INIT) { + mfp = (__le32 *)reply; + pr_info("\toffset:data\n"); + for (i = 0; i < reply_bytes/4; i++) + 
pr_info("\t[0x%02x]:%08x\n", i*4, + le32_to_cpu(mfp[i])); + } + return 0; +} + +/** + * mpt3sas_base_sas_iounit_control - send sas iounit control to FW + * @ioc: per adapter object + * @mpi_reply: the reply payload from FW + * @mpi_request: the request payload sent to FW + * + * The SAS IO Unit Control Request message allows the host to perform low-level + * operations, such as resets on the PHYs of the IO Unit, also allows the host + * to obtain the IOC assigned device handles for a device if it has other + * identifying information about the device, in addition allows the host to + * remove IOC resources associated with the device. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, + Mpi2SasIoUnitControlReply_t *mpi_reply, + Mpi2SasIoUnitControlRequest_t *mpi_request) +{ + u16 smid; + u32 ioc_state; + unsigned long timeleft; + bool issue_reset = false; + int rc; + void *request; + u16 wait_state_count; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + mutex_lock(&ioc->base_cmds.mutex); + + if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: base_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->base_cmds.status = MPT3_CMD_PENDING; + request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)); + if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) + ioc->ioc_link_reset_in_progress = 1; + init_completion(&ioc->base_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, + msecs_to_jiffies(10000)); + if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET || + mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) && + ioc->ioc_link_reset_in_progress) + ioc->ioc_link_reset_in_progress = 0; + if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SasIoUnitControlRequest_t)/4); + if (!(ioc->base_cmds.status & MPT3_CMD_RESET)) + issue_reset = true; + goto issue_host_reset; + } + if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) + memcpy(mpi_reply, ioc->base_cmds.reply, + sizeof(Mpi2SasIoUnitControlReply_t)); + else + memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t)); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + goto out; + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + rc = -EFAULT; + out: + mutex_unlock(&ioc->base_cmds.mutex); + return rc; +} + +/** + * mpt3sas_base_scsi_enclosure_processor - sending request to sep device + * @ioc: per adapter object + * @mpi_reply: 
the reply payload from FW
+ * @mpi_request: the request payload sent to FW
+ *
+ * The SCSI Enclosure Processor request message causes the IOC to
+ * communicate with SES devices to control LED status signals.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
+	Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
+{
+	u16 smid;
+	u32 ioc_state;
+	unsigned long timeleft;
+	bool issue_reset = false;
+	int rc;
+	void *request;
+	u16 wait_state_count;
+
+	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+	    __func__));
+
+	mutex_lock(&ioc->base_cmds.mutex);
+
+	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
+		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
+		    ioc->name, __func__);
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	wait_state_count = 0;
+	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+		if (wait_state_count++ == 10) {
+			pr_err(MPT3SAS_FMT
+			    "%s: failed due to ioc not operational\n",
+			    ioc->name, __func__);
+			rc = -EFAULT;
+			goto out;
+		}
+		ssleep(1);
+		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+		pr_info(MPT3SAS_FMT
+		    "%s: waiting for operational state(count=%d)\n",
+		    ioc->name,
+		    __func__, wait_state_count);
+	}
+
+	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+	if (!smid) {
+		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+		    ioc->name, __func__);
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	rc = 0;
+	ioc->base_cmds.status = MPT3_CMD_PENDING;
+	request = mpt3sas_base_get_msg_frame(ioc, smid);
+	ioc->base_cmds.smid = smid;
+	memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
+	init_completion(&ioc->base_cmds.done);
+	mpt3sas_base_put_smid_default(ioc, smid);
+	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+	    msecs_to_jiffies(10000));
+	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+		pr_err(MPT3SAS_FMT "%s: timeout\n",
+		    ioc->name, __func__);
+		_debug_dump_mf(mpi_request,
+		    sizeof(Mpi2SepRequest_t)/4);
+		/* request a host reset on timeout unless the command was
+		 * already terminated by an IOC reset; assigning false here
+		 * would leave issue_reset at its initial value and make the
+		 * issue_host_reset path a no-op
+		 */
+		if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
+			issue_reset = true;
+		goto issue_host_reset;
+	}
+	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
+		memcpy(mpi_reply, ioc->base_cmds.reply,
+		    sizeof(Mpi2SepReply_t));
+	else
+		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
+	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+	goto out;
+
+ issue_host_reset:
+	if (issue_reset)
+		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+		    FORCE_BIG_HAMMER);
+	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+	rc = -EFAULT;
+ out:
+	mutex_unlock(&ioc->base_cmds.mutex);
+	return rc;
+}
+
+/**
+ * _base_get_port_facts - obtain port facts reply and save in ioc
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */ +static int +_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag) +{ + Mpi2PortFactsRequest_t mpi_request; + Mpi2PortFactsReply_t mpi_reply; + struct mpt3sas_port_facts *pfacts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + mpi_reply_sz = sizeof(Mpi2PortFactsReply_t); + mpi_request_sz = sizeof(Mpi2PortFactsRequest_t); + memset(&mpi_request, 0, mpi_request_sz); + mpi_request.Function = MPI2_FUNCTION_PORT_FACTS; + mpi_request.PortNumber = port; + r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP); + + if (r != 0) { + pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + + pfacts = &ioc->pfacts[port]; + memset(pfacts, 0, sizeof(struct mpt3sas_port_facts)); + pfacts->PortNumber = mpi_reply.PortNumber; + pfacts->VP_ID = mpi_reply.VP_ID; + pfacts->VF_ID = mpi_reply.VF_ID; + pfacts->MaxPostedCmdBuffers = + le16_to_cpu(mpi_reply.MaxPostedCmdBuffers); + + return 0; +} + +/** + * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL + * @ioc: per adapter object + * @timeout: + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout, + int sleep_flag) +{ + u32 ioc_state; + int rc; + + dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + if (ioc->pci_error_recovery) { + dfailprintk(ioc, printk(MPT3SAS_FMT + "%s: host in pci error recovery\n", ioc->name, __func__)); + return -EFAULT; + } + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n", + ioc->name, __func__, ioc_state)); + + if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) || + (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) + return 0; + + if (ioc_state & MPI2_DOORBELL_USED) { + dhsprintk(ioc, printk(MPT3SAS_FMT + "unexpected doorbell active!\n", ioc->name)); + goto issue_diag_reset; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_base_fault_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + goto issue_diag_reset; + } + + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, + timeout, sleep_flag); + if (ioc_state) { + dfailprintk(ioc, printk(MPT3SAS_FMT + "%s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state)); + return -EFAULT; + } + + issue_diag_reset: + rc = _base_diag_reset(ioc, sleep_flag); + return rc; +} + +/** + * _base_get_ioc_facts - obtain ioc facts reply and save in ioc + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. 
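+ *
+ * For context: the Doorbell register read here multiplexes state and
+ * data. The bits under MPI2_IOC_STATE_MASK give the IOC state, and in
+ * the FAULT case the bits under MPI2_DOORBELL_DATA_MASK carry the fault
+ * code. An illustrative decode (not a helper in this driver):
+ *
+ *	u32 db = readl(&ioc->chip->Doorbell);
+ *
+ *	if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ *		pr_err("fault code 0x%04x\n", db & MPI2_DOORBELL_DATA_MASK);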
+ */ +static int +_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + Mpi2IOCFactsRequest_t mpi_request; + Mpi2IOCFactsReply_t mpi_reply; + struct mpt3sas_facts *facts; + int mpi_reply_sz, mpi_request_sz, r; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + r = _base_wait_for_iocstate(ioc, 10, sleep_flag); + if (r) { + dfailprintk(ioc, printk(MPT3SAS_FMT + "%s: failed getting to correct state\n", + ioc->name, __func__)); + return r; + } + mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t); + mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t); + memset(&mpi_request, 0, mpi_request_sz); + mpi_request.Function = MPI2_FUNCTION_IOC_FACTS; + r = _base_handshake_req_reply_wait(ioc, mpi_request_sz, + (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP); + + if (r != 0) { + pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n", + ioc->name, __func__, r); + return r; + } + + facts = &ioc->facts; + memset(facts, 0, sizeof(struct mpt3sas_facts)); + facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion); + facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion); + facts->VP_ID = mpi_reply.VP_ID; + facts->VF_ID = mpi_reply.VF_ID; + facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions); + facts->MaxChainDepth = mpi_reply.MaxChainDepth; + facts->WhoInit = mpi_reply.WhoInit; + facts->NumberOfPorts = mpi_reply.NumberOfPorts; + facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors; + facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); + facts->MaxReplyDescriptorPostQueueDepth = + le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); + facts->ProductID = le16_to_cpu(mpi_reply.ProductID); + facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities); + if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)) + ioc->ir_firmware = 1; + if ((facts->IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE)) + ioc->rdpq_array_capable = 1; + facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word); + facts->IOCRequestFrameSize = + le16_to_cpu(mpi_reply.IOCRequestFrameSize); + facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators); + facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets); + ioc->shost->max_id = -1; + facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders); + facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures); + facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags); + facts->HighPriorityCredit = + le16_to_cpu(mpi_reply.HighPriorityCredit); + facts->ReplyFrameSize = mpi_reply.ReplyFrameSize; + facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle); + + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "hba queue depth(%d), max chains per io(%d)\n", + ioc->name, facts->RequestCredit, + facts->MaxChainDepth)); + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "request frame size(%d), reply frame size(%d)\n", ioc->name, + facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4)); + return 0; +} + +/** + * _base_send_ioc_init - send ioc_init to firmware + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. 
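+ *
+ * Note on units in the request built below: several IOCInit fields are
+ * expressed in 4-byte words rather than bytes (hence the /4), and every
+ * multi-byte field is converted to little-endian before being sent, e.g.:
+ *
+ *	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz / 4);
+ *	mpi_request.SystemReplyAddressHigh =
+ *		cpu_to_le32((u64)ioc->reply_dma >> 32);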
+ */ +static int +_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + Mpi2IOCInitRequest_t mpi_request; + Mpi2IOCInitReply_t mpi_reply; + int i, r = 0; + struct timeval current_time; + u16 ioc_status; + u32 reply_post_free_array_sz = 0; + Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL; + dma_addr_t reply_post_free_array_dma; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t)); + mpi_request.Function = MPI2_FUNCTION_IOC_INIT; + mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER; + mpi_request.VF_ID = 0; /* TODO */ + mpi_request.VP_ID = 0; + mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged); + mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); + + if (_base_is_controller_msix_enabled(ioc)) + mpi_request.HostMSIxVectors = ioc->reply_queue_count; + mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); + mpi_request.ReplyDescriptorPostQueueDepth = + cpu_to_le16(ioc->reply_post_queue_depth); + mpi_request.ReplyFreeQueueDepth = + cpu_to_le16(ioc->reply_free_queue_depth); + + mpi_request.SenseBufferAddressHigh = + cpu_to_le32((u64)ioc->sense_dma >> 32); + mpi_request.SystemReplyAddressHigh = + cpu_to_le32((u64)ioc->reply_dma >> 32); + mpi_request.SystemRequestFrameBaseAddress = + cpu_to_le64((u64)ioc->request_dma); + mpi_request.ReplyFreeQueueAddress = + cpu_to_le64((u64)ioc->reply_free_dma); + + if (ioc->rdpq_array_enable) { + reply_post_free_array_sz = ioc->reply_queue_count * + sizeof(Mpi2IOCInitRDPQArrayEntry); + reply_post_free_array = pci_alloc_consistent(ioc->pdev, + reply_post_free_array_sz, &reply_post_free_array_dma); + if (!reply_post_free_array) { + pr_err(MPT3SAS_FMT + "reply_post_free_array: pci_alloc_consistent failed\n", + ioc->name); + r = -ENOMEM; + goto out; + } + memset(reply_post_free_array, 0, reply_post_free_array_sz); + for (i = 0; i < ioc->reply_queue_count; i++) + reply_post_free_array[i].RDPQBaseAddress = + cpu_to_le64( + (u64)ioc->reply_post[i].reply_post_free_dma); + mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE; + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64)reply_post_free_array_dma); + } else { + mpi_request.ReplyDescriptorPostQueueAddress = + cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); + } + + /* This time stamp specifies number of milliseconds + * since epoch ~ midnight January 1, 1970. 
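+ * For example, tv_sec = 1656681475 and tv_usec = 250000 yield
+ * TimeStamp = 1656681475 * 1000 + 250 = 1656681475250.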
+ */
+	do_gettimeofday(&current_time);
+	mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
+	    (current_time.tv_usec / 1000));
+
+	if (ioc->logging_level & MPT_DEBUG_INIT) {
+		__le32 *mfp;
+		int i;
+
+		mfp = (__le32 *)&mpi_request;
+		pr_info("\toffset:data\n");
+		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
+			pr_info("\t[0x%02x]:%08x\n", i*4,
+			    le32_to_cpu(mfp[i]));
+	}
+
+	r = _base_handshake_req_reply_wait(ioc,
+	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
+	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
+	    sleep_flag);
+
+	if (r != 0) {
+		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+		    ioc->name, __func__, r);
+		goto out;
+	}
+
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
+	    mpi_reply.IOCLogInfo) {
+		pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
+		r = -EIO;
+	}
+
+out:
+	if (reply_post_free_array)
+		pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
+				reply_post_free_array,
+				reply_post_free_array_dma);
+	return r;
+}
+
+/**
+ * mpt3sas_port_enable_done - command completion routine for port enable
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+	u32 reply)
+{
+	MPI2DefaultReply_t *mpi_reply;
+	u16 ioc_status;
+
+	if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
+		return 1;
+
+	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+	if (!mpi_reply)
+		return 1;
+
+	if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
+		return 1;
+
+	ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
+	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
+	ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
+	memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+		ioc->port_enable_failed = 1;
+
+	if (ioc->is_driver_loading) {
+		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+			mpt3sas_port_enable_complete(ioc);
+			return 1;
+		} else {
+			ioc->start_scan_failed = ioc_status;
+			ioc->start_scan = 0;
+			return 1;
+		}
+	}
+	complete(&ioc->port_enable_cmds.done);
+	return 1;
+}
+
+/**
+ * _base_send_port_enable - send port_enable(discovery stuff) to firmware
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
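+ *
+ * Unlike mpt3sas_port_enable() below, which only posts the request and
+ * returns (the completion arrives later via mpt3sas_port_enable_done()
+ * during async scanning), this variant blocks on
+ * ioc->port_enable_cmds.done for up to 300 seconds.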
+ */ +static int +_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + Mpi2PortEnableRequest_t *mpi_request; + Mpi2PortEnableReply_t *mpi_reply; + unsigned long timeleft; + int r = 0; + u16 smid; + u16 ioc_status; + + pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name); + + if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { + pr_err(MPT3SAS_FMT "%s: internal command already in use\n", + ioc->name, __func__); + return -EAGAIN; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + return -EAGAIN; + } + + ioc->port_enable_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->port_enable_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); + mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; + + init_completion(&ioc->port_enable_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done, + 300*HZ); + if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2PortEnableRequest_t)/4); + if (ioc->port_enable_cmds.status & MPT3_CMD_RESET) + r = -EFAULT; + else + r = -ETIME; + goto out; + } + + mpi_reply = ioc->port_enable_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n", + ioc->name, __func__, ioc_status); + r = -EFAULT; + goto out; + } + + out: + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ? + "SUCCESS" : "FAILED")); + return r; +} + +/** + * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply) + * @ioc: per adapter object + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2PortEnableRequest_t *mpi_request; + u16 smid; + + pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name); + + if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { + pr_err(MPT3SAS_FMT "%s: internal command already in use\n", + ioc->name, __func__); + return -EAGAIN; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + return -EAGAIN; + } + + ioc->port_enable_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->port_enable_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t)); + mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE; + + mpt3sas_base_put_smid_default(ioc, smid); + return 0; +} + +/** + * _base_determine_wait_on_discovery - desposition + * @ioc: per adapter object + * + * Decide whether to wait on discovery to complete. Used to either + * locate boot device, or report volumes ahead of physical devices. + * + * Returns 1 for wait, 0 for don't wait + */ +static int +_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc) +{ + /* We wait for discovery to complete if IR firmware is loaded. + * The sas topology events arrive before PD events, so we need time to + * turn on the bit in ioc->pd_handles to indicate PD + * Also, it maybe required to report Volumes ahead of physical + * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set. 
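+ *
+ * In short, the checks below reduce to: wait when IR firmware is
+ * present; otherwise wait only when a BIOS is present and at least one
+ * of the three BIOS page 2 boot-device forms names a device (i.e. is
+ * not MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED).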
+ */ + if (ioc->ir_firmware) + return 1; + + /* if no Bios, then we don't need to wait */ + if (!ioc->bios_pg3.BiosVersion) + return 0; + + /* Bios is present, then we drop down here. + * + * If there any entries in the Bios Page 2, then we wait + * for discovery to complete. + */ + + /* Current Boot Device */ + if ((ioc->bios_pg2.CurrentBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK) == + MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && + /* Request Boot Device */ + (ioc->bios_pg2.ReqBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK) == + MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED && + /* Alternate Request Boot Device */ + (ioc->bios_pg2.ReqAltBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK) == + MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED) + return 0; + + return 1; +} + +/** + * _base_unmask_events - turn on notification for this event + * @ioc: per adapter object + * @event: firmware event + * + * The mask is stored in ioc->event_masks. + */ +static void +_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event) +{ + u32 desired_event; + + if (event >= 128) + return; + + desired_event = (1 << (event % 32)); + + if (event < 32) + ioc->event_masks[0] &= ~desired_event; + else if (event < 64) + ioc->event_masks[1] &= ~desired_event; + else if (event < 96) + ioc->event_masks[2] &= ~desired_event; + else if (event < 128) + ioc->event_masks[3] &= ~desired_event; +} + +/** + * _base_event_notification - send event notification + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + Mpi2EventNotificationRequest_t *mpi_request; + unsigned long timeleft; + u16 smid; + int r = 0; + int i; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + if (ioc->base_cmds.status & MPT3_CMD_PENDING) { + pr_err(MPT3SAS_FMT "%s: internal command already in use\n", + ioc->name, __func__); + return -EAGAIN; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + return -EAGAIN; + } + ioc->base_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->base_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t)); + mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + mpi_request->EventMasks[i] = + cpu_to_le32(ioc->event_masks[i]); + init_completion(&ioc->base_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ); + if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2EventNotificationRequest_t)/4); + if (ioc->base_cmds.status & MPT3_CMD_RESET) + r = -EFAULT; + else + r = -ETIME; + } else + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n", + ioc->name, __func__)); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + return r; +} + +/** + * mpt3sas_base_validate_event_type - validating event types + * @ioc: per adapter object + * @event: firmware event + * + * This will turn on firmware event notification when application + * ask for that event. We don't mask events that are already enabled. 
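+ *
+ * Both this routine and _base_unmask_events() treat ioc->event_masks[]
+ * as four 32-bit words in which a cleared bit means "event enabled".
+ * The indexing, as a sketch:
+ *
+ *	word = event / 32;			/* 0..3 */
+ *	bit  = 1u << (event % 32);
+ *	ioc->event_masks[word] &= ~bit;		/* unmask == enable */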
+ */ +void +mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type) +{ + int i, j; + u32 event_mask, desired_event; + u8 send_update_to_fw; + + for (i = 0, send_update_to_fw = 0; i < + MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) { + event_mask = ~event_type[i]; + desired_event = 1; + for (j = 0; j < 32; j++) { + if (!(event_mask & desired_event) && + (ioc->event_masks[i] & desired_event)) { + ioc->event_masks[i] &= ~desired_event; + send_update_to_fw = 1; + } + desired_event = (desired_event << 1); + } + } + + if (!send_update_to_fw) + return; + + mutex_lock(&ioc->base_cmds.mutex); + _base_event_notification(ioc, CAN_SLEEP); + mutex_unlock(&ioc->base_cmds.mutex); +} + +/** + * _base_diag_reset - the "big hammer" start of day reset + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + u32 host_diagnostic; + u32 ioc_state; + u32 count; + u32 hcb_size; + + pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name); + + drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n", + ioc->name)); + + count = 0; + do { + /* Write magic sequence to WriteSequence register + * Loop until in diagnostic mode + */ + drsprintk(ioc, pr_info(MPT3SAS_FMT + "write magic sequence\n", ioc->name)); + writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence); + writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence); + + /* wait 100 msec */ + if (sleep_flag == CAN_SLEEP) + msleep(100); + else + mdelay(100); + + if (count++ > 20) + goto out; + + host_diagnostic = readl(&ioc->chip->HostDiagnostic); + drsprintk(ioc, pr_info(MPT3SAS_FMT + "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n", + ioc->name, count, host_diagnostic)); + + } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0); + + hcb_size = readl(&ioc->chip->HCBSize); + + drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n", + ioc->name)); + writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER, + &ioc->chip->HostDiagnostic); + + /*This delay allows the chip PCIe hardware time to finish reset tasks*/ + if (sleep_flag == CAN_SLEEP) + msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000); + else + mdelay(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000); + + /* Approximately 300 second max wait */ + for (count = 0; count < (300000000 / + MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) { + + host_diagnostic = readl(&ioc->chip->HostDiagnostic); + + if (host_diagnostic == 0xFFFFFFFF) + goto out; + if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) + break; + + /* Wait to pass the second read delay window */ + if (sleep_flag == CAN_SLEEP) + msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC + / 1000); + else + mdelay(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC + / 1000); + } + + if (host_diagnostic & MPI2_DIAG_HCB_MODE) { + + drsprintk(ioc, pr_info(MPT3SAS_FMT + "restart the adapter assuming the HCB Address points to good F/W\n", + ioc->name)); + host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK; + host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW; + writel(host_diagnostic, &ioc->chip->HostDiagnostic); + + drsprintk(ioc, 
pr_info(MPT3SAS_FMT + "re-enable the HCDW\n", ioc->name)); + writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE, + &ioc->chip->HCBSize); + } + + drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n", + ioc->name)); + writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET, + &ioc->chip->HostDiagnostic); + + drsprintk(ioc, pr_info(MPT3SAS_FMT + "disable writes to the diagnostic register\n", ioc->name)); + writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence); + + drsprintk(ioc, pr_info(MPT3SAS_FMT + "Wait for FW to go to the READY state\n", ioc->name)); + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20, + sleep_flag); + if (ioc_state) { + pr_err(MPT3SAS_FMT + "%s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + goto out; + } + + pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name); + return 0; + + out: + pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name); + return -EFAULT; +} + +/** + * _base_make_ioc_ready - put controller in READY state + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * @type: FORCE_BIG_HAMMER or SOFT_RESET + * + * Returns 0 for success, non-zero for failure. + */ +static int +_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag, + enum reset_type type) +{ + u32 ioc_state; + int rc; + int count; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + if (ioc->pci_error_recovery) + return 0; + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n", + ioc->name, __func__, ioc_state)); + + /* if in RESET state, it should move to READY state shortly */ + count = 0; + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) { + while ((ioc_state & MPI2_IOC_STATE_MASK) != + MPI2_IOC_STATE_READY) { + if (count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed going to ready state (ioc_state=0x%x)\n", + ioc->name, __func__, ioc_state); + return -EFAULT; + } + if (sleep_flag == CAN_SLEEP) + ssleep(1); + else + mdelay(1000); + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + } + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) + return 0; + + if (ioc_state & MPI2_DOORBELL_USED) { + dhsprintk(ioc, pr_info(MPT3SAS_FMT + "unexpected doorbell active!\n", + ioc->name)); + goto issue_diag_reset; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_base_fault_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + goto issue_diag_reset; + } + + if (type == FORCE_BIG_HAMMER) + goto issue_diag_reset; + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL) + if (!(_base_send_ioc_reset(ioc, + MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) { + return 0; + } + + issue_diag_reset: + rc = _base_diag_reset(ioc, CAN_SLEEP); + return rc; +} + +/** + * _base_make_ioc_operational - put controller in OPERATIONAL state + * @ioc: per adapter object + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * Returns 0 for success, non-zero for failure. 
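+ *
+ * For reference, _base_make_ioc_ready() above escalates as follows: a
+ * controller in RESET state is given ~10 s to reach READY; READY returns
+ * immediately; an OPERATIONAL controller first gets a message unit reset
+ * (unless FORCE_BIG_HAMMER was requested); FAULT, a stuck doorbell, or
+ * any failure falls through to the full _base_diag_reset() sequence.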
+ */ +static int +_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + int r, i, index; + unsigned long flags; + u32 reply_address; + u16 smid; + struct _tr_list *delayed_tr, *delayed_tr_next; + u8 hide_flag; + struct adapter_reply_queue *reply_q; + Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + /* clean the delayed target reset list */ + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_tr_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + + + list_for_each_entry_safe(delayed_tr, delayed_tr_next, + &ioc->delayed_tr_volume_list, list) { + list_del(&delayed_tr->list); + kfree(delayed_tr); + } + + /* initialize the scsi lookup free list */ + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + INIT_LIST_HEAD(&ioc->free_list); + smid = 1; + for (i = 0; i < ioc->scsiio_depth; i++, smid++) { + INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list); + ioc->scsi_lookup[i].cb_idx = 0xFF; + ioc->scsi_lookup[i].smid = smid; + ioc->scsi_lookup[i].scmd = NULL; + ioc->scsi_lookup[i].direct_io = 0; + list_add_tail(&ioc->scsi_lookup[i].tracker_list, + &ioc->free_list); + } + + /* hi-priority queue */ + INIT_LIST_HEAD(&ioc->hpr_free_list); + smid = ioc->hi_priority_smid; + for (i = 0; i < ioc->hi_priority_depth; i++, smid++) { + ioc->hpr_lookup[i].cb_idx = 0xFF; + ioc->hpr_lookup[i].smid = smid; + list_add_tail(&ioc->hpr_lookup[i].tracker_list, + &ioc->hpr_free_list); + } + + /* internal queue */ + INIT_LIST_HEAD(&ioc->internal_free_list); + smid = ioc->internal_smid; + for (i = 0; i < ioc->internal_depth; i++, smid++) { + ioc->internal_lookup[i].cb_idx = 0xFF; + ioc->internal_lookup[i].smid = smid; + list_add_tail(&ioc->internal_lookup[i].tracker_list, + &ioc->internal_free_list); + } + + /* chain pool */ + INIT_LIST_HEAD(&ioc->free_chain_list); + for (i = 0; i < ioc->chain_depth; i++) + list_add_tail(&ioc->chain_lookup[i].tracker_list, + &ioc->free_chain_list); + + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + /* initialize Reply Free Queue */ + for (i = 0, reply_address = (u32)ioc->reply_dma ; + i < ioc->reply_free_queue_depth ; i++, reply_address += + ioc->reply_sz) + ioc->reply_free[i] = cpu_to_le32(reply_address); + + /* initialize reply queues */ + if (ioc->is_driver_loading) + _base_assign_reply_queues(ioc); + + /* initialize Reply Post Free Queue */ + index = 0; + reply_post_free_contig = ioc->reply_post[0].reply_post_free; + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + /* + * If RDPQ is enabled, switch to the next allocation. + * Otherwise advance within the contiguous region. 
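+ * Roughly (illustrative notation, not literal code):
+ *
+ *	RDPQ:		queue n uses ioc->reply_post[n].reply_post_free
+ *	contiguous:	queue n uses base + n * ioc->reply_post_queue_depth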
+ */ + if (ioc->rdpq_array_enable) { + reply_q->reply_post_free = + ioc->reply_post[index++].reply_post_free; + } else { + reply_q->reply_post_free = reply_post_free_contig; + reply_post_free_contig += ioc->reply_post_queue_depth; + } + + reply_q->reply_post_host_index = 0; + for (i = 0; i < ioc->reply_post_queue_depth; i++) + reply_q->reply_post_free[i].Words = + cpu_to_le64(ULLONG_MAX); + if (!_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_free_queue; + } + skip_init_reply_post_free_queue: + + r = _base_send_ioc_init(ioc, sleep_flag); + if (r) + return r; + + /* initialize reply free host index */ + ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; + writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); + + /* initialize reply post host index */ + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + if (ioc->msix96_vector) + writel((reply_q->msix_index & 7)<< + MPI2_RPHI_MSIX_INDEX_SHIFT, + ioc->replyPostRegisterIndex[reply_q->msix_index/8]); + else + writel(reply_q->msix_index << + MPI2_RPHI_MSIX_INDEX_SHIFT, + &ioc->chip->ReplyPostHostIndex); + + if (!_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_host_index; + } + + skip_init_reply_post_host_index: + + _base_unmask_interrupts(ioc); + r = _base_event_notification(ioc, sleep_flag); + if (r) + return r; + + if (sleep_flag == CAN_SLEEP) + _base_static_config_pages(ioc); + + + if (ioc->is_driver_loading) { + + if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier + == 0x80) { + hide_flag = (u8) ( + le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) & + MFG_PAGE10_HIDE_SSDS_MASK); + if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK) + ioc->mfg_pg10_hide_flag = hide_flag; + } + + ioc->wait_for_discovery_to_complete = + _base_determine_wait_on_discovery(ioc); + + return r; /* scan_start and scan_finished support */ + } + + r = _base_send_port_enable(ioc, sleep_flag); + if (r) + return r; + + return r; +} + +/** + * mpt3sas_base_free_resources - free resources controller resources + * @ioc: per adapter object + * + * Return nothing. + */ +void +mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) +{ + dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + /* synchronizing freeing resource with pci_access_mutex lock */ + mutex_lock(&ioc->pci_access_mutex); + if (ioc->chip_phys && ioc->chip) { + _base_mask_interrupts(ioc); + ioc->shost_recovery = 1; + _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); + ioc->shost_recovery = 0; + } + + mpt3sas_base_unmap_resources(ioc); + mutex_unlock(&ioc->pci_access_mutex); + return; +} + +/** + * mpt3sas_base_attach - attach controller instance + * @ioc: per adapter object + * + * Returns 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) +{ + int r, i; + int cpu_id, last_cpu_id = 0; + + dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + /* setup cpu_msix_table */ + ioc->cpu_count = num_online_cpus(); + for_each_online_cpu(cpu_id) + last_cpu_id = cpu_id; + ioc->cpu_msix_table_sz = last_cpu_id + 1; + ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); + ioc->reply_queue_count = 1; + if (!ioc->cpu_msix_table) { + dfailprintk(ioc, pr_info(MPT3SAS_FMT + "allocation for cpu_msix_table failed!!!\n", + ioc->name)); + r = -ENOMEM; + goto out_free_resources; + } + + if (ioc->is_warpdrive) { + ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz, + sizeof(resource_size_t *), GFP_KERNEL); + if (!ioc->reply_post_host_index) { + dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation " + "for cpu_msix_table failed!!!\n", ioc->name)); + r = -ENOMEM; + goto out_free_resources; + } + } + + ioc->rdpq_array_enable_assigned = 0; + ioc->dma_mask = 0; + r = mpt3sas_base_map_resources(ioc); + if (r) + goto out_free_resources; + + pci_set_drvdata(ioc->pdev, ioc->shost); + r = _base_get_ioc_facts(ioc, CAN_SLEEP); + if (r) + goto out_free_resources; + + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: + ioc->build_sg_scmd = &_base_build_sg_scmd; + ioc->build_sg = &_base_build_sg; + ioc->build_zero_len_sge = &_base_build_zero_len_sge; + break; + case MPI25_VERSION: + /* + * In SAS3.0, + * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and + * Target Status - all require the IEEE formated scatter gather + * elements. + */ + ioc->build_sg_scmd = &_base_build_sg_scmd_ieee; + ioc->build_sg = &_base_build_sg_ieee; + ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee; + ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t); + break; + } + + /* + * These function pointers for other requests that don't + * the require IEEE scatter gather elements. + * + * For example Configuration Pages and SAS IOUNIT Control don't. 
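+ * So MPI-format requests go through ioc->build_sg_mpi and
+ * ioc->build_zero_len_sge_mpi (assigned just below), while SCSI I/O uses
+ * the per-generation ioc->build_sg_scmd selected in the switch above.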
+ */ + ioc->build_sg_mpi = &_base_build_sg; + ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge; + + r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); + if (r) + goto out_free_resources; + + ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts, + sizeof(struct mpt3sas_port_facts), GFP_KERNEL); + if (!ioc->pfacts) { + r = -ENOMEM; + goto out_free_resources; + } + + for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) { + r = _base_get_port_facts(ioc, i, CAN_SLEEP); + if (r) + goto out_free_resources; + } + + r = _base_allocate_memory_pools(ioc, CAN_SLEEP); + if (r) + goto out_free_resources; + + init_waitqueue_head(&ioc->reset_wq); + + /* allocate memory pd handle bitmask list */ + ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + ioc->pd_handles_sz++; + ioc->pd_handles = kzalloc(ioc->pd_handles_sz, + GFP_KERNEL); + if (!ioc->pd_handles) { + r = -ENOMEM; + goto out_free_resources; + } + ioc->blocking_handles = kzalloc(ioc->pd_handles_sz, + GFP_KERNEL); + if (!ioc->blocking_handles) { + r = -ENOMEM; + goto out_free_resources; + } + + ioc->fwfault_debug = mpt3sas_fwfault_debug; + + /* base internal command bits */ + mutex_init(&ioc->base_cmds.mutex); + ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + + /* port_enable command bits */ + ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED; + + /* transport internal command bits */ + ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->transport_cmds.mutex); + + /* scsih internal command bits */ + ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->scsih_cmds.mutex); + + /* task management internal command bits */ + ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->tm_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->tm_cmds.mutex); + + /* config page internal command bits */ + ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->config_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->config_cmds.mutex); + + /* ctl module internal command bits */ + ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); + ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + mutex_init(&ioc->ctl_cmds.mutex); + + if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply || + !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply || + !ioc->config_cmds.reply || !ioc->ctl_cmds.reply || + !ioc->ctl_cmds.sense) { + r = -ENOMEM; + goto out_free_resources; + } + + for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) + ioc->event_masks[i] = -1; + + /* here we enable the events we care about */ + _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY); + _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE); + _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST); + _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); + _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE); + _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST); + _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME); + _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK); + _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS); + _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED); + _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD); + + r = 
_base_make_ioc_operational(ioc, CAN_SLEEP); + if (r) + goto out_free_resources; + + ioc->non_operational_loop = 0; + return 0; + + out_free_resources: + + ioc->remove_host = 1; + + mpt3sas_base_free_resources(ioc); + _base_release_memory_pools(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + if (ioc->is_warpdrive) + kfree(ioc->reply_post_host_index); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->pfacts); + ioc->ctl_cmds.reply = NULL; + ioc->base_cmds.reply = NULL; + ioc->tm_cmds.reply = NULL; + ioc->scsih_cmds.reply = NULL; + ioc->transport_cmds.reply = NULL; + ioc->config_cmds.reply = NULL; + ioc->pfacts = NULL; + return r; +} + + +/** + * mpt3sas_base_detach - remove controller instance + * @ioc: per adapter object + * + * Return nothing. + */ +void +mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc) +{ + dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_free_resources(ioc); + _base_release_memory_pools(ioc); + pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + if (ioc->is_warpdrive) + kfree(ioc->reply_post_host_index); + kfree(ioc->pd_handles); + kfree(ioc->blocking_handles); + kfree(ioc->pfacts); + kfree(ioc->ctl_cmds.reply); + kfree(ioc->ctl_cmds.sense); + kfree(ioc->base_cmds.reply); + kfree(ioc->port_enable_cmds.reply); + kfree(ioc->tm_cmds.reply); + kfree(ioc->transport_cmds.reply); + kfree(ioc->scsih_cmds.reply); + kfree(ioc->config_cmds.reply); +} + +/** + * _base_reset_handler - reset callback handler (for base) + * @ioc: per adapter object + * @reset_phase: phase + * + * The handler for doing any required cleanup or initialization. + * + * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET, + * MPT3_IOC_DONE_RESET + * + * Return nothing. 
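+ *
+ * The phases arrive in order from mpt3sas_base_hard_reset_handler():
+ * MPT3_IOC_PRE_RESET before the chip is touched, MPT3_IOC_AFTER_RESET
+ * once the chip is back in READY state (this is where pending internal
+ * commands are flagged MPT3_CMD_RESET and completed so their waiters
+ * unblock), and MPT3_IOC_DONE_RESET only if the controller made it back
+ * to OPERATIONAL.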
+ */ +static void +_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) +{ + mpt3sas_scsih_reset_handler(ioc, reset_phase); + mpt3sas_ctl_reset_handler(ioc, reset_phase); + switch (reset_phase) { + case MPT3_IOC_PRE_RESET: + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__)); + break; + case MPT3_IOC_AFTER_RESET: + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__)); + if (ioc->transport_cmds.status & MPT3_CMD_PENDING) { + ioc->transport_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid); + complete(&ioc->transport_cmds.done); + } + if (ioc->base_cmds.status & MPT3_CMD_PENDING) { + ioc->base_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid); + complete(&ioc->base_cmds.done); + } + if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) { + ioc->port_enable_failed = 1; + ioc->port_enable_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid); + if (ioc->is_driver_loading) { + ioc->start_scan_failed = + MPI2_IOCSTATUS_INTERNAL_ERROR; + ioc->start_scan = 0; + ioc->port_enable_cmds.status = + MPT3_CMD_NOT_USED; + } else + complete(&ioc->port_enable_cmds.done); + } + if (ioc->config_cmds.status & MPT3_CMD_PENDING) { + ioc->config_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid); + ioc->config_cmds.smid = USHRT_MAX; + complete(&ioc->config_cmds.done); + } + break; + case MPT3_IOC_DONE_RESET: + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); + break; + } +} + +/** + * _wait_for_commands_to_complete - reset controller + * @ioc: Pointer to MPT_ADAPTER structure + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * + * This function waiting(3s) for all pending commands to complete + * prior to putting controller in reset. + */ +static void +_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag) +{ + u32 ioc_state; + unsigned long flags; + u16 i; + + ioc->pending_io_count = 0; + if (sleep_flag != CAN_SLEEP) + return; + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) + return; + + /* pending command count */ + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + for (i = 0; i < ioc->scsiio_depth; i++) + if (ioc->scsi_lookup[i].cb_idx != 0xFF) + ioc->pending_io_count++; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + if (!ioc->pending_io_count) + return; + + /* wait for pending commands to complete */ + wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ); +} + +/** + * mpt3sas_base_hard_reset_handler - reset controller + * @ioc: Pointer to MPT_ADAPTER structure + * @sleep_flag: CAN_SLEEP or NO_SLEEP + * @type: FORCE_BIG_HAMMER or SOFT_RESET + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag, + enum reset_type type) +{ + int r; + unsigned long flags; + u32 ioc_state; + u8 is_fault = 0, is_trigger = 0; + + dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, + __func__)); + + if (ioc->pci_error_recovery) { + pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n", + ioc->name, __func__); + r = 0; + goto out_unlocked; + } + + if (mpt3sas_fwfault_debug) + mpt3sas_halt_firmware(ioc); + + /* TODO - What we really should be doing is pulling + * out all the code associated with NO_SLEEP; its never used. 
+ * That is legacy code from mpt fusion driver, ported over. + * I will leave this BUG_ON here for now till its been resolved. + */ + BUG_ON(sleep_flag == NO_SLEEP); + + /* wait for an active reset in progress to complete */ + if (!mutex_trylock(&ioc->reset_in_progress_mutex)) { + do { + ssleep(1); + } while (ioc->shost_recovery == 1); + dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, + __func__)); + return ioc->ioc_reset_in_progress_status; + } + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) && + (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_RELEASED))) { + is_trigger = 1; + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) + is_fault = 1; + } + _base_reset_handler(ioc, MPT3_IOC_PRE_RESET); + _wait_for_commands_to_complete(ioc, sleep_flag); + _base_mask_interrupts(ioc); + r = _base_make_ioc_ready(ioc, sleep_flag, type); + if (r) + goto out; + _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET); + + /* If this hard reset is called while port enable is active, then + * there is no reason to call make_ioc_operational + */ + if (ioc->is_driver_loading && ioc->port_enable_failed) { + ioc->remove_host = 1; + r = -EFAULT; + goto out; + } + r = _base_get_ioc_facts(ioc, CAN_SLEEP); + if (r) + goto out; + + if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable) + panic("%s: Issue occurred with flashing controller firmware." + "Please reboot the system and ensure that the correct" + " firmware version is running\n", ioc->name); + + r = _base_make_ioc_operational(ioc, sleep_flag); + if (!r) + _base_reset_handler(ioc, MPT3_IOC_DONE_RESET); + + out: + dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n", + ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED"))); + + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + ioc->ioc_reset_in_progress_status = r; + ioc->shost_recovery = 0; + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + ioc->ioc_reset_count++; + mutex_unlock(&ioc->reset_in_progress_mutex); + + out_unlocked: + if ((r == 0) && is_trigger) { + if (is_fault) + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT); + else + mpt3sas_trigger_master(ioc, + MASTER_TRIGGER_ADAPTER_RESET); + } + dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name, + __func__)); + return r; +} diff --git a/addons/mpt3sas/src/4.4.180/mpt3sas_base.h b/addons/mpt3sas/src/4.4.180/mpt3sas_base.h new file mode 100644 index 00000000..63f5965a --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpt3sas_base.h @@ -0,0 +1,1437 @@ +/* + * This is the Fusion MPT base driver providing common API layer interface + * for access to MPT (Message Passing Technology) firmware. + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_BASE_H_INCLUDED
+#define MPT3SAS_BASE_H_INCLUDED
+
+#include "mpi/mpi2_type.h"
+#include "mpi/mpi2.h"
+#include "mpi/mpi2_ioc.h"
+#include "mpi/mpi2_cnfg.h"
+#include "mpi/mpi2_init.h"
+#include "mpi/mpi2_raid.h"
+#include "mpi/mpi2_tool.h"
+#include "mpi/mpi2_sas.h"
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+
+#include "mpt3sas_debug.h"
+#include "mpt3sas_trigger_diag.h"
+
+/* driver versioning info */
+#define MPT3SAS_DRIVER_NAME		"mpt3sas"
+#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
+#define MPT3SAS_DESCRIPTION	"LSI MPT Fusion SAS 3.0 Device Driver"
+#define MPT3SAS_DRIVER_VERSION		"09.102.00.00"
+#define MPT3SAS_MAJOR_VERSION		9
+#define MPT3SAS_MINOR_VERSION		102
+#define MPT3SAS_BUILD_VERSION		0
+#define MPT3SAS_RELEASE_VERSION	00
+
+#define MPT2SAS_DRIVER_NAME		"mpt2sas"
+#define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
+#define MPT2SAS_DRIVER_VERSION		"20.102.00.00"
+#define MPT2SAS_MAJOR_VERSION		20
+#define MPT2SAS_MINOR_VERSION		102
+#define MPT2SAS_BUILD_VERSION		0
+#define MPT2SAS_RELEASE_VERSION	00
+
+/*
+ * Set MPT3SAS_SG_DEPTH value based on user input.
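+ * For example, a kernel built with CONFIG_SCSI_MPT3SAS_MAX_SGE=128
+ * yields MPT3SAS_SG_DEPTH == 128; without the option it defaults to
+ * MPT_MAX_PHYS_SEGMENTS (i.e. SCSI_MAX_SG_SEGMENTS).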
+ */ +#define MPT_MAX_PHYS_SEGMENTS SCSI_MAX_SG_SEGMENTS +#define MPT_MIN_PHYS_SEGMENTS 16 + +#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE +#define MPT3SAS_SG_DEPTH CONFIG_SCSI_MPT3SAS_MAX_SGE +#else +#define MPT3SAS_SG_DEPTH MPT_MAX_PHYS_SEGMENTS +#endif + +#ifdef CONFIG_SCSI_MPT2SAS_MAX_SGE +#define MPT2SAS_SG_DEPTH CONFIG_SCSI_MPT2SAS_MAX_SGE +#else +#define MPT2SAS_SG_DEPTH MPT_MAX_PHYS_SEGMENTS +#endif + +/* + * Generic Defines + */ +#define MPT3SAS_SATA_QUEUE_DEPTH 32 +#define MPT3SAS_SAS_QUEUE_DEPTH 254 +#define MPT3SAS_RAID_QUEUE_DEPTH 128 + +#define MPT_NAME_LENGTH 32 /* generic length of strings */ +#define MPT_STRING_LENGTH 64 + +#define MPT_MAX_CALLBACKS 32 + + +#define CAN_SLEEP 1 +#define NO_SLEEP 0 + +#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */ + +#define MPI3_HIM_MASK 0xFFFFFFFF /* mask every bit*/ + +#define MPT3SAS_INVALID_DEVICE_HANDLE 0xFFFF + +/* + * reset phases + */ +#define MPT3_IOC_PRE_RESET 1 /* prior to host reset */ +#define MPT3_IOC_AFTER_RESET 2 /* just after host reset */ +#define MPT3_IOC_DONE_RESET 3 /* links re-initialized */ + +/* + * logging format + */ +#define MPT3SAS_FMT "%s: " + +/* + * WarpDrive Specific Log codes + */ + +#define MPT2_WARPDRIVE_LOGENTRY (0x8002) +#define MPT2_WARPDRIVE_LC_SSDT (0x41) +#define MPT2_WARPDRIVE_LC_SSDLW (0x43) +#define MPT2_WARPDRIVE_LC_SSDLF (0x44) +#define MPT2_WARPDRIVE_LC_BRMF (0x4D) + +/* + * per target private data + */ +#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01 +#define MPT_TARGET_FLAGS_VOLUME 0x02 +#define MPT_TARGET_FLAGS_DELETED 0x04 +#define MPT_TARGET_FASTPATH_IO 0x08 + +#define SAS2_PCI_DEVICE_B0_REVISION (0x01) +#define SAS3_PCI_DEVICE_C0_REVISION (0x02) + +/* + * Intel HBA branding + */ +#define MPT2SAS_INTEL_RMS25JB080_BRANDING \ + "Intel(R) Integrated RAID Module RMS25JB080" +#define MPT2SAS_INTEL_RMS25JB040_BRANDING \ + "Intel(R) Integrated RAID Module RMS25JB040" +#define MPT2SAS_INTEL_RMS25KB080_BRANDING \ + "Intel(R) Integrated RAID Module RMS25KB080" +#define MPT2SAS_INTEL_RMS25KB040_BRANDING \ + "Intel(R) Integrated RAID Module RMS25KB040" +#define MPT2SAS_INTEL_RMS25LB040_BRANDING \ + "Intel(R) Integrated RAID Module RMS25LB040" +#define MPT2SAS_INTEL_RMS25LB080_BRANDING \ + "Intel(R) Integrated RAID Module RMS25LB080" +#define MPT2SAS_INTEL_RMS2LL080_BRANDING \ + "Intel Integrated RAID Module RMS2LL080" +#define MPT2SAS_INTEL_RMS2LL040_BRANDING \ + "Intel Integrated RAID Module RMS2LL040" +#define MPT2SAS_INTEL_RS25GB008_BRANDING \ + "Intel(R) RAID Controller RS25GB008" +#define MPT2SAS_INTEL_SSD910_BRANDING \ + "Intel(R) SSD 910 Series" + +#define MPT3SAS_INTEL_RMS3JC080_BRANDING \ + "Intel(R) Integrated RAID Module RMS3JC080" +#define MPT3SAS_INTEL_RS3GC008_BRANDING \ + "Intel(R) RAID Controller RS3GC008" +#define MPT3SAS_INTEL_RS3FC044_BRANDING \ + "Intel(R) RAID Controller RS3FC044" +#define MPT3SAS_INTEL_RS3UC080_BRANDING \ + "Intel(R) RAID Controller RS3UC080" + +/* + * Intel HBA SSDIDs + */ +#define MPT2SAS_INTEL_RMS25JB080_SSDID 0x3516 +#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517 +#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518 +#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519 +#define MPT2SAS_INTEL_RMS25LB040_SSDID 0x351A +#define MPT2SAS_INTEL_RMS25LB080_SSDID 0x351B +#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E +#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F +#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000 +#define MPT2SAS_INTEL_SSD910_SSDID 0x3700 + +#define MPT3SAS_INTEL_RMS3JC080_SSDID 0x3521 +#define MPT3SAS_INTEL_RS3GC008_SSDID 0x3522 +#define MPT3SAS_INTEL_RS3FC044_SSDID 0x3523 +#define 
MPT3SAS_INTEL_RS3UC080_SSDID 0x3524 + +/* + * Dell HBA branding + */ +#define MPT2SAS_DELL_BRANDING_SIZE 32 + +#define MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING "Dell 6Gbps SAS HBA" +#define MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING "Dell PERC H200 Adapter" +#define MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING "Dell PERC H200 Integrated" +#define MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING "Dell PERC H200 Modular" +#define MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING "Dell PERC H200 Embedded" +#define MPT2SAS_DELL_PERC_H200_BRANDING "Dell PERC H200" +#define MPT2SAS_DELL_6GBPS_SAS_BRANDING "Dell 6Gbps SAS" + +#define MPT3SAS_DELL_12G_HBA_BRANDING \ + "Dell 12Gbps HBA" + +/* + * Dell HBA SSDIDs + */ +#define MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID 0x1F1C +#define MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID 0x1F1D +#define MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID 0x1F1E +#define MPT2SAS_DELL_PERC_H200_MODULAR_SSDID 0x1F1F +#define MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID 0x1F20 +#define MPT2SAS_DELL_PERC_H200_SSDID 0x1F21 +#define MPT2SAS_DELL_6GBPS_SAS_SSDID 0x1F22 + +#define MPT3SAS_DELL_12G_HBA_SSDID 0x1F46 + +/* + * Cisco HBA branding + */ +#define MPT3SAS_CISCO_12G_8E_HBA_BRANDING \ + "Cisco 9300-8E 12G SAS HBA" +#define MPT3SAS_CISCO_12G_8I_HBA_BRANDING \ + "Cisco 9300-8i 12G SAS HBA" +#define MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING \ + "Cisco 12G Modular SAS Pass through Controller" +#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING \ + "UCS C3X60 12G SAS Pass through Controller" +/* + * Cisco HBA SSDIDs + */ +#define MPT3SAS_CISCO_12G_8E_HBA_SSDID 0x14C +#define MPT3SAS_CISCO_12G_8I_HBA_SSDID 0x154 +#define MPT3SAS_CISCO_12G_AVILA_HBA_SSDID 0x155 +#define MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID 0x156 + +/* + * status bits for ioc->diag_buffer_status + */ +#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01) +#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02) +#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04) + +/* + * HP HBA branding + */ +#define MPT2SAS_HP_3PAR_SSVID 0x1590 + +#define MPT2SAS_HP_2_4_INTERNAL_BRANDING \ + "HP H220 Host Bus Adapter" +#define MPT2SAS_HP_2_4_EXTERNAL_BRANDING \ + "HP H221 Host Bus Adapter" +#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING \ + "HP H222 Host Bus Adapter" +#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING \ + "HP H220i Host Bus Adapter" +#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING \ + "HP H210i Host Bus Adapter" + +/* + * HP HBA SSDIDs + */ +#define MPT2SAS_HP_2_4_INTERNAL_SSDID 0x0041 +#define MPT2SAS_HP_2_4_EXTERNAL_SSDID 0x0042 +#define MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID 0x0043 +#define MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID 0x0044 +#define MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID 0x0046 + +/* + * Combined Reply Queue constants, + * There are twelve Supplemental Reply Post Host Index Registers + * and each register is at offset 0x10 bytes from the previous one.
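The comment above pins down the supplemental register layout; a hedged sketch of the resulting address math (helper name hypothetical; the two MPT3_SUP_* macros it uses are the ones defined immediately below):

/* Illustrative only: supplemental reply post host index register i
 * lives 0x10 bytes past register i - 1. */
static void __iomem *example_sup_reply_post_reg(void __iomem *base, int i)
{
	BUG_ON(i < 0 || i >= MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT);
	return base + i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET;
}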
+ */ +#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT 12 +#define MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET (0x10) + +/* OEM Identifiers */ +#define MFG10_OEM_ID_INVALID (0x00000000) +#define MFG10_OEM_ID_DELL (0x00000001) +#define MFG10_OEM_ID_FSC (0x00000002) +#define MFG10_OEM_ID_SUN (0x00000003) +#define MFG10_OEM_ID_IBM (0x00000004) + +/* GENERIC Flags 0*/ +#define MFG10_GF0_OCE_DISABLED (0x00000001) +#define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002) +#define MFG10_GF0_R10_DISPLAY (0x00000004) +#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008) +#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010) + +#define VIRTUAL_IO_FAILED_RETRY (0x32010081) + +/* OEM Specific Flags will come from OEM specific header files */ +struct Mpi2ManufacturingPage10_t { + MPI2_CONFIG_PAGE_HEADER Header; /* 00h */ + U8 OEMIdentifier; /* 04h */ + U8 Reserved1; /* 05h */ + U16 Reserved2; /* 08h */ + U32 Reserved3; /* 0Ch */ + U32 GenericFlags0; /* 10h */ + U32 GenericFlags1; /* 14h */ + U32 Reserved4; /* 18h */ + U32 OEMSpecificFlags0; /* 1Ch */ + U32 OEMSpecificFlags1; /* 20h */ + U32 Reserved5[18]; /* 24h - 60h*/ +}; + + +/* Miscellaneous options */ +struct Mpi2ManufacturingPage11_t { + MPI2_CONFIG_PAGE_HEADER Header; /* 00h */ + __le32 Reserved1; /* 04h */ + u8 Reserved2; /* 08h */ + u8 EEDPTagMode; /* 09h */ + u8 Reserved3; /* 0Ah */ + u8 Reserved4; /* 0Bh */ + __le32 Reserved5[23]; /* 0Ch-60h*/ +}; + +/** + * struct MPT3SAS_TARGET - starget private hostdata + * @starget: starget object + * @sas_address: target sas address + * @raid_device: raid_device pointer to access volume data + * @handle: device handle + * @num_luns: number of luns + * @flags: MPT_TARGET_FLAGS_XXX flags + * @deleted: target flagged for deletion + * @tm_busy: target is busy with TM request. + * @sdev: The sas_device associated with this target + */ +struct MPT3SAS_TARGET { + struct scsi_target *starget; + u64 sas_address; + struct _raid_device *raid_device; + u16 handle; + int num_luns; + u32 flags; + u8 deleted; + u8 tm_busy; + struct _sas_device *sdev; +}; + + +/* + * per device private data + */ +#define MPT_DEVICE_FLAGS_INIT 0x01 +#define MPT_DEVICE_TLR_ON 0x02 + +#define MFG_PAGE10_HIDE_SSDS_MASK (0x00000003) +#define MFG_PAGE10_HIDE_ALL_DISKS (0x00) +#define MFG_PAGE10_EXPOSE_ALL_DISKS (0x01) +#define MFG_PAGE10_HIDE_IF_VOL_PRESENT (0x02) + +/** + * struct MPT3SAS_DEVICE - sdev private hostdata + * @sas_target: starget private hostdata + * @lun: lun number + * @flags: MPT_DEVICE_XXX flags + * @configured_lun: lun is configured + * @block: device is in SDEV_BLOCK state + * @tlr_snoop_check: flag used in determining whether to disable TLR + * @eedp_enable: eedp support enable bit + * @eedp_type: 0(type_1), 1(type_2), 2(type_3) + * @eedp_block_length: block size + * @ata_command_pending: SATL passthrough outstanding for device + */ +struct MPT3SAS_DEVICE { + struct MPT3SAS_TARGET *sas_target; + unsigned int lun; + u32 flags; + u8 configured_lun; + u8 block; + u8 tlr_snoop_check; + /* + * Bug workaround for SATL handling: the mpt2/3sas firmware + * doesn't return BUSY or TASK_SET_FULL for subsequent + * commands while a SATL pass through is in operation as the + * spec requires, it simply does nothing with them until the + * pass through completes, possibly causing them to time out if + * the passthrough is a long-executing command (like format or + * secure erase). This variable allows us to do the right + * thing while a SATL command is pending.
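A hedged sketch of how a guard flag like @ata_command_pending (declared just below) is typically driven; the bit index and helper names here are hypothetical, not the driver's own:

/* Illustrative only: mark/test an outstanding SATL passthrough with the
 * atomic bitops so regular I/O can be held off while it runs. */
#define EXAMPLE_CMND_PENDING_BIT 0	/* hypothetical bit index */

static inline bool example_satl_pending(struct MPT3SAS_DEVICE *priv)
{
	return test_bit(EXAMPLE_CMND_PENDING_BIT, &priv->ata_command_pending);
}

static inline void example_satl_set_pending(struct MPT3SAS_DEVICE *priv,
	bool pending)
{
	if (pending)
		set_bit(EXAMPLE_CMND_PENDING_BIT, &priv->ata_command_pending);
	else
		clear_bit(EXAMPLE_CMND_PENDING_BIT, &priv->ata_command_pending);
}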
+ */ + unsigned long ata_command_pending; +}; + +#define MPT3_CMD_NOT_USED 0x8000 /* free */ +#define MPT3_CMD_COMPLETE 0x0001 /* completed */ +#define MPT3_CMD_PENDING 0x0002 /* pending */ +#define MPT3_CMD_REPLY_VALID 0x0004 /* reply is valid */ +#define MPT3_CMD_RESET 0x0008 /* host reset dropped the command */ + +/** + * struct _internal_cmd - internal commands struct + * @mutex: mutex + * @done: completion + * @reply: reply message pointer + * @sense: sense data + * @status: MPT3_CMD_XXX status + * @smid: system message id + */ +struct _internal_cmd { + struct mutex mutex; + struct completion done; + void *reply; + void *sense; + u16 status; + u16 smid; +}; + + + +/** + * struct _sas_device - attached device information + * @list: sas device list + * @starget: starget object + * @sas_address: device sas address + * @device_name: retrieved from the SAS IDENTIFY frame. + * @handle: device handle + * @sas_address_parent: sas address of parent expander or sas host + * @enclosure_handle: enclosure handle + * @enclosure_logical_id: enclosure logical identifier + * @volume_handle: volume handle (valid when hidden raid member) + * @volume_wwid: volume unique identifier + * @device_info: bitfield provides detailed info about the device + * @id: target id + * @channel: target channel + * @slot: slot number + * @phy: phy identifier provided in sas device page 0 + * @responding: used in _scsih_sas_device_mark_responding + * @fast_path: fast path feature enable bit + * @pfa_led_on: flag for PFA LED status + * @pend_sas_rphy_add: flag to check if device is in sas_rphy_add() + * addition routine. + */ +struct _sas_device { + struct list_head list; + struct scsi_target *starget; + u64 sas_address; + u64 device_name; + u16 handle; + u64 sas_address_parent; + u16 enclosure_handle; + u64 enclosure_logical_id; + u16 volume_handle; + u64 volume_wwid; + u32 device_info; + int id; + int channel; + u16 slot; + u8 phy; + u8 responding; + u8 fast_path; + u8 pfa_led_on; + u8 pend_sas_rphy_add; + u8 enclosure_level; + u8 connector_name[4]; + struct kref refcount; +}; + +static inline void sas_device_get(struct _sas_device *s) +{ + kref_get(&s->refcount); +} + +static inline void sas_device_free(struct kref *r) +{ + kfree(container_of(r, struct _sas_device, refcount)); +} + +static inline void sas_device_put(struct _sas_device *s) +{ + kref_put(&s->refcount, sas_device_free); +} + +/** + * struct _raid_device - raid volume link list + * @list: sas device list + * @starget: starget object + * @sdev: scsi device struct (volumes are single lun) + * @wwid: unique identifier for the volume + * @handle: device handle + * @block_size: Block size of the volume + * @id: target id + * @channel: target channel + * @volume_type: the raid level + * @device_info: bitfield provides detailed info about the hidden components + * @num_pds: number of hidden raid components + * @responding: used in _scsih_raid_device_mark_responding + * @percent_complete: resync percent complete + * @direct_io_enabled: Whether direct io to PDs is allowed or not + * @stripe_exponent: X where 2powX is the stripe sz in blocks + * @block_exponent: X where 2powX is the block sz in bytes + * @max_lba: Maximum number of LBA in the volume + * @stripe_sz: Stripe Size of the volume + * @device_info: Device info of the volume member disk + * @pd_handle: Array of handles of the physical drives for direct I/O in le16 + */ +#define MPT_MAX_WARPDRIVE_PDS 8 +struct _raid_device { + struct list_head list; + struct scsi_target *starget; + struct scsi_device *sdev;
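The kref helpers above imply a lookup-then-put discipline; a minimal usage sketch (mpt3sas_get_sdev_by_addr() is declared later in this header, and the lookup returns a reference the caller owns):

	struct _sas_device *sas_device;

	sas_device = mpt3sas_get_sdev_by_addr(ioc, sas_address);
	if (sas_device) {
		/* safe to dereference while we hold the reference */
		sas_device_put(sas_device);	/* drop the lookup reference */
	}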
+ u64 wwid; + u16 handle; + u16 block_sz; + int id; + int channel; + u8 volume_type; + u8 num_pds; + u8 responding; + u8 percent_complete; + u8 direct_io_enabled; + u8 stripe_exponent; + u8 block_exponent; + u64 max_lba; + u32 stripe_sz; + u32 device_info; + u16 pd_handle[MPT_MAX_WARPDRIVE_PDS]; +}; + +/** + * struct _boot_device - boot device info + * @is_raid: flag to indicate whether this is a volume + * @device: holds pointer for either struct _sas_device or + * struct _raid_device + */ +struct _boot_device { + u8 is_raid; + void *device; +}; + +/** + * struct _sas_port - wide/narrow sas port information + * @port_list: list of ports belonging to expander + * @num_phys: number of phys belonging to this port + * @remote_identify: attached device identification + * @rphy: sas transport rphy object + * @port: sas transport wide/narrow port object + * @phy_list: _sas_phy list objects belonging to this port + */ +struct _sas_port { + struct list_head port_list; + u8 num_phys; + struct sas_identify remote_identify; + struct sas_rphy *rphy; + struct sas_port *port; + struct list_head phy_list; +}; + +/** + * struct _sas_phy - phy information + * @port_siblings: list of phys belonging to a port + * @identify: phy identification + * @remote_identify: attached device identification + * @phy: sas transport phy object + * @phy_id: unique phy id + * @handle: device handle for this phy + * @attached_handle: device handle for attached device + * @phy_belongs_to_port: port has been created for this phy + */ +struct _sas_phy { + struct list_head port_siblings; + struct sas_identify identify; + struct sas_identify remote_identify; + struct sas_phy *phy; + u8 phy_id; + u16 handle; + u16 attached_handle; + u8 phy_belongs_to_port; +}; + +/** + * struct _sas_node - sas_host/expander information + * @list: list of expanders + * @parent_dev: parent device class + * @num_phys: number of phys belonging to this sas_host/expander + * @sas_address: sas address of this sas_host/expander + * @handle: handle for this sas_host/expander + * @sas_address_parent: sas address of parent expander or sas host + * @enclosure_handle: enclosure handle, if this node is a member of an enclosure + * @device_info: bitwise defining capabilities of this sas_host/expander + * @responding: used in _scsih_expander_device_mark_responding + * @phy: a list of phys that make up this sas_host/expander + * @sas_port_list: list of ports attached to this sas_host/expander + */ +struct _sas_node { + struct list_head list; + struct device *parent_dev; + u8 num_phys; + u64 sas_address; + u16 handle; + u64 sas_address_parent; + u16 enclosure_handle; + u64 enclosure_logical_id; + u8 responding; + struct _sas_phy *phy; + struct list_head sas_port_list; +}; + +/** + * enum reset_type - reset state + * @FORCE_BIG_HAMMER: issue diagnostic reset + * @SOFT_RESET: issue message_unit_reset, if that fails issue big hammer + */ +enum reset_type { + FORCE_BIG_HAMMER, + SOFT_RESET, +}; + +/** + * struct chain_tracker - firmware chain tracker + * @chain_buffer: chain buffer + * @chain_buffer_dma: physical address + * @tracker_list: list of free request (ioc->free_chain_list) + */ +struct chain_tracker { + void *chain_buffer; + dma_addr_t chain_buffer_dma; + struct list_head tracker_list; +}; + +/** + * struct scsiio_tracker - scsi mf request tracker + * @smid: system message id + * @scmd: scsi request pointer + * @cb_idx: callback index + * @direct_io: To indicate whether I/O is direct (WARPDRIVE) + * @tracker_list: list of free request (ioc->free_list) + * @msix_io: IO's msix + */ +struct
scsiio_tracker { + u16 smid; + struct scsi_cmnd *scmd; + u8 cb_idx; + u8 direct_io; + struct list_head chain_list; + struct list_head tracker_list; + u16 msix_io; +}; + +/** + * struct request_tracker - firmware request tracker + * @smid: system message id + * @cb_idx: callback index + * @tracker_list: list of free request (ioc->free_list) + */ +struct request_tracker { + u16 smid; + u8 cb_idx; + struct list_head tracker_list; +}; + +/** + * struct _tr_list - target reset list + * @handle: device handle + * @state: state machine + */ +struct _tr_list { + struct list_head list; + u16 handle; + u16 state; +}; + + +/** + * struct adapter_reply_queue - the reply queue struct + * @ioc: per adapter object + * @msix_index: msix index into vector table + * @vector: irq vector + * @reply_post_host_index: head index in the pool where FW completes IO + * @reply_post_free: reply post base virt address + * @name: the name registered to request_irq() + * @busy: isr is actively processing replies on another cpu + * @list: this list +*/ +struct adapter_reply_queue { + struct MPT3SAS_ADAPTER *ioc; + u8 msix_index; + unsigned int vector; + u32 reply_post_host_index; + Mpi2ReplyDescriptorsUnion_t *reply_post_free; + char name[MPT_NAME_LENGTH]; + atomic_t busy; + cpumask_var_t affinity_hint; + struct list_head list; +}; + +typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr); + +/* SAS3.0 support */ +typedef int (*MPT_BUILD_SG_SCMD)(struct MPT3SAS_ADAPTER *ioc, + struct scsi_cmnd *scmd, u16 smid); +typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge, + dma_addr_t data_out_dma, size_t data_out_sz, + dma_addr_t data_in_dma, size_t data_in_sz); +typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc, + void *paddr); + + + +/* IOC Facts and Port Facts converted from little endian to cpu */ +union mpi3_version_union { + MPI2_VERSION_STRUCT Struct; + u32 Word; +}; + +struct mpt3sas_facts { + u16 MsgVersion; + u16 HeaderVersion; + u8 IOCNumber; + u8 VP_ID; + u8 VF_ID; + u16 IOCExceptions; + u16 IOCStatus; + u32 IOCLogInfo; + u8 MaxChainDepth; + u8 WhoInit; + u8 NumberOfPorts; + u8 MaxMSIxVectors; + u16 RequestCredit; + u16 ProductID; + u32 IOCCapabilities; + union mpi3_version_union FWVersion; + u16 IOCRequestFrameSize; + u16 Reserved3; + u16 MaxInitiators; + u16 MaxTargets; + u16 MaxSasExpanders; + u16 MaxEnclosures; + u16 ProtocolFlags; + u16 HighPriorityCredit; + u16 MaxReplyDescriptorPostQueueDepth; + u8 ReplyFrameSize; + u8 MaxVolumes; + u16 MaxDevHandle; + u16 MaxPersistentEntries; + u16 MinDevHandle; +}; + +struct mpt3sas_port_facts { + u8 PortNumber; + u8 VP_ID; + u8 VF_ID; + u8 PortType; + u16 MaxPostedCmdBuffers; +}; + +struct reply_post_struct { + Mpi2ReplyDescriptorsUnion_t *reply_post_free; + dma_addr_t reply_post_free_dma; +}; + +/** + * enum mutex_type - task management mutex type + * @TM_MUTEX_OFF: mutex is not required because the calling function is acquiring it + * @TM_MUTEX_ON: mutex is required + */ +enum mutex_type { + TM_MUTEX_OFF = 0, + TM_MUTEX_ON = 1, +}; + +typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); +/** + * struct MPT3SAS_ADAPTER - per adapter struct + * @list: ioc_list + * @shost: shost object + * @id: unique adapter id + * @cpu_count: number of online cpus + * @name: generic ioc string + * @tmp_string: tmp string used for logging + * @pdev: pci pdev object + * @pio_chip: physical io register space + * @chip: memory mapped register space + * @chip_phys: physical address prior to mapping + * @logging_level: see
mpt3sas_debug.h + * @fwfault_debug: debugging FW timeouts + * @ir_firmware: IR firmware present + * @bars: bitmask of BARs that must be configured + * @mask_interrupts: ignore interrupt + * @dma_mask: used to set the consistent dma mask + * @fault_reset_work_q_name: fw fault work queue + * @fault_reset_work_q: "" + * @fault_reset_work: "" + * @firmware_event_name: fw event work queue + * @firmware_event_thread: "" + * @fw_event_lock: + * @fw_event_list: list of fw events + * @aen_event_read_flag: event log was read + * @broadcast_aen_busy: broadcast aen waiting to be serviced + * @shost_recovery: host reset in progress + * @ioc_reset_in_progress_lock: + * @ioc_link_reset_in_progress: phy/hard reset in progress + * @ignore_loginfos: ignore loginfos during task management + * @remove_host: flag for when driver unloads, to avoid sending dev resets + * @pci_error_recovery: flag to prevent ioc access until slot reset completes + * @wait_for_discovery_to_complete: flag set at driver load time when + * waiting on reporting devices + * @is_driver_loading: flag set at driver load time + * @port_enable_failed: flag set when port enable has failed + * @start_scan: flag set from scan_start callback, cleared from _mpt3sas_fw_work + * @start_scan_failed: means port enable failed, returns the ioc_status + * @msix_enable: flag indicating msix is enabled + * @msix_vector_count: number of msix vectors + * @cpu_msix_table: table for mapping cpus to msix index + * @cpu_msix_table_sz: table size + * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands + * @scsi_io_cb_idx: shost generated commands + * @tm_cb_idx: task management commands + * @scsih_cb_idx: scsih internal commands + * @transport_cb_idx: transport internal commands + * @ctl_cb_idx: ctl internal commands + * @base_cb_idx: base internal commands + * @config_cb_idx: config internal commands + * @tm_tr_cb_idx : device removal target reset handshake + * @tm_tr_volume_cb_idx : volume removal target reset + * @base_cmds: + * @transport_cmds: + * @scsih_cmds: + * @tm_cmds: + * @ctl_cmds: + * @config_cmds: + * @base_add_sg_single: handler for either 32/64 bit sgl's + * @event_type: bits indicating which events to log + * @event_context: unique id for each logged event + * @event_log: event log pointer + * @event_masks: events that are masked + * @facts: static facts data + * @pfacts: static port facts data + * @manu_pg0: static manufacturing page 0 + * @manu_pg10: static manufacturing page 10 + * @manu_pg11: static manufacturing page 11 + * @bios_pg2: static bios page 2 + * @bios_pg3: static bios page 3 + * @ioc_pg8: static ioc page 8 + * @iounit_pg0: static iounit page 0 + * @iounit_pg1: static iounit page 1 + * @iounit_pg8: static iounit page 8 + * @sas_hba: sas host object + * @sas_expander_list: expander object list + * @sas_node_lock: + * @sas_device_list: sas device object list + * @sas_device_init_list: sas device object list (used only at init time) + * @sas_device_lock: + * @io_missing_delay: time for IO completed by fw when PDR enabled + * @device_missing_delay: time for device missing by fw when PDR enabled + * @sas_id : used for setting volume target IDs + * @blocking_handles: bitmask used to identify which devices need blocking + * @pd_handles : bitmask for PD handles + * @pd_handles_sz : size of pd_handle bitmask + * @config_page_sz: config page size + * @config_page: reserve memory for config page payload + * @config_page_dma: + * @hba_queue_depth: hba request queue depth + * @sge_size: sg element size for either 32/64 bit
+ * @scsiio_depth: SCSI_IO queue depth + * @request_sz: per request frame size + * @request: pool of request frames + * @request_dma: + * @request_dma_sz: + * @scsi_lookup: firmware request tracker list + * @scsi_lookup_lock: + * @free_list: free list of request + * @pending_io_count: + * @reset_wq: + * @chain: pool of chains + * @chain_dma: + * @max_sges_in_main_message: number sg elements in main message + * @max_sges_in_chain_message: number sg elements per chain + * @chains_needed_per_io: max chains per io + * @chain_depth: total chains allocated + * @hi_priority_smid: + * @hi_priority: + * @hi_priority_dma: + * @hi_priority_depth: + * @hpr_lookup: + * @hpr_free_list: + * @internal_smid: + * @internal: + * @internal_dma: + * @internal_depth: + * @internal_lookup: + * @internal_free_list: + * @sense: pool of sense + * @sense_dma: + * @sense_dma_pool: + * @reply_depth: hba reply queue depth + * @reply_sz: per reply frame size + * @reply: pool of replies + * @reply_dma: + * @reply_dma_pool: + * @reply_free_queue_depth: reply free depth + * @reply_free: pool for reply free queue (32 bit addr) + * @reply_free_dma: + * @reply_free_dma_pool: + * @reply_free_host_index: tail index in pool to insert free replies + * @reply_post_queue_depth: reply post queue depth + * @reply_post_struct: struct for reply_post_free physical & virt address + * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init + * @rdpq_array_enable: rdpq_array support is enabled in the driver + * @rdpq_array_enable_assigned: this ensures that rdpq_array_enable flag + * is assigned only once + * @reply_queue_count: number of reply queues + * @reply_queue_list: link list containing the reply queue info + * @msix96_vector: 96 MSI-X vector support + * @replyPostRegisterIndex: index of next position in Reply Desc Post Queue + * @delayed_tr_list: target reset link list + * @delayed_tr_volume_list: volume target reset link list + * @temp_sensors_count: flag to carry the number of temperature sensors + * @pci_access_mutex: Mutex to synchronize ioctl, sysfs show path and + * pci resource handling. PCI resource freeing will free vital + * hardware/memory resources, which might be in use by cli/sysfs + * path functions, resulting in a NULL pointer dereference followed by a + * kernel crash. To avoid the above race condition we use mutex + * synchronization which ensures the synchronization between the + * cli/sysfs_show paths.
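A hedged sketch of the discipline the @pci_access_mutex documentation above prescribes (the show handler is hypothetical; class_to_shost() and shost_priv() are the standard SCSI midlayer accessors):

/* Illustrative only: a sysfs show path that takes pci_access_mutex so it
 * cannot race with PCI resource teardown. */
static ssize_t example_logging_level_show(struct device *cdev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(cdev);
	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
	ssize_t rc = 0;

	mutex_lock(&ioc->pci_access_mutex);
	if (!ioc->pci_error_recovery)
		rc = snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
	mutex_unlock(&ioc->pci_access_mutex);
	return rc;
}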
+ */ +struct MPT3SAS_ADAPTER { + struct list_head list; + struct Scsi_Host *shost; + u8 id; + int cpu_count; + char name[MPT_NAME_LENGTH]; + char driver_name[MPT_NAME_LENGTH]; + char tmp_string[MPT_STRING_LENGTH]; + struct pci_dev *pdev; + Mpi2SystemInterfaceRegs_t __iomem *chip; + resource_size_t chip_phys; + int logging_level; + int fwfault_debug; + u8 ir_firmware; + int bars; + u8 mask_interrupts; + int dma_mask; + + /* fw fault handler */ + char fault_reset_work_q_name[20]; + struct workqueue_struct *fault_reset_work_q; + struct delayed_work fault_reset_work; + + /* fw event handler */ + char firmware_event_name[20]; + struct workqueue_struct *firmware_event_thread; + spinlock_t fw_event_lock; + struct list_head fw_event_list; + + /* misc flags */ + int aen_event_read_flag; + u8 broadcast_aen_busy; + u16 broadcast_aen_pending; + u8 shost_recovery; + + struct mutex reset_in_progress_mutex; + spinlock_t ioc_reset_in_progress_lock; + u8 ioc_link_reset_in_progress; + u8 ioc_reset_in_progress_status; + + u8 ignore_loginfos; + u8 remove_host; + u8 pci_error_recovery; + u8 wait_for_discovery_to_complete; + u8 is_driver_loading; + u8 port_enable_failed; + u8 start_scan; + u16 start_scan_failed; + + u8 msix_enable; + u16 msix_vector_count; + u8 *cpu_msix_table; + u16 cpu_msix_table_sz; + resource_size_t __iomem **reply_post_host_index; + u32 ioc_reset_count; + MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds; + u32 non_operational_loop; + + /* internal commands, callback index */ + u8 scsi_io_cb_idx; + u8 tm_cb_idx; + u8 transport_cb_idx; + u8 scsih_cb_idx; + u8 ctl_cb_idx; + u8 base_cb_idx; + u8 port_enable_cb_idx; + u8 config_cb_idx; + u8 tm_tr_cb_idx; + u8 tm_tr_volume_cb_idx; + u8 tm_sas_control_cb_idx; + struct _internal_cmd base_cmds; + struct _internal_cmd port_enable_cmds; + struct _internal_cmd transport_cmds; + struct _internal_cmd scsih_cmds; + struct _internal_cmd tm_cmds; + struct _internal_cmd ctl_cmds; + struct _internal_cmd config_cmds; + + MPT_ADD_SGE base_add_sg_single; + + /* function ptr for either IEEE or MPI sg elements */ + MPT_BUILD_SG_SCMD build_sg_scmd; + MPT_BUILD_SG build_sg; + MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge; + u16 sge_size_ieee; + u16 hba_mpi_version_belonged; + + /* function ptr for MPI sg elements only */ + MPT_BUILD_SG build_sg_mpi; + MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi; + + /* event log */ + u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; + u32 event_context; + void *event_log; + u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; + + /* static config pages */ + struct mpt3sas_facts facts; + struct mpt3sas_port_facts *pfacts; + Mpi2ManufacturingPage0_t manu_pg0; + struct Mpi2ManufacturingPage10_t manu_pg10; + struct Mpi2ManufacturingPage11_t manu_pg11; + Mpi2BiosPage2_t bios_pg2; + Mpi2BiosPage3_t bios_pg3; + Mpi2IOCPage8_t ioc_pg8; + Mpi2IOUnitPage0_t iounit_pg0; + Mpi2IOUnitPage1_t iounit_pg1; + Mpi2IOUnitPage8_t iounit_pg8; + + struct _boot_device req_boot_device; + struct _boot_device req_alt_boot_device; + struct _boot_device current_boot_device; + + /* sas hba, expander, and device list */ + struct _sas_node sas_hba; + struct list_head sas_expander_list; + spinlock_t sas_node_lock; + struct list_head sas_device_list; + struct list_head sas_device_init_list; + spinlock_t sas_device_lock; + struct list_head raid_device_list; + spinlock_t raid_device_lock; + u8 io_missing_delay; + u16 device_missing_delay; + int sas_id; + + void *blocking_handles; + void *pd_handles; + u16 pd_handles_sz; + + /* config page */ + u16 
config_page_sz; + void *config_page; + dma_addr_t config_page_dma; + + /* scsiio request */ + u16 hba_queue_depth; + u16 sge_size; + u16 scsiio_depth; + u16 request_sz; + u8 *request; + dma_addr_t request_dma; + u32 request_dma_sz; + struct scsiio_tracker *scsi_lookup; + ulong scsi_lookup_pages; + spinlock_t scsi_lookup_lock; + struct list_head free_list; + int pending_io_count; + wait_queue_head_t reset_wq; + + /* chain */ + struct chain_tracker *chain_lookup; + struct list_head free_chain_list; + struct dma_pool *chain_dma_pool; + ulong chain_pages; + u16 max_sges_in_main_message; + u16 max_sges_in_chain_message; + u16 chains_needed_per_io; + u32 chain_depth; + + /* hi-priority queue */ + u16 hi_priority_smid; + u8 *hi_priority; + dma_addr_t hi_priority_dma; + u16 hi_priority_depth; + struct request_tracker *hpr_lookup; + struct list_head hpr_free_list; + + /* internal queue */ + u16 internal_smid; + u8 *internal; + dma_addr_t internal_dma; + u16 internal_depth; + struct request_tracker *internal_lookup; + struct list_head internal_free_list; + + /* sense */ + u8 *sense; + dma_addr_t sense_dma; + struct dma_pool *sense_dma_pool; + + /* reply */ + u16 reply_sz; + u8 *reply; + dma_addr_t reply_dma; + u32 reply_dma_max_address; + u32 reply_dma_min_address; + struct dma_pool *reply_dma_pool; + + /* reply free queue */ + u16 reply_free_queue_depth; + __le32 *reply_free; + dma_addr_t reply_free_dma; + struct dma_pool *reply_free_dma_pool; + u32 reply_free_host_index; + + /* reply post queue */ + u16 reply_post_queue_depth; + struct reply_post_struct *reply_post; + u8 rdpq_array_capable; + u8 rdpq_array_enable; + u8 rdpq_array_enable_assigned; + struct dma_pool *reply_post_free_dma_pool; + u8 reply_queue_count; + struct list_head reply_queue_list; + + u8 msix96_vector; + /* reply post register index */ + resource_size_t **replyPostRegisterIndex; + + struct list_head delayed_tr_list; + struct list_head delayed_tr_volume_list; + u8 temp_sensors_count; + struct mutex pci_access_mutex; + + /* diag buffer support */ + u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT]; + u32 diag_buffer_sz[MPI2_DIAG_BUF_TYPE_COUNT]; + dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT]; + u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT]; + u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT]; + u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23]; + u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT]; + u32 ring_buffer_offset; + u32 ring_buffer_sz; + u8 is_warpdrive; + u8 hide_ir_msg; + u8 mfg_pg10_hide_flag; + u8 hide_drives; + spinlock_t diag_trigger_lock; + u8 diag_trigger_active; + struct SL_WH_MASTER_TRIGGER_T diag_trigger_master; + struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event; + struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi; + struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi; +}; + +typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); + + +/* base shared API */ +extern struct list_head mpt3sas_ioc_list; +extern char driver_name[MPT_NAME_LENGTH]; +/* spinlock on list operations over IOCs + * Case: when multiple warpdrive cards (IOCs) are in use + * Each IOC will be added to the ioc list structure on initialization.
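A sketch of the traversal that the locking comment here is protecting (the finder is hypothetical; gioc_lock is the spinlock declared just below, mpt3sas_ioc_list the extern above):

/* Illustrative only: walk the global IOC list under gioc_lock so a
 * concurrent dead-ioc removal cannot corrupt the traversal. */
static struct MPT3SAS_ADAPTER *example_find_ioc_by_id(u8 id)
{
	struct MPT3SAS_ADAPTER *ioc, *found = NULL;

	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		if (ioc->id == id) {
			found = ioc;
			break;
		}
	spin_unlock(&gioc_lock);
	return found;
}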
+ * Watchdog threads run at regular intervals to check IOC for any + * fault conditions which will trigger the dead_ioc thread to + * deallocate pci resources, resulting in deletion of the IOC entry from the list; + * this deletion needs to be protected by a spinlock to ensure that + * ioc removal is synchronized; if not synchronized it might lead to + * list_del corruption as the ioc list is traversed in the cli path. + */ +extern spinlock_t gioc_lock; + +void mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc); + +int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc); +int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc); +int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag, + enum reset_type type); + +void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid); +void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid); +__le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, + u16 smid); +void mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc); + +/* hi-priority queue */ +u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); +u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx, + struct scsi_cmnd *scmd); + +u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx); +void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid); +void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle); +void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u16 handle); +void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, + u16 smid, u16 msix_task); +void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid); +void mpt3sas_base_initialize_callback_handler(void); +u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func); +void mpt3sas_base_release_callback_handler(u8 cb_idx); + +u8 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +u8 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply); +void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, + u32 phys_addr); + +u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked); + +void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code); +int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, + Mpi2SasIoUnitControlReply_t *mpi_reply, + Mpi2SasIoUnitControlRequest_t *mpi_request); +int mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, + Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request); + +void mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, + u32 *event_type); + +void mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc); + +void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc, + u16 device_missing_delay, u8 io_missing_delay); + +int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc); + + +/* scsih shared API */ +u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, + u32 reply); +void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase); + +int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, + uint channel, uint id, uint lun, u8 type, u16 smid_task, + ulong timeout, enum mutex_type m_type); +void mpt3sas_scsih_set_tm_flag(struct
MPT3SAS_ADAPTER *ioc, u16 handle); +void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle); +void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address); +void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address); + +struct _sas_node *mpt3sas_scsih_expander_find_by_handle( + struct MPT3SAS_ADAPTER *ioc, u16 handle); +struct _sas_node *mpt3sas_scsih_expander_find_by_sas_address( + struct MPT3SAS_ADAPTER *ioc, u64 sas_address); +struct _sas_device *mpt3sas_get_sdev_by_addr( + struct MPT3SAS_ADAPTER *ioc, u64 sas_address); +struct _sas_device *__mpt3sas_get_sdev_by_addr( + struct MPT3SAS_ADAPTER *ioc, u64 sas_address); + +void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc); +struct _raid_device * +mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle); + +/* config shared API */ +u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +int mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, + u8 *num_phys); +int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page); +int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page, + u16 sz); +int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage10_t *config_page); + +int mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page); +int mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page); + +int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2BiosPage2_t *config_page); +int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2BiosPage3_t *config_page); +int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage0_t *config_page); +int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, + u16 sz); +int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage1_t *config_page); +int mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz); +int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage1_t *config_page); +int mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2IOUnitPage8_t *config_page); +int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz); +int mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz); +int mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, 
Mpi2ConfigReply_t + *mpi_reply, Mpi2IOCPage8_t *config_page); +int mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage1_t *config_page, + u32 phy_number, u16 handle); +int mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, + u32 form, u32 handle); +int mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number); +int mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number); +int mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, + u32 handle); +int mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 *num_pds); +int mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form, + u32 handle, u16 sz); +int mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, + u32 form, u32 form_specific); +int mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle, + u16 *volume_handle); +int mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, + u16 volume_handle, u64 *wwid); + +/* ctl shared API */ +extern struct device_attribute *mpt3sas_host_attrs[]; +extern struct device_attribute *mpt3sas_dev_attrs[]; +void mpt3sas_ctl_init(ushort hbas_to_enumerate); +void mpt3sas_ctl_exit(ushort hbas_to_enumerate); +u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +void mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase); +u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, + u8 msix_index, u32 reply); +void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventNotificationReply_t *mpi_reply); + +void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, + u8 bits_to_regsiter); +int mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, + u8 *issue_reset); + +/* transport shared API */ +extern struct scsi_transport_template *mpt3sas_transport_template; +u8 mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply); +struct _sas_port *mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, + u16 handle, u64 sas_address); +void mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + u64 sas_address_parent); +int mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy + *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev); +int mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, + struct _sas_phy *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1, + struct device *parent_dev); +void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, u16 handle, u8 phy_number, u8 link_rate); +extern struct sas_function_template mpt3sas_transport_functions; +extern struct scsi_transport_template *mpt3sas_transport_template; +extern int scsi_internal_device_block(struct scsi_device *sdev); +extern int scsi_internal_device_unblock(struct scsi_device *sdev, + enum scsi_device_state 
new_state); +/* trigger data externs */ +void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data); +void mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data); +void mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, + u32 tigger_bitmask); +void mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event, + u16 log_entry_qualifier); +void mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, + u8 asc, u8 ascq); +void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, + u32 loginfo); + +/* warpdrive APIs */ +u8 mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc, + struct _raid_device *raid_device); +u8 +mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid); +void +mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io); +void +mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request, + u16 smid); + +#endif /* MPT3SAS_BASE_H_INCLUDED */ diff --git a/addons/mpt3sas/src/4.4.180/mpt3sas_config.c b/addons/mpt3sas/src/4.4.180/mpt3sas_config.c new file mode 100644 index 00000000..a6914ec9 --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpt3sas_config.c @@ -0,0 +1,1716 @@ +/* + * This module provides common API for accessing firmware configuration pages + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. 
+ + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/errno.h> +#include <linux/blkdev.h> +#include <linux/sched.h> +#include <linux/workqueue.h> +#include <linux/delay.h> +#include <linux/pci.h> + +#include "mpt3sas_base.h" + +/* local definitions */ + +/* Timeout for config page request (in seconds) */ +#define MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT 15 + +/* Common sgl flags for READING a config page. */ +#define MPT3_CONFIG_COMMON_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \ + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \ + | MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT) + +/* Common sgl flags for WRITING a config page. */ +#define MPT3_CONFIG_COMMON_WRITE_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \ + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \ + | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC) \ + << MPI2_SGE_FLAGS_SHIFT) + +/** + * struct config_request - obtain dma memory via routine + * @sz: size + * @page: virt pointer + * @page_dma: phys pointer + * + */ +struct config_request { + u16 sz; + void *page; + dma_addr_t page_dma; +}; + +/** + * _config_display_some_debug - debug routine + * @ioc: per adapter object + * @smid: system request message index + * @calling_function_name: string passed from the calling function + * @mpi_reply: reply message frame + * Context: none. + * + * Function for displaying debug info helpful when debugging issues + * in this module.
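Since the SGL flag macros above only make sense at the point of use, a hedged one-liner showing how _config_request() (below) attaches them to a read request for a page of sz bytes at bus address page_dma (both variable names illustrative):

	/* Illustrative only: one simple-element SGL covering the whole page. */
	ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
	    MPT3_CONFIG_COMMON_SGLFLAGS | sz, page_dma);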
+ */ +static void +_config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, + char *calling_function_name, MPI2DefaultReply_t *mpi_reply) +{ + Mpi2ConfigRequest_t *mpi_request; + char *desc = NULL; + + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + return; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) { + case MPI2_CONFIG_PAGETYPE_IO_UNIT: + desc = "io_unit"; + break; + case MPI2_CONFIG_PAGETYPE_IOC: + desc = "ioc"; + break; + case MPI2_CONFIG_PAGETYPE_BIOS: + desc = "bios"; + break; + case MPI2_CONFIG_PAGETYPE_RAID_VOLUME: + desc = "raid_volume"; + break; + case MPI2_CONFIG_PAGETYPE_MANUFACTURING: + desc = "manufacturing"; + break; + case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK: + desc = "physdisk"; + break; + case MPI2_CONFIG_PAGETYPE_EXTENDED: + switch (mpi_request->ExtPageType) { + case MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT: + desc = "sas_io_unit"; + break; + case MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER: + desc = "sas_expander"; + break; + case MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE: + desc = "sas_device"; + break; + case MPI2_CONFIG_EXTPAGETYPE_SAS_PHY: + desc = "sas_phy"; + break; + case MPI2_CONFIG_EXTPAGETYPE_LOG: + desc = "log"; + break; + case MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE: + desc = "enclosure"; + break; + case MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG: + desc = "raid_config"; + break; + case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING: + desc = "driver_mapping"; + break; + } + break; + } + + if (!desc) + return; + + pr_info(MPT3SAS_FMT + "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n", + ioc->name, calling_function_name, desc, + mpi_request->Header.PageNumber, mpi_request->Action, + le32_to_cpu(mpi_request->PageAddress), smid); + + if (!mpi_reply) + return; + + if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) + pr_info(MPT3SAS_FMT + "\tiocstatus(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); +} + +/** + * _config_alloc_config_dma_memory - obtain physical memory + * @ioc: per adapter object + * @mem: struct config_request + * + * A wrapper for obtaining dma-able memory for a config page request. + * + * Returns 0 for success, non-zero for failure. + */ +static int +_config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, + struct config_request *mem) +{ + int r = 0; + + if (mem->sz > ioc->config_page_sz) { + mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz, + &mem->page_dma, GFP_KERNEL); + if (!mem->page) { + pr_err(MPT3SAS_FMT + "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n", + ioc->name, __func__, mem->sz); + r = -ENOMEM; + } + } else { /* use tmp buffer if less than 512 bytes */ + mem->page = ioc->config_page; + mem->page_dma = ioc->config_page_dma; + } + return r; +} + +/** + * _config_free_config_dma_memory - wrapper to free the memory + * @ioc: per adapter object + * @mem: struct config_request + * + * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory. + */ +static void +_config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc, + struct config_request *mem) +{ + if (mem->sz > ioc->config_page_sz) + dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page, + mem->page_dma); +} + +/** + * mpt3sas_config_done - config page completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame (lower 32bit addr) + * Context: none.
+ * + * The callback handler when using _config_request. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + if (ioc->config_cmds.status == MPT3_CMD_NOT_USED) + return 1; + if (ioc->config_cmds.smid != smid) + return 1; + ioc->config_cmds.status |= MPT3_CMD_COMPLETE; + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + ioc->config_cmds.status |= MPT3_CMD_REPLY_VALID; + memcpy(ioc->config_cmds.reply, mpi_reply, + mpi_reply->MsgLength*4); + } + ioc->config_cmds.status &= ~MPT3_CMD_PENDING; + _config_display_some_debug(ioc, smid, "config_done", mpi_reply); + ioc->config_cmds.smid = USHRT_MAX; + complete(&ioc->config_cmds.done); + return 1; +} + +/** + * _config_request - main routine for sending config page requests + * @ioc: per adapter object + * @mpi_request: request message frame + * @mpi_reply: reply mf payload returned from firmware + * @timeout: timeout in seconds + * @config_page: contents of the config page + * @config_page_sz: size of config page + * Context: sleep + * + * A generic API for config page requests to firmware. + * + * The ioc->config_cmds.status flag should be MPT3_CMD_NOT_USED before calling + * this API. + * + * The callback index is set inside ioc->config_cb_idx. + * + * Returns 0 for success, non-zero for failure. + */ +static int +_config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t + *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout, + void *config_page, u16 config_page_sz) +{ + u16 smid; + u32 ioc_state; + unsigned long timeleft; + Mpi2ConfigRequest_t *config_request; + int r; + u8 retry_count, issue_host_reset = 0; + u16 wait_state_count; + struct config_request mem; + u32 ioc_status = UINT_MAX; + + mutex_lock(&ioc->config_cmds.mutex); + if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: config_cmd in use\n", + ioc->name, __func__); + mutex_unlock(&ioc->config_cmds.mutex); + return -EAGAIN; + } + + retry_count = 0; + memset(&mem, 0, sizeof(struct config_request)); + + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + + if (config_page) { + mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion; + mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber; + mpi_request->Header.PageType = mpi_reply->Header.PageType; + mpi_request->Header.PageLength = mpi_reply->Header.PageLength; + mpi_request->ExtPageLength = mpi_reply->ExtPageLength; + mpi_request->ExtPageType = mpi_reply->ExtPageType; + if (mpi_request->Header.PageLength) + mem.sz = mpi_request->Header.PageLength * 4; + else + mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4; + r = _config_alloc_config_dma_memory(ioc, &mem); + if (r != 0) + goto out; + if (mpi_request->Action == + MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT || + mpi_request->Action == + MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) { + ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + MPT3_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz, + mem.page_dma); + memcpy(mem.page, config_page, min_t(u16, mem.sz, + config_page_sz)); + } else { + memset(config_page, 0, config_page_sz); + ioc->base_add_sg_single(&mpi_request->PageBufferSGE, + MPT3_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma); + memset(mem.page, 0, min_t(u16, mem.sz, config_page_sz)); + } + } + + retry_config: + if (retry_count) { + if (retry_count > 2) { /* attempt only 2 retries */ + r = -EFAULT; + goto
free_mem; + } + pr_info(MPT3SAS_FMT "%s: attempting retry (%d)\n", + ioc->name, __func__, retry_count); + } + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + ioc->config_cmds.status = MPT3_CMD_NOT_USED; + r = -EFAULT; + goto free_mem; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info(MPT3SAS_FMT "%s: ioc is operational\n", + ioc->name, __func__); + + smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + ioc->config_cmds.status = MPT3_CMD_NOT_USED; + r = -EAGAIN; + goto free_mem; + } + + r = 0; + memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t)); + ioc->config_cmds.status = MPT3_CMD_PENDING; + config_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->config_cmds.smid = smid; + memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t)); + _config_display_some_debug(ioc, smid, "config_request", NULL); + init_completion(&ioc->config_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->config_cmds.done, + timeout*HZ); + if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2ConfigRequest_t)/4); + retry_count++; + if (ioc->config_cmds.smid == smid) + mpt3sas_base_free_smid(ioc, smid); + if ((ioc->shost_recovery) || (ioc->config_cmds.status & + MPT3_CMD_RESET) || ioc->pci_error_recovery) + goto retry_config; + issue_host_reset = 1; + r = -EFAULT; + goto free_mem; + } + + if (ioc->config_cmds.status & MPT3_CMD_REPLY_VALID) { + memcpy(mpi_reply, ioc->config_cmds.reply, + sizeof(Mpi2ConfigReply_t)); + + /* Reply Frame Sanity Checks to workaround FW issues */ + if ((mpi_request->Header.PageType & 0xF) != + (mpi_reply->Header.PageType & 0xF)) { + _debug_dump_mf(mpi_request, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->request_sz/4); + panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \ + " mpi_reply mismatch: Requested PageType(0x%02x)" \ + " Reply PageType(0x%02x)\n", \ + ioc->name, __func__, + (mpi_request->Header.PageType & 0xF), + (mpi_reply->Header.PageType & 0xF)); + } + + if (((mpi_request->Header.PageType & 0xF) == + MPI2_CONFIG_PAGETYPE_EXTENDED) && + mpi_request->ExtPageType != mpi_reply->ExtPageType) { + _debug_dump_mf(mpi_request, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->request_sz/4); + panic(KERN_WARNING MPT3SAS_FMT "%s: Firmware BUG:" \ + " mpi_reply mismatch: Requested ExtPageType(0x%02x)" + " Reply ExtPageType(0x%02x)\n", + ioc->name, __func__, mpi_request->ExtPageType, + mpi_reply->ExtPageType); + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & MPI2_IOCSTATUS_MASK; + } + + if (retry_count) + pr_info(MPT3SAS_FMT "%s: retry (%d) completed!!\n", \ + ioc->name, __func__, retry_count); + + if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) && + config_page && mpi_request->Action == + MPI2_CONFIG_ACTION_PAGE_READ_CURRENT) { + u8 *p = (u8 *)mem.page; + + /* Config Page Sanity Checks to workaround FW issues */ + if (p) { + if ((mpi_request->Header.PageType & 0xF) != + (p[3] & 0xF)) { + _debug_dump_mf(mpi_request, 
ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_config(p, min_t(u16, mem.sz, + config_page_sz)/4); + panic(KERN_WARNING MPT3SAS_FMT + "%s: Firmware BUG:" \ + " config page mismatch:" + " Requested PageType(0x%02x)" + " Reply PageType(0x%02x)\n", + ioc->name, __func__, + (mpi_request->Header.PageType & 0xF), + (p[3] & 0xF)); + } + + if (((mpi_request->Header.PageType & 0xF) == + MPI2_CONFIG_PAGETYPE_EXTENDED) && + (mpi_request->ExtPageType != p[6])) { + _debug_dump_mf(mpi_request, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_config(p, min_t(u16, mem.sz, + config_page_sz)/4); + panic(KERN_WARNING MPT3SAS_FMT + "%s: Firmware BUG:" \ + " config page mismatch:" + " Requested ExtPageType(0x%02x)" + " Reply ExtPageType(0x%02x)\n", + ioc->name, __func__, + mpi_request->ExtPageType, p[6]); + } + } + memcpy(config_page, mem.page, min_t(u16, mem.sz, + config_page_sz)); + } + + free_mem: + if (config_page) + _config_free_config_dma_memory(ioc, &mem); + out: + ioc->config_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->config_cmds.mutex); + + if (issue_host_reset) + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg0 - obtain manufacturing page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
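+ *
+ * A minimal caller-side sketch (hypothetical; num_phys and the sizing
+ * arithmetic are assumptions, error handling elided):
+ *
+ *     Mpi2ConfigReply_t mpi_reply;
+ *     Mpi2ManufacturingPage7_t *pg7;
+ *     u16 sz = offsetof(Mpi2ManufacturingPage7_t, ConnectorInfo) +
+ *         (num_phys * sizeof(Mpi2ManPage7ConnectorInfo_t));
+ *
+ *     pg7 = kzalloc(sz, GFP_KERNEL);
+ *     if (pg7 && !mpt3sas_config_get_manufacturing_pg7(ioc, &mpi_reply,
+ *         pg7, sz))
+ *         ; /* pg7->ConnectorInfo[0..num_phys-1] is now valid */
+ *     kfree(pg7);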
+ */ +int +mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 7; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING7_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sz); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg10 - obtain manufacturing page 10 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage10_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 10; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_manufacturing_pg11 - obtain manufacturing page 11 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
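+ *
+ * Pages read here are often written back through the matching setter
+ * below; a hedged read-modify-write sketch (the EEDPTagMode tweak is
+ * illustrative only, error handling elided):
+ *
+ *     Mpi2ConfigReply_t mpi_reply;
+ *     struct Mpi2ManufacturingPage11_t pg11;
+ *
+ *     if (!mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &pg11)) {
+ *         pg11.EEDPTagMode &= ~0x3;
+ *         pg11.EEDPTagMode |= 0x1;
+ *         mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply, &pg11);
+ *     }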
+ */ +int +mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_set_manufacturing_pg11 - set manufacturing page 11 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, + struct Mpi2ManufacturingPage11_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING; + mpi_request.Header.PageNumber = 11; + mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_bios_pg2 - obtain bios page 2 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
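+ *
+ * Fixed-size pages like this one need no sizing logic; a hypothetical
+ * call (the field named is an assumption, error handling elided):
+ *
+ *     Mpi2ConfigReply_t mpi_reply;
+ *     Mpi2BiosPage2_t bios_pg2;
+ *
+ *     if (!mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &bios_pg2))
+ *         ; /* e.g. bios_pg2.CurrentBootDeviceForm is now valid */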
+ */ +int +mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 2; + mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_bios_pg3 - obtain bios page 3 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2BiosPage3_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg0 - obtain iounit page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage0_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg1 - obtain iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
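+ *
+ * Usually paired read-modify-write with the setter below; a hedged
+ * sketch (the flag chosen is illustrative only):
+ *
+ *     Mpi2ConfigReply_t mpi_reply;
+ *     Mpi2IOUnitPage1_t pg1;
+ *     u32 flags;
+ *
+ *     if (!mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &pg1)) {
+ *         flags = le32_to_cpu(pg1.Flags);
+ *         flags |= MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
+ *         pg1.Flags = cpu_to_le32(flags);
+ *         mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &pg1);
+ *     }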
+ */ +int +mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_set_iounit_pg1 - set iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg3 - obtain iounit page 3 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_iounit_pg3(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage3_t *config_page, u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 3; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE3_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_iounit_pg8 - obtain iounit page 8 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_iounit_pg8(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage8_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT; + mpi_request.Header.PageNumber = 8; + mpi_request.Header.PageVersion = MPI2_IOUNITPAGE8_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC; + mpi_request.Header.PageNumber = 8; + mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_sas_device_pg0 - obtain sas device page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: device handle + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
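+ *
+ * @form and @handle are packed into PageAddress; a hypothetical walk
+ * over all attached devices (firmware ends the walk with a non-SUCCESS
+ * IOCStatus, so the reply must be checked each pass):
+ *
+ *     Mpi2ConfigReply_t mpi_reply;
+ *     Mpi2SasDevicePage0_t pg0;
+ *     u16 ioc_status, handle = 0xFFFF;
+ *
+ *     while (!mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &pg0,
+ *         MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle)) {
+ *         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ *             MPI2_IOCSTATUS_MASK;
+ *         if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ *             break;
+ *         handle = le16_to_cpu(pg0.DevHandle);
+ *     }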
+ */ +int +mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page, + u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; + mpi_request.Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_sas_device_pg1 - obtain sas device page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: device handle + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page, + u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE; + mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION; + mpi_request.Header.PageNumber = 1; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_number_hba_phys - obtain number of phys on the host + * @ioc: per adapter object + * @num_phys: pointer returned with the number of phys + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
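+ *
+ * Typically feeds the sized SAS IO unit readers below (hypothetical
+ * sketch, error handling elided):
+ *
+ *     u8 num_phys;
+ *     u16 sz;
+ *     Mpi2ConfigReply_t mpi_reply;
+ *     Mpi2SasIOUnitPage0_t *pg0;
+ *
+ *     mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ *     sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
+ *         (num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
+ *     pg0 = kzalloc(sz, GFP_KERNEL);
+ *     if (pg0)
+ *         mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, pg0, sz);
+ *     kfree(pg0);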
+ */ +int +mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + u16 ioc_status; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t config_page; + + *num_phys = 0; + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(Mpi2SasIOUnitPage0_t)); + if (!r) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) + *num_phys = config_page.NumPhys; + } + out: + return r; +} + +/** + * mpt3sas_config_get_sas_iounit_pg0 - obtain sas iounit page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Calling function should call config_get_number_hba_phys prior to + * this function, so enough memory is allocated for config_page. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Calling function should call config_get_number_hba_phys prior to + * this function, so enough memory is allocated for config_page. + * + * Returns 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_set_sas_iounit_pg1 - send sas iounit page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Calling function should call config_get_number_hba_phys prior to + * this function, so enough memory is allocated for config_page. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, + u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT; + _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_expander_pg0 - obtain expander page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: expander handle + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
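+ *
+ * A zero return only means the request itself was transported; callers
+ * still inspect the IOCStatus carried in @mpi_reply (hypothetical
+ * sketch, declarations elided):
+ *
+ *     if (!mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ *         MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle)) {
+ *         ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ *             MPI2_IOCSTATUS_MASK;
+ *         if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
+ *             ; /* expander_pg0 is valid for this handle */
+ *     }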
+ */ +int +mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_expander_pg1 - obtain expander page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @phy_number: phy number + * @handle: expander handle + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2ExpanderPage1_t *config_page, u32 phy_number, + u16 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM | + (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_enclosure_pg0 - obtain enclosure page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: expander handle + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_phy_pg0 - obtain phy page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @phy_number: phy number + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_phy_pg1 - obtain phy page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @phy_number: phy number + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
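+ *
+ * Page 1 carries the phy error counters; a hypothetical dump
+ * (declarations and error handling elided):
+ *
+ *     if (!mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1, phy_number))
+ *         pr_info("invalid_dwords(%d), disparity_errors(%d)\n",
+ *             le32_to_cpu(phy_pg1.InvalidDwordCount),
+ *             le32_to_cpu(phy_pg1.RunningDisparityErrorCount));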
+ */ +int +mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_raid_volume_pg1 - obtain raid volume page 1 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: volume handle + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form, + u32 handle) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 1; + mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_number_pds - obtain number of phys disk assigned to volume + * @ioc: per adapter object + * @handle: volume handle + * @num_pds: returns pds count + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
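+ *
+ * Like the phy count above, this usually sizes a follow-up page read
+ * (hypothetical sketch; @handle is the volume handle):
+ *
+ *     u8 num_pds;
+ *     u16 sz;
+ *     Mpi2ConfigReply_t mpi_reply;
+ *     Mpi2RaidVolPage0_t *vol_pg0;
+ *
+ *     if (!mpt3sas_config_get_number_pds(ioc, handle, &num_pds) && num_pds) {
+ *         sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) +
+ *             (num_pds * sizeof(Mpi2RaidVol0PhysDisk_t));
+ *         vol_pg0 = kzalloc(sz, GFP_KERNEL);
+ *         if (vol_pg0)
+ *             mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
+ *                 MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, sz);
+ *         kfree(vol_pg0);
+ *     }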
+ */ +int +mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 *num_pds) +{ + Mpi2ConfigRequest_t mpi_request; + Mpi2RaidVolPage0_t config_page; + Mpi2ConfigReply_t mpi_reply; + int r; + u16 ioc_status; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + *num_pds = 0; + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = + cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page, + sizeof(Mpi2RaidVolPage0_t)); + if (!r) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) + *num_pds = config_page.NumPhysDisks; + } + + out: + return r; +} + +/** + * mpt3sas_config_get_raid_volume_pg0 - obtain raid volume page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_HANDLE or HANDLE + * @handle: volume handle + * @sz: size of buffer passed in config_page + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc, + Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form, + u32 handle, u16 sz) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | handle); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz); + out: + return r; +} + +/** + * mpt3sas_config_get_phys_disk_pg0 - obtain phys disk page 0 + * @ioc: per adapter object + * @mpi_reply: reply mf payload returned from firmware + * @config_page: contents of the config page + * @form: GET_NEXT_PHYSDISKNUM, PHYSDISKNUM, DEVHANDLE + * @form_specific: specific to the form + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. 
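+ *
+ * For example, mapping a physical disk number to its device handle
+ * (hypothetical sketch, declarations elided):
+ *
+ *     if (!mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, &pd_pg0,
+ *         MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM, phys_disk_num))
+ *         handle = le16_to_cpu(pd_pg0.DevHandle);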
+ */ +int +mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t + *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 form, + u32 form_specific) +{ + Mpi2ConfigRequest_t mpi_request; + int r; + + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK; + mpi_request.Header.PageNumber = 0; + mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.PageAddress = cpu_to_le32(form | form_specific); + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + r = _config_request(ioc, &mpi_request, mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + sizeof(*config_page)); + out: + return r; +} + +/** + * mpt3sas_config_get_volume_handle - returns volume handle for given hidden + * raid components + * @ioc: per adapter object + * @pd_handle: phys disk handle + * @volume_handle: volume handle + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle, + u16 *volume_handle) +{ + Mpi2RaidConfigurationPage0_t *config_page = NULL; + Mpi2ConfigRequest_t mpi_request; + Mpi2ConfigReply_t mpi_reply; + int r, i, config_page_sz; + u16 ioc_status; + int config_num; + u16 element_type; + u16 phys_disk_dev_handle; + + *volume_handle = 0; + memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t)); + mpi_request.Function = MPI2_FUNCTION_CONFIG; + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER; + mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; + mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG; + mpi_request.Header.PageVersion = MPI2_RAIDCONFIG0_PAGEVERSION; + mpi_request.Header.PageNumber = 0; + ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0); + if (r) + goto out; + + mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT; + config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4); + config_page = kmalloc(config_page_sz, GFP_KERNEL); + if (!config_page) { + r = -1; + goto out; + } + + config_num = 0xff; + while (1) { + mpi_request.PageAddress = cpu_to_le32(config_num + + MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM); + r = _config_request(ioc, &mpi_request, &mpi_reply, + MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, + config_page_sz); + if (r) + goto out; + r = -1; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + goto out; + for (i = 0; i < config_page->NumElements; i++) { + element_type = le16_to_cpu(config_page-> + ConfigElement[i].ElementFlags) & + MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE; + if (element_type == + MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT || + element_type == + MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) { + phys_disk_dev_handle = + le16_to_cpu(config_page->ConfigElement[i]. 
+ PhysDiskDevHandle); + if (phys_disk_dev_handle == pd_handle) { + *volume_handle = + le16_to_cpu(config_page-> + ConfigElement[i].VolDevHandle); + r = 0; + goto out; + } + } else if (element_type == + MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) { + *volume_handle = 0; + r = 0; + goto out; + } + } + config_num = config_page->ConfigNum; + } + out: + kfree(config_page); + return r; +} + +/** + * mpt3sas_config_get_volume_wwid - returns wwid given the volume handle + * @ioc: per adapter object + * @volume_handle: volume handle + * @wwid: volume wwid + * Context: sleep. + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle, + u64 *wwid) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2RaidVolPage1_t raid_vol_pg1; + + *wwid = 0; + if (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &raid_vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, + volume_handle))) { + *wwid = le64_to_cpu(raid_vol_pg1.WWID); + return 0; + } else + return -1; +} diff --git a/addons/mpt3sas/src/4.4.180/mpt3sas_ctl.c b/addons/mpt3sas/src/4.4.180/mpt3sas_ctl.c new file mode 100644 index 00000000..4ccde5a0 --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpt3sas_ctl.c @@ -0,0 +1,3464 @@ +/* + * Management Module Support for MPT (Message Passing Technology) based + * controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/delay.h> +#include <linux/compat.h> +#include <linux/poll.h> + +#include <linux/io.h> +#include <linux/uaccess.h> + +#include "mpt3sas_base.h" +#include "mpt3sas_ctl.h" + + +static struct fasync_struct *async_queue; +static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait); + + +/** + * enum block_state - blocking state + * @NON_BLOCKING: non blocking + * @BLOCKING: blocking + * + * These states are for ioctls that need to wait for a response + * from firmware, so they probably require sleep. + */ +enum block_state { + NON_BLOCKING, + BLOCKING, +}; + +/** + * _ctl_sas_device_find_by_handle - sas device search + * @ioc: per adapter object + * @handle: sas device handle (assigned by firmware) + * Context: Calling function should acquire ioc->sas_device_lock + * + * This searches for sas_device based on handle, then returns the sas_device + * object. + */ +static struct _sas_device * +_ctl_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_device *sas_device, *r; + + r = NULL; + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (sas_device->handle != handle) + continue; + r = sas_device; + goto out; + } + + out: + return r; +} + +/** + * _ctl_display_some_debug - debug routine + * @ioc: per adapter object + * @smid: system request message index + * @calling_function_name: string passed from the calling function + * @mpi_reply: reply message frame + * Context: none. + * + * Function for displaying debug info helpful when debugging issues + * in this module. + */ +static void +_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, + char *calling_function_name, MPI2DefaultReply_t *mpi_reply) +{ + Mpi2ConfigRequest_t *mpi_request; + char *desc = NULL; + + if (!(ioc->logging_level & MPT_DEBUG_IOCTL)) + return; + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + switch (mpi_request->Function) { + case MPI2_FUNCTION_SCSI_IO_REQUEST: + { + Mpi2SCSIIORequest_t *scsi_request = + (Mpi2SCSIIORequest_t *)mpi_request; + + snprintf(ioc->tmp_string, MPT_STRING_LENGTH, + "scsi_io, cmd(0x%02x), cdb_len(%d)", + scsi_request->CDB.CDB32[0], + le16_to_cpu(scsi_request->IoFlags) & 0xF); + desc = ioc->tmp_string; + break; + } + case MPI2_FUNCTION_SCSI_TASK_MGMT: + desc = "task_mgmt"; + break; + case MPI2_FUNCTION_IOC_INIT: + desc = "ioc_init"; + break; + case MPI2_FUNCTION_IOC_FACTS: + desc = "ioc_facts"; + break; + case MPI2_FUNCTION_CONFIG: + { + Mpi2ConfigRequest_t *config_request = + (Mpi2ConfigRequest_t *)mpi_request; + + snprintf(ioc->tmp_string, MPT_STRING_LENGTH, + "config, type(0x%02x), ext_type(0x%02x), number(%d)", + (config_request->Header.PageType & + MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType, + config_request->Header.PageNumber); + desc = ioc->tmp_string; + break; + } + case MPI2_FUNCTION_PORT_FACTS: + desc = "port_facts"; + break; + case MPI2_FUNCTION_PORT_ENABLE: + desc = "port_enable"; + break; + case MPI2_FUNCTION_EVENT_NOTIFICATION: + desc = "event_notification"; + break; + case MPI2_FUNCTION_FW_DOWNLOAD: + desc = "fw_download"; + break; + case MPI2_FUNCTION_FW_UPLOAD: + desc = "fw_upload"; + break; + case MPI2_FUNCTION_RAID_ACTION: + desc = "raid_action"; + break; + case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: + { + Mpi2SCSIIORequest_t *scsi_request = + (Mpi2SCSIIORequest_t *)mpi_request; + + snprintf(ioc->tmp_string, MPT_STRING_LENGTH, + "raid_pass, cmd(0x%02x), cdb_len(%d)", + scsi_request->CDB.CDB32[0], + le16_to_cpu(scsi_request->IoFlags) & 0xF); + desc = ioc->tmp_string; + break; + } + case 
MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: + desc = "sas_iounit_cntl"; + break; + case MPI2_FUNCTION_SATA_PASSTHROUGH: + desc = "sata_pass"; + break; + case MPI2_FUNCTION_DIAG_BUFFER_POST: + desc = "diag_buffer_post"; + break; + case MPI2_FUNCTION_DIAG_RELEASE: + desc = "diag_release"; + break; + case MPI2_FUNCTION_SMP_PASSTHROUGH: + desc = "smp_passthrough"; + break; + } + + if (!desc) + return; + + pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n", + ioc->name, calling_function_name, desc, smid); + + if (!mpi_reply) + return; + + if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo) + pr_info(MPT3SAS_FMT + "\tiocstatus(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_request->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { + Mpi2SCSIIOReply_t *scsi_reply = + (Mpi2SCSIIOReply_t *)mpi_reply; + struct _sas_device *sas_device = NULL; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = _ctl_sas_device_find_by_handle(ioc, + le16_to_cpu(scsi_reply->DevHandle)); + if (sas_device) { + pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n", + ioc->name, (unsigned long long) + sas_device->sas_address, sas_device->phy); + pr_warn(MPT3SAS_FMT + "\tenclosure_logical_id(0x%016llx), slot(%d)\n", + ioc->name, (unsigned long long) + sas_device->enclosure_logical_id, sas_device->slot); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (scsi_reply->SCSIState || scsi_reply->SCSIStatus) + pr_info(MPT3SAS_FMT + "\tscsi_state(0x%02x), scsi_status" + "(0x%02x)\n", ioc->name, + scsi_reply->SCSIState, + scsi_reply->SCSIStatus); + } +} + +/** + * mpt3sas_ctl_done - ctl module completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: none. + * + * The callback handler when using ioc->ctl_cb_idx. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
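+ *
+ * A condensed view of the handshake this callback completes, mirroring
+ * _ctl_do_mpt_command below (pseudocode, details elided):
+ *
+ *     ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ *     init_completion(&ioc->ctl_cmds.done);
+ *     mpt3sas_base_put_smid_default(ioc, smid);
+ *     wait_for_completion_timeout(&ioc->ctl_cmds.done, timeout*HZ);
+ *     if (ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID)
+ *         ; /* the reply frame is now in ioc->ctl_cmds.reply */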
+ */ +u8 +mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + Mpi2SCSIIOReply_t *scsiio_reply; + const void *sense_data; + u32 sz; + + if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED) + return 1; + if (ioc->ctl_cmds.smid != smid) + return 1; + ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE; + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (mpi_reply) { + memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); + ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID; + /* get sense data */ + if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_reply->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { + scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply; + if (scsiio_reply->SCSIState & + MPI2_SCSI_STATE_AUTOSENSE_VALID) { + sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, + le32_to_cpu(scsiio_reply->SenseCount)); + sense_data = mpt3sas_base_get_sense_buffer(ioc, + smid); + memcpy(ioc->ctl_cmds.sense, sense_data, sz); + } + } + } + _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply); + ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING; + complete(&ioc->ctl_cmds.done); + return 1; +} + +/** + * _ctl_check_event_type - determines when an event needs logging + * @ioc: per adapter object + * @event: firmware event + * + * The bitmask in ioc->event_type[] indicates which events should + * be saved in the driver event_log. This bitmask is set by the application. + * + * Returns 1 when event should be captured, or zero means no match. + */ +static int +_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event) +{ + u16 i; + u32 desired_event; + + if (event >= 128 || !event || !ioc->event_log) + return 0; + + desired_event = (1 << (event % 32)); + if (!desired_event) + desired_event = 1; + i = event / 32; + return desired_event & ioc->event_type[i]; +} + +/** + * mpt3sas_ctl_add_to_event_log - add event + * @ioc: per adapter object + * @mpi_reply: reply message frame + * + * Return nothing. + */ +void +mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventNotificationReply_t *mpi_reply) +{ + struct MPT3_IOCTL_EVENTS *event_log; + u16 event; + int i; + u32 sz, event_data_sz; + u8 send_aen = 0; + + if (!ioc->event_log) + return; + + event = le16_to_cpu(mpi_reply->Event); + + if (_ctl_check_event_type(ioc, event)) { + + /* insert entry into circular event_log */ + i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE; + event_log = ioc->event_log; + event_log[i].event = event; + event_log[i].context = ioc->event_context++; + + event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4; + sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE); + memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE); + memcpy(event_log[i].data, mpi_reply->EventData, sz); + send_aen = 1; + } + + /* This aen_event_read_flag flag is set until the + * application has read the event log. + * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify. + */ + if (event == MPI2_EVENT_LOG_ENTRY_ADDED || + (send_aen && !ioc->aen_event_read_flag)) { + ioc->aen_event_read_flag = 1; + wake_up_interruptible(&ctl_poll_wait); + if (async_queue) + kill_fasync(&async_queue, SIGIO, POLL_IN); + } +} + +/** + * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time) + * @ioc: per adapter object + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt. + * + * This function merely adds a new work task into ioc->firmware_event_thread. 
+ * The tasks are worked from _firmware_event_work in user context. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +u8 +mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, + u32 reply) +{ + Mpi2EventNotificationReply_t *mpi_reply; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + mpt3sas_ctl_add_to_event_log(ioc, mpi_reply); + return 1; +} + +/** + * _ctl_verify_adapter - validates ioc_number passed from application + * @ioc: per adapter object + * @iocpp: The ioc pointer is returned in this. + * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & + * MPI25_VERSION for mpt3ctl ioctl device. + * + * Returns (-1) on error, else the ioc_number. + */ +static int +_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp, + int mpi_version) +{ + struct MPT3SAS_ADAPTER *ioc; + /* global ioc lock to protect controller on list operations */ + spin_lock(&gioc_lock); + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { + if (ioc->id != ioc_number) + continue; + /* Check whether this ioctl command is from the right + * ioctl device or not; if not, continue the search. + */ + if (ioc->hba_mpi_version_belonged != mpi_version) + continue; + spin_unlock(&gioc_lock); + *iocpp = ioc; + return ioc_number; + } + spin_unlock(&gioc_lock); + *iocpp = NULL; + return -1; +} + +/** + * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) + * @ioc: per adapter object + * @reset_phase: phase + * + * The handler for doing any required cleanup or initialization. + * + * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET, + * MPT3_IOC_DONE_RESET + */ +void +mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) +{ + int i; + u8 issue_reset; + + switch (reset_phase) { + case MPT3_IOC_PRE_RESET: + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__)); + for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { + if (!(ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + continue; + if ((ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + continue; + mpt3sas_send_diag_release(ioc, i, &issue_reset); + } + break; + case MPT3_IOC_AFTER_RESET: + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__)); + if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) { + ioc->ctl_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid); + complete(&ioc->ctl_cmds.done); + } + break; + case MPT3_IOC_DONE_RESET: + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); + + for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { + if (!(ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + continue; + if ((ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + continue; + ioc->diag_buffer_status[i] |= + MPT3_DIAG_BUFFER_IS_DIAG_RESET; + } + break; + } +} + +/** + * _ctl_fasync - + * @fd - + * @filep - + * @mode - + * + * Called when an application requests the fasync callback handler. 
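+ *
+ * Userspace arms SIGIO delivery with the usual fcntl sequence
+ * (hypothetical application-side code):
+ *
+ *     fcntl(fd, F_SETOWN, getpid());
+ *     fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | FASYNC);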
+ */ +int +_ctl_fasync(int fd, struct file *filep, int mode) +{ + return fasync_helper(fd, filep, mode, &async_queue); +} + +/** + * _ctl_poll - + * @file - + * @wait - + * + */ +unsigned int +_ctl_poll(struct file *filep, poll_table *wait) +{ + struct MPT3SAS_ADAPTER *ioc; + + poll_wait(filep, &ctl_poll_wait, wait); + + /* global ioc lock to protect controller on list operations */ + spin_lock(&gioc_lock); + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { + if (ioc->aen_event_read_flag) { + spin_unlock(&gioc_lock); + return POLLIN | POLLRDNORM; + } + } + spin_unlock(&gioc_lock); + return 0; +} + +/** + * _ctl_set_task_mid - assign an active smid to tm request + * @ioc: per adapter object + * @karg - (struct mpt3_ioctl_command) + * @tm_request - pointer to mf from user space + * + * Returns 0 when an smid is found, else fails. + * During failure, the reply frame is filled. + */ +static int +_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg, + Mpi2SCSITaskManagementRequest_t *tm_request) +{ + u8 found = 0; + u16 i; + u16 handle; + struct scsi_cmnd *scmd; + struct MPT3SAS_DEVICE *priv_data; + unsigned long flags; + Mpi2SCSITaskManagementReply_t *tm_reply; + u32 sz; + u32 lun; + char *desc = NULL; + + if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + desc = "abort_task"; + else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) + desc = "query_task"; + else + return 0; + + lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN); + + handle = le16_to_cpu(tm_request->DevHandle); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + for (i = ioc->scsiio_depth; i && !found; i--) { + scmd = ioc->scsi_lookup[i - 1].scmd; + if (scmd == NULL || scmd->device == NULL || + scmd->device->hostdata == NULL) + continue; + if (lun != scmd->device->lun) + continue; + priv_data = scmd->device->hostdata; + if (priv_data->sas_target == NULL) + continue; + if (priv_data->sas_target->handle != handle) + continue; + tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid); + found = 1; + } + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + + if (!found) { + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "%s: handle(0x%04x), lun(%d), no active mid!!\n", + ioc->name, + desc, le16_to_cpu(tm_request->DevHandle), lun)); + tm_reply = ioc->ctl_cmds.reply; + tm_reply->DevHandle = tm_request->DevHandle; + tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + tm_reply->TaskType = tm_request->TaskType; + tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4; + tm_reply->VP_ID = tm_request->VP_ID; + tm_reply->VF_ID = tm_request->VF_ID; + sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz); + if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply, + sz)) + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + return 1; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name, + desc, le16_to_cpu(tm_request->DevHandle), lun, + le16_to_cpu(tm_request->TaskMID))); + return 0; +} + +/** + * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode + * @ioc: per adapter object + * @karg - (struct mpt3_ioctl_command) + * @mf - pointer to mf in user space + */ +static long +_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, + void __user *mf) +{ + MPI2RequestHeader_t *mpi_request = NULL, *request; + MPI2DefaultReply_t *mpi_reply; + u32 ioc_state; + u16 ioc_status; + u16 smid; + unsigned long timeout, timeleft; + u8 issue_reset; + u32 sz; + void *psge; + 
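/* dma-able bounce buffers for the user data-out/data-in payloads; + * they are filled from and copied back to user space around the request + */ + 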
void *data_out = NULL; + dma_addr_t data_out_dma = 0; + size_t data_out_sz = 0; + void *data_in = NULL; + dma_addr_t data_in_dma = 0; + size_t data_in_sz = 0; + long ret; + u16 wait_state_count; + + issue_reset = 0; + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", + ioc->name, __func__); + ret = -EAGAIN; + goto out; + } + + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + ret = -EFAULT; + goto out; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, + __func__, wait_state_count); + } + if (wait_state_count) + pr_info(MPT3SAS_FMT "%s: ioc is operational\n", + ioc->name, __func__); + + mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL); + if (!mpi_request) { + pr_err(MPT3SAS_FMT + "%s: failed obtaining a memory for mpi_request\n", + ioc->name, __func__); + ret = -ENOMEM; + goto out; + } + + /* Check for overflow and wraparound */ + if (karg.data_sge_offset * 4 > ioc->request_sz || + karg.data_sge_offset > (UINT_MAX / 4)) { + ret = -EINVAL; + goto out; + } + + /* copy in request message frame from user */ + if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__, + __func__); + ret = -EFAULT; + goto out; + } + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + ret = -EAGAIN; + goto out; + } + } else { + + smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + ret = -EAGAIN; + goto out; + } + } + + ret = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + request = mpt3sas_base_get_msg_frame(ioc, smid); + memcpy(request, mpi_request, karg.data_sge_offset*4); + ioc->ctl_cmds.smid = smid; + data_out_sz = karg.data_out_size; + data_in_sz = karg.data_in_size; + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { + if (!le16_to_cpu(mpi_request->FunctionDependent1) || + le16_to_cpu(mpi_request->FunctionDependent1) > + ioc->facts.MaxDevHandle) { + ret = -EINVAL; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + /* obtain dma-able memory for data transfer */ + if (data_out_sz) /* WRITE */ { + data_out = pci_alloc_consistent(ioc->pdev, data_out_sz, + &data_out_dma); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + if (copy_from_user(data_out, karg.data_out_buf_ptr, + data_out_sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -EFAULT; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + if (data_in_sz) /* READ */ { + data_in = pci_alloc_consistent(ioc->pdev, data_in_sz, + &data_in_dma); + if (!data_in) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + psge = (void *)request + (karg.data_sge_offset*4); + + /* send command to firmware */ + 
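+	/* The switch below builds the SGE/SGL for each supported MPI
+	 * function before the frame is posted; TASK_MGMT and SMP/SAS IO
+	 * unit control requests additionally set TM and link-reset flags
+	 * that are cleared again after completion or timeout.
+	 */
+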
_ctl_display_some_debug(ioc, smid, "ctl_request", NULL); + + init_completion(&ioc->ctl_cmds.done); + switch (mpi_request->Function) { + case MPI2_FUNCTION_SCSI_IO_REQUEST: + case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: + { + Mpi2SCSIIORequest_t *scsiio_request = + (Mpi2SCSIIORequest_t *)request; + scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + scsiio_request->SenseBufferLowAddress = + mpt3sas_base_get_sense_buffer_dma(ioc, smid); + memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE); + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + + if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) + mpt3sas_base_put_smid_scsi_io(ioc, smid, + le16_to_cpu(mpi_request->FunctionDependent1)); + else + mpt3sas_base_put_smid_default(ioc, smid); + break; + } + case MPI2_FUNCTION_SCSI_TASK_MGMT: + { + Mpi2SCSITaskManagementRequest_t *tm_request = + (Mpi2SCSITaskManagementRequest_t *)request; + + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n", + ioc->name, + le16_to_cpu(tm_request->DevHandle), tm_request->TaskType)); + + if (tm_request->TaskType == + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK || + tm_request->TaskType == + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) { + if (_ctl_set_task_mid(ioc, &karg, tm_request)) { + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + } + + mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu( + tm_request->DevHandle)); + ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); + break; + } + case MPI2_FUNCTION_SMP_PASSTHROUGH: + { + Mpi2SmpPassthroughRequest_t *smp_request = + (Mpi2SmpPassthroughRequest_t *)mpi_request; + u8 *data; + + /* ioc determines which port to use */ + smp_request->PhysicalPort = 0xFF; + if (smp_request->PassthroughFlags & + MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE) + data = (u8 *)&smp_request->SGL; + else { + if (unlikely(data_out == NULL)) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } + data = data_out; + } + + if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) { + ioc->ioc_link_reset_in_progress = 1; + ioc->ignore_loginfos = 1; + } + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + mpt3sas_base_put_smid_default(ioc, smid); + break; + } + case MPI2_FUNCTION_SATA_PASSTHROUGH: + case MPI2_FUNCTION_FW_DOWNLOAD: + case MPI2_FUNCTION_FW_UPLOAD: + { + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + mpt3sas_base_put_smid_default(ioc, smid); + break; + } + case MPI2_FUNCTION_TOOLBOX: + { + Mpi2ToolboxCleanRequest_t *toolbox_request = + (Mpi2ToolboxCleanRequest_t *)mpi_request; + + if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) { + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + } else { + ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, + data_in_dma, data_in_sz); + } + mpt3sas_base_put_smid_default(ioc, smid); + break; + } + case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL: + { + Mpi2SasIoUnitControlRequest_t *sasiounit_request = + (Mpi2SasIoUnitControlRequest_t *)mpi_request; + + if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET + || sasiounit_request->Operation == + MPI2_SAS_OP_PHY_LINK_RESET) { + ioc->ioc_link_reset_in_progress = 1; + ioc->ignore_loginfos = 1; + } + /* drop to default case for posting the request */ + } + default: + ioc->build_sg_mpi(ioc, psge, data_out_dma, 
data_out_sz, + data_in_dma, data_in_sz); + mpt3sas_base_put_smid_default(ioc, smid); + break; + } + + if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT) + timeout = MPT3_IOCTL_DEFAULT_TIMEOUT; + else + timeout = karg.timeout; + timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, + timeout*HZ); + if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) { + Mpi2SCSITaskManagementRequest_t *tm_request = + (Mpi2SCSITaskManagementRequest_t *)mpi_request; + mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu( + tm_request->DevHandle)); + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); + } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH || + mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) && + ioc->ioc_link_reset_in_progress) { + ioc->ioc_link_reset_in_progress = 0; + ioc->ignore_loginfos = 0; + } + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, + __func__); + _debug_dump_mf(mpi_request, karg.data_sge_offset); + if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + mpi_reply = ioc->ctl_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + + if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT && + (ioc->logging_level & MPT_DEBUG_TM)) { + Mpi2SCSITaskManagementReply_t *tm_reply = + (Mpi2SCSITaskManagementReply_t *)mpi_reply; + + pr_info(MPT3SAS_FMT "TASK_MGMT: " \ + "IOCStatus(0x%04x), IOCLogInfo(0x%08x), " + "TerminationCount(0x%08x)\n", ioc->name, + le16_to_cpu(tm_reply->IOCStatus), + le32_to_cpu(tm_reply->IOCLogInfo), + le32_to_cpu(tm_reply->TerminationCount)); + } + + /* copy out xdata to user */ + if (data_in_sz) { + if (copy_to_user(karg.data_in_buf_ptr, data_in, + data_in_sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + + /* copy out reply message frame to user */ + if (karg.max_reply_bytes) { + sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz); + if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply, + sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + + /* copy out sense to user */ + if (karg.max_sense_bytes && (mpi_request->Function == + MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) { + sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE); + if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense, + sz)) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + ret = -ENODATA; + goto out; + } + } + + issue_host_reset: + if (issue_reset) { + ret = -ENODATA; + if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST || + mpi_request->Function == + MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH || + mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) { + pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n", + ioc->name, + le16_to_cpu(mpi_request->FunctionDependent1)); + mpt3sas_halt_firmware(ioc); + mpt3sas_scsih_issue_tm(ioc, + le16_to_cpu(mpi_request->FunctionDependent1), 0, 0, + 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30, + TM_MUTEX_ON); + } else + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + } + + out: + + /* free memory associated with sg buffers */ + if (data_in) + pci_free_consistent(ioc->pdev, data_in_sz, data_in, + data_in_dma); + + if (data_out) + pci_free_consistent(ioc->pdev, data_out_sz, data_out, + data_out_dma); + + kfree(mpi_request); + 
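+	/* release the ctl command slot so the next ioctl user can claim it */
+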
ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return ret; +} + +/** + * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + */ +static long +_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_iocinfo karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, + __func__)); + + memset(&karg, 0 , sizeof(karg)); + if (ioc->pfacts) + karg.port_number = ioc->pfacts[0].PortNumber; + karg.hw_rev = ioc->pdev->revision; + karg.pci_id = ioc->pdev->device; + karg.subsystem_device = ioc->pdev->subsystem_device; + karg.subsystem_vendor = ioc->pdev->subsystem_vendor; + karg.pci_information.u.bits.bus = ioc->pdev->bus->number; + karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn); + karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn); + karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus); + karg.firmware_version = ioc->facts.FWVersion.Word; + strcpy(karg.driver_version, ioc->driver_name); + strcat(karg.driver_version, "-"); + switch (ioc->hba_mpi_version_belonged) { + case MPI2_VERSION: + if (ioc->is_warpdrive) + karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2_SSS6200; + else + karg.adapter_type = MPT2_IOCTL_INTERFACE_SAS2; + strcat(karg.driver_version, MPT2SAS_DRIVER_VERSION); + break; + case MPI25_VERSION: + karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3; + strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION); + break; + } + karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion); + + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +/** + * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + */ +static long +_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_eventquery karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, + __func__)); + + karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE; + memcpy(karg.event_types, ioc->event_type, + MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); + + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +/** + * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + */ +static long +_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_eventenable karg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, + __func__)); + + memcpy(ioc->event_type, karg.event_types, + MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32)); + mpt3sas_base_validate_event_type(ioc, ioc->event_type); + + if (ioc->event_log) + return 0; + /* initialize event_log */ + ioc->event_context = 0; + ioc->aen_event_read_flag = 0; + ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE, + sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL); + if 
(!ioc->event_log) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -ENOMEM; + } + return 0; +} + +/** + * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + */ +static long +_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_eventreport karg; + u32 number_bytes, max_events, max; + struct mpt3_ioctl_eventreport __user *uarg = arg; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, + __func__)); + + number_bytes = karg.hdr.max_data_size - + sizeof(struct mpt3_ioctl_header); + max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS); + max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events); + + /* If fewer than 1 event is requested, there must have + * been some type of error. + */ + if (!max || !ioc->event_log) + return -ENODATA; + + number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS); + if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + /* reset flag so SIGIO can restart */ + ioc->aen_event_read_flag = 0; + return 0; +} + +/** + * _ctl_do_reset - main handler for MPT3HARDRESET opcode + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + */ +static long +_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_diag_reset karg; + int retval; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + if (ioc->shost_recovery || ioc->pci_error_recovery || + ioc->is_driver_loading) + return -EAGAIN; + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name, + __func__)); + + retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + pr_info(MPT3SAS_FMT "host reset: %s\n", + ioc->name, ((!retval) ? 
"SUCCESS" : "FAILED")); + return 0; +} + +/** + * _ctl_btdh_search_sas_device - searching for sas device + * @ioc: per adapter object + * @btdh: btdh ioctl payload + */ +static int +_ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_ioctl_btdh_mapping *btdh) +{ + struct _sas_device *sas_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->sas_device_list)) + return rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == sas_device->handle) { + btdh->bus = sas_device->channel; + btdh->id = sas_device->id; + rc = 1; + goto out; + } else if (btdh->bus == sas_device->channel && btdh->id == + sas_device->id && btdh->handle == 0xFFFF) { + btdh->handle = sas_device->handle; + rc = 1; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +/** + * _ctl_btdh_search_raid_device - searching for raid device + * @ioc: per adapter object + * @btdh: btdh ioctl payload + */ +static int +_ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_ioctl_btdh_mapping *btdh) +{ + struct _raid_device *raid_device; + unsigned long flags; + int rc = 0; + + if (list_empty(&ioc->raid_device_list)) + return rc; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF && + btdh->handle == raid_device->handle) { + btdh->bus = raid_device->channel; + btdh->id = raid_device->id; + rc = 1; + goto out; + } else if (btdh->bus == raid_device->channel && btdh->id == + raid_device->id && btdh->handle == 0xFFFF) { + btdh->handle = raid_device->handle; + rc = 1; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return rc; +} + +/** + * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + */ +static long +_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_ioctl_btdh_mapping karg; + int rc; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + rc = _ctl_btdh_search_sas_device(ioc, &karg); + if (!rc) + _ctl_btdh_search_raid_device(ioc, &karg); + + if (copy_to_user(arg, &karg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + return 0; +} + +/** + * _ctl_diag_capability - return diag buffer capability + * @ioc: per adapter object + * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED + * + * returns 1 when diag buffer support is enabled in firmware + */ +static u8 +_ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type) +{ + u8 rc = 0; + + switch (buffer_type) { + case MPI2_DIAG_BUF_TYPE_TRACE: + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) + rc = 1; + break; + case MPI2_DIAG_BUF_TYPE_SNAPSHOT: + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) + rc = 1; + break; + case MPI2_DIAG_BUF_TYPE_EXTENDED: + if (ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) + rc = 1; + } + + return rc; +} + + +/** + * _ctl_diag_register_2 - wrapper for registering diag buffer support + * @ioc: per adapter object + * 
@diag_register: the diag_register struct passed in from user space + * + */ +static long +_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, + struct mpt3_diag_register *diag_register) +{ + int rc, i; + void *request_data = NULL; + dma_addr_t request_data_dma; + u32 request_data_sz = 0; + Mpi2DiagBufferPostRequest_t *mpi_request; + Mpi2DiagBufferPostReply_t *mpi_reply; + u8 buffer_type; + unsigned long timeleft; + u16 smid; + u16 ioc_status; + u32 ioc_state; + u8 issue_reset = 0; + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + buffer_type = diag_register->buffer_type; + if (!_ctl_diag_capability(ioc, buffer_type)) { + pr_err(MPT3SAS_FMT + "%s: doesn't have capability for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -EPERM; + } + + if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) { + pr_err(MPT3SAS_FMT + "%s: already has a registered buffer for buffer_type(0x%02x)\n", + ioc->name, __func__, + buffer_type); + return -EINVAL; + } + + if (diag_register->requested_buffer_size % 4) { + pr_err(MPT3SAS_FMT + "%s: the requested_buffer_size is not 4 byte aligned\n", + ioc->name, __func__); + return -EINVAL; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->ctl_cmds.smid = smid; + + request_data = ioc->diag_buffer[buffer_type]; + request_data_sz = diag_register->requested_buffer_size; + ioc->unique_id[buffer_type] = diag_register->unique_id; + ioc->diag_buffer_status[buffer_type] = 0; + memcpy(ioc->product_specific[buffer_type], + diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS); + ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags; + + if (request_data) { + request_data_dma = ioc->diag_buffer_dma[buffer_type]; + if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) { + pci_free_consistent(ioc->pdev, + ioc->diag_buffer_sz[buffer_type], + request_data, request_data_dma); + request_data = NULL; + } + } + + if (request_data == NULL) { + ioc->diag_buffer_sz[buffer_type] = 0; + ioc->diag_buffer_dma[buffer_type] = 0; + request_data = pci_alloc_consistent( + ioc->pdev, request_data_sz, &request_data_dma); + if (request_data == NULL) { + pr_err(MPT3SAS_FMT "%s: failed allocating memory" \ + " for diag buffers, requested size(%d)\n", + ioc->name, __func__, request_data_sz); + mpt3sas_base_free_smid(ioc, smid); + return -ENOMEM; + } + ioc->diag_buffer[buffer_type] = request_data; + ioc->diag_buffer_sz[buffer_type] = request_data_sz; + ioc->diag_buffer_dma[buffer_type] = request_data_dma; + } + + mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; + mpi_request->BufferType = diag_register->buffer_type; + mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags); + mpi_request->BufferAddress = cpu_to_le64(request_data_dma); + mpi_request->BufferLength = cpu_to_le32(request_data_sz); + mpi_request->VF_ID = 0; /* TODO */ + 
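+	/* virtual function/port addressing is unused here, so both IDs stay
+	 * zero (see the TODO above) */
+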
mpi_request->VP_ID = 0; + + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n", + ioc->name, __func__, request_data, + (unsigned long long)request_data_dma, + le32_to_cpu(mpi_request->BufferLength))); + + for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) + mpi_request->ProductSpecific[i] = + cpu_to_le32(ioc->product_specific[buffer_type][i]); + + init_completion(&ioc->ctl_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, + MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); + + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, + __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4); + if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + /* process the completed Reply Message Frame */ + if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { + pr_err(MPT3SAS_FMT "%s: no reply message\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + + mpi_reply = ioc->ctl_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_REGISTERED; + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", + ioc->name, __func__)); + } else { + pr_info(MPT3SAS_FMT + "%s: ioc_status(0x%04x) log_info(0x%08x)\n", + ioc->name, __func__, + ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); + rc = -EFAULT; + } + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + + out: + + if (rc && request_data) + pci_free_consistent(ioc->pdev, request_data_sz, + request_data, request_data_dma); + + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return rc; +} + +/** + * mpt3sas_enable_diag_buffer - enabling diag_buffers support driver load time + * @ioc: per adapter object + * @bits_to_register: bitwise field where trace is bit 0, and snapshot is bit 1 + * + * This is called when command line option diag_buffer_enable is enabled + * at driver load time. 
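+ * For example, diag_buffer_enable=3 would register both the trace buffer
+ * (bit 0) and the snapshot buffer (bit 1); each is requested at 2MB below.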
+ */ +void +mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register) +{ + struct mpt3_diag_register diag_register; + + memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); + + if (bits_to_register & 1) { + pr_info(MPT3SAS_FMT "registering trace buffer support\n", + ioc->name); + ioc->diag_trigger_master.MasterData = + (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + diag_register.unique_id = 0x7075900; + _ctl_diag_register_2(ioc, &diag_register); + } + + if (bits_to_register & 2) { + pr_info(MPT3SAS_FMT "registering snapshot buffer support\n", + ioc->name); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT; + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + diag_register.unique_id = 0x7075901; + _ctl_diag_register_2(ioc, &diag_register); + } + + if (bits_to_register & 4) { + pr_info(MPT3SAS_FMT "registering extended buffer support\n", + ioc->name); + diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED; + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + diag_register.unique_id = 0x7075901; + _ctl_diag_register_2(ioc, &diag_register); + } +} + +/** + * _ctl_diag_register - application register with driver + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + * + * This will allow the driver to setup any required buffers that will be + * needed by firmware to communicate with the driver. + */ +static long +_ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_register karg; + long rc; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + rc = _ctl_diag_register_2(ioc, &karg); + return rc; +} + +/** + * _ctl_diag_unregister - application unregister with driver + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + * + * This will allow the driver to cleanup any memory allocated for diag + * messages and to free up any resources. 
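+ * Reached through the MPT3DIAGREGISTER opcode dispatched by
+ * _ctl_ioctl_main().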
+ */ +static long +_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_unregister karg; + void *request_data; + dma_addr_t request_data_dma; + u32 request_data_sz; + u8 buffer_type; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + buffer_type = karg.unique_id & 0x000000ff; + if (!_ctl_diag_capability(ioc, buffer_type)) { + pr_err(MPT3SAS_FMT + "%s: doesn't have capability for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -EPERM; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + pr_err(MPT3SAS_FMT + "%s: buffer_type(0x%02x) is not registered\n", + ioc->name, __func__, buffer_type); + return -EINVAL; + } + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + pr_err(MPT3SAS_FMT + "%s: buffer_type(0x%02x) has not been released\n", + ioc->name, __func__, buffer_type); + return -EINVAL; + } + + if (karg.unique_id != ioc->unique_id[buffer_type]) { + pr_err(MPT3SAS_FMT + "%s: unique_id(0x%08x) is not registered\n", + ioc->name, __func__, karg.unique_id); + return -EINVAL; + } + + request_data = ioc->diag_buffer[buffer_type]; + if (!request_data) { + pr_err(MPT3SAS_FMT + "%s: doesn't have memory allocated for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -ENOMEM; + } + + request_data_sz = ioc->diag_buffer_sz[buffer_type]; + request_data_dma = ioc->diag_buffer_dma[buffer_type]; + pci_free_consistent(ioc->pdev, request_data_sz, + request_data, request_data_dma); + ioc->diag_buffer[buffer_type] = NULL; + ioc->diag_buffer_status[buffer_type] = 0; + return 0; +} + +/** + * _ctl_diag_query - query relevant info associated with diag buffers + * @ioc: per adapter object + * @arg - user space buffer containing ioctl content + * + * The application will send only buffer_type and unique_id. Driver will + * inspect unique_id first, if valid, fill in all the info. If unique_id is + * 0x00, the driver will return info specified by Buffer Type. 
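+ * A fully qualified unique_id (upper 24 bits non-zero) must match the id
+ * recorded at registration time, otherwise -EINVAL is returned.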
+ */ +static long +_ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) +{ + struct mpt3_diag_query karg; + void *request_data; + int i; + u8 buffer_type; + + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, + __func__)); + + karg.application_flags = 0; + buffer_type = karg.buffer_type; + + if (!_ctl_diag_capability(ioc, buffer_type)) { + pr_err(MPT3SAS_FMT + "%s: doesn't have capability for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -EPERM; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + pr_err(MPT3SAS_FMT + "%s: buffer_type(0x%02x) is not registered\n", + ioc->name, __func__, buffer_type); + return -EINVAL; + } + + if (karg.unique_id & 0xffffff00) { + if (karg.unique_id != ioc->unique_id[buffer_type]) { + pr_err(MPT3SAS_FMT + "%s: unique_id(0x%08x) is not registered\n", + ioc->name, __func__, karg.unique_id); + return -EINVAL; + } + } + + request_data = ioc->diag_buffer[buffer_type]; + if (!request_data) { + pr_err(MPT3SAS_FMT + "%s: doesn't have buffer for buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type); + return -ENOMEM; + } + + if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED) + karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | + MPT3_APP_FLAGS_BUFFER_VALID); + else + karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | + MPT3_APP_FLAGS_BUFFER_VALID | + MPT3_APP_FLAGS_FW_BUFFER_ACCESS); + + for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) + karg.product_specific[i] = + ioc->product_specific[buffer_type][i]; + + karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type]; + karg.driver_added_buffer_size = 0; + karg.unique_id = ioc->unique_id[buffer_type]; + karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type]; + + if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) { + pr_err(MPT3SAS_FMT + "%s: unable to write mpt3_diag_query data @ %p\n", + ioc->name, __func__, arg); + return -EFAULT; + } + return 0; +} + +/** + * mpt3sas_send_diag_release - Diag Release Message + * @ioc: per adapter object + * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED + * @issue_reset - specifies whether host reset is required. 
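+ * Returns 0 on success, -EAGAIN when the IOC is not operational or the
+ * ctl command slot is busy, and -EFAULT on timeout or an invalid reply.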
+ *
+ */
+int
+mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
+	u8 *issue_reset)
+{
+	Mpi2DiagReleaseRequest_t *mpi_request;
+	Mpi2DiagReleaseReply_t *mpi_reply;
+	u16 smid;
+	u16 ioc_status;
+	u32 ioc_state;
+	int rc;
+	unsigned long timeleft;
+
+	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+	    __func__));
+
+	rc = 0;
+	*issue_reset = 0;
+
+	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+		if (ioc->diag_buffer_status[buffer_type] &
+		    MPT3_DIAG_BUFFER_IS_REGISTERED)
+			ioc->diag_buffer_status[buffer_type] |=
+			    MPT3_DIAG_BUFFER_IS_RELEASED;
+		dctlprintk(ioc, pr_info(MPT3SAS_FMT
+		    "%s: skipping due to FAULT state\n", ioc->name,
+		    __func__));
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+		pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+		    ioc->name, __func__);
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+	if (!smid) {
+		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+		    ioc->name, __func__);
+		rc = -EAGAIN;
+		goto out;
+	}
+
+	ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+	memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+	ioc->ctl_cmds.smid = smid;
+
+	mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
+	mpi_request->BufferType = buffer_type;
+	mpi_request->VF_ID = 0; /* TODO */
+	mpi_request->VP_ID = 0;
+
+	init_completion(&ioc->ctl_cmds.done);
+	mpt3sas_base_put_smid_default(ioc, smid);
+	timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+	    MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+	if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+		pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+		    __func__);
+		_debug_dump_mf(mpi_request,
+		    sizeof(Mpi2DiagReleaseRequest_t)/4);
+		if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+			*issue_reset = 1;
+		rc = -EFAULT;
+		goto out;
+	}
+
+	/* process the completed Reply Message Frame */
+	if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+		pr_err(MPT3SAS_FMT "%s: no reply message\n",
+		    ioc->name, __func__);
+		rc = -EFAULT;
+		goto out;
+	}
+
+	mpi_reply = ioc->ctl_cmds.reply;
+	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+	if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+		ioc->diag_buffer_status[buffer_type] |=
+		    MPT3_DIAG_BUFFER_IS_RELEASED;
+		dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
+		    ioc->name, __func__));
+	} else {
+		pr_info(MPT3SAS_FMT
+		    "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+		    ioc->name, __func__,
+		    ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+		rc = -EFAULT;
+	}
+
+ out:
+	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+	return rc;
+}
+
+/**
+ * _ctl_diag_release - request to send Diag Release Message to firmware
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * This allows ownership of the specified buffer to be returned to the driver,
+ * allowing an application to read the buffer without fear that firmware is
+ * overwriting information in the buffer.
+ */
+static long
+_ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+	struct mpt3_diag_release karg;
+	void *request_data;
+	int rc;
+	u8 buffer_type;
+	u8 issue_reset = 0;
+
+	if (copy_from_user(&karg, arg, sizeof(karg))) {
+		pr_err("failure at %s:%d/%s()!\n",
+		    __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+
+	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+	    __func__));
+
+	buffer_type = karg.unique_id & 0x000000ff;
+	if (!_ctl_diag_capability(ioc, buffer_type)) {
+		pr_err(MPT3SAS_FMT
+		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
+		    ioc->name, __func__, buffer_type);
+		return -EPERM;
+	}
+
+	if ((ioc->diag_buffer_status[buffer_type] &
+	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+		pr_err(MPT3SAS_FMT
+		    "%s: buffer_type(0x%02x) is not registered\n",
+		    ioc->name, __func__, buffer_type);
+		return -EINVAL;
+	}
+
+	if (karg.unique_id != ioc->unique_id[buffer_type]) {
+		pr_err(MPT3SAS_FMT
+		    "%s: unique_id(0x%08x) is not registered\n",
+		    ioc->name, __func__, karg.unique_id);
+		return -EINVAL;
+	}
+
+	if (ioc->diag_buffer_status[buffer_type] &
+	    MPT3_DIAG_BUFFER_IS_RELEASED) {
+		pr_err(MPT3SAS_FMT
+		    "%s: buffer_type(0x%02x) is already released\n",
+		    ioc->name, __func__,
+		    buffer_type);
+		return 0;
+	}
+
+	request_data = ioc->diag_buffer[buffer_type];
+
+	if (!request_data) {
+		pr_err(MPT3SAS_FMT
+		    "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+		    ioc->name, __func__, buffer_type);
+		return -ENOMEM;
+	}
+
+	/* buffers were released due to host reset */
+	if ((ioc->diag_buffer_status[buffer_type] &
+	    MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
+		ioc->diag_buffer_status[buffer_type] |=
+		    MPT3_DIAG_BUFFER_IS_RELEASED;
+		ioc->diag_buffer_status[buffer_type] &=
+		    ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
+		pr_err(MPT3SAS_FMT
+		    "%s: buffer_type(0x%02x) was released due to host reset\n",
+		    ioc->name, __func__, buffer_type);
+		return 0;
+	}
+
+	rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
+
+	if (issue_reset)
+		mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+		    FORCE_BIG_HAMMER);
+
+	return rc;
+}
+
+/**
+ * _ctl_diag_read_buffer - request for copy of the diag buffer
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+	struct mpt3_diag_read_buffer karg;
+	struct mpt3_diag_read_buffer __user *uarg = arg;
+	void *request_data, *diag_data;
+	Mpi2DiagBufferPostRequest_t *mpi_request;
+	Mpi2DiagBufferPostReply_t *mpi_reply;
+	int rc, i;
+	u8 buffer_type;
+	unsigned long timeleft, request_size, copy_size;
+	u16 smid;
+	u16 ioc_status;
+	u8 issue_reset = 0;
+
+	if (copy_from_user(&karg, arg, sizeof(karg))) {
+		pr_err("failure at %s:%d/%s()!\n",
+		    __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+
+	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+	    __func__));
+
+	buffer_type = karg.unique_id & 0x000000ff;
+	if (!_ctl_diag_capability(ioc, buffer_type)) {
+		pr_err(MPT3SAS_FMT
+		    "%s: doesn't have capability for buffer_type(0x%02x)\n",
+		    ioc->name, __func__, buffer_type);
+		return -EPERM;
+	}
+
+	if (karg.unique_id != ioc->unique_id[buffer_type]) {
+		pr_err(MPT3SAS_FMT
+		    "%s: unique_id(0x%08x) is not registered\n",
+		    ioc->name, __func__, karg.unique_id);
+		return -EINVAL;
+	}
+
+	request_data = ioc->diag_buffer[buffer_type];
+	if (!request_data) {
+		pr_err(MPT3SAS_FMT
+		    "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+		    ioc->name, __func__, buffer_type);
+		return -ENOMEM;
+	}
+
+	request_size =
ioc->diag_buffer_sz[buffer_type]; + + if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) { + pr_err(MPT3SAS_FMT "%s: either the starting_offset " \ + "or bytes_to_read are not 4 byte aligned\n", ioc->name, + __func__); + return -EINVAL; + } + + if (karg.starting_offset > request_size) + return -EINVAL; + + diag_data = (void *)(request_data + karg.starting_offset); + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "%s: diag_buffer(%p), offset(%d), sz(%d)\n", + ioc->name, __func__, + diag_data, karg.starting_offset, karg.bytes_to_read)); + + /* Truncate data on requests that are too large */ + if ((diag_data + karg.bytes_to_read < diag_data) || + (diag_data + karg.bytes_to_read > request_data + request_size)) + copy_size = request_size - karg.starting_offset; + else + copy_size = karg.bytes_to_read; + + if (copy_to_user((void __user *)uarg->diagnostic_data, + diag_data, copy_size)) { + pr_err(MPT3SAS_FMT + "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n", + ioc->name, __func__, diag_data); + return -EFAULT; + } + + if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0) + return 0; + + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "%s: Reregister buffer_type(0x%02x)\n", + ioc->name, __func__, buffer_type)); + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "%s: buffer_type(0x%02x) is still registered\n", + ioc->name, __func__, buffer_type)); + return 0; + } + /* Get a free request frame and save the message context. + */ + + if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + ioc->ctl_cmds.status = MPT3_CMD_PENDING; + memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->ctl_cmds.smid = smid; + + mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST; + mpi_request->BufferType = buffer_type; + mpi_request->BufferLength = + cpu_to_le32(ioc->diag_buffer_sz[buffer_type]); + mpi_request->BufferAddress = + cpu_to_le64(ioc->diag_buffer_dma[buffer_type]); + for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) + mpi_request->ProductSpecific[i] = + cpu_to_le32(ioc->product_specific[buffer_type][i]); + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + + init_completion(&ioc->ctl_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done, + MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); + + if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name, + __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4); + if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + /* process the completed Reply Message Frame */ + if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) { + pr_err(MPT3SAS_FMT "%s: no reply message\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + + mpi_reply = ioc->ctl_cmds.reply; + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK; + + if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { + ioc->diag_buffer_status[buffer_type] |= + MPT3_DIAG_BUFFER_IS_REGISTERED; + dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n", + ioc->name, __func__)); + } else { + pr_info(MPT3SAS_FMT + "%s: ioc_status(0x%04x) 
log_info(0x%08x)\n", + ioc->name, __func__, + ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo)); + rc = -EFAULT; + } + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + + out: + + ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; + return rc; +} + + + +#ifdef CONFIG_COMPAT +/** + * _ctl_compat_mpt_command - convert 32bit pointers to 64bit. + * @ioc: per adapter object + * @cmd - ioctl opcode + * @arg - (struct mpt3_ioctl_command32) + * + * MPT3COMMAND32 - Handle 32bit applications running on 64bit os. + */ +static long +_ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd, + void __user *arg) +{ + struct mpt3_ioctl_command32 karg32; + struct mpt3_ioctl_command32 __user *uarg; + struct mpt3_ioctl_command karg; + + if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32)) + return -EINVAL; + + uarg = (struct mpt3_ioctl_command32 __user *) arg; + + if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + memset(&karg, 0, sizeof(struct mpt3_ioctl_command)); + karg.hdr.ioc_number = karg32.hdr.ioc_number; + karg.hdr.port_number = karg32.hdr.port_number; + karg.hdr.max_data_size = karg32.hdr.max_data_size; + karg.timeout = karg32.timeout; + karg.max_reply_bytes = karg32.max_reply_bytes; + karg.data_in_size = karg32.data_in_size; + karg.data_out_size = karg32.data_out_size; + karg.max_sense_bytes = karg32.max_sense_bytes; + karg.data_sge_offset = karg32.data_sge_offset; + karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr); + karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr); + karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr); + karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr); + return _ctl_do_mpt_command(ioc, karg, &uarg->mf); +} +#endif + +/** + * _ctl_ioctl_main - main ioctl entry point + * @file - (struct file) + * @cmd - ioctl opcode + * @arg - user space data buffer + * @compat - handles 32 bit applications in 64bit os + * @mpi_version: will be MPI2_VERSION for mpt2ctl ioctl device & + * MPI25_VERSION for mpt3ctl ioctl device. + */ +static long +_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg, + u8 compat, u16 mpi_version) +{ + struct MPT3SAS_ADAPTER *ioc; + struct mpt3_ioctl_header ioctl_header; + enum block_state state; + long ret = -EINVAL; + + /* get IOCTL header */ + if (copy_from_user(&ioctl_header, (char __user *)arg, + sizeof(struct mpt3_ioctl_header))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + return -EFAULT; + } + + if (_ctl_verify_adapter(ioctl_header.ioc_number, + &ioc, mpi_version) == -1 || !ioc) + return -ENODEV; + + /* pci_access_mutex lock acquired by ioctl path */ + mutex_lock(&ioc->pci_access_mutex); + + if (ioc->shost_recovery || ioc->pci_error_recovery || + ioc->is_driver_loading || ioc->remove_host) { + ret = -EAGAIN; + goto out_unlock_pciaccess; + } + + state = (file->f_flags & O_NONBLOCK) ? 
NON_BLOCKING : BLOCKING; + if (state == NON_BLOCKING) { + if (!mutex_trylock(&ioc->ctl_cmds.mutex)) { + ret = -EAGAIN; + goto out_unlock_pciaccess; + } + } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex)) { + ret = -ERESTARTSYS; + goto out_unlock_pciaccess; + } + + + switch (cmd) { + case MPT3IOCINFO: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo)) + ret = _ctl_getiocinfo(ioc, arg); + break; +#ifdef CONFIG_COMPAT + case MPT3COMMAND32: +#endif + case MPT3COMMAND: + { + struct mpt3_ioctl_command __user *uarg; + struct mpt3_ioctl_command karg; + +#ifdef CONFIG_COMPAT + if (compat) { + ret = _ctl_compat_mpt_command(ioc, cmd, arg); + break; + } +#endif + if (copy_from_user(&karg, arg, sizeof(karg))) { + pr_err("failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + ret = -EFAULT; + break; + } + + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) { + uarg = arg; + ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf); + } + break; + } + case MPT3EVENTQUERY: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery)) + ret = _ctl_eventquery(ioc, arg); + break; + case MPT3EVENTENABLE: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable)) + ret = _ctl_eventenable(ioc, arg); + break; + case MPT3EVENTREPORT: + ret = _ctl_eventreport(ioc, arg); + break; + case MPT3HARDRESET: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset)) + ret = _ctl_do_reset(ioc, arg); + break; + case MPT3BTDHMAPPING: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping)) + ret = _ctl_btdh_mapping(ioc, arg); + break; + case MPT3DIAGREGISTER: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register)) + ret = _ctl_diag_register(ioc, arg); + break; + case MPT3DIAGUNREGISTER: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister)) + ret = _ctl_diag_unregister(ioc, arg); + break; + case MPT3DIAGQUERY: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query)) + ret = _ctl_diag_query(ioc, arg); + break; + case MPT3DIAGRELEASE: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release)) + ret = _ctl_diag_release(ioc, arg); + break; + case MPT3DIAGREADBUFFER: + if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer)) + ret = _ctl_diag_read_buffer(ioc, arg); + break; + default: + dctlprintk(ioc, pr_info(MPT3SAS_FMT + "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd)); + break; + } + + mutex_unlock(&ioc->ctl_cmds.mutex); +out_unlock_pciaccess: + mutex_unlock(&ioc->pci_access_mutex); + return ret; +} + +/** + * _ctl_ioctl - mpt3ctl main ioctl entry point (unlocked) + * @file - (struct file) + * @cmd - ioctl opcode + * @arg - + */ +long +_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + + /* pass MPI25_VERSION value, to indicate that this ioctl cmd + * came from mpt3ctl ioctl device. + */ + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI25_VERSION); + return ret; +} + +/** + * _ctl_mpt2_ioctl - mpt2ctl main ioctl entry point (unlocked) + * @file - (struct file) + * @cmd - ioctl opcode + * @arg - + */ +long +_ctl_mpt2_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + long ret; + + /* pass MPI2_VERSION value, to indicate that this ioctl cmd + * came from mpt2ctl ioctl device. + */ + ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0, MPI2_VERSION); + return ret; +} +#ifdef CONFIG_COMPAT +/** + *_ ctl_ioctl_compat - main ioctl entry point (compat) + * @file - + * @cmd - + * @arg - + * + * This routine handles 32 bit applications in 64bit os. 
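+ * It re-enters _ctl_ioctl_main() with the compat flag set so that
+ * MPT3COMMAND32 payloads are converted by _ctl_compat_mpt_command().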
+ */
+long
+_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
+{
+	long ret;
+
+	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI25_VERSION);
+	return ret;
+}
+
+/**
+ * _ctl_mpt2_ioctl_compat - main ioctl entry point (compat)
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg - user space data buffer
+ *
+ * This routine handles 32 bit applications in 64bit os.
+ */
+long
+_ctl_mpt2_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
+{
+	long ret;
+
+	ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1, MPI2_VERSION);
+	return ret;
+}
+#endif
+
+/* scsi host attributes */
+/**
+ * _ctl_version_fw_show - firmware version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+	    (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+	    (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+	    (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+	    ioc->facts.FWVersion.Word & 0x000000FF);
+}
+static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
+
+/**
+ * _ctl_version_bios_show - bios version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+
+	return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+	    (version & 0xFF000000) >> 24,
+	    (version & 0x00FF0000) >> 16,
+	    (version & 0x0000FF00) >> 8,
+	    version & 0x000000FF);
+}
+static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
+
+/**
+ * _ctl_version_mpi_show - MPI (message passing interface) version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
+	    ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
+}
+static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
+
+/**
+ * _ctl_version_product_show - product name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
+}
+static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
+
+/**
+ * _ctl_version_nvdata_persistent_show - nvdata persistent version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_nvdata_persistent_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%08xh\n",
+	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
+}
+static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
+	_ctl_version_nvdata_persistent_show, NULL);
+
+/**
+ * _ctl_version_nvdata_default_show - nvdata default version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute
+	*attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%08xh\n",
+	    le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
+}
+static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
+	_ctl_version_nvdata_default_show, NULL);
+
+/**
+ * _ctl_board_name_show - board name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
+}
+static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
+
+/**
+ * _ctl_board_assembly_show - board assembly name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
+}
+static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
+
+/**
+ * _ctl_board_tracer_show - board tracer number
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
+}
+static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
+
+/**
+ * _ctl_io_delay_show - io missing delay
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is for the firmware implementation of debouncing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
+}
+static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
+
+/**
+ * _ctl_device_delay_show - device missing delay
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is for the firmware implementation of debouncing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
+}
+static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
+
+/**
+ * _ctl_fw_queue_depth_show - global credits
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the firmware queue depth limit
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
+}
+static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
+
+/**
+ * _ctl_host_sas_address_show - sas address
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the controller sas address
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+	    (unsigned long long)ioc->sas_hba.sas_address);
+}
+static DEVICE_ATTR(host_sas_address, S_IRUGO,
+	_ctl_host_sas_address_show, NULL);
+
+/**
+ * _ctl_logging_level_show - logging level
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+	return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
+}
+static ssize_t
+_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
+	const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	int val = 0;
+
+	if (sscanf(buf, "%x", &val) != 1)
+		return -EINVAL;
+
+	ioc->logging_level = val;
+	pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
+	    ioc->logging_level);
+	return strlen(buf);
+}
+static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
+	_ctl_logging_level_store);
+
+/**
+ * _ctl_fwfault_debug_show - show/store fwfault_debug
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * mpt3sas_fwfault_debug is a command line option
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
+    char *buf)
+{
+    struct Scsi_Host *shost = class_to_shost(cdev);
+    struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+    return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
+}
+static ssize_t
+_ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
+    const char *buf, size_t count)
+{
+    struct Scsi_Host *shost = class_to_shost(cdev);
+    struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+    int val = 0;
+
+    if (sscanf(buf, "%d", &val) != 1)
+        return -EINVAL;
+
+    ioc->fwfault_debug = val;
+    pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
+        ioc->fwfault_debug);
+    return strlen(buf);
+}
+static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
+    _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
+
+/**
+ * _ctl_ioc_reset_count_show - ioc reset count
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the number of times the controller has been reset
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
+    char *buf)
+{
+    struct Scsi_Host *shost = class_to_shost(cdev);
+    struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+    return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
+}
+static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
+
+/**
+ * _ctl_ioc_reply_queue_count_show - number of reply queues
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the number of reply queues
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reply_queue_count_show(struct device *cdev,
+    struct device_attribute *attr, char *buf)
+{
+    u8 reply_queue_count;
+    struct Scsi_Host *shost = class_to_shost(cdev);
+    struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+    if ((ioc->facts.IOCCapabilities &
+        MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
+        reply_queue_count = ioc->reply_queue_count;
+    else
+        reply_queue_count = 1;
+
+    return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
+}
+static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
+    NULL);
+
+/**
+ * _ctl_BRM_status_show - Backup Rail Monitor Status
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the Backup Rail Monitor status; it is meaningful only on
+ * warpdrive controllers
+ *
+ * A sysfs 'read-only' shost attribute.
+ */ +static ssize_t +_ctl_BRM_status_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + Mpi2IOUnitPage3_t *io_unit_pg3 = NULL; + Mpi2ConfigReply_t mpi_reply; + u16 backup_rail_monitor_status = 0; + u16 ioc_status; + int sz; + ssize_t rc = 0; + + if (!ioc->is_warpdrive) { + pr_err(MPT3SAS_FMT "%s: BRM attribute is only for" + " warpdrive\n", ioc->name, __func__); + goto out; + } + /* pci_access_mutex lock acquired by sysfs show path */ + mutex_lock(&ioc->pci_access_mutex); + if (ioc->pci_error_recovery || ioc->remove_host) { + mutex_unlock(&ioc->pci_access_mutex); + return 0; + } + + /* allocate upto GPIOVal 36 entries */ + sz = offsetof(Mpi2IOUnitPage3_t, GPIOVal) + (sizeof(u16) * 36); + io_unit_pg3 = kzalloc(sz, GFP_KERNEL); + if (!io_unit_pg3) { + pr_err(MPT3SAS_FMT "%s: failed allocating memory " + "for iounit_pg3: (%d) bytes\n", ioc->name, __func__, sz); + goto out; + } + + if (mpt3sas_config_get_iounit_pg3(ioc, &mpi_reply, io_unit_pg3, sz) != + 0) { + pr_err(MPT3SAS_FMT + "%s: failed reading iounit_pg3\n", ioc->name, + __func__); + goto out; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "%s: iounit_pg3 failed with " + "ioc_status(0x%04x)\n", ioc->name, __func__, ioc_status); + goto out; + } + + if (io_unit_pg3->GPIOCount < 25) { + pr_err(MPT3SAS_FMT "%s: iounit_pg3->GPIOCount less than " + "25 entries, detected (%d) entries\n", ioc->name, __func__, + io_unit_pg3->GPIOCount); + goto out; + } + + /* BRM status is in bit zero of GPIOVal[24] */ + backup_rail_monitor_status = le16_to_cpu(io_unit_pg3->GPIOVal[24]); + rc = snprintf(buf, PAGE_SIZE, "%d\n", (backup_rail_monitor_status & 1)); + + out: + kfree(io_unit_pg3); + mutex_unlock(&ioc->pci_access_mutex); + return rc; +} +static DEVICE_ATTR(BRM_status, S_IRUGO, _ctl_BRM_status_show, NULL); + +struct DIAG_BUFFER_START { + __le32 Size; + __le32 DiagVersion; + u8 BufferType; + u8 Reserved[3]; + __le32 Reserved1; + __le32 Reserved2; + __le32 Reserved3; +}; + +/** + * _ctl_host_trace_buffer_size_show - host buffer size (trace only) + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read-only' shost attribute. 
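+ *
+ * The reported size is taken from the DIAG_BUFFER_START header that the
+ * firmware writes at the start of the trace buffer; it is only trusted when
+ * DiagVersion holds one of the known values and Reserved3 carries the
+ * signature 0x4742444c (the ASCII bytes "GBDL"), as checked in the show
+ * routine below.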
+ */ +static ssize_t +_ctl_host_trace_buffer_size_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + u32 size = 0; + struct DIAG_BUFFER_START *request_data; + + if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { + pr_err(MPT3SAS_FMT + "%s: host_trace_buffer is not registered\n", + ioc->name, __func__); + return 0; + } + + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + pr_err(MPT3SAS_FMT + "%s: host_trace_buffer is not registered\n", + ioc->name, __func__); + return 0; + } + + request_data = (struct DIAG_BUFFER_START *) + ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]; + if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 || + le32_to_cpu(request_data->DiagVersion) == 0x01000000 || + le32_to_cpu(request_data->DiagVersion) == 0x01010000) && + le32_to_cpu(request_data->Reserved3) == 0x4742444c) + size = le32_to_cpu(request_data->Size); + + ioc->ring_buffer_sz = size; + return snprintf(buf, PAGE_SIZE, "%d\n", size); +} +static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO, + _ctl_host_trace_buffer_size_show, NULL); + +/** + * _ctl_host_trace_buffer_show - firmware ring buffer (trace only) + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read/write' shost attribute. + * + * You will only be able to read 4k bytes of ring buffer at a time. + * In order to read beyond 4k bytes, you will have to write out the + * offset to the same attribute, it will move the pointer. + */ +static ssize_t +_ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + void *request_data; + u32 size; + + if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) { + pr_err(MPT3SAS_FMT + "%s: host_trace_buffer is not registered\n", + ioc->name, __func__); + return 0; + } + + if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + pr_err(MPT3SAS_FMT + "%s: host_trace_buffer is not registered\n", + ioc->name, __func__); + return 0; + } + + if (ioc->ring_buffer_offset > ioc->ring_buffer_sz) + return 0; + + size = ioc->ring_buffer_sz - ioc->ring_buffer_offset; + size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size; + request_data = ioc->diag_buffer[0] + ioc->ring_buffer_offset; + memcpy(buf, request_data, size); + return size; +} + +static ssize_t +_ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + int val = 0; + + if (sscanf(buf, "%d", &val) != 1) + return -EINVAL; + + ioc->ring_buffer_offset = val; + return strlen(buf); +} +static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR, + _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store); + + +/*****************************************/ + +/** + * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only) + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read/write' shost attribute. 
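+ *
+ * Illustrative round trip (the host number is hypothetical): post a trace
+ * buffer, read it back through the host_trace_buffer attribute in up-to-4k
+ * chunks (writing the next offset back between reads), then release it:
+ *
+ *    echo post > /sys/class/scsi_host/host0/host_trace_buffer_enable
+ *    echo release > /sys/class/scsi_host/host0/host_trace_buffer_enable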
+ *
+ * This is a mechanism to post/release host_trace_buffers
+ */
+static ssize_t
+_ctl_host_trace_buffer_enable_show(struct device *cdev,
+    struct device_attribute *attr, char *buf)
+{
+    struct Scsi_Host *shost = class_to_shost(cdev);
+    struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+    if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
+        ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+        MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
+        return snprintf(buf, PAGE_SIZE, "off\n");
+    else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+        MPT3_DIAG_BUFFER_IS_RELEASED))
+        return snprintf(buf, PAGE_SIZE, "release\n");
+    else
+        return snprintf(buf, PAGE_SIZE, "post\n");
+}
+
+static ssize_t
+_ctl_host_trace_buffer_enable_store(struct device *cdev,
+    struct device_attribute *attr, const char *buf, size_t count)
+{
+    struct Scsi_Host *shost = class_to_shost(cdev);
+    struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+    char str[10] = "";
+    struct mpt3_diag_register diag_register;
+    u8 issue_reset = 0;
+
+    /* don't allow a post/release to occur while recovery is active */
+    if (ioc->shost_recovery || ioc->remove_host ||
+        ioc->pci_error_recovery || ioc->is_driver_loading)
+        return -EBUSY;
+
+    if (sscanf(buf, "%9s", str) != 1)
+        return -EINVAL;
+
+    if (!strcmp(str, "post")) {
+        /* exit out if host buffers are already posted */
+        if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
+            (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+            MPT3_DIAG_BUFFER_IS_REGISTERED) &&
+            ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+            MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
+            goto out;
+        memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
+        pr_info(MPT3SAS_FMT "posting host trace buffers\n",
+            ioc->name);
+        diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+        diag_register.requested_buffer_size = (1024 * 1024);
+        diag_register.unique_id = 0x7075900;
+        ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
+        _ctl_diag_register_2(ioc, &diag_register);
+    } else if (!strcmp(str, "release")) {
+        /* exit out if host buffers are already released */
+        if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
+            goto out;
+        if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+            MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
+            goto out;
+        if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+            MPT3_DIAG_BUFFER_IS_RELEASED))
+            goto out;
+        pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
+            ioc->name);
+        mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
+            &issue_reset);
+    }
+
+ out:
+    return strlen(buf);
+}
+static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
+    _ctl_host_trace_buffer_enable_show,
+    _ctl_host_trace_buffer_enable_store);
+
+/*********** diagnostic trigger support **********************************/
+
+/**
+ * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */ +static ssize_t +_ctl_diag_trigger_master_show(struct device *cdev, + struct device_attribute *attr, char *buf) + +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + unsigned long flags; + ssize_t rc; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + rc = sizeof(struct SL_WH_MASTER_TRIGGER_T); + memcpy(buf, &ioc->diag_trigger_master, rc); + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return rc; +} + +/** + * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +_ctl_diag_trigger_master_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) + +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + unsigned long flags; + ssize_t rc; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count); + memset(&ioc->diag_trigger_master, 0, + sizeof(struct SL_WH_MASTER_TRIGGER_T)); + memcpy(&ioc->diag_trigger_master, buf, rc); + ioc->diag_trigger_master.MasterData |= + (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return rc; +} +static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR, + _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store); + + +/** + * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +_ctl_diag_trigger_event_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + unsigned long flags; + ssize_t rc; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T); + memcpy(buf, &ioc->diag_trigger_event, rc); + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return rc; +} + +/** + * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read/write' shost attribute. + */ +static ssize_t +_ctl_diag_trigger_event_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) + +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + unsigned long flags; + ssize_t sz; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count); + memset(&ioc->diag_trigger_event, 0, + sizeof(struct SL_WH_EVENT_TRIGGERS_T)); + memcpy(&ioc->diag_trigger_event, buf, sz); + if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES) + ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES; + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return sz; +} +static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR, + _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store); + + +/** + * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read/write' shost attribute. 
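+ *
+ * Unlike the text attributes above, the diag_trigger_* attributes exchange
+ * raw SL_WH_*_TRIGGERS_T structures, so they are meant for management tools
+ * that understand the binary layout rather than for shell cat/echo.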
+ */
+static ssize_t
+_ctl_diag_trigger_scsi_show(struct device *cdev,
+    struct device_attribute *attr, char *buf)
+{
+    struct Scsi_Host *shost = class_to_shost(cdev);
+    struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+    unsigned long flags;
+    ssize_t rc;
+
+    spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+    rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
+    memcpy(buf, &ioc->diag_trigger_scsi, rc);
+    spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+    return rc;
+}
+
+/**
+ * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_scsi_store(struct device *cdev,
+    struct device_attribute *attr, const char *buf, size_t count)
+{
+    struct Scsi_Host *shost = class_to_shost(cdev);
+    struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+    unsigned long flags;
+    ssize_t sz;
+
+    spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+    sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
+    memset(&ioc->diag_trigger_scsi, 0,
+        sizeof(struct SL_WH_SCSI_TRIGGERS_T));
+    memcpy(&ioc->diag_trigger_scsi, buf, sz);
+    if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
+        ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
+    spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+    return sz;
+}
+static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
+    _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);
+
+
+/**
+ * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_mpi_show(struct device *cdev,
+    struct device_attribute *attr, char *buf)
+{
+    struct Scsi_Host *shost = class_to_shost(cdev);
+    struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+    unsigned long flags;
+    ssize_t rc;
+
+    spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+    rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
+    memcpy(buf, &ioc->diag_trigger_mpi, rc);
+    spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+    return rc;
+}
+
+/**
+ * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */ +static ssize_t +_ctl_diag_trigger_mpi_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + unsigned long flags; + ssize_t sz; + + spin_lock_irqsave(&ioc->diag_trigger_lock, flags); + sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count); + memset(&ioc->diag_trigger_mpi, 0, + sizeof(ioc->diag_trigger_mpi)); + memcpy(&ioc->diag_trigger_mpi, buf, sz); + if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES) + ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES; + spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags); + return sz; +} + +static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR, + _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store); + +/*********** diagnostic trigger suppport *** END ****************************/ + + + +/*****************************************/ + +struct device_attribute *mpt3sas_host_attrs[] = { + &dev_attr_version_fw, + &dev_attr_version_bios, + &dev_attr_version_mpi, + &dev_attr_version_product, + &dev_attr_version_nvdata_persistent, + &dev_attr_version_nvdata_default, + &dev_attr_board_name, + &dev_attr_board_assembly, + &dev_attr_board_tracer, + &dev_attr_io_delay, + &dev_attr_device_delay, + &dev_attr_logging_level, + &dev_attr_fwfault_debug, + &dev_attr_fw_queue_depth, + &dev_attr_host_sas_address, + &dev_attr_ioc_reset_count, + &dev_attr_host_trace_buffer_size, + &dev_attr_host_trace_buffer, + &dev_attr_host_trace_buffer_enable, + &dev_attr_reply_queue_count, + &dev_attr_diag_trigger_master, + &dev_attr_diag_trigger_event, + &dev_attr_diag_trigger_scsi, + &dev_attr_diag_trigger_mpi, + &dev_attr_BRM_status, + NULL, +}; + +/* device attributes */ + +/** + * _ctl_device_sas_address_show - sas address + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * This is the sas address for the target + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_device *sdev = to_scsi_device(dev); + struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata; + + return snprintf(buf, PAGE_SIZE, "0x%016llx\n", + (unsigned long long)sas_device_priv_data->sas_target->sas_address); +} +static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL); + +/** + * _ctl_device_handle_show - device handle + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * This is the firmware assigned device handle + * + * A sysfs 'read-only' shost attribute. 
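+ *
+ * (Despite the boilerplate above, sas_address and sas_device_handle are
+ * per-device attributes: the show routines take a scsi_device, and the
+ * attributes appear under each SCSI device's sysfs directory rather than
+ * under the Scsi_Host.)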
+ */
+static ssize_t
+_ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+    struct scsi_device *sdev = to_scsi_device(dev);
+    struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+    return snprintf(buf, PAGE_SIZE, "0x%04x\n",
+        sas_device_priv_data->sas_target->handle);
+}
+static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
+
+struct device_attribute *mpt3sas_dev_attrs[] = {
+    &dev_attr_sas_address,
+    &dev_attr_sas_device_handle,
+    NULL,
+};
+
+/* file operations table for mpt3ctl device */
+static const struct file_operations ctl_fops = {
+    .owner = THIS_MODULE,
+    .unlocked_ioctl = _ctl_ioctl,
+    .poll = _ctl_poll,
+    .fasync = _ctl_fasync,
+#ifdef CONFIG_COMPAT
+    .compat_ioctl = _ctl_ioctl_compat,
+#endif
+};
+
+/* file operations table for mpt2ctl device */
+static const struct file_operations ctl_gen2_fops = {
+    .owner = THIS_MODULE,
+    .unlocked_ioctl = _ctl_mpt2_ioctl,
+    .poll = _ctl_poll,
+    .fasync = _ctl_fasync,
+#ifdef CONFIG_COMPAT
+    .compat_ioctl = _ctl_mpt2_ioctl_compat,
+#endif
+};
+
+static struct miscdevice ctl_dev = {
+    .minor = MPT3SAS_MINOR,
+    .name = MPT3SAS_DEV_NAME,
+    .fops = &ctl_fops,
+};
+
+static struct miscdevice gen2_ctl_dev = {
+    .minor = MPT2SAS_MINOR,
+    .name = MPT2SAS_DEV_NAME,
+    .fops = &ctl_gen2_fops,
+};
+
+/**
+ * mpt3sas_ctl_init - main entry point for ctl.
+ *
+ */
+void
+mpt3sas_ctl_init(ushort hbas_to_enumerate)
+{
+    async_queue = NULL;
+
+    /* Don't register mpt3ctl ioctl device if
+     * hbas_to_enumerate is one.
+     */
+    if (hbas_to_enumerate != 1)
+        if (misc_register(&ctl_dev) < 0)
+            pr_err("%s can't register misc device [minor=%d]\n",
+                MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
+
+    /* Don't register mpt2ctl ioctl device if
+     * hbas_to_enumerate is two.
+ */ + if (hbas_to_enumerate != 2) + if (misc_register(&gen2_ctl_dev) < 0) + pr_err("%s can't register misc device [minor=%d]\n", + MPT2SAS_DRIVER_NAME, MPT2SAS_MINOR); + + init_waitqueue_head(&ctl_poll_wait); +} + +/** + * mpt3sas_ctl_exit - exit point for ctl + * + */ +void +mpt3sas_ctl_exit(ushort hbas_to_enumerate) +{ + struct MPT3SAS_ADAPTER *ioc; + int i; + + list_for_each_entry(ioc, &mpt3sas_ioc_list, list) { + + /* free memory associated to diag buffers */ + for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { + if (!ioc->diag_buffer[i]) + continue; + if (!(ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + continue; + if ((ioc->diag_buffer_status[i] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + continue; + pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i], + ioc->diag_buffer[i], ioc->diag_buffer_dma[i]); + ioc->diag_buffer[i] = NULL; + ioc->diag_buffer_status[i] = 0; + } + + kfree(ioc->event_log); + } + if (hbas_to_enumerate != 1) + misc_deregister(&ctl_dev); + if (hbas_to_enumerate != 2) + misc_deregister(&gen2_ctl_dev); +} diff --git a/addons/mpt3sas/src/4.4.180/mpt3sas_ctl.h b/addons/mpt3sas/src/4.4.180/mpt3sas_ctl.h new file mode 100644 index 00000000..89408356 --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpt3sas_ctl.h @@ -0,0 +1,423 @@ +/* + * Management Module Support for MPT (Message Passing Technology) based + * controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#ifndef MPT3SAS_CTL_H_INCLUDED +#define MPT3SAS_CTL_H_INCLUDED + +#ifdef __KERNEL__ +#include +#endif + +#ifndef MPT2SAS_MINOR +#define MPT2SAS_MINOR (MPT_MINOR + 1) +#endif +#ifndef MPT3SAS_MINOR +#define MPT3SAS_MINOR (MPT_MINOR + 2) +#endif +#define MPT2SAS_DEV_NAME "mpt2ctl" +#define MPT3SAS_DEV_NAME "mpt3ctl" +#define MPT3_MAGIC_NUMBER 'L' +#define MPT3_IOCTL_DEFAULT_TIMEOUT (10) /* in seconds */ + +/** + * IOCTL opcodes + */ +#define MPT3IOCINFO _IOWR(MPT3_MAGIC_NUMBER, 17, \ + struct mpt3_ioctl_iocinfo) +#define MPT3COMMAND _IOWR(MPT3_MAGIC_NUMBER, 20, \ + struct mpt3_ioctl_command) +#ifdef CONFIG_COMPAT +#define MPT3COMMAND32 _IOWR(MPT3_MAGIC_NUMBER, 20, \ + struct mpt3_ioctl_command32) +#endif +#define MPT3EVENTQUERY _IOWR(MPT3_MAGIC_NUMBER, 21, \ + struct mpt3_ioctl_eventquery) +#define MPT3EVENTENABLE _IOWR(MPT3_MAGIC_NUMBER, 22, \ + struct mpt3_ioctl_eventenable) +#define MPT3EVENTREPORT _IOWR(MPT3_MAGIC_NUMBER, 23, \ + struct mpt3_ioctl_eventreport) +#define MPT3HARDRESET _IOWR(MPT3_MAGIC_NUMBER, 24, \ + struct mpt3_ioctl_diag_reset) +#define MPT3BTDHMAPPING _IOWR(MPT3_MAGIC_NUMBER, 31, \ + struct mpt3_ioctl_btdh_mapping) + +/* diag buffer support */ +#define MPT3DIAGREGISTER _IOWR(MPT3_MAGIC_NUMBER, 26, \ + struct mpt3_diag_register) +#define MPT3DIAGRELEASE _IOWR(MPT3_MAGIC_NUMBER, 27, \ + struct mpt3_diag_release) +#define MPT3DIAGUNREGISTER _IOWR(MPT3_MAGIC_NUMBER, 28, \ + struct mpt3_diag_unregister) +#define MPT3DIAGQUERY _IOWR(MPT3_MAGIC_NUMBER, 29, \ + struct mpt3_diag_query) +#define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \ + struct mpt3_diag_read_buffer) + +/** + * struct mpt3_ioctl_header - main header structure + * @ioc_number - IOC unit number + * @port_number - IOC port number + * @max_data_size - maximum number bytes to transfer on read + */ +struct mpt3_ioctl_header { + uint32_t ioc_number; + uint32_t port_number; + uint32_t max_data_size; +}; + +/** + * struct mpt3_ioctl_diag_reset - diagnostic reset + * @hdr - generic header + */ +struct mpt3_ioctl_diag_reset { + struct mpt3_ioctl_header hdr; +}; + + +/** + * struct mpt3_ioctl_pci_info - pci device info + * @device - pci device id + * @function - pci function id + * @bus - pci bus id + * @segment_id - pci segment id + */ +struct mpt3_ioctl_pci_info { + union { + struct { + uint32_t device:5; + uint32_t function:3; + uint32_t bus:24; + } bits; + uint32_t word; + } u; + uint32_t segment_id; +}; + + +#define MPT2_IOCTL_INTERFACE_SCSI (0x00) +#define MPT2_IOCTL_INTERFACE_FC (0x01) +#define MPT2_IOCTL_INTERFACE_FC_IP (0x02) +#define MPT2_IOCTL_INTERFACE_SAS (0x03) +#define MPT2_IOCTL_INTERFACE_SAS2 (0x04) +#define MPT2_IOCTL_INTERFACE_SAS2_SSS6200 (0x05) +#define MPT3_IOCTL_INTERFACE_SAS3 (0x06) +#define MPT2_IOCTL_VERSION_LENGTH (32) + +/** + * struct mpt3_ioctl_iocinfo - generic controller info + * @hdr - generic header + * @adapter_type - type of adapter (spi, fc, sas) + * @port_number - port number + * @pci_id - PCI Id + * @hw_rev - hardware revision + * @sub_system_device - PCI subsystem Device ID + * @sub_system_vendor - PCI subsystem Vendor ID + * @rsvd0 - reserved + * @firmware_version - firmware version + * @bios_version - BIOS version + * @driver_version - driver version - 32 ASCII characters + * @rsvd1 - reserved + * @scsi_id - scsi id of adapter 0 + * @rsvd2 - reserved + * @pci_information - pci info (2nd revision) + */ +struct mpt3_ioctl_iocinfo { + struct mpt3_ioctl_header hdr; + uint32_t adapter_type; + uint32_t port_number; + uint32_t pci_id; + uint32_t hw_rev; + uint32_t 
subsystem_device; + uint32_t subsystem_vendor; + uint32_t rsvd0; + uint32_t firmware_version; + uint32_t bios_version; + uint8_t driver_version[MPT2_IOCTL_VERSION_LENGTH]; + uint8_t rsvd1; + uint8_t scsi_id; + uint16_t rsvd2; + struct mpt3_ioctl_pci_info pci_information; +}; + + +/* number of event log entries */ +#define MPT3SAS_CTL_EVENT_LOG_SIZE (50) + +/** + * struct mpt3_ioctl_eventquery - query event count and type + * @hdr - generic header + * @event_entries - number of events returned by get_event_report + * @rsvd - reserved + * @event_types - type of events currently being captured + */ +struct mpt3_ioctl_eventquery { + struct mpt3_ioctl_header hdr; + uint16_t event_entries; + uint16_t rsvd; + uint32_t event_types[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; +}; + +/** + * struct mpt3_ioctl_eventenable - enable/disable event capturing + * @hdr - generic header + * @event_types - toggle off/on type of events to be captured + */ +struct mpt3_ioctl_eventenable { + struct mpt3_ioctl_header hdr; + uint32_t event_types[4]; +}; + +#define MPT3_EVENT_DATA_SIZE (192) +/** + * struct MPT3_IOCTL_EVENTS - + * @event - the event that was reported + * @context - unique value for each event assigned by driver + * @data - event data returned in fw reply message + */ +struct MPT3_IOCTL_EVENTS { + uint32_t event; + uint32_t context; + uint8_t data[MPT3_EVENT_DATA_SIZE]; +}; + +/** + * struct mpt3_ioctl_eventreport - returing event log + * @hdr - generic header + * @event_data - (see struct MPT3_IOCTL_EVENTS) + */ +struct mpt3_ioctl_eventreport { + struct mpt3_ioctl_header hdr; + struct MPT3_IOCTL_EVENTS event_data[1]; +}; + +/** + * struct mpt3_ioctl_command - generic mpt firmware passthru ioctl + * @hdr - generic header + * @timeout - command timeout in seconds. (if zero then use driver default + * value). + * @reply_frame_buf_ptr - reply location + * @data_in_buf_ptr - destination for read + * @data_out_buf_ptr - data source for write + * @sense_data_ptr - sense data location + * @max_reply_bytes - maximum number of reply bytes to be sent to app. + * @data_in_size - number bytes for data transfer in (read) + * @data_out_size - number bytes for data transfer out (write) + * @max_sense_bytes - maximum number of bytes for auto sense buffers + * @data_sge_offset - offset in words from the start of the request message to + * the first SGL + * @mf[1]; + */ +struct mpt3_ioctl_command { + struct mpt3_ioctl_header hdr; + uint32_t timeout; + void __user *reply_frame_buf_ptr; + void __user *data_in_buf_ptr; + void __user *data_out_buf_ptr; + void __user *sense_data_ptr; + uint32_t max_reply_bytes; + uint32_t data_in_size; + uint32_t data_out_size; + uint32_t max_sense_bytes; + uint32_t data_sge_offset; + uint8_t mf[1]; +}; + +#ifdef CONFIG_COMPAT +struct mpt3_ioctl_command32 { + struct mpt3_ioctl_header hdr; + uint32_t timeout; + uint32_t reply_frame_buf_ptr; + uint32_t data_in_buf_ptr; + uint32_t data_out_buf_ptr; + uint32_t sense_data_ptr; + uint32_t max_reply_bytes; + uint32_t data_in_size; + uint32_t data_out_size; + uint32_t max_sense_bytes; + uint32_t data_sge_offset; + uint8_t mf[1]; +}; +#endif + +/** + * struct mpt3_ioctl_btdh_mapping - mapping info + * @hdr - generic header + * @id - target device identification number + * @bus - SCSI bus number that the target device exists on + * @handle - device handle for the target device + * @rsvd - reserved + * + * To obtain a bus/id the application sets + * handle to valid handle, and bus/id to 0xFFFF. 
+ * + * To obtain the device handle the application sets + * bus/id valid value, and the handle to 0xFFFF. + */ +struct mpt3_ioctl_btdh_mapping { + struct mpt3_ioctl_header hdr; + uint32_t id; + uint32_t bus; + uint16_t handle; + uint16_t rsvd; +}; + + + +/* application flags for mpt3_diag_register, mpt3_diag_query */ +#define MPT3_APP_FLAGS_APP_OWNED (0x0001) +#define MPT3_APP_FLAGS_BUFFER_VALID (0x0002) +#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004) + +/* flags for mpt3_diag_read_buffer */ +#define MPT3_FLAGS_REREGISTER (0x0001) + +#define MPT3_PRODUCT_SPECIFIC_DWORDS 23 + +/** + * struct mpt3_diag_register - application register with driver + * @hdr - generic header + * @reserved - + * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED + * @application_flags - misc flags + * @diagnostic_flags - specifies flags affecting command processing + * @product_specific - product specific information + * @requested_buffer_size - buffers size in bytes + * @unique_id - tag specified by application that is used to signal ownership + * of the buffer. + * + * This will allow the driver to setup any required buffers that will be + * needed by firmware to communicate with the driver. + */ +struct mpt3_diag_register { + struct mpt3_ioctl_header hdr; + uint8_t reserved; + uint8_t buffer_type; + uint16_t application_flags; + uint32_t diagnostic_flags; + uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS]; + uint32_t requested_buffer_size; + uint32_t unique_id; +}; + +/** + * struct mpt3_diag_unregister - application unregister with driver + * @hdr - generic header + * @unique_id - tag uniquely identifies the buffer to be unregistered + * + * This will allow the driver to cleanup any memory allocated for diag + * messages and to free up any resources. + */ +struct mpt3_diag_unregister { + struct mpt3_ioctl_header hdr; + uint32_t unique_id; +}; + +/** + * struct mpt3_diag_query - query relevant info associated with diag buffers + * @hdr - generic header + * @reserved - + * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED + * @application_flags - misc flags + * @diagnostic_flags - specifies flags affecting command processing + * @product_specific - product specific information + * @total_buffer_size - diag buffer size in bytes + * @driver_added_buffer_size - size of extra space appended to end of buffer + * @unique_id - unique id associated with this buffer. + * + * The application will send only buffer_type and unique_id. Driver will + * inspect unique_id first, if valid, fill in all the info. If unique_id is + * 0x00, the driver will return info specified by Buffer Type. + */ +struct mpt3_diag_query { + struct mpt3_ioctl_header hdr; + uint8_t reserved; + uint8_t buffer_type; + uint16_t application_flags; + uint32_t diagnostic_flags; + uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS]; + uint32_t total_buffer_size; + uint32_t driver_added_buffer_size; + uint32_t unique_id; +}; + +/** + * struct mpt3_diag_release - request to send Diag Release Message to firmware + * @hdr - generic header + * @unique_id - tag uniquely identifies the buffer to be released + * + * This allows ownership of the specified buffer to returned to the driver, + * allowing an application to read the buffer without fear that firmware is + * overwritting information in the buffer. 
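+ *
+ * Minimal userspace sketch (illustration only; the ioc number is
+ * hypothetical, and 0x7075900 simply matches the unique id this driver
+ * uses when it posts trace buffers from sysfs):
+ *
+ *    struct mpt3_diag_release karg = {0};
+ *    int fd = open("/dev/mpt3ctl", O_RDWR);
+ *    karg.hdr.ioc_number = 0;
+ *    karg.unique_id = 0x7075900;
+ *    ioctl(fd, MPT3DIAGRELEASE, &karg);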
+ */ +struct mpt3_diag_release { + struct mpt3_ioctl_header hdr; + uint32_t unique_id; +}; + +/** + * struct mpt3_diag_read_buffer - request for copy of the diag buffer + * @hdr - generic header + * @status - + * @reserved - + * @flags - misc flags + * @starting_offset - starting offset within drivers buffer where to start + * reading data at into the specified application buffer + * @bytes_to_read - number of bytes to copy from the drivers buffer into the + * application buffer starting at starting_offset. + * @unique_id - unique id associated with this buffer. + * @diagnostic_data - data payload + */ +struct mpt3_diag_read_buffer { + struct mpt3_ioctl_header hdr; + uint8_t status; + uint8_t reserved; + uint16_t flags; + uint32_t starting_offset; + uint32_t bytes_to_read; + uint32_t unique_id; + uint32_t diagnostic_data[1]; +}; + +#endif /* MPT3SAS_CTL_H_INCLUDED */ diff --git a/addons/mpt3sas/src/4.4.180/mpt3sas_debug.h b/addons/mpt3sas/src/4.4.180/mpt3sas_debug.h new file mode 100644 index 00000000..cceeb2c1 --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpt3sas_debug.h @@ -0,0 +1,206 @@ +/* + * Logging Support for MPT (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#ifndef MPT3SAS_DEBUG_H_INCLUDED +#define MPT3SAS_DEBUG_H_INCLUDED + +#define MPT_DEBUG 0x00000001 +#define MPT_DEBUG_MSG_FRAME 0x00000002 +#define MPT_DEBUG_SG 0x00000004 +#define MPT_DEBUG_EVENTS 0x00000008 +#define MPT_DEBUG_EVENT_WORK_TASK 0x00000010 +#define MPT_DEBUG_INIT 0x00000020 +#define MPT_DEBUG_EXIT 0x00000040 +#define MPT_DEBUG_FAIL 0x00000080 +#define MPT_DEBUG_TM 0x00000100 +#define MPT_DEBUG_REPLY 0x00000200 +#define MPT_DEBUG_HANDSHAKE 0x00000400 +#define MPT_DEBUG_CONFIG 0x00000800 +#define MPT_DEBUG_DL 0x00001000 +#define MPT_DEBUG_RESET 0x00002000 +#define MPT_DEBUG_SCSI 0x00004000 +#define MPT_DEBUG_IOCTL 0x00008000 +#define MPT_DEBUG_SAS 0x00020000 +#define MPT_DEBUG_TRANSPORT 0x00040000 +#define MPT_DEBUG_TASK_SET_FULL 0x00080000 + +#define MPT_DEBUG_TRIGGER_DIAG 0x00200000 + + +#define MPT_CHECK_LOGGING(IOC, CMD, BITS) \ +{ \ + if (IOC->logging_level & BITS) \ + CMD; \ +} + +/* + * debug macros + */ + +#define dprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG) + +#define dsgprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SG) + +#define devtprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENTS) + +#define dewtprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENT_WORK_TASK) + +#define dinitprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_INIT) + +#define dexitprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EXIT) + +#define dfailprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_FAIL) + +#define dtmprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TM) + +#define dreplyprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_REPLY) + +#define dhsprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_HANDSHAKE) + +#define dcprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CONFIG) + +#define ddlprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_DL) + +#define drsprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_RESET) + +#define dsprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SCSI) + +#define dctlprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_IOCTL) + +#define dsasprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS) + +#define dsastransport(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE) + +#define dmfprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_MSG_FRAME) + +#define dtsfprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TASK_SET_FULL) + +#define dtransportprintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRANSPORT) + +#define dTriggerDiagPrintk(IOC, CMD) \ + MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRIGGER_DIAG) + + + +/* inline functions for dumping debug data*/ + +/** + * _debug_dump_mf - print message frame contents + * @mpi_request: pointer to message frame + * @sz: number of dwords + */ +static inline void +_debug_dump_mf(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + pr_info("mf:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} +/** + * _debug_dump_reply - print message frame contents + * @mpi_request: pointer to message frame + * @sz: number of dwords + */ +static inline void +_debug_dump_reply(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + pr_info("reply:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} 
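+/*
+ * Usage sketch for the dump helpers above (illustration only; assumes
+ * mpt3sas_base.h is included first so struct MPT3SAS_ADAPTER is complete):
+ *
+ *    dctlprintk(ioc, _debug_dump_mf(mpi_request, ioc->request_sz / 4));
+ *
+ * Wrapping a dump in one of the logging macros means the relatively
+ * expensive hex dump only runs when the corresponding bit (here
+ * MPT_DEBUG_IOCTL) is set in ioc->logging_level.
+ */
+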
+/** + * _debug_dump_config - print config page contents + * @mpi_request: pointer to message frame + * @sz: number of dwords + */ +static inline void +_debug_dump_config(void *mpi_request, int sz) +{ + int i; + __le32 *mfp = (__le32 *)mpi_request; + + pr_info("config:\n\t"); + for (i = 0; i < sz; i++) { + if (i && ((i % 8) == 0)) + pr_info("\n\t"); + pr_info("%08x ", le32_to_cpu(mfp[i])); + } + pr_info("\n"); +} + +#endif /* MPT3SAS_DEBUG_H_INCLUDED */ diff --git a/addons/mpt3sas/src/4.4.180/mpt3sas_scsih.c b/addons/mpt3sas/src/4.4.180/mpt3sas_scsih.c new file mode 100644 index 00000000..7d67a68b --- /dev/null +++ b/addons/mpt3sas/src/4.4.180/mpt3sas_scsih.c @@ -0,0 +1,9107 @@ +/* + * Scsi Host Layer for MPT (Message Passing Technology) based controllers + * + * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c + * Copyright (C) 2012-2014 LSI Corporation + * Copyright (C) 2013-2014 Avago Technologies + * (mailto: MPT-FusionLinux.pdl@avagotech.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * NO WARRANTY + * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT + * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT, + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is + * solely responsible for determining the appropriateness of using and + * distributing the Program and assumes all risks associated with its + * exercise of rights under this Agreement, including but not limited to + * the risks and costs of program errors, damage to or loss of data, + * programs or equipment, and unavailability or interruption of operations. + + * DISCLAIMER OF LIABILITY + * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR + * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE + * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED + * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, + * USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mpt3sas_base.h" + +#define RAID_CHANNEL 1 +/* forward proto's */ +static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander); +static void _firmware_event_work(struct work_struct *work); + +static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device); +static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u8 retry_count, u8 is_pd); + +static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid); + +/* global parameters */ +LIST_HEAD(mpt3sas_ioc_list); +/* global ioc lock for list operations */ +DEFINE_SPINLOCK(gioc_lock); + +MODULE_AUTHOR(MPT3SAS_AUTHOR); +MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION); +MODULE_LICENSE("GPL"); +MODULE_VERSION(MPT3SAS_DRIVER_VERSION); +MODULE_ALIAS("mpt2sas"); + +/* local parameters */ +static u8 scsi_io_cb_idx = -1; +static u8 tm_cb_idx = -1; +static u8 ctl_cb_idx = -1; +static u8 base_cb_idx = -1; +static u8 port_enable_cb_idx = -1; +static u8 transport_cb_idx = -1; +static u8 scsih_cb_idx = -1; +static u8 config_cb_idx = -1; +static int mpt2_ids; +static int mpt3_ids; + +static u8 tm_tr_cb_idx = -1 ; +static u8 tm_tr_volume_cb_idx = -1 ; +static u8 tm_sas_control_cb_idx = -1; + +/* command line options */ +static u32 logging_level; +MODULE_PARM_DESC(logging_level, + " bits for enabling additional logging info (default=0)"); + + +static ushort max_sectors = 0xFFFF; +module_param(max_sectors, ushort, 0); +MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767"); + + +static int missing_delay[2] = {-1, -1}; +module_param_array(missing_delay, int, NULL, 0); +MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay"); + +/* scsi-mid layer global parmeter is max_report_luns, which is 511 */ +#define MPT3SAS_MAX_LUN (16895) +static u64 max_lun = MPT3SAS_MAX_LUN; +module_param(max_lun, ullong, 0); +MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); + +static ushort hbas_to_enumerate; +module_param(hbas_to_enumerate, ushort, 0); +MODULE_PARM_DESC(hbas_to_enumerate, + " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \ + 1 - enumerates only SAS 2.0 generation HBAs\n \ + 2 - enumerates only SAS 3.0 generation HBAs (default=0)"); + +/* diag_buffer_enable is bitwise + * bit 0 set = TRACE + * bit 1 set = SNAPSHOT + * bit 2 set = EXTENDED + * + * Either bit can be set, or both + */ +static int diag_buffer_enable = -1; +module_param(diag_buffer_enable, int, 0); +MODULE_PARM_DESC(diag_buffer_enable, + " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)"); +static int disable_discovery = -1; +module_param(disable_discovery, int, 0); +MODULE_PARM_DESC(disable_discovery, " disable discovery "); + + +/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */ +static int prot_mask = -1; +module_param(prot_mask, int, 0); +MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 "); + + +/* raid transport support */ +struct raid_template *mpt3sas_raid_template; +struct raid_template *mpt2sas_raid_template; + + +/** + * struct sense_info - common structure for obtaining sense keys + * @skey: sense key + * @asc: additional sense code + * @ascq: additional sense code qualifier + */ +struct sense_info { + u8 skey; + u8 asc; + u8 ascq; +}; + +#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB) +#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC) 
+#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD) +#define MPT3SAS_ABRT_TASK_SET (0xFFFE) +#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF) +/** + * struct fw_event_work - firmware event struct + * @list: link list framework + * @work: work object (ioc->fault_reset_work_q) + * @cancel_pending_work: flag set during reset handling + * @ioc: per adapter object + * @device_handle: device handle + * @VF_ID: virtual function id + * @VP_ID: virtual port id + * @ignore: flag meaning this event has been marked to ignore + * @event: firmware event MPI2_EVENT_XXX defined in mpt2_ioc.h + * @event_data: reply event data payload follows + * + * This object stored on ioc->fw_event_list. + */ +struct fw_event_work { + struct list_head list; + struct work_struct work; + u8 cancel_pending_work; + struct delayed_work delayed_work; + + struct MPT3SAS_ADAPTER *ioc; + u16 device_handle; + u8 VF_ID; + u8 VP_ID; + u8 ignore; + u16 event; + struct kref refcount; + char event_data[0] __aligned(4); +}; + +static void fw_event_work_free(struct kref *r) +{ + kfree(container_of(r, struct fw_event_work, refcount)); +} + +static void fw_event_work_get(struct fw_event_work *fw_work) +{ + kref_get(&fw_work->refcount); +} + +static void fw_event_work_put(struct fw_event_work *fw_work) +{ + kref_put(&fw_work->refcount, fw_event_work_free); +} + +static struct fw_event_work *alloc_fw_event_work(int len) +{ + struct fw_event_work *fw_event; + + fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC); + if (!fw_event) + return NULL; + + kref_init(&fw_event->refcount); + return fw_event; +} + +/** + * struct _scsi_io_transfer - scsi io transfer + * @handle: sas device handle (assigned by firmware) + * @is_raid: flag set for hidden raid components + * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE, + * @data_length: data transfer length + * @data_dma: dma pointer to data + * @sense: sense data + * @lun: lun number + * @cdb_length: cdb length + * @cdb: cdb contents + * @timeout: timeout for this command + * @VF_ID: virtual function id + * @VP_ID: virtual port id + * @valid_reply: flag set for reply message + * @sense_length: sense length + * @ioc_status: ioc status + * @scsi_state: scsi state + * @scsi_status: scsi staus + * @log_info: log information + * @transfer_length: data length transfer when there is a reply message + * + * Used for sending internal scsi commands to devices within this module. + * Refer to _scsi_send_scsi_io(). + */ +struct _scsi_io_transfer { + u16 handle; + u8 is_raid; + enum dma_data_direction dir; + u32 data_length; + dma_addr_t data_dma; + u8 sense[SCSI_SENSE_BUFFERSIZE]; + u32 lun; + u8 cdb_length; + u8 cdb[32]; + u8 timeout; + u8 VF_ID; + u8 VP_ID; + u8 valid_reply; + /* the following bits are only valid when 'valid_reply = 1' */ + u32 sense_length; + u16 ioc_status; + u8 scsi_state; + u8 scsi_status; + u32 log_info; + u32 transfer_length; +}; + +/** + * _scsih_set_debug_level - global setting of ioc->logging_level. + * + * Note: The logging levels are defined in mpt3sas_debug.h. 
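+ *
+ * Because the parameter is registered below with mode 0644, the level can
+ * also be changed at runtime; an illustrative update (the value is just an
+ * example bitmask from mpt3sas_debug.h):
+ *
+ *    echo 0x8000 > /sys/module/mpt3sas/parameters/logging_level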
+ */
+static int
+_scsih_set_debug_level(const char *val, struct kernel_param *kp)
+{
+    int ret = param_set_int(val, kp);
+    struct MPT3SAS_ADAPTER *ioc;
+
+    if (ret)
+        return ret;
+
+    pr_info("setting logging_level(0x%08x)\n", logging_level);
+    spin_lock(&gioc_lock);
+    list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+        ioc->logging_level = logging_level;
+    spin_unlock(&gioc_lock);
+    return 0;
+}
+module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
+    &logging_level, 0644);
+
+/**
+ * _scsih_srch_boot_sas_address - search based on sas_address
+ * @sas_address: sas address
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_sas_address(u64 sas_address,
+    Mpi2BootDeviceSasWwid_t *boot_device)
+{
+    return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_device_name - search based on device name
+ * @device_name: device name specified in the IDENTIFY frame
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_device_name(u64 device_name,
+    Mpi2BootDeviceDeviceName_t *boot_device)
+{
+    return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
+ * @enclosure_logical_id: enclosure logical id
+ * @slot_number: slot number
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
+    Mpi2BootDeviceEnclosureSlot_t *boot_device)
+{
+    return (enclosure_logical_id == le64_to_cpu(boot_device->
+        EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
+        SlotNumber)) ? 1 : 0;
+}
+
+/**
+ * _scsih_is_boot_device - search for matching boot device.
+ * @sas_address: sas address
+ * @device_name: device name specified in the IDENTIFY frame
+ * @enclosure_logical_id: enclosure logical id
+ * @slot_number: slot number
+ * @form: specifies boot device form
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static int
+_scsih_is_boot_device(u64 sas_address, u64 device_name,
+    u64 enclosure_logical_id, u16 slot, u8 form,
+    Mpi2BiosPage2BootDevice_t *boot_device)
+{
+    int rc = 0;
+
+    switch (form) {
+    case MPI2_BIOSPAGE2_FORM_SAS_WWID:
+        if (!sas_address)
+            break;
+        rc = _scsih_srch_boot_sas_address(
+            sas_address, &boot_device->SasWwid);
+        break;
+    case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
+        if (!enclosure_logical_id)
+            break;
+        rc = _scsih_srch_boot_encl_slot(
+            enclosure_logical_id,
+            slot, &boot_device->EnclosureSlot);
+        break;
+    case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
+        if (!device_name)
+            break;
+        rc = _scsih_srch_boot_device_name(
+            device_name, &boot_device->DeviceName);
+        break;
+    case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
+        break;
+    }
+
+    return rc;
+}
+
+/**
+ * _scsih_get_sas_address - set the sas_address for given device handle
+ * @handle: device handle
+ * @sas_address: sas address
+ *
+ * Returns 0 on success, non-zero on failure
+ */
+static int
+_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+    u64 *sas_address)
+{
+    Mpi2SasDevicePage0_t sas_device_pg0;
+    Mpi2ConfigReply_t mpi_reply;
+    u32 ioc_status;
+
+    *sas_address = 0;
+
+    if (handle <= ioc->sas_hba.num_phys) {
+        *sas_address = ioc->sas_hba.sas_address;
+        return 0;
+    }
+
+    if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+        MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
+            __FILE__, __LINE__, __func__);
+        return -ENXIO;
+    }
+
+    ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+    if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+        *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+        return 0;
+    }
+
+    /* we hit this because the given parent handle doesn't exist */
+    if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+        return -ENXIO;
+
+    /* else error case */
+    pr_err(MPT3SAS_FMT
+        "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
+        ioc->name, handle, ioc_status,
+        __FILE__, __LINE__, __func__);
+    return -EIO;
+}
+
+/**
+ * _scsih_determine_boot_device - determine boot device.
+ * @ioc: per adapter object
+ * @device: either sas_device or raid_device object
+ * @is_raid: [flag] 1 = raid object, 0 = sas object
+ *
+ * Determines whether this device should be the first reported device to
+ * scsi-ml or the sas transport; the purpose is persistent boot device
+ * support. There are primary, alternate, and current entries in bios page 2.
+ * The order of priority is primary, alternate, then current. This routine
+ * saves the corresponding device object and is_raid flag in the ioc object.
+ * The saved data is used later in _scsih_probe_boot_devices().
+ */ +static void +_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, + void *device, u8 is_raid) +{ + struct _sas_device *sas_device; + struct _raid_device *raid_device; + u64 sas_address; + u64 device_name; + u64 enclosure_logical_id; + u16 slot; + + /* only process this function when driver loads */ + if (!ioc->is_driver_loading) + return; + + /* no Bios, return immediately */ + if (!ioc->bios_pg3.BiosVersion) + return; + + if (!is_raid) { + sas_device = device; + sas_address = sas_device->sas_address; + device_name = sas_device->device_name; + enclosure_logical_id = sas_device->enclosure_logical_id; + slot = sas_device->slot; + } else { + raid_device = device; + sas_address = raid_device->wwid; + device_name = 0; + enclosure_logical_id = 0; + slot = 0; + } + + if (!ioc->req_boot_device.device) { + if (_scsih_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedBootDevice)) { + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "%s: req_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->req_boot_device.device = device; + ioc->req_boot_device.is_raid = is_raid; + } + } + + if (!ioc->req_alt_boot_device.device) { + if (_scsih_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.ReqAltBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.RequestedAltBootDevice)) { + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "%s: req_alt_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->req_alt_boot_device.device = device; + ioc->req_alt_boot_device.is_raid = is_raid; + } + } + + if (!ioc->current_boot_device.device) { + if (_scsih_is_boot_device(sas_address, device_name, + enclosure_logical_id, slot, + (ioc->bios_pg2.CurrentBootDeviceForm & + MPI2_BIOSPAGE2_FORM_MASK), + &ioc->bios_pg2.CurrentBootDevice)) { + dinitprintk(ioc, pr_info(MPT3SAS_FMT + "%s: current_boot_device(0x%016llx)\n", + ioc->name, __func__, + (unsigned long long)sas_address)); + ioc->current_boot_device.device = device; + ioc->current_boot_device.is_raid = is_raid; + } + } +} + +static struct _sas_device * +__mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc, + struct MPT3SAS_TARGET *tgt_priv) +{ + struct _sas_device *ret; + + assert_spin_locked(&ioc->sas_device_lock); + + ret = tgt_priv->sdev; + if (ret) + sas_device_get(ret); + + return ret; +} + +static struct _sas_device * +mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc, + struct MPT3SAS_TARGET *tgt_priv) +{ + struct _sas_device *ret; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + return ret; +} + + +struct _sas_device * +__mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address) +{ + struct _sas_device *sas_device; + + assert_spin_locked(&ioc->sas_device_lock); + + list_for_each_entry(sas_device, &ioc->sas_device_list, list) + if (sas_device->sas_address == sas_address) + goto found_device; + + list_for_each_entry(sas_device, &ioc->sas_device_init_list, list) + if (sas_device->sas_address == sas_address) + goto found_device; + + return NULL; + +found_device: + sas_device_get(sas_device); + return sas_device; +} + +/** + * mpt3sas_get_sdev_by_addr - sas device search + * @ioc: per adapter object + * @sas_address: sas address + * Context: Calling function should acquire 
ioc->sas_device_lock
+ *
+ * This searches for sas_device based on sas_address, then returns the
+ * sas_device object.
+ */
+struct _sas_device *
+mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
+	u64 sas_address)
+{
+	struct _sas_device *sas_device;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = __mpt3sas_get_sdev_by_addr(ioc,
+	    sas_address);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+	return sas_device;
+}
+
+static struct _sas_device *
+__mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+	struct _sas_device *sas_device;
+
+	assert_spin_locked(&ioc->sas_device_lock);
+
+	list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+		if (sas_device->handle == handle)
+			goto found_device;
+
+	list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+		if (sas_device->handle == handle)
+			goto found_device;
+
+	return NULL;
+
+found_device:
+	sas_device_get(sas_device);
+	return sas_device;
+}
+
+/**
+ * mpt3sas_get_sdev_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on handle, then returns the
+ * sas_device object.
+ */
+static struct _sas_device *
+mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+	struct _sas_device *sas_device;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+	return sas_device;
+}
+
+/**
+ * _scsih_sas_device_remove - remove sas_device from list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * If sas_device is on the list, remove it and decrement its reference count.
+ */
+static void
+_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
+	struct _sas_device *sas_device)
+{
+	unsigned long flags;
+
+	if (!sas_device)
+		return;
+	pr_info(MPT3SAS_FMT
+	    "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+	    ioc->name, sas_device->handle,
+	    (unsigned long long) sas_device->sas_address);
+
+	if (sas_device->enclosure_handle != 0)
+		pr_info(MPT3SAS_FMT
+		    "removing enclosure logical id(0x%016llx), slot(%d)\n",
+		    ioc->name, (unsigned long long)
+		    sas_device->enclosure_logical_id, sas_device->slot);
+
+	if (sas_device->connector_name[0] != '\0')
+		pr_info(MPT3SAS_FMT
+		    "removing enclosure level(0x%04x), connector name( %s)\n",
+		    ioc->name, sas_device->enclosure_level,
+		    sas_device->connector_name);
+
+	/*
+	 * The lock serializes access to the list, but we still need to verify
+	 * that nobody removed the entry while we were waiting on the lock.
+	 */
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	if (!list_empty(&sas_device->list)) {
+		list_del_init(&sas_device->list);
+		sas_device_put(sas_device);
+	}
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_device_remove_by_handle - removing device object by handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * Return nothing.
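+ *
+ * The lookup and list removal happen under sas_device_lock; the
+ * transport-level teardown in _scsih_remove_device() runs after the
+ * lock is dropped, using the reference taken by the lookup.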
+ */
+static void
+_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+	struct _sas_device *sas_device;
+	unsigned long flags;
+
+	if (ioc->shost_recovery)
+		return;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
+	if (sas_device) {
+		list_del_init(&sas_device->list);
+		sas_device_put(sas_device);
+	}
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	if (sas_device) {
+		_scsih_remove_device(ioc, sas_device);
+		sas_device_put(sas_device);
+	}
+}
+
+/**
+ * mpt3sas_device_remove_by_sas_address - removing device object by sas address
+ * @ioc: per adapter object
+ * @sas_address: device sas_address
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+	u64 sas_address)
+{
+	struct _sas_device *sas_device;
+	unsigned long flags;
+
+	if (ioc->shost_recovery)
+		return;
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
+	if (sas_device) {
+		list_del_init(&sas_device->list);
+		sas_device_put(sas_device);
+	}
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	if (sas_device) {
+		_scsih_remove_device(ioc, sas_device);
+		sas_device_put(sas_device);
+	}
+}
+
+/**
+ * _scsih_sas_device_add - insert sas_device to the list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Adding new object to the ioc->sas_device_list.
+ */
+static void
+_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
+	struct _sas_device *sas_device)
+{
+	unsigned long flags;
+
+	dewtprintk(ioc, pr_info(MPT3SAS_FMT
+	    "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+	    ioc->name, __func__, sas_device->handle,
+	    (unsigned long long)sas_device->sas_address));
+
+	if (sas_device->enclosure_handle != 0)
+		dewtprintk(ioc, pr_info(MPT3SAS_FMT
+		    "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+		    ioc->name, __func__, (unsigned long long)
+		    sas_device->enclosure_logical_id, sas_device->slot));
+
+	if (sas_device->connector_name[0] != '\0')
+		dewtprintk(ioc, pr_info(MPT3SAS_FMT
+		    "%s: enclosure level(0x%04x), connector name( %s)\n",
+		    ioc->name, __func__,
+		    sas_device->enclosure_level, sas_device->connector_name));
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device_get(sas_device);
+	list_add_tail(&sas_device->list, &ioc->sas_device_list);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+	if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
+	     sas_device->sas_address_parent)) {
+		_scsih_sas_device_remove(ioc, sas_device);
+	} else if (!sas_device->starget) {
+		/*
+		 * When async scanning is enabled, it's not possible to remove
+		 * devices while scanning is turned on due to an oops in
+		 * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
+		 */
+		if (!ioc->is_driver_loading) {
+			mpt3sas_transport_port_remove(ioc,
+			    sas_device->sas_address,
+			    sas_device->sas_address_parent);
+			_scsih_sas_device_remove(ioc, sas_device);
+		}
+	}
+}
+
+/**
+ * _scsih_sas_device_init_add - insert sas_device to the list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Adding new object at driver load time to the ioc->sas_device_init_list.
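+ *
+ * Unlike _scsih_sas_device_add(), this also runs
+ * _scsih_determine_boot_device() while still holding sas_device_lock,
+ * so boot-device selection happens as devices are discovered at load.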
+ */
+static void
+_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
+	struct _sas_device *sas_device)
+{
+	unsigned long flags;
+
+	dewtprintk(ioc, pr_info(MPT3SAS_FMT
+	    "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+	    __func__, sas_device->handle,
+	    (unsigned long long)sas_device->sas_address));
+
+	if (sas_device->enclosure_handle != 0)
+		dewtprintk(ioc, pr_info(MPT3SAS_FMT
+		    "%s: enclosure logical id(0x%016llx), slot( %d)\n",
+		    ioc->name, __func__, (unsigned long long)
+		    sas_device->enclosure_logical_id, sas_device->slot));
+
+	if (sas_device->connector_name[0] != '\0')
+		dewtprintk(ioc, pr_info(MPT3SAS_FMT
+		    "%s: enclosure level(0x%04x), connector name( %s)\n",
+		    ioc->name, __func__, sas_device->enclosure_level,
+		    sas_device->connector_name));
+
+	spin_lock_irqsave(&ioc->sas_device_lock, flags);
+	sas_device_get(sas_device);
+	list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
+	_scsih_determine_boot_device(ioc, sas_device, 0);
+	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_raid_device_find_by_id - raid device search
+ * @ioc: per adapter object
+ * @id: sas device target id
+ * @channel: sas device channel
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on target id, then returns the
+ * raid_device object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
+{
+	struct _raid_device *raid_device, *r;
+
+	r = NULL;
+	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+		if (raid_device->id == id && raid_device->channel == channel) {
+			r = raid_device;
+			goto out;
+		}
+	}
+
+ out:
+	return r;
+}
+
+/**
+ * mpt3sas_raid_device_find_by_handle - raid device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on handle, then returns the
+ * raid_device object.
+ */
+struct _raid_device *
+mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+	struct _raid_device *raid_device, *r;
+
+	r = NULL;
+	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+		if (raid_device->handle != handle)
+			continue;
+		r = raid_device;
+		goto out;
+	}
+
+ out:
+	return r;
+}
+
+/**
+ * _scsih_raid_device_find_by_wwid - raid device search
+ * @ioc: per adapter object
+ * @wwid: world wide identifier of the raid volume
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on wwid, then returns the
+ * raid_device object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
+{
+	struct _raid_device *raid_device, *r;
+
+	r = NULL;
+	list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+		if (raid_device->wwid != wwid)
+			continue;
+		r = raid_device;
+		goto out;
+	}
+
+ out:
+	return r;
+}
+
+/**
+ * _scsih_raid_device_add - add raid_device object
+ * @ioc: per adapter object
+ * @raid_device: raid_device object
+ *
+ * This is added to the raid_device_list linked list.
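+ *
+ * No reference counting is done here: the object is simply linked into
+ * ioc->raid_device_list under raid_device_lock, and freed later by
+ * _scsih_raid_device_remove().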
+ */
+static void
+_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
+	struct _raid_device *raid_device)
+{
+	unsigned long flags;
+
+	dewtprintk(ioc, pr_info(MPT3SAS_FMT
+	    "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+	    raid_device->handle, (unsigned long long)raid_device->wwid));
+
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	list_add_tail(&raid_device->list, &ioc->raid_device_list);
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_raid_device_remove - delete raid_device object
+ * @ioc: per adapter object
+ * @raid_device: raid_device object
+ *
+ */
+static void
+_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
+	struct _raid_device *raid_device)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	list_del(&raid_device->list);
+	kfree(raid_device);
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * mpt3sas_scsih_expander_find_by_handle - expander device search
+ * @ioc: per adapter object
+ * @handle: expander handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_node_lock
+ *
+ * This searches for expander device based on handle, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+	struct _sas_node *sas_expander, *r;
+
+	r = NULL;
+	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+		if (sas_expander->handle != handle)
+			continue;
+		r = sas_expander;
+		goto out;
+	}
+ out:
+	return r;
+}
+
+/**
+ * mpt3sas_scsih_expander_find_by_sas_address - expander device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * This searches for expander device based on sas_address, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+	u64 sas_address)
+{
+	struct _sas_node *sas_expander, *r;
+
+	r = NULL;
+	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+		if (sas_expander->sas_address != sas_address)
+			continue;
+		r = sas_expander;
+		goto out;
+	}
+ out:
+	return r;
+}
+
+/**
+ * _scsih_expander_node_add - insert expander device to the list.
+ * @ioc: per adapter object
+ * @sas_expander: the sas_node object
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Adding new object to the ioc->sas_expander_list.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
+	struct _sas_node *sas_expander)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ioc->sas_node_lock, flags);
+	list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
+	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_is_end_device - determines if device is an end device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
+ *
+ * Returns 1 if end device.
+ */
+static int
+_scsih_is_end_device(u32 device_info)
+{
+	if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
+	    ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
+	    (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
+	    (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * _scsih_scsi_lookup_get - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
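+ *
+ * smid values are 1-based, so the scsi_lookup array is indexed with
+ * smid - 1.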
+ */
+static struct scsi_cmnd *
+_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+	return ioc->scsi_lookup[smid - 1].scmd;
+}
+
+/**
+ * _scsih_scsi_lookup_get_clear - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the smid stored scmd pointer.
+ * Then clears the stored scmd pointer.
+ */
+static inline struct scsi_cmnd *
+_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+	unsigned long flags;
+	struct scsi_cmnd *scmd;
+
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	scmd = ioc->scsi_lookup[smid - 1].scmd;
+	ioc->scsi_lookup[smid - 1].scmd = NULL;
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+	return scmd;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_scmd - scmd lookup
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @scmd: pointer to scsi command object
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a scmd pointer in the scsi_lookup array,
+ * returning the relevant smid. A returned value of zero means invalid.
+ */
+static u16
+_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd
+	*scmd)
+{
+	u16 smid;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	smid = 0;
+	for (i = 0; i < ioc->scsiio_depth; i++) {
+		if (ioc->scsi_lookup[i].scmd == scmd) {
+			smid = ioc->scsi_lookup[i].smid;
+			goto out;
+		}
+	}
+ out:
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+	return smid;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_target - search for matching channel:id
+ * @ioc: per adapter object
+ * @id: target id
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
+	int channel)
+{
+	u8 found;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	found = 0;
+	for (i = 0 ; i < ioc->scsiio_depth; i++) {
+		if (ioc->scsi_lookup[i].scmd &&
+		    (ioc->scsi_lookup[i].scmd->device->id == id &&
+		    ioc->scsi_lookup[i].scmd->device->channel == channel)) {
+			found = 1;
+			goto out;
+		}
+	}
+ out:
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+	return found;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
+ * @ioc: per adapter object
+ * @id: target id
+ * @lun: lun number
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id:lun in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
+	unsigned int lun, int channel)
+{
+	u8 found;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+	found = 0;
+	for (i = 0 ; i < ioc->scsiio_depth; i++) {
+		if (ioc->scsi_lookup[i].scmd &&
+		    (ioc->scsi_lookup[i].scmd->device->id == id &&
+		    ioc->scsi_lookup[i].scmd->device->channel == channel &&
+		    ioc->scsi_lookup[i].scmd->device->lun == lun)) {
+			found = 1;
+			goto out;
+		}
+	}
+ out:
+	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+	return found;
+}
+
+/**
+ * scsih_change_queue_depth - setting device queue depth
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ *
+ * Returns queue depth.
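+ *
+ * For example, a bare SATA disk is capped at MPT3SAS_SATA_QUEUE_DEPTH
+ * even when a larger qdepth is requested; SSP targets and RAID volumes
+ * keep shost->can_queue as the upper bound.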
+ */ +int +scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + int max_depth; + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + unsigned long flags; + + max_depth = shost->can_queue; + + /* limit max device queue for SATA to 32 */ + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + goto not_sata; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + goto not_sata; + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) + goto not_sata; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); + if (sas_device) { + if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + max_depth = MPT3SAS_SATA_QUEUE_DEPTH; + + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + not_sata: + + if (!sdev->tagged_supported) + max_depth = 1; + if (qdepth > max_depth) + qdepth = max_depth; + return scsi_change_queue_depth(sdev, qdepth); +} + +/** + * scsih_target_alloc - target add routine + * @starget: scsi target struct + * + * Returns 0 if ok. Any other return is assumed to be an error and + * the device is ignored. + */ +int +scsih_target_alloc(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + unsigned long flags; + struct sas_rphy *rphy; + + sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data), + GFP_KERNEL); + if (!sas_target_priv_data) + return -ENOMEM; + + starget->hostdata = sas_target_priv_data; + sas_target_priv_data->starget = starget; + sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; + + /* RAID volumes */ + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, starget->id, + starget->channel); + if (raid_device) { + sas_target_priv_data->handle = raid_device->handle; + sas_target_priv_data->sas_address = raid_device->wwid; + sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME; + if (ioc->is_warpdrive) + sas_target_priv_data->raid_device = raid_device; + raid_device->starget = starget; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return 0; + } + + /* sas/sata devices */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + rphy = dev_to_rphy(starget->dev.parent); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + rphy->identify.sas_address); + + if (sas_device) { + sas_target_priv_data->handle = sas_device->handle; + sas_target_priv_data->sas_address = sas_device->sas_address; + sas_target_priv_data->sdev = sas_device; + sas_device->starget = starget; + sas_device->id = starget->id; + sas_device->channel = starget->channel; + if (test_bit(sas_device->handle, ioc->pd_handles)) + sas_target_priv_data->flags |= + MPT_TARGET_FLAGS_RAID_COMPONENT; + if (sas_device->fast_path) + sas_target_priv_data->flags |= MPT_TARGET_FASTPATH_IO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + return 0; +} + +/** + * scsih_target_destroy - target destroy routine + * @starget: scsi target struct + * + * Returns nothing. 
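+ *
+ * Two sas_device_put() calls are made below: one balancing the lookup
+ * done in this function, and one for the reference stored in
+ * sas_target_priv_data->sdev by scsih_target_alloc().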
+ */ +void +scsih_target_destroy(struct scsi_target *starget) +{ + struct Scsi_Host *shost = dev_to_shost(&starget->dev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + unsigned long flags; + struct sas_rphy *rphy; + + sas_target_priv_data = starget->hostdata; + if (!sas_target_priv_data) + return; + + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, starget->id, + starget->channel); + if (raid_device) { + raid_device->starget = NULL; + raid_device->sdev = NULL; + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + goto out; + } + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + rphy = dev_to_rphy(starget->dev.parent); + sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data); + if (sas_device && (sas_device->starget == starget) && + (sas_device->id == starget->id) && + (sas_device->channel == starget->channel)) + sas_device->starget = NULL; + + if (sas_device) { + /* + * Corresponding get() is in _scsih_target_alloc() + */ + sas_target_priv_data->sdev = NULL; + sas_device_put(sas_device); + + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + out: + kfree(sas_target_priv_data); + starget->hostdata = NULL; +} + +/** + * scsih_slave_alloc - device add routine + * @sdev: scsi device struct + * + * Returns 0 if ok. Any other return is assumed to be an error and + * the device is ignored. + */ +int +scsih_slave_alloc(struct scsi_device *sdev) +{ + struct Scsi_Host *shost; + struct MPT3SAS_ADAPTER *ioc; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_target *starget; + struct _raid_device *raid_device; + struct _sas_device *sas_device; + unsigned long flags; + + sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data), + GFP_KERNEL); + if (!sas_device_priv_data) + return -ENOMEM; + + sas_device_priv_data->lun = sdev->lun; + sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT; + + starget = scsi_target(sdev); + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->num_luns++; + sas_device_priv_data->sas_target = sas_target_priv_data; + sdev->hostdata = sas_device_priv_data; + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT)) + sdev->no_uld_attach = 1; + + shost = dev_to_shost(&starget->dev); + ioc = shost_priv(shost); + if (starget->channel == RAID_CHANNEL) { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_id(ioc, + starget->id, starget->channel); + if (raid_device) + raid_device->sdev = sdev; /* raid is single lun */ + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } + + if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + sas_target_priv_data->sas_address); + if (sas_device && (sas_device->starget == NULL)) { + sdev_printk(KERN_INFO, sdev, + "%s : sas_device->starget set to starget @ %d\n", + __func__, __LINE__); + sas_device->starget = starget; + } + + if (sas_device) + sas_device_put(sas_device); + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + + return 0; +} + +/** + * scsih_slave_destroy - device destroy routine + * @sdev: scsi device struct + * + * Returns nothing. 
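+ *
+ * The sas_device->starget back-pointer is cleared only when the last
+ * lun on the target goes away (num_luns reaches zero).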
+ */
+void
+scsih_slave_destroy(struct scsi_device *sdev)
+{
+	struct MPT3SAS_TARGET *sas_target_priv_data;
+	struct scsi_target *starget;
+	struct Scsi_Host *shost;
+	struct MPT3SAS_ADAPTER *ioc;
+	struct _sas_device *sas_device;
+	unsigned long flags;
+
+	if (!sdev->hostdata)
+		return;
+
+	starget = scsi_target(sdev);
+	sas_target_priv_data = starget->hostdata;
+	sas_target_priv_data->num_luns--;
+
+	shost = dev_to_shost(&starget->dev);
+	ioc = shost_priv(shost);
+
+	if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+		spin_lock_irqsave(&ioc->sas_device_lock, flags);
+		sas_device = __mpt3sas_get_sdev_from_target(ioc,
+				sas_target_priv_data);
+		if (sas_device && !sas_target_priv_data->num_luns)
+			sas_device->starget = NULL;
+
+		if (sas_device)
+			sas_device_put(sas_device);
+		spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+	}
+
+	kfree(sdev->hostdata);
+	sdev->hostdata = NULL;
+}
+
+/**
+ * _scsih_display_sata_capabilities - sata capabilities
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @sdev: scsi device struct
+ */
+static void
+_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
+	u16 handle, struct scsi_device *sdev)
+{
+	Mpi2ConfigReply_t mpi_reply;
+	Mpi2SasDevicePage0_t sas_device_pg0;
+	u32 ioc_status;
+	u16 flags;
+	u32 device_info;
+
+	if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		return;
+	}
+
+	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+	    MPI2_IOCSTATUS_MASK;
+	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		return;
+	}
+
+	flags = le16_to_cpu(sas_device_pg0.Flags);
+	device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+
+	sdev_printk(KERN_INFO, sdev,
+	    "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
+	    "sw_preserve(%s)\n",
+	    (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
+	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
+	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
+	    "n",
+	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
+	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
+	    (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
+}
+
+/*
+ * raid transport support -
+ * Enabled for SLES11 and newer, in older kernels the driver will panic when
+ * unloading the driver followed by a load - I believe that the subroutine
+ * raid_class_release() is not cleaning up properly.
+ */
+
+/**
+ * scsih_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+int
+scsih_is_raid(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
+
+	if (ioc->is_warpdrive)
+		return 0;
+	return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
+}
+
+/**
+ * scsih_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+void
+scsih_get_resync(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
+	static struct _raid_device *raid_device;
+	unsigned long flags;
+	Mpi2RaidVolPage0_t vol_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u32 volume_status_flags;
+	u8 percent_complete;
+	u16 handle;
+
+	percent_complete = 0;
+	handle = 0;
+	if (ioc->is_warpdrive)
+		goto out;
+
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+	    sdev->channel);
+	if (raid_device) {
+		handle = raid_device->handle;
+		percent_complete = raid_device->percent_complete;
+	}
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+	if (!handle)
+		goto out;
+
+	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+	     sizeof(Mpi2RaidVolPage0_t))) {
+		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		percent_complete = 0;
+		goto out;
+	}
+
+	volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+	if (!(volume_status_flags &
+	    MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
+		percent_complete = 0;
+
+ out:
+	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
+		raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
+	if (ioc->hba_mpi_version_belonged == MPI25_VERSION)
+		raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
+}
+
+/**
+ * scsih_get_state - get raid volume state
+ * @dev: the device struct object
+ */
+void
+scsih_get_state(struct device *dev)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
+	static struct _raid_device *raid_device;
+	unsigned long flags;
+	Mpi2RaidVolPage0_t vol_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u32 volstate;
+	enum raid_state state = RAID_STATE_UNKNOWN;
+	u16 handle = 0;
+
+	spin_lock_irqsave(&ioc->raid_device_lock, flags);
+	raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+	    sdev->channel);
+	if (raid_device)
+		handle = raid_device->handle;
+	spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+	if (!raid_device)
+		goto out;
+
+	if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+	     sizeof(Mpi2RaidVolPage0_t))) {
+		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		goto out;
+	}
+
+	volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+	if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
+		state = RAID_STATE_RESYNCING;
+		goto out;
+	}
+
+	switch (vol_pg0.VolumeState) {
+	case MPI2_RAID_VOL_STATE_OPTIMAL:
+	case MPI2_RAID_VOL_STATE_ONLINE:
+		state = RAID_STATE_ACTIVE;
+		break;
+	case MPI2_RAID_VOL_STATE_DEGRADED:
+		state = RAID_STATE_DEGRADED;
+		break;
+	case MPI2_RAID_VOL_STATE_FAILED:
+	case MPI2_RAID_VOL_STATE_MISSING:
+		state = RAID_STATE_OFFLINE;
+		break;
+	}
+ out:
+	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
+		raid_set_state(mpt2sas_raid_template, dev, state);
+	if (ioc->hba_mpi_version_belonged == MPI25_VERSION)
+		raid_set_state(mpt3sas_raid_template, dev, state);
+}
+
+/**
+ * _scsih_set_level - set raid level
+ * @ioc: per adapter object
+ * @sdev: scsi device struct
+ * @volume_type: volume type
+ */
+static void
+_scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
+	struct scsi_device *sdev, u8 volume_type)
+{
+	enum raid_level level = RAID_LEVEL_UNKNOWN;
+
+	switch (volume_type) {
+	case MPI2_RAID_VOL_TYPE_RAID0:
+		level = RAID_LEVEL_0;
+		break;
+	case MPI2_RAID_VOL_TYPE_RAID10:
+		level = RAID_LEVEL_10;
+		break;
+	case MPI2_RAID_VOL_TYPE_RAID1E:
+		level = RAID_LEVEL_1E;
+		break;
+	case MPI2_RAID_VOL_TYPE_RAID1:
+		level = RAID_LEVEL_1;
+		break;
+	}
+
+	if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
+		raid_set_level(mpt2sas_raid_template,
+		    &sdev->sdev_gendev, level);
+	if (ioc->hba_mpi_version_belonged == MPI25_VERSION)
+		raid_set_level(mpt3sas_raid_template,
+		    &sdev->sdev_gendev, level);
+}
+
+
+/**
+ * _scsih_get_volume_capabilities - volume capabilities
+ * @ioc: per adapter object
+ * @raid_device: the raid_device object
+ *
+ * Returns 0 for success, else 1
+ */
+static int
+_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
+	struct _raid_device *raid_device)
+{
+	Mpi2RaidVolPage0_t *vol_pg0;
+	Mpi2RaidPhysDiskPage0_t pd_pg0;
+	Mpi2SasDevicePage0_t sas_device_pg0;
+	Mpi2ConfigReply_t mpi_reply;
+	u16 sz;
+	u8 num_pds;
+
+	if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
+	    &num_pds)) || !num_pds) {
+		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+		    __func__));
+		return 1;
+	}
+
+	raid_device->num_pds = num_pds;
+	sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
+	    sizeof(Mpi2RaidVol0PhysDisk_t));
+	vol_pg0 = kzalloc(sz, GFP_KERNEL);
+	if (!vol_pg0) {
+		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+		    __func__));
+		return 1;
+	}
+
+	if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
+	     MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
+		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+		    "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+		    __func__));
+		kfree(vol_pg0);
+		return 1;
+	}
+
+	raid_device->volume_type = vol_pg0->VolumeType;
+
+	/* figure out what the underlying devices are by
+	 * obtaining the device_info bits for the 1st device
+	 */
+	if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+	    &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
+	    vol_pg0->PhysDisk[0].PhysDiskNum))) {
+		if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+		    &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+		    le16_to_cpu(pd_pg0.DevHandle)))) {
+			raid_device->device_info =
+			    le32_to_cpu(sas_device_pg0.DeviceInfo);
+		}
+	}
+
+	kfree(vol_pg0);
+	return 0;
+}
+
+/**
+ * _scsih_enable_tlr - setting TLR flags
+ * @ioc: per adapter object
+ * @sdev: scsi device struct
+ *
+ * Enabling Transaction Layer Retries for tape devices when
+ * vpd page 0x90 is present
+ *
+ */
+static void
+_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
+{
+
+	/* only for TAPE */
+	if (sdev->type != TYPE_TAPE)
+		return;
+
+	if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
+		return;
+
+	sas_enable_tlr(sdev);
+	sdev_printk(KERN_INFO, sdev, "TLR %s\n",
+	    sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
+	return;
+
+}
+
+/**
+ * scsih_slave_configure - device configure routine.
+ * @sdev: scsi device struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
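+ *
+ * Queue depth is chosen per transport: MPT3SAS_SAS_QUEUE_DEPTH for SSP,
+ * MPT3SAS_SATA_QUEUE_DEPTH for SATA/STP, and MPT3SAS_RAID_QUEUE_DEPTH
+ * for IM/IME/R10 volumes, then applied via scsih_change_queue_depth().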
+ */ +int +scsih_slave_configure(struct scsi_device *sdev) +{ + struct Scsi_Host *shost = sdev->host; + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + unsigned long flags; + int qdepth; + u8 ssp_target = 0; + char *ds = ""; + char *r_level = ""; + u16 handle, volume_handle = 0; + u64 volume_wwid = 0; + + qdepth = 1; + sas_device_priv_data = sdev->hostdata; + sas_device_priv_data->configured_lun = 1; + sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT; + sas_target_priv_data = sas_device_priv_data->sas_target; + handle = sas_target_priv_data->handle; + + /* raid volume handling */ + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) { + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (!raid_device) { + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, + __LINE__, __func__)); + return 1; + } + + if (_scsih_get_volume_capabilities(ioc, raid_device)) { + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, + __LINE__, __func__)); + return 1; + } + + /* + * WARPDRIVE: Initialize the required data for Direct IO + */ + mpt3sas_init_warpdrive_properties(ioc, raid_device); + + /* RAID Queue Depth Support + * IS volume = underlying qdepth of drive type, either + * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH + * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH) + */ + if (raid_device->device_info & + MPI2_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + ds = "SSP"; + } else { + qdepth = MPT3SAS_SATA_QUEUE_DEPTH; + if (raid_device->device_info & + MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + else + ds = "STP"; + } + + switch (raid_device->volume_type) { + case MPI2_RAID_VOL_TYPE_RAID0: + r_level = "RAID0"; + break; + case MPI2_RAID_VOL_TYPE_RAID1E: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + if (ioc->manu_pg10.OEMIdentifier && + (le32_to_cpu(ioc->manu_pg10.GenericFlags0) & + MFG10_GF0_R10_DISPLAY) && + !(raid_device->num_pds % 2)) + r_level = "RAID10"; + else + r_level = "RAID1E"; + break; + case MPI2_RAID_VOL_TYPE_RAID1: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + r_level = "RAID1"; + break; + case MPI2_RAID_VOL_TYPE_RAID10: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + r_level = "RAID10"; + break; + case MPI2_RAID_VOL_TYPE_UNKNOWN: + default: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + r_level = "RAIDX"; + break; + } + + if (!ioc->hide_ir_msg) + sdev_printk(KERN_INFO, sdev, + "%s: handle(0x%04x), wwid(0x%016llx)," + " pd_count(%d), type(%s)\n", + r_level, raid_device->handle, + (unsigned long long)raid_device->wwid, + raid_device->num_pds, ds); + + scsih_change_queue_depth(sdev, qdepth); + + /* raid transport support */ + if (!ioc->is_warpdrive) + _scsih_set_level(ioc, sdev, raid_device->volume_type); + return 0; + } + + /* non-raid handling */ + if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) { + if (mpt3sas_config_get_volume_handle(ioc, handle, + &volume_handle)) { + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + return 1; + } + if (volume_handle && mpt3sas_config_get_volume_wwid(ioc, + volume_handle, &volume_wwid)) { + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__)); + 
return 1; + } + } + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + sas_device_priv_data->sas_target->sas_address); + if (!sas_device) { + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + dfailprintk(ioc, pr_warn(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, + __func__)); + return 1; + } + + sas_device->volume_handle = volume_handle; + sas_device->volume_wwid = volume_wwid; + if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) { + qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + ssp_target = 1; + ds = "SSP"; + } else { + qdepth = MPT3SAS_SATA_QUEUE_DEPTH; + if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) + ds = "STP"; + else if (sas_device->device_info & + MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + ds = "SATA"; + } + + sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \ + "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n", + ds, handle, (unsigned long long)sas_device->sas_address, + sas_device->phy, (unsigned long long)sas_device->device_name); + if (sas_device->enclosure_handle != 0) + sdev_printk(KERN_INFO, sdev, + "%s: enclosure_logical_id(0x%016llx), slot(%d)\n", + ds, (unsigned long long) + sas_device->enclosure_logical_id, sas_device->slot); + if (sas_device->connector_name[0] != '\0') + sdev_printk(KERN_INFO, sdev, + "%s: enclosure level(0x%04x), connector name( %s)\n", + ds, sas_device->enclosure_level, + sas_device->connector_name); + + sas_device_put(sas_device); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (!ssp_target) + _scsih_display_sata_capabilities(ioc, handle, sdev); + + + scsih_change_queue_depth(sdev, qdepth); + + if (ssp_target) { + sas_read_port_mode_page(sdev); + _scsih_enable_tlr(ioc, sdev); + } + + return 0; +} + +/** + * scsih_bios_param - fetch head, sector, cylinder info for a disk + * @sdev: scsi device struct + * @bdev: pointer to block device context + * @capacity: device size (in 512 byte sectors) + * @params: three element array to place output: + * params[0] number of heads (max 255) + * params[1] number of sectors (max 63) + * params[2] number of cylinders + * + * Return nothing. + */ +int +scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev, + sector_t capacity, int params[]) +{ + int heads; + int sectors; + sector_t cylinders; + ulong dummy; + + heads = 64; + sectors = 32; + + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + + /* + * Handle extended translation size for logical drives + * > 1Gb + */ + if ((ulong)capacity >= 0x200000) { + heads = 255; + sectors = 63; + dummy = heads * sectors; + cylinders = capacity; + sector_div(cylinders, dummy); + } + + /* return result */ + params[0] = heads; + params[1] = sectors; + params[2] = cylinders; + + return 0; +} + +/** + * _scsih_response_code - translation of device response code + * @ioc: per adapter object + * @response_code: response code returned by the device + * + * Return nothing. 
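+ *
+ * Maps the MPI2_SCSITASKMGMT_RSP_* codes to human readable strings and
+ * logs them with pr_warn(); unrecognised codes are reported as "unknown".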
+ */
+static void
+_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
+{
+	char *desc;
+
+	switch (response_code) {
+	case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
+		desc = "task management request completed";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
+		desc = "invalid frame";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+		desc = "task management request not supported";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
+		desc = "task management request failed";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+		desc = "task management request succeeded";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+		desc = "invalid lun";
+		break;
+	case 0xA:
+		desc = "overlapped tag attempted";
+		break;
+	case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+		desc = "task queued, however not sent to target";
+		break;
+	default:
+		desc = "unknown";
+		break;
+	}
+	pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n",
+		ioc->name, response_code, desc);
+}
+
+/**
+ * _scsih_tm_done - tm completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using scsih_issue_tm.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ *        0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+	MPI2DefaultReply_t *mpi_reply;
+
+	if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
+		return 1;
+	if (ioc->tm_cmds.smid != smid)
+		return 1;
+	mpt3sas_base_flush_reply_queues(ioc);
+	ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
+	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+	if (mpi_reply) {
+		memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+		ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
+	}
+	ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
+	complete(&ioc->tm_cmds.done);
+	return 1;
+}
+
+/**
+ * mpt3sas_scsih_set_tm_flag - set per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During task management request, we need to freeze the device queue.
+ */
+void
+mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+	struct MPT3SAS_DEVICE *sas_device_priv_data;
+	struct scsi_device *sdev;
+	u8 skip = 0;
+
+	shost_for_each_device(sdev, ioc->shost) {
+		if (skip)
+			continue;
+		sas_device_priv_data = sdev->hostdata;
+		if (!sas_device_priv_data)
+			continue;
+		if (sas_device_priv_data->sas_target->handle == handle) {
+			sas_device_priv_data->sas_target->tm_busy = 1;
+			skip = 1;
+			ioc->ignore_loginfos = 1;
+		}
+	}
+}
+
+/**
+ * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * After the task management request completes, we unfreeze the device queue.
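+ *
+ * Mirrors mpt3sas_scsih_set_tm_flag(): finds the sdev whose target
+ * handle matches, clears tm_busy and re-enables loginfo reporting.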
+ */ +void +mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + u8 skip = 0; + + shost_for_each_device(sdev, ioc->shost) { + if (skip) + continue; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle == handle) { + sas_device_priv_data->sas_target->tm_busy = 0; + skip = 1; + ioc->ignore_loginfos = 0; + } + } +} + +/** + * mpt3sas_scsih_issue_tm - main routine for sending tm requests + * @ioc: per adapter struct + * @device_handle: device handle + * @channel: the channel assigned by the OS + * @id: the id assigned by the OS + * @lun: lun number + * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h) + * @smid_task: smid assigned to the task + * @timeout: timeout in seconds + * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF + * Context: user + * + * A generic API for sending task management requests to firmware. + * + * The callback index is set inside `ioc->tm_cb_idx`. + * + * Return SUCCESS or FAILED. + */ +int +mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel, + uint id, uint lun, u8 type, u16 smid_task, ulong timeout, + enum mutex_type m_type) +{ + Mpi2SCSITaskManagementRequest_t *mpi_request; + Mpi2SCSITaskManagementReply_t *mpi_reply; + u16 smid = 0; + u32 ioc_state; + unsigned long timeleft; + struct scsiio_tracker *scsi_lookup = NULL; + int rc; + u16 msix_task = 0; + + if (m_type == TM_MUTEX_ON) + mutex_lock(&ioc->tm_cmds.mutex); + if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) { + pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n", + __func__, ioc->name); + rc = FAILED; + goto err_out; + } + + if (ioc->shost_recovery || ioc->remove_host || + ioc->pci_error_recovery) { + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + rc = FAILED; + goto err_out; + } + + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + if (ioc_state & MPI2_DOORBELL_USED) { + dhsprintk(ioc, pr_info(MPT3SAS_FMT + "unexpected doorbell active!\n", ioc->name)); + rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + rc = (!rc) ? SUCCESS : FAILED; + goto err_out; + } + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_base_fault_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + rc = (!rc) ? 
SUCCESS : FAILED; + goto err_out; + } + + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = FAILED; + goto err_out; + } + + if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) + scsi_lookup = &ioc->scsi_lookup[smid_task - 1]; + + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n", + ioc->name, handle, type, smid_task)); + ioc->tm_cmds.status = MPT3_CMD_PENDING; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->tm_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); + memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = type; + mpi_request->TaskMID = cpu_to_le16(smid_task); + int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN); + mpt3sas_scsih_set_tm_flag(ioc, handle); + init_completion(&ioc->tm_cmds.done); + if ((type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) && + (scsi_lookup->msix_io < ioc->reply_queue_count)) + msix_task = scsi_lookup->msix_io; + else + msix_task = 0; + mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task); + timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); + if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SCSITaskManagementRequest_t)/4); + if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) { + rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + rc = (!rc) ? SUCCESS : FAILED; + ioc->tm_cmds.status = MPT3_CMD_NOT_USED; + mpt3sas_scsih_clear_tm_flag(ioc, handle); + goto err_out; + } + } + + if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) { + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); + mpi_reply = ioc->tm_cmds.reply; + dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \ + "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + if (ioc->logging_level & MPT_DEBUG_TM) { + _scsih_response_code(ioc, mpi_reply->ResponseCode); + if (mpi_reply->IOCStatus) + _debug_dump_mf(mpi_request, + sizeof(Mpi2SCSITaskManagementRequest_t)/4); + } + } + + switch (type) { + case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK: + rc = SUCCESS; + if (scsi_lookup->scmd == NULL) + break; + rc = FAILED; + break; + + case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET: + if (_scsih_scsi_lookup_find_by_target(ioc, id, channel)) + rc = FAILED; + else + rc = SUCCESS; + break; + case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET: + case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET: + if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel)) + rc = FAILED; + else + rc = SUCCESS; + break; + case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK: + rc = SUCCESS; + break; + default: + rc = FAILED; + break; + } + + mpt3sas_scsih_clear_tm_flag(ioc, handle); + ioc->tm_cmds.status = MPT3_CMD_NOT_USED; + if (m_type == TM_MUTEX_ON) + mutex_unlock(&ioc->tm_cmds.mutex); + + return rc; + + err_out: + if (m_type == TM_MUTEX_ON) + mutex_unlock(&ioc->tm_cmds.mutex); + return rc; +} + +/** + * _scsih_tm_display_info - displays info about the device + * @ioc: per adapter struct + * @scmd: pointer to scsi command object + * + * Called by task management callback handlers. 
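+ *
+ * For RAID components the owning volume handle/wwid is printed in
+ * addition to the device's own sas address and phy.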
+ */ +static void +_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd) +{ + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *priv_target = starget->hostdata; + struct _sas_device *sas_device = NULL; + unsigned long flags; + char *device_str = NULL; + + if (!priv_target) + return; + if (ioc->hide_ir_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; + + scsi_print_command(scmd); + if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { + starget_printk(KERN_INFO, starget, + "%s handle(0x%04x), %s wwid(0x%016llx)\n", + device_str, priv_target->handle, + device_str, (unsigned long long)priv_target->sas_address); + } else { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target); + if (sas_device) { + if (priv_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + starget_printk(KERN_INFO, starget, + "volume handle(0x%04x), " + "volume wwid(0x%016llx)\n", + sas_device->volume_handle, + (unsigned long long)sas_device->volume_wwid); + } + starget_printk(KERN_INFO, starget, + "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n", + sas_device->handle, + (unsigned long long)sas_device->sas_address, + sas_device->phy); + if (sas_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, + "enclosure_logical_id(0x%016llx), slot(%d)\n", + (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name) + starget_printk(KERN_INFO, starget, + "enclosure level(0x%04x),connector name(%s)\n", + sas_device->enclosure_level, + sas_device->connector_name); + + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } +} + +/** + * scsih_abort - eh threads main abort routine + * @scmd: pointer to scsi command object + * + * Returns SUCCESS if command aborted else FAILED + */ +int +scsih_abort(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + u16 smid; + u16 handle; + int r; + + sdev_printk(KERN_INFO, scmd->device, + "attempting task abort! scmd(%p)\n", scmd); + _scsih_tm_display_info(ioc, scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! scmd(%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* search for the command */ + smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd); + if (!smid) { + scmd->result = DID_RESET << 16; + r = SUCCESS; + goto out; + } + + /* for hidden raid components and volumes this is not supported */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT || + sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + + mpt3sas_halt_firmware(ioc); + + handle = sas_device_priv_data->sas_target->handle; + r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, + scmd->device->id, scmd->device->lun, + MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, TM_MUTEX_ON); + + out: + sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", + ((r == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); + return r; +} + +/** + * scsih_dev_reset - eh threads main device reset routine + * @scmd: pointer to scsi command object + * + * Returns SUCCESS if command aborted else FAILED + */ +int +scsih_dev_reset(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct _sas_device *sas_device = NULL; + u16 handle; + int r; + + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; + + sdev_printk(KERN_INFO, scmd->device, + "attempting device reset! scmd(%p)\n", scmd); + _scsih_tm_display_info(ioc, scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + sdev_printk(KERN_INFO, scmd->device, + "device been deleted! scmd(%p)\n", scmd); + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* for hidden raid components obtain the volume_handle */ + handle = 0; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + sas_device = mpt3sas_get_sdev_from_target(ioc, + target_priv_data); + if (sas_device) + handle = sas_device->volume_handle; + } else + handle = sas_device_priv_data->sas_target->handle; + + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + + r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, + scmd->device->id, scmd->device->lun, + MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, TM_MUTEX_ON); + + out: + sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", + ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + + if (sas_device) + sas_device_put(sas_device); + + return r; +} + +/** + * scsih_target_reset - eh threads main target reset routine + * @scmd: pointer to scsi command object + * + * Returns SUCCESS if command aborted else FAILED + */ +int +scsih_target_reset(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct _sas_device *sas_device = NULL; + u16 handle; + int r; + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; + + starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n", + scmd); + _scsih_tm_display_info(ioc, scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n", + scmd); + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + r = SUCCESS; + goto out; + } + + /* for hidden raid components obtain the volume_handle */ + handle = 0; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) { + sas_device = mpt3sas_get_sdev_from_target(ioc, + target_priv_data); + if (sas_device) + handle = sas_device->volume_handle; + } else + handle = sas_device_priv_data->sas_target->handle; + + if (!handle) { + scmd->result = DID_RESET << 16; + r = FAILED; + goto out; + } + + r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel, + scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, + 30, TM_MUTEX_ON); + + out: + starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", + ((r == SUCCESS) ? 
"SUCCESS" : "FAILED"), scmd); + + if (sas_device) + sas_device_put(sas_device); + + return r; +} + + +/** + * scsih_host_reset - eh threads main host reset routine + * @scmd: pointer to scsi command object + * + * Returns SUCCESS if command aborted else FAILED + */ +int +scsih_host_reset(struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); + int r, retval; + + pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n", + ioc->name, scmd); + scsi_print_command(scmd); + + if (ioc->is_driver_loading) { + pr_info(MPT3SAS_FMT "Blocking the host reset\n", + ioc->name); + r = FAILED; + goto out; + } + + retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + r = (retval < 0) ? FAILED : SUCCESS; +out: + pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n", + ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); + + return r; +} + +/** + * _scsih_fw_event_add - insert and queue up fw_event + * @ioc: per adapter object + * @fw_event: object describing the event + * Context: This function will acquire ioc->fw_event_lock. + * + * This adds the firmware event object into link list, then queues it up to + * be processed from user context. + * + * Return nothing. + */ +static void +_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) +{ + unsigned long flags; + + if (ioc->firmware_event_thread == NULL) + return; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + fw_event_work_get(fw_event); + INIT_LIST_HEAD(&fw_event->list); + list_add_tail(&fw_event->list, &ioc->fw_event_list); + INIT_WORK(&fw_event->work, _firmware_event_work); + fw_event_work_get(fw_event); + queue_work(ioc->firmware_event_thread, &fw_event->work); + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +/** + * _scsih_fw_event_del_from_list - delete fw_event from the list + * @ioc: per adapter object + * @fw_event: object describing the event + * Context: This function will acquire ioc->fw_event_lock. + * + * If the fw_event is on the fw_event_list, remove it and do a put. + * + * Return nothing. + */ +static void +_scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work + *fw_event) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + if (!list_empty(&fw_event->list)) { + list_del_init(&fw_event->list); + fw_event_work_put(fw_event); + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + + + /** + * mpt3sas_send_trigger_data_event - send event for processing trigger data + * @ioc: per adapter object + * @event_data: trigger event data + * + * Return nothing. + */ +void +mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc, + struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) +{ + struct fw_event_work *fw_event; + u16 sz; + + if (ioc->is_driver_loading) + return; + sz = sizeof(*event_data); + fw_event = alloc_fw_event_work(sz); + if (!fw_event) + return; + fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG; + fw_event->ioc = ioc; + memcpy(fw_event->event_data, event_data, sizeof(*event_data)); + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); +} + +/** + * _scsih_error_recovery_delete_devices - remove devices not responding + * @ioc: per adapter object + * + * Return nothing. 
+ */ +static void +_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc) +{ + struct fw_event_work *fw_event; + + if (ioc->is_driver_loading) + return; + fw_event = alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); +} + +/** + * mpt3sas_port_enable_complete - port enable completed (fake event) + * @ioc: per adapter object + * + * Return nothing. + */ +void +mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc) +{ + struct fw_event_work *fw_event; + + fw_event = alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); +} + +static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc) +{ + unsigned long flags; + struct fw_event_work *fw_event = NULL; + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + if (!list_empty(&ioc->fw_event_list)) { + fw_event = list_first_entry(&ioc->fw_event_list, + struct fw_event_work, list); + list_del_init(&fw_event->list); + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + + return fw_event; +} + +/** + * _scsih_fw_event_cleanup_queue - cleanup event queue + * @ioc: per adapter object + * + * Walk the firmware event queue, either killing timers, or waiting + * for outstanding events to complete + * + * Return nothing. + */ +static void +_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc) +{ + struct fw_event_work *fw_event; + + if (list_empty(&ioc->fw_event_list) || + !ioc->firmware_event_thread || in_interrupt()) + return; + + while ((fw_event = dequeue_next_fw_event(ioc))) { + /* + * Wait on the fw_event to complete. If this returns 1, then + * the event was never executed, and we need a put for the + * reference the delayed_work had on the fw_event. 
+ * + * If it did execute, we wait for it to finish, and the put will + * happen from _firmware_event_work() + */ + if (cancel_delayed_work_sync(&fw_event->delayed_work)) + fw_event_work_put(fw_event); + + fw_event_work_put(fw_event); + } +} + +/** + * _scsih_internal_device_block - block the sdev device + * @sdev: per device object + * @sas_device_priv_data: per device driver private data + * + * Make sure the device is blocked without error; if not, + * print an error. + */ +static void +_scsih_internal_device_block(struct scsi_device *sdev, + struct MPT3SAS_DEVICE *sas_device_priv_data) +{ + int r = 0; + + sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle); + sas_device_priv_data->block = 1; + + r = scsi_internal_device_block(sdev); + if (r == -EINVAL) + sdev_printk(KERN_WARNING, sdev, + "device_block failed with return(%d) for handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle, r); +} + +/** + * _scsih_internal_device_unblock - unblock the sdev device + * @sdev: per device object + * @sas_device_priv_data: per device driver private data + * Make sure the device is unblocked without error; if not, retry + * by blocking and then unblocking. + */ + +static void +_scsih_internal_device_unblock(struct scsi_device *sdev, + struct MPT3SAS_DEVICE *sas_device_priv_data) +{ + int r = 0; + + sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, " + "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle); + sas_device_priv_data->block = 0; + r = scsi_internal_device_unblock(sdev, SDEV_RUNNING); + if (r == -EINVAL) { + /* The device has been set to SDEV_RUNNING by SD layer during + * device addition but the request queue is still stopped by + * our earlier block call. We need to perform a block again + * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */ + + sdev_printk(KERN_WARNING, sdev, + "device_unblock failed with return(%d) for handle(0x%04x) " + "performing a block followed by an unblock\n", + sas_device_priv_data->sas_target->handle, r); + sas_device_priv_data->block = 1; + r = scsi_internal_device_block(sdev); + if (r) + sdev_printk(KERN_WARNING, sdev, "retried device_block " + "failed with return(%d) for handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle, r); + + sas_device_priv_data->block = 0; + r = scsi_internal_device_unblock(sdev, SDEV_RUNNING); + if (r) + sdev_printk(KERN_WARNING, sdev, "retried device_unblock" + " failed with return(%d) for handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle, r); + } +} + +/** + * _scsih_ublock_io_all_device - unblock every device + * @ioc: per adapter object + * + * change the device state from block to running + */ +static void +_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (!sas_device_priv_data->block) + continue; + + dewtprintk(ioc, sdev_printk(KERN_INFO, sdev, + "device_running, handle(0x%04x)\n", + sas_device_priv_data->sas_target->handle)); + _scsih_internal_device_unblock(sdev, sas_device_priv_data); + } +} + + +/** + * _scsih_ublock_io_device - prepare device to be deleted + * @ioc: per adapter object + * @sas_address: sas address + * + * unblock then put device in offline state + */ +static void +_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->sas_address + != sas_address) + continue; + if (sas_device_priv_data->block) + _scsih_internal_device_unblock(sdev, + sas_device_priv_data); + } +} + +/** + * _scsih_block_io_all_device - set the device state to SDEV_BLOCK + * @ioc: per adapter object + * + * During device pull we need to appropriately set the sdev state. + */ +static void +_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->block) + continue; + _scsih_internal_device_block(sdev, sas_device_priv_data); + } +} + +/** + * _scsih_block_io_device - set the device state to SDEV_BLOCK + * @ioc: per adapter object + * @handle: device handle + * + * During device pull we need to appropriately set the sdev state. + */ +static void +_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + struct _sas_device *sas_device; + + sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + if (!sas_device) + return; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + if (sas_device_priv_data->sas_target->handle != handle) + continue; + if (sas_device_priv_data->block) + continue; + if (sas_device->pend_sas_rphy_add) + continue; + _scsih_internal_device_block(sdev, sas_device_priv_data); + } + + sas_device_put(sas_device); +} + +/** + * _scsih_block_io_to_children_attached_to_ex + * @ioc: per adapter object + * @sas_expander: the sas_device object + * + * This routine sets the sdev state to SDEV_BLOCK for all devices + * attached to this expander. This function is called when the expander is + * pulled. + */ +static void +_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander) +{ + struct _sas_port *mpt3sas_port; + struct _sas_device *sas_device; + struct _sas_node *expander_sibling; + unsigned long flags; + + if (!sas_expander) + return; + + list_for_each_entry(mpt3sas_port, + &sas_expander->sas_port_list, port_list) { + if (mpt3sas_port->remote_identify.device_type == + SAS_END_DEVICE) { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + mpt3sas_port->remote_identify.sas_address); + if (sas_device) { + set_bit(sas_device->handle, + ioc->blocking_handles); + sas_device_put(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + } + } + + list_for_each_entry(mpt3sas_port, + &sas_expander->sas_port_list, port_list) { + + if (mpt3sas_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + mpt3sas_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) { + expander_sibling = + mpt3sas_scsih_expander_find_by_sas_address( + ioc, mpt3sas_port->remote_identify.sas_address); + _scsih_block_io_to_children_attached_to_ex(ioc, + expander_sibling); + } + } +} + +/** + * _scsih_block_io_to_children_attached_directly + * @ioc: per adapter object + * @event_data: topology change event data + * + * This routine sets the sdev state to SDEV_BLOCK for all directly + * attached devices during device pull.
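+ * Only PHYs whose reason code is DELAY_NOT_RESPONDING are blocked here.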
+ */ +static void +_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasTopologyChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_RC_MASK; + if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING) + _scsih_block_io_device(ioc, handle); + } +} + +/** + * _scsih_tm_tr_send - send task management request + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt time. + * + * This code is to initiate the device removal handshake protocol + * with controller firmware. This function will issue target reset + * using high priority request queue. It will send a sas iounit + * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion. + * + * This is designed to send multiple task management requests at the same + * time to the fifo. If the fifo is full, we will append the request, + * and process it in a future completion. + */ +static void +_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + Mpi2SCSITaskManagementRequest_t *mpi_request; + u16 smid; + struct _sas_device *sas_device = NULL; + struct MPT3SAS_TARGET *sas_target_priv_data = NULL; + u64 sas_address = 0; + unsigned long flags; + struct _tr_list *delayed_tr; + u32 ioc_state; + + if (ioc->remove_host) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host has been removed: handle(0x%04x)\n", + __func__, ioc->name, handle)); + return; + } else if (ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host in pci error recovery: handle(0x%04x)\n", + __func__, ioc->name, + handle)); + return; + } + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host is not operational: handle(0x%04x)\n", + __func__, ioc->name, + handle)); + return; + } + + /* if PD, then return */ + if (test_bit(handle, ioc->pd_handles)) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device && sas_device->starget && + sas_device->starget->hostdata) { + sas_target_priv_data = sas_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + sas_address = sas_device->sas_address; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (sas_target_priv_data) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, + (unsigned long long)sas_address)); + if (sas_device->enclosure_handle != 0) + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "setting delete flag:enclosure logical id(0x%016llx)," + " slot(%d)\n", ioc->name, (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot)); + if (sas_device->connector_name) + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "setting delete flag: enclosure level(0x%04x)," + " connector name( %s)\n", ioc->name, + sas_device->enclosure_level, + sas_device->connector_name)); + _scsih_ublock_io_device(ioc, sas_address); + sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE; + } + + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx); + if (!smid) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + if (!delayed_tr) + goto out; + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list,
&ioc->delayed_tr_list); + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + goto out; + } + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, smid, + ioc->tm_tr_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL); + +out: + if (sas_device) + sas_device_put(sas_device); +} + +/** + * _scsih_tm_tr_complete - + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt time. + * + * This is the target reset completion routine. + * This code is part of the code to initiate the device removal + * handshake protocol with controller firmware. + * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE) + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +static u8 +_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + u16 handle; + Mpi2SCSITaskManagementRequest_t *mpi_request_tm; + Mpi2SCSITaskManagementReply_t *mpi_reply = + mpt3sas_base_get_reply_virt_addr(ioc, reply); + Mpi2SasIoUnitControlRequest_t *mpi_request; + u16 smid_sas_ctrl; + u32 ioc_state; + + if (ioc->remove_host) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host has been removed\n", __func__, ioc->name)); + return 1; + } else if (ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host in pci error recovery\n", __func__, + ioc->name)); + return 1; + } + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host is not operational\n", __func__, ioc->name)); + return 1; + } + if (unlikely(!mpi_reply)) { + pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 1; + } + mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); + handle = le16_to_cpu(mpi_request_tm->DevHandle); + if (handle != le16_to_cpu(mpi_reply->DevHandle)) { + dewtprintk(ioc, pr_err(MPT3SAS_FMT + "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", + ioc->name, handle, + le16_to_cpu(mpi_reply->DevHandle), smid)); + return 0; + } + + mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT); + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), " + "loginfo(0x%08x), completed(%d)\n", ioc->name, + handle, smid, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + + smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx); + if (!smid_sas_ctrl) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + return 1; + } + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, smid_sas_ctrl, + ioc->tm_sas_control_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl); + memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t)); + 
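+ /* Build the SAS IO unit control request that removes the device from the firmware's view. */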
mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL; + mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE; + mpi_request->DevHandle = mpi_request_tm->DevHandle; + mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl); + + return _scsih_check_for_pending_tm(ioc, smid); +} + + +/** + * _scsih_sas_control_complete - completion routine + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt time. + * + * This is the sas iounit control completion routine. + * This code is part of the code to initiate the device removal + * handshake protocol with controller firmware. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +static u8 +_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + Mpi2SasIoUnitControlReply_t *mpi_reply = + mpt3sas_base_get_reply_virt_addr(ioc, reply); + + if (likely(mpi_reply)) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "sc_complete:handle(0x%04x), (open) " + "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid, + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo))); + } else { + pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + } + return 1; +} + +/** + * _scsih_tm_tr_volume_send - send target reset request for volumes + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt time. + * + * This is designed to send multiple task management requests at the same + * time to the fifo. If the fifo is full, we will append the request, + * and process it in a future completion. + */ +static void +_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + Mpi2SCSITaskManagementRequest_t *mpi_request; + u16 smid; + struct _tr_list *delayed_tr; + + if (ioc->shost_recovery || ioc->remove_host || + ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host reset in progress!\n", + __func__, ioc->name)); + return; + } + + smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx); + if (!smid) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + if (!delayed_tr) + return; + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list); + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "DELAYED:tr:handle(0x%04x), (open)\n", + ioc->name, handle)); + return; + } + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n", + ioc->name, handle, smid, + ioc->tm_tr_volume_cb_idx)); + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET; + mpt3sas_base_put_smid_hi_priority(ioc, smid, 0); +} + +/** + * _scsih_tm_volume_tr_complete - target reset completion + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * Context: interrupt time. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function.
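+ * Note: on a matching reply this feeds the next delayed target reset via _scsih_check_for_pending_tm().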
+ */ +static u8 +_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, + u8 msix_index, u32 reply) +{ + u16 handle; + Mpi2SCSITaskManagementRequest_t *mpi_request_tm; + Mpi2SCSITaskManagementReply_t *mpi_reply = + mpt3sas_base_get_reply_virt_addr(ioc, reply); + + if (ioc->shost_recovery || ioc->remove_host || + ioc->pci_error_recovery) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: host reset in progress!\n", + __func__, ioc->name)); + return 1; + } + if (unlikely(!mpi_reply)) { + pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 1; + } + + mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid); + handle = le16_to_cpu(mpi_request_tm->DevHandle); + if (handle != le16_to_cpu(mpi_reply->DevHandle)) { + dewtprintk(ioc, pr_err(MPT3SAS_FMT + "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n", + ioc->name, handle, + le16_to_cpu(mpi_reply->DevHandle), smid)); + return 0; + } + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), " + "loginfo(0x%08x), completed(%d)\n", ioc->name, + handle, smid, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo), + le32_to_cpu(mpi_reply->TerminationCount))); + + return _scsih_check_for_pending_tm(ioc, smid); +} + + +/** + * _scsih_check_for_pending_tm - check for pending task management + * @ioc: per adapter object + * @smid: system request message index + * + * This will check the delayed target reset list, and feed the + * next request. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. + */ +static u8 +_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid) +{ + struct _tr_list *delayed_tr; + + if (!list_empty(&ioc->delayed_tr_volume_list)) { + delayed_tr = list_entry(ioc->delayed_tr_volume_list.next, + struct _tr_list, list); + mpt3sas_base_free_smid(ioc, smid); + _scsih_tm_tr_volume_send(ioc, delayed_tr->handle); + list_del(&delayed_tr->list); + kfree(delayed_tr); + return 0; + } + + if (!list_empty(&ioc->delayed_tr_list)) { + delayed_tr = list_entry(ioc->delayed_tr_list.next, + struct _tr_list, list); + mpt3sas_base_free_smid(ioc, smid); + _scsih_tm_tr_send(ioc, delayed_tr->handle); + list_del(&delayed_tr->list); + kfree(delayed_tr); + return 0; + } + + return 1; +} + +/** + * _scsih_check_topo_delete_events - sanity check on topo events + * @ioc: per adapter object + * @event_data: the event data payload + * + * This routine was added to better handle cable pulls. + * + * This handles the case where the driver receives multiple expander + * add and delete events in a single shot. When there is a delete event + * the routine will void any pending add events waiting in the event queue. + * + * Return nothing.
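+ * Note: end devices reported as not responding also get target resets queued from here.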
+ */ +static void +_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasTopologyChangeList_t *event_data) +{ + struct fw_event_work *fw_event; + Mpi2EventDataSasTopologyChangeList_t *local_event_data; + u16 expander_handle; + struct _sas_node *sas_expander; + unsigned long flags; + int i, reason_code; + u16 handle; + + for (i = 0 ; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + reason_code = event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_RC_MASK; + if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING) + _scsih_tm_tr_send(ioc, handle); + } + + expander_handle = le16_to_cpu(event_data->ExpanderDevHandle); + if (expander_handle < ioc->sas_hba.num_phys) { + _scsih_block_io_to_children_attached_directly(ioc, event_data); + return; + } + if (event_data->ExpStatus == + MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) { + /* put expander attached devices into blocking state */ + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc, + expander_handle); + _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + do { + handle = find_first_bit(ioc->blocking_handles, + ioc->facts.MaxDevHandle); + if (handle < ioc->facts.MaxDevHandle) + _scsih_block_io_device(ioc, handle); + } while (test_and_clear_bit(handle, ioc->blocking_handles)); + } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING) + _scsih_block_io_to_children_attached_directly(ioc, event_data); + + if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING) + return; + + /* mark ignore flag for pending events */ + spin_lock_irqsave(&ioc->fw_event_lock, flags); + list_for_each_entry(fw_event, &ioc->fw_event_list, list) { + if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST || + fw_event->ignore) + continue; + local_event_data = (Mpi2EventDataSasTopologyChangeList_t *) + fw_event->event_data; + if (local_event_data->ExpStatus == + MPI2_EVENT_SAS_TOPO_ES_ADDED || + local_event_data->ExpStatus == + MPI2_EVENT_SAS_TOPO_ES_RESPONDING) { + if (le16_to_cpu(local_event_data->ExpanderDevHandle) == + expander_handle) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "setting ignoring flag\n", ioc->name)); + fw_event->ignore = 1; + } + } + } + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); +} + +/** + * _scsih_set_volume_delete_flag - setting volume delete flag + * @ioc: per adapter object + * @handle: device handle + * + * This returns nothing. + */ +static void +_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _raid_device *raid_device; + struct MPT3SAS_TARGET *sas_target_priv_data; + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + if (raid_device && raid_device->starget && + raid_device->starget->hostdata) { + sas_target_priv_data = + raid_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "setting delete flag: handle(0x%04x), " + "wwid(0x%016llx)\n", ioc->name, handle, + (unsigned long long) raid_device->wwid)); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +/** + * _scsih_set_volume_handle_for_tr - set handle for target reset to volume + * @handle: input handle + * @a: handle for volume a + * @b: handle for volume b + * + * IR firmware only supports two raid volumes. 
The purpose of this + * routine is to store the given non-zero handle into either a or b, + * using whichever slot is still free and skipping handles that are + * zero or already stored. + */ +static void +_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b) +{ + if (!handle || handle == *a || handle == *b) + return; + if (!*a) + *a = handle; + else if (!*b) + *b = handle; +} + +/** + * _scsih_check_ir_config_unhide_events - check for UNHIDE events + * @ioc: per adapter object + * @event_data: the event data payload + * Context: interrupt time. + * + * This routine will send target reset to volume, followed by target + * resets to the PDs. This is called when a PD has been removed, or + * volume has been deleted or removed. When the target reset is sent + * to volume, the PD target resets need to be queued to start upon + * completion of the volume target reset. + * + * Return nothing. + */ +static void +_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrConfigChangeList_t *event_data) +{ + Mpi2EventIrConfigElement_t *element; + int i; + u16 handle, volume_handle, a, b; + struct _tr_list *delayed_tr; + + a = 0; + b = 0; + + if (ioc->is_warpdrive) + return; + + /* Volume Resets for Deleted or Removed */ + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) + continue; + if (element->ReasonCode == + MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED || + element->ReasonCode == + MPI2_EVENT_IR_CHANGE_RC_REMOVED) { + volume_handle = le16_to_cpu(element->VolDevHandle); + _scsih_set_volume_delete_flag(ioc, volume_handle); + _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); + } + } + + /* Volume Resets for UNHIDE events */ + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) + continue; + if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) { + volume_handle = le16_to_cpu(element->VolDevHandle); + _scsih_set_volume_handle_for_tr(volume_handle, &a, &b); + } + } + + if (a) + _scsih_tm_tr_volume_send(ioc, a); + if (b) + _scsih_tm_tr_volume_send(ioc, b); + + /* PD target resets */ + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + for (i = 0; i < event_data->NumElements; i++, element++) { + if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE) + continue; + handle = le16_to_cpu(element->PhysDiskDevHandle); + volume_handle = le16_to_cpu(element->VolDevHandle); + clear_bit(handle, ioc->pd_handles); + if (!volume_handle) + _scsih_tm_tr_send(ioc, handle); + else if (volume_handle == a || volume_handle == b) { + delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC); + BUG_ON(!delayed_tr); + INIT_LIST_HEAD(&delayed_tr->list); + delayed_tr->handle = handle; + list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list); + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name, + handle)); + } else + _scsih_tm_tr_send(ioc, handle); + } +} + + +/** + * _scsih_check_volume_delete_events - set delete flag for volumes + * @ioc: per adapter object + * @event_data: the event data payload + * Context: interrupt time. + * + * This will handle the case when the cable connected to an entire volume is + * pulled. We will take care of setting the deleted flag so normal IO will + * not be sent. + * + * Return nothing.
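+ * Note: a volume is flagged deleted once its state changes to MISSING or FAILED.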
+ */ +static void +_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrVolume_t *event_data) +{ + u32 state; + + if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) + return; + state = le32_to_cpu(event_data->NewValue); + if (state == MPI2_RAID_VOL_STATE_MISSING || state == + MPI2_RAID_VOL_STATE_FAILED) + _scsih_set_volume_delete_flag(ioc, + le16_to_cpu(event_data->VolDevHandle)); +} + +/** + * _scsih_temp_threshold_events - display temperature threshold exceeded events + * @ioc: per adapter object + * @event_data: the temp threshold event data + * Context: interrupt time. + * + * Return nothing. + */ +static void +_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataTemperature_t *event_data) +{ + if (ioc->temp_sensors_count >= event_data->SensorNum) { + pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s" + " exceeded for Sensor: %d !!!\n", ioc->name, + ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ", + ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ", + ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ", + ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ", + event_data->SensorNum); + pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n", + ioc->name, event_data->CurrentTemperature); + } +} + +static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending) +{ + struct MPT3SAS_DEVICE *priv = scmd->device->hostdata; + + if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16) + return 0; + + if (pending) + return test_and_set_bit(0, &priv->ata_command_pending); + + clear_bit(0, &priv->ata_command_pending); + return 0; +} + +/** + * _scsih_flush_running_cmds - completing outstanding commands. + * @ioc: per adapter object + * + * The flushing out of all pending scmd commands following host reset, + * where all IO is dropped to the floor. + * + * Return nothing. + */ +static void +_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc) +{ + struct scsi_cmnd *scmd; + u16 smid; + u16 count = 0; + + for (smid = 1; smid <= ioc->scsiio_depth; smid++) { + scmd = _scsih_scsi_lookup_get_clear(ioc, smid); + if (!scmd) + continue; + count++; + _scsih_set_satl_pending(scmd, false); + mpt3sas_base_free_smid(ioc, smid); + scsi_dma_unmap(scmd); + if (ioc->pci_error_recovery) + scmd->result = DID_NO_CONNECT << 16; + else + scmd->result = DID_RESET << 16; + scmd->scsi_done(scmd); + } + dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n", + ioc->name, count)); +} + +/** + * _scsih_setup_eedp - setup MPI request for EEDP transfer + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * @mpi_request: pointer to the SCSI_IO request message frame + * + * Supporting protection 1 and 3.
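+ * Note: type 2 is programmed with the same ref/guard checks as type 1 below; type 3 enables guard checking only.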
+ * + * Returns nothing + */ +static void +_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + Mpi2SCSIIORequest_t *mpi_request) +{ + u16 eedp_flags; + unsigned char prot_op = scsi_get_prot_op(scmd); + unsigned char prot_type = scsi_get_prot_type(scmd); + Mpi25SCSIIORequest_t *mpi_request_3v = + (Mpi25SCSIIORequest_t *)mpi_request; + + if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL) + return; + + if (prot_op == SCSI_PROT_READ_STRIP) + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP; + else if (prot_op == SCSI_PROT_WRITE_INSERT) + eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; + else + return; + + switch (prot_type) { + case SCSI_PROT_DIF_TYPE1: + case SCSI_PROT_DIF_TYPE2: + + /* + * enable ref/guard checking + * auto increment ref tag + */ + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | + MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + mpi_request->CDB.EEDP32.PrimaryReferenceTag = + cpu_to_be32(scsi_get_lba(scmd)); + break; + + case SCSI_PROT_DIF_TYPE3: + + /* + * enable guard checking + */ + eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; + + break; + } + + mpi_request_3v->EEDPBlockSize = + cpu_to_le16(scmd->device->sector_size); + mpi_request->EEDPFlags = cpu_to_le16(eedp_flags); +} + +/** + * _scsih_eedp_error_handling - return sense code for EEDP errors + * @scmd: pointer to scsi command object + * @ioc_status: ioc status + * + * Returns nothing + */ +static void +_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) +{ + u8 ascq; + + switch (ioc_status) { + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + ascq = 0x01; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + ascq = 0x02; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + ascq = 0x03; + break; + default: + ascq = 0x00; + break; + } + scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10, + ascq); + scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) | + SAM_STAT_CHECK_CONDITION; +} + +/** + * scsih_qcmd - main scsi request entry point + * @shost: pointer to the SCSI host object + * @scmd: pointer to scsi command object + * + * The callback index is set inside `ioc->scsi_io_cb_idx`. + * + * Returns 0 on success.
If there's a failure, return either: + * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or + * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full + */ +int +scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct _raid_device *raid_device; + Mpi2SCSIIORequest_t *mpi_request; + u32 mpi_control; + u16 smid; + u16 handle; + + if (ioc->logging_level & MPT_DEBUG_SCSI) + scsi_print_command(scmd); + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + return 0; + } + + if (ioc->pci_error_recovery || ioc->remove_host) { + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + return 0; + } + + sas_target_priv_data = sas_device_priv_data->sas_target; + + /* invalid device handle */ + handle = sas_target_priv_data->handle; + if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) { + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + return 0; + } + + + /* host recovery or link resets sent via IOCTLs */ + if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress) + return SCSI_MLQUEUE_HOST_BUSY; + + /* device has been deleted */ + else if (sas_target_priv_data->deleted) { + scmd->result = DID_NO_CONNECT << 16; + scmd->scsi_done(scmd); + return 0; + /* device busy with task management */ + } else if (sas_target_priv_data->tm_busy || + sas_device_priv_data->block) + return SCSI_MLQUEUE_DEVICE_BUSY; + + /* + * Bug workaround for firmware SATL handling. The loop + * is based on atomic operations and ensures consistency + * since we're lockless at this point. + */ + do { + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { + scmd->result = SAM_STAT_BUSY; + scmd->scsi_done(scmd); + return 0; + } + } while (_scsih_set_satl_pending(scmd, true)); + + if (scmd->sc_data_direction == DMA_FROM_DEVICE) + mpi_control = MPI2_SCSIIO_CONTROL_READ; + else if (scmd->sc_data_direction == DMA_TO_DEVICE) + mpi_control = MPI2_SCSIIO_CONTROL_WRITE; + else + mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER; + + /* set tags */ + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; + + /* Make sure Device is not raid volume. + * We do not expose raid functionality to upper layer for warpdrive.
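+ * Note: TLR is turned on below only for non-RAID, TLR-enabled devices whose CDB length is not 32 bytes.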
+ */ + if (!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev) + && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32) + mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON; + + smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + _scsih_set_satl_pending(scmd, false); + goto out; + } + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t)); + _scsih_setup_eedp(ioc, scmd, mpi_request); + + if (scmd->cmd_len == 32) + mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT; + mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) + mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; + else + mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; + mpi_request->DevHandle = cpu_to_le16(handle); + mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); + mpi_request->Control = cpu_to_le32(mpi_control); + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len); + mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR; + mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; + mpi_request->SenseBufferLowAddress = + mpt3sas_base_get_sense_buffer_dma(ioc, smid); + mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4; + int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *) + mpi_request->LUN); + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + + if (mpi_request->DataLength) { + if (ioc->build_sg_scmd(ioc, scmd, smid)) { + mpt3sas_base_free_smid(ioc, smid); + _scsih_set_satl_pending(scmd, false); + goto out; + } + } else + ioc->build_zero_len_sge(ioc, &mpi_request->SGL); + + raid_device = sas_target_priv_data->raid_device; + if (raid_device && raid_device->direct_io_enabled) + mpt3sas_setup_direct_io(ioc, scmd, raid_device, mpi_request, + smid); + + if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) { + if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) { + mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len | + MPI25_SCSIIO_IOFLAGS_FAST_PATH); + mpt3sas_base_put_smid_fast_path(ioc, smid, handle); + } else + mpt3sas_base_put_smid_scsi_io(ioc, smid, + le16_to_cpu(mpi_request->DevHandle)); + } else + mpt3sas_base_put_smid_default(ioc, smid); + return 0; + + out: + return SCSI_MLQUEUE_HOST_BUSY; +} + +/** + * _scsih_normalize_sense - normalize descriptor and fixed format sense data + * @sense_buffer: sense data returned by target + * @data: normalized skey/asc/ascq + * + * Return nothing. + */ +static void +_scsih_normalize_sense(char *sense_buffer, struct sense_info *data) +{ + if ((sense_buffer[0] & 0x7F) >= 0x72) { + /* descriptor format */ + data->skey = sense_buffer[1] & 0x0F; + data->asc = sense_buffer[2]; + data->ascq = sense_buffer[3]; + } else { + /* fixed format */ + data->skey = sense_buffer[2] & 0x0F; + data->asc = sense_buffer[12]; + data->ascq = sense_buffer[13]; + } +} + +/** + * _scsih_scsi_ioc_info - translate a non-successful SCSI_IO request + * @ioc: per adapter object + * @scmd: pointer to scsi command object + * @mpi_reply: reply mf payload returned from firmware + * + * scsi_status - SCSI Status code returned from target device + * scsi_state - state info associated with SCSI_IO determined by ioc + * ioc_status - ioc supplied status info + * + * Return nothing.
+ */ +static void +_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, + Mpi2SCSIIOReply_t *mpi_reply, u16 smid) +{ + u32 response_info; + u8 *response_bytes; + u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & + MPI2_IOCSTATUS_MASK; + u8 scsi_state = mpi_reply->SCSIState; + u8 scsi_status = mpi_reply->SCSIStatus; + char *desc_ioc_state = NULL; + char *desc_scsi_status = NULL; + char *desc_scsi_state = ioc->tmp_string; + u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + struct _sas_device *sas_device = NULL; + struct scsi_target *starget = scmd->device->sdev_target; + struct MPT3SAS_TARGET *priv_target = starget->hostdata; + char *device_str = NULL; + + if (!priv_target) + return; + if (ioc->hide_ir_msg) + device_str = "WarpDrive"; + else + device_str = "volume"; + + if (log_info == 0x31170000) + return; + + switch (ioc_status) { + case MPI2_IOCSTATUS_SUCCESS: + desc_ioc_state = "success"; + break; + case MPI2_IOCSTATUS_INVALID_FUNCTION: + desc_ioc_state = "invalid function"; + break; + case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR: + desc_ioc_state = "scsi recovered error"; + break; + case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE: + desc_ioc_state = "scsi invalid dev handle"; + break; + case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + desc_ioc_state = "scsi device not there"; + break; + case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN: + desc_ioc_state = "scsi data overrun"; + break; + case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: + desc_ioc_state = "scsi data underrun"; + break; + case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR: + desc_ioc_state = "scsi io data error"; + break; + case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR: + desc_ioc_state = "scsi protocol error"; + break; + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: + desc_ioc_state = "scsi task terminated"; + break; + case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + desc_ioc_state = "scsi residual mismatch"; + break; + case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED: + desc_ioc_state = "scsi task mgmt failed"; + break; + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: + desc_ioc_state = "scsi ioc terminated"; + break; + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: + desc_ioc_state = "scsi ext terminated"; + break; + case MPI2_IOCSTATUS_EEDP_GUARD_ERROR: + desc_ioc_state = "eedp guard error"; + break; + case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR: + desc_ioc_state = "eedp ref tag error"; + break; + case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR: + desc_ioc_state = "eedp app tag error"; + break; + default: + desc_ioc_state = "unknown"; + break; + } + + switch (scsi_status) { + case MPI2_SCSI_STATUS_GOOD: + desc_scsi_status = "good"; + break; + case MPI2_SCSI_STATUS_CHECK_CONDITION: + desc_scsi_status = "check condition"; + break; + case MPI2_SCSI_STATUS_CONDITION_MET: + desc_scsi_status = "condition met"; + break; + case MPI2_SCSI_STATUS_BUSY: + desc_scsi_status = "busy"; + break; + case MPI2_SCSI_STATUS_INTERMEDIATE: + desc_scsi_status = "intermediate"; + break; + case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET: + desc_scsi_status = "intermediate condmet"; + break; + case MPI2_SCSI_STATUS_RESERVATION_CONFLICT: + desc_scsi_status = "reservation conflict"; + break; + case MPI2_SCSI_STATUS_COMMAND_TERMINATED: + desc_scsi_status = "command terminated"; + break; + case MPI2_SCSI_STATUS_TASK_SET_FULL: + desc_scsi_status = "task set full"; + break; + case MPI2_SCSI_STATUS_ACA_ACTIVE: + desc_scsi_status = "aca active"; + break; + case MPI2_SCSI_STATUS_TASK_ABORTED: + desc_scsi_status = "task aborted"; + break; + default: + desc_scsi_status = "unknown"; + break; + } + + desc_scsi_state[0] = '\0'; 
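+ /* Build a human-readable scsi_state string from the individual state flag bits. */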
+ if (!scsi_state) + desc_scsi_state = " "; + if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) + strcat(desc_scsi_state, "response info "); + if (scsi_state & MPI2_SCSI_STATE_TERMINATED) + strcat(desc_scsi_state, "state terminated "); + if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS) + strcat(desc_scsi_state, "no status "); + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED) + strcat(desc_scsi_state, "autosense failed "); + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) + strcat(desc_scsi_state, "autosense valid "); + + scsi_print_command(scmd); + + if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) { + pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name, + device_str, (unsigned long long)priv_target->sas_address); + } else { + sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target); + if (sas_device) { + pr_warn(MPT3SAS_FMT + "\tsas_address(0x%016llx), phy(%d)\n", + ioc->name, (unsigned long long) + sas_device->sas_address, sas_device->phy); + if (sas_device->enclosure_handle != 0) + pr_warn(MPT3SAS_FMT + "\tenclosure_logical_id(0x%016llx)," + "slot(%d)\n", ioc->name, + (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0]) + pr_warn(MPT3SAS_FMT + "\tenclosure level(0x%04x)," + " connector name( %s)\n", ioc->name, + sas_device->enclosure_level, + sas_device->connector_name); + + sas_device_put(sas_device); + } + } + + pr_warn(MPT3SAS_FMT + "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n", + ioc->name, le16_to_cpu(mpi_reply->DevHandle), + desc_ioc_state, ioc_status, smid); + pr_warn(MPT3SAS_FMT + "\trequest_len(%d), underflow(%d), resid(%d)\n", + ioc->name, scsi_bufflen(scmd), scmd->underflow, + scsi_get_resid(scmd)); + pr_warn(MPT3SAS_FMT + "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->TaskTag), + le32_to_cpu(mpi_reply->TransferCount), scmd->result); + pr_warn(MPT3SAS_FMT + "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n", + ioc->name, desc_scsi_status, + scsi_status, desc_scsi_state, scsi_state); + + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + _scsih_normalize_sense(scmd->sense_buffer, &data); + pr_warn(MPT3SAS_FMT + "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n", + ioc->name, data.skey, + data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount)); + } + + if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) { + response_info = le32_to_cpu(mpi_reply->ResponseInfo); + response_bytes = (u8 *)&response_info; + _scsih_response_code(ioc, response_bytes[0]); + } +} + +/** + * _scsih_turn_on_pfa_led - illuminate PFA LED + * @ioc: per adapter object + * @handle: device handle + * Context: process + * + * Return nothing. 
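+ * Note: the SEP request is addressed by device handle, and sas_device->pfa_led_on is set once the write succeeds.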
+ */ +static void +_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + Mpi2SepReply_t mpi_reply; + Mpi2SepRequest_t mpi_request; + struct _sas_device *sas_device; + + sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + if (!sas_device) + return; + + memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); + mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = + cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT); + mpi_request.DevHandle = cpu_to_le16(handle); + mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS; + if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + goto out; + } + sas_device->pfa_led_on = 1; + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + goto out; + } +out: + sas_device_put(sas_device); +} + +/** + * _scsih_turn_off_pfa_led - turn off Fault LED + * @ioc: per adapter object + * @sas_device: sas device whose PFA LED has to be turned off + * Context: process + * + * Return nothing. + */ +static void +_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + Mpi2SepReply_t mpi_reply; + Mpi2SepRequest_t mpi_request; + + memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t)); + mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR; + mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS; + mpi_request.SlotStatus = 0; + mpi_request.Slot = cpu_to_le16(sas_device->slot); + mpi_request.DevHandle = 0; + mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle); + mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS; + if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply, + &mpi_request)) != 0) { + printk(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) { + dewtprintk(ioc, printk(MPT3SAS_FMT + "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo))); + return; + } +} + +/** + * _scsih_send_event_to_turn_on_pfa_led - fire delayed event + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt. + * + * Return nothing. + */ +static void +_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct fw_event_work *fw_event; + + fw_event = alloc_fw_event_work(0); + if (!fw_event) + return; + fw_event->event = MPT3SAS_TURN_ON_PFA_LED; + fw_event->device_handle = handle; + fw_event->ioc = ioc; + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); +} + +/** + * _scsih_smart_predicted_fault - process smart errors + * @ioc: per adapter object + * @handle: device handle + * Context: interrupt. + * + * Return nothing.
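+ * Note: besides the optional PFA LED event (IBM subsystems only), the SMART data is inserted into the ioctl event log.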
+ */ +static void +_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct scsi_target *starget; + struct MPT3SAS_TARGET *sas_target_priv_data; + Mpi2EventNotificationReply_t *event_reply; + Mpi2EventDataSasDeviceStatusChange_t *event_data; + struct _sas_device *sas_device; + ssize_t sz; + unsigned long flags; + + /* only handle non-raid devices */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle); + if (!sas_device) + goto out_unlock; + + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + + if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) || + ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) + goto out_unlock; + + if (sas_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, "predicted fault, " + "enclosure logical id(0x%016llx), slot(%d)\n", + (unsigned long long)sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + starget_printk(KERN_WARNING, starget, "predicted fault, " + "enclosure level(0x%04x), connector name( %s)\n", + sas_device->enclosure_level, + sas_device->connector_name); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) + _scsih_send_event_to_turn_on_pfa_led(ioc, handle); + + /* insert into event log */ + sz = offsetof(Mpi2EventNotificationReply_t, EventData) + + sizeof(Mpi2EventDataSasDeviceStatusChange_t); + event_reply = kzalloc(sz, GFP_KERNEL); + if (!event_reply) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; + event_reply->Event = + cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE); + event_reply->MsgLength = sz/4; + event_reply->EventDataLength = + cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4); + event_data = (Mpi2EventDataSasDeviceStatusChange_t *) + event_reply->EventData; + event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA; + event_data->ASC = 0x5D; + event_data->DevHandle = cpu_to_le16(handle); + event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address); + mpt3sas_ctl_add_to_event_log(ioc, event_reply); + kfree(event_reply); +out: + if (sas_device) + sas_device_put(sas_device); + return; + +out_unlock: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + goto out; +} + +/** + * _scsih_io_done - scsi request callback + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when using _scsih_qcmd. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
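+ * Note: sense data carrying ASC 0x5D also triggers the SMART predicted-fault handling above.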
+ */ +static u8 +_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) +{ + Mpi2SCSIIORequest_t *mpi_request; + Mpi2SCSIIOReply_t *mpi_reply; + struct scsi_cmnd *scmd; + u16 ioc_status; + u32 xfer_cnt; + u8 scsi_state; + u8 scsi_status; + u32 log_info; + struct MPT3SAS_DEVICE *sas_device_priv_data; + u32 response_code = 0; + unsigned long flags; + unsigned int sector_sz; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + scmd = _scsih_scsi_lookup_get_clear(ioc, smid); + if (scmd == NULL) + return 1; + + _scsih_set_satl_pending(scmd, false); + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + + if (mpi_reply == NULL) { + scmd->result = DID_OK << 16; + goto out; + } + + sas_device_priv_data = scmd->device->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target || + sas_device_priv_data->sas_target->deleted) { + scmd->result = DID_NO_CONNECT << 16; + goto out; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus); + + /* + * WARPDRIVE: If direct_io is set then it is directIO, + * the failed direct I/O should be redirected to volume + */ + if (mpt3sas_scsi_direct_io_get(ioc, smid) && + ((ioc_status & MPI2_IOCSTATUS_MASK) + != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) { + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + ioc->scsi_lookup[smid - 1].scmd = scmd; + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + mpt3sas_scsi_direct_io_set(ioc, smid, 0); + memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len); + mpi_request->DevHandle = + cpu_to_le16(sas_device_priv_data->sas_target->handle); + mpt3sas_base_put_smid_scsi_io(ioc, smid, + sas_device_priv_data->sas_target->handle); + return 0; + } + /* turning off TLR */ + scsi_state = mpi_reply->SCSIState; + if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) + response_code = + le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF; + if (!sas_device_priv_data->tlr_snoop_check) { + sas_device_priv_data->tlr_snoop_check++; + if (!ioc->is_warpdrive && + !scsih_is_raid(&scmd->device->sdev_gendev) && + sas_is_tlr_enabled(scmd->device) && + response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) { + sas_disable_tlr(scmd->device); + sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n"); + } + } + + xfer_cnt = le32_to_cpu(mpi_reply->TransferCount); + + /* In case of bogus fw or device, we could end up having + * unaligned partial completion. We can force alignment here, + * then scsi-ml does not need to handle this misbehavior. 
+ */ + sector_sz = scmd->device->sector_size; + if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz && + xfer_cnt % sector_sz)) { + sdev_printk(KERN_INFO, scmd->device, + "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n", + xfer_cnt, sector_sz); + xfer_cnt = round_down(xfer_cnt, sector_sz); + } + + scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt); + if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) + log_info = le32_to_cpu(mpi_reply->IOCLogInfo); + else + log_info = 0; + ioc_status &= MPI2_IOCSTATUS_MASK; + scsi_status = mpi_reply->SCSIStatus; + + if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 && + (scsi_status == MPI2_SCSI_STATUS_BUSY || + scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT || + scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) { + ioc_status = MPI2_IOCSTATUS_SUCCESS; + } + + if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) { + struct sense_info data; + const void *sense_data = mpt3sas_base_get_sense_buffer(ioc, + smid); + u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE, + le32_to_cpu(mpi_reply->SenseCount)); + memcpy(scmd->sense_buffer, sense_data, sz); + _scsih_normalize_sense(scmd->sense_buffer, &data); + /* failure prediction threshold exceeded */ + if (data.asc == 0x5D) + _scsih_smart_predicted_fault(ioc, + le16_to_cpu(mpi_reply->DevHandle)); + mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq); + + if ((ioc->logging_level & MPT_DEBUG_REPLY) && + ((scmd->sense_buffer[2] == UNIT_ATTENTION) || + (scmd->sense_buffer[2] == MEDIUM_ERROR) || + (scmd->sense_buffer[2] == HARDWARE_ERROR))) + _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid); + } + switch (ioc_status) { + case MPI2_IOCSTATUS_BUSY: + case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES: + scmd->result = SAM_STAT_BUSY; + break; + + case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE: + scmd->result = DID_NO_CONNECT << 16; + break; + + case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED: + if (sas_device_priv_data->block) { + scmd->result = DID_TRANSPORT_DISRUPTED << 16; + goto out; + } + if (log_info == 0x31110630) { + if (scmd->retries > 2) { + scmd->result = DID_NO_CONNECT << 16; + scsi_device_set_state(scmd->device, + SDEV_OFFLINE); + } else { + scmd->result = DID_SOFT_ERROR << 16; + scmd->device->expecting_cc_ua = 1; + } + break; + } else if (log_info == VIRTUAL_IO_FAILED_RETRY) { + scmd->result = DID_RESET << 16; + break; + } else if ((scmd->device->channel == RAID_CHANNEL) && + (scsi_state == (MPI2_SCSI_STATE_TERMINATED | + MPI2_SCSI_STATE_NO_SCSI_STATUS))) { + scmd->result = DID_RESET << 16; + break; + } + scmd->result = DID_SOFT_ERROR << 16; + break; + case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED: + case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED: + scmd->result = DID_RESET << 16; + break; + + case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: + if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt)) + scmd->result = DID_SOFT_ERROR << 16; + else + scmd->result = (DID_OK << 16) | scsi_status; + break; + + case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN: + scmd->result = (DID_OK << 16) | scsi_status; + + if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)) + break; + + if (xfer_cnt < scmd->underflow) { + if (scsi_status == SAM_STAT_BUSY) + scmd->result = SAM_STAT_BUSY; + else + scmd->result = DID_SOFT_ERROR << 16; + } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED | + MPI2_SCSI_STATE_NO_SCSI_STATUS)) + scmd->result = DID_SOFT_ERROR << 16; + else if (scsi_state & MPI2_SCSI_STATE_TERMINATED) + scmd->result = DID_RESET << 16; + else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) { + 
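+ /* Zero-byte REPORT LUNS: synthesize a fixed-format CHECK CONDITION with ILLEGAL REQUEST, ASC 0x20 (invalid command operation code). */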
+            mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
+            mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
+            scmd->result = (DRIVER_SENSE << 24) |
+                SAM_STAT_CHECK_CONDITION;
+            scmd->sense_buffer[0] = 0x70;
+            scmd->sense_buffer[2] = ILLEGAL_REQUEST;
+            scmd->sense_buffer[12] = 0x20;
+            scmd->sense_buffer[13] = 0;
+        }
+        break;
+
+    case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+        scsi_set_resid(scmd, 0);
+    case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+    case MPI2_IOCSTATUS_SUCCESS:
+        scmd->result = (DID_OK << 16) | scsi_status;
+        if (response_code ==
+            MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
+            (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
+            MPI2_SCSI_STATE_NO_SCSI_STATUS)))
+            scmd->result = DID_SOFT_ERROR << 16;
+        else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+            scmd->result = DID_RESET << 16;
+        break;
+
+    case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+    case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+    case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+        _scsih_eedp_error_handling(scmd, ioc_status);
+        break;
+
+    case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+    case MPI2_IOCSTATUS_INVALID_FUNCTION:
+    case MPI2_IOCSTATUS_INVALID_SGL:
+    case MPI2_IOCSTATUS_INTERNAL_ERROR:
+    case MPI2_IOCSTATUS_INVALID_FIELD:
+    case MPI2_IOCSTATUS_INVALID_STATE:
+    case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+    case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+    default:
+        scmd->result = DID_SOFT_ERROR << 16;
+        break;
+
+    }
+
+    if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
+        _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
+
+ out:
+
+    scsi_dma_unmap(scmd);
+
+    scmd->scsi_done(scmd);
+    return 1;
+}
+
+/**
+ * _scsih_sas_host_refresh - refreshing sas host object contents
+ * @ioc: per adapter object
+ * Context: user
+ *
+ * During port enable, fw will send topology events for every device. It is
+ * possible that the handles may change from the previous setting, so this
+ * code keeps the handles updated when they change.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
+{
+    u16 sz;
+    u16 ioc_status;
+    int i;
+    Mpi2ConfigReply_t mpi_reply;
+    Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+    u16 attached_handle;
+    u8 link_rate;
+
+    dtmprintk(ioc, pr_info(MPT3SAS_FMT
+        "updating handles for sas_host(0x%016llx)\n",
+        ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
+
+    sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
+        * sizeof(Mpi2SasIOUnit0PhyData_t));
+    sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+    if (!sas_iounit_pg0) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        return;
+    }
+
+    if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+        sas_iounit_pg0, sz)) != 0)
+        goto out;
+    ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+    if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+        goto out;
+    for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+        link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
+        if (i == 0)
+            ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
+                PhyData[0].ControllerDevHandle);
+        ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
+        attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
+ AttachedDevHandle); + if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) + link_rate = MPI2_SAS_NEG_LINK_RATE_1_5; + mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address, + attached_handle, i, link_rate); + } + out: + kfree(sas_iounit_pg0); +} + +/** + * _scsih_sas_host_add - create sas host object + * @ioc: per adapter object + * + * Creating host side data object, stored in ioc->sas_hba + * + * Return nothing. + */ +static void +_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc) +{ + int i; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL; + Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; + Mpi2SasPhyPage0_t phy_pg0; + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2SasEnclosurePage0_t enclosure_pg0; + u16 ioc_status; + u16 sz; + u8 device_missing_delay; + + mpt3sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys); + if (!ioc->sas_hba.num_phys) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + /* sas_iounit page 0 */ + sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit0PhyData_t)); + sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg0) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply, + sas_iounit_pg0, sz))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + /* sas_iounit page 1 */ + sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys * + sizeof(Mpi2SasIOUnit1PhyData_t)); + sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + if (!sas_iounit_pg1) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply, + sas_iounit_pg1, sz))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + ioc->io_missing_delay = + sas_iounit_pg1->IODeviceMissingDelay; + device_missing_delay = + sas_iounit_pg1->ReportDeviceMissingDelay; + if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16) + ioc->device_missing_delay = (device_missing_delay & + MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16; + else + ioc->device_missing_delay = device_missing_delay & + MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK; + + ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev; + ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys, + sizeof(struct _sas_phy), GFP_KERNEL); + if (!ioc->sas_hba.phy) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + for (i = 0; i < ioc->sas_hba.num_phys ; i++) { + if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0, + i))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + 
pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + + if (i == 0) + ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0-> + PhyData[0].ControllerDevHandle); + ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle; + ioc->sas_hba.phy[i].phy_id = i; + mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i], + phy_pg0, ioc->sas_hba.parent_dev); + } + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out; + } + ioc->sas_hba.enclosure_handle = + le16_to_cpu(sas_device_pg0.EnclosureHandle); + ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + pr_info(MPT3SAS_FMT + "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n", + ioc->name, ioc->sas_hba.handle, + (unsigned long long) ioc->sas_hba.sas_address, + ioc->sas_hba.num_phys) ; + + if (ioc->sas_hba.enclosure_handle) { + if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, + &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, + ioc->sas_hba.enclosure_handle))) + ioc->sas_hba.enclosure_logical_id = + le64_to_cpu(enclosure_pg0.EnclosureLogicalID); + } + + out: + kfree(sas_iounit_pg1); + kfree(sas_iounit_pg0); +} + +/** + * _scsih_expander_add - creating expander object + * @ioc: per adapter object + * @handle: expander handle + * + * Creating expander object, stored in ioc->sas_expander_list. + * + * Return 0 for success, else error. + */ +static int +_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _sas_node *sas_expander; + Mpi2ConfigReply_t mpi_reply; + Mpi2ExpanderPage0_t expander_pg0; + Mpi2ExpanderPage1_t expander_pg1; + Mpi2SasEnclosurePage0_t enclosure_pg0; + u32 ioc_status; + u16 parent_handle; + u64 sas_address, sas_address_parent = 0; + int i; + unsigned long flags; + struct _sas_port *mpt3sas_port = NULL; + + int rc = 0; + + if (!handle) + return -1; + + if (ioc->shost_recovery || ioc->pci_error_recovery) + return -1; + + if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, + MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + + /* handle out of order topology events */ + parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle); + if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent) + != 0) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + if (sas_address_parent != ioc->sas_hba.sas_address) { + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address_parent); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (!sas_expander) { + rc = _scsih_expander_add(ioc, parent_handle); + if (rc != 0) + return rc; + } + } + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_address = le64_to_cpu(expander_pg0.SASAddress); + sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (sas_expander) + return 0; + + sas_expander = kzalloc(sizeof(struct _sas_node), + GFP_KERNEL); + if 
(!sas_expander) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + + sas_expander->handle = handle; + sas_expander->num_phys = expander_pg0.NumPhys; + sas_expander->sas_address_parent = sas_address_parent; + sas_expander->sas_address = sas_address; + + pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \ + " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name, + handle, parent_handle, (unsigned long long) + sas_expander->sas_address, sas_expander->num_phys); + + if (!sas_expander->num_phys) + goto out_fail; + sas_expander->phy = kcalloc(sas_expander->num_phys, + sizeof(struct _sas_phy), GFP_KERNEL); + if (!sas_expander->phy) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + + INIT_LIST_HEAD(&sas_expander->sas_port_list); + mpt3sas_port = mpt3sas_transport_port_add(ioc, handle, + sas_address_parent); + if (!mpt3sas_port) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->parent_dev = &mpt3sas_port->rphy->dev; + + for (i = 0 ; i < sas_expander->num_phys ; i++) { + if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, + &expander_pg1, i, handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + sas_expander->phy[i].handle = handle; + sas_expander->phy[i].phy_id = i; + + if ((mpt3sas_transport_add_expander_phy(ioc, + &sas_expander->phy[i], expander_pg1, + sas_expander->parent_dev))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rc = -1; + goto out_fail; + } + } + + if (sas_expander->enclosure_handle) { + if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply, + &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE, + sas_expander->enclosure_handle))) + sas_expander->enclosure_logical_id = + le64_to_cpu(enclosure_pg0.EnclosureLogicalID); + } + + _scsih_expander_node_add(ioc, sas_expander); + return 0; + + out_fail: + + if (mpt3sas_port) + mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, + sas_address_parent); + kfree(sas_expander); + return rc; +} + +/** + * mpt3sas_expander_remove - removing expander object + * @ioc: per adapter object + * @sas_address: expander sas_address + * + * Return nothing. + */ +void +mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address) +{ + struct _sas_node *sas_expander; + unsigned long flags; + + if (ioc->shost_recovery) + return; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc, + sas_address); + if (sas_expander) + list_del(&sas_expander->list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (sas_expander) + _scsih_expander_node_remove(ioc, sas_expander); +} + +/** + * _scsih_done - internal SCSI_IO callback handler. + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when sending internal generated SCSI_IO. + * The callback index passed is `ioc->scsih_cb_idx` + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
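+ * (As written the handler always returns 1; ownership of the message frame
+ * is tracked through the MPT3_CMD_* bits in ioc->scsih_cmds.status, and
+ * waiters are woken via complete(&ioc->scsih_cmds.done).)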
+ */
+static u8
+_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+    MPI2DefaultReply_t *mpi_reply;
+
+    mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+    if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
+        return 1;
+    if (ioc->scsih_cmds.smid != smid)
+        return 1;
+    ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
+    if (mpi_reply) {
+        memcpy(ioc->scsih_cmds.reply, mpi_reply,
+            mpi_reply->MsgLength*4);
+        ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
+    }
+    ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
+    complete(&ioc->scsih_cmds.done);
+    return 1;
+}
+
+
+
+
+#define MPT3_MAX_LUNS (255)
+
+
+/**
+ * _scsih_check_access_status - check access flags
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: sas device handle
+ * @access_status: errors returned during discovery of the device
+ *
+ * Return 0 for success, else failure
+ */
+static u8
+_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+    u16 handle, u8 access_status)
+{
+    u8 rc = 1;
+    char *desc = NULL;
+
+    switch (access_status) {
+    case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
+    case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
+        rc = 0;
+        break;
+    case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
+        desc = "sata capability failed";
+        break;
+    case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
+        desc = "sata affiliation conflict";
+        break;
+    case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
+        desc = "route not addressable";
+        break;
+    case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
+        desc = "smp error not addressable";
+        break;
+    case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
+        desc = "device blocked";
+        break;
+    case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
+    case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
+    case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
+    case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
+    case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
+    case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
+    case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
+    case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
+    case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
+    case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
+    case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
+    case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
+        desc = "sata initialization failed";
+        break;
+    default:
+        desc = "unknown";
+        break;
+    }
+
+    if (!rc)
+        return 0;
+
+    pr_err(MPT3SAS_FMT
+        "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+        ioc->name, desc, (unsigned long long)sas_address, handle);
+    return rc;
+}
+
+/**
+ * _scsih_check_device - checking device responsiveness
+ * @ioc: per adapter object
+ * @parent_sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
+ * @phy_number: phy number
+ * @link_rate: new link rate
+ *
+ * Returns nothing.
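+ *
+ * Re-reads SAS Device page 0 for @handle; when the device is still present
+ * and passed discovery, I/O to it is unblocked, and a changed handle is
+ * patched into the cached _sas_device and its scsi_target private data.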
+ */ +static void +_scsih_check_device(struct MPT3SAS_ADAPTER *ioc, + u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate) +{ + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + struct _sas_device *sas_device; + u32 ioc_status; + unsigned long flags; + u64 sas_address; + struct scsi_target *starget; + struct MPT3SAS_TARGET *sas_target_priv_data; + u32 device_info; + + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) + return; + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + return; + + /* wide port handling ~ we need only handle device once for the phy that + * is matched in sas device page zero + */ + if (phy_number != sas_device_pg0.PhyNum) + return; + + /* check if this is end device */ + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if (!(_scsih_is_end_device(device_info))) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + sas_address); + + if (!sas_device) + goto out_unlock; + + if (unlikely(sas_device->handle != handle)) { + starget = sas_device->starget; + sas_target_priv_data = starget->hostdata; + starget_printk(KERN_INFO, starget, + "handle changed from(0x%04x) to (0x%04x)!!!\n", + sas_device->handle, handle); + sas_target_priv_data->handle = handle; + sas_device->handle = handle; + if (sas_device_pg0.Flags & + MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = + le16_to_cpu(sas_device_pg0.EnclosureLevel); + memcpy(&sas_device->connector_name[0], + &sas_device_pg0.ConnectorName[0], 4); + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + } + + /* check if device is present */ + if (!(le16_to_cpu(sas_device_pg0.Flags) & + MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) { + pr_err(MPT3SAS_FMT + "device is not present handle(0x%04x), flags!!!\n", + ioc->name, handle); + goto out_unlock; + } + + /* check if there were any issues with discovery */ + if (_scsih_check_access_status(ioc, sas_address, handle, + sas_device_pg0.AccessStatus)) + goto out_unlock; + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + _scsih_ublock_io_device(ioc, sas_address); + + if (sas_device) + sas_device_put(sas_device); + return; + +out_unlock: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + if (sas_device) + sas_device_put(sas_device); +} + +/** + * _scsih_add_device - creating sas device object + * @ioc: per adapter object + * @handle: sas device handle + * @phy_num: phy number end device attached to + * @is_pd: is this hidden raid component + * + * Creating end device object, stored in ioc->sas_device_list. + * + * Returns 0 for success, non-zero for failure. 
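+ *
+ * During initial port enable (ioc->wait_for_discovery_to_complete set) the
+ * new device is only queued via _scsih_sas_device_init_add(); otherwise it
+ * is registered immediately via _scsih_sas_device_add().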
+ */
+static int
+_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
+    u8 is_pd)
+{
+    Mpi2ConfigReply_t mpi_reply;
+    Mpi2SasDevicePage0_t sas_device_pg0;
+    Mpi2SasEnclosurePage0_t enclosure_pg0;
+    struct _sas_device *sas_device;
+    u32 ioc_status;
+    u64 sas_address;
+    u32 device_info;
+
+    if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+        MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        return -1;
+    }
+
+    ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+        MPI2_IOCSTATUS_MASK;
+    if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        return -1;
+    }
+
+    /* check if this is end device */
+    device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+    if (!(_scsih_is_end_device(device_info)))
+        return -1;
+    sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
+    /* check if device is present */
+    if (!(le16_to_cpu(sas_device_pg0.Flags) &
+        MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+        pr_err(MPT3SAS_FMT "device is not present handle(0x%04x)!!!\n",
+            ioc->name, handle);
+        return -1;
+    }
+
+    /* check if there were any issues with discovery */
+    if (_scsih_check_access_status(ioc, sas_address, handle,
+        sas_device_pg0.AccessStatus))
+        return -1;
+
+    sas_device = mpt3sas_get_sdev_by_addr(ioc,
+        sas_address);
+    if (sas_device) {
+        sas_device_put(sas_device);
+        return -1;
+    }
+
+    sas_device = kzalloc(sizeof(struct _sas_device),
+        GFP_KERNEL);
+    if (!sas_device) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        return 0;
+    }
+
+    kref_init(&sas_device->refcount);
+    sas_device->handle = handle;
+    if (_scsih_get_sas_address(ioc,
+        le16_to_cpu(sas_device_pg0.ParentDevHandle),
+        &sas_device->sas_address_parent) != 0)
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+    sas_device->enclosure_handle =
+        le16_to_cpu(sas_device_pg0.EnclosureHandle);
+    if (sas_device->enclosure_handle != 0)
+        sas_device->slot =
+            le16_to_cpu(sas_device_pg0.Slot);
+    sas_device->device_info = device_info;
+    sas_device->sas_address = sas_address;
+    sas_device->phy = sas_device_pg0.PhyNum;
+    sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
+        MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+
+    if (sas_device_pg0.Flags & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
+        sas_device->enclosure_level =
+            le16_to_cpu(sas_device_pg0.EnclosureLevel);
+        memcpy(&sas_device->connector_name[0],
+            &sas_device_pg0.ConnectorName[0], 4);
+    } else {
+        sas_device->enclosure_level = 0;
+        sas_device->connector_name[0] = '\0';
+    }
+    /* get enclosure_logical_id */
+    if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
+        ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+        sas_device->enclosure_handle)))
+        sas_device->enclosure_logical_id =
+            le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+
+    /* get device name */
+    sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+
+    if (ioc->wait_for_discovery_to_complete)
+        _scsih_sas_device_init_add(ioc, sas_device);
+    else
+        _scsih_sas_device_add(ioc, sas_device);
+
+    sas_device_put(sas_device);
+    return 0;
+}
+
+/**
+ * _scsih_remove_device - removing sas device object
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ *
+ * Return nothing.
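+ *
+ * The target is marked deleted and its blocked I/O released before the
+ * transport port is removed (skipped when ioc->hide_drives is set).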
+ */ +static void +_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + struct MPT3SAS_TARGET *sas_target_priv_data; + + if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) && + (sas_device->pfa_led_on)) { + _scsih_turn_off_pfa_led(ioc, sas_device); + sas_device->pfa_led_on = 0; + } + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, + sas_device->handle, (unsigned long long) + sas_device->sas_address)); + if (sas_device->enclosure_handle != 0) + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n", + ioc->name, __func__, + (unsigned long long)sas_device->enclosure_logical_id, + sas_device->slot)); + if (sas_device->connector_name[0] != '\0') + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: enter: enclosure level(0x%04x), connector name( %s)\n", + ioc->name, __func__, + sas_device->enclosure_level, + sas_device->connector_name)); + + if (sas_device->starget && sas_device->starget->hostdata) { + sas_target_priv_data = sas_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + _scsih_ublock_io_device(ioc, sas_device->sas_address); + sas_target_priv_data->handle = + MPT3SAS_INVALID_DEVICE_HANDLE; + } + + if (!ioc->hide_drives) + mpt3sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent); + + pr_info(MPT3SAS_FMT + "removing handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, sas_device->handle, + (unsigned long long) sas_device->sas_address); + if (sas_device->enclosure_handle != 0) + pr_info(MPT3SAS_FMT + "removing : enclosure logical id(0x%016llx), slot(%d)\n", + ioc->name, + (unsigned long long)sas_device->enclosure_logical_id, + sas_device->slot); + if (sas_device->connector_name[0] != '\0') + pr_info(MPT3SAS_FMT + "removing enclosure level(0x%04x), connector name( %s)\n", + ioc->name, sas_device->enclosure_level, + sas_device->connector_name); + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, __func__, + sas_device->handle, (unsigned long long) + sas_device->sas_address)); + if (sas_device->enclosure_handle != 0) + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n", + ioc->name, __func__, + (unsigned long long)sas_device->enclosure_logical_id, + sas_device->slot)); + if (sas_device->connector_name[0] != '\0') + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: exit: enclosure level(0x%04x), connector name(%s)\n", + ioc->name, __func__, sas_device->enclosure_level, + sas_device->connector_name)); +} + +/** + * _scsih_sas_topology_change_event_debug - debug for topology event + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. 
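+ *
+ * Purely informational: decodes the expander status plus each PHY entry's
+ * reason code and new/previous link rates into readable log lines.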
+ */ +static void +_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasTopologyChangeList_t *event_data) +{ + int i; + u16 handle; + u16 reason_code; + u8 phy_number; + char *status_str = NULL; + u8 link_rate, prev_link_rate; + + switch (event_data->ExpStatus) { + case MPI2_EVENT_SAS_TOPO_ES_ADDED: + status_str = "add"; + break; + case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING: + status_str = "remove"; + break; + case MPI2_EVENT_SAS_TOPO_ES_RESPONDING: + case 0: + status_str = "responding"; + break; + case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING: + status_str = "remove delay"; + break; + default: + status_str = "unknown status"; + break; + } + pr_info(MPT3SAS_FMT "sas topology change: (%s)\n", + ioc->name, status_str); + pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \ + "start_phy(%02d), count(%d)\n", + le16_to_cpu(event_data->ExpanderDevHandle), + le16_to_cpu(event_data->EnclosureHandle), + event_data->StartPhyNum, event_data->NumEntries); + for (i = 0; i < event_data->NumEntries; i++) { + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + phy_number = event_data->StartPhyNum + i; + reason_code = event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_RC_MASK; + switch (reason_code) { + case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: + status_str = "target add"; + break; + case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: + status_str = "target remove"; + break; + case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING: + status_str = "delay target remove"; + break; + case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: + status_str = "link rate change"; + break; + case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE: + status_str = "target responding"; + break; + default: + status_str = "unknown"; + break; + } + link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = event_data->PHY[i].LinkRate & 0xF; + pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \ + " link rate: new(0x%02x), old(0x%02x)\n", phy_number, + handle, status_str, link_rate, prev_link_rate); + + } +} + +/** + * _scsih_sas_topology_change_event - handle topology changes + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. 
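+ *
+ * Note: the PHY_CHANGED case in the switch below has no break, so after a
+ * usable link-rate change execution continues into the TARG_ADDED path and
+ * the device is (re)added as well.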
+ * + */ +static int +_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + int i; + u16 parent_handle, handle; + u16 reason_code; + u8 phy_number, max_phys; + struct _sas_node *sas_expander; + u64 sas_address; + unsigned long flags; + u8 link_rate, prev_link_rate; + Mpi2EventDataSasTopologyChangeList_t *event_data = + (Mpi2EventDataSasTopologyChangeList_t *) + fw_event->event_data; + + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + _scsih_sas_topology_change_event_debug(ioc, event_data); + + if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery) + return 0; + + if (!ioc->sas_hba.num_phys) + _scsih_sas_host_add(ioc); + else + _scsih_sas_host_refresh(ioc); + + if (fw_event->ignore) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "ignoring expander event\n", ioc->name)); + return 0; + } + + parent_handle = le16_to_cpu(event_data->ExpanderDevHandle); + + /* handle expander add */ + if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED) + if (_scsih_expander_add(ioc, parent_handle) != 0) + return 0; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc, + parent_handle); + if (sas_expander) { + sas_address = sas_expander->sas_address; + max_phys = sas_expander->num_phys; + } else if (parent_handle < ioc->sas_hba.num_phys) { + sas_address = ioc->sas_hba.sas_address; + max_phys = ioc->sas_hba.num_phys; + } else { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return 0; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + /* handle siblings events */ + for (i = 0; i < event_data->NumEntries; i++) { + if (fw_event->ignore) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "ignoring expander event\n", ioc->name)); + return 0; + } + if (ioc->remove_host || ioc->pci_error_recovery) + return 0; + phy_number = event_data->StartPhyNum + i; + if (phy_number >= max_phys) + continue; + reason_code = event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_RC_MASK; + if ((event_data->PHY[i].PhyStatus & + MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code != + MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) + continue; + handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle); + if (!handle) + continue; + link_rate = event_data->PHY[i].LinkRate >> 4; + prev_link_rate = event_data->PHY[i].LinkRate & 0xF; + switch (reason_code) { + case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED: + + if (ioc->shost_recovery) + break; + + if (link_rate == prev_link_rate) + break; + + mpt3sas_transport_update_links(ioc, sas_address, + handle, phy_number, link_rate); + + if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5) + break; + + _scsih_check_device(ioc, sas_address, handle, + phy_number, link_rate); + + + case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED: + + if (ioc->shost_recovery) + break; + + mpt3sas_transport_update_links(ioc, sas_address, + handle, phy_number, link_rate); + + _scsih_add_device(ioc, handle, phy_number, 0); + + break; + case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING: + + _scsih_device_remove_by_handle(ioc, handle); + break; + } + } + + /* handle expander removal */ + if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING && + sas_expander) + mpt3sas_expander_remove(ioc, sas_address); + + return 0; +} + +/** + * _scsih_sas_device_status_change_event_debug - debug for device event + * @event_data: event data payload + * Context: user. + * + * Return nothing. 
+ */ +static void +_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasDeviceStatusChange_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->ReasonCode) { + case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA: + reason_str = "smart data"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED: + reason_str = "unsupported device discovered"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: + reason_str = "internal device reset"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL: + reason_str = "internal task abort"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL: + reason_str = "internal task abort set"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL: + reason_str = "internal clear task set"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL: + reason_str = "internal query task"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE: + reason_str = "sata init failure"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET: + reason_str = "internal device reset complete"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL: + reason_str = "internal task abort complete"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION: + reason_str = "internal async notification"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality"; + break; + case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY: + reason_str = "expander reduced functionality complete"; + break; + default: + reason_str = "unknown reason"; + break; + } + pr_info(MPT3SAS_FMT "device status change: (%s)\n" + "\thandle(0x%04x), sas address(0x%016llx), tag(%d)", + ioc->name, reason_str, le16_to_cpu(event_data->DevHandle), + (unsigned long long)le64_to_cpu(event_data->SASAddress), + le16_to_cpu(event_data->TaskTag)); + if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA) + pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name, + event_data->ASC, event_data->ASCQ); + pr_info("\n"); +} + +/** + * _scsih_sas_device_status_change_event - handle device status change + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + struct MPT3SAS_TARGET *target_priv_data; + struct _sas_device *sas_device; + u64 sas_address; + unsigned long flags; + Mpi2EventDataSasDeviceStatusChange_t *event_data = + (Mpi2EventDataSasDeviceStatusChange_t *) + fw_event->event_data; + + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + _scsih_sas_device_status_change_event_debug(ioc, + event_data); + + /* In MPI Revision K (0xC), the internal device reset complete was + * implemented, so avoid setting tm_busy flag for older firmware. 
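+ * The check below takes the high byte of ioc->facts.HeaderVersion as the
+ * MPI header major revision; values below 0xC (Revision K) lack the
+ * internal device reset complete event, so tm_busy is left untouched.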
+ */ + if ((ioc->facts.HeaderVersion >> 8) < 0xC) + return; + + if (event_data->ReasonCode != + MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET && + event_data->ReasonCode != + MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET) + return; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_address = le64_to_cpu(event_data->SASAddress); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + sas_address); + + if (!sas_device || !sas_device->starget) + goto out; + + target_priv_data = sas_device->starget->hostdata; + if (!target_priv_data) + goto out; + + if (event_data->ReasonCode == + MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET) + target_priv_data->tm_busy = 1; + else + target_priv_data->tm_busy = 0; + +out: + if (sas_device) + sas_device_put(sas_device); + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + +} + +/** + * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure + * event + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataSasEnclDevStatusChange_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->ReasonCode) { + case MPI2_EVENT_SAS_ENCL_RC_ADDED: + reason_str = "enclosure add"; + break; + case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING: + reason_str = "enclosure remove"; + break; + default: + reason_str = "unknown reason"; + break; + } + + pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n" + "\thandle(0x%04x), enclosure logical id(0x%016llx)" + " number slots(%d)\n", ioc->name, reason_str, + le16_to_cpu(event_data->EnclosureHandle), + (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID), + le16_to_cpu(event_data->StartSlot)); +} + +/** + * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + _scsih_sas_enclosure_dev_status_change_event_debug(ioc, + (Mpi2EventDataSasEnclDevStatusChange_t *) + fw_event->event_data); +} + +/** + * _scsih_sas_broadcast_primitive_event - handle broadcast events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. 
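+ *
+ * The handler blocks I/O to all devices, then for each outstanding SCSI_IO
+ * issues a QUERY_TASK; commands no longer owned by the IOC or target are
+ * escalated to ABORT_TASK with bounded retries before I/O is unblocked.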
+ */ +static void +_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + struct scsi_cmnd *scmd; + struct scsi_device *sdev; + u16 smid, handle; + u32 lun; + struct MPT3SAS_DEVICE *sas_device_priv_data; + u32 termination_count; + u32 query_count; + Mpi2SCSITaskManagementReply_t *mpi_reply; + Mpi2EventDataSasBroadcastPrimitive_t *event_data = + (Mpi2EventDataSasBroadcastPrimitive_t *) + fw_event->event_data; + u16 ioc_status; + unsigned long flags; + int r; + u8 max_retries = 0; + u8 task_abort_retries; + + mutex_lock(&ioc->tm_cmds.mutex); + pr_info(MPT3SAS_FMT + "%s: enter: phy number(%d), width(%d)\n", + ioc->name, __func__, event_data->PhyNum, + event_data->PortWidth); + + _scsih_block_io_all_device(ioc); + + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + mpi_reply = ioc->tm_cmds.reply; + broadcast_aen_retry: + + /* sanity checks for retrying this loop */ + if (max_retries++ == 5) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n", + ioc->name, __func__)); + goto out; + } else if (max_retries > 1) + dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n", + ioc->name, __func__, max_retries - 1)); + + termination_count = 0; + query_count = 0; + for (smid = 1; smid <= ioc->scsiio_depth; smid++) { + if (ioc->shost_recovery) + goto out; + scmd = _scsih_scsi_lookup_get(ioc, smid); + if (!scmd) + continue; + sdev = scmd->device; + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data || !sas_device_priv_data->sas_target) + continue; + /* skip hidden raid components */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_RAID_COMPONENT) + continue; + /* skip volumes */ + if (sas_device_priv_data->sas_target->flags & + MPT_TARGET_FLAGS_VOLUME) + continue; + + handle = sas_device_priv_data->sas_target->handle; + lun = sas_device_priv_data->lun; + query_count++; + + if (ioc->shost_recovery) + goto out; + + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun, + MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, + TM_MUTEX_OFF); + if (r == FAILED) { + sdev_printk(KERN_WARNING, sdev, + "mpt3sas_scsih_issue_tm: FAILED when sending " + "QUERY_TASK: scmd(%p)\n", scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + ioc_status = le16_to_cpu(mpi_reply->IOCStatus) + & MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + sdev_printk(KERN_WARNING, sdev, + "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n", + ioc_status, scmd); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + + /* see if IO is still owned by IOC and target */ + if (mpi_reply->ResponseCode == + MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED || + mpi_reply->ResponseCode == + MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) { + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + continue; + } + task_abort_retries = 0; + tm_retry: + if (task_abort_retries++ == 60) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: ABORT_TASK: giving up\n", ioc->name, + __func__)); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + goto broadcast_aen_retry; + } + + if (ioc->shost_recovery) + goto out_no_lock; + + r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id, + sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30, + TM_MUTEX_OFF); + if (r == FAILED) { + sdev_printk(KERN_WARNING, sdev, + "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : " + "scmd(%p)\n", scmd); + goto tm_retry; + } + + if (task_abort_retries > 1) + 
sdev_printk(KERN_WARNING, sdev, + "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):" + " scmd(%p)\n", + task_abort_retries - 1, scmd); + + termination_count += le32_to_cpu(mpi_reply->TerminationCount); + spin_lock_irqsave(&ioc->scsi_lookup_lock, flags); + } + + if (ioc->broadcast_aen_pending) { + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: loop back due to pending AEN\n", + ioc->name, __func__)); + ioc->broadcast_aen_pending = 0; + goto broadcast_aen_retry; + } + + out: + spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags); + out_no_lock: + + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s - exit, query_count = %d termination_count = %d\n", + ioc->name, __func__, query_count, termination_count)); + + ioc->broadcast_aen_busy = 0; + if (!ioc->shost_recovery) + _scsih_ublock_io_all_device(ioc); + mutex_unlock(&ioc->tm_cmds.mutex); +} + +/** + * _scsih_sas_discovery_event - handle discovery events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi2EventDataSasDiscovery_t *event_data = + (Mpi2EventDataSasDiscovery_t *) fw_event->event_data; + + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) { + pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name, + (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ? + "start" : "stop"); + if (event_data->DiscoveryStatus) + pr_info("discovery_status(0x%08x)", + le32_to_cpu(event_data->DiscoveryStatus)); + pr_info("\n"); + } + + if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED && + !ioc->sas_hba.num_phys) { + if (disable_discovery > 0 && ioc->shost_recovery) { + /* Wait for the reset to complete */ + while (ioc->shost_recovery) + ssleep(1); + } + _scsih_sas_host_add(ioc); + } +} + +/** + * _scsih_ir_fastpath - turn on fastpath for IR physdisk + * @ioc: per adapter object + * @handle: device handle for physical disk + * @phys_disk_num: physical disk number + * + * Return 0 for success, else failure. 
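+ *
+ * Implemented as a MPI2_FUNCTION_RAID_ACTION request with the
+ * MPI2_RAID_ACTION_PHYSDISK_HIDDEN action, issued through the serialized
+ * scsih_cmds context with a 10 second timeout; a timeout that was not
+ * caused by a reset escalates to a hard host reset.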
+ */
+static int
+_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
+{
+    Mpi2RaidActionRequest_t *mpi_request;
+    Mpi2RaidActionReply_t *mpi_reply;
+    u16 smid;
+    u8 issue_reset = 0;
+    int rc = 0;
+    u16 ioc_status;
+    u32 log_info;
+
+    if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
+        return rc;
+
+    mutex_lock(&ioc->scsih_cmds.mutex);
+
+    if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+        pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
+            ioc->name, __func__);
+        rc = -EAGAIN;
+        goto out;
+    }
+    ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+    smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+    if (!smid) {
+        pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+            ioc->name, __func__);
+        ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+        rc = -EAGAIN;
+        goto out;
+    }
+
+    mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+    ioc->scsih_cmds.smid = smid;
+    memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
+
+    mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
+    mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
+    mpi_request->PhysDiskNum = phys_disk_num;
+
+    dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
+        "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
+        handle, phys_disk_num));
+
+    init_completion(&ioc->scsih_cmds.done);
+    mpt3sas_base_put_smid_default(ioc, smid);
+    wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
+
+    if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+        pr_err(MPT3SAS_FMT "%s: timeout\n",
+            ioc->name, __func__);
+        if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET))
+            issue_reset = 1;
+        rc = -EFAULT;
+        goto out;
+    }
+
+    if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+        mpi_reply = ioc->scsih_cmds.reply;
+        ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+        if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+            log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+        else
+            log_info = 0;
+        ioc_status &= MPI2_IOCSTATUS_MASK;
+        if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+            dewtprintk(ioc, pr_info(MPT3SAS_FMT
+                "IR RAID_ACTION: failed: ioc_status(0x%04x), "
+                "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
+                log_info));
+            rc = -EFAULT;
+        } else
+            dewtprintk(ioc, pr_info(MPT3SAS_FMT
+                "IR RAID_ACTION: completed successfully\n",
+                ioc->name));
+    }
+
+ out:
+    ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+    mutex_unlock(&ioc->scsih_cmds.mutex);
+
+    if (issue_reset)
+        mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+            FORCE_BIG_HAMMER);
+    return rc;
+}
+
+/**
+ * _scsih_reprobe_lun - reprobing lun
+ * @sdev: scsi device struct
+ * @no_uld_attach: sdev->no_uld_attach flag setting
+ *
+ */
+static void
+_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
+{
+    int rc;
+    sdev->no_uld_attach = no_uld_attach ? 1 : 0;
+    sdev_printk(KERN_INFO, sdev, "%s raid component\n",
+        sdev->no_uld_attach ? "hiding" : "exposing");
+    rc = scsi_device_reprobe(sdev);
+}
+
+/**
+ * _scsih_sas_volume_add - add new volume
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
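+ *
+ * The volume is looked up by WWID first, so a duplicate event is a no-op.
+ * scsi_add_device() is called directly only when discovery is already
+ * complete; during initial discovery the volume is just recorded and
+ * weighed as a potential boot device.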
+ */ +static void +_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventIrConfigElement_t *element) +{ + struct _raid_device *raid_device; + unsigned long flags; + u64 wwid; + u16 handle = le16_to_cpu(element->VolDevHandle); + int rc; + + mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); + if (!wwid) { + pr_err(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (raid_device) + return; + + raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); + if (!raid_device) { + pr_err(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + return; + } + + raid_device->id = ioc->sas_id++; + raid_device->channel = RAID_CHANNEL; + raid_device->handle = handle; + raid_device->wwid = wwid; + _scsih_raid_device_add(ioc, raid_device); + if (!ioc->wait_for_discovery_to_complete) { + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + } else { + spin_lock_irqsave(&ioc->raid_device_lock, flags); + _scsih_determine_boot_device(ioc, raid_device, 1); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } +} + +/** + * _scsih_sas_volume_delete - delete volume + * @ioc: per adapter object + * @handle: volume device handle + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle) +{ + struct _raid_device *raid_device; + unsigned long flags; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct scsi_target *starget = NULL; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + if (raid_device) { + if (raid_device->starget) { + starget = raid_device->starget; + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->deleted = 1; + } + pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, raid_device->handle, + (unsigned long long) raid_device->wwid); + list_del(&raid_device->list); + kfree(raid_device); + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (starget) + scsi_remove_target(&starget->dev); +} + +/** + * _scsih_sas_pd_expose - expose pd component to /dev/sdX + * @ioc: per adapter object + * @element: IR config element data + * Context: user. + * + * Return nothing. 
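+ *
+ * Clears the pd_handles bit and the MPT_TARGET_FLAGS_RAID_COMPONENT flag,
+ * then reprobes every LUN on the target with no_uld_attach = 0 so the
+ * upper-level driver attaches again.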
+ */
+static void
+_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
+    Mpi2EventIrConfigElement_t *element)
+{
+    struct _sas_device *sas_device;
+    struct scsi_target *starget = NULL;
+    struct MPT3SAS_TARGET *sas_target_priv_data;
+    unsigned long flags;
+    u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+    spin_lock_irqsave(&ioc->sas_device_lock, flags);
+    sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
+    if (sas_device) {
+        sas_device->volume_handle = 0;
+        sas_device->volume_wwid = 0;
+        clear_bit(handle, ioc->pd_handles);
+        if (sas_device->starget && sas_device->starget->hostdata) {
+            starget = sas_device->starget;
+            sas_target_priv_data = starget->hostdata;
+            sas_target_priv_data->flags &=
+                ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+        }
+    }
+    spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+    if (!sas_device)
+        return;
+
+    /* exposing raid component */
+    if (starget)
+        starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
+
+    sas_device_put(sas_device);
+}
+
+/**
+ * _scsih_sas_pd_hide - hide pd component from /dev/sdX
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
+    Mpi2EventIrConfigElement_t *element)
+{
+    struct _sas_device *sas_device;
+    struct scsi_target *starget = NULL;
+    struct MPT3SAS_TARGET *sas_target_priv_data;
+    unsigned long flags;
+    u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+    u16 volume_handle = 0;
+    u64 volume_wwid = 0;
+
+    mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
+    if (volume_handle)
+        mpt3sas_config_get_volume_wwid(ioc, volume_handle,
+            &volume_wwid);
+
+    spin_lock_irqsave(&ioc->sas_device_lock, flags);
+    sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
+    if (sas_device) {
+        set_bit(handle, ioc->pd_handles);
+        if (sas_device->starget && sas_device->starget->hostdata) {
+            starget = sas_device->starget;
+            sas_target_priv_data = starget->hostdata;
+            sas_target_priv_data->flags |=
+                MPT_TARGET_FLAGS_RAID_COMPONENT;
+            sas_device->volume_handle = volume_handle;
+            sas_device->volume_wwid = volume_wwid;
+        }
+    }
+    spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+    if (!sas_device)
+        return;
+
+    /* hiding raid component */
+    _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+
+    if (starget)
+        starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
+
+    sas_device_put(sas_device);
+}
+
+/**
+ * _scsih_sas_pd_delete - delete pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
+    Mpi2EventIrConfigElement_t *element)
+{
+    u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+    _scsih_device_remove_by_handle(ioc, handle);
+}
+
+/**
+ * _scsih_sas_pd_add - add pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
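+ *
+ * Sets the pd_handles bit and, if the disk is not yet known, reads SAS
+ * Device page 0 to refresh the parent links before adding it as a hidden
+ * raid component via _scsih_add_device(..., is_pd = 1).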
+ */ +static void +_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventIrConfigElement_t *element) +{ + struct _sas_device *sas_device; + u16 handle = le16_to_cpu(element->PhysDiskDevHandle); + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + u32 ioc_status; + u64 sas_address; + u16 parent_handle; + + set_bit(handle, ioc->pd_handles); + + sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { + _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); + sas_device_put(sas_device); + return; + } + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) + mpt3sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); + + _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum); + _scsih_add_device(ioc, handle, 0, 1); +} + +/** + * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrConfigChangeList_t *event_data) +{ + Mpi2EventIrConfigElement_t *element; + u8 element_type; + int i; + char *reason_str = NULL, *element_str = NULL; + + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + + pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n", + ioc->name, (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
+ "foreign" : "native", event_data->NumElements); + for (i = 0; i < event_data->NumElements; i++, element++) { + switch (element->ReasonCode) { + case MPI2_EVENT_IR_CHANGE_RC_ADDED: + reason_str = "add"; + break; + case MPI2_EVENT_IR_CHANGE_RC_REMOVED: + reason_str = "remove"; + break; + case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE: + reason_str = "no change"; + break; + case MPI2_EVENT_IR_CHANGE_RC_HIDE: + reason_str = "hide"; + break; + case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: + reason_str = "unhide"; + break; + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + reason_str = "volume_created"; + break; + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + reason_str = "volume_deleted"; + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: + reason_str = "pd_created"; + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: + reason_str = "pd_deleted"; + break; + default: + reason_str = "unknown reason"; + break; + } + element_type = le16_to_cpu(element->ElementFlags) & + MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK; + switch (element_type) { + case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT: + element_str = "volume"; + break; + case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT: + element_str = "phys disk"; + break; + case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT: + element_str = "hot spare"; + break; + default: + element_str = "unknown element"; + break; + } + pr_info("\t(%s:%s), vol handle(0x%04x), " \ + "pd handle(0x%04x), pd num(0x%02x)\n", element_str, + reason_str, le16_to_cpu(element->VolDevHandle), + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } +} + +/** + * _scsih_sas_ir_config_change_event - handle ir configuration change events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi2EventIrConfigElement_t *element; + int i; + u8 foreign_config; + Mpi2EventDataIrConfigChangeList_t *event_data = + (Mpi2EventDataIrConfigChangeList_t *) + fw_event->event_data; + + if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && + (!ioc->hide_ir_msg)) + _scsih_sas_ir_config_change_event_debug(ioc, event_data); + + foreign_config = (le32_to_cpu(event_data->Flags) & + MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 
1 : 0; + + element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; + if (ioc->shost_recovery && + ioc->hba_mpi_version_belonged != MPI2_VERSION) { + for (i = 0; i < event_data->NumElements; i++, element++) { + if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE) + _scsih_ir_fastpath(ioc, + le16_to_cpu(element->PhysDiskDevHandle), + element->PhysDiskNum); + } + return; + } + + for (i = 0; i < event_data->NumElements; i++, element++) { + + switch (element->ReasonCode) { + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED: + case MPI2_EVENT_IR_CHANGE_RC_ADDED: + if (!foreign_config) + _scsih_sas_volume_add(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED: + case MPI2_EVENT_IR_CHANGE_RC_REMOVED: + if (!foreign_config) + _scsih_sas_volume_delete(ioc, + le16_to_cpu(element->VolDevHandle)); + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED: + if (!ioc->is_warpdrive) + _scsih_sas_pd_hide(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED: + if (!ioc->is_warpdrive) + _scsih_sas_pd_expose(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_HIDE: + if (!ioc->is_warpdrive) + _scsih_sas_pd_add(ioc, element); + break; + case MPI2_EVENT_IR_CHANGE_RC_UNHIDE: + if (!ioc->is_warpdrive) + _scsih_sas_pd_delete(ioc, element); + break; + } + } +} + +/** + * _scsih_sas_ir_volume_event - IR volume event + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + u64 wwid; + unsigned long flags; + struct _raid_device *raid_device; + u16 handle; + u32 state; + int rc; + Mpi2EventDataIrVolume_t *event_data = + (Mpi2EventDataIrVolume_t *) fw_event->event_data; + + if (ioc->shost_recovery) + return; + + if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED) + return; + + handle = le16_to_cpu(event_data->VolDevHandle); + state = le32_to_cpu(event_data->NewValue); + if (!ioc->hide_ir_msg) + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", + ioc->name, __func__, handle, + le32_to_cpu(event_data->PreviousValue), state)); + switch (state) { + case MPI2_RAID_VOL_STATE_MISSING: + case MPI2_RAID_VOL_STATE_FAILED: + _scsih_sas_volume_delete(ioc, handle); + break; + + case MPI2_RAID_VOL_STATE_ONLINE: + case MPI2_RAID_VOL_STATE_DEGRADED: + case MPI2_RAID_VOL_STATE_OPTIMAL: + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + + if (raid_device) + break; + + mpt3sas_config_get_volume_wwid(ioc, handle, &wwid); + if (!wwid) { + pr_err(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + break; + } + + raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL); + if (!raid_device) { + pr_err(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", ioc->name, + __FILE__, __LINE__, __func__); + break; + } + + raid_device->id = ioc->sas_id++; + raid_device->channel = RAID_CHANNEL; + raid_device->handle = handle; + raid_device->wwid = wwid; + _scsih_raid_device_add(ioc, raid_device); + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + break; + + case MPI2_RAID_VOL_STATE_INITIALIZING: + default: + break; + } +} + +/** + * _scsih_sas_ir_physical_disk_event - PD event + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: 
user. + * + * Return nothing. + */ +static void +_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + u16 handle, parent_handle; + u32 state; + struct _sas_device *sas_device; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasDevicePage0_t sas_device_pg0; + u32 ioc_status; + Mpi2EventDataIrPhysicalDisk_t *event_data = + (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data; + u64 sas_address; + + if (ioc->shost_recovery) + return; + + if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED) + return; + + handle = le16_to_cpu(event_data->PhysDiskDevHandle); + state = le32_to_cpu(event_data->NewValue); + + if (!ioc->hide_ir_msg) + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n", + ioc->name, __func__, handle, + le32_to_cpu(event_data->PreviousValue), state)); + + switch (state) { + case MPI2_RAID_PD_STATE_ONLINE: + case MPI2_RAID_PD_STATE_DEGRADED: + case MPI2_RAID_PD_STATE_REBUILDING: + case MPI2_RAID_PD_STATE_OPTIMAL: + case MPI2_RAID_PD_STATE_HOT_SPARE: + + if (!ioc->is_warpdrive) + set_bit(handle, ioc->pd_handles); + + sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { + sas_device_put(sas_device); + return; + } + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) + mpt3sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); + + _scsih_add_device(ioc, handle, 0, 1); + + break; + + case MPI2_RAID_PD_STATE_OFFLINE: + case MPI2_RAID_PD_STATE_NOT_CONFIGURED: + case MPI2_RAID_PD_STATE_NOT_COMPATIBLE: + default: + break; + } +} + +/** + * _scsih_sas_ir_operation_status_event_debug - debug for IR op event + * @ioc: per adapter object + * @event_data: event data payload + * Context: user. + * + * Return nothing. + */ +static void +_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc, + Mpi2EventDataIrOperationStatus_t *event_data) +{ + char *reason_str = NULL; + + switch (event_data->RAIDOperation) { + case MPI2_EVENT_IR_RAIDOP_RESYNC: + reason_str = "resync"; + break; + case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION: + reason_str = "online capacity expansion"; + break; + case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK: + reason_str = "consistency check"; + break; + case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT: + reason_str = "background init"; + break; + case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT: + reason_str = "make data consistent"; + break; + } + + if (!reason_str) + return; + + pr_info(MPT3SAS_FMT "raid operational status: (%s)" \ + "\thandle(0x%04x), percent complete(%d)\n", + ioc->name, reason_str, + le16_to_cpu(event_data->VolDevHandle), + event_data->PercentComplete); +} + +/** + * _scsih_sas_ir_operation_status_event - handle RAID operation events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. 
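+ *
+ * For MPI2_EVENT_IR_RAIDOP_RESYNC the handler also caches the volume's
+ * PercentComplete in the matching _raid_device (under raid_device_lock)
+ * so the raid transport support can report resync progress.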
+ */ +static void +_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc, + struct fw_event_work *fw_event) +{ + Mpi2EventDataIrOperationStatus_t *event_data = + (Mpi2EventDataIrOperationStatus_t *) + fw_event->event_data; + static struct _raid_device *raid_device; + unsigned long flags; + u16 handle; + + if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) && + (!ioc->hide_ir_msg)) + _scsih_sas_ir_operation_status_event_debug(ioc, + event_data); + + /* code added for raid transport support */ + if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) { + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + handle = le16_to_cpu(event_data->VolDevHandle); + raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle); + if (raid_device) + raid_device->percent_complete = + event_data->PercentComplete; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + } +} + +/** + * _scsih_prep_device_scan - initialize parameters prior to device scan + * @ioc: per adapter object + * + * Set the deleted flag prior to device scan. If the device is found during + * the scan, then we clear the deleted flag. + */ +static void +_scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc) +{ + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct scsi_device *sdev; + + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (sas_device_priv_data && sas_device_priv_data->sas_target) + sas_device_priv_data->sas_target->deleted = 1; + } +} + +/** + * _scsih_mark_responding_sas_device - mark a sas_device as responding + * @ioc: per adapter object + * @sas_device_pg0: SAS Device page 0 + * + * After host reset, find out whether devices are still responding. + * Used in _scsih_remove_unresponding_sas_devices. + * + * Return nothing. 
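+ *
+ * A device is matched on both SASAddress and Slot; on a match the
+ * responding flag is set, tm_busy/deleted are cleared on the target,
+ * and a firmware handle that changed across the reset is re-learned
+ * in place.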
+ */ +static void +_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, +Mpi2SasDevicePage0_t *sas_device_pg0) +{ + struct MPT3SAS_TARGET *sas_target_priv_data = NULL; + struct scsi_target *starget; + struct _sas_device *sas_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry(sas_device, &ioc->sas_device_list, list) { + if ((sas_device->sas_address == sas_device_pg0->SASAddress) && + (sas_device->slot == sas_device_pg0->Slot)) { + sas_device->responding = 1; + starget = sas_device->starget; + if (starget && starget->hostdata) { + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->tm_busy = 0; + sas_target_priv_data->deleted = 0; + } else + sas_target_priv_data = NULL; + if (starget) { + starget_printk(KERN_INFO, starget, + "handle(0x%04x), sas_addr(0x%016llx)\n", + sas_device_pg0->DevHandle, + (unsigned long long) + sas_device->sas_address); + + if (sas_device->enclosure_handle != 0) + starget_printk(KERN_INFO, starget, + "enclosure logical id(0x%016llx)," + " slot(%d)\n", + (unsigned long long) + sas_device->enclosure_logical_id, + sas_device->slot); + } + if (sas_device_pg0->Flags & + MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) { + sas_device->enclosure_level = + le16_to_cpu(sas_device_pg0->EnclosureLevel); + memcpy(&sas_device->connector_name[0], + &sas_device_pg0->ConnectorName[0], 4); + } else { + sas_device->enclosure_level = 0; + sas_device->connector_name[0] = '\0'; + } + + if (sas_device->handle == sas_device_pg0->DevHandle) + goto out; + pr_info("\thandle changed from(0x%04x)!!!\n", + sas_device->handle); + sas_device->handle = sas_device_pg0->DevHandle; + if (sas_target_priv_data) + sas_target_priv_data->handle = + sas_device_pg0->DevHandle; + goto out; + } + } + out: + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +/** + * _scsih_search_responding_sas_devices - search for responding end devices + * @ioc: per adapter object + * + * After host reset, find out whether devices are still responding. + * If not remove. + * + * Return nothing. + */ +static void +_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + u16 handle; + u32 device_info; + + pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name); + + if (list_empty(&ioc->sas_device_list)) + goto out; + + handle = 0xFFFF; + while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + break; + handle = sas_device_pg0.DevHandle = + le16_to_cpu(sas_device_pg0.DevHandle); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + if (!(_scsih_is_end_device(device_info))) + continue; + sas_device_pg0.SASAddress = + le64_to_cpu(sas_device_pg0.SASAddress); + sas_device_pg0.Slot = le16_to_cpu(sas_device_pg0.Slot); + _scsih_mark_responding_sas_device(ioc, &sas_device_pg0); + } + + out: + pr_info(MPT3SAS_FMT "search for end-devices: complete\n", + ioc->name); +} + +/** + * _scsih_mark_responding_raid_device - mark a raid_device as responding + * @ioc: per adapter object + * @wwid: world wide identifier for raid volume + * @handle: device handle + * + * After host reset, find out whether devices are still responding. + * Used in _scsih_remove_unresponding_sas_devices. + * + * Return nothing. 
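+ *
+ * Volumes are matched by WWID; a handle that changed across the reset
+ * is re-learned, and the WarpDrive direct-I/O properties are
+ * re-initialized since the PD handles may have changed as well.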
+ */ +static void +_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid, + u16 handle) +{ + struct MPT3SAS_TARGET *sas_target_priv_data = NULL; + struct scsi_target *starget; + struct _raid_device *raid_device; + unsigned long flags; + + spin_lock_irqsave(&ioc->raid_device_lock, flags); + list_for_each_entry(raid_device, &ioc->raid_device_list, list) { + if (raid_device->wwid == wwid && raid_device->starget) { + starget = raid_device->starget; + if (starget && starget->hostdata) { + sas_target_priv_data = starget->hostdata; + sas_target_priv_data->deleted = 0; + } else + sas_target_priv_data = NULL; + raid_device->responding = 1; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + starget_printk(KERN_INFO, raid_device->starget, + "handle(0x%04x), wwid(0x%016llx)\n", handle, + (unsigned long long)raid_device->wwid); + + /* + * WARPDRIVE: The handles of the PDs might have changed + * across the host reset so re-initialize the + * required data for Direct IO + */ + mpt3sas_init_warpdrive_properties(ioc, raid_device); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + if (raid_device->handle == handle) { + spin_unlock_irqrestore(&ioc->raid_device_lock, + flags); + return; + } + pr_info("\thandle changed from(0x%04x)!!!\n", + raid_device->handle); + raid_device->handle = handle; + if (sas_target_priv_data) + sas_target_priv_data->handle = handle; + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + return; + } + } + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); +} + +/** + * _scsih_search_responding_raid_devices - search for responding raid volumes + * @ioc: per adapter object + * + * After host reset, find out whether devices are still responding. + * If not remove. + * + * Return nothing. + */ +static void +_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2RaidVolPage1_t volume_pg1; + Mpi2RaidVolPage0_t volume_pg0; + Mpi2RaidPhysDiskPage0_t pd_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + u16 handle; + u8 phys_disk_num; + + if (!ioc->ir_firmware) + return; + + pr_info(MPT3SAS_FMT "search for raid volumes: start\n", + ioc->name); + + if (list_empty(&ioc->raid_device_list)) + goto out; + + handle = 0xFFFF; + while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + break; + handle = le16_to_cpu(volume_pg1.DevHandle); + + if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, + &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + sizeof(Mpi2RaidVolPage0_t))) + continue; + + if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || + volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || + volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) + _scsih_mark_responding_raid_device(ioc, + le64_to_cpu(volume_pg1.WWID), handle); + } + + /* refresh the pd_handles */ + if (!ioc->is_warpdrive) { + phys_disk_num = 0xFF; + memset(ioc->pd_handles, 0, ioc->pd_handles_sz); + while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, + phys_disk_num))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + break; + phys_disk_num = pd_pg0.PhysDiskNum; + handle = le16_to_cpu(pd_pg0.DevHandle); + set_bit(handle, ioc->pd_handles); + } + } + out: + pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n", + ioc->name); +} + +/** + * 
_scsih_mark_responding_expander - mark an expander as responding + * @ioc: per adapter object + * @sas_address: sas address + * @handle: expander device handle + * + * After host reset, find out whether devices are still responding. + * Used in _scsih_remove_unresponding_sas_devices. + * + * Return nothing. + */ +static void +_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + u16 handle) +{ + struct _sas_node *sas_expander; + unsigned long flags; + int i; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) { + if (sas_expander->sas_address != sas_address) + continue; + sas_expander->responding = 1; + if (sas_expander->handle == handle) + goto out; + pr_info("\texpander(0x%016llx): handle changed" \ + " from(0x%04x) to (0x%04x)!!!\n", + (unsigned long long)sas_expander->sas_address, + sas_expander->handle, handle); + sas_expander->handle = handle; + for (i = 0 ; i < sas_expander->num_phys ; i++) + sas_expander->phy[i].handle = handle; + goto out; + } + out: + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); +} + +/** + * _scsih_search_responding_expanders - search for responding expanders + * @ioc: per adapter object + * + * After host reset, find out whether devices are still responding. + * If not remove. + * + * Return nothing. + */ +static void +_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ExpanderPage0_t expander_pg0; + Mpi2ConfigReply_t mpi_reply; + u16 ioc_status; + u64 sas_address; + u16 handle; + + pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name); + + if (list_empty(&ioc->sas_expander_list)) + goto out; + + handle = 0xFFFF; + while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, + MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) + break; + + handle = le16_to_cpu(expander_pg0.DevHandle); + sas_address = le64_to_cpu(expander_pg0.SASAddress); + pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n", + handle, + (unsigned long long)sas_address); + _scsih_mark_responding_expander(ioc, sas_address, handle); + } + + out: + pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name); +} + +/** + * _scsih_remove_unresponding_sas_devices - removing unresponding devices + * @ioc: per adapter object + * + * Return nothing. + */ +static void +_scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc) +{ + struct _sas_device *sas_device, *sas_device_next; + struct _sas_node *sas_expander, *sas_expander_next; + struct _raid_device *raid_device, *raid_device_next; + struct list_head tmp_list; + unsigned long flags; + LIST_HEAD(head); + + pr_info(MPT3SAS_FMT "removing unresponding devices: start\n", + ioc->name); + + /* removing unresponding end devices */ + pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n", + ioc->name); + /* + * Iterate, pulling off devices marked as non-responding. We become the + * owner for the reference the list had on any object we prune. + */ + spin_lock_irqsave(&ioc->sas_device_lock, flags); + list_for_each_entry_safe(sas_device, sas_device_next, + &ioc->sas_device_list, list) { + if (!sas_device->responding) + list_move_tail(&sas_device->list, &head); + else + sas_device->responding = 0; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + /* + * Now, uninitialize and remove the unresponding devices we pruned. 
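+ * Each entry moved onto the local list still carries the reference the
+ * global list held on it, so it is released with sas_device_put() once
+ * _scsih_remove_device() has finished with the device.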
+ */ + list_for_each_entry_safe(sas_device, sas_device_next, &head, list) { + _scsih_remove_device(ioc, sas_device); + list_del_init(&sas_device->list); + sas_device_put(sas_device); + } + + /* removing unresponding volumes */ + if (ioc->ir_firmware) { + pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n", + ioc->name); + list_for_each_entry_safe(raid_device, raid_device_next, + &ioc->raid_device_list, list) { + if (!raid_device->responding) + _scsih_sas_volume_delete(ioc, + raid_device->handle); + else + raid_device->responding = 0; + } + } + + /* removing unresponding expanders */ + pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n", + ioc->name); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + INIT_LIST_HEAD(&tmp_list); + list_for_each_entry_safe(sas_expander, sas_expander_next, + &ioc->sas_expander_list, list) { + if (!sas_expander->responding) + list_move_tail(&sas_expander->list, &tmp_list); + else + sas_expander->responding = 0; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list, + list) { + list_del(&sas_expander->list); + _scsih_expander_node_remove(ioc, sas_expander); + } + + pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n", + ioc->name); + + /* unblock devices */ + _scsih_ublock_io_all_device(ioc); +} + +static void +_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander, u16 handle) +{ + Mpi2ExpanderPage1_t expander_pg1; + Mpi2ConfigReply_t mpi_reply; + int i; + + for (i = 0 ; i < sas_expander->num_phys ; i++) { + if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply, + &expander_pg1, i, handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return; + } + + mpt3sas_transport_update_links(ioc, sas_expander->sas_address, + le16_to_cpu(expander_pg1.AttachedDevHandle), i, + expander_pg1.NegotiatedLinkRate >> 4); + } +} + +/** + * _scsih_scan_for_devices_after_reset - scan for devices after host reset + * @ioc: per adapter object + * + * Return nothing. 
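+ *
+ * Devices are re-enumerated from the firmware config pages in a fixed
+ * order: expanders first, then (only with IR firmware) phys disks and
+ * volumes, and finally plain SAS end devices; anything already known
+ * to the driver is skipped.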
+ */ +static void +_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2ExpanderPage0_t expander_pg0; + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2RaidVolPage1_t volume_pg1; + Mpi2RaidVolPage0_t volume_pg0; + Mpi2RaidPhysDiskPage0_t pd_pg0; + Mpi2EventIrConfigElement_t element; + Mpi2ConfigReply_t mpi_reply; + u8 phys_disk_num; + u16 ioc_status; + u16 handle, parent_handle; + u64 sas_address; + struct _sas_device *sas_device; + struct _sas_node *expander_device; + static struct _raid_device *raid_device; + u8 retry_count; + unsigned long flags; + + pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name); + + _scsih_sas_host_refresh(ioc); + + pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name); + + /* expanders */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0, + MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \ + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(expander_pg0.DevHandle); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + expander_device = mpt3sas_scsih_expander_find_by_sas_address( + ioc, le64_to_cpu(expander_pg0.SASAddress)); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (expander_device) + _scsih_refresh_expander_links(ioc, expander_device, + handle); + else { + pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \ + "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, + handle, (unsigned long long) + le64_to_cpu(expander_pg0.SASAddress)); + _scsih_expander_add(ioc, handle); + pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \ + "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, + handle, (unsigned long long) + le64_to_cpu(expander_pg0.SASAddress)); + } + } + + pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n", + ioc->name); + + if (!ioc->ir_firmware) + goto skip_to_sas; + + pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name); + + /* phys disk */ + phys_disk_num = 0xFF; + while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply, + &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM, + phys_disk_num))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\ + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + phys_disk_num = pd_pg0.PhysDiskNum; + handle = le16_to_cpu(pd_pg0.DevHandle); + sas_device = mpt3sas_get_sdev_by_handle(ioc, handle); + if (sas_device) { + sas_device_put(sas_device); + continue; + } + if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + handle) != 0) + continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \ + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if (!_scsih_get_sas_address(ioc, parent_handle, + &sas_address)) { + pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \ + " handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + 
le64_to_cpu(sas_device_pg0.SASAddress)); + mpt3sas_transport_update_links(ioc, sas_address, + handle, sas_device_pg0.PhyNum, + MPI2_SAS_NEG_LINK_RATE_1_5); + set_bit(handle, ioc->pd_handles); + retry_count = 0; + /* This will retry adding the end device. + * _scsih_add_device() will decide on retries and + * return "1" when it should be retried + */ + while (_scsih_add_device(ioc, handle, retry_count++, + 1)) { + ssleep(1); + } + pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \ + " handle (0x%04x), sas_addr(0x%016llx)\n", + ioc->name, handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + } + } + + pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n", + ioc->name); + + pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name); + + /* volumes */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply, + &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \ + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(volume_pg1.DevHandle); + spin_lock_irqsave(&ioc->raid_device_lock, flags); + raid_device = _scsih_raid_device_find_by_wwid(ioc, + le64_to_cpu(volume_pg1.WWID)); + spin_unlock_irqrestore(&ioc->raid_device_lock, flags); + if (raid_device) + continue; + if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, + &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle, + sizeof(Mpi2RaidVolPage0_t))) + continue; + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \ + "ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL || + volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE || + volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) { + memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t)); + element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED; + element.VolDevHandle = volume_pg1.DevHandle; + pr_info(MPT3SAS_FMT + "\tBEFORE adding volume: handle (0x%04x)\n", + ioc->name, volume_pg1.DevHandle); + _scsih_sas_volume_add(ioc, &element); + pr_info(MPT3SAS_FMT + "\tAFTER adding volume: handle (0x%04x)\n", + ioc->name, volume_pg1.DevHandle); + } + } + + pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n", + ioc->name); + + skip_to_sas: + + pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n", + ioc->name); + + /* sas devices */ + handle = 0xFFFF; + while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, + &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, + handle))) { + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\ + " ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, ioc_status, + le32_to_cpu(mpi_reply.IOCLogInfo)); + break; + } + handle = le16_to_cpu(sas_device_pg0.DevHandle); + if (!(_scsih_is_end_device( + le32_to_cpu(sas_device_pg0.DeviceInfo)))) + continue; + sas_device = mpt3sas_get_sdev_by_addr(ioc, + le64_to_cpu(sas_device_pg0.SASAddress)); + if (sas_device) { + sas_device_put(sas_device); + continue; + } + parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle); + if 
(!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) { + pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \ + "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, + handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + mpt3sas_transport_update_links(ioc, sas_address, handle, + sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5); + retry_count = 0; + /* This will retry adding the end device. + * _scsih_add_device() will decide on retries and + * return "1" when it should be retried + */ + while (_scsih_add_device(ioc, handle, retry_count++, + 0)) { + ssleep(1); + } + pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \ + "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name, + handle, (unsigned long long) + le64_to_cpu(sas_device_pg0.SASAddress)); + } + } + pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n", + ioc->name); + + pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name); +} +/** + * mpt3sas_scsih_reset_handler - reset callback handler (for scsih) + * @ioc: per adapter object + * @reset_phase: phase + * + * The handler for doing any required cleanup or initialization. + * + * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET, + * MPT3_IOC_DONE_RESET + * + * Return nothing. + */ +void +mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase) +{ + switch (reset_phase) { + case MPT3_IOC_PRE_RESET: + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__)); + break; + case MPT3_IOC_AFTER_RESET: + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__)); + if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) { + ioc->scsih_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid); + complete(&ioc->scsih_cmds.done); + } + if (ioc->tm_cmds.status & MPT3_CMD_PENDING) { + ioc->tm_cmds.status |= MPT3_CMD_RESET; + mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid); + complete(&ioc->tm_cmds.done); + } + + _scsih_fw_event_cleanup_queue(ioc); + _scsih_flush_running_cmds(ioc); + break; + case MPT3_IOC_DONE_RESET: + dtmprintk(ioc, pr_info(MPT3SAS_FMT + "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__)); + if ((!ioc->is_driver_loading) && !(disable_discovery > 0 && + !ioc->sas_hba.num_phys)) { + _scsih_prep_device_scan(ioc); + _scsih_search_responding_sas_devices(ioc); + _scsih_search_responding_raid_devices(ioc); + _scsih_search_responding_expanders(ioc); + _scsih_error_recovery_delete_devices(ioc); + } + break; + } +} + +/** + * _mpt3sas_fw_work - delayed task for processing firmware events + * @ioc: per adapter object + * @fw_event: The fw_event_work object + * Context: user. + * + * Return nothing. + */ +static void +_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) +{ + _scsih_fw_event_del_from_list(ioc, fw_event); + + /* the queue is being flushed so ignore this event */ + if (ioc->remove_host || ioc->pci_error_recovery) { + fw_event_work_put(fw_event); + return; + } + + switch (fw_event->event) { + case MPT3SAS_PROCESS_TRIGGER_DIAG: + mpt3sas_process_trigger_data(ioc, + (struct SL_WH_TRIGGERS_EVENT_DATA_T *) + fw_event->event_data); + break; + case MPT3SAS_REMOVE_UNRESPONDING_DEVICES: + while (scsi_host_in_recovery(ioc->shost) || + ioc->shost_recovery) { + /* + * If we're unloading, bail. Otherwise, this can become + * an infinite loop. 
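+ *
+ * The ssleep(1) below simply polls until both SCSI EH and our own
+ * shost_recovery flag have cleared.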
+ */ + if (ioc->remove_host) + goto out; + ssleep(1); + } + _scsih_remove_unresponding_sas_devices(ioc); + _scsih_scan_for_devices_after_reset(ioc); + break; + case MPT3SAS_PORT_ENABLE_COMPLETE: + ioc->start_scan = 0; + if (missing_delay[0] != -1 && missing_delay[1] != -1) + mpt3sas_base_update_missing_delay(ioc, missing_delay[0], + missing_delay[1]); + dewtprintk(ioc, pr_info(MPT3SAS_FMT + "port enable: complete from worker thread\n", + ioc->name)); + break; + case MPT3SAS_TURN_ON_PFA_LED: + _scsih_turn_on_pfa_led(ioc, fw_event->device_handle); + break; + case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + _scsih_sas_topology_change_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: + _scsih_sas_device_status_change_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_DISCOVERY: + _scsih_sas_discovery_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: + _scsih_sas_broadcast_primitive_event(ioc, fw_event); + break; + case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + _scsih_sas_enclosure_dev_status_change_event(ioc, + fw_event); + break; + case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: + _scsih_sas_ir_config_change_event(ioc, fw_event); + break; + case MPI2_EVENT_IR_VOLUME: + _scsih_sas_ir_volume_event(ioc, fw_event); + break; + case MPI2_EVENT_IR_PHYSICAL_DISK: + _scsih_sas_ir_physical_disk_event(ioc, fw_event); + break; + case MPI2_EVENT_IR_OPERATION_STATUS: + _scsih_sas_ir_operation_status_event(ioc, fw_event); + break; + } +out: + fw_event_work_put(fw_event); +} + +/** + * _firmware_event_work - wrapper for the work thread handling firmware events + * @work: The fw_event_work object + * Context: user. + * + * Return nothing. + */ + +static void +_firmware_event_work(struct work_struct *work) +{ + struct fw_event_work *fw_event = container_of(work, + struct fw_event_work, work); + + _mpt3sas_fw_work(fw_event->ioc, fw_event); +} + +/** + * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time) + * @ioc: per adapter object + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame (lower 32bit addr) + * Context: interrupt. + * + * This function merely adds a new work task into ioc->firmware_event_thread. + * The tasks are worked from _firmware_event_work in user context. + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
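+ *
+ * Time-critical checks (topology deletes, IR config unhides, volume
+ * deletes, broadcast primitives) are peeked at right here in interrupt
+ * context; every other handled event is just copied into the work item.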
+ */ +u8 +mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, + u32 reply) +{ + struct fw_event_work *fw_event; + Mpi2EventNotificationReply_t *mpi_reply; + u16 event; + u16 sz; + + /* events turned off due to host reset or driver unloading */ + if (ioc->remove_host || ioc->pci_error_recovery) + return 1; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + + if (unlikely(!mpi_reply)) { + pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 1; + } + + event = le16_to_cpu(mpi_reply->Event); + + if (event != MPI2_EVENT_LOG_ENTRY_ADDED) + mpt3sas_trigger_event(ioc, event, 0); + + switch (event) { + /* handle these */ + case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE: + { + Mpi2EventDataSasBroadcastPrimitive_t *baen_data = + (Mpi2EventDataSasBroadcastPrimitive_t *) + mpi_reply->EventData; + + if (baen_data->Primitive != + MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT) + return 1; + + if (ioc->broadcast_aen_busy) { + ioc->broadcast_aen_pending++; + return 1; + } else + ioc->broadcast_aen_busy = 1; + break; + } + + case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST: + _scsih_check_topo_delete_events(ioc, + (Mpi2EventDataSasTopologyChangeList_t *) + mpi_reply->EventData); + break; + case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST: + _scsih_check_ir_config_unhide_events(ioc, + (Mpi2EventDataIrConfigChangeList_t *) + mpi_reply->EventData); + break; + case MPI2_EVENT_IR_VOLUME: + _scsih_check_volume_delete_events(ioc, + (Mpi2EventDataIrVolume_t *) + mpi_reply->EventData); + break; + case MPI2_EVENT_LOG_ENTRY_ADDED: + { + Mpi2EventDataLogEntryAdded_t *log_entry; + u32 *log_code; + + if (!ioc->is_warpdrive) + break; + + log_entry = (Mpi2EventDataLogEntryAdded_t *) + mpi_reply->EventData; + log_code = (u32 *)log_entry->LogData; + + if (le16_to_cpu(log_entry->LogEntryQualifier) + != MPT2_WARPDRIVE_LOGENTRY) + break; + + switch (le32_to_cpu(*log_code)) { + case MPT2_WARPDRIVE_LC_SSDT: + pr_warn(MPT3SAS_FMT "WarpDrive Warning: " + "IO Throttling has occurred in the WarpDrive " + "subsystem. Check WarpDrive documentation for " + "additional details.\n", ioc->name); + break; + case MPT2_WARPDRIVE_LC_SSDLW: + pr_warn(MPT3SAS_FMT "WarpDrive Warning: " + "Program/Erase Cycles for the WarpDrive subsystem " + "in degraded range. Check WarpDrive documentation " + "for additional details.\n", ioc->name); + break; + case MPT2_WARPDRIVE_LC_SSDLF: + pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: " + "There are no Program/Erase Cycles for the " + "WarpDrive subsystem. The storage device will be " + "in read-only mode. Check WarpDrive documentation " + "for additional details.\n", ioc->name); + break; + case MPT2_WARPDRIVE_LC_BRMF: + pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: " + "The Backup Rail Monitor has failed on the " + "WarpDrive subsystem. 
Check WarpDrive " + "documentation for additional details.\n", + ioc->name); + break; + } + + break; + } + case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: + case MPI2_EVENT_IR_OPERATION_STATUS: + case MPI2_EVENT_SAS_DISCOVERY: + case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE: + case MPI2_EVENT_IR_PHYSICAL_DISK: + break; + + case MPI2_EVENT_TEMP_THRESHOLD: + _scsih_temp_threshold_events(ioc, + (Mpi2EventDataTemperature_t *) + mpi_reply->EventData); + break; + + default: /* ignore the rest */ + return 1; + } + + sz = le16_to_cpu(mpi_reply->EventDataLength) * 4; + fw_event = alloc_fw_event_work(sz); + if (!fw_event) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return 1; + } + + memcpy(fw_event->event_data, mpi_reply->EventData, sz); + fw_event->ioc = ioc; + fw_event->VF_ID = mpi_reply->VF_ID; + fw_event->VP_ID = mpi_reply->VP_ID; + fw_event->event = event; + _scsih_fw_event_add(ioc, fw_event); + fw_event_work_put(fw_event); + return 1; +} + +/** + * _scsih_expander_node_remove - removing expander device from list. + * @ioc: per adapter object + * @sas_expander: the sas_device object + * Context: Calling function should acquire ioc->sas_node_lock. + * + * Removing object and freeing associated memory from the + * ioc->sas_expander_list. + * + * Return nothing. + */ +static void +_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_expander) +{ + struct _sas_port *mpt3sas_port, *next; + + /* remove sibling ports attached to this expander */ + list_for_each_entry_safe(mpt3sas_port, next, + &sas_expander->sas_port_list, port_list) { + if (ioc->shost_recovery) + return; + if (mpt3sas_port->remote_identify.device_type == + SAS_END_DEVICE) + mpt3sas_device_remove_by_sas_address(ioc, + mpt3sas_port->remote_identify.sas_address); + else if (mpt3sas_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + mpt3sas_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) + mpt3sas_expander_remove(ioc, + mpt3sas_port->remote_identify.sas_address); + } + + mpt3sas_transport_port_remove(ioc, sas_expander->sas_address, + sas_expander->sas_address_parent); + + pr_info(MPT3SAS_FMT + "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n", + ioc->name, + sas_expander->handle, (unsigned long long) + sas_expander->sas_address); + + kfree(sas_expander->phy); + kfree(sas_expander); +} + +/** + * _scsih_ir_shutdown - IR shutdown notification + * @ioc: per adapter object + * + * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that + * the host system is shutting down. + * + * Return nothing. + */ +static void +_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi2RaidActionRequest_t *mpi_request; + Mpi2RaidActionReply_t *mpi_reply; + u16 smid; + + /* is IR firmware build loaded ? */ + if (!ioc->ir_firmware) + return; + + /* are there any volumes ? 
*/ + if (list_empty(&ioc->raid_device_list)) + return; + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n", + ioc->name, __func__); + goto out; + } + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t)); + + mpi_request->Function = MPI2_FUNCTION_RAID_ACTION; + mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED; + + if (!ioc->hide_ir_msg) + pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name); + init_completion(&ioc->scsih_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); + + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + goto out; + } + + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + if (!ioc->hide_ir_msg) + pr_info(MPT3SAS_FMT "IR shutdown " + "(complete): ioc_status(0x%04x), loginfo(0x%08x)\n", + ioc->name, le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + } + + out: + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); +} + +/** + * scsih_remove - detach and remove the scsi host + * @pdev: PCI device struct + * + * Routine called when unloading the driver. + * Return nothing. + */ +void scsih_remove(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct _sas_port *mpt3sas_port, *next_port; + struct _raid_device *raid_device, *next; + struct MPT3SAS_TARGET *sas_target_priv_data; + struct workqueue_struct *wq; + unsigned long flags; + + ioc->remove_host = 1; + _scsih_fw_event_cleanup_queue(ioc); + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + wq = ioc->firmware_event_thread; + ioc->firmware_event_thread = NULL; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + + /* release all the volumes */ + _scsih_ir_shutdown(ioc); + list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list, + list) { + if (raid_device->starget) { + sas_target_priv_data = + raid_device->starget->hostdata; + sas_target_priv_data->deleted = 1; + scsi_remove_target(&raid_device->starget->dev); + } + pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n", + ioc->name, raid_device->handle, + (unsigned long long) raid_device->wwid); + _scsih_raid_device_remove(ioc, raid_device); + } + + /* free ports attached to the sas_host */ + list_for_each_entry_safe(mpt3sas_port, next_port, + &ioc->sas_hba.sas_port_list, port_list) { + if (mpt3sas_port->remote_identify.device_type == + SAS_END_DEVICE) + mpt3sas_device_remove_by_sas_address(ioc, + mpt3sas_port->remote_identify.sas_address); + else if (mpt3sas_port->remote_identify.device_type == + SAS_EDGE_EXPANDER_DEVICE || + mpt3sas_port->remote_identify.device_type == + SAS_FANOUT_EXPANDER_DEVICE) + mpt3sas_expander_remove(ioc, + mpt3sas_port->remote_identify.sas_address); + } + + /* free phys attached to the sas_host */ + if (ioc->sas_hba.num_phys) { + kfree(ioc->sas_hba.phy); + ioc->sas_hba.phy = NULL; + ioc->sas_hba.num_phys = 0; + } + + 
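/* remove the sas transport objects before tearing down the Scsi_Host */ +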
sas_remove_host(shost); + scsi_remove_host(shost); + mpt3sas_base_detach(ioc); + spin_lock(&gioc_lock); + list_del(&ioc->list); + spin_unlock(&gioc_lock); + scsi_host_put(shost); +} + +/** + * scsih_shutdown - routine called during system shutdown + * @pdev: PCI device struct + * + * Return nothing. + */ +void +scsih_shutdown(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct workqueue_struct *wq; + unsigned long flags; + + ioc->remove_host = 1; + _scsih_fw_event_cleanup_queue(ioc); + + spin_lock_irqsave(&ioc->fw_event_lock, flags); + wq = ioc->firmware_event_thread; + ioc->firmware_event_thread = NULL; + spin_unlock_irqrestore(&ioc->fw_event_lock, flags); + if (wq) + destroy_workqueue(wq); + + _scsih_ir_shutdown(ioc); + mpt3sas_base_detach(ioc); +} + + +/** + * _scsih_probe_boot_devices - reports 1st device + * @ioc: per adapter object + * + * If specified in bios page 2, this routine reports the 1st + * device to either scsi-ml or the sas transport for persistent + * boot device purposes. Please refer to function + * _scsih_determine_boot_device() + */ +static void +_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc) +{ + u8 is_raid; + void *device; + struct _sas_device *sas_device; + struct _raid_device *raid_device; + u16 handle; + u64 sas_address_parent; + u64 sas_address; + unsigned long flags; + int rc; + + /* no Bios, return immediately */ + if (!ioc->bios_pg3.BiosVersion) + return; + + device = NULL; + is_raid = 0; + if (ioc->req_boot_device.device) { + device = ioc->req_boot_device.device; + is_raid = ioc->req_boot_device.is_raid; + } else if (ioc->req_alt_boot_device.device) { + device = ioc->req_alt_boot_device.device; + is_raid = ioc->req_alt_boot_device.is_raid; + } else if (ioc->current_boot_device.device) { + device = ioc->current_boot_device.device; + is_raid = ioc->current_boot_device.is_raid; + } + + if (!device) + return; + + if (is_raid) { + raid_device = device; + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + } else { + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = device; + handle = sas_device->handle; + sas_address_parent = sas_device->sas_address_parent; + sas_address = sas_device->sas_address; + list_move_tail(&sas_device->list, &ioc->sas_device_list); + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + if (ioc->hide_drives) + return; + if (!mpt3sas_transport_port_add(ioc, handle, + sas_address_parent)) { + _scsih_sas_device_remove(ioc, sas_device); + } else if (!sas_device->starget) { + if (!ioc->is_driver_loading) { + mpt3sas_transport_port_remove(ioc, + sas_address, + sas_address_parent); + _scsih_sas_device_remove(ioc, sas_device); + } + } + } +} + +/** + * _scsih_probe_raid - reporting raid volumes to scsi-ml + * @ioc: per adapter object + * + * Called during initial loading of the driver. 
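+ * Volumes that already have a starget attached are skipped; a volume
+ * whose scsi_add_device() fails is dropped from the internal list again.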
+ */ +static void +_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc) +{ + struct _raid_device *raid_device, *raid_next; + int rc; + + list_for_each_entry_safe(raid_device, raid_next, + &ioc->raid_device_list, list) { + if (raid_device->starget) + continue; + rc = scsi_add_device(ioc->shost, RAID_CHANNEL, + raid_device->id, 0); + if (rc) + _scsih_raid_device_remove(ioc, raid_device); + } +} + +static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc) +{ + struct _sas_device *sas_device = NULL; + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + if (!list_empty(&ioc->sas_device_init_list)) { + sas_device = list_first_entry(&ioc->sas_device_init_list, + struct _sas_device, list); + sas_device_get(sas_device); + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + + return sas_device; +} + +static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc, + struct _sas_device *sas_device) +{ + unsigned long flags; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + + /* + * Since we dropped the lock during the call to port_add(), we need to + * be careful here that somebody else didn't move or delete this item + * while we were busy with other things. + * + * If it was on the list, we need a put() for the reference the list + * had. Either way, we need a get() for the destination list. + */ + if (!list_empty(&sas_device->list)) { + list_del_init(&sas_device->list); + sas_device_put(sas_device); + } + + sas_device_get(sas_device); + list_add_tail(&sas_device->list, &ioc->sas_device_list); + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); +} + +/** + * _scsih_probe_sas - reporting sas devices to sas transport + * @ioc: per adapter object + * + * Called during initial loading of the driver. + */ +static void +_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc) +{ + struct _sas_device *sas_device; + + if (ioc->hide_drives) + return; + + while ((sas_device = get_next_sas_device(ioc))) { + if (!mpt3sas_transport_port_add(ioc, sas_device->handle, + sas_device->sas_address_parent)) { + _scsih_sas_device_remove(ioc, sas_device); + sas_device_put(sas_device); + continue; + } else if (!sas_device->starget) { + /* + * When async scanning is enabled, it's not possible to + * remove devices while scanning is turned on due to an + * oops in scsi_sysfs_add_sdev()->add_device()-> + * sysfs_addrm_start() + */ + if (!ioc->is_driver_loading) { + mpt3sas_transport_port_remove(ioc, + sas_device->sas_address, + sas_device->sas_address_parent); + _scsih_sas_device_remove(ioc, sas_device); + sas_device_put(sas_device); + continue; + } + } + sas_device_make_active(ioc, sas_device); + sas_device_put(sas_device); + } +} + +/** + * _scsih_probe_devices - probing for devices + * @ioc: per adapter object + * + * Called during initial loading of the driver. 
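+ * Boot devices are reported first; with IR firmware the RAID and SAS
+ * probes then run in an order chosen by the volume mapping mode in
+ * IOC Page 8 (RAID first for low volume mapping).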
+ */ +static void +_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc) +{ + u16 volume_mapping_flags; + + if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR)) + return; /* return when IOC doesn't support initiator mode */ + + _scsih_probe_boot_devices(ioc); + + if (ioc->ir_firmware) { + volume_mapping_flags = + le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) & + MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; + if (volume_mapping_flags == + MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { + _scsih_probe_raid(ioc); + _scsih_probe_sas(ioc); + } else { + _scsih_probe_sas(ioc); + _scsih_probe_raid(ioc); + } + } else + _scsih_probe_sas(ioc); +} + +/** + * scsih_scan_start - scsi lld callback for .scan_start + * @shost: SCSI host pointer + * + * The shost has the ability to discover targets on its own instead + * of scanning the entire bus. In our implementation, we will kick off + * firmware discovery. + */ +void +scsih_scan_start(struct Scsi_Host *shost) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + int rc; + if (diag_buffer_enable != -1 && diag_buffer_enable != 0) + mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable); + + if (disable_discovery > 0) + return; + + ioc->start_scan = 1; + rc = mpt3sas_port_enable(ioc); + + if (rc != 0) + pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name); +} + +/** + * scsih_scan_finished - scsi lld callback for .scan_finished + * @shost: SCSI host pointer + * @time: elapsed time of the scan in jiffies + * + * This function will be called periodically until it returns 1 with the + * scsi_host and the elapsed time of the scan in jiffies. In our + * implementation, we wait for firmware discovery to complete, then return 1. + */ +int +scsih_scan_finished(struct Scsi_Host *shost, unsigned long time) +{ + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + if (disable_discovery > 0) { + ioc->is_driver_loading = 0; + ioc->wait_for_discovery_to_complete = 0; + return 1; + } + + if (time >= (300 * HZ)) { + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + pr_info(MPT3SAS_FMT + "port enable: FAILED with timeout (timeout=300s)\n", + ioc->name); + ioc->is_driver_loading = 0; + return 1; + } + + if (ioc->start_scan) + return 0; + + if (ioc->start_scan_failed) { + pr_info(MPT3SAS_FMT + "port enable: FAILED with (ioc_status=0x%08x)\n", + ioc->name, ioc->start_scan_failed); + ioc->is_driver_loading = 0; + ioc->wait_for_discovery_to_complete = 0; + ioc->remove_host = 1; + return 1; + } + + pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name); + ioc->base_cmds.status = MPT3_CMD_NOT_USED; + + if (ioc->wait_for_discovery_to_complete) { + ioc->wait_for_discovery_to_complete = 0; + _scsih_probe_devices(ioc); + } + mpt3sas_base_start_watchdog(ioc); + ioc->is_driver_loading = 0; + return 1; +} + +/* shost template for SAS 2.0 HBA devices */ +static struct scsi_host_template mpt2sas_driver_template = { + .module = THIS_MODULE, + .name = "Fusion MPT SAS Host", + .proc_name = MPT2SAS_DRIVER_NAME, + .queuecommand = scsih_qcmd, + .target_alloc = scsih_target_alloc, + .slave_alloc = scsih_slave_alloc, + .slave_configure = scsih_slave_configure, + .target_destroy = scsih_target_destroy, + .slave_destroy = scsih_slave_destroy, + .scan_finished = scsih_scan_finished, + .scan_start = scsih_scan_start, + .change_queue_depth = scsih_change_queue_depth, + .eh_abort_handler = scsih_abort, + .eh_device_reset_handler = scsih_dev_reset, + .eh_target_reset_handler = scsih_target_reset, + .eh_host_reset_handler = scsih_host_reset, + .bios_param = scsih_bios_param, + .can_queue = 
1, + .this_id = -1, + .sg_tablesize = MPT2SAS_SG_DEPTH, + .max_sectors = 32767, + .cmd_per_lun = 7, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = mpt3sas_host_attrs, + .sdev_attrs = mpt3sas_dev_attrs, + .track_queue_depth = 1, +}; + +/* raid transport support for SAS 2.0 HBA devices */ +static struct raid_function_template mpt2sas_raid_functions = { + .cookie = &mpt2sas_driver_template, + .is_raid = scsih_is_raid, + .get_resync = scsih_get_resync, + .get_state = scsih_get_state, +}; + +/* shost template for SAS 3.0 HBA devices */ +static struct scsi_host_template mpt3sas_driver_template = { + .module = THIS_MODULE, + .name = "Fusion MPT SAS Host", + .proc_name = MPT3SAS_DRIVER_NAME, + .queuecommand = scsih_qcmd, + .target_alloc = scsih_target_alloc, + .slave_alloc = scsih_slave_alloc, + .slave_configure = scsih_slave_configure, + .target_destroy = scsih_target_destroy, + .slave_destroy = scsih_slave_destroy, + .scan_finished = scsih_scan_finished, + .scan_start = scsih_scan_start, + .change_queue_depth = scsih_change_queue_depth, + .eh_abort_handler = scsih_abort, + .eh_device_reset_handler = scsih_dev_reset, + .eh_target_reset_handler = scsih_target_reset, + .eh_host_reset_handler = scsih_host_reset, + .bios_param = scsih_bios_param, + .can_queue = 1, + .this_id = -1, + .sg_tablesize = MPT3SAS_SG_DEPTH, + .max_sectors = 32767, + .cmd_per_lun = 7, + .use_clustering = ENABLE_CLUSTERING, + .shost_attrs = mpt3sas_host_attrs, + .sdev_attrs = mpt3sas_dev_attrs, + .track_queue_depth = 1, +}; + +/* raid transport support for SAS 3.0 HBA devices */ +static struct raid_function_template mpt3sas_raid_functions = { + .cookie = &mpt3sas_driver_template, + .is_raid = scsih_is_raid, + .get_resync = scsih_get_resync, + .get_state = scsih_get_state, +}; + +/** + * _scsih_determine_hba_mpi_version - determine in which MPI version class + * this device belongs to. + * @pdev: PCI device struct + * + * return MPI2_VERSION for SAS 2.0 HBA devices, + * MPI25_VERSION for SAS 3.0 HBA devices. + */ +u16 +_scsih_determine_hba_mpi_version(struct pci_dev *pdev) +{ + + switch (pdev->device) { + case MPI2_MFGPAGE_DEVID_SSS6200: + case MPI2_MFGPAGE_DEVID_SAS2004: + case MPI2_MFGPAGE_DEVID_SAS2008: + case MPI2_MFGPAGE_DEVID_SAS2108_1: + case MPI2_MFGPAGE_DEVID_SAS2108_2: + case MPI2_MFGPAGE_DEVID_SAS2108_3: + case MPI2_MFGPAGE_DEVID_SAS2116_1: + case MPI2_MFGPAGE_DEVID_SAS2116_2: + case MPI2_MFGPAGE_DEVID_SAS2208_1: + case MPI2_MFGPAGE_DEVID_SAS2208_2: + case MPI2_MFGPAGE_DEVID_SAS2208_3: + case MPI2_MFGPAGE_DEVID_SAS2208_4: + case MPI2_MFGPAGE_DEVID_SAS2208_5: + case MPI2_MFGPAGE_DEVID_SAS2208_6: + case MPI2_MFGPAGE_DEVID_SAS2308_1: + case MPI2_MFGPAGE_DEVID_SAS2308_2: + case MPI2_MFGPAGE_DEVID_SAS2308_3: + return MPI2_VERSION; + case MPI25_MFGPAGE_DEVID_SAS3004: + case MPI25_MFGPAGE_DEVID_SAS3008: + case MPI25_MFGPAGE_DEVID_SAS3108_1: + case MPI25_MFGPAGE_DEVID_SAS3108_2: + case MPI25_MFGPAGE_DEVID_SAS3108_5: + case MPI25_MFGPAGE_DEVID_SAS3108_6: + return MPI25_VERSION; + } + return 0; +} + +/** + * _scsih_probe - attach and add scsi host + * @pdev: PCI device struct + * @id: pci device id + * + * Returns 0 success, anything else error. 
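+ *
+ * Selects the SAS 2.0 or SAS 3.0 host template from the PCI device id,
+ * allocates and initializes the Scsi_Host and adapter structure, and
+ * spawns the firmware event workqueue before attaching to the base
+ * driver and scanning the host.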
+ */ +int +_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct MPT3SAS_ADAPTER *ioc; + struct Scsi_Host *shost = NULL; + int rv; + u16 hba_mpi_version; + + /* Determine in which MPI version class this pci device belongs */ + hba_mpi_version = _scsih_determine_hba_mpi_version(pdev); + if (hba_mpi_version == 0) + return -ENODEV; + + /* Enumerate only SAS 2.0 HBA's if hbas_to_enumerate is one, + * for other generation HBA's return with -ENODEV + */ + if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION)) + return -ENODEV; + + /* Enumerate only SAS 3.0 HBA's if hbas_to_enumerate is two, + * for other generation HBA's return with -ENODEV + */ + if ((hbas_to_enumerate == 2) && (hba_mpi_version != MPI25_VERSION)) + return -ENODEV; + + switch (hba_mpi_version) { + case MPI2_VERSION: + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | + PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM); + /* Use mpt2sas driver host template for SAS 2.0 HBA's */ + shost = scsi_host_alloc(&mpt2sas_driver_template, + sizeof(struct MPT3SAS_ADAPTER)); + if (!shost) + return -ENODEV; + ioc = shost_priv(shost); + memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); + ioc->hba_mpi_version_belonged = hba_mpi_version; + ioc->id = mpt2_ids++; + sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME); + if (pdev->device == MPI2_MFGPAGE_DEVID_SSS6200) { + ioc->is_warpdrive = 1; + ioc->hide_ir_msg = 1; + } else + ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS; + break; + case MPI25_VERSION: + /* Use mpt3sas driver host template for SAS 3.0 HBA's */ + shost = scsi_host_alloc(&mpt3sas_driver_template, + sizeof(struct MPT3SAS_ADAPTER)); + if (!shost) + return -ENODEV; + ioc = shost_priv(shost); + memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER)); + ioc->hba_mpi_version_belonged = hba_mpi_version; + ioc->id = mpt3_ids++; + sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME); + if (pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) + ioc->msix96_vector = 1; + break; + default: + return -ENODEV; + } + + INIT_LIST_HEAD(&ioc->list); + spin_lock(&gioc_lock); + list_add_tail(&ioc->list, &mpt3sas_ioc_list); + spin_unlock(&gioc_lock); + ioc->shost = shost; + ioc->pdev = pdev; + ioc->scsi_io_cb_idx = scsi_io_cb_idx; + ioc->tm_cb_idx = tm_cb_idx; + ioc->ctl_cb_idx = ctl_cb_idx; + ioc->base_cb_idx = base_cb_idx; + ioc->port_enable_cb_idx = port_enable_cb_idx; + ioc->transport_cb_idx = transport_cb_idx; + ioc->scsih_cb_idx = scsih_cb_idx; + ioc->config_cb_idx = config_cb_idx; + ioc->tm_tr_cb_idx = tm_tr_cb_idx; + ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx; + ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; + ioc->logging_level = logging_level; + ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; + /* misc semaphores and spin locks */ + mutex_init(&ioc->reset_in_progress_mutex); + /* initializing pci_access_mutex lock */ + mutex_init(&ioc->pci_access_mutex); + spin_lock_init(&ioc->ioc_reset_in_progress_lock); + spin_lock_init(&ioc->scsi_lookup_lock); + spin_lock_init(&ioc->sas_device_lock); + spin_lock_init(&ioc->sas_node_lock); + spin_lock_init(&ioc->fw_event_lock); + spin_lock_init(&ioc->raid_device_lock); + spin_lock_init(&ioc->diag_trigger_lock); + + INIT_LIST_HEAD(&ioc->sas_device_list); + INIT_LIST_HEAD(&ioc->sas_device_init_list); + INIT_LIST_HEAD(&ioc->sas_expander_list); + INIT_LIST_HEAD(&ioc->fw_event_list); + INIT_LIST_HEAD(&ioc->raid_device_list); + INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list); + INIT_LIST_HEAD(&ioc->delayed_tr_list); + 
INIT_LIST_HEAD(&ioc->delayed_tr_volume_list); + INIT_LIST_HEAD(&ioc->reply_queue_list); + + sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id); + + /* init shost parameters */ + shost->max_cmd_len = 32; + shost->max_lun = max_lun; + shost->transportt = mpt3sas_transport_template; + shost->unique_id = ioc->id; + + if (max_sectors != 0xFFFF) { + if (max_sectors < 64) { + shost->max_sectors = 64; + pr_warn(MPT3SAS_FMT "Invalid value %d passed " \ + "for max_sectors, range is 64 to 32767. Assigning " + "value of 64.\n", ioc->name, max_sectors); + } else if (max_sectors > 32767) { + shost->max_sectors = 32767; + pr_warn(MPT3SAS_FMT "Invalid value %d passed " \ + "for max_sectors, range is 64 to 32767. Assigning " + "default value of 32767.\n", ioc->name, + max_sectors); + } else { + shost->max_sectors = max_sectors & 0xFFFE; + pr_info(MPT3SAS_FMT + "The max_sectors value is set to %d\n", + ioc->name, shost->max_sectors); + } + } + + /* register EEDP capabilities with SCSI layer */ + if (prot_mask > 0) + scsi_host_set_prot(shost, prot_mask); + else + scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION + | SHOST_DIF_TYPE2_PROTECTION + | SHOST_DIF_TYPE3_PROTECTION); + + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + + /* event thread */ + snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name), + "fw_event_%s%d", ioc->driver_name, ioc->id); + ioc->firmware_event_thread = alloc_ordered_workqueue( + ioc->firmware_event_name, 0); + if (!ioc->firmware_event_thread) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rv = -ENODEV; + goto out_thread_fail; + } + + ioc->is_driver_loading = 1; + if ((mpt3sas_base_attach(ioc))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + rv = -ENODEV; + goto out_attach_fail; + } + + if (ioc->is_warpdrive) { + if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) + ioc->hide_drives = 0; + else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS) + ioc->hide_drives = 1; + else { + if (mpt3sas_get_num_volumes(ioc)) + ioc->hide_drives = 1; + else + ioc->hide_drives = 0; + } + } else + ioc->hide_drives = 0; + + rv = scsi_add_host(shost, &pdev->dev); + if (rv) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_add_shost_fail; + } + + scsi_scan_host(shost); + return 0; +out_add_shost_fail: + mpt3sas_base_detach(ioc); + out_attach_fail: + destroy_workqueue(ioc->firmware_event_thread); + out_thread_fail: + spin_lock(&gioc_lock); + list_del(&ioc->list); + spin_unlock(&gioc_lock); + scsi_host_put(shost); + return rv; +} + +#ifdef CONFIG_PM +/** + * scsih_suspend - power management suspend main entry point + * @pdev: PCI device struct + * @state: PM state change to (usually PCI_D3) + * + * Returns 0 success, anything else error. 
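+ *
+ * Stops the watchdog, blocks incoming SCSI requests and frees the
+ * controller resources before the device is put into the requested
+ * power state.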
+ */ +int +scsih_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + pci_power_t device_state; + + mpt3sas_base_stop_watchdog(ioc); + flush_scheduled_work(); + scsi_block_requests(shost); + device_state = pci_choose_state(pdev, state); + pr_info(MPT3SAS_FMT + "pdev=0x%p, slot=%s, entering operating state [D%d]\n", + ioc->name, pdev, pci_name(pdev), device_state); + + pci_save_state(pdev); + mpt3sas_base_free_resources(ioc); + pci_set_power_state(pdev, device_state); + return 0; +} + +/** + * scsih_resume - power management resume main entry point + * @pdev: PCI device struct + * + * Returns 0 success, anything else error. + */ +int +scsih_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + pci_power_t device_state = pdev->current_state; + int r; + + pr_info(MPT3SAS_FMT + "pdev=0x%p, slot=%s, previous operating state [D%d]\n", + ioc->name, pdev, pci_name(pdev), device_state); + + pci_set_power_state(pdev, PCI_D0); + pci_enable_wake(pdev, PCI_D0, 0); + pci_restore_state(pdev); + ioc->pdev = pdev; + r = mpt3sas_base_map_resources(ioc); + if (r) + return r; + + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET); + scsi_unblock_requests(shost); + mpt3sas_base_start_watchdog(ioc); + return 0; +} +#endif /* CONFIG_PM */ + +/** + * scsih_pci_error_detected - Called when a PCI error is detected. + * @pdev: PCI device struct + * @state: PCI channel state + * + * Description: Called when a PCI error is detected. + * + * Return value: + * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT + */ +pci_ers_result_t +scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n", + ioc->name, state); + + switch (state) { + case pci_channel_io_normal: + return PCI_ERS_RESULT_CAN_RECOVER; + case pci_channel_io_frozen: + /* Fatal error, prepare for slot reset */ + ioc->pci_error_recovery = 1; + scsi_block_requests(ioc->shost); + mpt3sas_base_stop_watchdog(ioc); + mpt3sas_base_free_resources(ioc); + return PCI_ERS_RESULT_NEED_RESET; + case pci_channel_io_perm_failure: + /* Permanent error, prepare for device removal */ + ioc->pci_error_recovery = 1; + mpt3sas_base_stop_watchdog(ioc); + _scsih_flush_running_cmds(ioc); + return PCI_ERS_RESULT_DISCONNECT; + } + return PCI_ERS_RESULT_NEED_RESET; +} + +/** + * scsih_pci_slot_reset - Called when PCI slot has been reset. + * @pdev: PCI device struct + * + * Description: This routine is called by the pci error recovery + * code after the PCI slot has been reset, just before we + * should resume normal operations. + */ +pci_ers_result_t +scsih_pci_slot_reset(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + int rc; + + pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n", + ioc->name); + + ioc->pci_error_recovery = 0; + ioc->pdev = pdev; + pci_restore_state(pdev); + rc = mpt3sas_base_map_resources(ioc); + if (rc) + return PCI_ERS_RESULT_DISCONNECT; + + rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + + pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name, + (rc == 0) ? 
"success" : "failed"); + + if (!rc) + return PCI_ERS_RESULT_RECOVERED; + else + return PCI_ERS_RESULT_DISCONNECT; +} + +/** + * scsih_pci_resume() - resume normal ops after PCI reset + * @pdev: pointer to PCI device + * + * Called when the error recovery driver tells us that its + * OK to resume normal operation. Use completion to allow + * halted scsi ops to resume. + */ +void +scsih_pci_resume(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name); + + pci_cleanup_aer_uncorrect_error_status(pdev); + mpt3sas_base_start_watchdog(ioc); + scsi_unblock_requests(ioc->shost); +} + +/** + * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers + * @pdev: pointer to PCI device + */ +pci_ers_result_t +scsih_pci_mmio_enabled(struct pci_dev *pdev) +{ + struct Scsi_Host *shost = pci_get_drvdata(pdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n", + ioc->name); + + /* TODO - dump whatever for debugging purposes */ + + /* Request a slot reset. */ + return PCI_ERS_RESULT_NEED_RESET; +} + +/* + * The pci device ids are defined in mpi/mpi2_cnfg.h. + */ +static const struct pci_device_id mpt3sas_pci_table[] = { + /* Spitfire ~ 2004 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004, + PCI_ANY_ID, PCI_ANY_ID }, + /* Falcon ~ 2008 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008, + PCI_ANY_ID, PCI_ANY_ID }, + /* Liberator ~ 2108 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3, + PCI_ANY_ID, PCI_ANY_ID }, + /* Meteor ~ 2116 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2, + PCI_ANY_ID, PCI_ANY_ID }, + /* Thunderbolt ~ 2208 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6, + PCI_ANY_ID, PCI_ANY_ID }, + /* Mustang ~ 2308 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3, + PCI_ANY_ID, PCI_ANY_ID }, + /* SSS6200 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200, + PCI_ANY_ID, PCI_ANY_ID }, + /* Fury ~ 3004 and 3008 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008, + PCI_ANY_ID, PCI_ANY_ID }, + /* Invader ~ 3108 */ + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5, + PCI_ANY_ID, PCI_ANY_ID }, + { MPI2_MFGPAGE_VENDORID_LSI, 
MPI25_MFGPAGE_DEVID_SAS3108_6,
+ PCI_ANY_ID, PCI_ANY_ID },
+ {0} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
+
+static struct pci_error_handlers _mpt3sas_err_handler = {
+ .error_detected = scsih_pci_error_detected,
+ .mmio_enabled = scsih_pci_mmio_enabled,
+ .slot_reset = scsih_pci_slot_reset,
+ .resume = scsih_pci_resume,
+};
+
+static struct pci_driver mpt3sas_driver = {
+ .name = MPT3SAS_DRIVER_NAME,
+ .id_table = mpt3sas_pci_table,
+ .probe = _scsih_probe,
+ .remove = scsih_remove,
+ .shutdown = scsih_shutdown,
+ .err_handler = &_mpt3sas_err_handler,
+#ifdef CONFIG_PM
+ .suspend = scsih_suspend,
+ .resume = scsih_resume,
+#endif
+};
+
+/**
+ * scsih_init - main entry point for this driver.
+ *
+ * Returns 0 success, anything else error.
+ */
+int
+scsih_init(void)
+{
+ mpt2_ids = 0;
+ mpt3_ids = 0;
+
+ mpt3sas_base_initialize_callback_handler();
+
+ /* queuecommand callback handler */
+ scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
+
+ /* task management callback handler */
+ tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
+
+ /* base internal commands callback handler */
+ base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
+ port_enable_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_port_enable_done);
+
+ /* transport internal commands callback handler */
+ transport_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_transport_done);
+
+ /* scsih internal commands callback handler */
+ scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
+
+ /* configuration page API internal commands callback handler */
+ config_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_config_done);
+
+ /* ctl module callback handler */
+ ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
+
+ tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_tm_tr_complete);
+
+ tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_tm_volume_tr_complete);
+
+ tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_sas_control_complete);
+
+ return 0;
+}
+
+/**
+ * scsih_exit - exit point for this driver (when it is a module).
+ */
+void
+scsih_exit(void)
+{
+
+ mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_cb_idx);
+ mpt3sas_base_release_callback_handler(base_cb_idx);
+ mpt3sas_base_release_callback_handler(port_enable_cb_idx);
+ mpt3sas_base_release_callback_handler(transport_cb_idx);
+ mpt3sas_base_release_callback_handler(scsih_cb_idx);
+ mpt3sas_base_release_callback_handler(config_cb_idx);
+ mpt3sas_base_release_callback_handler(ctl_cb_idx);
+
+ mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
+
+/* raid transport support */
+ if (hbas_to_enumerate != 1)
+ raid_class_release(mpt3sas_raid_template);
+ if (hbas_to_enumerate != 2)
+ raid_class_release(mpt2sas_raid_template);
+ sas_release_transport(mpt3sas_transport_template);
+}
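/*
 * scsih_init above registers one completion handler per command class
 * with the base driver and keeps the returned slot index
 * (scsi_io_cb_idx, tm_cb_idx, ...); the interrupt path later routes each
 * reply by that index. A minimal, hypothetical userspace model of such a
 * callback-index registry (names are illustrative, not the driver's API):
 *
 *	#include <stdio.h>
 *
 *	typedef int (*cb_t)(unsigned short smid);
 *
 *	static cb_t callbacks[32];
 *	static unsigned char nr_callbacks = 1;  // slot 0 kept as "unused"
 *
 *	static unsigned char register_callback(cb_t cb)
 *	{
 *		callbacks[nr_callbacks] = cb;
 *		return nr_callbacks++;          // the cb_idx handed back
 *	}
 *
 *	static int io_done(unsigned short smid)
 *	{
 *		printf("io done, smid=%u\n", smid);
 *		return 1;                       // 1: caller frees the frame
 *	}
 *
 *	int main(void)
 *	{
 *		unsigned char scsi_io_cb_idx = register_callback(io_done);
 *		// interrupt path: dispatch a completion by stored index
 *		return callbacks[scsi_io_cb_idx](42) ? 0 : 1;
 *	}
 */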
+
+/**
+ * _mpt3sas_init - main entry point for this driver.
+ *
+ * Returns 0 success, anything else error.
+ */
+static int __init
+_mpt3sas_init(void)
+{
+ int error;
+
+ pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
+ MPT3SAS_DRIVER_VERSION);
+
+ mpt3sas_transport_template =
+ sas_attach_transport(&mpt3sas_transport_functions);
+ if (!mpt3sas_transport_template)
+ return -ENODEV;
+
+ /* No need to attach mpt3sas raid functions template
+ * if hbas_to_enumerate value is one.
+ */
+ if (hbas_to_enumerate != 1) {
+ mpt3sas_raid_template =
+ raid_class_attach(&mpt3sas_raid_functions);
+ if (!mpt3sas_raid_template) {
+ sas_release_transport(mpt3sas_transport_template);
+ return -ENODEV;
+ }
+ }
+
+ /* No need to attach mpt2sas raid functions template
+ * if hbas_to_enumerate value is two.
+ */
+ if (hbas_to_enumerate != 2) {
+ mpt2sas_raid_template =
+ raid_class_attach(&mpt2sas_raid_functions);
+ if (!mpt2sas_raid_template) {
+ sas_release_transport(mpt3sas_transport_template);
+ return -ENODEV;
+ }
+ }
+
+ error = scsih_init();
+ if (error) {
+ scsih_exit();
+ return error;
+ }
+
+ mpt3sas_ctl_init(hbas_to_enumerate);
+
+ error = pci_register_driver(&mpt3sas_driver);
+ if (error)
+ scsih_exit();
+
+ return error;
+}
+
+/**
+ * _mpt3sas_exit - exit point for this driver (when it is a module).
+ *
+ */
+static void __exit
+_mpt3sas_exit(void)
+{
+ pr_info("mpt3sas version %s unloading\n",
+ MPT3SAS_DRIVER_VERSION);
+
+ pci_unregister_driver(&mpt3sas_driver);
+
+ mpt3sas_ctl_exit(hbas_to_enumerate);
+
+ scsih_exit();
+}
+
+module_init(_mpt3sas_init);
+module_exit(_mpt3sas_exit);
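/*
 * _mpt3sas_init above acquires its resources in a fixed order (SAS
 * transport template, raid templates, scsih callbacks, ctl, PCI driver
 * registration) and releases them in reverse whenever a later step
 * fails, so a partial load never leaks a template. A hypothetical
 * stand-alone sketch of that acquire/unwind ladder:
 *
 *	#include <stdio.h>
 *
 *	static int attach_transport(void) { puts("transport up"); return 0; }
 *	static void release_transport(void) { puts("transport released"); }
 *	static int attach_raid(void) { puts("raid up"); return 0; }
 *	static void release_raid(void) { puts("raid released"); }
 *	static int register_driver(void) { return -1; }  // force a failure
 *
 *	int main(void)
 *	{
 *		int rc = attach_transport();
 *		if (rc)
 *			return 1;
 *		rc = attach_raid();
 *		if (rc)
 *			goto out_transport;
 *		rc = register_driver();
 *		if (rc)
 *			goto out_raid;            // unwind in reverse order
 *		return 0;
 *	out_raid:
 *		release_raid();
 *	out_transport:
 *		release_transport();
 *		return 1;
 *	}
 */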
diff --git a/addons/mpt3sas/src/4.4.180/mpt3sas_transport.c b/addons/mpt3sas/src/4.4.180/mpt3sas_transport.c
new file mode 100644
index 00000000..ca36d7ea
--- /dev/null
+++ b/addons/mpt3sas/src/4.4.180/mpt3sas_transport.c
@@ -0,0 +1,2154 @@
+/*
+ * SAS Transport Layer for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_dbg.h>
+
+#include "mpt3sas_base.h"
+
+/**
+ * _transport_sas_node_find_by_sas_address - sas node search
+ * @ioc: per adapter object
+ * @sas_address: sas address of expander or sas host
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * Search for either hba phys or expander device based on sas_address, then
+ * returns the sas_node object.
+ */
+static struct _sas_node *
+_transport_sas_node_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ if (ioc->sas_hba.sas_address == sas_address)
+ return &ioc->sas_hba;
+ else
+ return mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+}
+
+/**
+ * _transport_convert_phy_link_rate -
+ * @link_rate: link rate returned from mpt firmware
+ *
+ * Convert link_rate from mpi fusion into sas_transport form.
+ */
+static enum sas_linkrate
+_transport_convert_phy_link_rate(u8 link_rate)
+{
+ enum sas_linkrate rc;
+
+ switch (link_rate) {
+ case MPI2_SAS_NEG_LINK_RATE_1_5:
+ rc = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_3_0:
+ rc = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_6_0:
+ rc = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case MPI25_SAS_NEG_LINK_RATE_12_0:
+ rc = SAS_LINK_RATE_12_0_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED:
+ rc = SAS_PHY_DISABLED;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
+ rc = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR:
+ rc = SAS_SATA_PORT_SELECTOR;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
+ rc = SAS_PHY_RESET_IN_PROGRESS;
+ break;
+
+ default:
+ case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
+ case MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
+ rc = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+ return rc;
+}
+
+/**
+ * _transport_set_identify - set identify for phys and end devices
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @identify: sas identify info
+ *
+ * Populates sas identify info.
+ *
+ * Returns 0 for success, non-zero for failure.
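 *
 * The body below translates the firmware's packed DeviceInfo word into
 * the transport layer's identify fields: one switch for the device type
 * and one bit test per supported protocol. A hypothetical stand-alone
 * model of the protocol decode (bit values are made up; the real ones
 * live in mpi/mpi2_cnfg.h):
 *
 *	#include <stdio.h>
 *
 *	#define INFO_SSP_TARGET (1u << 0)   // illustrative bit layout
 *	#define INFO_STP_TARGET (1u << 1)
 *	#define INFO_SMP_TARGET (1u << 2)
 *
 *	#define PROTO_SSP 1
 *	#define PROTO_STP 2
 *	#define PROTO_SMP 4
 *
 *	static int decode_target_protocols(unsigned int device_info)
 *	{
 *		int protocols = 0;
 *
 *		if (device_info & INFO_SSP_TARGET)
 *			protocols |= PROTO_SSP;
 *		if (device_info & INFO_STP_TARGET)
 *			protocols |= PROTO_STP;
 *		if (device_info & INFO_SMP_TARGET)
 *			protocols |= PROTO_SMP;
 *		return protocols;
 *	}
 *
 *	int main(void)
 *	{
 *		printf("protocols=%d\n",
 *		    decode_target_protocols(INFO_SSP_TARGET | INFO_SMP_TARGET));
 *		return 0;
 *	}
 *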
+ */ +static int +_transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle, + struct sas_identify *identify) +{ + Mpi2SasDevicePage0_t sas_device_pg0; + Mpi2ConfigReply_t mpi_reply; + u32 device_info; + u32 ioc_status; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + + if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0, + MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + pr_err(MPT3SAS_FMT + "handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n", + ioc->name, handle, ioc_status, + __FILE__, __LINE__, __func__); + return -EIO; + } + + memset(identify, 0, sizeof(struct sas_identify)); + device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); + + /* sas_address */ + identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress); + + /* phy number of the parent device this device is linked to */ + identify->phy_identifier = sas_device_pg0.PhyNum; + + /* device_type */ + switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) { + case MPI2_SAS_DEVICE_INFO_NO_DEVICE: + identify->device_type = SAS_PHY_UNUSED; + break; + case MPI2_SAS_DEVICE_INFO_END_DEVICE: + identify->device_type = SAS_END_DEVICE; + break; + case MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER: + identify->device_type = SAS_EDGE_EXPANDER_DEVICE; + break; + case MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER: + identify->device_type = SAS_FANOUT_EXPANDER_DEVICE; + break; + } + + /* initiator_port_protocols */ + if (device_info & MPI2_SAS_DEVICE_INFO_SSP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & MPI2_SAS_DEVICE_INFO_STP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & MPI2_SAS_DEVICE_INFO_SMP_INITIATOR) + identify->initiator_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & MPI2_SAS_DEVICE_INFO_SATA_HOST) + identify->initiator_port_protocols |= SAS_PROTOCOL_SATA; + + /* target_port_protocols */ + if (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SSP; + if (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_STP; + if (device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET) + identify->target_port_protocols |= SAS_PROTOCOL_SMP; + if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE) + identify->target_port_protocols |= SAS_PROTOCOL_SATA; + + return 0; +} + +/** + * mpt3sas_transport_done - internal transport layer callback handler. + * @ioc: per adapter object + * @smid: system request message index + * @msix_index: MSIX table index supplied by the OS + * @reply: reply message frame(lower 32bit addr) + * + * Callback handler when sending internal generated transport cmds. + * The callback index passed is `ioc->transport_cb_idx` + * + * Return 1 meaning mf should be freed from _base_interrupt + * 0 means the mf is freed from this function. 
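 *
 * The function below is one half of the driver's internal command
 * handshake: the issuing thread marks the command PENDING and blocks in
 * wait_for_completion_timeout(), and this handler (called from interrupt
 * context) copies the reply, sets COMPLETE, and signals the completion.
 * A hypothetical userspace model of the same handshake using pthreads:
 *
 *	#include <pthread.h>
 *	#include <stdio.h>
 *
 *	#define CMD_PENDING  1
 *	#define CMD_COMPLETE 2
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *	static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
 *	static int status;
 *
 *	static void *reply_handler(void *arg)
 *	{
 *		(void)arg;
 *		pthread_mutex_lock(&lock);
 *		status |= CMD_COMPLETE;     // what the reply callback does
 *		status &= ~CMD_PENDING;
 *		pthread_cond_signal(&done);
 *		pthread_mutex_unlock(&lock);
 *		return NULL;
 *	}
 *
 *	int main(void)
 *	{
 *		pthread_t t;
 *
 *		status = CMD_PENDING;       // issuer: mark in flight
 *		pthread_create(&t, NULL, reply_handler, NULL);
 *		pthread_mutex_lock(&lock);
 *		while (!(status & CMD_COMPLETE))
 *			pthread_cond_wait(&done, &lock);
 *		pthread_mutex_unlock(&lock);
 *		pthread_join(t, NULL);
 *		puts("transport command complete");
 *		return 0;
 *	}
 *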
+ */ +u8 +mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, + u32 reply) +{ + MPI2DefaultReply_t *mpi_reply; + + mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); + if (ioc->transport_cmds.status == MPT3_CMD_NOT_USED) + return 1; + if (ioc->transport_cmds.smid != smid) + return 1; + ioc->transport_cmds.status |= MPT3_CMD_COMPLETE; + if (mpi_reply) { + memcpy(ioc->transport_cmds.reply, mpi_reply, + mpi_reply->MsgLength*4); + ioc->transport_cmds.status |= MPT3_CMD_REPLY_VALID; + } + ioc->transport_cmds.status &= ~MPT3_CMD_PENDING; + complete(&ioc->transport_cmds.done); + return 1; +} + +/* report manufacture request structure */ +struct rep_manu_request { + u8 smp_frame_type; + u8 function; + u8 reserved; + u8 request_length; +}; + +/* report manufacture reply structure */ +struct rep_manu_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x01 */ + u8 function_result; + u8 response_length; + u16 expander_change_count; + u8 reserved0[2]; + u8 sas_format; + u8 reserved2[3]; + u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN]; + u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN]; + u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN]; + u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN]; + u16 component_id; + u8 component_revision_id; + u8 reserved3; + u8 vendor_specific[8]; +}; + +/** + * transport_expander_report_manufacture - obtain SMP report_manufacture + * @ioc: per adapter object + * @sas_address: expander sas address + * @edev: the sas_expander_device object + * + * Fills in the sas_expander_device object when SMP port is created. + * + * Returns 0 for success, non-zero for failure. + */ +static int +_transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, struct sas_expander_device *edev) +{ + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + struct rep_manu_reply *manufacture_reply; + struct rep_manu_request *manufacture_request; + int rc; + u16 smid; + u32 ioc_state; + unsigned long timeleft; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + dma_addr_t data_in_dma; + size_t data_in_sz; + size_t data_out_sz; + u16 wait_state_count; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + + mutex_lock(&ioc->transport_cmds.mutex); + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info(MPT3SAS_FMT "%s: ioc is operational\n", + ioc->name, __func__); + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + rc = 0; + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + data_out_sz = sizeof(struct rep_manu_request); + data_in_sz = sizeof(struct 
rep_manu_reply); + data_out = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz, + &data_out_dma); + + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + + data_in_dma = data_out_dma + sizeof(struct rep_manu_request); + + manufacture_request = data_out; + manufacture_request->smp_frame_type = 0x40; + manufacture_request->function = 1; + manufacture_request->reserved = 0; + manufacture_request->request_length = 0; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = 0xFF; + mpi_request->SASAddress = cpu_to_le64(sas_address); + mpi_request->RequestDataLength = cpu_to_le16(data_out_sz); + psge = &mpi_request->SGL; + + ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, + data_in_sz); + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "report_manufacture - send to sas_addr(0x%016llx)\n", + ioc->name, (unsigned long long)sas_address)); + init_completion(&ioc->transport_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, + 10*HZ); + + if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SmpPassthroughRequest_t)/4); + if (!(ioc->transport_cmds.status & MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "report_manufacture - complete\n", ioc->name)); + + if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { + u8 *tmp; + + mpi_reply = ioc->transport_cmds.reply; + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "report_manufacture - reply data transfer size(%d)\n", + ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength))); + + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct rep_manu_reply)) + goto out; + + manufacture_reply = data_out + sizeof(struct rep_manu_request); + strncpy(edev->vendor_id, manufacture_reply->vendor_id, + SAS_EXPANDER_VENDOR_ID_LEN); + strncpy(edev->product_id, manufacture_reply->product_id, + SAS_EXPANDER_PRODUCT_ID_LEN); + strncpy(edev->product_rev, manufacture_reply->product_rev, + SAS_EXPANDER_PRODUCT_REV_LEN); + edev->level = manufacture_reply->sas_format & 1; + if (edev->level) { + strncpy(edev->component_vendor_id, + manufacture_reply->component_vendor_id, + SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN); + tmp = (u8 *)&manufacture_reply->component_id; + edev->component_id = tmp[0] << 8 | tmp[1]; + edev->component_revision_id = + manufacture_reply->component_revision_id; + } + } else + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "report_manufacture - no reply\n", ioc->name)); + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + out: + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + if (data_out) + pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz, + data_out, data_out_dma); + + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + + +/** + * _transport_delete_port - helper function to removing a port + * @ioc: per adapter object + * @mpt3sas_port: mpt3sas per port object + * + * Returns nothing. 
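 *
 * One detail worth noting from the report-manufacture path just above:
 * the request and response share a single coherent DMA allocation, with
 * the response region starting right after the request
 * (data_in_dma = data_out_dma + sizeof(request)). A hypothetical
 * stand-alone model of that buffer carving:
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	struct req { unsigned char frame_type, function, rsvd, req_len; };
 *	struct rep { unsigned char frame_type, function, result, resp_len; };
 *
 *	int main(void)
 *	{
 *		size_t out_sz = sizeof(struct req);
 *		size_t in_sz = sizeof(struct rep);
 *		unsigned char *buf = calloc(1, out_sz + in_sz);
 *		if (!buf)
 *			return 1;
 *
 *		struct req *request = (struct req *)buf;  // "data_out"
 *		struct rep *response =
 *		    (struct rep *)(buf + out_sz);         // "data_in"
 *
 *		request->frame_type = 0x40;  // SMP request frame
 *		request->function = 1;       // REPORT MANUFACTURER INFO
 *		response->frame_type = 0x41; // filled in by the expander
 *
 *		printf("request at +0, response at +%zu\n", out_sz);
 *		free(buf);
 *		return 0;
 *	}
 *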
+ */ +static void +_transport_delete_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_port *mpt3sas_port) +{ + u64 sas_address = mpt3sas_port->remote_identify.sas_address; + enum sas_device_type device_type = + mpt3sas_port->remote_identify.device_type; + + dev_printk(KERN_INFO, &mpt3sas_port->port->dev, + "remove: sas_addr(0x%016llx)\n", + (unsigned long long) sas_address); + + ioc->logging_level |= MPT_DEBUG_TRANSPORT; + if (device_type == SAS_END_DEVICE) + mpt3sas_device_remove_by_sas_address(ioc, sas_address); + else if (device_type == SAS_EDGE_EXPANDER_DEVICE || + device_type == SAS_FANOUT_EXPANDER_DEVICE) + mpt3sas_expander_remove(ioc, sas_address); + ioc->logging_level &= ~MPT_DEBUG_TRANSPORT; +} + +/** + * _transport_delete_phy - helper function to removing single phy from port + * @ioc: per adapter object + * @mpt3sas_port: mpt3sas per port object + * @mpt3sas_phy: mpt3sas per phy object + * + * Returns nothing. + */ +static void +_transport_delete_phy(struct MPT3SAS_ADAPTER *ioc, + struct _sas_port *mpt3sas_port, struct _sas_phy *mpt3sas_phy) +{ + u64 sas_address = mpt3sas_port->remote_identify.sas_address; + + dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev, + "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long) sas_address, mpt3sas_phy->phy_id); + + list_del(&mpt3sas_phy->port_siblings); + mpt3sas_port->num_phys--; + sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy); + mpt3sas_phy->phy_belongs_to_port = 0; +} + +/** + * _transport_add_phy - helper function to adding single phy to port + * @ioc: per adapter object + * @mpt3sas_port: mpt3sas per port object + * @mpt3sas_phy: mpt3sas per phy object + * + * Returns nothing. + */ +static void +_transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port, + struct _sas_phy *mpt3sas_phy) +{ + u64 sas_address = mpt3sas_port->remote_identify.sas_address; + + dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev, + "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long) + sas_address, mpt3sas_phy->phy_id); + + list_add_tail(&mpt3sas_phy->port_siblings, &mpt3sas_port->phy_list); + mpt3sas_port->num_phys++; + sas_port_add_phy(mpt3sas_port->port, mpt3sas_phy->phy); + mpt3sas_phy->phy_belongs_to_port = 1; +} + +/** + * _transport_add_phy_to_an_existing_port - adding new phy to existing port + * @ioc: per adapter object + * @sas_node: sas node object (either expander or sas host) + * @mpt3sas_phy: mpt3sas per phy object + * @sas_address: sas address of device/expander were phy needs to be added to + * + * Returns nothing. + */ +static void +_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy, + u64 sas_address) +{ + struct _sas_port *mpt3sas_port; + struct _sas_phy *phy_srch; + + if (mpt3sas_phy->phy_belongs_to_port == 1) + return; + + list_for_each_entry(mpt3sas_port, &sas_node->sas_port_list, + port_list) { + if (mpt3sas_port->remote_identify.sas_address != + sas_address) + continue; + list_for_each_entry(phy_srch, &mpt3sas_port->phy_list, + port_siblings) { + if (phy_srch == mpt3sas_phy) + return; + } + _transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy); + return; + } + +} + +/** + * _transport_del_phy_from_an_existing_port - delete phy from existing port + * @ioc: per adapter object + * @sas_node: sas node object (either expander or sas host) + * @mpt3sas_phy: mpt3sas per phy object + * + * Returns nothing. 
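 *
 * The helpers above and the function below maintain one invariant: a phy
 * belongs to at most one port, and deleting the last phy of a port
 * deletes the port itself (the num_phys == 1 branch). A tiny counting
 * model of that rule, with illustrative names:
 *
 *	#include <stdio.h>
 *
 *	struct port { int num_phys; };
 *
 *	static void del_phy(struct port *p)
 *	{
 *		if (p->num_phys == 1) {
 *			p->num_phys = 0;
 *			puts("last phy removed: delete the whole port");
 *		} else {
 *			p->num_phys--;
 *			printf("phy removed, %d left\n", p->num_phys);
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		struct port wide = { 4 };   // a 4-lane wide port
 *		for (int i = 0; i < 4; i++)
 *			del_phy(&wide);
 *		return 0;
 *	}
 *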
+ */ +static void +_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc, + struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy) +{ + struct _sas_port *mpt3sas_port, *next; + struct _sas_phy *phy_srch; + + if (mpt3sas_phy->phy_belongs_to_port == 0) + return; + + list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list, + port_list) { + list_for_each_entry(phy_srch, &mpt3sas_port->phy_list, + port_siblings) { + if (phy_srch != mpt3sas_phy) + continue; + + if (mpt3sas_port->num_phys == 1) + _transport_delete_port(ioc, mpt3sas_port); + else + _transport_delete_phy(ioc, mpt3sas_port, + mpt3sas_phy); + return; + } + } +} + +/** + * _transport_sanity_check - sanity check when adding a new port + * @ioc: per adapter object + * @sas_node: sas node object (either expander or sas host) + * @sas_address: sas address of device being added + * + * See the explanation above from _transport_delete_duplicate_port + */ +static void +_transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node, + u64 sas_address) +{ + int i; + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != sas_address) + continue; + if (sas_node->phy[i].phy_belongs_to_port == 1) + _transport_del_phy_from_an_existing_port(ioc, sas_node, + &sas_node->phy[i]); + } +} + +/** + * mpt3sas_transport_port_add - insert port to the list + * @ioc: per adapter object + * @handle: handle of attached device + * @sas_address: sas address of parent expander or sas host + * Context: This function will acquire ioc->sas_node_lock. + * + * Adding new port object to the sas_node->sas_port_list. + * + * Returns mpt3sas_port. + */ +struct _sas_port * +mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, + u64 sas_address) +{ + struct _sas_phy *mpt3sas_phy, *next; + struct _sas_port *mpt3sas_port; + unsigned long flags; + struct _sas_node *sas_node; + struct sas_rphy *rphy; + struct _sas_device *sas_device = NULL; + int i; + struct sas_port *port; + + mpt3sas_port = kzalloc(sizeof(struct _sas_port), + GFP_KERNEL); + if (!mpt3sas_port) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return NULL; + } + + INIT_LIST_HEAD(&mpt3sas_port->port_list); + INIT_LIST_HEAD(&mpt3sas_port->phy_list); + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (!sas_node) { + pr_err(MPT3SAS_FMT + "%s: Could not find parent sas_address(0x%016llx)!\n", + ioc->name, __func__, (unsigned long long)sas_address); + goto out_fail; + } + + if ((_transport_set_identify(ioc, handle, + &mpt3sas_port->remote_identify))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + + if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + + _transport_sanity_check(ioc, sas_node, + mpt3sas_port->remote_identify.sas_address); + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address != + mpt3sas_port->remote_identify.sas_address) + continue; + list_add_tail(&sas_node->phy[i].port_siblings, + &mpt3sas_port->phy_list); + mpt3sas_port->num_phys++; + } + + if (!mpt3sas_port->num_phys) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + 
} + + port = sas_port_alloc_num(sas_node->parent_dev); + if ((sas_port_add(port))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + goto out_fail; + } + + list_for_each_entry(mpt3sas_phy, &mpt3sas_port->phy_list, + port_siblings) { + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &port->dev, + "add: handle(0x%04x), sas_addr(0x%016llx), phy(%d)\n", + handle, (unsigned long long) + mpt3sas_port->remote_identify.sas_address, + mpt3sas_phy->phy_id); + sas_port_add_phy(port, mpt3sas_phy->phy); + mpt3sas_phy->phy_belongs_to_port = 1; + } + + mpt3sas_port->port = port; + if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) + rphy = sas_end_device_alloc(port); + else + rphy = sas_expander_alloc(port, + mpt3sas_port->remote_identify.device_type); + + rphy->identify = mpt3sas_port->remote_identify; + + if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { + sas_device = mpt3sas_get_sdev_by_addr(ioc, + mpt3sas_port->remote_identify.sas_address); + if (!sas_device) { + dfailprintk(ioc, printk(MPT3SAS_FMT + "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__)); + goto out_fail; + } + sas_device->pend_sas_rphy_add = 1; + } + + if ((sas_rphy_add(rphy))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + } + + if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { + sas_device->pend_sas_rphy_add = 0; + sas_device_put(sas_device); + } + + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &rphy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n", + handle, (unsigned long long) + mpt3sas_port->remote_identify.sas_address); + mpt3sas_port->rphy = rphy; + spin_lock_irqsave(&ioc->sas_node_lock, flags); + list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list); + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + /* fill in report manufacture */ + if (mpt3sas_port->remote_identify.device_type == + MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER || + mpt3sas_port->remote_identify.device_type == + MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER) + _transport_expander_report_manufacture(ioc, + mpt3sas_port->remote_identify.sas_address, + rphy_to_expander_device(rphy)); + return mpt3sas_port; + + out_fail: + list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list, + port_siblings) + list_del(&mpt3sas_phy->port_siblings); + kfree(mpt3sas_port); + return NULL; +} + +/** + * mpt3sas_transport_port_remove - remove port from the list + * @ioc: per adapter object + * @sas_address: sas address of attached device + * @sas_address_parent: sas address of parent expander or sas host + * Context: This function will acquire ioc->sas_node_lock. + * + * Removing object and freeing associated memory from the + * ioc->sas_port_list. + * + * Return nothing. 
+ */ +void +mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, + u64 sas_address_parent) +{ + int i; + unsigned long flags; + struct _sas_port *mpt3sas_port, *next; + struct _sas_node *sas_node; + u8 found = 0; + struct _sas_phy *mpt3sas_phy, *next_phy; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = _transport_sas_node_find_by_sas_address(ioc, + sas_address_parent); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list, + port_list) { + if (mpt3sas_port->remote_identify.sas_address != sas_address) + continue; + found = 1; + list_del(&mpt3sas_port->port_list); + goto out; + } + out: + if (!found) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + + for (i = 0; i < sas_node->num_phys; i++) { + if (sas_node->phy[i].remote_identify.sas_address == sas_address) + memset(&sas_node->phy[i].remote_identify, 0 , + sizeof(struct sas_identify)); + } + + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + list_for_each_entry_safe(mpt3sas_phy, next_phy, + &mpt3sas_port->phy_list, port_siblings) { + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &mpt3sas_port->port->dev, + "remove: sas_addr(0x%016llx), phy(%d)\n", + (unsigned long long) + mpt3sas_port->remote_identify.sas_address, + mpt3sas_phy->phy_id); + mpt3sas_phy->phy_belongs_to_port = 0; + sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy); + list_del(&mpt3sas_phy->port_siblings); + } + sas_port_delete(mpt3sas_port->port); + kfree(mpt3sas_port); +} + +/** + * mpt3sas_transport_add_host_phy - report sas_host phy to transport + * @ioc: per adapter object + * @mpt3sas_phy: mpt3sas per phy object + * @phy_pg0: sas phy page 0 + * @parent_dev: parent device class object + * + * Returns 0 for success, non-zero for failure. 
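 *
 * The body below unpacks minimum/maximum link rates from bytes in which
 * the low nibble carries the minimum and the high nibble the maximum,
 * hence the "& MIN_RATE_MASK" and ">> 4" pairs applied to HwLinkRate and
 * ProgrammedLinkRate. A stand-alone sketch (0x8 = 1.5, 0x9 = 3.0,
 * 0xA = 6.0 Gbps in the MPI encoding):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned char hw_link_rate = 0xA8;  // min 1.5, max 6.0
 *		unsigned char min_rate = hw_link_rate & 0x0F;
 *		unsigned char max_rate = hw_link_rate >> 4;
 *
 *		printf("min=0x%X max=0x%X\n", min_rate, max_rate);
 *		return 0;
 *	}
 *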
+ */ +int +mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy + *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = mpt3sas_phy->phy_id; + + + INIT_LIST_HEAD(&mpt3sas_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + if ((_transport_set_identify(ioc, mpt3sas_phy->handle, + &mpt3sas_phy->identify))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = mpt3sas_phy->identify; + mpt3sas_phy->attached_handle = le16_to_cpu(phy_pg0.AttachedDevHandle); + if (mpt3sas_phy->attached_handle) + _transport_set_identify(ioc, mpt3sas_phy->attached_handle, + &mpt3sas_phy->remote_identify); + phy->identify.phy_identifier = mpt3sas_phy->phy_id; + phy->negotiated_linkrate = _transport_convert_phy_link_rate( + phy_pg0.NegotiatedLinkRate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + phy->minimum_linkrate_hw = _transport_convert_phy_link_rate( + phy_pg0.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK); + phy->maximum_linkrate_hw = _transport_convert_phy_link_rate( + phy_pg0.HwLinkRate >> 4); + phy->minimum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = _transport_convert_phy_link_rate( + phy_pg0.ProgrammedLinkRate >> 4); + + if ((sas_phy_add(phy))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &phy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + mpt3sas_phy->handle, (unsigned long long) + mpt3sas_phy->identify.sas_address, + mpt3sas_phy->attached_handle, + (unsigned long long) + mpt3sas_phy->remote_identify.sas_address); + mpt3sas_phy->phy = phy; + return 0; +} + + +/** + * mpt3sas_transport_add_expander_phy - report expander phy to transport + * @ioc: per adapter object + * @mpt3sas_phy: mpt3sas per phy object + * @expander_pg1: expander page 1 + * @parent_dev: parent device class object + * + * Returns 0 for success, non-zero for failure. 
+ */ +int +mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy + *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1, + struct device *parent_dev) +{ + struct sas_phy *phy; + int phy_index = mpt3sas_phy->phy_id; + + INIT_LIST_HEAD(&mpt3sas_phy->port_siblings); + phy = sas_phy_alloc(parent_dev, phy_index); + if (!phy) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -1; + } + if ((_transport_set_identify(ioc, mpt3sas_phy->handle, + &mpt3sas_phy->identify))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + phy->identify = mpt3sas_phy->identify; + mpt3sas_phy->attached_handle = + le16_to_cpu(expander_pg1.AttachedDevHandle); + if (mpt3sas_phy->attached_handle) + _transport_set_identify(ioc, mpt3sas_phy->attached_handle, + &mpt3sas_phy->remote_identify); + phy->identify.phy_identifier = mpt3sas_phy->phy_id; + phy->negotiated_linkrate = _transport_convert_phy_link_rate( + expander_pg1.NegotiatedLinkRate & + MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL); + phy->minimum_linkrate_hw = _transport_convert_phy_link_rate( + expander_pg1.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK); + phy->maximum_linkrate_hw = _transport_convert_phy_link_rate( + expander_pg1.HwLinkRate >> 4); + phy->minimum_linkrate = _transport_convert_phy_link_rate( + expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK); + phy->maximum_linkrate = _transport_convert_phy_link_rate( + expander_pg1.ProgrammedLinkRate >> 4); + + if ((sas_phy_add(phy))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + sas_phy_free(phy); + return -1; + } + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &phy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + mpt3sas_phy->handle, (unsigned long long) + mpt3sas_phy->identify.sas_address, + mpt3sas_phy->attached_handle, + (unsigned long long) + mpt3sas_phy->remote_identify.sas_address); + mpt3sas_phy->phy = phy; + return 0; +} + +/** + * mpt3sas_transport_update_links - refreshing phy link changes + * @ioc: per adapter object + * @sas_address: sas address of parent expander or sas host + * @handle: attached device handle + * @phy_numberv: phy number + * @link_rate: new link rate + * + * Returns nothing. 
+ */ +void +mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc, + u64 sas_address, u16 handle, u8 phy_number, u8 link_rate) +{ + unsigned long flags; + struct _sas_node *sas_node; + struct _sas_phy *mpt3sas_phy; + + if (ioc->shost_recovery || ioc->pci_error_recovery) + return; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address); + if (!sas_node) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return; + } + + mpt3sas_phy = &sas_node->phy[phy_number]; + mpt3sas_phy->attached_handle = handle; + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) { + _transport_set_identify(ioc, handle, + &mpt3sas_phy->remote_identify); + _transport_add_phy_to_an_existing_port(ioc, sas_node, + mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address); + } else + memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct + sas_identify)); + + if (mpt3sas_phy->phy) + mpt3sas_phy->phy->negotiated_linkrate = + _transport_convert_phy_link_rate(link_rate); + + if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) + dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev, + "refresh: parent sas_addr(0x%016llx),\n" + "\tlink_rate(0x%02x), phy(%d)\n" + "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n", + (unsigned long long)sas_address, + link_rate, phy_number, handle, (unsigned long long) + mpt3sas_phy->remote_identify.sas_address); +} + +static inline void * +phy_to_ioc(struct sas_phy *phy) +{ + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + return shost_priv(shost); +} + +static inline void * +rphy_to_ioc(struct sas_rphy *rphy) +{ + struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent); + return shost_priv(shost); +} + +/* report phy error log structure */ +struct phy_error_log_request { + u8 smp_frame_type; /* 0x40 */ + u8 function; /* 0x11 */ + u8 allocated_response_length; + u8 request_length; /* 02 */ + u8 reserved_1[5]; + u8 phy_identifier; + u8 reserved_2[2]; +}; + +/* report phy error log reply structure */ +struct phy_error_log_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x11 */ + u8 function_result; + u8 response_length; + __be16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 reserved_2[2]; + __be32 invalid_dword; + __be32 running_disparity_error; + __be32 loss_of_dword_sync; + __be32 phy_reset_problem; +}; + +/** + * _transport_get_expander_phy_error_log - return expander counters + * @ioc: per adapter object + * @phy: The sas phy object + * + * Returns 0 for success, non-zero for failure. 
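 *
 * The SMP REPORT PHY ERROR LOG reply parsed below carries its four
 * counters big-endian on the wire, which is why the driver runs them
 * through be32_to_cpu() before storing them in the sas_phy. A
 * stand-alone parse of one such counter:
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	static uint32_t be32_to_host(const unsigned char *p)
 *	{
 *		return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
 *		       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
 *	}
 *
 *	int main(void)
 *	{
 *		// invalid_dword = 0x00000102 as sent by the expander
 *		unsigned char wire[4] = { 0x00, 0x00, 0x01, 0x02 };
 *		printf("invalid dwords: %u\n", be32_to_host(wire));
 *		return 0;               // prints 258
 *	}
 *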
+ * + */ +static int +_transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc, + struct sas_phy *phy) +{ + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + struct phy_error_log_request *phy_error_log_request; + struct phy_error_log_reply *phy_error_log_reply; + int rc; + u16 smid; + u32 ioc_state; + unsigned long timeleft; + void *psge; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + u16 wait_state_count; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + + mutex_lock(&ioc->transport_cmds.mutex); + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info(MPT3SAS_FMT "%s: ioc is operational\n", + ioc->name, __func__); + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + sz = sizeof(struct phy_error_log_request) + + sizeof(struct phy_error_log_reply); + data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + + rc = -EINVAL; + memset(data_out, 0, sz); + phy_error_log_request = data_out; + phy_error_log_request->smp_frame_type = 0x40; + phy_error_log_request->function = 0x11; + phy_error_log_request->request_length = 2; + phy_error_log_request->allocated_response_length = 0; + phy_error_log_request->phy_identifier = phy->number; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = 0xFF; + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); + mpi_request->RequestDataLength = + cpu_to_le16(sizeof(struct phy_error_log_request)); + psge = &mpi_request->SGL; + + ioc->build_sg(ioc, psge, data_out_dma, + sizeof(struct phy_error_log_request), + data_out_dma + sizeof(struct phy_error_log_request), + sizeof(struct phy_error_log_reply)); + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n", + ioc->name, (unsigned long long)phy->identify.sas_address, + phy->number)); + init_completion(&ioc->transport_cmds.done); + mpt3sas_base_put_smid_default(ioc, smid); + timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done, + 10*HZ); + + if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) { + pr_err(MPT3SAS_FMT "%s: timeout\n", + ioc->name, __func__); + _debug_dump_mf(mpi_request, + sizeof(Mpi2SmpPassthroughRequest_t)/4); + if (!(ioc->transport_cmds.status & 
MPT3_CMD_RESET)) + issue_reset = 1; + goto issue_host_reset; + } + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "phy_error_log - complete\n", ioc->name)); + + if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) { + + mpi_reply = ioc->transport_cmds.reply; + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "phy_error_log - reply data transfer size(%d)\n", + ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength))); + + if (le16_to_cpu(mpi_reply->ResponseDataLength) != + sizeof(struct phy_error_log_reply)) + goto out; + + phy_error_log_reply = data_out + + sizeof(struct phy_error_log_request); + + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "phy_error_log - function_result(%d)\n", + ioc->name, phy_error_log_reply->function_result)); + + phy->invalid_dword_count = + be32_to_cpu(phy_error_log_reply->invalid_dword); + phy->running_disparity_error_count = + be32_to_cpu(phy_error_log_reply->running_disparity_error); + phy->loss_of_dword_sync_count = + be32_to_cpu(phy_error_log_reply->loss_of_dword_sync); + phy->phy_reset_problem_count = + be32_to_cpu(phy_error_log_reply->phy_reset_problem); + rc = 0; + } else + dtransportprintk(ioc, pr_info(MPT3SAS_FMT + "phy_error_log - no reply\n", ioc->name)); + + issue_host_reset: + if (issue_reset) + mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, + FORCE_BIG_HAMMER); + out: + ioc->transport_cmds.status = MPT3_CMD_NOT_USED; + if (data_out) + pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma); + + mutex_unlock(&ioc->transport_cmds.mutex); + return rc; +} + +/** + * _transport_get_linkerrors - return phy counters for both hba and expanders + * @phy: The sas phy object + * + * Returns 0 for success, non-zero for failure. + * + */ +static int +_transport_get_linkerrors(struct sas_phy *phy) +{ + struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy); + unsigned long flags; + Mpi2ConfigReply_t mpi_reply; + Mpi2SasPhyPage1_t phy_pg1; + + spin_lock_irqsave(&ioc->sas_node_lock, flags); + if (_transport_sas_node_find_by_sas_address(ioc, + phy->identify.sas_address) == NULL) { + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + return -EINVAL; + } + spin_unlock_irqrestore(&ioc->sas_node_lock, flags); + + if (phy->identify.sas_address != ioc->sas_hba.sas_address) + return _transport_get_expander_phy_error_log(ioc, phy); + + /* get hba phy error logs */ + if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1, + phy->number))) { + pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", + ioc->name, __FILE__, __LINE__, __func__); + return -ENXIO; + } + + if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) + pr_info(MPT3SAS_FMT + "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n", + ioc->name, phy->number, + le16_to_cpu(mpi_reply.IOCStatus), + le32_to_cpu(mpi_reply.IOCLogInfo)); + + phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount); + phy->running_disparity_error_count = + le32_to_cpu(phy_pg1.RunningDisparityErrorCount); + phy->loss_of_dword_sync_count = + le32_to_cpu(phy_pg1.LossDwordSynchCount); + phy->phy_reset_problem_count = + le32_to_cpu(phy_pg1.PhyResetProblemCount); + return 0; +} + +/** + * _transport_get_enclosure_identifier - + * @phy: The sas phy object + * + * Obtain the enclosure logical id for an expander. + * Returns 0 for success, non-zero for failure. 
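 *
 * The lookup below follows the driver's get/copy/put discipline: find
 * the device under sas_device_lock, take a reference, copy the needed
 * field, then drop the reference with sas_device_put(). A hypothetical
 * model of that pattern with a plain refcount:
 *
 *	#include <stdio.h>
 *
 *	struct dev { int refs; unsigned long long enclosure_id; };
 *
 *	static struct dev *dev_get(struct dev *d) { d->refs++; return d; }
 *	static void dev_put(struct dev *d) { d->refs--; }
 *
 *	static int get_enclosure_id(struct dev *d, unsigned long long *out)
 *	{
 *		if (!d)
 *			return -1;          // -ENXIO in the driver
 *		dev_get(d);                 // pin while we read
 *		*out = d->enclosure_id;
 *		dev_put(d);
 *		return 0;
 *	}
 *
 *	int main(void)
 *	{
 *		struct dev d = { 1, 0x500605b000000000ULL };
 *		unsigned long long id;
 *		if (!get_enclosure_id(&d, &id))
 *			printf("enclosure 0x%llx, refs=%d\n", id, d.refs);
 *		return 0;
 *	}
 *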
+ */ +static int +_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) +{ + struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy); + struct _sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + rphy->identify.sas_address); + if (sas_device) { + *identifier = sas_device->enclosure_logical_id; + rc = 0; + sas_device_put(sas_device); + } else { + *identifier = 0; + rc = -ENXIO; + } + + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +/** + * _transport_get_bay_identifier - + * @phy: The sas phy object + * + * Returns the slot id for a device that resides inside an enclosure. + */ +static int +_transport_get_bay_identifier(struct sas_rphy *rphy) +{ + struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy); + struct _sas_device *sas_device; + unsigned long flags; + int rc; + + spin_lock_irqsave(&ioc->sas_device_lock, flags); + sas_device = __mpt3sas_get_sdev_by_addr(ioc, + rphy->identify.sas_address); + if (sas_device) { + rc = sas_device->slot; + sas_device_put(sas_device); + } else { + rc = -ENXIO; + } + spin_unlock_irqrestore(&ioc->sas_device_lock, flags); + return rc; +} + +/* phy control request structure */ +struct phy_control_request { + u8 smp_frame_type; /* 0x40 */ + u8 function; /* 0x91 */ + u8 allocated_response_length; + u8 request_length; /* 0x09 */ + u16 expander_change_count; + u8 reserved_1[3]; + u8 phy_identifier; + u8 phy_operation; + u8 reserved_2[13]; + u64 attached_device_name; + u8 programmed_min_physical_link_rate; + u8 programmed_max_physical_link_rate; + u8 reserved_3[6]; +}; + +/* phy control reply structure */ +struct phy_control_reply { + u8 smp_frame_type; /* 0x41 */ + u8 function; /* 0x11 */ + u8 function_result; + u8 response_length; +}; + +#define SMP_PHY_CONTROL_LINK_RESET (0x01) +#define SMP_PHY_CONTROL_HARD_RESET (0x02) +#define SMP_PHY_CONTROL_DISABLE (0x03) + +/** + * _transport_expander_phy_control - expander phy control + * @ioc: per adapter object + * @phy: The sas phy object + * + * Returns 0 for success, non-zero for failure. 
+ * + */ +static int +_transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc, + struct sas_phy *phy, u8 phy_operation) +{ + Mpi2SmpPassthroughRequest_t *mpi_request; + Mpi2SmpPassthroughReply_t *mpi_reply; + struct phy_control_request *phy_control_request; + struct phy_control_reply *phy_control_reply; + int rc; + u16 smid; + u32 ioc_state; + unsigned long timeleft; + void *psge; + u32 sgl_flags; + u8 issue_reset = 0; + void *data_out = NULL; + dma_addr_t data_out_dma; + u32 sz; + u16 wait_state_count; + + if (ioc->shost_recovery || ioc->pci_error_recovery) { + pr_info(MPT3SAS_FMT "%s: host reset in progress!\n", + __func__, ioc->name); + return -EFAULT; + } + + mutex_lock(&ioc->transport_cmds.mutex); + + if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) { + pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + ioc->transport_cmds.status = MPT3_CMD_PENDING; + + wait_state_count = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { + if (wait_state_count++ == 10) { + pr_err(MPT3SAS_FMT + "%s: failed due to ioc not operational\n", + ioc->name, __func__); + rc = -EFAULT; + goto out; + } + ssleep(1); + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); + pr_info(MPT3SAS_FMT + "%s: waiting for operational state(count=%d)\n", + ioc->name, __func__, wait_state_count); + } + if (wait_state_count) + pr_info(MPT3SAS_FMT "%s: ioc is operational\n", + ioc->name, __func__); + + smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx); + if (!smid) { + pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n", + ioc->name, __func__); + rc = -EAGAIN; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->transport_cmds.smid = smid; + + sz = sizeof(struct phy_control_request) + + sizeof(struct phy_control_reply); + data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma); + if (!data_out) { + pr_err("failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + rc = -ENOMEM; + mpt3sas_base_free_smid(ioc, smid); + goto out; + } + + rc = -EINVAL; + memset(data_out, 0, sz); + phy_control_request = data_out; + phy_control_request->smp_frame_type = 0x40; + phy_control_request->function = 0x91; + phy_control_request->request_length = 9; + phy_control_request->allocated_response_length = 0; + phy_control_request->phy_identifier = phy->number; + phy_control_request->phy_operation = phy_operation; + phy_control_request->programmed_min_physical_link_rate = + phy->minimum_linkrate << 4; + phy_control_request->programmed_max_physical_link_rate = + phy->maximum_linkrate << 4; + + memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t)); + mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH; + mpi_request->PhysicalPort = 0xFF; + mpi_request->VF_ID = 0; /* TODO */ + mpi_request->VP_ID = 0; + mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address); + mpi_request->RequestDataLength = + cpu_to_le16(sizeof(struct phy_error_log_request)); + psge = &mpi_request->SGL; + + /* WRITE sgel first */ + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); + sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; + ioc->base_add_sg_single(psge, sgl_flags | + sizeof(struct phy_control_request), data_out_dma); + + /* incr sgel */ + psge += ioc->sge_size; + + /* READ sgel last */ + sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | + MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | + MPI2_SGE_FLAGS_END_OF_LIST); + sgl_flags = sgl_flags << 
+    ioc->base_add_sg_single(psge, sgl_flags |
+        sizeof(struct phy_control_reply), data_out_dma +
+        sizeof(struct phy_control_request));
+
+    dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+        "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
+        ioc->name, (unsigned long long)phy->identify.sas_address,
+        phy->number, phy_operation));
+    init_completion(&ioc->transport_cmds.done);
+    mpt3sas_base_put_smid_default(ioc, smid);
+    timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+        10*HZ);
+
+    if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+        pr_err(MPT3SAS_FMT "%s: timeout\n",
+            ioc->name, __func__);
+        _debug_dump_mf(mpi_request,
+            sizeof(Mpi2SmpPassthroughRequest_t)/4);
+        if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+            issue_reset = 1;
+        goto issue_host_reset;
+    }
+
+    dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+        "phy_control - complete\n", ioc->name));
+
+    if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+        mpi_reply = ioc->transport_cmds.reply;
+
+        dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+            "phy_control - reply data transfer size(%d)\n",
+            ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+        if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+            sizeof(struct phy_control_reply))
+            goto out;
+
+        phy_control_reply = data_out +
+            sizeof(struct phy_control_request);
+
+        dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+            "phy_control - function_result(%d)\n",
+            ioc->name, phy_control_reply->function_result));
+
+        rc = 0;
+    } else
+        dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+            "phy_control - no reply\n", ioc->name));
+
+ issue_host_reset:
+    if (issue_reset)
+        mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+            FORCE_BIG_HAMMER);
+ out:
+    ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+    if (data_out)
+        pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+
+    mutex_unlock(&ioc->transport_cmds.mutex);
+    return rc;
+}
+
+/**
+ * _transport_phy_reset - phy reset
+ * @phy: The sas phy object
+ * @hard_reset: issue a hard reset when non-zero, a link reset otherwise
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+    struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+    Mpi2SasIoUnitControlReply_t mpi_reply;
+    Mpi2SasIoUnitControlRequest_t mpi_request;
+    unsigned long flags;
+
+    spin_lock_irqsave(&ioc->sas_node_lock, flags);
+    if (_transport_sas_node_find_by_sas_address(ioc,
+        phy->identify.sas_address) == NULL) {
+        spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+        return -EINVAL;
+    }
+    spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+    /* handle expander phys */
+    if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+        return _transport_expander_phy_control(ioc, phy,
+            (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
+            SMP_PHY_CONTROL_LINK_RESET);
+
+    /* handle hba phys */
+    memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+    mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+    mpi_request.Operation = hard_reset ?
+        MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET;
+    mpi_request.PhyNum = phy->number;
+
+    if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        return -ENXIO;
+    }
+
+    if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
+        pr_info(MPT3SAS_FMT
+            "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+            ioc->name, phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+            le32_to_cpu(mpi_reply.IOCLogInfo));
+
+    return 0;
+}
+
+/**
+ * _transport_phy_enable - enable/disable phys
+ * @phy: The sas phy object
+ * @enable: enable phy when true
+ *
+ * Only supports SAS host direct-attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+    struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+    Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+    Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+    Mpi2ConfigReply_t mpi_reply;
+    u16 ioc_status;
+    u16 sz;
+    int rc = 0;
+    unsigned long flags;
+    int i, discovery_active;
+
+    spin_lock_irqsave(&ioc->sas_node_lock, flags);
+    if (_transport_sas_node_find_by_sas_address(ioc,
+        phy->identify.sas_address) == NULL) {
+        spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+        return -EINVAL;
+    }
+    spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+    /* handle expander phys */
+    if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+        return _transport_expander_phy_control(ioc, phy,
+            (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
+            SMP_PHY_CONTROL_DISABLE);
+
+    /* handle hba phys */
+
+    /* read sas_iounit page 0 */
+    sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+        sizeof(Mpi2SasIOUnit0PhyData_t));
+    sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+    if (!sas_iounit_pg0) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        rc = -ENOMEM;
+        goto out;
+    }
+    if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+        sas_iounit_pg0, sz))) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        rc = -ENXIO;
+        goto out;
+    }
+    ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+        MPI2_IOCSTATUS_MASK;
+    if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        rc = -EIO;
+        goto out;
+    }
+
+    /* unable to enable/disable phys when discovery is active */
+    for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys; i++) {
+        if (sas_iounit_pg0->PhyData[i].PortFlags &
+            MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
+            pr_err(MPT3SAS_FMT "discovery is active on "
+                "port = %d, phy = %d: unable to enable/disable "
+                "phys, try again later!\n", ioc->name,
+                sas_iounit_pg0->PhyData[i].Port, i);
+            discovery_active = 1;
+        }
+    }
+
+    if (discovery_active) {
+        rc = -EAGAIN;
+        goto out;
+    }
+
+    /* read sas_iounit page 1 */
+    sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+        sizeof(Mpi2SasIOUnit1PhyData_t));
+    sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+    if (!sas_iounit_pg1) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        rc = -ENOMEM;
+        goto out;
+    }
+    if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+        sas_iounit_pg1, sz))) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        rc = -ENXIO;
+        goto out;
+    }
+    ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+        MPI2_IOCSTATUS_MASK;
+    if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        rc = -EIO;
+        goto out;
+    }
+
+    /* copy Port/PortFlags/PhyFlags from page 0 */
+    for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+        sas_iounit_pg1->PhyData[i].Port =
+            sas_iounit_pg0->PhyData[i].Port;
+        sas_iounit_pg1->PhyData[i].PortFlags =
+            (sas_iounit_pg0->PhyData[i].PortFlags &
+            MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG);
+        sas_iounit_pg1->PhyData[i].PhyFlags =
+            (sas_iounit_pg0->PhyData[i].PhyFlags &
+            (MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED |
+            MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED));
+    }
+
+    if (enable)
+        sas_iounit_pg1->PhyData[phy->number].PhyFlags
+            &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+    else
+        sas_iounit_pg1->PhyData[phy->number].PhyFlags
+            |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+    mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz);
+
+    /* link reset */
+    if (enable)
+        _transport_phy_reset(phy, 0);
+
+ out:
+    kfree(sas_iounit_pg1);
+    kfree(sas_iounit_pg0);
+    return rc;
+}
+
+/**
+ * _transport_phy_speed - set phy min/max link rates
+ * @phy: The sas phy object
+ * @rates: rates defined in sas_phy_linkrates
+ *
+ * Only supports SAS host direct-attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
+{
+    struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+    Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+    Mpi2SasPhyPage0_t phy_pg0;
+    Mpi2ConfigReply_t mpi_reply;
+    u16 ioc_status;
+    u16 sz;
+    int i;
+    int rc = 0;
+    unsigned long flags;
+
+    spin_lock_irqsave(&ioc->sas_node_lock, flags);
+    if (_transport_sas_node_find_by_sas_address(ioc,
+        phy->identify.sas_address) == NULL) {
+        spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+        return -EINVAL;
+    }
+    spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+    if (!rates->minimum_linkrate)
+        rates->minimum_linkrate = phy->minimum_linkrate;
+    else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
+        rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+    if (!rates->maximum_linkrate)
+        rates->maximum_linkrate = phy->maximum_linkrate;
+    else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
+        rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+    /* handle expander phys */
+    if (phy->identify.sas_address != ioc->sas_hba.sas_address) {
+        phy->minimum_linkrate = rates->minimum_linkrate;
+        phy->maximum_linkrate = rates->maximum_linkrate;
+        return _transport_expander_phy_control(ioc, phy,
+            SMP_PHY_CONTROL_LINK_RESET);
+    }
+
+    /* handle hba phys */
+
+    /* sas_iounit page 1 */
+    sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+        sizeof(Mpi2SasIOUnit1PhyData_t));
+    sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+    if (!sas_iounit_pg1) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        rc = -ENOMEM;
+        goto out;
+    }
+    if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+        sas_iounit_pg1, sz))) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        rc = -ENXIO;
+        goto out;
+    }
+    ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+        MPI2_IOCSTATUS_MASK;
+    if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        rc = -EIO;
+        goto out;
+    }
+
+    for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+        if (phy->number != i) {
+            sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+                (ioc->sas_hba.phy[i].phy->minimum_linkrate +
+                (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
+        } else {
+            sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+                (rates->minimum_linkrate +
+                (rates->maximum_linkrate << 4));
+        }
+    }
+
+    if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+        sz)) {
+        pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+            ioc->name, __FILE__, __LINE__, __func__);
+        rc = -ENXIO;
+        goto out;
+    }
+
+    /* link reset */
+    _transport_phy_reset(phy, 0);
+
+    /* read phy page 0, then update the rates in the sas transport phy */
+    if (!mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+        phy->number)) {
+        phy->minimum_linkrate = _transport_convert_phy_link_rate(
+            phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+        phy->maximum_linkrate = _transport_convert_phy_link_rate(
+            phy_pg0.ProgrammedLinkRate >> 4);
+        phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+            phy_pg0.NegotiatedLinkRate &
+            MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+    }
+
+ out:
+    kfree(sas_iounit_pg1);
+    return rc;
+}
+
+/**
+ * _transport_smp_handler - transport portal for smp passthru
+ * @shost: shost object
+ * @rphy: sas transport rphy object
+ * @req: the blk-layer request carrying the SMP frame
+ *
+ * This is used primarily for smp_utils.
+ * Example:
+ *   smp_rep_general /sys/class/bsg/expander-5:0
+ */
+static int
+_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+    struct request *req)
+{
+    struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+    Mpi2SmpPassthroughRequest_t *mpi_request;
+    Mpi2SmpPassthroughReply_t *mpi_reply;
+    int rc;
+    u16 smid;
+    u32 ioc_state;
+    unsigned long timeleft;
+    void *psge;
+    u8 issue_reset = 0;
+    dma_addr_t dma_addr_in = 0;
+    dma_addr_t dma_addr_out = 0;
+    dma_addr_t pci_dma_in = 0;
+    dma_addr_t pci_dma_out = 0;
+    void *pci_addr_in = NULL;
+    void *pci_addr_out = NULL;
+    u16 wait_state_count;
+    struct request *rsp = req->next_rq;
+    struct bio_vec bvec;
+    struct bvec_iter iter;
+
+    if (!rsp) {
+        pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
+            ioc->name, __func__);
+        return -EINVAL;
+    }
+
+    if (ioc->shost_recovery || ioc->pci_error_recovery) {
+        pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+            ioc->name, __func__);
+        return -EFAULT;
+    }
+
+    rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
+    if (rc)
+        return rc;
+
+    if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+        pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
+            __func__);
+        rc = -EAGAIN;
+        goto out;
+    }
+    ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+    /* Check if the request is split across multiple segments */
+    if (bio_multiple_segments(req->bio)) {
+        u32 offset = 0;
+
+        /* Allocate memory and copy the request */
+        pci_addr_out = pci_alloc_consistent(ioc->pdev,
+            blk_rq_bytes(req), &pci_dma_out);
+        if (!pci_addr_out) {
+            pr_info(MPT3SAS_FMT "%s(): PCI Addr out = NULL\n",
+                ioc->name, __func__);
+            rc = -ENOMEM;
+            goto out;
+        }
+
+        bio_for_each_segment(bvec, req->bio, iter) {
+            memcpy(pci_addr_out + offset,
+                page_address(bvec.bv_page) + bvec.bv_offset,
+                bvec.bv_len);
+            offset += bvec.bv_len;
+        }
+    } else {
+        dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
+            blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
+        if (pci_dma_mapping_error(ioc->pdev, dma_addr_out)) {
+            pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n",
+                ioc->name, __func__);
+            rc = -ENOMEM;
+            goto free_pci;
+        }
+    }
+
+    /* Check if the response needs to be populated across
+     * multiple segments */
+    if (bio_multiple_segments(rsp->bio)) {
+        pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
+            &pci_dma_in);
+        if (!pci_addr_in) {
+            pr_info(MPT3SAS_FMT "%s(): PCI Addr in = NULL\n",
+                ioc->name, __func__);
+            rc = -ENOMEM;
+            goto unmap;
+        }
+    } else {
+        dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
+            blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
+        if (pci_dma_mapping_error(ioc->pdev, dma_addr_in)) {
+            pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n",
+                ioc->name, __func__);
+            rc = -ENOMEM;
+            goto unmap;
+        }
+    }
+
+    wait_state_count = 0;
+    ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+    while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+        if (wait_state_count++ == 10) {
+            pr_err(MPT3SAS_FMT
+                "%s: failed due to ioc not operational\n",
+                ioc->name, __func__);
+            rc = -EFAULT;
+            goto unmap;
+        }
+        ssleep(1);
+        ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+        pr_info(MPT3SAS_FMT
+            "%s: waiting for operational state(count=%d)\n",
+            ioc->name, __func__, wait_state_count);
+    }
+    if (wait_state_count)
+        pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+            ioc->name, __func__);
+
+    smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+    if (!smid) {
+        pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+            ioc->name, __func__);
+        rc = -EAGAIN;
+        goto unmap;
+    }
+
+    rc = 0;
+    mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+    ioc->transport_cmds.smid = smid;
+
+    memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+    mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+    mpi_request->PhysicalPort = 0xFF;
+    mpi_request->SASAddress = (rphy) ?
+        cpu_to_le64(rphy->identify.sas_address) :
+        cpu_to_le64(ioc->sas_hba.sas_address);
+    mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
+    psge = &mpi_request->SGL;
+
+    if (bio_multiple_segments(req->bio))
+        ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
+            pci_dma_in, (blk_rq_bytes(rsp) + 4));
+    else
+        ioc->build_sg(ioc, psge, dma_addr_out, (blk_rq_bytes(req) - 4),
+            dma_addr_in, (blk_rq_bytes(rsp) + 4));
+
+    dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+        "%s - sending smp request\n", ioc->name, __func__));
+
+    init_completion(&ioc->transport_cmds.done);
+    mpt3sas_base_put_smid_default(ioc, smid);
+    timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+        10*HZ);
+
+    if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+        pr_err(MPT3SAS_FMT "%s: timeout\n",
+            ioc->name, __func__);
+        _debug_dump_mf(mpi_request,
+            sizeof(Mpi2SmpPassthroughRequest_t)/4);
+        if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+            issue_reset = 1;
+        goto issue_host_reset;
+    }
+
+    dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+        "%s - complete\n", ioc->name, __func__));
+
+    if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+        mpi_reply = ioc->transport_cmds.reply;
+
+        dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+            "%s - reply data transfer size(%d)\n",
+            ioc->name, __func__,
+            le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+        memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
+        req->sense_len = sizeof(*mpi_reply);
+        req->resid_len = 0;
+        rsp->resid_len -=
+            le16_to_cpu(mpi_reply->ResponseDataLength);
+
+        /* check if the resp needs to be copied from the allocated
+         * pci mem */
+        if (bio_multiple_segments(rsp->bio)) {
+            u32 offset = 0;
+            u32 bytes_to_copy =
+                le16_to_cpu(mpi_reply->ResponseDataLength);
+            bio_for_each_segment(bvec, rsp->bio, iter) {
+                if (bytes_to_copy <= bvec.bv_len) {
+                    memcpy(page_address(bvec.bv_page) +
+                        bvec.bv_offset, pci_addr_in +
+                        offset, bytes_to_copy);
+                    break;
+                } else {
+                    memcpy(page_address(bvec.bv_page) +
+                        bvec.bv_offset, pci_addr_in +
+                        offset, bvec.bv_len);
+                    bytes_to_copy -= bvec.bv_len;
+                }
+                offset += bvec.bv_len;
+            }
+        }
+    } else {
+        dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+            "%s - no reply\n", ioc->name, __func__));
+        rc = -ENXIO;
+    }
+
+ issue_host_reset:
+    if (issue_reset) {
+        mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+            FORCE_BIG_HAMMER);
+        rc = -ETIMEDOUT;
+    }
+
+ unmap:
+    if (dma_addr_out)
+        pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
+            PCI_DMA_BIDIRECTIONAL);
+    if (dma_addr_in)
+        pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
+            PCI_DMA_BIDIRECTIONAL);
+
+ free_pci:
+    if (pci_addr_out)
+        pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out,
+            pci_dma_out);
+
+    if (pci_addr_in)
+        pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in,
+            pci_dma_in);
+
+ out:
+    ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+    mutex_unlock(&ioc->transport_cmds.mutex);
+    return rc;
+}
+
+struct sas_function_template mpt3sas_transport_functions = {
+    .get_linkerrors = _transport_get_linkerrors,
+    .get_enclosure_identifier = _transport_get_enclosure_identifier,
+    .get_bay_identifier = _transport_get_bay_identifier,
+    .phy_reset = _transport_phy_reset,
+    .phy_enable = _transport_phy_enable,
+    .set_phy_speed = _transport_phy_speed,
+    .smp_handler = _transport_smp_handler,
+};
+
+struct scsi_transport_template *mpt3sas_transport_template;
diff --git a/addons/mpt3sas/src/4.4.180/mpt3sas_trigger_diag.c b/addons/mpt3sas/src/4.4.180/mpt3sas_trigger_diag.c
new file mode 100644
index 00000000..b60fd7a3
--- /dev/null
+++ b/addons/mpt3sas/src/4.4.180/mpt3sas_trigger_diag.c
@@ -0,0 +1,434 @@
+/*
+ * This module provides common API to set Diagnostic trigger for MPT
+ * (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/compat.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+
+#include "mpt3sas_base.h"
+
+/**
+ * _mpt3sas_raise_sigio - notify app
+ * @ioc: per adapter object
+ * @event_data: event data for the trigger
+ */
+static void
+_mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
+    struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+    Mpi2EventNotificationReply_t *mpi_reply;
+    u16 sz, event_data_sz;
+    unsigned long flags;
+
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
+        ioc->name, __func__));
+
+    sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+        sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4;
+    mpi_reply = kzalloc(sz, GFP_KERNEL);
+    if (!mpi_reply)
+        goto out;
+    mpi_reply->Event = cpu_to_le16(MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED);
+    event_data_sz = (sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4) / 4;
+    mpi_reply->EventDataLength = cpu_to_le16(event_data_sz);
+    memcpy(&mpi_reply->EventData, event_data,
+        sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+        "%s: add to driver event log\n",
+        ioc->name, __func__));
+    mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+    kfree(mpi_reply);
+ out:
+
+    /* clearing the diag_trigger_active flag */
+    spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+        "%s: clearing diag_trigger_active flag\n",
+        ioc->name, __func__));
+    ioc->diag_trigger_active = 0;
+    spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+        __func__));
+}
+
+/**
+ * mpt3sas_process_trigger_data - process the event data for the trigger
+ * @ioc: per adapter object
+ * @event_data: event data for the trigger
+ */
+void
+mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
+    struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+    u8 issue_reset = 0;
+
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
+        ioc->name, __func__));
+
+    /* release the diag buffer trace */
+    if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+        MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+        dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+            "%s: release trace diag buffer\n", ioc->name, __func__));
+        mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
+            &issue_reset);
+    }
+
+    _mpt3sas_raise_sigio(ioc, event_data);
+
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+        __func__));
+}
+
+/**
+ * mpt3sas_trigger_master - Master trigger handler
+ * @ioc: per adapter object
+ * @trigger_bitmask: mask of MASTER_TRIGGER_XXXX bits that fired
+ *
+ */
+void
+mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
+{
+    struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+    unsigned long flags;
+    u8 found_match = 0;
+
+    spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+    if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT ||
+        trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET)
+        goto by_pass_checks;
+
+    /* check to see if trace buffers are currently registered */
+    if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+        MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+        return;
+    }
+
+    /* check to see if trace buffers are currently released */
+    if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+        MPT3_DIAG_BUFFER_IS_RELEASED) {
+        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+        return;
+    }
+
+ by_pass_checks:
+
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+        "%s: enter - trigger_bitmask = 0x%08x\n",
+        ioc->name, __func__, trigger_bitmask));
+
+    /* don't send trigger if a trigger is currently active */
+    if (ioc->diag_trigger_active) {
+        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+        goto out;
+    }
+
+    /* check for the trigger condition */
+    if (ioc->diag_trigger_master.MasterData & trigger_bitmask) {
+        found_match = 1;
+        ioc->diag_trigger_active = 1;
+        dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+            "%s: setting diag_trigger_active flag\n",
+            ioc->name, __func__));
+    }
+    spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+    if (!found_match)
+        goto out;
+
+    memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+    event_data.trigger_type = MPT3SAS_TRIGGER_MASTER;
+    event_data.u.master.MasterData = trigger_bitmask;
+
+    if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT ||
+        trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET)
+        _mpt3sas_raise_sigio(ioc, &event_data);
+    else
+        mpt3sas_send_trigger_data_event(ioc, &event_data);
+
+ out:
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+        __func__));
+}
+
+/**
+ * mpt3sas_trigger_event - Event trigger handler
+ * @ioc: per adapter object
+ * @event: firmware event code
+ * @log_entry_qualifier: log entry qualifier (Log Entry Added events)
+ *
+ */
+void
+mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
+    u16 log_entry_qualifier)
+{
+    struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+    struct SL_WH_EVENT_TRIGGER_T *event_trigger;
+    int i;
+    unsigned long flags;
+    u8 found_match;
+
+    spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+    /* check to see if trace buffers are currently registered */
+    if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+        MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+        return;
+    }
+
+    /* check to see if trace buffers are currently released */
+    if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+        MPT3_DIAG_BUFFER_IS_RELEASED) {
+        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+        return;
+    }
+
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+        "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
+        ioc->name, __func__, event, log_entry_qualifier));
+
+    /* don't send trigger if a trigger is currently active */
+    if (ioc->diag_trigger_active) {
+        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+        goto out;
+    }
+
+    /* check for the trigger condition */
+    event_trigger = ioc->diag_trigger_event.EventTriggerEntry;
+    for (i = 0, found_match = 0; i < ioc->diag_trigger_event.ValidEntries
+        && !found_match; i++, event_trigger++) {
+        if (event_trigger->EventValue != event)
+            continue;
+        if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
+            if (event_trigger->LogEntryQualifier ==
+                log_entry_qualifier)
+                found_match = 1;
+            continue;
+        }
+        found_match = 1;
+        ioc->diag_trigger_active = 1;
+        dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+            "%s: setting diag_trigger_active flag\n",
+            ioc->name, __func__));
+    }
+    spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+    if (!found_match)
+        goto out;
+
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+        "%s: setting diag_trigger_active flag\n",
+        ioc->name, __func__));
+    memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+    event_data.trigger_type = MPT3SAS_TRIGGER_EVENT;
+    event_data.u.event.EventValue = event;
+    event_data.u.event.LogEntryQualifier = log_entry_qualifier;
+    mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+        __func__));
+}
+
+/**
+ * mpt3sas_trigger_scsi - SCSI trigger handler
+ * @ioc: per adapter object
+ * @sense_key: SCSI sense key
+ * @asc: additional sense code
+ * @ascq: additional sense code qualifier
+ *
+ */
+void
+mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
+    u8 ascq)
+{
+    struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+    struct SL_WH_SCSI_TRIGGER_T *scsi_trigger;
+    int i;
+    unsigned long flags;
+    u8 found_match;
+
+    spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+    /* check to see if trace buffers are currently registered */
+    if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+        MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+        return;
+    }
+
+    /* check to see if trace buffers are currently released */
+    if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+        MPT3_DIAG_BUFFER_IS_RELEASED) {
+        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+        return;
+    }
+
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+        "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
+        ioc->name, __func__, sense_key, asc, ascq));
+
+    /* don't send trigger if a trigger is currently active */
+    if (ioc->diag_trigger_active) {
+        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+        goto out;
+    }
+
+    /* check for the trigger condition */
+    scsi_trigger = ioc->diag_trigger_scsi.SCSITriggerEntry;
+    for (i = 0, found_match = 0; i < ioc->diag_trigger_scsi.ValidEntries
+        && !found_match; i++, scsi_trigger++) {
+        if (scsi_trigger->SenseKey != sense_key)
+            continue;
+        if (!(scsi_trigger->ASC == 0xFF || scsi_trigger->ASC == asc))
+            continue;
+        if (!(scsi_trigger->ASCQ == 0xFF || scsi_trigger->ASCQ == ascq))
+            continue;
+        found_match = 1;
+        ioc->diag_trigger_active = 1;
+    }
+    spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+    if (!found_match)
+        goto out;
+
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+        "%s: setting diag_trigger_active flag\n",
+        ioc->name, __func__));
+    memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+    event_data.trigger_type = MPT3SAS_TRIGGER_SCSI;
+    event_data.u.scsi.SenseKey = sense_key;
+    event_data.u.scsi.ASC = asc;
+    event_data.u.scsi.ASCQ = ascq;
+    mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+        __func__));
+}
+
+/**
+ * mpt3sas_trigger_mpi - MPI trigger handler
+ * @ioc: per adapter object
+ * @ioc_status: MPI IOCStatus
+ * @loginfo: MPI IocLogInfo
+ *
+ */
+void
+mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
+{
+    struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+    struct SL_WH_MPI_TRIGGER_T *mpi_trigger;
+    int i;
+    unsigned long flags;
+    u8 found_match;
+
+    spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+    /* check to see if trace buffers are currently registered */
+    if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+        MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+        return;
+    }
+
+    /* check to see if trace buffers are currently released */
+    if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+        MPT3_DIAG_BUFFER_IS_RELEASED) {
+        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+        return;
+    }
+
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+        "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
+        ioc->name, __func__, ioc_status, loginfo));
+
+    /* don't send trigger if a trigger is currently active */
+    if (ioc->diag_trigger_active) {
+        spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+        goto out;
+    }
+
+    /* check for the trigger condition */
+    mpi_trigger = ioc->diag_trigger_mpi.MPITriggerEntry;
+    for (i = 0, found_match = 0; i < ioc->diag_trigger_mpi.ValidEntries
+        && !found_match; i++, mpi_trigger++) {
+        if (mpi_trigger->IOCStatus != ioc_status)
+            continue;
+        if (!(mpi_trigger->IocLogInfo == 0xFFFFFFFF ||
+            mpi_trigger->IocLogInfo == loginfo))
+            continue;
+        found_match = 1;
+        ioc->diag_trigger_active = 1;
+    }
+    spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+    if (!found_match)
+        goto out;
+
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+        "%s: setting diag_trigger_active flag\n",
+        ioc->name, __func__));
+    memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+    event_data.trigger_type = MPT3SAS_TRIGGER_MPI;
+    event_data.u.mpi.IOCStatus = ioc_status;
+    event_data.u.mpi.IocLogInfo = loginfo;
+    mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+    dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+        __func__));
+}
diff --git a/addons/mpt3sas/src/4.4.180/mpt3sas_trigger_diag.h b/addons/mpt3sas/src/4.4.180/mpt3sas_trigger_diag.h
new file mode 100644
index 00000000..6586a463
--- /dev/null
+++ b/addons/mpt3sas/src/4.4.180/mpt3sas_trigger_diag.h
@@ -0,0 +1,194 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * to set Diagnostic triggers for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2014 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+ /* Diagnostic Trigger Configuration Data Structures */
+
+#ifndef MPT3SAS_TRIGGER_DIAG_H_INCLUDED
+#define MPT3SAS_TRIGGER_DIAG_H_INCLUDED
+
+/* limitation on number of entries */
+#define NUM_VALID_ENTRIES (20)
+
+/* trigger types */
+#define MPT3SAS_TRIGGER_MASTER (1)
+#define MPT3SAS_TRIGGER_EVENT (2)
+#define MPT3SAS_TRIGGER_SCSI (3)
+#define MPT3SAS_TRIGGER_MPI (4)
+
+/* trigger names */
+#define MASTER_TRIGGER_FILE_NAME "diag_trigger_master"
+#define EVENT_TRIGGERS_FILE_NAME "diag_trigger_event"
+#define SCSI_TRIGGERS_FILE_NAME "diag_trigger_scsi"
+#define MPI_TRIGGER_FILE_NAME "diag_trigger_mpi"
+
+/* master trigger bitmask */
+#define MASTER_TRIGGER_FW_FAULT (0x00000001)
+#define MASTER_TRIGGER_ADAPTER_RESET (0x00000002)
+#define MASTER_TRIGGER_TASK_MANAGMENT (0x00000004)
+#define MASTER_TRIGGER_DEVICE_REMOVAL (0x00000008)
+
+/* fake firmware event for trigger */
+#define MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED (0x6E)
+
+/**
+ * MasterTrigger is a single U32 passed to/from sysfs.
+ *
+ * Bit Flags (enables) include:
+ * 1. FW Faults
+ * 2. Adapter Reset issued by driver
+ * 3. TMs
+ * 4. Device Remove Event sent by FW
+ */
+
+struct SL_WH_MASTER_TRIGGER_T {
+    uint32_t MasterData;
+};
+
+/**
+ * struct SL_WH_EVENT_TRIGGER_T - Definition of an event trigger element
+ * @EventValue: Event Code to trigger on
+ * @LogEntryQualifier: Type of FW event that was logged (Log Entry Added
+ * Event only)
+ *
+ * Defines an event that should induce a DIAG_TRIGGER driver event if observed.
+ */
+struct SL_WH_EVENT_TRIGGER_T {
+    uint16_t EventValue;
+    uint16_t LogEntryQualifier;
+};
+
+/**
+ * struct SL_WH_EVENT_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of Event Triggers to be monitored for.
+ * @ValidEntries: Number of _SL_WH_EVENT_TRIGGER_T structures contained in this
+ * structure.
+ * @EventTriggerEntry: List of Event trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set Event Triggers
+ * in the Linux Driver.
+ */
+
+struct SL_WH_EVENT_TRIGGERS_T {
+    uint32_t ValidEntries;
+    struct SL_WH_EVENT_TRIGGER_T EventTriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_SCSI_TRIGGER_T - Definition of a SCSI trigger element
+ * @ASCQ: Additional Sense Code Qualifier. Can be specific or 0xFF for
+ * wildcard.
+ * @ASC: Additional Sense Code. Can be specific or 0xFF for wildcard
+ * @SenseKey: SCSI Sense Key
+ *
+ * Defines a sense key (single or many variants) that should induce a
+ * DIAG_TRIGGER driver event if observed.
+ */
+struct SL_WH_SCSI_TRIGGER_T {
+    U8 ASCQ;
+    U8 ASC;
+    U8 SenseKey;
+    U8 Reserved;
+};
+
+/**
+ * struct SL_WH_SCSI_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of SCSI sense codes that should trigger a DIAG_SERVICE event when
+ * observed.
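+ *
+ * Example (illustrative, hypothetical values): a single entry with
+ * SenseKey = 0x03 (MEDIUM ERROR) and the documented 0xFF wildcards in
+ * ASC/ASCQ fires on any medium error reported by a target:
+ *
+ *   struct SL_WH_SCSI_TRIGGERS_T t = { 0 };
+ *   t.ValidEntries = 1;
+ *   t.SCSITriggerEntry[0].SenseKey = 0x03;
+ *   t.SCSITriggerEntry[0].ASC = 0xFF;
+ *   t.SCSITriggerEntry[0].ASCQ = 0xFF;
+ *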
+ * @ValidEntries: Number of _SL_WH_SCSI_TRIGGER_T structures contained in this
+ * structure.
+ * @SCSITriggerEntry: List of SCSI Sense Code trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set SCSI Sense Code
+ * Triggers in the Linux Driver.
+ */
+struct SL_WH_SCSI_TRIGGERS_T {
+    uint32_t ValidEntries;
+    struct SL_WH_SCSI_TRIGGER_T SCSITriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_MPI_TRIGGER_T - Definition of an MPI trigger element
+ * @IOCStatus: MPI IOCStatus
+ * @IocLogInfo: MPI IocLogInfo. Can be specific or 0xFFFFFFFF for wildcard
+ *
+ * Defines an MPI IOCStatus/IocLogInfo pair that should induce a DIAG_TRIGGER
+ * driver event if observed.
+ */
+struct SL_WH_MPI_TRIGGER_T {
+    uint16_t IOCStatus;
+    uint16_t Reserved;
+    uint32_t IocLogInfo;
+};
+
+/**
+ * struct SL_WH_MPI_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of MPI IOCStatus/IocLogInfo pairs that should trigger a DIAG_SERVICE
+ * event when observed.
+ * @ValidEntries: Number of _SL_WH_MPI_TRIGGER_T structures contained in this
+ * structure.
+ * @MPITriggerEntry: List of MPI IOCStatus/IocLogInfo trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set MPI Error Triggers
+ * in the Linux Driver.
+ */
+struct SL_WH_MPI_TRIGGERS_T {
+    uint32_t ValidEntries;
+    struct SL_WH_MPI_TRIGGER_T MPITriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_TRIGGERS_EVENT_DATA_T - event data for trigger
+ * @trigger_type: trigger type (see MPT3SAS_TRIGGER_XXXX)
+ * @u: trigger condition that caused trigger to be sent
+ */
+struct SL_WH_TRIGGERS_EVENT_DATA_T {
+    uint32_t trigger_type;
+    union {
+        struct SL_WH_MASTER_TRIGGER_T master;
+        struct SL_WH_EVENT_TRIGGER_T event;
+        struct SL_WH_SCSI_TRIGGER_T scsi;
+        struct SL_WH_MPI_TRIGGER_T mpi;
+    } u;
+};
+#endif /* MPT3SAS_TRIGGER_DIAG_H_INCLUDED */
diff --git a/addons/mpt3sas/src/4.4.180/mpt3sas_warpdrive.c b/addons/mpt3sas/src/4.4.180/mpt3sas_warpdrive.c
new file mode 100644
index 00000000..540bd500
--- /dev/null
+++ b/addons/mpt3sas/src/4.4.180/mpt3sas_warpdrive.c
@@ -0,0 +1,344 @@
+/*
+ * Scsi Host Layer for MPT (Message Passing Technology) based controllers
+ *
+ * Copyright (C) 2012-2014 LSI Corporation
+ * Copyright (C) 2013-2015 Avago Technologies
+ * (mailto: MPT-FusionLinux.pdl@avagotech.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <asm/unaligned.h>
+
+#include "mpt3sas_base.h"
+
+/**
+ * _warpdrive_disable_ddio - Disable direct I/O for all the volumes
+ * @ioc: per adapter object
+ */
+static void
+_warpdrive_disable_ddio(struct MPT3SAS_ADAPTER *ioc)
+{
+    Mpi2RaidVolPage1_t vol_pg1;
+    Mpi2ConfigReply_t mpi_reply;
+    struct _raid_device *raid_device;
+    u16 handle;
+    u16 ioc_status;
+    unsigned long flags;
+
+    handle = 0xFFFF;
+    while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+        &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+            MPI2_IOCSTATUS_MASK;
+        if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+            break;
+        handle = le16_to_cpu(vol_pg1.DevHandle);
+        spin_lock_irqsave(&ioc->raid_device_lock, flags);
+        raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
+        if (raid_device)
+            raid_device->direct_io_enabled = 0;
+        spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+    }
+    return;
+}
+
+
+/**
+ * mpt3sas_get_num_volumes - Get number of volumes in the ioc
+ * @ioc: per adapter object
+ */
+u8
+mpt3sas_get_num_volumes(struct MPT3SAS_ADAPTER *ioc)
+{
+    Mpi2RaidVolPage1_t vol_pg1;
+    Mpi2ConfigReply_t mpi_reply;
+    u16 handle;
+    u8 vol_cnt = 0;
+    u16 ioc_status;
+
+    handle = 0xFFFF;
+    while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+        &vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+        ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+            MPI2_IOCSTATUS_MASK;
+        if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+            break;
+        vol_cnt++;
+        handle = le16_to_cpu(vol_pg1.DevHandle);
+    }
+    return vol_cnt;
+}
+
+
+/**
+ * mpt3sas_init_warpdrive_properties - Set properties for warpdrive direct I/O.
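+ *
+ * Summarizing the checks below: direct I/O stays enabled only for a
+ * RAID0 volume on an adapter exposing a single volume with at most
+ * MPT_MAX_WARPDRIVE_PDS members and no member LBA above 32 bits; the
+ * stripe and block sizes must be non-zero, and their lowest set bit is
+ * used as the shift exponent (i.e. power-of-two sizes are assumed).
+ * Any failed check disables direct I/O for the volume.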
+ * @ioc: per adapter object
+ * @raid_device: the raid_device object
+ */
+void
+mpt3sas_init_warpdrive_properties(struct MPT3SAS_ADAPTER *ioc,
+    struct _raid_device *raid_device)
+{
+    Mpi2RaidVolPage0_t *vol_pg0;
+    Mpi2RaidPhysDiskPage0_t pd_pg0;
+    Mpi2ConfigReply_t mpi_reply;
+    u16 sz;
+    u8 num_pds, count;
+    unsigned long stripe_sz, block_sz;
+    u8 stripe_exp, block_exp;
+    u64 dev_max_lba;
+
+    if (!ioc->is_warpdrive)
+        return;
+
+    if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS) {
+        pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
+            "globally as drives are exposed\n", ioc->name);
+        return;
+    }
+    if (mpt3sas_get_num_volumes(ioc) > 1) {
+        _warpdrive_disable_ddio(ioc);
+        pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
+            "globally as number of drives > 1\n", ioc->name);
+        return;
+    }
+    if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
+        &num_pds)) || !num_pds) {
+        pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
+            "Failure in computing number of drives\n", ioc->name);
+        return;
+    }
+
+    sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
+        sizeof(Mpi2RaidVol0PhysDisk_t));
+    vol_pg0 = kzalloc(sz, GFP_KERNEL);
+    if (!vol_pg0) {
+        pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
+            "Memory allocation failure for RVPG0\n", ioc->name);
+        return;
+    }
+
+    if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
+        MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
+        pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
+            "Failure in retrieving RVPG0\n", ioc->name);
+        kfree(vol_pg0);
+        return;
+    }
+
+    /*
+     * WARPDRIVE: If number of physical disks in a volume exceeds the max
+     * pds assumed for WARPDRIVE, disable direct I/O
+     */
+    if (num_pds > MPT_MAX_WARPDRIVE_PDS) {
+        pr_warn(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
+            "for the drive with handle(0x%04x): num_mem=%d, "
+            "max_mem_allowed=%d\n", ioc->name, raid_device->handle,
+            num_pds, MPT_MAX_WARPDRIVE_PDS);
+        kfree(vol_pg0);
+        return;
+    }
+    for (count = 0; count < num_pds; count++) {
+        if (mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+            &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
+            vol_pg0->PhysDisk[count].PhysDiskNum) ||
+            pd_pg0.DevHandle == MPT3SAS_INVALID_DEVICE_HANDLE) {
+            pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
+                "disabled for the drive with handle(0x%04x) member "
+                "handle retrieval failed for member number=%d\n",
+                ioc->name, raid_device->handle,
+                vol_pg0->PhysDisk[count].PhysDiskNum);
+            goto out_error;
+        }
+        /* Disable direct I/O if member drive lba exceeds 32 bits */
+        dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
+        if (dev_max_lba >> 32) {
+            pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is "
+                "disabled for the drive with handle(0x%04x) member "
+                "handle (0x%04x) unsupported max lba 0x%016llx\n",
+                ioc->name, raid_device->handle,
+                le16_to_cpu(pd_pg0.DevHandle),
+                (unsigned long long)dev_max_lba);
+            goto out_error;
+        }
+
+        raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle);
+    }
+
+    /*
+     * Assumption for WD: Direct I/O is not supported if the volume is
+     * not RAID0
+     */
+    if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
+        pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
+            "for the drive with handle(0x%04x): type=%d, "
+            "s_sz=%uK, blk_size=%u\n", ioc->name,
+            raid_device->handle, raid_device->volume_type,
+            (le32_to_cpu(vol_pg0->StripeSize) *
+            le16_to_cpu(vol_pg0->BlockSize)) / 1024,
+            le16_to_cpu(vol_pg0->BlockSize));
+        goto out_error;
+    }
+
+    stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
+    stripe_exp = find_first_bit(&stripe_sz, 32);
+    if (stripe_exp == 32) {
+        pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
+            "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+            ioc->name, raid_device->handle,
+            (le32_to_cpu(vol_pg0->StripeSize) *
+            le16_to_cpu(vol_pg0->BlockSize)) / 1024);
+        goto out_error;
+    }
+    raid_device->stripe_exponent = stripe_exp;
+    block_sz = le16_to_cpu(vol_pg0->BlockSize);
+    block_exp = find_first_bit(&block_sz, 16);
+    if (block_exp == 16) {
+        pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is disabled "
+            "for the drive with handle(0x%04x) invalid block sz %u\n",
+            ioc->name, raid_device->handle,
+            le16_to_cpu(vol_pg0->BlockSize));
+        goto out_error;
+    }
+    raid_device->block_exponent = block_exp;
+    raid_device->direct_io_enabled = 1;
+
+    pr_info(MPT3SAS_FMT "WarpDrive : Direct IO is Enabled for the drive"
+        " with handle(0x%04x)\n", ioc->name, raid_device->handle);
+    /*
+     * WARPDRIVE: Though the following fields are not used for direct IO,
+     * they are stored for future use:
+     */
+    raid_device->max_lba = le64_to_cpu(vol_pg0->MaxLBA);
+    raid_device->stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
+    raid_device->block_sz = le16_to_cpu(vol_pg0->BlockSize);
+
+    kfree(vol_pg0);
+    return;
+
+out_error:
+    raid_device->direct_io_enabled = 0;
+    for (count = 0; count < num_pds; count++)
+        raid_device->pd_handle[count] = 0;
+    kfree(vol_pg0);
+    return;
+}
+
+/**
+ * mpt3sas_scsi_direct_io_get - returns direct io flag
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the direct I/O flag stored for the smid.
+ */
+inline u8
+mpt3sas_scsi_direct_io_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+    return ioc->scsi_lookup[smid - 1].direct_io;
+}
+
+/**
+ * mpt3sas_scsi_direct_io_set - sets direct io flag
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @direct_io: Zero or non-zero value to set in the direct_io flag
+ *
+ * Returns Nothing.
+ */
+inline void
+mpt3sas_scsi_direct_io_set(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 direct_io)
+{
+    ioc->scsi_lookup[smid - 1].direct_io = direct_io;
+}
+
+/**
+ * mpt3sas_setup_direct_io - setup MPI request for WARPDRIVE Direct I/O
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ * @raid_device: pointer to raid device data structure
+ * @mpi_request: pointer to the SCSI_IO request message frame
+ * @smid: system request message index
+ *
+ * Returns nothing
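+ *
+ * Worked example (illustrative, hypothetical geometry): with
+ * num_pds = 4 members and a 64-block stripe (stripe_exponent = 6), a
+ * READ at virtual LBA 1000 gives stripe_off = 1000 & 63 = 40 and
+ * stripe number 1000 >> 6 = 15; sector_div(15, 4) selects column 3
+ * with quotient 3, so the CDB is rewritten to physical LBA
+ * (3 << 6) + 40 = 232 on the member drive in pd_handle[3].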
+ */
+void
+mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+    struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
+    u16 smid)
+{
+    sector_t v_lba, p_lba, stripe_off, column, io_size;
+    u32 stripe_sz, stripe_exp;
+    u8 num_pds, cmd = scmd->cmnd[0];
+
+    if (cmd != READ_10 && cmd != WRITE_10 &&
+        cmd != READ_16 && cmd != WRITE_16)
+        return;
+
+    if (cmd == READ_10 || cmd == WRITE_10)
+        v_lba = get_unaligned_be32(&mpi_request->CDB.CDB32[2]);
+    else
+        v_lba = get_unaligned_be64(&mpi_request->CDB.CDB32[2]);
+
+    io_size = scsi_bufflen(scmd) >> raid_device->block_exponent;
+
+    if (v_lba + io_size - 1 > raid_device->max_lba)
+        return;
+
+    stripe_sz = raid_device->stripe_sz;
+    stripe_exp = raid_device->stripe_exponent;
+    stripe_off = v_lba & (stripe_sz - 1);
+
+    /* Return unless IO falls within a stripe */
+    if (stripe_off + io_size > stripe_sz)
+        return;
+
+    num_pds = raid_device->num_pds;
+    p_lba = v_lba >> stripe_exp;
+    column = sector_div(p_lba, num_pds);
+    p_lba = (p_lba << stripe_exp) + stripe_off;
+    mpi_request->DevHandle = cpu_to_le16(raid_device->pd_handle[column]);
+
+    if (cmd == READ_10 || cmd == WRITE_10)
+        put_unaligned_be32(lower_32_bits(p_lba),
+            &mpi_request->CDB.CDB32[2]);
+    else
+        put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]);
+
+    mpt3sas_scsi_direct_io_set(ioc, smid, 1);
+}
diff --git a/addons/qjs-dtb/all/addons/dts.js b/addons/qjs-dtb/all/addons/dts.js
new file mode 100644
index 00000000..4d0fca87
--- /dev/null
+++ b/addons/qjs-dtb/all/addons/dts.js
@@ -0,0 +1,2 @@
+/*!
For license information please see bundle.js.LICENSE.txt */ +(()=>{var t={654:()=>{}},e={};function n(s){var i=e[s];if(void 0!==i)return i.exports;var r=e[s]={exports:{}};return t[s](r,r.exports,n),r.exports}(()=>{"use strict";String.prototype.seed=String.prototype.seed||Math.round(Math.random()*Math.pow(2,32)),String.prototype.hashCode=function(){const t=this.toString();let e,n;const s=3&t.length,i=t.length-s;let r=String.prototype.seed;const o=3432918353,a=461845907;let l=0;for(;l>>16)*o&65535)<<16)&4294967295,n=n<<15|n>>>17,n=(65535&n)*a+(((n>>>16)*a&65535)<<16)&4294967295,r^=n,r=r<<13|r>>>19,e=5*(65535&r)+((5*(r>>>16)&65535)<<16)&4294967295,r=27492+(65535&e)+((58964+(e>>>16)&65535)<<16);switch(n=0,s){case 3:n^=(255&t.charCodeAt(l+2))<<16;case 2:n^=(255&t.charCodeAt(l+1))<<8;case 1:n^=255&t.charCodeAt(l),n=(65535&n)*o+(((n>>>16)*o&65535)<<16)&4294967295,n=n<<15|n>>>17,n=(65535&n)*a+(((n>>>16)*a&65535)<<16)&4294967295,r^=n}return r^=t.length,r^=r>>>16,r=2246822507*(65535&r)+((2246822507*(r>>>16)&65535)<<16)&4294967295,r^=r>>>13,r=3266489909*(65535&r)+((3266489909*(r>>>16)&65535)<<16)&4294967295,r^=r>>>16,r>>>0},String.prototype.codePointAt||function(){var t=function(){let t;try{const e={},n=Object.defineProperty;t=n(e,e,e)&&n}catch(t){}return t}();const e=function(t){if(null==this)throw TypeError();const e=String(this),n=e.length;let s=t?Number(t):0;if(s!=s&&(s=0),s<0||s>=n)return;const i=e.charCodeAt(s);let r;return i>=55296&&i<=56319&&n>s+1&&(r=e.charCodeAt(s+1),r>=56320&&r<=57343)?1024*(i-55296)+r-56320+65536:i};t?t(String.prototype,"codePointAt",{value:e,configurable:!0,writable:!0}):String.prototype.codePointAt=e}(),String.fromCodePoint||function(){const t=function(){let t;try{const e={},n=Object.defineProperty;t=n(e,e,e)&&n}catch(t){}return t}(),e=String.fromCharCode,n=Math.floor,s=function(t){const s=16384,i=[];let r,o,a=-1;const l=arguments.length;if(!l)return"";let h="";for(;++a1114111||n(t)!==t)throw RangeError("Invalid code point: "+t);t<=65535?i.push(t):(t-=65536,r=55296+(t>>10),o=t%1024+56320,i.push(r,o)),(a+1===l||i.length>s)&&(h+=e.apply(null,i),i.length=0)}return h};t?t(String,"fromCodePoint",{value:s,configurable:!0,writable:!0}):String.fromCodePoint=s}();class t{constructor(){this.source=null,this.type=null,this.channel=null,this.start=null,this.stop=null,this.tokenIndex=null,this.line=null,this.column=null,this._text=null}getTokenSource(){return this.source[0]}getInputStream(){return this.source[1]}get text(){return this._text}set text(t){this._text=t}}function e(t,e){if(!Array.isArray(t)||!Array.isArray(e))return!1;if(t===e)return!0;if(t.length!==e.length)return!1;for(let n=0;n>>17,t*=461845907,this.count=this.count+1;let n=this.hash^t;n=n<<13|n>>>19,n=5*n+3864292196,this.hash=n}}}finish(){let t=this.hash^4*this.count;return t^=t>>>16,t*=2246822507,t^=t>>>13,t*=3266489909,t^=t>>>16,t}static hashStuff(){const t=new s;return t.update.apply(t,arguments),t.finish()}}function i(t){return t?t.hashCode():-1}function r(t,e){return t?t.equals(e):t===e}function o(t){return null===t?"null":t}function a(t){return Array.isArray(t)?"["+t.map(o).join(", ")+"]":"null"}const l="h-";class h{constructor(t,e){this.data={},this.hashFunction=t||i,this.equalsFunction=e||r}add(t){const e=l+this.hashFunction(t);if(e in this.data){const n=this.data[e];for(let e=0;et.startsWith(l))).flatMap((t=>this.data[t]),this)}toString(){return a(this.values())}get length(){return Object.keys(this.data).filter((t=>t.startsWith(l))).map((t=>this.data[t].length),this).reduce(((t,e)=>t+e),0)}}class 
c{hashCode(){const t=new s;return this.updateHashCode(t),t.finish()}evaluate(t,e){}evalPrecedence(t,e){return this}static andContext(t,e){if(null===t||t===c.NONE)return e;if(null===e||e===c.NONE)return t;const n=new u(t,e);return 1===n.opnds.length?n.opnds[0]:n}static orContext(t,e){if(null===t)return e;if(null===e)return t;if(t===c.NONE||e===c.NONE)return c.NONE;const n=new d(t,e);return 1===n.opnds.length?n.opnds[0]:n}}class u extends c{constructor(t,e){super();const n=new h;t instanceof u?t.opnds.map((function(t){n.add(t)})):n.add(t),e instanceof u?e.opnds.map((function(t){n.add(t)})):n.add(e);const s=p(n);if(s.length>0){let t=null;s.map((function(e){(null===t||e.precedencet.toString()));return(t.length>3?t.slice(3):t).join("&&")}}class d extends c{constructor(t,e){super();const n=new h;t instanceof d?t.opnds.map((function(t){n.add(t)})):n.add(t),e instanceof d?e.opnds.map((function(t){n.add(t)})):n.add(e);const s=p(n);if(s.length>0){const t=s.sort((function(t,e){return t.compareTo(e)})),e=t[t.length-1];n.add(e)}this.opnds=Array.from(n.values())}equals(t){return this===t||t instanceof d&&e(this.opnds,t.opnds)}updateHashCode(t){t.update(this.opnds,"OR")}evaluate(t,e){for(let n=0;nt.toString()));return(t.length>3?t.slice(3):t).join("||")}}function p(t){const e=[];return t.values().map((function(t){t instanceof c.PrecedencePredicate&&e.push(t)})),e}function g(t,e){if(null===t){const t={state:null,alt:null,context:null,semanticContext:null};return e&&(t.reachesIntoOuterContext=0),t}{const n={};return n.state=t.state||null,n.alt=void 0===t.alt?null:t.alt,n.context=t.context||null,n.semanticContext=t.semanticContext||null,e&&(n.reachesIntoOuterContext=t.reachesIntoOuterContext||0,n.precedenceFilterSuppressed=t.precedenceFilterSuppressed||!1),n}}class f{constructor(t,e){this.checkContext(t,e),t=g(t),e=g(e,!0),this.state=null!==t.state?t.state:e.state,this.alt=null!==t.alt?t.alt:e.alt,this.context=null!==t.context?t.context:e.context,this.semanticContext=null!==t.semanticContext?t.semanticContext:null!==e.semanticContext?e.semanticContext:c.NONE,this.reachesIntoOuterContext=e.reachesIntoOuterContext,this.precedenceFilterSuppressed=e.precedenceFilterSuppressed}checkContext(t,e){null!==t.context&&void 0!==t.context||null!==e&&null!==e.context&&void 0!==e.context||(this.context=null)}hashCode(){const t=new s;return this.updateHashCode(t),t.finish()}updateHashCode(t){t.update(this.state.stateNumber,this.alt,this.context,this.semanticContext)}equals(t){return this===t||t instanceof f&&this.state.stateNumber===t.state.stateNumber&&this.alt===t.alt&&(null===this.context?null===t.context:this.context.equals(t.context))&&this.semanticContext.equals(t.semanticContext)&&this.precedenceFilterSuppressed===t.precedenceFilterSuppressed}hashCodeForConfigSet(){const t=new s;return t.update(this.state.stateNumber,this.alt,this.semanticContext),t.finish()}equalsForConfigSet(t){return this===t||t instanceof f&&this.state.stateNumber===t.state.stateNumber&&this.alt===t.alt&&this.semanticContext.equals(t.semanticContext)}toString(){return"("+this.state+","+this.alt+(null!==this.context?",["+this.context.toString()+"]":"")+(this.semanticContext!==c.NONE?","+this.semanticContext.toString():"")+(this.reachesIntoOuterContext>0?",up="+this.reachesIntoOuterContext:"")+")"}}class x{constructor(t,e){this.start=t,this.stop=e}clone(){return new x(this.start,this.stop)}contains(t){return 
t>=this.start&&tthis.addInterval(t)),this),this}reduce(t){if(t=n.stop?(this.intervals.splice(t+1,1),this.reduce(t)):e.stop>=n.start&&(this.intervals[t]=new x(e.start,n.stop),this.intervals.splice(t+1,1))}}complement(t,e){const n=new T;return n.addInterval(new x(t,e+1)),null!==this.intervals&&this.intervals.forEach((t=>n.removeRange(t))),n}contains(t){if(null===this.intervals)return!1;for(let e=0;en.start&&t.stop=n.stop?(this.intervals.splice(e,1),e-=1):t.start"):e.push("'"+String.fromCharCode(s.start)+"'"):e.push("'"+String.fromCharCode(s.start)+"'..'"+String.fromCharCode(s.stop-1)+"'")}return e.length>1?"{"+e.join(", ")+"}":e[0]}toIndexString(){const e=[];for(let n=0;n"):e.push(s.start.toString()):e.push(s.start.toString()+".."+(s.stop-1).toString())}return e.length>1?"{"+e.join(", ")+"}":e[0]}toTokenString(t,e){const n=[];for(let s=0;s1?"{"+n.join(", ")+"}":n[0]}elementName(e,n,s){return s===t.EOF?"":s===t.EPSILON?"":e[s]||n[s]}get length(){return this.intervals.map((t=>t.length)).reduce(((t,e)=>t+e))}}class S{constructor(){this.atn=null,this.stateNumber=S.INVALID_STATE_NUMBER,this.stateType=null,this.ruleIndex=0,this.epsilonOnlyTransitions=!1,this.transitions=[],this.nextTokenWithinRule=null}toString(){return this.stateNumber}equals(t){return t instanceof S&&this.stateNumber===t.stateNumber}isNonGreedyExitState(){return!1}addTransition(t,e){void 0===e&&(e=-1),0===this.transitions.length?this.epsilonOnlyTransitions=t.isEpsilon:this.epsilonOnlyTransitions!==t.isEpsilon&&(this.epsilonOnlyTransitions=!1),-1===e?this.transitions.push(t):this.transitions.splice(e,1,t)}}S.INVALID_TYPE=0,S.BASIC=1,S.RULE_START=2,S.BLOCK_START=3,S.PLUS_BLOCK_START=4,S.STAR_BLOCK_START=5,S.TOKEN_START=6,S.RULE_STOP=7,S.BLOCK_END=8,S.STAR_LOOP_BACK=9,S.STAR_LOOP_ENTRY=10,S.PLUS_LOOP_BACK=11,S.LOOP_END=12,S.serializationNames=["INVALID","BASIC","RULE_START","BLOCK_START","PLUS_BLOCK_START","STAR_BLOCK_START","TOKEN_START","RULE_STOP","BLOCK_END","STAR_LOOP_BACK","STAR_LOOP_ENTRY","PLUS_LOOP_BACK","LOOP_END"],S.INVALID_STATE_NUMBER=-1;class E extends S{constructor(){return super(),this.stateType=S.RULE_STOP,this}}class m{constructor(t){if(null==t)throw"target cannot be null.";this.target=t,this.isEpsilon=!1,this.label=null}}m.EPSILON=1,m.RANGE=2,m.RULE=3,m.PREDICATE=4,m.ATOM=5,m.ACTION=6,m.SET=7,m.NOT_SET=8,m.WILDCARD=9,m.PRECEDENCE=10,m.serializationNames=["INVALID","EPSILON","RANGE","RULE","PREDICATE","ATOM","ACTION","SET","NOT_SET","WILDCARD","PRECEDENCE"],m.serializationTypes={EpsilonTransition:m.EPSILON,RangeTransition:m.RANGE,RuleTransition:m.RULE,PredicateTransition:m.PREDICATE,AtomTransition:m.ATOM,ActionTransition:m.ACTION,SetTransition:m.SET,NotSetTransition:m.NOT_SET,WildcardTransition:m.WILDCARD,PrecedencePredicateTransition:m.PRECEDENCE};class _ extends m{constructor(t,e,n,s){super(t),this.ruleIndex=e,this.precedence=n,this.followState=s,this.serializationType=m.RULE,this.isEpsilon=!0}matches(t,e,n){return!1}}class C extends m{constructor(e,n){super(e),this.serializationType=m.SET,null!=n?this.label=n:(this.label=new T,this.label.addOne(t.INVALID_TYPE))}matches(t,e,n){return this.label.contains(t)}toString(){return this.label.toString()}}class A extends C{constructor(t,e){super(t,e),this.serializationType=m.NOT_SET}matches(t,e,n){return t>=e&&t<=n&&!super.matches(t,e,n)}toString(){return"~"+super.toString()}}class N extends m{constructor(t){super(t),this.serializationType=m.WILDCARD}matches(t,e,n){return t>=e&&t<=n}toString(){return"."}}class y extends m{constructor(t){super(t)}}class I extends class 
extends class{}{}{}class k extends I{getRuleContext(){throw new Error("missing interface implementation")}}class O extends I{}class R extends O{}const L={toStringTree:function(t,e,n){e=e||null,null!==(n=n||null)&&(e=n.ruleNames);let s=L.getNodeText(t,e);s=function(t,e){return t=t.replace(/\t/g,"\\t").replace(/\n/g,"\\n").replace(/\r/g,"\\r")}(s);const i=t.getChildCount();if(0===i)return s;let r="("+s+" ";i>0&&(s=L.toStringTree(t.getChild(0),e),r=r.concat(s));for(let n=1;n=0&&e0&&(t+=", "),this.returnStates[e]!==P.EMPTY_RETURN_STATE?(t+=this.returnStates[e],null!==this.parents[e]?t=t+" "+this.parents[e]:t+="null"):t+="$";return t+"]"}}get length(){return this.returnStates.length}}class D extends P{constructor(t,e){let n=0;const i=new s;null!==t?i.update(t,e):i.update(1),n=i.finish(),super(n),this.parentCtx=t,this.returnState=e}getParent(t){return this.parentCtx}getReturnState(t){return this.returnState}equals(t){return this===t||t instanceof D&&this.hashCode()===t.hashCode()&&this.returnState===t.returnState&&(null==this.parentCtx?null==t.parentCtx:this.parentCtx.equals(t.parentCtx))}toString(){const t=null===this.parentCtx?"":this.parentCtx.toString();return 0===t.length?this.returnState===P.EMPTY_RETURN_STATE?"$":""+this.returnState:this.returnState+" "+t}get length(){return 1}static create(t,e){return e===P.EMPTY_RETURN_STATE&&null===t?P.EMPTY:new D(t,e)}}class F extends D{constructor(){super(null,P.EMPTY_RETURN_STATE)}isEmpty(){return!0}getParent(t){return null}getReturnState(t){return this.returnState}equals(t){return this===t}toString(){return"$"}}P.EMPTY=new F;const M="h-";class U{constructor(t,e){this.data={},this.hashFunction=t||i,this.equalsFunction=e||r}set(t,e){const n=M+this.hashFunction(t);if(n in this.data){const s=this.data[n];for(let n=0;nt.startsWith(M))).flatMap((t=>this.data[t]),this)}getKeys(){return this.entries().map((t=>t.key))}getValues(){return this.entries().map((t=>t.value))}toString(){return"["+this.entries().map((t=>"{"+t.key+":"+t.value+"}")).join(", ")+"]"}get length(){return Object.keys(this.data).filter((t=>t.startsWith(M))).map((t=>this.data[t].length),this).reduce(((t,e)=>t+e),0)}}function B(t,e){if(null==e&&(e=w.EMPTY),null===e.parentCtx||e===w.EMPTY)return P.EMPTY;const n=B(t,e.parentCtx),s=t.states[e.invokingState].transitions[0];return D.create(n,s.followState.stateNumber)}function V(t,e,n){if(t.isEmpty())return t;let s=n.get(t)||null;if(null!==s)return s;if(s=e.get(t),null!==s)return n.set(t,s),s;let i=!1,r=[];for(let s=0;se.returnState&&(i[0]=e.returnState,i[1]=t.returnState);const r=new b([n,n],i);return null!==s&&s.set(t,e,r),r}const i=[t.returnState,e.returnState];let r=[t.parentCtx,e.parentCtx];t.returnState>e.returnState&&(i[0]=e.returnState,i[1]=t.returnState,r=[e.parentCtx,t.parentCtx]);const o=new b(r,i);return null!==s&&s.set(t,e,o),o}}(t,e,n,s);if(n){if(t instanceof F)return t;if(e instanceof F)return e}return t instanceof D&&(t=new b([t.getParent()],[t.returnState])),e instanceof D&&(e=new b([e.getParent()],[e.returnState])),function(t,e,n,s){if(null!==s){let n=s.get(t,e);if(null!==n)return n;if(n=s.get(e,t),null!==n)return n}let i=0,r=0,o=0,a=[],l=[];for(;ithis.add(t)),this)}remove(t){delete this.data[t]}has(t){return!0===this.data[t]}values(){return Object.keys(this.data)}minValue(){return Math.min.apply(null,this.values())}hashCode(){return s.hashStuff(this.values())}equals(t){return t instanceof K&&e(this.data,t.data)}toString(){return"{"+this.values().join(", ")+"}"}get length(){return this.values().length}}class 
q{constructor(t){this.atn=t}getDecisionLookahead(t){if(null===t)return null;const e=t.transitions.length,n=[];for(let s=0;s=this.states.length)throw"Invalid state number.";const s=this.states[e];let i=this.nextTokens(s);if(!i.contains(t.EPSILON))return i;const r=new T;for(r.addSet(i),r.removeOne(t.EPSILON);null!==n&&n.invokingState>=0&&i.contains(t.EPSILON);){const e=this.states[n.invokingState].transitions[0];i=this.nextTokens(e.followState),r.addSet(i),r.removeOne(t.EPSILON),n=n.parentCtx}return i.contains(t.EPSILON)&&r.addOne(t.EOF),r}}Y.INVALID_ALT_NUMBER=0;class z extends S{constructor(){super(),this.stateType=S.BASIC}}class G extends S{constructor(){return super(),this.decision=-1,this.nonGreedy=!1,this}}class W extends G{constructor(){return super(),this.endState=null,this}}class j extends S{constructor(){return super(),this.stateType=S.BLOCK_END,this.startState=null,this}}class Q extends S{constructor(){return super(),this.stateType=S.LOOP_END,this.loopBackState=null,this}}class $ extends S{constructor(){return super(),this.stateType=S.RULE_START,this.stopState=null,this.isPrecedenceRule=!1,this}}class X extends G{constructor(){return super(),this.stateType=S.TOKEN_START,this}}class J extends G{constructor(){return super(),this.stateType=S.PLUS_LOOP_BACK,this}}class Z extends S{constructor(){return super(),this.stateType=S.STAR_LOOP_BACK,this}}class tt extends G{constructor(){return super(),this.stateType=S.STAR_LOOP_ENTRY,this.loopBackState=null,this.isPrecedenceDecision=null,this}}class et extends W{constructor(){return super(),this.stateType=S.PLUS_BLOCK_START,this.loopBackState=null,this}}class nt extends W{constructor(){return super(),this.stateType=S.STAR_BLOCK_START,this}}class st extends W{constructor(){return super(),this.stateType=S.BLOCK_START,this}}class it extends m{constructor(t,e){super(t),this.label_=e,this.label=this.makeLabel(),this.serializationType=m.ATOM}makeLabel(){const t=new T;return t.addOne(this.label_),t}matches(t,e,n){return this.label_===t}toString(){return this.label_}}class rt extends m{constructor(t,e,n){super(t),this.serializationType=m.RANGE,this.start=e,this.stop=n,this.label=this.makeLabel()}makeLabel(){const t=new T;return t.addRange(this.start,this.stop),t}matches(t,e,n){return t>=this.start&&t<=this.stop}toString(){return"'"+String.fromCharCode(this.start)+"'..'"+String.fromCharCode(this.stop)+"'"}}class ot extends m{constructor(t,e,n,s){super(t),this.serializationType=m.ACTION,this.ruleIndex=e,this.actionIndex=void 0===n?-1:n,this.isCtxDependent=void 0!==s&&s,this.isEpsilon=!0}matches(t,e,n){return!1}toString(){return"action_"+this.ruleIndex+":"+this.actionIndex}}class at extends m{constructor(t,e){super(t),this.serializationType=m.EPSILON,this.isEpsilon=!0,this.outermostPrecedenceReturn=e}matches(t,e,n){return!1}toString(){return"epsilon"}}class lt extends c{constructor(t,e,n){super(),this.ruleIndex=void 0===t?-1:t,this.predIndex=void 0===e?-1:e,this.isCtxDependent=void 0!==n&&n}evaluate(t,e){const n=this.isCtxDependent?e:null;return t.sempred(n,this.ruleIndex,this.predIndex)}updateHashCode(t){t.update(this.ruleIndex,this.predIndex,this.isCtxDependent)}equals(t){return this===t||t instanceof lt&&this.ruleIndex===t.ruleIndex&&this.predIndex===t.predIndex&&this.isCtxDependent===t.isCtxDependent}toString(){return"{"+this.ruleIndex+":"+this.predIndex+"}?"}}c.NONE=new lt;class ht extends 
y{constructor(t,e,n,s){super(t),this.serializationType=m.PREDICATE,this.ruleIndex=e,this.predIndex=n,this.isCtxDependent=s,this.isEpsilon=!0}matches(t,e,n){return!1}getPredicate(){return new lt(this.ruleIndex,this.predIndex,this.isCtxDependent)}toString(){return"pred_"+this.ruleIndex+":"+this.predIndex}}class ct extends c{constructor(t){super(),this.precedence=void 0===t?0:t}evaluate(t,e){return t.precpred(e,this.precedence)}evalPrecedence(t,e){return t.precpred(e,this.precedence)?c.NONE:null}compareTo(t){return this.precedence-t.precedence}updateHashCode(t){t.update(this.precedence)}equals(t){return this===t||t instanceof ct&&this.precedence===t.precedence}toString(){return"{"+this.precedence+">=prec}?"}}c.PrecedencePredicate=ct;class ut extends y{constructor(t,e){super(t),this.serializationType=m.PRECEDENCE,this.precedence=e,this.isEpsilon=!0}matches(t,e,n){return!1}getPredicate(){return new ct(this.precedence)}toString(){return this.precedence+" >= _p"}}class dt{constructor(t){void 0===t&&(t=null),this.readOnly=!1,this.verifyATN=null===t||t.verifyATN,this.generateRuleBypassTransitions=null!==t&&t.generateRuleBypassTransitions}}dt.defaultOptions=new dt,dt.defaultOptions.readOnly=!0;class pt{constructor(t){this.actionType=t,this.isPositionDependent=!1}hashCode(){const t=new s;return this.updateHashCode(t),t.finish()}updateHashCode(t){t.update(this.actionType)}equals(t){return this===t}}class gt extends pt{constructor(){super(6)}execute(t){t.skip()}toString(){return"skip"}}gt.INSTANCE=new gt;class ft extends pt{constructor(t){super(0),this.channel=t}execute(t){t._channel=this.channel}updateHashCode(t){t.update(this.actionType,this.channel)}equals(t){return this===t||t instanceof ft&&this.channel===t.channel}toString(){return"channel("+this.channel+")"}}class xt extends pt{constructor(t,e){super(1),this.ruleIndex=t,this.actionIndex=e,this.isPositionDependent=!0}execute(t){t.action(null,this.ruleIndex,this.actionIndex)}updateHashCode(t){t.update(this.actionType,this.ruleIndex,this.actionIndex)}equals(t){return this===t||t instanceof xt&&this.ruleIndex===t.ruleIndex&&this.actionIndex===t.actionIndex}}class Tt extends pt{constructor(){super(3)}execute(t){t.more()}toString(){return"more"}}Tt.INSTANCE=new Tt;class St extends pt{constructor(t){super(7),this.type=t}execute(t){t.type=this.type}updateHashCode(t){t.update(this.actionType,this.type)}equals(t){return this===t||t instanceof St&&this.type===t.type}toString(){return"type("+this.type+")"}}class Et extends pt{constructor(t){super(5),this.mode=t}execute(t){t.pushMode(this.mode)}updateHashCode(t){t.update(this.actionType,this.mode)}equals(t){return this===t||t instanceof Et&&this.mode===t.mode}toString(){return"pushMode("+this.mode+")"}}class mt extends pt{constructor(){super(4)}execute(t){t.popMode()}toString(){return"popMode"}}mt.INSTANCE=new mt;class _t extends pt{constructor(t){super(2),this.mode=t}execute(t){t.mode(this.mode)}updateHashCode(t){t.update(this.actionType,this.mode)}equals(t){return this===t||t instanceof _t&&this.mode===t.mode}toString(){return"mode("+this.mode+")"}}function Ct(t,e){const n=[];return n[t-1]=e,n.map((function(t){return e}))}class At{constructor(t){null==t&&(t=dt.defaultOptions),this.deserializationOptions=t,this.stateFactories=null,this.actionFactories=null}deserialize(t){const e=this.reset(t);this.checkVersion(e),e&&this.skipUUID();const n=this.readATN();this.readStates(n,e),this.readRules(n,e),this.readModes(n);const s=[];return 
this.readSets(n,s,this.readInt.bind(this)),e&&this.readSets(n,s,this.readInt32.bind(this)),this.readEdges(n,s),this.readDecisions(n),this.readLexerActions(n,e),this.markPrecedenceDecisions(n),this.verifyATN(n),this.deserializationOptions.generateRuleBypassTransitions&&1===n.grammarType&&(this.generateRuleBypassTransitions(n),this.verifyATN(n)),n}reset(t){if(3===(t.charCodeAt?t.charCodeAt(0):t[0])){const e=function(t){const e=t.charCodeAt(0);return e>1?e-2:e+65534},n=t.split("").map(e);return n[0]=t.charCodeAt(0),this.data=n,this.pos=0,!0}return this.data=t,this.pos=0,!1}skipUUID(){let t=0;for(;t++<8;)this.readInt()}checkVersion(t){const e=this.readInt();if(!t&&4!==e)throw"Could not deserialize ATN with version "+e+" (expected 4)."}readATN(){const t=this.readInt(),e=this.readInt();return new Y(t,e)}readStates(t,e){let n,s,i;const r=[],o=[],a=this.readInt();for(let n=0;n0;)i.addTransition(l.transitions[h-1]),l.transitions=l.transitions.slice(-1);t.ruleToStartState[e].addTransition(new at(i)),r.addTransition(new at(a));const c=new z;t.addState(c),c.addTransition(new it(r,t.ruleToTokenType[e])),i.addTransition(new at(c))}stateIsEndStateFor(t,e){if(t.ruleIndex!==e)return null;if(!(t instanceof tt))return null;const n=t.transitions[t.transitions.length-1].target;return n instanceof Q&&n.epsilonOnlyTransitions&&n.transitions[0].target instanceof E?t:null}markPrecedenceDecisions(t){for(let e=0;e=0):this.checkCondition(n.transitions.length<=1||n instanceof E)}}checkCondition(t,e){if(!t)throw null==e&&(e="IllegalState"),e}readInt(){return this.data[this.pos++]}readInt32(){return this.readInt()|this.readInt()<<16}edgeFactory(e,n,s,i,r,o,a,l){const h=e.states[i];switch(n){case m.EPSILON:return new at(h);case m.RANGE:return new rt(h,0!==a?t.EOF:r,o);case m.RULE:return new _(e.states[r],o,a,h);case m.PREDICATE:return new ht(h,r,o,0!==a);case m.PRECEDENCE:return new ut(h,r);case m.ATOM:return new it(h,0!==a?t.EOF:r);case m.ACTION:return new ot(h,r,o,0!==a);case m.SET:return new C(h,l[r]);case m.NOT_SET:return new A(h,l[r]);case m.WILDCARD:return new N(h);default:throw"The specified transition type: "+n+" is not valid."}}stateFactory(t,e){if(null===this.stateFactories){const t=[];t[S.INVALID_TYPE]=null,t[S.BASIC]=()=>new z,t[S.RULE_START]=()=>new $,t[S.BLOCK_START]=()=>new st,t[S.PLUS_BLOCK_START]=()=>new et,t[S.STAR_BLOCK_START]=()=>new nt,t[S.TOKEN_START]=()=>new X,t[S.RULE_STOP]=()=>new E,t[S.BLOCK_END]=()=>new j,t[S.STAR_LOOP_BACK]=()=>new Z,t[S.STAR_LOOP_ENTRY]=()=>new tt,t[S.PLUS_LOOP_BACK]=()=>new J,t[S.LOOP_END]=()=>new Q,this.stateFactories=t}if(t>this.stateFactories.length||null===this.stateFactories[t])throw"The specified state type "+t+" is not valid.";{const n=this.stateFactories[t]();if(null!==n)return n.ruleIndex=e,n}}lexerActionFactory(t,e,n){if(null===this.actionFactories){const t=[];t[0]=(t,e)=>new ft(t),t[1]=(t,e)=>new xt(t,e),t[2]=(t,e)=>new _t(t),t[3]=(t,e)=>Tt.INSTANCE,t[4]=(t,e)=>mt.INSTANCE,t[5]=(t,e)=>new Et(t),t[6]=(t,e)=>gt.INSTANCE,t[7]=(t,e)=>new St(t),this.actionFactories=t}if(t>this.actionFactories.length||null===this.actionFactories[t])throw"The specified lexer action type "+t+" is not valid.";return this.actionFactories[t](e,n)}}class Nt{syntaxError(t,e,n,s,i,r){}reportAmbiguity(t,e,n,s,i,r,o){}reportAttemptingFullContext(t,e,n,s,i,r){}reportContextSensitivity(t,e,n,s,i,r){}}class yt extends Nt{constructor(){super()}syntaxError(t,e,n,s,i,r){console.error("line "+n+":"+s+" "+i)}}yt.INSTANCE=new yt;class It extends 
Nt{constructor(t){if(super(),null===t)throw"delegates";return this.delegates=t,this}syntaxError(t,e,n,s,i,r){this.delegates.map((o=>o.syntaxError(t,e,n,s,i,r)))}reportAmbiguity(t,e,n,s,i,r,o){this.delegates.map((a=>a.reportAmbiguity(t,e,n,s,i,r,o)))}reportAttemptingFullContext(t,e,n,s,i,r){this.delegates.map((o=>o.reportAttemptingFullContext(t,e,n,s,i,r)))}reportContextSensitivity(t,e,n,s,i,r){this.delegates.map((o=>o.reportContextSensitivity(t,e,n,s,i,r)))}}class kt{constructor(){this._listeners=[yt.INSTANCE],this._interp=null,this._stateNumber=-1}checkVersion(t){"4.10.1"!==t&&console.log("ANTLR runtime and generated code versions disagree: 4.10.1!="+t)}addErrorListener(t){this._listeners.push(t)}removeErrorListeners(){this._listeners=[]}getLiteralNames(){return Object.getPrototypeOf(this).constructor.literalNames||[]}getSymbolicNames(){return Object.getPrototypeOf(this).constructor.symbolicNames||[]}getTokenNames(){if(!this.tokenNames){const t=this.getLiteralNames(),e=this.getSymbolicNames(),n=t.length>e.length?t.length:e.length;this.tokenNames=[];for(let s=0;s";let n=e.text;return null===n&&(n=e.type===t.EOF?"":"<"+e.type+">"),n=n.replace("\n","\\n").replace("\r","\\r").replace("\t","\\t"),"'"+n+"'"}getErrorListenerDispatch(){return new It(this._listeners)}sempred(t,e,n){return!0}precpred(t,e){return!0}get state(){return this._stateNumber}set state(t){this._stateNumber=t}}kt.tokenTypeMapCache={},kt.ruleIndexMapCache={};class Ot extends t{constructor(e,n,s,i,r){super(),this.source=void 0!==e?e:Ot.EMPTY_SOURCE,this.type=void 0!==n?n:null,this.channel=void 0!==s?s:t.DEFAULT_CHANNEL,this.start=void 0!==i?i:-1,this.stop=void 0!==r?r:-1,this.tokenIndex=-1,null!==this.source[0]?(this.line=e[0].line,this.column=e[0].column):this.column=-1}clone(){const t=new Ot(this.source,this.type,this.channel,this.start,this.stop);return t.tokenIndex=this.tokenIndex,t.line=this.line,t.column=this.column,t.text=this.text,t}toString(){let t=this.text;return t=null!==t?t.replace(/\n/g,"\\n").replace(/\r/g,"\\r").replace(/\t/g,"\\t"):"","[@"+this.tokenIndex+","+this.start+":"+this.stop+"='"+t+"',<"+this.type+">"+(this.channel>0?",channel="+this.channel:"")+","+this.line+":"+this.column+"]"}get text(){if(null!==this._text)return this._text;const t=this.getInputStream();if(null===t)return null;const e=t.size;return this.start"}set text(t){this._text=t}}Ot.EMPTY_SOURCE=[null,null];class Rt extends class{}{constructor(t){super(),this.copyText=void 0!==t&&t}create(t,e,n,s,i,r,o,a){const l=new Ot(t,e,s,i,r);return l.line=o,l.column=a,null!==n?l.text=n:this.copyText&&null!==t[1]&&(l.text=t[1].getText(i,r)),l}createThin(t,e){const n=new Ot(null,t);return n.text=e,n}}Rt.DEFAULT=new Rt;class Lt extends Error{constructor(t){super(t.message),Error.captureStackTrace&&Error.captureStackTrace(this,Lt),this.message=t.message,this.recognizer=t.recognizer,this.input=t.input,this.ctx=t.ctx,this.offendingToken=null,this.offendingState=-1,null!==this.recognizer&&(this.offendingState=this.recognizer.state)}getExpectedTokens(){return null!==this.recognizer?this.recognizer.atn.getExpectedTokens(this.offendingState,this.ctx):null}toString(){return this.message}}class vt extends Lt{constructor(t,e,n,s){super({message:"",recognizer:t,input:e,ctx:null}),this.startIndex=n,this.deadEndConfigs=s}toString(){let t="";return this.startIndex>=0&&this.startIndex":"\n"===e?"\\n":"\t"===e?"\\t":"\r"===e?"\\r":e}getCharErrorDisplay(t){return"'"+this.getErrorDisplayForChar(t)+"'"}recover(e){this._input.LA(1)!==t.EOF&&(e instanceof 
vt?this._interp.consume(this._input):this._input.consume())}get inputStream(){return this._input}set inputStream(t){this._input=null,this._tokenFactorySourcePair=[this,this._input],this.reset(),this._input=t,this._tokenFactorySourcePair=[this,this._input]}get sourceName(){return this._input.sourceName}get type(){return this._type}set type(t){this._type=t}get line(){return this._interp.line}set line(t){this._interp.line=t}get column(){return this._interp.column}set column(t){this._interp.column=t}get text(){return null!==this._text?this._text:this._interp.getText(this._input)}set text(t){this._text=t}}function Pt(t){return t.hashCodeForConfigSet()}function bt(t,e){return t===e||null!==t&&null!==e&&t.equalsForConfigSet(e)}wt.DEFAULT_MODE=0,wt.MORE=-2,wt.SKIP=-3,wt.DEFAULT_TOKEN_CHANNEL=t.DEFAULT_CHANNEL,wt.HIDDEN=t.HIDDEN_CHANNEL,wt.MIN_CHAR_VALUE=0,wt.MAX_CHAR_VALUE=1114111;class Dt{constructor(t){this.configLookup=new h(Pt,bt),this.fullCtx=void 0===t||t,this.readOnly=!1,this.configs=[],this.uniqueAlt=0,this.conflictingAlts=null,this.hasSemanticContext=!1,this.dipsIntoOuterContext=!1,this.cachedHashCode=-1}add(t,e){if(void 0===e&&(e=null),this.readOnly)throw"This set is readonly";t.semanticContext!==c.NONE&&(this.hasSemanticContext=!0),t.reachesIntoOuterContext>0&&(this.dipsIntoOuterContext=!0);const n=this.configLookup.add(t);if(n===t)return this.cachedHashCode=-1,this.configs.push(t),!0;const s=!this.fullCtx,i=H(n.context,t.context,s,e);return n.reachesIntoOuterContext=Math.max(n.reachesIntoOuterContext,t.reachesIntoOuterContext),t.precedenceFilterSuppressed&&(n.precedenceFilterSuppressed=!0),n.context=i,!0}getStates(){const t=new h;for(let e=0;eYt.MAX_DFA_EDGE)return null;let n=t.edges[e-Yt.MIN_DFA_EDGE];return void 0===n&&(n=null),Yt.debug&&null!==n&&console.log("reuse state "+t.stateNumber+" edge to "+n.stateNumber),n}computeTargetState(t,e,n){const s=new Ut;return this.getReachableConfigSet(t,e.configs,s,n),0===s.items.length?(s.hasSemanticContext||this.addDFAEdge(e,n,Mt.ERROR),Mt.ERROR):this.addDFAEdge(e,n,null,s)}failOrAccept(e,n,s,i){if(null!==this.prevAccept.dfaState){const t=e.dfaState.lexerActionExecutor;return this.accept(n,t,this.startIndex,e.index,e.line,e.column),e.dfaState.prediction}if(i===t.EOF&&n.index===this.startIndex)return t.EOF;throw new vt(this.recog,n,this.startIndex,s)}getReachableConfigSet(e,n,s,i){let r=Y.INVALID_ALT_NUMBER;for(let o=0;oYt.MAX_DFA_EDGE||(Yt.debug&&console.log("EDGE "+t+" -> "+n+" upon "+e),null===t.edges&&(t.edges=[]),t.edges[e-Yt.MIN_DFA_EDGE]=n),n}addDFAState(t){const e=new Ft(null,t);let n=null;for(let e=0;et.startsWith("k-"))).map((t=>this.data[t]),this)}}const Wt={SLL:0,LL:1,LL_EXACT_AMBIG_DETECTION:2,hasSLLConflictTerminatingPrediction:function(t,e){if(Wt.allConfigsInRuleStopStates(e))return!0;if(t===Wt.SLL&&e.hasSemanticContext){const t=new Dt;for(let n=0;n1)return!0;return!1},allSubsetsEqual:function(t){let e=null;for(let n=0;n0&&(r=this.getAltThatFinishedDecisionEntryRule(i),r!==Y.INVALID_ALT_NUMBER)?r:Y.INVALID_ALT_NUMBER}getAltThatFinishedDecisionEntryRule(t){const e=[];for(let n=0;n0||s.state instanceof E&&s.context.hasEmptyPath())&&e.indexOf(s.alt)<0&&e.push(s.alt)}return 0===e.length?Y.INVALID_ALT_NUMBER:Math.min.apply(null,e)}splitAccordingToSemanticValidity(t,e){const n=new Dt(t.fullCtx),s=new Dt(t.fullCtx);for(let i=0;i50))throw"problem";if(t.state instanceof E){if(!t.context.isEmpty()){for(let a=0;a=0&&(s+=1)}this.closureCheckingStopState(u,e,n,c,i,s,o)}}}canDropLoopEntryEdgeInLeftRecursiveRule(t){const 
e=t.state;if(e.stateType!==S.STAR_LOOP_ENTRY)return!1;if(e.stateType!==S.STAR_LOOP_ENTRY||!e.isPrecedenceDecision||t.context.isEmpty()||t.context.hasEmptyPath())return!1;const n=t.context.length;for(let s=0;s=0?this.parser.ruleNames[t]:""}getEpsilonTarget(e,n,s,i,r,o){switch(n.serializationType){case m.RULE:return this.ruleTransition(e,n);case m.PRECEDENCE:return this.precedenceTransition(e,n,s,i,r);case m.PREDICATE:return this.predTransition(e,n,s,i,r);case m.ACTION:return this.actionTransition(e,n);case m.EPSILON:return new f({state:n.target},e);case m.ATOM:case m.RANGE:case m.SET:return o&&n.matches(t.EOF,0,1)?new f({state:n.target},e):null;default:return null}}actionTransition(t,e){if(this.debug){const t=-1===e.actionIndex?65535:e.actionIndex;console.log("ACTION edge "+e.ruleIndex+":"+t)}return new f({state:e.target},t)}precedenceTransition(t,e,n,s,i){this.debug&&(console.log("PRED (collectPredicates="+n+") "+e.precedence+">=_p, ctx dependent=true"),null!==this.parser&&console.log("context surrounding pred is "+a(this.parser.getRuleInvocationStack())));let r=null;if(n&&s)if(i){const n=this._input.index;this._input.seek(this._startIndex);const s=e.getPredicate().evaluate(this.parser,this._outerContext);this._input.seek(n),s&&(r=new f({state:e.target},t))}else{const n=c.andContext(t.semanticContext,e.getPredicate());r=new f({state:e.target,semanticContext:n},t)}else r=new f({state:e.target},t);return this.debug&&console.log("config from pred transition="+r),r}predTransition(t,e,n,s,i){this.debug&&(console.log("PRED (collectPredicates="+n+") "+e.ruleIndex+":"+e.predIndex+", ctx dependent="+e.isCtxDependent),null!==this.parser&&console.log("context surrounding pred is "+a(this.parser.getRuleInvocationStack())));let r=null;if(n&&(e.isCtxDependent&&s||!e.isCtxDependent))if(i){const n=this._input.index;this._input.seek(this._startIndex);const s=e.getPredicate().evaluate(this.parser,this._outerContext);this._input.seek(n),s&&(r=new f({state:e.target},t))}else{const n=c.andContext(t.semanticContext,e.getPredicate());r=new f({state:e.target,semanticContext:n},t)}else r=new f({state:e.target},t);return this.debug&&console.log("config from pred transition="+r),r}ruleTransition(t,e){this.debug&&console.log("CALL rule "+this.getRuleName(e.target.ruleIndex)+", ctx="+t.context);const n=e.followState,s=D.create(t.context,n.stateNumber);return new f({state:e.target,context:s},t)}getConflictingAlts(t){const e=jt.getConflictingAltSubsets(t);return jt.getAlts(e)}getConflictingAltsOrUniqueAlt(t){let e=null;return t.uniqueAlt!==Y.INVALID_ALT_NUMBER?(e=new K,e.add(t.uniqueAlt)):e=t.conflictingAlts,e}getTokenName(e){if(e===t.EOF)return"EOF";if(null!==this.parser&&null!==this.parser.literalNames){if(!(e>=this.parser.literalNames.length&&e>=this.parser.symbolicNames.length))return(this.parser.literalNames[e]||this.parser.symbolicNames[e])+"<"+e+">";console.log(e+" ttype out of range: "+this.parser.literalNames),console.log(""+this.parser.getInputStream().getTokens())}return""+e}getLookaheadName(t){return this.getTokenName(t.LA(1))}dumpDeadEndConfigs(t){console.log("dead end configs: ");const e=t.getDeadEndConfigs();for(let t=0;t0){const t=n.state.transitions[0];t instanceof it?s="Atom "+this.getTokenName(t.label):t instanceof C&&(s=(t instanceof A?"~":"")+"Set "+t.set)}console.error(n.toString(this.parser,!0)+":"+s)}}noViableAlt(t,e,n,s){return new Qt(this.parser,t,t.get(s),t.LT(1),n,e)}getUniqueAlt(t){let e=Y.INVALID_ALT_NUMBER;for(let n=0;n "+s+" upon "+this.getTokenName(n)),null===s)return 
null;if(s=this.addDFAState(t,s),null===e||n<-1||n>this.atn.maxTokenType)return s;if(null===e.edges&&(e.edges=[]),e.edges[n+1]=s,this.debug){const e=null===this.parser?null:this.parser.literalNames,n=null===this.parser?null:this.parser.symbolicNames;console.log("DFA=\n"+t.toString(e,n))}return s}addDFAState(t,e){if(e===Mt.ERROR)return e;const n=t.states.get(e);return null!==n?n:(e.stateNumber=t.states.length,e.configs.readOnly||(e.configs.optimizeConfigs(this),e.configs.setReadonly(!0)),t.states.add(e),this.debug&&console.log("adding new DFA state: "+e),e)}reportAttemptingFullContext(t,e,n,s,i){if(this.debug||this.retry_debug){const e=new x(s,i+1);console.log("reportAttemptingFullContext decision="+t.decision+":"+n+", input="+this.parser.getTokenStream().getText(e))}null!==this.parser&&this.parser.getErrorListenerDispatch().reportAttemptingFullContext(this.parser,t,s,i,e,n)}reportContextSensitivity(t,e,n,s,i){if(this.debug||this.retry_debug){const e=new x(s,i+1);console.log("reportContextSensitivity decision="+t.decision+":"+n+", input="+this.parser.getTokenStream().getText(e))}null!==this.parser&&this.parser.getErrorListenerDispatch().reportContextSensitivity(this.parser,t,s,i,e,n)}reportAmbiguity(t,e,n,s,i,r,o){if(this.debug||this.retry_debug){const t=new x(n,s+1);console.log("reportAmbiguity "+r+":"+o+", input="+this.parser.getTokenStream().getText(t))}null!==this.parser&&this.parser.getErrorListenerDispatch().reportAmbiguity(this.parser,t,n,s,i,r,o)}},PredictionMode:jt};class Jt{constructor(t,e,n){this.dfa=t,this.literalNames=e||[],this.symbolicNames=n||[]}toString(){if(null===this.dfa.s0)return null;let t="";const e=this.dfa.sortedStates();for(let n=0;n"),t=t.concat(this.getStateString(e)),t=t.concat("\n"))}}}return 0===t.length?null:t}getEdgeLabel(t){return 0===t?"EOF":null!==this.literalNames||null!==this.symbolicNames?this.literalNames[t-1]||this.symbolicNames[t-1]:String.fromCharCode(t-1)}getStateString(t){const e=(t.isAcceptState?":":"")+"s"+t.stateNumber+(t.requiresFullContext?"^":"");return t.isAcceptState?null!==t.predicates?e+"=>"+a(t.predicates):e+"=>"+t.prediction.toString():e}}class Zt extends Jt{constructor(t){super(t,null)}getEdgeLabel(t){return"'"+String.fromCharCode(t)+"'"}}const te={DFA:class{constructor(t,e){if(void 0===e&&(e=0),this.atnStartState=t,this.decision=e,this._states=new h,this.s0=null,this.precedenceDfa=!1,t instanceof tt&&t.isPrecedenceDecision){this.precedenceDfa=!0;const t=new Ft(null,new Dt);t.edges=[],t.isAcceptState=!1,t.requiresFullContext=!1,this.s0=t}}getPrecedenceStartState(t){if(!this.precedenceDfa)throw"Only precedence DFAs may contain a precedence start state.";return t<0||t>=this.s0.edges.length?null:this.s0.edges[t]||null}setPrecedenceStartState(t,e){if(!this.precedenceDfa)throw"Only precedence DFAs may contain a precedence start state.";t<0||(this.s0.edges[t]=e)}setPrecedenceDfa(t){if(this.precedenceDfa!==t){if(this._states=new h,t){const t=new Ft(null,new Dt);t.edges=[],t.isAcceptState=!1,t.requiresFullContext=!1,this.s0=t}else this.s0=null;this.precedenceDfa=t}}sortedStates(){return this._states.values().sort((function(t,e){return t.stateNumber-e.stateNumber}))}toString(t,e){return t=t||null,e=e||null,null===this.s0?"":new Jt(this,t,e).toString()}toLexerString(){return null===this.s0?"":new Zt(this).toString()}get states(){return this._states}},DFASerializer:Jt,LexerDFASerializer:Zt,PredPrediction:zt};class ee{visitTerminal(t){}visitErrorNode(t){}enterEveryRule(t){}exitEveryRule(t){}}class ne{walk(t,e){if(e instanceof R||void 
0!==e.isErrorNode&&e.isErrorNode())t.visitErrorNode(e);else if(e instanceof O)t.visitTerminal(e);else{this.enterRule(t,e);for(let n=0;n=0&&t.consume(),this.lastErrorIndex=t._input.index,null===this.lastErrorStates&&(this.lastErrorStates=[]),this.lastErrorStates.push(t.state);const n=this.getErrorRecoverySet(t);this.consumeUntil(t,n)}sync(e){if(this.inErrorRecoveryMode(e))return;const n=e._interp.atn.states[e.state],s=e.getTokenStream().LA(1),i=e.atn.nextTokens(n);if(i.contains(s))return this.nextTokensContext=null,void(this.nextTokenState=S.INVALID_STATE_NUMBER);if(i.contains(t.EPSILON))null===this.nextTokensContext&&(this.nextTokensContext=e._ctx,this.nextTokensState=e._stateNumber);else switch(n.stateType){case S.BLOCK_START:case S.STAR_BLOCK_START:case S.PLUS_BLOCK_START:case S.STAR_LOOP_ENTRY:if(null!==this.singleTokenDeletion(e))return;throw new ie(e);case S.PLUS_LOOP_BACK:case S.STAR_LOOP_BACK:{this.reportUnwantedToken(e);const t=new T;t.addSet(e.getExpectedTokens());const n=t.addSet(this.getErrorRecoverySet(e));this.consumeUntil(e,n)}}}reportNoViableAlternative(e,n){const s=e.getTokenStream();let i;i=null!==s?n.startToken.type===t.EOF?"":s.getText(new x(n.startToken.tokenIndex,n.offendingToken.tokenIndex)):"";const r="no viable alternative at input "+this.escapeWSAndQuote(i);e.notifyErrorListeners(r,n.offendingToken,n)}reportInputMismatch(t,e){const n="mismatched input "+this.getTokenErrorDisplay(e.offendingToken)+" expecting "+e.getExpectedTokens().toString(t.literalNames,t.symbolicNames);t.notifyErrorListeners(n,e.offendingToken,e)}reportFailedPredicate(t,e){const n="rule "+t.ruleNames[t._ctx.ruleIndex]+" "+e.message;t.notifyErrorListeners(n,e.offendingToken,e)}reportUnwantedToken(t){if(this.inErrorRecoveryMode(t))return;this.beginErrorCondition(t);const e=t.getCurrentToken(),n="extraneous input "+this.getTokenErrorDisplay(e)+" expecting "+this.getExpectedTokens(t).toString(t.literalNames,t.symbolicNames);t.notifyErrorListeners(n,e,null)}reportMissingToken(t){if(this.inErrorRecoveryMode(t))return;this.beginErrorCondition(t);const e=t.getCurrentToken(),n="missing "+this.getExpectedTokens(t).toString(t.literalNames,t.symbolicNames)+" at "+this.getTokenErrorDisplay(e);t.notifyErrorListeners(n,e,null)}recoverInline(t){const e=this.singleTokenDeletion(t);if(null!==e)return t.consume(),e;if(this.singleTokenInsertion(t))return this.getMissingSymbol(t);throw new ie(t)}singleTokenInsertion(t){const e=t.getTokenStream().LA(1),n=t._interp.atn,s=n.states[t.state].transitions[0].target;return!!n.nextTokens(s,t._ctx).contains(e)&&(this.reportMissingToken(t),!0)}singleTokenDeletion(t){const e=t.getTokenStream().LA(2);if(this.getExpectedTokens(t).contains(e)){this.reportUnwantedToken(t),t.consume();const e=t.getCurrentToken();return this.reportMatch(t),e}return null}getMissingSymbol(e){const n=e.getCurrentToken(),s=this.getExpectedTokens(e).first();let i;i=s===t.EOF?"":"";let r=n;const o=e.getTokenStream().LT(-1);return r.type===t.EOF&&null!==o&&(r=o),e.getTokenFactory().create(r.source,s,i,t.DEFAULT_CHANNEL,-1,-1,r.line,r.column)}getExpectedTokens(t){return t.getExpectedTokens()}getTokenErrorDisplay(e){if(null===e)return"";let n=e.text;return null===n&&(n=e.type===t.EOF?"":"<"+e.type+">"),this.escapeWSAndQuote(n)}escapeWSAndQuote(t){return"'"+(t=(t=(t=t.replace(/\n/g,"\\n")).replace(/\r/g,"\\r")).replace(/\t/g,"\\t"))+"'"}getErrorRecoverySet(e){const n=e._interp.atn;let s=e._ctx;const i=new T;for(;null!==s&&s.invokingState>=0;){const 
t=n.states[s.invokingState].transitions[0],e=n.nextTokens(t.followState);i.addSet(e),s=s.parentCtx}return i.removeOne(t.EPSILON),i}consumeUntil(e,n){let s=e.getTokenStream().LA(1);for(;s!==t.EOF&&!n.contains(s);)e.consume(),s=e.getTokenStream().LA(1)}}const he={RecognitionException:Lt,NoViableAltException:Qt,LexerNoViableAltException:vt,InputMismatchException:ie,FailedPredicateException:re,DiagnosticErrorListener:class extends Nt{constructor(t){super(),t=t||!0,this.exactOnly=t}reportAmbiguity(t,e,n,s,i,r,o){if(this.exactOnly&&!i)return;const a="reportAmbiguity d="+this.getDecisionDescription(t,e)+": ambigAlts="+this.getConflictingAlts(r,o)+", input='"+t.getTokenStream().getText(new x(n,s))+"'";t.notifyErrorListeners(a)}reportAttemptingFullContext(t,e,n,s,i,r){const o="reportAttemptingFullContext d="+this.getDecisionDescription(t,e)+", input='"+t.getTokenStream().getText(new x(n,s))+"'";t.notifyErrorListeners(o)}reportContextSensitivity(t,e,n,s,i,r){const o="reportContextSensitivity d="+this.getDecisionDescription(t,e)+", input='"+t.getTokenStream().getText(new x(n,s))+"'";t.notifyErrorListeners(o)}getDecisionDescription(t,e){const n=e.decision,s=e.atnStartState.ruleIndex,i=t.ruleNames;if(s<0||s>=i.length)return""+n;const r=i[s]||null;return null===r||0===r.length?""+n:`${n} (${r})`}getConflictingAlts(t,e){if(null!==t)return t;const n=new K;for(let t=0;t=this._size)throw"cannot consume EOF";this._index+=1}LA(e){if(0===e)return 0;e<0&&(e+=1);const n=this._index+e-1;return n<0||n>=this._size?t.EOF:this.data[n]}LT(t){return this.LA(t)}mark(){return-1}release(t){}seek(t){t<=this._index?this._index=t:this._index=Math.min(t,this._size)}getText(t,e){if(e>=this._size&&(e=this._size-1),t>=this._size)return"";if(this.decodeToUnicodeCodePoints){let n="";for(let s=t;s<=e;s++)n+=String.fromCodePoint(this.data[s]);return n}return this.strdata.slice(t,e+1)}toString(){return this.strdata}get index(){return this._index}get size(){return this._size}}var ue=n(654);const de={fromString:function(t){return new ce(t,!0)},fromBlob:function(t,e,n,s){const i=new window.FileReader;i.onload=function(t){const e=new ce(t.target.result,!0);n(e)},i.onerror=s,i.readAsText(t,e)},fromBuffer:function(t,e){return new ce(t.toString(e),!0)},fromPath:function(t,e,n){ue.readFile(t,e,(function(t,e){let s=null;null!==e&&(s=new ce(e,!0)),n(t,s)}))},fromPathSync:function(t,e){const n=ue.readFileSync(t,e);return new ce(n,!0)}};class pe extends ee{constructor(t){super(),this.parser=t}enterEveryRule(t){console.log("enter "+this.parser.ruleNames[t.ruleIndex]+", LT(1)="+this.parser._input.LT(1).text)}visitTerminal(t){console.log("consume "+t.symbol+" rule "+this.parser.ruleNames[this.parser._ctx.ruleIndex])}exitEveryRule(t){console.log("exit "+this.parser.ruleNames[t.ruleIndex]+", LT(1)="+this.parser._input.LT(1).text)}}class ge extends kt{constructor(t){super(),this._input=null,this._errHandler=new le,this._precedenceStack=[],this._precedenceStack.push(0),this._ctx=null,this.buildParseTrees=!0,this._tracer=null,this._parseListeners=null,this._syntaxErrors=0,this.setInputStream(t)}reset(){null!==this._input&&this._input.seek(0),this._errHandler.reset(this),this._ctx=null,this._syntaxErrors=0,this.setTrace(!1),this._precedenceStack=[],this._precedenceStack.push(0),null!==this._interp&&this._interp.reset()}match(t){let e=this.getCurrentToken();return e.type===t?(this._errHandler.reportMatch(this),this.consume()):(e=this._errHandler.recoverInline(this),this.buildParseTrees&&-1===e.tokenIndex&&this._ctx.addErrorNode(e)),e}matchWildcard(){let 
t=this.getCurrentToken();return t.type>0?(this._errHandler.reportMatch(this),this.consume()):(t=this._errHandler.recoverInline(this),this._buildParseTrees&&-1===t.tokenIndex&&this._ctx.addErrorNode(t)),t}getParseListeners(){return this._parseListeners||[]}addParseListener(t){if(null===t)throw"listener";null===this._parseListeners&&(this._parseListeners=[]),this._parseListeners.push(t)}removeParseListener(t){if(null!==this._parseListeners){const e=this._parseListeners.indexOf(t);e>=0&&this._parseListeners.splice(e,1),0===this._parseListeners.length&&(this._parseListeners=null)}}removeParseListeners(){this._parseListeners=null}triggerEnterRuleEvent(){if(null!==this._parseListeners){const t=this._ctx;this._parseListeners.forEach((function(e){e.enterEveryRule(t),t.enterRule(e)}))}}triggerExitRuleEvent(){if(null!==this._parseListeners){const t=this._ctx;this._parseListeners.slice(0).reverse().forEach((function(e){t.exitRule(e),e.exitEveryRule(t)}))}}getTokenFactory(){return this._input.tokenSource._factory}setTokenFactory(t){this._input.tokenSource._factory=t}getATNWithBypassAlts(){const t=this.getSerializedATN();if(null===t)throw"The current parser does not support an ATN with bypass alternatives.";let e=this.bypassAltsAtnCache[t];if(null===e){const n=new dt;n.generateRuleBypassTransitions=!0,e=new At(n).deserialize(t),this.bypassAltsAtnCache[t]=e}return e}getInputStream(){return this.getTokenStream()}setInputStream(t){this.setTokenStream(t)}getTokenStream(){return this._input}setTokenStream(t){this._input=null,this.reset(),this._input=t}getCurrentToken(){return this._input.LT(1)}notifyErrorListeners(t,e,n){n=n||null,null===(e=e||null)&&(e=this.getCurrentToken()),this._syntaxErrors+=1;const s=e.line,i=e.column;this.getErrorListenerDispatch().syntaxError(this,e,s,i,t,n)}consume(){const e=this.getCurrentToken();e.type!==t.EOF&&this.getInputStream().consume();const n=null!==this._parseListeners&&this._parseListeners.length>0;if(this.buildParseTrees||n){let t;t=this._errHandler.inErrorRecoveryMode(this)?this._ctx.addErrorNode(e):this._ctx.addTokenNode(e),t.invokingState=this.state,n&&this._parseListeners.forEach((function(e){t instanceof R||void 0!==t.isErrorNode&&t.isErrorNode()?e.visitErrorNode(t):t instanceof O&&e.visitTerminal(t)}))}return e}addContextToParseTree(){null!==this._ctx.parentCtx&&this._ctx.parentCtx.addChild(this._ctx)}enterRule(t,e,n){this.state=e,this._ctx=t,this._ctx.start=this._input.LT(1),this.buildParseTrees&&this.addContextToParseTree(),this.triggerEnterRuleEvent()}exitRule(){this._ctx.stop=this._input.LT(-1),this.triggerExitRuleEvent(),this.state=this._ctx.invokingState,this._ctx=this._ctx.parentCtx}enterOuterAlt(t,e){t.setAltNumber(e),this.buildParseTrees&&this._ctx!==t&&null!==this._ctx.parentCtx&&(this._ctx.parentCtx.removeLastChild(),this._ctx.parentCtx.addChild(t)),this._ctx=t}getPrecedence(){return 0===this._precedenceStack.length?-1:this._precedenceStack[this._precedenceStack.length-1]}enterRecursionRule(t,e,n,s){this.state=e,this._precedenceStack.push(s),this._ctx=t,this._ctx.start=this._input.LT(1),this.triggerEnterRuleEvent()}pushNewRecursionContext(t,e,n){const s=this._ctx;s.parentCtx=t,s.invokingState=e,s.stop=this._input.LT(-1),this._ctx=t,this._ctx.start=s.start,this.buildParseTrees&&this._ctx.addChild(s),this.triggerEnterRuleEvent()}unrollRecursionContexts(t){this._precedenceStack.pop(),this._ctx.stop=this._input.LT(-1);const 
e=this._ctx,n=this.getParseListeners();if(null!==n&&n.length>0)for(;this._ctx!==t;)this.triggerExitRuleEvent(),this._ctx=this._ctx.parentCtx;else this._ctx=t;e.parentCtx=t,this.buildParseTrees&&null!==t&&t.addChild(e)}getInvokingContext(t){let e=this._ctx;for(;null!==e;){if(e.ruleIndex===t)return e;e=e.parentCtx}return null}precpred(t,e){return e>=this._precedenceStack[this._precedenceStack.length-1]}inContext(t){return!1}isExpectedToken(e){const n=this._interp.atn;let s=this._ctx;const i=n.states[this.state];let r=n.nextTokens(i);if(r.contains(e))return!0;if(!r.contains(t.EPSILON))return!1;for(;null!==s&&s.invokingState>=0&&r.contains(t.EPSILON);){const t=n.states[s.invokingState].transitions[0];if(r=n.nextTokens(t.followState),r.contains(e))return!0;s=s.parentCtx}return!(!r.contains(t.EPSILON)||e!==t.EOF)}getExpectedTokens(){return this._interp.atn.getExpectedTokens(this.state,this._ctx)}getExpectedTokensWithinCurrentRule(){const t=this._interp.atn,e=t.states[this.state];return t.nextTokens(e)}getRuleIndex(t){const e=this.getRuleIndexMap()[t];return null!==e?e:-1}getRuleInvocationStack(t){null===(t=t||null)&&(t=this._ctx);const e=[];for(;null!==t;){const n=t.ruleIndex;n<0?e.push("n/a"):e.push(this.ruleNames[n]),t=t.parentCtx}return e}getDFAStrings(){return this._interp.decisionToDFA.toString()}dumpDFA(){let t=!1;for(let e=0;e0&&(t&&console.log(),this.printer.println("Decision "+n.decision+":"),this.printer.print(n.toString(this.literalNames,this.symbolicNames)),t=!0)}}getSourceName(){return this._input.sourceName}setTrace(t){t?(null!==this._tracer&&this.removeParseListener(this._tracer),this._tracer=new pe(this),this.addParseListener(this._tracer)):(this.removeParseListener(this._tracer),this._tracer=null)}}ge.bypassAltsAtnCache={};class fe extends O{constructor(t){super(),this.parentCtx=null,this.symbol=t}getChild(t){return null}getSymbol(){return this.symbol}getParent(){return this.parentCtx}getPayload(){return this.symbol}getSourceInterval(){if(null===this.symbol)return x.INVALID_INTERVAL;const t=this.symbol.tokenIndex;return new x(t,t)}getChildCount(){return 0}accept(t){return t.visitTerminal(this)}getText(){return this.symbol.text}toString(){return this.symbol.type===t.EOF?"":this.symbol.text}}class xe extends fe{constructor(t){super(t)}isErrorNode(){return!0}accept(t){return t.visitErrorNode(this)}}class Te extends w{constructor(t,e){super(t=t||null,e=e||null),this.ruleIndex=-1,this.children=null,this.start=null,this.stop=null,this.exception=null}copyFrom(t){this.parentCtx=t.parentCtx,this.invokingState=t.invokingState,this.children=null,this.start=t.start,this.stop=t.stop,t.children&&(this.children=[],t.children.map((function(t){t instanceof xe&&(this.children.push(t),t.parentCtx=this)}),this))}enterRule(t){}exitRule(t){}addChild(t){return null===this.children&&(this.children=[]),this.children.push(t),t}removeLastChild(){null!==this.children&&this.children.pop()}addTokenNode(t){const e=new fe(t);return this.addChild(e),e.parentCtx=this,e}addErrorNode(t){const e=new xe(t);return this.addChild(e),e.parentCtx=this,e}getChild(t,e){if(e=e||null,null===this.children||t<0||t>=this.children.length)return null;if(null===e)return this.children[t];for(let n=0;n=this.children.length)return null;for(let n=0;n=0&&(this.fetchedEOF?this.index0)||this.fetch(e)>=e}fetch(e){if(this.fetchedEOF)return 0;for(let n=0;n=this.tokens.length&&(n=this.tokens.length-1);for(let r=e;r=this.tokens.length?this.tokens[this.tokens.length-1]:this.tokens[e]}adjustSeekIndex(t){return 
t}lazyInit(){-1===this.index&&this.setup()}setup(){this.sync(0),this.index=this.adjustSeekIndex(0)}setTokenSource(t){this.tokenSource=t,this.tokens=[],this.index=-1,this.fetchedEOF=!1}nextTokenOnChannel(e,n){if(this.sync(e),e>=this.tokens.length)return-1;let s=this.tokens[e];for(;s.channel!==this.channel;){if(s.type===t.EOF)return-1;e+=1,this.sync(e),s=this.tokens[e]}return e}previousTokenOnChannel(t,e){for(;t>=0&&this.tokens[t].channel!==e;)t-=1;return t}getHiddenTokensToRight(t,e){if(void 0===e&&(e=-1),this.lazyInit(),t<0||t>=this.tokens.length)throw t+" not in 0.."+this.tokens.length-1;const n=this.nextTokenOnChannel(t+1,wt.DEFAULT_TOKEN_CHANNEL),s=t+1,i=-1===n?this.tokens.length-1:n;return this.filterForChannel(s,i,e)}getHiddenTokensToLeft(t,e){if(void 0===e&&(e=-1),this.lazyInit(),t<0||t>=this.tokens.length)throw t+" not in 0.."+this.tokens.length-1;const n=this.previousTokenOnChannel(t-1,wt.DEFAULT_TOKEN_CHANNEL);if(n===t-1)return null;const s=n+1,i=t-1;return this.filterForChannel(s,i,e)}filterForChannel(t,e,n){const s=[];for(let i=t;i=this.tokens.length&&(s=this.tokens.length-1);let i="";for(let e=n;enew me.dfa.DFA(t,e)));class Ae extends me.Lexer{static grammarFileName="DTS.g4";static channelNames=["DEFAULT_TOKEN_CHANNEL","HIDDEN"];static modeNames=["DEFAULT_MODE"];static literalNames=[null,"'{'","'}'","','",null,null,null,null,null,null,"'='","';'","'/'"];static symbolicNames=[null,null,null,null,"STRING","NUMBER2","ARR","KEY","NUMBER","WS","EQ","SEMICOLON","ROOT","VERSION"];static ruleNames=["T__0","T__1","T__2","STRING","NUMBER2","ARR","KEY","ESC","UNICODE","HEX","SAFECODEPOINT","NUMBER","INT","EXP","WS","EQ","SEMICOLON","ROOT","VERSION"];constructor(t){super(t),this._interp=new me.atn.LexerATNSimulator(this,_e,Ce,new me.PredictionContextCache)}get atn(){return _e}}Ae.EOF=me.Token.EOF,Ae.T__0=1,Ae.T__1=2,Ae.T__2=3,Ae.STRING=4,Ae.NUMBER2=5,Ae.ARR=6,Ae.KEY=7,Ae.NUMBER=8,Ae.WS=9,Ae.EQ=10,Ae.SEMICOLON=11,Ae.ROOT=12,Ae.VERSION=13;class Ne extends me.tree.ParseTreeListener{enterDts(t){}exitDts(t){}enterDic(t){}exitDic(t){}enterPair(t){}exitPair(t){}enterArray(t){}exitArray(t){}enterValue(t){}exitValue(t){}visitTerminal(t){}}class ye extends me.tree.ParseTreeVisitor{visitDts(t){return this.visitChildren(t)}visitDic(t){return this.visitChildren(t)}visitPair(t){return this.visitChildren(t)}visitArray(t){return this.visitChildren(t)}visitValue(t){return this.visitChildren(t)}}const Ie=(new me.atn.ATNDeserializer).deserialize([4,1,13,49,2,0,7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,1,0,1,0,1,0,1,0,1,1,1,1,5,1,17,8,1,10,1,12,1,20,9,1,1,1,1,1,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,1,2,3,2,38,8,2,1,3,1,3,1,3,4,3,43,8,3,11,3,12,3,44,1,4,1,4,1,4,0,0,5,0,2,4,6,8,0,1,2,0,4,6,8,8,47,0,10,1,0,0,0,2,14,1,0,0,0,4,37,1,0,0,0,6,39,1,0,0,0,8,46,1,0,0,0,10,11,5,13,0,0,11,12,3,4,2,0,12,13,5,0,0,1,13,1,1,0,0,0,14,18,5,1,0,0,15,17,3,4,2,0,16,15,1,0,0,0,17,20,1,0,0,0,18,16,1,0,0,0,18,19,1,0,0,0,19,21,1,0,0,0,20,18,1,0,0,0,21,22,5,2,0,0,22,3,1,0,0,0,23,24,5,7,0,0,24,25,5,10,0,0,25,26,3,8,4,0,26,27,5,11,0,0,27,38,1,0,0,0,28,29,5,7,0,0,29,30,5,10,0,0,30,31,3,6,3,0,31,32,5,11,0,0,32,38,1,0,0,0,33,34,5,7,0,0,34,35,3,2,1,0,35,36,5,11,0,0,36,38,1,0,0,0,37,23,1,0,0,0,37,28,1,0,0,0,37,33,1,0,0,0,38,5,1,0,0,0,39,42,3,8,4,0,40,41,5,3,0,0,41,43,3,8,4,0,42,40,1,0,0,0,43,44,1,0,0,0,44,42,1,0,0,0,44,45,1,0,0,0,45,7,1,0,0,0,46,47,7,0,0,0,47,9,1,0,0,0,3,18,37,44]),ke=Ie.decisionToState.map(((t,e)=>new me.dfa.DFA(t,e))),Oe=new me.PredictionContextCache;class Re extends me.Parser{static grammarFileName="DTS.g4";static 
literalNames=[null,"'{'","'}'","','",null,null,null,null,null,null,"'='","';'","'/'"];static symbolicNames=[null,null,null,null,"STRING","NUMBER2","ARR","KEY","NUMBER","WS","EQ","SEMICOLON","ROOT","VERSION"];static ruleNames=["dts","dic","pair","array","value"];constructor(t){super(t),this._interp=new me.atn.ParserATNSimulator(this,Ie,ke,Oe),this.ruleNames=Re.ruleNames,this.literalNames=Re.literalNames,this.symbolicNames=Re.symbolicNames}get atn(){return Ie}dts(){let t=new Le(this,this._ctx,this.state);this.enterRule(t,0,Re.RULE_dts);try{this.enterOuterAlt(t,1),this.state=10,this.match(Re.VERSION),this.state=11,this.pair(),this.state=12,this.match(Re.EOF)}catch(e){if(!(e instanceof me.error.RecognitionException))throw e;t.exception=e,this._errHandler.reportError(this,e),this._errHandler.recover(this,e)}finally{this.exitRule()}return t}dic(){let t=new ve(this,this._ctx,this.state);this.enterRule(t,2,Re.RULE_dic);var e=0;try{for(this.enterOuterAlt(t,1),this.state=14,this.match(Re.T__0),this.state=18,this._errHandler.sync(this),e=this._input.LA(1);e===Re.KEY;)this.state=15,this.pair(),this.state=20,this._errHandler.sync(this),e=this._input.LA(1);this.state=21,this.match(Re.T__1)}catch(e){if(!(e instanceof me.error.RecognitionException))throw e;t.exception=e,this._errHandler.reportError(this,e),this._errHandler.recover(this,e)}finally{this.exitRule()}return t}pair(){let t=new we(this,this._ctx,this.state);this.enterRule(t,4,Re.RULE_pair);try{switch(this.state=37,this._errHandler.sync(this),this._interp.adaptivePredict(this._input,1,this._ctx)){case 1:this.enterOuterAlt(t,1),this.state=23,this.match(Re.KEY),this.state=24,this.match(Re.EQ),this.state=25,this.value(),this.state=26,this.match(Re.SEMICOLON);break;case 2:this.enterOuterAlt(t,2),this.state=28,this.match(Re.KEY),this.state=29,this.match(Re.EQ),this.state=30,this.array(),this.state=31,this.match(Re.SEMICOLON);break;case 3:this.enterOuterAlt(t,3),this.state=33,this.match(Re.KEY),this.state=34,this.dic(),this.state=35,this.match(Re.SEMICOLON)}}catch(e){if(!(e instanceof me.error.RecognitionException))throw e;t.exception=e,this._errHandler.reportError(this,e),this._errHandler.recover(this,e)}finally{this.exitRule()}return t}array(){let t=new Pe(this,this._ctx,this.state);this.enterRule(t,6,Re.RULE_array);var e=0;try{this.enterOuterAlt(t,1),this.state=39,this.value(),this.state=42,this._errHandler.sync(this),e=this._input.LA(1);do{this.state=40,this.match(Re.T__2),this.state=41,this.value(),this.state=44,this._errHandler.sync(this),e=this._input.LA(1)}while(e===Re.T__2)}catch(e){if(!(e instanceof me.error.RecognitionException))throw e;t.exception=e,this._errHandler.reportError(this,e),this._errHandler.recover(this,e)}finally{this.exitRule()}return t}value(){let t=new be(this,this._ctx,this.state);this.enterRule(t,8,Re.RULE_value);var e=0;try{this.enterOuterAlt(t,1),this.state=46,0!=(-32&(e=this._input.LA(1)))||0==(1<e||t<0||e<0||e>=this.tokens.tokens.length)throw new RangeError(`replace: range invalid: ${t}..${e}(size=${this.tokens.tokens.length})`);let i=this.getProgram(s),r=new Be(this.tokens,t,e,i.length,n);i.push(r)}delete(t,e,n=De.DEFAULT_PROGRAM_NAME){void 0===e&&(e=t),this.replace(t,e,"",n)}getProgram(t){let e=this.programs.get(t);return null==e&&(e=this.initializeProgram(t)),e}initializeProgram(t){const e=[];return this.programs.set(t,e),e}getText(t,e=De.DEFAULT_PROGRAM_NAME){let n;n=t instanceof me.Interval?t:new me.Interval(0,this.tokens.tokens.length-1),"string"==typeof t&&(e=t);const s=this.programs.get(e);let 
i=n.start,r=n.stop;if(r>this.tokens.tokens.length-1&&(r=this.tokens.tokens.length-1),i<0&&(i=0),null==s||0===s.length)return this.tokens.getText(n);let o=[],a=this.reduceToSingleOperationPerIndex(s),l=i;for(;l<=r&&l=this.tokens.tokens.length-1&&o.push(t.text.toString());return o.join("")}reduceToSingleOperationPerIndex(t){for(let e=0;es.index&&e.index<=s.lastIndex&&(t[e.instructionIndex]=void 0);let r=this.getKindOfOps(t,Be,e);for(let e of r){if(e.index>=s.index&&e.lastIndex<=s.lastIndex){t[e.instructionIndex]=void 0;continue}let n=e.lastIndexs.lastIndex;if(null!=e.text||null!=s.text||n){if(!n)throw new Error(`replace op boundaries of ${s} overlap with previous ${e}`)}else t[e.instructionIndex]=void 0,s.index=Math.min(e.index,s.index),s.lastIndex=Math.max(e.lastIndex,s.lastIndex)}}for(let e=0;e=n.index&&s.index<=n.lastIndex)throw new Error(`insert op ${s} within boundaries of previous ${n}`)}else n.text=this.catOpText(s.text,n.text),t[e]=void 0}let e=new Map;for(let n of t)if(null!=n){if(null!=e.get(n.index))throw new Error("should only be one op per index");e.set(n.index,n)}return e}catOpText(t,e){let n="",s="";return null!=t&&(n=t.toString()),null!=e&&(s=e.toString()),n+s}getKindOfOps(t,e,n){let s=[];for(let i=0;i'}}class Me extends Fe{constructor(t,e,n,s){super(t,e,n,s)}execute(t){return t.push(this.text.toString()),this.tokens.get(this.index).type!==me.Token.EOF&&t.push(String(this.tokens.get(this.index).text)),this.index+1}}class Ue extends Me{constructor(t,e,n,s){super(t,e+1,n,s)}}class Be extends Fe{constructor(t,e,n,s,i){super(t,e,s,i),this.lastIndex=n}execute(t){return null!=this.text&&t.push(this.text.toString()),this.lastIndex+1}toString(){return null==this.text?"":"'}}String.format||(String.format=function(t){let e=Array.prototype.slice.call(arguments,1);return t.replace(/{(\d+)}/g,(function(t,n){return void 0!==e[n]?e[n]:t}))});const Ve=scriptArgs;(Ve.length<2||Ve.length>3)&&(std.out.printf("Usage: qjs dts.js [output path]\n"),std.exit(1));let He=Ve[1],Ke=Ve.length>2?Ve[2]:He+".out";const qe=std.loadFile(He),Ye=new Ae(new me.InputStream(qe)),ze=new me.CommonTokenStream(Ye),Ge=new Re(ze);Ge.buildParseTrees=!0;const We=Ge.dts(),je=new me.tree.ParseTreeWalker,Qe=new class extends Ne{static tokens;static rewriter;constructor(t){super(),this.tokens=t,this.rewriter=new De(t)}enterDts(t){}exitDts(t){}enterDic(t){}exitDic(t){}enterPair(t){}exitPair(t){}enterArray(t){}exitArray(t){}enterValue(t){}exitValue(t){}tabCnt=0;lastKey="";map=new Map;visitTerminal(t){t.getSymbol().type===Re.T__0&&this.tabCnt++,t.getSymbol().type===Re.T__1&&this.tabCnt--,t.getSymbol().type!==Re.KEY&&t.getSymbol().type!==Re.T__1||this.rewriter.insertBefore(t.getSymbol(),"\t".repeat(Math.max(0,this.tabCnt))),t.getSymbol().type!==Re.SEMICOLON&&t.getSymbol().type!==Re.T__0&&t.getSymbol().type!==Re.VERSION||this.rewriter.insertAfter(t.getSymbol(),"\n"),t.getSymbol().type!==Re.KEY&&t.getSymbol().type!==Re.EQ||this.rewriter.insertAfter(t.getSymbol()," "),t.getSymbol().type===Re.KEY&&("/"===t.getText()?this.lastKey="/":this.lastKey+="/"+t.getText(),this.map.set(this.lastKey.replace("//","/"),t.getSymbol().tokenIndex)),t.getSymbol().type===Re.SEMICOLON&&(this.lastKey=this.lastKey.substring(0,this.lastKey.lastIndexOf("/"))),super.visitTerminal(t)}put(t,e){let n=this.map.get(t);if(console.log("modify path "+t),null==n)console.log("[err] path not found");else{if(this.tokens.get(n+1).type===Re.EQ){let t=this.tokens.get(n+2);return console.log("[ok] value "+e),this.rewriter.replaceSingle(t,e),1}console.log("[err] path not 
correct")}return 0}}(ze);je.walk(Qe,We);const $e="/sys/block/";let Xe=os.readdir($e);if(0===Xe[1]){let t="/sys/block/sata",e=/\/sys\/block\/nvme(\d{1,2})n(\d{1,2})/i;for(let n in Xe[0]){let s=$e+Xe[0][n];try{if(std.out.printf("start processing path:%s\n",s),s.toString().startsWith(t)){let e=Number.parseInt(s.toString().substring(t.length)),n=Je(s+"/device/syno_block_info");"ahci"===n.get("driver")?(Qe.put(String.format("/internal_slot@{0}/ahci/pcie_root",e),String.format('"{0}"',n.get("pciepath"))),Qe.put(String.format("/internal_slot@{0}/ahci/ata_port",e),String.format("<0x{0}>",Number.parseInt(n.get("ata_port_no")).toString(16).padStart(2,"0")))):std.out.printf("not ahci\n")}else if(s.toString().startsWith("/sys/block/nvme")){let t;if(t=e.exec(s.toString())){let e=Number.parseInt(t[1]),n=Je(s+"/device/syno_block_info");Qe.put(String.format("/nvme_slot@{0}/pcie_root",e+1),String.format('"{0}"',n.get("pciepath")))}else std.out.printf("nvme not found\n")}else std.out.printf("unsupported\n")}catch(t){std.out.printf("update path: %s to dts error: %s\n",s,t.message),std.exit(1)}}const n=std.open(Ke,"w");n.puts(Qe.rewriter.getText()),n.close()}else std.out.printf("update dts error:%d\n",Xe[1]),std.exit(1);function Je(t){const e=std.open(t,"r");let n=new Map;for(;;){let t=e.getline();if(null===t)break;let s=t.indexOf("=");if(s>0){let e=t.substring(0,s),i=t.substring(s+1);n.set(e,i),std.out.printf("%s = %s\n",e,i)}}return e.close(),n}})()})(); \ No newline at end of file diff --git a/addons/qjs-dtb/all/usr/sbin/dtc b/addons/qjs-dtb/all/usr/sbin/dtc new file mode 100755 index 00000000..d3da15be Binary files /dev/null and b/addons/qjs-dtb/all/usr/sbin/dtc differ diff --git a/addons/qjs-dtb/all/usr/sbin/qjs b/addons/qjs-dtb/all/usr/sbin/qjs new file mode 100755 index 00000000..84a05e4c Binary files /dev/null and b/addons/qjs-dtb/all/usr/sbin/qjs differ diff --git a/addons/qjs-dtb/install.sh b/addons/qjs-dtb/install.sh new file mode 100644 index 00000000..5605f5f2 --- /dev/null +++ b/addons/qjs-dtb/install.sh @@ -0,0 +1,27 @@ +if [ "${1}" = "rd" ]; then + echo "Jumkey's qjs-dtb arpl version - ramdisk time" + # fix executable flag + chmod +x /usr/sbin/dtc + chmod +x /usr/sbin/qjs + + # copy file + if [ ! -f model_${PLATFORM_ID%%_*}.dtb ]; then + # Dynamic generation + dtc -I dtb -O dts -o output.dts /etc.defaults/model.dtb + qjs --std /addons/dts.js output.dts output.dts.out + if [ $? 
-ne 0 ]; then + echo "auto generated dts file is broken" + else + dtc -I dts -O dtb -o model_r2.dtb output.dts.out + cp -vf model_r2.dtb /etc.defaults/model.dtb + cp -vf model_r2.dtb /var/run/model.dtb + fi + else + cp -vf model_${PLATFORM_ID%%_*}.dtb /etc.defaults/model.dtb + cp -vf model_${PLATFORM_ID%%_*}.dtb /var/run/model.dtb + fi +else + echo "Jumkey's qjs-dtb arpl version - sys time" + # copy file + cp -vf /etc.defaults/model.dtb /tmpRoot/etc.defaults/model.dtb +fi diff --git a/addons/qjs-dtb/manifest.yml b/addons/qjs-dtb/manifest.yml new file mode 100644 index 00000000..7fc1726b --- /dev/null +++ b/addons/qjs-dtb/manifest.yml @@ -0,0 +1,16 @@ +version: 1 +name: qjs-dtb +system: true +description: "Jumkey's script to change model.dtb dynamically" +all: + install-script: "install.sh" + copy: "all" +available-for: + bromolow-3.10.108: + apollolake-4.4.180: + broadwell-4.4.180: + broadwellnk-4.4.180: + denverton-4.4.180: + geminilake-4.4.180: + v1000-4.4.180: + purley-4.4.180: diff --git a/addons/r8168/install.sh b/addons/r8168/install.sh new file mode 100644 index 00000000..8794d078 --- /dev/null +++ b/addons/r8168/install.sh @@ -0,0 +1,4 @@ +if [ "${1}" = "rd" ]; then + echo "Installing module for Realtek R8168/8111 Ethernet adapter" + ${INSMOD} "/modules/r8168.ko" ${PARAMS} +fi diff --git a/addons/r8168/manifest.yml b/addons/r8168/manifest.yml new file mode 100644 index 00000000..53e5d9bf --- /dev/null +++ b/addons/r8168/manifest.yml @@ -0,0 +1,30 @@ +version: 1 +name: r8168 +description: "Driver for Realtek R8168/8111 Ethernet adapters" +conflits: + - r8169 +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true diff --git a/addons/r8168/src/3.10.108/Makefile b/addons/r8168/src/3.10.108/Makefile new file mode 100644 index 00000000..74c19689 --- /dev/null +++ b/addons/r8168/src/3.10.108/Makefile @@ -0,0 +1,62 @@ +CONFIG_SOC_LAN = n +ENABLE_FIBER_SUPPORT = n +ENABLE_REALWOW_SUPPORT = n +ENABLE_DASH_SUPPORT = n +ENABLE_DASH_PRINTER_SUPPORT = n +CONFIG_DOWN_SPEED_100 = n +CONFIG_ASPM = y +ENABLE_S5WOL = y +ENABLE_S5_KEEP_CURR_MAC = n +ENABLE_EEE = y +ENABLE_S0_MAGIC_PACKET = n +CONFIG_DYNAMIC_ASPM = y +ENABLE_USE_FIRMWARE_FILE = n + +obj-m := r8168.o +r8168-objs := r8168_n.o r8168_asf.o rtl_eeprom.o rtltool.o +ifeq ($(CONFIG_SOC_LAN), y) + EXTRA_CFLAGS += -DCONFIG_SOC_LAN +endif +ifeq ($(ENABLE_FIBER_SUPPORT), y) + r8168-objs += r8168_fiber.o + EXTRA_CFLAGS += -DENABLE_FIBER_SUPPORT +endif +ifeq ($(ENABLE_REALWOW_SUPPORT), y) + r8168-objs += r8168_realwow.o + EXTRA_CFLAGS += -DENABLE_REALWOW_SUPPORT +endif +ifeq ($(ENABLE_DASH_SUPPORT), y) + r8168-objs += r8168_dash.o + EXTRA_CFLAGS += -DENABLE_DASH_SUPPORT +endif +ifeq ($(ENABLE_DASH_PRINTER_SUPPORT), y) + r8168-objs += r8168_dash.o + EXTRA_CFLAGS += -DENABLE_DASH_SUPPORT -DENABLE_DASH_PRINTER_SUPPORT +endif +EXTRA_CFLAGS += -DCONFIG_R8168_NAPI +EXTRA_CFLAGS += -DCONFIG_R8168_VLAN +ifeq ($(CONFIG_DOWN_SPEED_100), y) + EXTRA_CFLAGS += -DCONFIG_DOWN_SPEED_100 +endif +ifeq ($(CONFIG_ASPM), y) + EXTRA_CFLAGS += -DCONFIG_ASPM +endif +ifeq ($(ENABLE_S5WOL), y) + EXTRA_CFLAGS 
+= -DENABLE_S5WOL +endif +ifeq ($(ENABLE_S5_KEEP_CURR_MAC), y) + EXTRA_CFLAGS += -DENABLE_S5_KEEP_CURR_MAC +endif +ifeq ($(ENABLE_EEE), y) + EXTRA_CFLAGS += -DENABLE_EEE +endif +ifeq ($(ENABLE_S0_MAGIC_PACKET), y) + EXTRA_CFLAGS += -DENABLE_S0_MAGIC_PACKET +endif +ifeq ($(CONFIG_DYNAMIC_ASPM), y) + EXTRA_CFLAGS += -DCONFIG_DYNAMIC_ASPM +endif +ifeq ($(ENABLE_USE_FIRMWARE_FILE), y) + r8168-objs += r8168_firmware.o + EXTRA_CFLAGS += -DENABLE_USE_FIRMWARE_FILE +endif diff --git a/addons/r8168/src/3.10.108/r8168.h b/addons/r8168/src/3.10.108/r8168.h new file mode 100644 index 00000000..4bd1047a --- /dev/null +++ b/addons/r8168/src/3.10.108/r8168.h @@ -0,0 +1,1842 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. 
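[Editor's note] The bulk of r8168.h below is kernel-version compatibility glue: every API the driver needs that is missing on some supported kernel is back-filled behind a `LINUX_VERSION_CODE` check, so the driver body can be written against a single spelling. A minimal sketch of that pattern, simplified from the NAPI-completion case that appears later in this header (the real header nests this inside a longer chain of version checks):

```c
#include <linux/version.h>

/* Pick the newest kernel API when available, otherwise map the same
 * macro name onto the older call: napi_complete_done() only exists
 * from kernel 3.19 on, so older kernels fall back to napi_complete()
 * and the work_done argument is simply dropped. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
#define RTL_NETIF_RX_COMPLETE(dev, napi, work_done) napi_complete_done(napi, work_done)
#else
#define RTL_NETIF_RX_COMPLETE(dev, napi, work_done) napi_complete(napi)
#endif
```

The same shape repeats for netdev ops, VLAN tagging, timers and ethtool throughout the header.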
+ ***********************************************************************************/ + +#include +#include "r8168_dash.h" +#include "r8168_realwow.h" +#include "r8168_fiber.h" + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) +typedef int netdev_tx_t; +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) +#define device_set_wakeup_enable(dev, val) do {} while (0) +#endif + +#if 0 +static inline void ether_addr_copy(u8 *dst, const u8 *src) +{ + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) +#define IS_ERR_OR_NULL(ptr) (!ptr) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) +#define reinit_completion(x) ((x)->done = 0) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) +#define pm_runtime_mark_last_busy(x) +#define pm_runtime_put_autosuspend(x) pm_runtime_put(x) +#define pm_runtime_put_sync_autosuspend(x) pm_runtime_put_sync(x) + +static inline bool pm_runtime_suspended(struct device *dev) +{ + return dev->power.runtime_status == RPM_SUSPENDED + && !dev->power.disable_depth; +} + +static inline bool pm_runtime_active(struct device *dev) +{ + return dev->power.runtime_status == RPM_ACTIVE + || dev->power.disable_depth; +} +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) +#define queue_delayed_work(long_wq, work, delay) schedule_delayed_work(work, delay) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) +#define netif_printk(priv, type, level, netdev, fmt, args...) \ + do { \ + if (netif_msg_##type(priv)) \ + printk(level "%s: " fmt,(netdev)->name , ##args); \ + } while (0) + +#define netif_emerg(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_EMERG, netdev, fmt, ##args) +#define netif_alert(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_ALERT, netdev, fmt, ##args) +#define netif_crit(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_CRIT, netdev, fmt, ##args) +#define netif_err(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_ERR, netdev, fmt, ##args) +#define netif_warn(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_WARNING, netdev, fmt, ##args) +#define netif_notice(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_NOTICE, netdev, fmt, ##args) +#define netif_info(priv, type, netdev, fmt, args...) 
\ + netif_printk(priv, type, KERN_INFO, (netdev), fmt, ##args) +#endif +#endif +#endif +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) +#define setup_timer(_timer, _function, _data) \ +do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ +} while (0) +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) +#if defined(skb_vlan_tag_present) && !defined(vlan_tx_tag_present) +#define vlan_tx_tag_present skb_vlan_tag_present +#endif +#if defined(skb_vlan_tag_get) && !defined(vlan_tx_tag_get) +#define vlan_tx_tag_get skb_vlan_tag_get +#endif +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) + +#define RTL_ALLOC_SKB_INTR(tp, length) dev_alloc_skb(length) +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) +#undef RTL_ALLOC_SKB_INTR +#define RTL_ALLOC_SKB_INTR(tp, length) napi_alloc_skb(&tp->napi, length) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) +#define netdev_features_t u32 +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,5,0) +#define NETIF_F_ALL_CSUM NETIF_F_CSUM_MASK +#else +#ifndef NETIF_F_ALL_CSUM +#define NETIF_F_ALL_CSUM NETIF_F_CSUM_MASK +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,37) +#define ENABLE_R8168_PROCFS +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) +#define NETIF_F_HW_VLAN_RX NETIF_F_HW_VLAN_CTAG_RX +#define NETIF_F_HW_VLAN_TX NETIF_F_HW_VLAN_CTAG_TX +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0) +#define __devinit +#define __devexit +#define __devexit_p(func) func +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) +#define CHECKSUM_PARTIAL CHECKSUM_HW +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) +#define irqreturn_t void +#define IRQ_HANDLED 1 +#define IRQ_NONE 0 +#define IRQ_RETVAL(x) +#endif + +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif + +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif + +#ifndef HAVE_FREE_NETDEV +#define free_netdev(x) kfree(x) +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#ifndef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) +#endif + +#ifndef SA_SHIRQ +#define SA_SHIRQ IRQF_SHARED +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef PCI_VENDOR_ID_DLINK +#define PCI_VENDOR_ID_DLINK 0x1186 +#endif + +#ifndef dma_mapping_error +#define dma_mapping_error(a,b) 0 +#endif + +#ifndef netif_err +#define netif_err(a,b,c,d) +#endif + +#ifndef AUTONEG_DISABLE +#define AUTONEG_DISABLE 0x00 +#endif + +#ifndef AUTONEG_ENABLE +#define AUTONEG_ENABLE 0x01 +#endif + +#ifndef BMCR_SPEED1000 +#define BMCR_SPEED1000 0x0040 +#endif + +#ifndef BMCR_SPEED100 +#define BMCR_SPEED100 0x2000 +#endif + +#ifndef BMCR_SPEED10 +#define BMCR_SPEED10 0x0000 +#endif + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif + +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif + +#ifndef SUPPORTED_Pause +#define SUPPORTED_Pause (1 << 13) +#endif + +#ifndef SUPPORTED_Asym_Pause +#define SUPPORTED_Asym_Pause (1 << 14) +#endif + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 +#endif + +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) +#ifdef CONFIG_NET_POLL_CONTROLLER +#define RTL_NET_POLL_CONTROLLER dev->poll_controller=rtl8168_netpoll +#else +#define RTL_NET_POLL_CONTROLLER +#endif + +#ifdef CONFIG_R8168_VLAN +#define 
RTL_SET_VLAN dev->vlan_rx_register=rtl8168_vlan_rx_register +#else +#define RTL_SET_VLAN +#endif + +#define RTL_NET_DEVICE_OPS(ops) dev->open=rtl8168_open; \ + dev->hard_start_xmit=rtl8168_start_xmit; \ + dev->get_stats=rtl8168_get_stats; \ + dev->stop=rtl8168_close; \ + dev->tx_timeout=rtl8168_tx_timeout; \ + dev->set_multicast_list=rtl8168_set_rx_mode; \ + dev->change_mtu=rtl8168_change_mtu; \ + dev->set_mac_address=rtl8168_set_mac_address; \ + dev->do_ioctl=rtl8168_do_ioctl; \ + RTL_NET_POLL_CONTROLLER; \ + RTL_SET_VLAN; +#else +#define RTL_NET_DEVICE_OPS(ops) dev->netdev_ops=&ops +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef false +#define false 0 +#endif + +#ifndef true +#define true 1 +#endif + +//Hardware will continue interrupt 10 times after interrupt finished. +#define RTK_KEEP_INTERRUPT_COUNT (10) + +//Due to the hardware design of RTL8111B, the low 32 bit address of receive +//buffer must be 8-byte alignment. +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif +#define RTK_RX_ALIGN 8 + +#ifdef CONFIG_R8168_NAPI +#define NAPI_SUFFIX "-NAPI" +#else +#define NAPI_SUFFIX "" +#endif +#ifdef ENABLE_FIBER_SUPPORT +#define FIBER_SUFFIX "-FIBER" +#else +#define FIBER_SUFFIX "" +#endif +#ifdef ENABLE_REALWOW_SUPPORT +#define REALWOW_SUFFIX "-REALWOW" +#else +#define REALWOW_SUFFIX "" +#endif +#if defined(ENABLE_DASH_PRINTER_SUPPORT) +#define DASH_SUFFIX "-PRINTER" +#elif defined(ENABLE_DASH_SUPPORT) +#define DASH_SUFFIX "-DASH" +#else +#define DASH_SUFFIX "" +#endif + +#define RTL8168_VERSION "8.049.02" NAPI_SUFFIX FIBER_SUFFIX REALWOW_SUFFIX DASH_SUFFIX +#define MODULENAME "r8168" +#define PFX MODULENAME ": " + +#define GPL_CLAIM "\ +r8168 Copyright (C) 2021 Realtek NIC software team \n \ +This program comes with ABSOLUTELY NO WARRANTY; for details, please see . \n \ +This is free software, and you are welcome to redistribute it under certain conditions; see . \n" + +#ifdef RTL8168_DEBUG +#define assert(expr) \ + if(!(expr)) { \ + printk( "Assertion failed! %s,%s,%s,line=%d\n", \ + #expr,__FILE__,__FUNCTION__,__LINE__); \ + } +#define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0) +#else +#define assert(expr) do {} while (0) +#define dprintk(fmt, args...) do {} while (0) +#endif /* RTL8168_DEBUG */ + +#define R8168_MSG_DEFAULT \ + (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) + +#ifdef CONFIG_R8168_NAPI +#define rtl8168_rx_hwaccel_skb vlan_hwaccel_receive_skb +#define rtl8168_rx_quota(count, quota) min(count, quota) +#else +#define rtl8168_rx_hwaccel_skb vlan_hwaccel_rx +#define rtl8168_rx_quota(count, quota) count +#endif + +/* MAC address length */ +#ifndef MAC_ADDR_LEN +#define MAC_ADDR_LEN 6 +#endif + +#ifndef MAC_PROTOCOL_LEN +#define MAC_PROTOCOL_LEN 2 +#endif + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif + +#ifndef NETIF_F_TSO6 +#define NETIF_F_TSO6 0 +#endif + +#define Reserved2_data 7 +#define RX_DMA_BURST 7 /* Maximum PCI burst, '6' is 1024 */ +#define TX_DMA_BURST_unlimited 7 +#define TX_DMA_BURST_1024 6 +#define TX_DMA_BURST_512 5 +#define TX_DMA_BURST_256 4 +#define TX_DMA_BURST_128 3 +#define TX_DMA_BURST_64 2 +#define TX_DMA_BURST_32 1 +#define TX_DMA_BURST_16 0 +#define Reserved1_data 0x3F +#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... 
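[Editor's note] The Jumbo_Frame_* sizes defined next all follow one formula: an N-KiB jumbo MTU is N*1024 bytes minus the 14-byte Ethernet header, the 4-byte 802.1Q VLAN tag and the 4-byte FCS (Jumbo_Frame_1k alone is just ETH_DATA_LEN, i.e. 1500). A quick standalone check of that arithmetic, with the kernel's header-size constants restated so the snippet compiles on its own:

```c
#include <stdio.h>

/* Header sizes as the kernel defines them in if_ether.h / if_vlan.h */
#define ETH_HLEN     14  /* dst MAC + src MAC + ethertype */
#define VLAN_HLEN     4  /* 802.1Q tag                    */
#define ETH_FCS_LEN   4  /* trailing CRC                  */

int main(void)
{
    /* e.g. Jumbo_Frame_9k = 9*1024 - 14 - 4 - 4 = 9194 payload bytes */
    for (int k = 2; k <= 9; k++)
        printf("Jumbo_Frame_%dk = %d\n",
               k, k * 1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN);
    return 0;
}
```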
*/ +#define Jumbo_Frame_1k ETH_DATA_LEN +#define Jumbo_Frame_2k (2*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_3k (3*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_4k (4*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_5k (5*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_6k (6*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_7k (7*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_8k (8*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_9k (9*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ +#define RxEarly_off_V1 (0x07 << 11) +#define RxEarly_off_V2 (1 << 11) +#define Rx_Single_fetch_V2 (1 << 14) + +#define R8168_REGS_SIZE (256) +#define R8168_MAC_REGS_SIZE (256) +#define R8168_PHY_REGS_SIZE (16*2) +#define R8168_EPHY_REGS_SIZE (31*2) +#define R8168_ERI_REGS_SIZE (0x100) +#define R8168_REGS_DUMP_SIZE (0x400) +#define R8168_PCI_REGS_SIZE (0x100) +#define R8168_NAPI_WEIGHT 64 + +#define RTL8168_TX_TIMEOUT (6 * HZ) +#define RTL8168_LINK_TIMEOUT (1 * HZ) +#define RTL8168_ESD_TIMEOUT (2 * HZ) + +#define NUM_TX_DESC 1024 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 1024 /* Number of Rx descriptor registers */ + +#define RX_BUF_SIZE 0x05F3 /* 0x05F3 = 1522bye + 1 */ +#define R8168_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8168_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define OCP_STD_PHY_BASE 0xa400 + +#define NODE_ADDRESS_SIZE 6 + +#define SHORT_PACKET_PADDING_BUF_SIZE 256 + +#define RTK_MAGIC_DEBUG_VALUE 0x0badbeef + +/* write/read MMIO register */ +#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) +#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) +#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) +#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) +#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) +#define RTL_R32(tp, reg) ((unsigned long) readl(tp->mmio_addr + (reg))) + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 /* driver took care of packet */ +#endif + +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 /* driver tx path was busy*/ +#endif + +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */ +#endif + +#ifndef ADVERTISED_Pause +#define ADVERTISED_Pause (1 << 13) +#endif + +#ifndef ADVERTISED_Asym_Pause +#define ADVERTISED_Asym_Pause (1 << 14) +#endif + +#ifndef ADVERTISE_PAUSE_CAP +#define ADVERTISE_PAUSE_CAP 0x400 +#endif + +#ifndef ADVERTISE_PAUSE_ASYM +#define ADVERTISE_PAUSE_ASYM 0x800 +#endif + +#ifndef MII_CTRL1000 +#define MII_CTRL1000 0x09 +#endif + +#ifndef ADVERTISE_1000FULL +#define ADVERTISE_1000FULL 0x200 +#endif + +#ifndef ADVERTISE_1000HALF +#define ADVERTISE_1000HALF 0x100 +#endif + +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif + +/*****************************************************************************/ + +//#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) +#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ + (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) +/* copied from linux kernel 2.6.20 include/linux/netdev.h */ +#define NETDEV_ALIGN 32 +#define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) + +static inline void 
*netdev_priv(struct net_device *dev) +{ + return (char *)dev + ((sizeof(struct net_device) + + NETDEV_ALIGN_CONST) + & ~NETDEV_ALIGN_CONST); +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) + +/*****************************************************************************/ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) +#define RTLDEV tp +#else +#define RTLDEV dev +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) +/*****************************************************************************/ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) +typedef struct net_device *napi_ptr; +typedef int *napi_budget; + +#define napi dev +#define RTL_NAPI_CONFIG(ndev, priv, function, weig) ndev->poll=function; \ + ndev->weight=weig; +#define RTL_NAPI_QUOTA(budget, ndev) min(*budget, ndev->quota) +#define RTL_GET_PRIV(stuct_ptr, priv_struct) netdev_priv(stuct_ptr) +#define RTL_GET_NETDEV(priv_ptr) +#define RTL_RX_QUOTA(budget) *budget +#define RTL_NAPI_QUOTA_UPDATE(ndev, work_done, budget) *budget -= work_done; \ + ndev->quota -= work_done; +#define RTL_NETIF_RX_COMPLETE(dev, napi, work_done) netif_rx_complete(dev) +#define RTL_NETIF_RX_SCHEDULE_PREP(dev, napi) netif_rx_schedule_prep(dev) +#define __RTL_NETIF_RX_SCHEDULE(dev, napi) __netif_rx_schedule(dev) +#define RTL_NAPI_RETURN_VALUE work_done >= work_to_do +#define RTL_NAPI_ENABLE(dev, napi) netif_poll_enable(dev) +#define RTL_NAPI_DISABLE(dev, napi) netif_poll_disable(dev) +#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) +#else +typedef struct napi_struct *napi_ptr; +typedef int napi_budget; + +#define RTL_NAPI_CONFIG(ndev, priv, function, weight) netif_napi_add(ndev, &priv->napi, function, weight) +#define RTL_NAPI_QUOTA(budget, ndev) min(budget, budget) +#define RTL_GET_PRIV(stuct_ptr, priv_struct) container_of(stuct_ptr, priv_struct, stuct_ptr) +#define RTL_GET_NETDEV(priv_ptr) struct net_device *dev = priv_ptr->dev; +#define RTL_RX_QUOTA(budget) budget +#define RTL_NAPI_QUOTA_UPDATE(ndev, work_done, budget) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) +#define RTL_NETIF_RX_COMPLETE(dev, napi, work_done) netif_rx_complete(dev, napi) +#define RTL_NETIF_RX_SCHEDULE_PREP(dev, napi) netif_rx_schedule_prep(dev, napi) +#define __RTL_NETIF_RX_SCHEDULE(dev, napi) __netif_rx_schedule(dev, napi) +#endif +#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,29) +#define RTL_NETIF_RX_COMPLETE(dev, napi, work_done) netif_rx_complete(napi) +#define RTL_NETIF_RX_SCHEDULE_PREP(dev, napi) netif_rx_schedule_prep(napi) +#define __RTL_NETIF_RX_SCHEDULE(dev, napi) __netif_rx_schedule(napi) +#endif +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) +#define RTL_NETIF_RX_COMPLETE(dev, napi, work_done) napi_complete_done(napi, work_done) +#else +#define RTL_NETIF_RX_COMPLETE(dev, napi, work_done) napi_complete(napi) +#endif +#define RTL_NETIF_RX_SCHEDULE_PREP(dev, napi) napi_schedule_prep(napi) +#define __RTL_NETIF_RX_SCHEDULE(dev, napi) __napi_schedule(napi) +#endif +#define RTL_NAPI_RETURN_VALUE work_done +#define RTL_NAPI_ENABLE(dev, napi) napi_enable(napi) +#define RTL_NAPI_DISABLE(dev, napi) napi_disable(napi) +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) +#define RTL_NAPI_DEL(priv) +#else +#define RTL_NAPI_DEL(priv) netif_napi_del(&priv->napi) +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) + +/*****************************************************************************/ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) +#ifdef 
__CHECKER__ +#define __iomem __attribute__((noderef, address_space(2))) +extern void __chk_io_ptr(void __iomem *); +#define __bitwise __attribute__((bitwise)) +#else +#define __iomem +#define __chk_io_ptr(x) (void)0 +#define __bitwise +#endif +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) + +/*****************************************************************************/ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) +#ifdef __CHECKER__ +#define __force __attribute__((force)) +#else +#define __force +#endif +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +/*****************************************************************************/ +/* 2.5.28 => 2.4.23 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + +static inline void _kc_synchronize_irq(void) +{ + synchronize_irq(); +} +#undef synchronize_irq +#define synchronize_irq(X) _kc_synchronize_irq() + +#include +#define work_struct tq_struct +#undef INIT_WORK +#define INIT_WORK(a,b,c) INIT_TQUEUE(a,(void (*)(void *))b,c) +#undef container_of +#define container_of list_entry +#define schedule_work schedule_task +#define flush_scheduled_work flush_scheduled_tasks +#endif /* 2.5.28 => 2.4.17 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#endif /* 2.6.4 => 2.6.0 */ +/*****************************************************************************/ +/* 2.6.0 => 2.5.28 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#define MODULE_INFO(version, _version) +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT +#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 +#endif + +#define pci_set_consistent_dma_mask(dev,mask) 1 + +#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); +#endif + +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif + +#undef ALIGN +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) + +#endif /* 2.6.0 => 2.5.28 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#define pci_name(x) ((x)->slot_name) +#endif /* 2.4.22 => 2.4.17 */ + +/*****************************************************************************/ +/* 2.6.5 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) +#define pci_dma_sync_single_for_cpu pci_dma_sync_single +#define pci_dma_sync_single_for_device pci_dma_sync_single_for_cpu +#endif /* 2.6.5 => 2.6.0 */ + +/*****************************************************************************/ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) +/* + * initialize a work-struct's func and data pointers: + */ +#define PREPARE_WORK(_work, _func, _data) \ + do { \ + (_work)->func = _func; \ + (_work)->data = _data; \ + } while (0) + +#endif +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ((LINUX_VERSION_CODE < 
KERNEL_VERSION(2,4,25) && \ + LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4))) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = (1 << 0), + ETH_TEST_FL_FAILED = (1 << 1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. 
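[Editor's note] The comments in this compat copy of ethtool_coalesce state the RX contract precisely: an interrupt may be held back until either rx_coalesce_usecs of time has passed or rx_max_coalesced_frames packets are pending, and setting both to zero is illegal because no interrupt would ever fire. A toy model of that rule (a hypothetical helper for illustration, not driver code):

```c
#include <stdbool.h>

/* Toy model of RX interrupt coalescing: fire when either budget is
 * exhausted. Both limits at zero would mean "never fire", which is
 * exactly the illegal configuration the comments above warn about. */
static bool should_fire_rx_irq(unsigned pending_frames,
                               unsigned usecs_since_first,
                               unsigned rx_coalesce_usecs,
                               unsigned rx_max_coalesced_frames)
{
    if (rx_max_coalesced_frames && pending_frames >= rx_max_coalesced_frames)
        return true;
    if (rx_coalesce_usecs && usecs_since_first >= rx_coalesce_usecs)
        return true;
    return false;
}
```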
+ */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. 
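[Editor's note] The _kc_ethtool_ringparam struct above draws the read-only/read-write split its comments describe: the *_max_pending fields are driver-imposed ceilings, and any user-set value must land in the range 1 to the matching maximum. A sketch of the bounds check a set-ring handler would perform (hypothetical helper, written against the compat struct above and assuming the driver's kernel types):

```c
/* Reject SRINGPARAM requests outside the advertised limits; the mini
 * and jumbo rings get the same check when the driver supports them. */
static int check_ringparam(const struct _kc_ethtool_ringparam *rp)
{
        if (rp->rx_pending < 1 || rp->rx_pending > rp->rx_max_pending)
                return -EINVAL;
        if (rp->tx_pending < 1 || rp->tx_pending > rp->tx_max_pending)
                return -EINVAL;
        return 0;
}
```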
*/ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autonet' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable +* (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable +* (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +/*****************************************************************************/ + +enum RTL8168_DSM_STATE { + DSM_MAC_INIT = 1, + DSM_NIC_GOTO_D3 = 2, + DSM_IF_DOWN = 3, + DSM_NIC_RESUME_D3 = 4, + DSM_IF_UP = 5, +}; + +enum RTL8168_registers { + MAC0 = 0x00, /* Ethernet hardware address. */ + MAC4 = 0x04, + MAR0 = 0x08, /* Multicast filter. 
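[Editor's note] The RTL8168_registers enum opening here, together with the Cfg9346 and Config3 bit definitions that follow it, encodes the driver's standard config-space idiom: the Config0–Config5 registers only accept writes after unlocking via Cfg9346, and the lock is restored immediately afterwards. A sketch of that unlock/modify/relock sequence (hypothetical helper using the RTL_W8/RTL_R8 accessors defined earlier and the constants defined just below; the real driver wraps this in per-chip quirks):

```c
/* Enable Magic Packet wake-up: Config3 is writable only while Cfg9346
 * holds its unlock value (0xC0); relock right away so stray writes
 * cannot corrupt the configuration registers. */
static void rtl8168_enable_magic_packet(struct rtl8168_private *tp)
{
        u8 cfg3;

        RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
        cfg3 = RTL_R8(tp, Config3);
        RTL_W8(tp, Config3, cfg3 | MagicPacket);
        RTL_W8(tp, Cfg9346, Cfg9346_Lock);
}
```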
*/ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + CustomLED = 0x18, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3C, + IntrStatus = 0x3E, + TxConfig = 0x40, + RxConfig = 0x44, + TCTR = 0x48, + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + TDFNR = 0x57, + TimeInt0 = 0x58, + TimeInt1 = 0x5C, + PHYAR = 0x60, + CSIDR = 0x64, + CSIAR = 0x68, + PHYstatus = 0x6C, + MACDBG = 0x6D, + GPIO = 0x6E, + PMCH = 0x6F, + ERIDR = 0x70, + ERIAR = 0x74, + EPHY_RXER_NUM = 0x7C, + EPHYAR = 0x80, + TimeInt2 = 0x8C, + OCPDR = 0xB0, + MACOCP = 0xB0, + OCPAR = 0xB4, + SecMAC0 = 0xB4, + SecMAC4 = 0xB8, + PHYOCP = 0xB8, + DBG_reg = 0xD1, + TwiCmdReg = 0xD2, + MCUCmd_reg = 0xD3, + RxMaxSize = 0xDA, + EFUSEAR = 0xDC, + CPlusCmd = 0xE0, + IntrMitigate = 0xE2, + RxDescAddrLow = 0xE4, + RxDescAddrHigh = 0xE8, + MTPS = 0xEC, + FuncEvent = 0xF0, + PPSW = 0xF2, + FuncEventMask = 0xF4, + TimeInt3 = 0xF4, + FuncPresetState = 0xF8, + CMAC_IBCR0 = 0xF8, + CMAC_IBCR2 = 0xF9, + CMAC_IBIMR0 = 0xFA, + CMAC_IBISR0 = 0xFB, + FuncForceEvent = 0xFC, +}; + +enum RTL8168_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxDescUnavail = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xC0, + Cfg9346_EEDO = (1 << 0), + Cfg9346_EEDI = (1 << 1), + Cfg9346_EESK = (1 << 2), + Cfg9346_EECS = (1 << 3), + Cfg9346_EEM0 = (1 << 6), + Cfg9346_EEM1 = (1 << 7), + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, + + /* Transmit Priority Polling*/ + HPQ = 0x80, + NPQ = 0x40, + FSWInt = 0x01, + + /* RxConfigBits */ + Reserved2_shift = 13, + RxCfgDMAShift = 8, + RxCfg_128_int_en = (1 << 15), + RxCfg_fet_multi_en = (1 << 14), + RxCfg_half_refetch = (1 << 13), + RxCfg_9356SEL = (1 << 6), + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + TxMACLoopBack = (1 << 17), /* MAC loopback */ + + /* Config1 register */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + PMSTS_En = (1 << 5), + + /* Config3 register */ + Isolate_en = (1 << 12), /* Isolate enable */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* This bit is reserved in RTL8168B.*/ + /* Wake up when the cable connection is re-established */ + ECRCEN = (1 << 3), /* This bit is reserved in RTL8168B*/ + Jumbo_En0 = (1 << 2), /* This bit is reserved in RTL8168B*/ + RDY_TO_L23 = (1 << 1), /* This bit is reserved in RTL8168B*/ + Beacon_en = (1 << 0), /* This bit is reserved in RTL8168B*/ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* This bit is reserved in RTL8168B*/ + + /* Config5 register */ + BWF 
= (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* CPlusCmd */ + EnableBist = (1 << 15), + Macdbgo_oe = (1 << 14), + Normal_mode = (1 << 13), + Force_halfdup = (1 << 12), + Force_rxflow_en = (1 << 11), + Force_txflow_en = (1 << 10), + Cxpl_dbg_sel = (1 << 9),//This bit is reserved in RTL8168B + ASF = (1 << 8),//This bit is reserved in RTL8168C + PktCntrDisable = (1 << 7), + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + Macdbgo_sel = 0x001C, + INTT_0 = 0x0000, + INTT_1 = 0x0001, + INTT_2 = 0x0002, + INTT_3 = 0x0003, + + /* rtl8168_PHYstatus */ + PowerSaveStatus = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* DBG_reg */ + Fix_Nak_1 = (1 << 4), + Fix_Nak_2 = (1 << 3), + DBGPIN_E2 = (1 << 0), + + /* ResetCounterCommand */ + CounterReset = 0x1, + /* DumpCounterCommand */ + CounterDump = 0x8, + + /* PHY access */ + PHYAR_Flag = 0x80000000, + PHYAR_Write = 0x80000000, + PHYAR_Read = 0x00000000, + PHYAR_Reg_Mask = 0x1f, + PHYAR_Reg_shift = 16, + PHYAR_Data_Mask = 0xffff, + + /* EPHY access */ + EPHYAR_Flag = 0x80000000, + EPHYAR_Write = 0x80000000, + EPHYAR_Read = 0x00000000, + EPHYAR_Reg_Mask = 0x3f, + EPHYAR_Reg_shift = 16, + EPHYAR_Data_Mask = 0xffff, + + /* CSI access */ + CSIAR_Flag = 0x80000000, + CSIAR_Write = 0x80000000, + CSIAR_Read = 0x00000000, + CSIAR_ByteEn = 0x0f, + CSIAR_ByteEn_shift = 12, + CSIAR_Addr_Mask = 0x0fff, + + /* ERI access */ + ERIAR_Flag = 0x80000000, + ERIAR_Write = 0x80000000, + ERIAR_Read = 0x00000000, + ERIAR_Addr_Align = 4, /* ERI access register address must be 4 byte alignment */ + ERIAR_ExGMAC = 0, + ERIAR_MSIX = 1, + ERIAR_ASF = 2, + ERIAR_OOB = 2, + ERIAR_Type_shift = 16, + ERIAR_ByteEn = 0x0f, + ERIAR_ByteEn_shift = 12, + + /* OCP GPHY access */ + OCPDR_Write = 0x80000000, + OCPDR_Read = 0x00000000, + OCPDR_Reg_Mask = 0xFF, + OCPDR_Data_Mask = 0xFFFF, + OCPDR_GPHY_Reg_shift = 16, + OCPAR_Flag = 0x80000000, + OCPAR_GPHY_Write = 0x8000F060, + OCPAR_GPHY_Read = 0x0000F060, + OCPR_Write = 0x80000000, + OCPR_Read = 0x00000000, + OCPR_Addr_Reg_shift = 16, + OCPR_Flag = 0x80000000, + OCP_STD_PHY_BASE_PAGE = 0x0A40, + + /* MCU Command */ + Now_is_oob = (1 << 7), + Txfifo_empty = (1 << 5), + Rxfifo_empty = (1 << 4), + + /* E-FUSE access */ + EFUSE_WRITE = 0x80000000, + EFUSE_WRITE_OK = 0x00000000, + EFUSE_READ = 0x00000000, + EFUSE_READ_OK = 0x80000000, + EFUSE_WRITE_V3 = 0x40000000, + EFUSE_WRITE_OK_V3 = 0x00000000, + EFUSE_READ_V3 = 0x80000000, + EFUSE_READ_OK_V3 = 0x00000000, + EFUSE_Reg_Mask = 0x03FF, + EFUSE_Reg_Shift = 8, + EFUSE_Check_Cnt = 300, + EFUSE_READ_FAIL = 0xFF, + EFUSE_Data_Mask = 0x000000FF, + + /* GPIO */ + GPIO_en = (1 << 0), + +}; + +enum _DescStatusBit { + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ + + /* Tx private */ + /*------ offset 0 of tx descriptor ------*/ + LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */ + GiantSendv4 = (1 << 26), /* TCP Giant Send Offload V4 (GSOv4) */ + GiantSendv6 = (1 << 25), /* TCP Giant Send Offload V6 (GSOv6) */ + LargeSend_DP = (1 << 16), /* TCP Large Send Offload (TSO) 
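[Editor's note] All of the indirect-access windows defined above (PHYAR, EPHYAR, CSIAR, ERIAR, OCPDR/OCPAR) share one handshake: pack the register number and data into a single 32-bit write with the Flag bit set, then poll until hardware clears the Flag. A sketch of that handshake for the GPHY window (hypothetical helper with an illustrative retry budget; the driver's real entry point is rtl8168_mdio_write(), declared near the end of this header):

```c
#include <linux/delay.h>

/* Write one GPHY register through the PHYAR window: the Flag bit
 * doubles as the busy indicator that the poll loop waits on. */
static void rtl8168_phyar_write(struct rtl8168_private *tp, u16 reg, u16 val)
{
        int i;

        RTL_W32(tp, PHYAR, PHYAR_Write |
                ((u32)(reg & PHYAR_Reg_Mask) << PHYAR_Reg_shift) |
                (val & PHYAR_Data_Mask));

        for (i = 0; i < 10; i++) {
                udelay(100);
                if (!(RTL_R32(tp, PHYAR) & PHYAR_Flag))
                        break;  /* hardware cleared Flag: write landed */
        }
}
```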
*/ + MSSShift = 16, /* MSS value position */ + MSSMask = 0x7FFU, /* MSS value 11 bits */ + TxIPCS = (1 << 18), /* Calculate IP checksum */ + TxUDPCS = (1 << 17), /* Calculate UDP/IP checksum */ + TxTCPCS = (1 << 16), /* Calculate TCP/IP checksum */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ + + /*@@@@@@ offset 4 of tx descriptor => bits for RTL8168C/CP only begin @@@@@@*/ + TxUDPCS_C = (1 << 31), /* Calculate UDP/IP checksum */ + TxTCPCS_C = (1 << 30), /* Calculate TCP/IP checksum */ + TxIPCS_C = (1 << 29), /* Calculate IP checksum */ + TxIPV6F_C = (1 << 28), /* Indicate it is an IPv6 packet */ + /*@@@@@@ offset 4 of tx descriptor => bits for RTL8168C/CP only end @@@@@@*/ + + + /* Rx private */ + /*------ offset 0 of rx descriptor ------*/ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 2/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + RxIPF = (1 << 16), /* IP checksum failed */ + RxUDPF = (1 << 15), /* UDP/IP checksum failed */ + RxTCPF = (1 << 14), /* TCP/IP checksum failed */ + RxVlanTag = (1 << 16), /* VLAN tag available */ + + /*@@@@@@ offset 0 of rx descriptor => bits for RTL8168C/CP only begin @@@@@@*/ + RxUDPT = (1 << 18), + RxTCPT = (1 << 17), + /*@@@@@@ offset 0 of rx descriptor => bits for RTL8168C/CP only end @@@@@@*/ + + /*@@@@@@ offset 4 of rx descriptor => bits for RTL8168C/CP only begin @@@@@@*/ + RxV6F = (1 << 31), + RxV4F = (1 << 30), + /*@@@@@@ offset 4 of rx descriptor => bits for RTL8168C/CP only end @@@@@@*/ +}; + +enum features { +// RTL_FEATURE_WOL = (1 << 0), + RTL_FEATURE_MSI = (1 << 1), +}; + +enum wol_capability { + WOL_DISABLED = 0, + WOL_ENABLED = 1 +}; + +enum bits { + BIT_0 = (1 << 0), + BIT_1 = (1 << 1), + BIT_2 = (1 << 2), + BIT_3 = (1 << 3), + BIT_4 = (1 << 4), + BIT_5 = (1 << 5), + BIT_6 = (1 << 6), + BIT_7 = (1 << 7), + BIT_8 = (1 << 8), + BIT_9 = (1 << 9), + BIT_10 = (1 << 10), + BIT_11 = (1 << 11), + BIT_12 = (1 << 12), + BIT_13 = (1 << 13), + BIT_14 = (1 << 14), + BIT_15 = (1 << 15), + BIT_16 = (1 << 16), + BIT_17 = (1 << 17), + BIT_18 = (1 << 18), + BIT_19 = (1 << 19), + BIT_20 = (1 << 20), + BIT_21 = (1 << 21), + BIT_22 = (1 << 22), + BIT_23 = (1 << 23), + BIT_24 = (1 << 24), + BIT_25 = (1 << 25), + BIT_26 = (1 << 26), + BIT_27 = (1 << 27), + BIT_28 = (1 << 28), + BIT_29 = (1 << 29), + BIT_30 = (1 << 30), + BIT_31 = (1 << 31) +}; + +enum effuse { + EFUSE_NOT_SUPPORT = 0, + EFUSE_SUPPORT_V1, + EFUSE_SUPPORT_V2, + EFUSE_SUPPORT_V3, +}; +#define RsvdMask 0x3fffc000 + +struct TxDesc { + u32 opts1; + u32 opts2; + u64 addr; +}; + +struct RxDesc { + u32 opts1; + u32 opts2; + u64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; + u8 __pad[sizeof(void *) - sizeof(u32)]; +}; + +struct pci_resource { + u8 cmd; + u8 cls; + u16 io_base_h; + u16 io_base_l; + u16 mem_base_h; + u16 mem_base_l; + u8 ilr; + u16 resv_0x1c_h; + u16 resv_0x1c_l; + u16 resv_0x20_h; + u16 resv_0x20_l; + u16 resv_0x24_h; + u16 resv_0x24_l; + u16 resv_0x2c_h; + u16 resv_0x2c_l; + u32 pci_sn_l; + u32 pci_sn_h; +}; + +struct rtl8168_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; /* Index of PCI device */ + struct net_device *dev; +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) + struct napi_struct napi; +#endif +#endif + struct net_device_stats stats; /* statistics of net device */ + spinlock_t lock; /* spin lock flag */ + u32 msg_enable; + u32 tx_tcp_csum_cmd; + u32 
tx_udp_csum_cmd; + u32 tx_ip_csum_cmd; + u32 tx_ipv6_csum_cmd; + int max_jumbo_frame_size; + int chipset; + u32 mcfg; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ + u32 dirty_rx; + u32 dirty_tx; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + unsigned rx_buf_sz; + struct timer_list esd_timer; + struct timer_list link_timer; + struct pci_resource pci_cfg_space; + unsigned int esd_flag; + unsigned int pci_cfg_is_read; + unsigned int rtl8168_rx_config; + u16 cp_cmd; + u16 intr_mask; + u16 timer_intr_mask; + int phy_auto_nego_reg; + int phy_1000_ctrl_reg; + u8 org_mac_addr[NODE_ADDRESS_SIZE]; + struct rtl8168_counters *tally_vaddr; + dma_addr_t tally_paddr; + +#ifdef CONFIG_R8168_VLAN + struct vlan_group *vlgrp; +#endif + u8 wol_enabled; + u32 wol_opts; + u8 efuse_ver; + u8 eeprom_type; + u8 autoneg; + u8 duplex; + u32 speed; + u32 advertising; + u16 eeprom_len; + u16 cur_page; + u32 bios_setting; + + int (*set_speed)(struct net_device *, u8 autoneg, u32 speed, u8 duplex, u32 adv); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + void (*get_settings)(struct net_device *, struct ethtool_cmd *); +#else + void (*get_settings)(struct net_device *, struct ethtool_link_ksettings *); +#endif + void (*phy_reset_enable)(struct net_device *); + unsigned int (*phy_reset_pending)(struct net_device *); + unsigned int (*link_ok)(struct net_device *); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + struct work_struct task; +#else + struct delayed_work task; +#endif + unsigned features; + + u8 org_pci_offset_99; + u8 org_pci_offset_180; + u8 issue_offset_99_event; + + u8 org_pci_offset_80; + u8 org_pci_offset_81; + u8 use_timer_interrrupt; + + u32 keep_intr_cnt; + + u8 HwIcVerUnknown; + u8 NotWrRamCodeToMicroP; + u8 NotWrMcuPatchCode; + u8 HwHasWrRamCodeToMicroP; + + u16 sw_ram_code_ver; + u16 hw_ram_code_ver; + + u8 rtk_enable_diag; + + u8 ShortPacketSwChecksum; + + u8 UseSwPaddingShortPkt; + + u8 RequireAdcBiasPatch; + u16 AdcBiasPatchIoffset; + + u8 RequireAdjustUpsTxLinkPulseTiming; + u16 SwrCnt1msIni; + + u8 HwSuppNowIsOobVer; + + u8 RequiredSecLanDonglePatch; + + u32 HwFiberModeVer; + u32 HwFiberStat; + u8 HwSwitchMdiToFiber; + + u8 HwSuppSerDesPhyVer; + + u8 HwSuppPhyOcpVer; + + u8 HwSuppAspmClkIntrLock; + + u16 NicCustLedValue; + + u8 HwSuppUpsVer; + + u8 HwSuppMagicPktVer; + + u8 HwSuppCheckPhyDisableModeVer; + + u8 random_mac; + + u16 phy_reg_aner; + u16 phy_reg_anlpar; + u16 phy_reg_gbsr; + + u32 HwPcieSNOffset; + + u8 HwSuppEsdVer; + u8 TestPhyOcpReg; + u16 BackupPhyFuseDout_15_0; + u16 BackupPhyFuseDout_47_32; + u16 BackupPhyFuseDout_63_48; + + const char *fw_name; + struct rtl8168_fw *rtl_fw; + u32 ocp_base; + + //Dash+++++++++++++++++ + u8 HwSuppDashVer; + u8 DASH; + u8 dash_printer_enabled; + u8 HwPkgDet; + void __iomem *mapped_cmac_ioaddr; /* mapped cmac memory map physical address */ + void __iomem *cmac_ioaddr; /* cmac memory map physical address */ + +#ifdef ENABLE_DASH_SUPPORT + u16 AfterRecvFromFwBufLen; + u8 AfterRecvFromFwBuf[RECV_FROM_FW_BUF_SIZE]; + u16 AfterSendToFwBufLen; + u8 AfterSendToFwBuf[SEND_TO_FW_BUF_SIZE]; + u16 SendToFwBufferLen; + u32 SizeOfSendToFwBuffer ; + u32 SizeOfSendToFwBufferMemAlloc ; + u32 
NumOfSendToFwBuffer ; + + u8 OobReq; + u8 OobAck; + u32 OobReqComplete; + u32 OobAckComplete; + + u8 RcvFwReqSysOkEvt; + u8 RcvFwDashOkEvt; + u8 SendFwHostOkEvt; + + u8 DashFwDisableRx; + + void *SendToFwBuffer ; + dma_addr_t SendToFwBufferPhy ; + u8 SendingToFw; + PTX_DASH_SEND_FW_DESC TxDashSendFwDesc; + dma_addr_t TxDashSendFwDescPhy; + u32 SizeOfTxDashSendFwDescMemAlloc; + u32 SizeOfTxDashSendFwDesc ; + u32 NumTxDashSendFwDesc ; + u32 CurrNumTxDashSendFwDesc ; + u32 LastSendNumTxDashSendFwDesc ; + + u32 NumRecvFromFwBuffer ; + u32 SizeOfRecvFromFwBuffer ; + u32 SizeOfRecvFromFwBufferMemAlloc ; + void *RecvFromFwBuffer ; + dma_addr_t RecvFromFwBufferPhy ; + + PRX_DASH_FROM_FW_DESC RxDashRecvFwDesc; + dma_addr_t RxDashRecvFwDescPhy; + u32 SizeOfRxDashRecvFwDescMemAlloc; + u32 SizeOfRxDashRecvFwDesc ; + u32 NumRxDashRecvFwDesc ; + u32 CurrNumRxDashRecvFwDesc ; + u8 DashReqRegValue; + u16 HostReqValue; + + u32 CmacResetIsrCounter; + u8 CmacResetIntr ; + u8 CmacResetting ; + u8 CmacOobIssueCmacReset ; + u32 CmacResetbyFwCnt; + +#if defined(ENABLE_DASH_PRINTER_SUPPORT) + struct completion fw_ack; + struct completion fw_req; + struct completion fw_host_ok; +#endif + //Dash----------------- +#endif //ENABLE_DASH_SUPPORT + + //Realwow++++++++++++++ + u8 HwSuppKCPOffloadVer; + + u8 EnableDhcpTimeoutWake; + u8 EnableTeredoOffload; + u8 EnableKCPOffload; +#ifdef ENABLE_REALWOW_SUPPORT + u32 DhcpTimeout; + MP_KCP_INFO MpKCPInfo; + //Realwow-------------- +#endif //ENABLE_REALWOW_SUPPORT + + u32 eee_adv_t; + u8 eee_enabled; + + u32 dynamic_aspm_packet_count; + +#ifdef ENABLE_R8168_PROCFS + //Procfs support + struct proc_dir_entry *proc_dir; +#endif +}; + +enum eetype { + EEPROM_TYPE_NONE=0, + EEPROM_TYPE_93C46, + EEPROM_TYPE_93C56, + EEPROM_TWSI +}; + +enum mcfg { + CFG_METHOD_1=0, + CFG_METHOD_2, + CFG_METHOD_3, + CFG_METHOD_4, + CFG_METHOD_5, + CFG_METHOD_6, + CFG_METHOD_7, + CFG_METHOD_8, + CFG_METHOD_9 , + CFG_METHOD_10, + CFG_METHOD_11, + CFG_METHOD_12, + CFG_METHOD_13, + CFG_METHOD_14, + CFG_METHOD_15, + CFG_METHOD_16, + CFG_METHOD_17, + CFG_METHOD_18, + CFG_METHOD_19, + CFG_METHOD_20, + CFG_METHOD_21, + CFG_METHOD_22, + CFG_METHOD_23, + CFG_METHOD_24, + CFG_METHOD_25, + CFG_METHOD_26, + CFG_METHOD_27, + CFG_METHOD_28, + CFG_METHOD_29, + CFG_METHOD_30, + CFG_METHOD_31, + CFG_METHOD_32, + CFG_METHOD_33, + CFG_METHOD_MAX, + CFG_METHOD_DEFAULT = 0xFF +}; + +#define LSO_32K 32000 +#define LSO_64K 64000 + +#define NIC_MIN_PHYS_BUF_COUNT (2) +#define NIC_MAX_PHYS_BUF_COUNT_LSO_64K (24) +#define NIC_MAX_PHYS_BUF_COUNT_LSO2 (16*4) + +#define GTTCPHO_SHIFT 18 +#define GTTCPHO_MAX 0x7fU +#define GTPKTSIZE_MAX 0x3ffffU +#define TCPHO_SHIFT 18 +#define TCPHO_MAX 0x3ffU +#define LSOPKTSIZE_MAX 0xffffU +#define MSS_MAX 0x07ffu /* MSS value */ + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 +#define OOB_CMD_SET_IPMAC 0x41 + +#define WAKEUP_MAGIC_PACKET_NOT_SUPPORT (0) +#define WAKEUP_MAGIC_PACKET_V1 (1) +#define WAKEUP_MAGIC_PACKET_V2 (2) + +//Ram Code Version +#define NIC_RAMCODE_VERSION_CFG_METHOD_14 (0x0057) +#define NIC_RAMCODE_VERSION_CFG_METHOD_16 (0x0055) +#define NIC_RAMCODE_VERSION_CFG_METHOD_18 (0x0052) +#define NIC_RAMCODE_VERSION_CFG_METHOD_20 (0x0044) +#define NIC_RAMCODE_VERSION_CFG_METHOD_21 (0x0042) +#define NIC_RAMCODE_VERSION_CFG_METHOD_24 (0x0001) +#define NIC_RAMCODE_VERSION_CFG_METHOD_23 (0x0015) +#define NIC_RAMCODE_VERSION_CFG_METHOD_26 (0x0012) +#define NIC_RAMCODE_VERSION_CFG_METHOD_28 (0x0019) +#define 
NIC_RAMCODE_VERSION_CFG_METHOD_29 (0x0055) +#define NIC_RAMCODE_VERSION_CFG_METHOD_31 (0x0003) + +//hwoptimize +#define HW_PATCH_SOC_LAN (BIT_0) +#define HW_PATCH_SAMSUNG_LAN_DONGLE (BIT_2) + +#define HW_PHY_STATUS_INI 1 +#define HW_PHY_STATUS_EXT_INI 2 +#define HW_PHY_STATUS_LAN_ON 3 + +void rtl8168_mdio_write(struct rtl8168_private *tp, u16 RegAddr, u16 value); +void rtl8168_mdio_prot_write(struct rtl8168_private *tp, u32 RegAddr, u32 value); +void rtl8168_mdio_prot_direct_write_phy_ocp(struct rtl8168_private *tp, u32 RegAddr, u32 value); +u32 rtl8168_mdio_read(struct rtl8168_private *tp, u16 RegAddr); +u32 rtl8168_mdio_prot_read(struct rtl8168_private *tp, u32 RegAddr); +u32 rtl8168_mdio_prot_direct_read_phy_ocp(struct rtl8168_private *tp, u32 RegAddr); +void rtl8168_ephy_write(struct rtl8168_private *tp, int RegAddr, int value); +void rtl8168_mac_ocp_write(struct rtl8168_private *tp, u16 reg_addr, u16 value); +u16 rtl8168_mac_ocp_read(struct rtl8168_private *tp, u16 reg_addr); +void rtl8168_clear_eth_phy_bit(struct rtl8168_private *tp, u8 addr, u16 mask); +void rtl8168_set_eth_phy_bit(struct rtl8168_private *tp, u8 addr, u16 mask); +void rtl8168_ocp_write(struct rtl8168_private *tp, u16 addr, u8 len, u32 data); +void rtl8168_oob_notify(struct rtl8168_private *tp, u8 cmd); +void rtl8168_init_ring_indexes(struct rtl8168_private *tp); +int rtl8168_eri_write(struct rtl8168_private *tp, int addr, int len, u32 value, int type); +void rtl8168_oob_mutex_lock(struct rtl8168_private *tp); +u32 rtl8168_ocp_read(struct rtl8168_private *tp, u16 addr, u8 len); +u32 rtl8168_ocp_read_with_oob_base_address(struct rtl8168_private *tp, u16 addr, u8 len, u32 base_address); +u32 rtl8168_ocp_write_with_oob_base_address(struct rtl8168_private *tp, u16 addr, u8 len, u32 value, u32 base_address); +u32 rtl8168_eri_read(struct rtl8168_private *tp, int addr, int len, int type); +u32 rtl8168_eri_read_with_oob_base_address(struct rtl8168_private *tp, int addr, int len, int type, u32 base_address); +int rtl8168_eri_write_with_oob_base_address(struct rtl8168_private *tp, int addr, int len, u32 value, int type, u32 base_address); +u16 rtl8168_ephy_read(struct rtl8168_private *tp, int RegAddr); +void rtl8168_wait_txrx_fifo_empty(struct net_device *dev); +void rtl8168_wait_ll_share_fifo_ready(struct net_device *dev); +void rtl8168_enable_now_is_oob(struct rtl8168_private *tp); +void rtl8168_disable_now_is_oob(struct rtl8168_private *tp); +void rtl8168_oob_mutex_unlock(struct rtl8168_private *tp); +void rtl8168_dash2_disable_tx(struct rtl8168_private *tp); +void rtl8168_dash2_enable_tx(struct rtl8168_private *tp); +void rtl8168_dash2_disable_rx(struct rtl8168_private *tp); +void rtl8168_dash2_enable_rx(struct rtl8168_private *tp); +void rtl8168_hw_disable_mac_mcu_bps(struct net_device *dev); + +#define HW_SUPPORT_CHECK_PHY_DISABLE_MODE(_M) ((_M)->HwSuppCheckPhyDisableModeVer > 0 ) +#define HW_SUPP_SERDES_PHY(_M) ((_M)->HwSuppSerDesPhyVer > 0) +#define HW_HAS_WRITE_PHY_MCU_RAM_CODE(_M) (((_M)->HwHasWrRamCodeToMicroP == TRUE) ? 
1 : 0) +#define HW_SUPPORT_UPS_MODE(_M) ((_M)->HwSuppUpsVer > 0) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) +#define netdev_mc_count(dev) ((dev)->mc_count) +#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) +#define netdev_for_each_mc_addr(mclist, dev) \ + for (mclist = dev->mc_list; mclist; mclist = mclist->next) +#endif diff --git a/addons/r8168/src/3.10.108/r8168.mod.c b/addons/r8168/src/3.10.108/r8168.mod.c new file mode 100644 index 00000000..28be82f9 --- /dev/null +++ b/addons/r8168/src/3.10.108/r8168.mod.c @@ -0,0 +1,31 @@ +#include +#include +#include + +MODULE_INFO(vermagic, VERMAGIC_STRING); + +struct module __this_module +__attribute__((section(".gnu.linkonce.this_module"))) = { + .name = KBUILD_MODNAME, + .init = init_module, +#ifdef CONFIG_MODULE_UNLOAD + .exit = cleanup_module, +#endif + .arch = MODULE_ARCH_INIT, +}; + +static const char __module_depends[] +__used +__attribute__((section(".modinfo"))) = +"depends="; + +MODULE_ALIAS("pci:v000010ECd00008168sv*sd*bc*sc*i*"); +MODULE_ALIAS("pci:v000010ECd00008161sv*sd*bc*sc*i*"); +MODULE_ALIAS("pci:v000010ECd00002502sv*sd*bc*sc*i*"); +MODULE_ALIAS("pci:v000010ECd00002600sv*sd*bc*sc*i*"); +MODULE_ALIAS("pci:v00001186d00004300sv00001186sd00004B10bc*sc*i*"); + +MODULE_INFO(srcversion, "7584797F71DC2D0613CBA69"); +#ifdef RETPOLINE + MODULE_INFO(retpoline, "Y"); +#endif diff --git a/addons/r8168/src/3.10.108/r8168_asf.c b/addons/r8168/src/3.10.108/r8168_asf.c new file mode 100644 index 00000000..77524f04 --- /dev/null +++ b/addons/r8168/src/3.10.108/r8168_asf.c @@ -0,0 +1,422 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. 
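[Editor's note] rtl8168_asf_ioctl() below follows the canonical private-ioctl shape: refuse unsupported chips, copy the user's request struct in, hold the device spinlock across the hardware access, dispatch on the requested offset, then copy the (possibly updated) struct back out. Condensed to its skeleton (struct asf_req is a hypothetical stand-in for struct asf_ioctl_struct, and the switch body is elided):

```c
/* Skeleton of the dispatch in rtl8168_asf_ioctl() below. */
static int asf_ioctl_skeleton(struct net_device *dev, struct ifreq *ifr)
{
        struct rtl8168_private *tp = netdev_priv(dev);
        struct asf_req req;                  /* hypothetical stand-in */
        unsigned long flags;

        if (tp->mcfg != CFG_METHOD_7 && tp->mcfg != CFG_METHOD_8)
                return -EOPNOTSUPP;          /* ASF only on these chips */

        if (copy_from_user(&req, ifr->ifr_data, sizeof(req)))
                return -EFAULT;

        spin_lock_irqsave(&tp->lock, flags);
        /* switch on the requested offset: each case reads or writes
         * ASF state through rtl8168_eri_read()/rtl8168_eri_write() */
        spin_unlock_irqrestore(&tp->lock, flags);

        if (copy_to_user(ifr->ifr_data, &req, sizeof(req)))
                return -EFAULT;
        return 0;
}
```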
+ ***********************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "r8168.h" +#include "r8168_asf.h" +#include "rtl_eeprom.h" + +int rtl8168_asf_ioctl(struct net_device *dev, + struct ifreq *ifr) +{ + struct rtl8168_private *tp = netdev_priv(dev); + void *user_data = ifr->ifr_data; + struct asf_ioctl_struct asf_usrdata; + unsigned long flags; + + if (tp->mcfg != CFG_METHOD_7 && tp->mcfg != CFG_METHOD_8) + return -EOPNOTSUPP; + + if (copy_from_user(&asf_usrdata, user_data, sizeof(struct asf_ioctl_struct))) + return -EFAULT; + + spin_lock_irqsave(&tp->lock, flags); + + switch (asf_usrdata.offset) { + case HBPeriod: + rtl8168_asf_hbperiod(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case WD8Timer: + break; + case WD16Rst: + rtl8168_asf_wd16rst(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case WD8Rst: + rtl8168_asf_time_period(tp, asf_usrdata.arg, WD8Rst, asf_usrdata.u.data); + break; + case LSnsrPollCycle: + rtl8168_asf_time_period(tp, asf_usrdata.arg, LSnsrPollCycle, asf_usrdata.u.data); + break; + case ASFSnsrPollPrd: + rtl8168_asf_time_period(tp, asf_usrdata.arg, ASFSnsrPollPrd, asf_usrdata.u.data); + break; + case AlertReSendItvl: + rtl8168_asf_time_period(tp, asf_usrdata.arg, AlertReSendItvl, asf_usrdata.u.data); + break; + case SMBAddr: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, SMBAddr, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case ASFConfigR0: + rtl8168_asf_config_regs(tp, asf_usrdata.arg, ASFConfigR0, asf_usrdata.u.data); + break; + case ASFConfigR1: + rtl8168_asf_config_regs(tp, asf_usrdata.arg, ASFConfigR1, asf_usrdata.u.data); + break; + case ConsoleMA: + rtl8168_asf_console_mac(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case ConsoleIP: + rtl8168_asf_ip_address(tp, asf_usrdata.arg, ConsoleIP, asf_usrdata.u.data); + break; + case IPAddr: + rtl8168_asf_ip_address(tp, asf_usrdata.arg, IPAddr, asf_usrdata.u.data); + break; + case UUID: + rtl8168_asf_rw_uuid(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case IANA: + rtl8168_asf_rw_iana(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case SysID: + rtl8168_asf_rw_systemid(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case Community: + rtl8168_asf_community_string(tp, asf_usrdata.arg, asf_usrdata.u.string); + break; + case StringLength: + rtl8168_asf_community_string_len(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case FmCapMsk: + rtl8168_asf_capability_masks(tp, asf_usrdata.arg, FmCapMsk, asf_usrdata.u.data); + break; + case SpCMDMsk: + rtl8168_asf_capability_masks(tp, asf_usrdata.arg, SpCMDMsk, asf_usrdata.u.data); + break; + case SysCapMsk: + rtl8168_asf_capability_masks(tp, asf_usrdata.arg, SysCapMsk, asf_usrdata.u.data); + break; + case RmtRstAddr: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtRstAddr, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtRstCmd: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtRstCmd, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtRstData: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtRstData, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPwrOffAddr: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPwrOffAddr, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPwrOffCmd: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPwrOffCmd, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPwrOffData: + 
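/* Editor's note: this case and the neighbouring Rmt{Rst,PwrOff,PwrOn,PCR}
   cases all funnel into rtl8168_asf_rw_hexadecimal() with RW_ONE_BYTE,
   i.e. each remote-control action is configured as a one-byte
   (address, command, data) triple in the ASF register file, read and
   written through the ERI window with access type ERIAR_ASF. */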
rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPwrOffData, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPwrOnAddr: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPwrOnAddr, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPwrOnCmd: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPwrOnCmd, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPwrOnData: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPwrOnData, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPCRAddr: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPCRAddr, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPCRCmd: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPCRCmd, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPCRData: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPCRData, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case ASFSnsr0Addr: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, ASFSnsr0Addr, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case LSnsrAddr0: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, LSnsrAddr0, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case KO: + /* Get/Set Key Operation */ + rtl8168_asf_key_access(tp, asf_usrdata.arg, KO, asf_usrdata.u.data); + break; + case KA: + /* Get/Set Key Administrator */ + rtl8168_asf_key_access(tp, asf_usrdata.arg, KA, asf_usrdata.u.data); + break; + case KG: + /* Get/Set Key Generation */ + rtl8168_asf_key_access(tp, asf_usrdata.arg, KG, asf_usrdata.u.data); + break; + case KR: + /* Get/Set Key Random */ + rtl8168_asf_key_access(tp, asf_usrdata.arg, KR, asf_usrdata.u.data); + break; + default: + spin_unlock_irqrestore(&tp->lock, flags); + return -EOPNOTSUPP; + } + + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(user_data, &asf_usrdata, sizeof(struct asf_ioctl_struct))) + return -EFAULT; + + return 0; +} + +void rtl8168_asf_hbperiod(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + if (arg == ASF_GET) + data[ASFHBPERIOD] = rtl8168_eri_read(tp, HBPeriod, RW_TWO_BYTES, ERIAR_ASF); + else if (arg == ASF_SET) { + rtl8168_eri_write(tp, HBPeriod, RW_TWO_BYTES, data[ASFHBPERIOD], ERIAR_ASF); + rtl8168_eri_write(tp, 0x1EC, RW_ONE_BYTE, 0x07, ERIAR_ASF); + } +} + +void rtl8168_asf_wd16rst(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + data[ASFWD16RST] = rtl8168_eri_read(tp, WD16Rst, RW_TWO_BYTES, ERIAR_ASF); +} + +void rtl8168_asf_console_mac(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + int i; + + if (arg == ASF_GET) { + for (i = 0; i < 6; i++) + data[i] = rtl8168_eri_read(tp, ConsoleMA + i, RW_ONE_BYTE, ERIAR_ASF); + } else if (arg == ASF_SET) { + for (i = 0; i < 6; i++) + rtl8168_eri_write(tp, ConsoleMA + i, RW_ONE_BYTE, data[i], ERIAR_ASF); + + /* write the new console MAC address to EEPROM */ + rtl8168_eeprom_write_sc(tp, 70, (data[1] << 8) | data[0]); + rtl8168_eeprom_write_sc(tp, 71, (data[3] << 8) | data[2]); + rtl8168_eeprom_write_sc(tp, 72, (data[5] << 8) | data[4]); + } +} + +void rtl8168_asf_ip_address(struct rtl8168_private *tp, int arg, int offset, unsigned int *data) +{ + int i; + int eeprom_off = 0; + + if (arg == ASF_GET) { + for (i = 0; i < 4; i++) + data[i] = rtl8168_eri_read(tp, offset + i, RW_ONE_BYTE, ERIAR_ASF); + } else if (arg == ASF_SET) { + for (i = 0; i < 4; i++) + rtl8168_eri_write(tp, offset + i, RW_ONE_BYTE, data[i], ERIAR_ASF); + + if (offset == ConsoleIP) + eeprom_off = 73; + else if (offset == IPAddr) + eeprom_off = 75; + + /* write the new IP address to EEPROM */ + rtl8168_eeprom_write_sc(tp, eeprom_off, (data[1] << 8) 
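/* editor's note: the four IP bytes are packed little-endian,
   two per 16-bit EEPROM word */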
| data[0]); + rtl8168_eeprom_write_sc(tp, eeprom_off + 1, (data[3] << 8) | data[2]); + + } +} + +void rtl8168_asf_config_regs(struct rtl8168_private *tp, int arg, int offset, unsigned int *data) +{ + unsigned int value; + + if (arg == ASF_GET) { + data[ASFCAPABILITY] = (rtl8168_eri_read(tp, offset, RW_ONE_BYTE, ERIAR_ASF) & data[ASFCONFIG]) ? FUNCTION_ENABLE : FUNCTION_DISABLE; + } else if (arg == ASF_SET) { + value = rtl8168_eri_read(tp, offset, RW_ONE_BYTE, ERIAR_ASF); + + if (data[ASFCAPABILITY] == FUNCTION_ENABLE) + value |= data[ASFCONFIG]; + else if (data[ASFCAPABILITY] == FUNCTION_DISABLE) + value &= ~data[ASFCONFIG]; + + rtl8168_eri_write(tp, offset, RW_ONE_BYTE, value, ERIAR_ASF); + } +} + +void rtl8168_asf_capability_masks(struct rtl8168_private *tp, int arg, int offset, unsigned int *data) +{ + unsigned int len, bit_mask; + + bit_mask = DISABLE_MASK; + + if (offset == FmCapMsk) { + /* System firmware capabilities */ + len = RW_FOUR_BYTES; + if (data[ASFCAPMASK] == FUNCTION_ENABLE) + bit_mask = FMW_CAP_MASK; + } else if (offset == SpCMDMsk) { + /* Special commands */ + len = RW_TWO_BYTES; + if (data[ASFCAPMASK] == FUNCTION_ENABLE) + bit_mask = SPC_CMD_MASK; + } else { + /* System capability (offset == SysCapMsk)*/ + len = RW_ONE_BYTE; + if (data[ASFCAPMASK] == FUNCTION_ENABLE) + bit_mask = SYS_CAP_MASK; + } + + if (arg == ASF_GET) + data[ASFCAPMASK] = rtl8168_eri_read(tp, offset, len, ERIAR_ASF) ? FUNCTION_ENABLE : FUNCTION_DISABLE; + else /* arg == ASF_SET */ + rtl8168_eri_write(tp, offset, len, bit_mask, ERIAR_ASF); +} + +void rtl8168_asf_community_string(struct rtl8168_private *tp, int arg, char *string) +{ + int i; + + if (arg == ASF_GET) { + for (i = 0; i < COMMU_STR_MAX_LEN; i++) + string[i] = rtl8168_eri_read(tp, Community + i, RW_ONE_BYTE, ERIAR_ASF); + } else { /* arg == ASF_SET */ + for (i = 0; i < COMMU_STR_MAX_LEN; i++) + rtl8168_eri_write(tp, Community + i, RW_ONE_BYTE, string[i], ERIAR_ASF); + } +} + +void rtl8168_asf_community_string_len(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + if (arg == ASF_GET) + data[ASFCOMMULEN] = rtl8168_eri_read(tp, StringLength, RW_ONE_BYTE, ERIAR_ASF); + else /* arg == ASF_SET */ + rtl8168_eri_write(tp, StringLength, RW_ONE_BYTE, data[ASFCOMMULEN], ERIAR_ASF); +} + +void rtl8168_asf_time_period(struct rtl8168_private *tp, int arg, int offset, unsigned int *data) +{ + int pos = 0; + + if (offset == WD8Rst) + pos = ASFWD8RESET; + else if (offset == LSnsrPollCycle) + pos = ASFLSNRPOLLCYC; + else if (offset == ASFSnsrPollPrd) + pos = ASFSNRPOLLCYC; + else if (offset == AlertReSendItvl) + pos = ASFALERTRESND; + + if (arg == ASF_GET) + data[pos] = rtl8168_eri_read(tp, offset, RW_ONE_BYTE, ERIAR_ASF); + else /* arg == ASF_SET */ + rtl8168_eri_write(tp, offset, RW_ONE_BYTE, data[pos], ERIAR_ASF); + +} + +void rtl8168_asf_key_access(struct rtl8168_private *tp, int arg, int offset, unsigned int *data) +{ + int i, j; + int key_off = 0; + + if (arg == ASF_GET) { + for (i = 0; i < KEY_LEN; i++) + data[i] = rtl8168_eri_read(tp, offset + KEY_LEN - (i + 1), RW_ONE_BYTE, ERIAR_ASF); + } else { + if (offset == KO) + key_off = 162; + else if (offset == KA) + key_off = 172; + else if (offset == KG) + key_off = 182; + else if (offset == KR) + key_off = 192; + + /* arg == ASF_SET */ + for (i = 0; i < KEY_LEN; i++) + rtl8168_eri_write(tp, offset + KEY_LEN - (i + 1), RW_ONE_BYTE, data[i], ERIAR_ASF); + + /* write the new key to EEPROM */ + for (i = 0, j = 19; i < 10; i++, j = j - 2) + rtl8168_eeprom_write_sc(tp, key_off + i, (data[j - 1] << 
8) | data[j]); + } +} + +void rtl8168_asf_rw_hexadecimal(struct rtl8168_private *tp, int arg, int offset, int len, unsigned int *data) +{ + if (arg == ASF_GET) + data[ASFRWHEXNUM] = rtl8168_eri_read(tp, offset, len, ERIAR_ASF); + else /* arg == ASF_SET */ + rtl8168_eri_write(tp, offset, len, data[ASFRWHEXNUM], ERIAR_ASF); +} + +void rtl8168_asf_rw_systemid(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + int i; + + if (arg == ASF_GET) + for (i = 0; i < SYSID_LEN ; i++) + data[i] = rtl8168_eri_read(tp, SysID + i, RW_ONE_BYTE, ERIAR_ASF); + else /* arg == ASF_SET */ + for (i = 0; i < SYSID_LEN ; i++) + rtl8168_eri_write(tp, SysID + i, RW_ONE_BYTE, data[i], ERIAR_ASF); +} + +void rtl8168_asf_rw_iana(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + int i; + + if (arg == ASF_GET) + for (i = 0; i < RW_FOUR_BYTES; i++) + data[i] = rtl8168_eri_read(tp, IANA + i, RW_ONE_BYTE, ERIAR_ASF); + else /* arg == ASF_SET */ + for (i = 0; i < RW_FOUR_BYTES; i++) + rtl8168_eri_write(tp, IANA + i, RW_ONE_BYTE, data[i], ERIAR_ASF); +} + +void rtl8168_asf_rw_uuid(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + int i, j; + + if (arg == ASF_GET) + for (i = UUID_LEN - 1, j = 0; i >= 0 ; i--, j++) + data[j] = rtl8168_eri_read(tp, UUID + i, RW_ONE_BYTE, ERIAR_ASF); + else /* arg == ASF_SET */ + for (i = UUID_LEN - 1, j = 0; i >= 0 ; i--, j++) + rtl8168_eri_write(tp, UUID + i, RW_ONE_BYTE, data[j], ERIAR_ASF); +} diff --git a/addons/r8168/src/3.10.108/r8168_asf.h b/addons/r8168/src/3.10.108/r8168_asf.h new file mode 100644 index 00000000..c4b412a6 --- /dev/null +++ b/addons/r8168/src/3.10.108/r8168_asf.h @@ -0,0 +1,295 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. 
+ ***********************************************************************************/ + +#define SIOCDEVPRIVATE_RTLASF SIOCDEVPRIVATE + +#define FUNCTION_ENABLE 1 +#define FUNCTION_DISABLE 0 + +#define ASFCONFIG 0 +#define ASFCAPABILITY 1 +#define ASFCOMMULEN 0 +#define ASFHBPERIOD 0 +#define ASFWD16RST 0 +#define ASFCAPMASK 0 +#define ASFALERTRESND 0 +#define ASFLSNRPOLLCYC 0 +#define ASFSNRPOLLCYC 0 +#define ASFWD8RESET 0 +#define ASFRWHEXNUM 0 + +#define FMW_CAP_MASK 0x0000F867 +#define SPC_CMD_MASK 0x1F00 +#define SYS_CAP_MASK 0xFF +#define DISABLE_MASK 0x00 + +#define MAX_DATA_LEN 200 +#define MAX_STR_LEN 200 + +#define COMMU_STR_MAX_LEN 23 + +#define KEY_LEN 20 +#define UUID_LEN 16 +#define SYSID_LEN 2 + +#define RW_ONE_BYTE 1 +#define RW_TWO_BYTES 2 +#define RW_FOUR_BYTES 4 + +enum asf_registers { + HBPeriod = 0x0000, + WD8Rst = 0x0002, + WD8Timer = 0x0003, + WD16Rst = 0x0004, + LSnsrPollCycle = 0x0006, + ASFSnsrPollPrd = 0x0007, + AlertReSendCnt = 0x0008, + AlertReSendItvl = 0x0009, + SMBAddr = 0x000A, + SMBCap = 0x000B, + ASFConfigR0 = 0x000C, + ASFConfigR1 = 0x000D, + WD16Timer = 0x000E, + ConsoleMA = 0x0010, + ConsoleIP = 0x0016, + IPAddr = 0x001A, + + UUID = 0x0020, + IANA = 0x0030, + SysID = 0x0034, + Community = 0x0036, + StringLength = 0x004D, + LC = 0x004E, + EntityInst = 0x004F, + FmCapMsk = 0x0050, + SpCMDMsk = 0x0054, + SysCapMsk = 0x0056, + WDSysSt = 0x0057, + RxMsgType = 0x0058, + RxSpCMD = 0x0059, + RxSpCMDPa = 0x005A, + RxBtOpMsk = 0x005C, + RmtRstAddr = 0x005E, + RmtRstCmd = 0x005F, + RmtRstData = 0x0060, + RmtPwrOffAddr = 0x0061, + RmtPwrOffCmd = 0x0062, + RmtPwrOffData = 0x0063, + RmtPwrOnAddr = 0x0064, + RmtPwrOnCmd = 0x0065, + RmtPwrOnData = 0x0066, + RmtPCRAddr = 0x0067, + RmtPCRCmd = 0x0068, + RmtPCRData = 0x0069, + RMCP_IANA = 0x006A, + RMCP_OEM = 0x006E, + ASFSnsr0Addr = 0x0070, + + ASFSnsrEvSt = 0x0073, + ASFSnsrEvAlert = 0x0081, + + LSnsrNo = 0x00AD, + AssrtEvntMsk = 0x00AE, + DeAssrtEvntMsk = 0x00AF, + + LSnsrAddr0 = 0x00B0, + LAlertCMD0 = 0x00B1, + LAlertDataMsk0 = 0x00B2, + LAlertCmp0 = 0x00B3, + LAlertESnsrT0 = 0x00B4, + LAlertET0 = 0x00B5, + LAlertEOffset0 = 0x00B6, + LAlertES0 = 0x00B7, + LAlertSN0 = 0x00B8, + LAlertEntity0 = 0x00B9, + LAlertEI0 = 0x00BA, + LSnsrState0 = 0x00BB, + + LSnsrAddr1 = 0x00BD, + LAlertCMD1 = 0x00BE, + LAlertDataMsk1 = 0x00BF, + LAlertCmp1 = 0x00C0, + LAlertESnsrT1 = 0x00C1, + LAlertET1 = 0x00C2, + LAlertEOffset1 = 0x00C3, + LAlertES1 = 0x00C4, + LAlertSN1 = 0x00C5, + LAlertEntity1 = 0x00C6, + LAlertEI1 = 0x00C7, + LSnsrState1 = 0x00C8, + + LSnsrAddr2 = 0x00CA, + LAlertCMD2 = 0x00CB, + LAlertDataMsk2 = 0x00CC, + LAlertCmp2 = 0x00CD, + LAlertESnsrT2 = 0x00CE, + LAlertET2 = 0x00CF, + LAlertEOffset2 = 0x00D0, + LAlertES2 = 0x00D1, + LAlertSN2 = 0x00D2, + LAlertEntity2 = 0x00D3, + LAlertEI2 = 0x00D4, + LSnsrState2 = 0x00D5, + + LSnsrAddr3 = 0x00D7, + LAlertCMD3 = 0x00D8, + LAlertDataMsk3 = 0x00D9, + LAlertCmp3 = 0x00DA, + LAlertESnsrT3 = 0x00DB, + LAlertET3 = 0x00DC, + LAlertEOffset3 = 0x00DD, + LAlertES3 = 0x00DE, + LAlertSN3 = 0x00DF, + LAlertEntity3 = 0x00E0, + LAlertEI3 = 0x00E1, + LSnsrState3 = 0x00E2, + + LSnsrAddr4 = 0x00E4, + LAlertCMD4 = 0x00E5, + LAlertDataMsk4 = 0x00E6, + LAlertCmp4 = 0x00E7, + LAlertESnsrT4 = 0x00E8, + LAlertET4 = 0x00E9, + LAlertEOffset4 = 0x00EA, + LAlertES4 = 0x00EB, + LAlertSN4 = 0x00EC, + LAlertEntity4 = 0x00ED, + LAlertEI4 = 0x00EE, + LSnsrState4 = 0x00EF, + + LSnsrAddr5 = 0x00F1, + LAlertCMD5 = 0x00F2, + LAlertDataMsk5 = 0x00F3, + LAlertCmp5 = 0x00F4, + LAlertESnsrT5 = 0x00F5, + LAlertET5 = 
0x00F6, + LAlertEOffset5 = 0x00F7, + LAlertES5 = 0x00F8, + LAlertSN5 = 0x00F9, + LAlertEntity5 = 0x00FA, + LAlertEI5 = 0x00FB, + LSnsrState5 = 0x00FC, + + LSnsrAddr6 = 0x00FE, + LAlertCMD6 = 0x00FF, + LAlertDataMsk6 = 0x0100, + LAlertCmp6 = 0x0101, + LAlertESnsrT6 = 0x0102, + LAlertET6 = 0x0103, + LAlertEOffset6 = 0x0104, + LAlertES6 = 0x0105, + LAlertSN6 = 0x0106, + LAlertEntity6 = 0x0107, + LAlertEI6 = 0x0108, + LSnsrState6 = 0x0109, + + LSnsrAddr7 = 0x010B, + LAlertCMD7 = 0x010C, + LAlertDataMsk7 = 0x010D, + LAlertCmp7 = 0x010E, + LAlertESnsrT7 = 0x010F, + LAlertET7 = 0x0110, + LAlertEOffset7 = 0x0111, + LAlertES7 = 0x0112, + LAlertSN7 = 0x0113, + LAlertEntity7 = 0x0114, + LAlertEI7 = 0x0115, + LSnsrState7 = 0x0116, + LAssert = 0x0117, + LDAssert = 0x0118, + IPServiceType = 0x0119, + IPIdfr = 0x011A, + FlagFOffset = 0x011C, + TTL = 0x011E, + HbtEI = 0x011F, + MgtConSID1 = 0x0120, + MgtConSID2 = 0x0124, + MgdCltSID = 0x0128, + StCd = 0x012C, + MgtConUR = 0x012D, + MgtConUNL = 0x012E, + + AuthPd = 0x0130, + IntyPd = 0x0138, + MgtConRN = 0x0140, + MgdCtlRN = 0x0150, + MgtConUN = 0x0160, + Rakp2IntCk = 0x0170, + KO = 0x017C, + KA = 0x0190, + KG = 0x01A4, + KR = 0x01B8, + CP = 0x01CC, + CQ = 0x01D0, + KC = 0x01D4, + ConsoleSid = 0x01E8, + + SIK1 = 0x01FC, + SIK2 = 0x0210, + Udpsrc_port = 0x0224, + Udpdes_port = 0x0226, + Asf_debug_mux = 0x0228 +}; + +enum asf_cmdln_opt { + ASF_GET, + ASF_SET, + ASF_HELP +}; + +struct asf_ioctl_struct { + unsigned int arg; + unsigned int offset; + union { + unsigned int data[MAX_DATA_LEN]; + char string[MAX_STR_LEN]; + } u; +}; + +int rtl8168_asf_ioctl(struct net_device *dev, struct ifreq *ifr); +void rtl8168_asf_hbperiod(struct rtl8168_private *tp, int arg, unsigned int *data); +void rtl8168_asf_wd16rst(struct rtl8168_private *tp, int arg, unsigned int *data); +void rtl8168_asf_console_mac(struct rtl8168_private *, int arg, unsigned int *data); +void rtl8168_asf_ip_address(struct rtl8168_private *, int arg, int offset, unsigned int *data); +void rtl8168_asf_config_regs(struct rtl8168_private *tp, int arg, int offset, unsigned int *data); +void rtl8168_asf_capability_masks(struct rtl8168_private *tp, int arg, int offset, unsigned int *data); +void rtl8168_asf_community_string(struct rtl8168_private *tp, int arg, char *string); +void rtl8168_asf_community_string_len(struct rtl8168_private *tp, int arg, unsigned int *data); +void rtl8168_asf_alert_resend_interval(struct rtl8168_private *tp, int arg, unsigned int *data); +void rtl8168_asf_time_period(struct rtl8168_private *tp, int arg, int offset, unsigned int *data); +void rtl8168_asf_key_access(struct rtl8168_private *, int arg, int offset, unsigned int *data); +void rtl8168_asf_rw_hexadecimal(struct rtl8168_private *tp, int arg, int offset, int len, unsigned int *data); +void rtl8168_asf_rw_iana(struct rtl8168_private *tp, int arg, unsigned int *data); +void rtl8168_asf_rw_uuid(struct rtl8168_private *tp, int arg, unsigned int *data); +void rtl8168_asf_rw_systemid(struct rtl8168_private *tp, int arg, unsigned int *data); diff --git a/addons/r8168/src/3.10.108/r8168_dash.h b/addons/r8168/src/3.10.108/r8168_dash.h new file mode 100644 index 00000000..b4d358f1 --- /dev/null +++ b/addons/r8168/src/3.10.108/r8168_dash.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. 
+# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +#ifndef _LINUX_R8168_DASH_H +#define _LINUX_R8168_DASH_H + +#define SIOCDEVPRIVATE_RTLDASH SIOCDEVPRIVATE+2 + +enum rtl_dash_cmd { + RTL_DASH_ARP_NS_OFFLOAD = 0, + RTL_DASH_SET_OOB_IPMAC, + RTL_DASH_NOTIFY_OOB, + + RTL_DASH_SEND_BUFFER_DATA_TO_DASH_FW, + RTL_DASH_CHECK_SEND_BUFFER_TO_DASH_FW_COMPLETE, + RTL_DASH_GET_RCV_FROM_FW_BUFFER_DATA, + RTL_DASH_OOB_REQ, + RTL_DASH_OOB_ACK, + RTL_DASH_DETACH_OOB_REQ, + RTL_DASH_DETACH_OOB_ACK, + + RTL_FW_SET_IPV4 = 0x10, + RTL_FW_GET_IPV4, + RTL_FW_SET_IPV6, + RTL_FW_GET_IPV6, + RTL_FW_SET_EXT_SNMP, + RTL_FW_GET_EXT_SNMP, + RTL_FW_SET_WAKEUP_PATTERN, + RTL_FW_GET_WAKEUP_PATTERN, + RTL_FW_DEL_WAKEUP_PATTERN, + + RTLT_DASH_COMMAND_INVALID, +}; + +struct rtl_dash_ip_mac { + struct sockaddr ifru_addr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; +}; + +struct rtl_dash_ioctl_struct { + __u32 cmd; + __u32 offset; + __u32 len; + union { + __u32 data; + void *data_buffer; + }; +}; + +struct settings_ipv4 { + __u32 IPv4addr; + __u32 IPv4mask; + __u32 IPv4Gateway; +}; + +struct settings_ipv6 { + __u32 reserved; + __u32 prefixLen; + __u16 IPv6addr[8]; + __u16 IPv6Gateway[8]; +}; + +struct settings_ext_snmp { + __u16 index; + __u16 oid_get_len; + __u8 oid_for_get[24]; + __u8 reserved0[26]; + __u16 value_len; + __u8 value[256]; + __u8 supported; + __u8 reserved1[27]; +}; + +struct wakeup_pattern { + __u8 index; + __u8 valid; + __u8 start; + __u8 length; + __u8 name[36]; + __u8 mask[16]; + __u8 pattern[128]; + __u32 reserved[2]; +}; + +typedef struct _RX_DASH_FROM_FW_DESC { + __le16 length; + __le16 status; + __le32 resv; + __le64 BufferAddress; +} +RX_DASH_FROM_FW_DESC, *PRX_DASH_FROM_FW_DESC; + +typedef struct _TX_DASH_SEND_FW_DESC { + __le16 length; + __le16 status; + __le32 resv; + __le64 BufferAddress; +} +TX_DASH_SEND_FW_DESC, *PTX_DASH_SEND_FW_DESC; + +typedef struct _OSOOBHdr { + __le32 len; + u8 type; + u8 flag; + u8 hostReqV; + u8 res; +} +OSOOBHdr, *POSOOBHdr; + +typedef struct _RX_DASH_BUFFER_TYPE_2 { + OSOOBHdr oobhdr; + u8 RxDataBuffer[0]; +} +RX_DASH_BUFFER_TYPE_2, *PRX_DASH_BUFFER_TYPE_2; + +#define ALIGN_8 (0x7) +#define ALIGN_16 (0xf) +#define ALIGN_32 (0x1f) +#define ALIGN_64 (0x3f) +#define ALIGN_256 (0xff) +#define ALIGN_4096 (0xfff) + +#define OCP_REG_CONFIG0 (0x10) +#define OCP_REG_CONFIG0_REV_F (0xB8) +#define OCP_REG_DASH_POLL (0x30) +#define 
OCP_REG_HOST_REQ (0x34) +#define OCP_REG_DASH_REQ (0x35) +#define OCP_REG_CR (0x36) +#define OCP_REG_DMEMSTA (0x38) +#define OCP_REG_GPHYAR (0x60) + + +#define OCP_REG_CONFIG0_DASHEN BIT_15 +#define OCP_REG_CONFIG0_OOBRESET BIT_14 +#define OCP_REG_CONFIG0_APRDY BIT_13 +#define OCP_REG_CONFIG0_FIRMWARERDY BIT_12 +#define OCP_REG_CONFIG0_DRIVERRDY BIT_11 +#define OCP_REG_CONFIG0_OOB_WDT BIT_9 +#define OCP_REG_CONFIG0_DRV_WAIT_OOB BIT_8 +#define OCP_REG_CONFIG0_TLSEN BIT_7 + +#define HW_DASH_SUPPORT_DASH(_M) ((_M)->HwSuppDashVer > 0) +#define HW_DASH_SUPPORT_TYPE_1(_M) ((_M)->HwSuppDashVer == 1) +#define HW_DASH_SUPPORT_TYPE_2(_M) ((_M)->HwSuppDashVer == 2) +#define HW_DASH_SUPPORT_TYPE_3(_M) ((_M)->HwSuppDashVer == 3) + +#define RECV_FROM_FW_BUF_SIZE (2048) +#define SEND_TO_FW_BUF_SIZE (2048) + +#define RX_DASH_FROM_FW_OWN BIT_15 +#define TX_DASH_SEND_FW_OWN BIT_15 + +#define TXS_CC3_0 (BIT_0|BIT_1|BIT_2|BIT_3) +#define TXS_EXC BIT_4 +#define TXS_LNKF BIT_5 +#define TXS_OWC BIT_6 +#define TXS_TES BIT_7 +#define TXS_UNF BIT_9 +#define TXS_LGSEN BIT_11 +#define TXS_LS BIT_12 +#define TXS_FS BIT_13 +#define TXS_EOR BIT_14 +#define TXS_OWN BIT_15 + +#define TPPool_HRDY 0x20 + +#define HostReqReg (0xC0) +#define SystemMasterDescStartAddrLow (0xF0) +#define SystemMasterDescStartAddrHigh (0xF4) +#define SystemSlaveDescStartAddrLow (0xF8) +#define SystemSlaveDescStartAddrHigh (0xFC) + +//DASH Request Type +#define WSMANREG 0x01 +#define OSPUSHDATA 0x02 + +#define RXS_OWN BIT_15 +#define RXS_EOR BIT_14 +#define RXS_FS BIT_13 +#define RXS_LS BIT_12 + +#define ISRIMR_DP_DASH_OK BIT_15 +#define ISRIMR_DP_HOST_OK BIT_13 +#define ISRIMR_DP_REQSYS_OK BIT_11 + +#define ISRIMR_DASH_INTR_EN BIT_12 +#define ISRIMR_DASH_INTR_CMAC_RESET BIT_15 + +#define ISRIMR_DASH_TYPE2_ROK BIT_0 +#define ISRIMR_DASH_TYPE2_RDU BIT_1 +#define ISRIMR_DASH_TYPE2_TOK BIT_2 +#define ISRIMR_DASH_TYPE2_TDU BIT_3 +#define ISRIMR_DASH_TYPE2_TX_FIFO_FULL BIT_4 +#define ISRIMR_DASH_TYPE2_TX_DISABLE_IDLE BIT_5 +#define ISRIMR_DASH_TYPE2_RX_DISABLE_IDLE BIT_6 + +#define CMAC_OOB_STOP 0x25 +#define CMAC_OOB_INIT 0x26 +#define CMAC_OOB_RESET 0x2a + +#define NO_BASE_ADDRESS 0x00000000 +#define RTL8168FP_OOBMAC_BASE 0xBAF70000 +#define RTL8168FP_CMAC_IOBASE 0xBAF20000 +#define RTL8168FP_KVM_BASE 0xBAF80400 +#define CMAC_SYNC_REG 0x20 +#define CMAC_RXDESC_OFFSET 0x90 //RX: 0x90 - 0x98 +#define CMAC_TXDESC_OFFSET 0x98 //TX: 0x98 - 0x9F + +/* cmac write/read MMIO register */ +#define RTL_CMAC_W8(tp, reg, val8) writeb ((val8), tp->cmac_ioaddr + (reg)) +#define RTL_CMAC_W16(tp, reg, val16) writew ((val16), tp->cmac_ioaddr + (reg)) +#define RTL_CMAC_W32(tp, reg, val32) writel ((val32), tp->cmac_ioaddr + (reg)) +#define RTL_CMAC_R8(tp, reg) readb (tp->cmac_ioaddr + (reg)) +#define RTL_CMAC_R16(tp, reg) readw (tp->cmac_ioaddr + (reg)) +#define RTL_CMAC_R32(tp, reg) ((unsigned long) readl (tp->cmac_ioaddr + (reg))) + +int rtl8168_dash_ioctl(struct net_device *dev, struct ifreq *ifr); +void HandleDashInterrupt(struct net_device *dev); +int AllocateDashShareMemory(struct net_device *dev); +void FreeAllocatedDashShareMemory(struct net_device *dev); +void DashHwInit(struct net_device *dev); + + +#endif /* _LINUX_R8168_DASH_H */ diff --git a/addons/r8168/src/3.10.108/r8168_fiber.h b/addons/r8168/src/3.10.108/r8168_fiber.h new file mode 100644 index 00000000..2e303fe2 --- /dev/null +++ b/addons/r8168/src/3.10.108/r8168_fiber.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* 
+################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +#ifndef _LINUX_R8168_FIBER_H +#define _LINUX_R8168_FIBER_H + +enum { + FIBER_MODE_NIC_ONLY = 0, + FIBER_MODE_RTL8168H_RTL8211FS, + FIBER_MODE_RTL8168H_MDI_SWITCH_RTL8211FS, + FIBER_MODE_MAX +}; + +enum { + FIBER_STAT_NOT_CHECKED = 0, + FIBER_STAT_CONNECT, + FIBER_STAT_DISCONNECT, + FIBER_STAT_MAX +}; + +#define HW_FIBER_MODE_ENABLED(_M) ((_M)->HwFiberModeVer > 0) + + + +void rtl8168_hw_init_fiber_nic(struct net_device *dev); +void rtl8168_hw_fiber_nic_d3_para(struct net_device *dev); +void rtl8168_hw_fiber_phy_config(struct net_device *dev); +void rtl8168_hw_switch_mdi_to_fiber(struct net_device *dev); +void rtl8168_hw_switch_mdi_to_nic(struct net_device *dev); +unsigned int rtl8168_hw_fiber_link_ok(struct net_device *dev); +void rtl8168_check_fiber_link_status(struct net_device *dev); +void rtl8168_check_hw_fiber_mode_support(struct net_device *dev); +void rtl8168_set_fiber_mode_software_variable(struct net_device *dev); + + +#endif /* _LINUX_R8168_FIBER_H */ diff --git a/addons/r8168/src/3.10.108/r8168_firmware.c b/addons/r8168/src/3.10.108/r8168_firmware.c new file mode 100644 index 00000000..3fe95db9 --- /dev/null +++ b/addons/r8168/src/3.10.108/r8168_firmware.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . 
+# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +#include +#include +#include + +#include "r8168_firmware.h" + +enum rtl_fw_opcode { + PHY_READ = 0x0, + PHY_DATA_OR = 0x1, + PHY_DATA_AND = 0x2, + PHY_BJMPN = 0x3, + PHY_MDIO_CHG = 0x4, + PHY_CLEAR_READCOUNT = 0x7, + PHY_WRITE = 0x8, + PHY_READCOUNT_EQ_SKIP = 0x9, + PHY_COMP_EQ_SKIPN = 0xa, + PHY_COMP_NEQ_SKIPN = 0xb, + PHY_WRITE_PREVIOUS = 0xc, + PHY_SKIPN = 0xd, + PHY_DELAY_MS = 0xe, +}; + +struct fw_info { + u32 magic; + char version[RTL8168_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0) +#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) +#endif +#define FW_OPCODE_SIZE sizeof_field(struct rtl8168_fw_phy_action, code[0]) + +static bool rtl8168_fw_format_ok(struct rtl8168_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl8168_fw_phy_action *pa = &rtl_fw->phy_action; + + if (fw->size < FW_OPCODE_SIZE) + return false; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + return false; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + return false; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + return false; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, fw_info->version, RTL8168_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + return false; + + strscpy(rtl_fw->version, rtl_fw->fw_name, RTL8168_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + + return true; +} + +static bool rtl8168_fw_data_ok(struct rtl8168_fw *rtl_fw) +{ + struct rtl8168_fw_phy_action *pa = &rtl_fw->phy_action; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 val = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + switch (action >> 28) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_MDIO_CHG: + if (val > 1) + goto out; + break; + + case PHY_BJMPN: + if (regno > index) + goto out; + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) + goto out; + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) + goto out; + break; + + default: + dev_err(rtl_fw->dev, "Invalid action 0x%08x\n", action); + return false; + } + } + + return true; +out: + dev_err(rtl_fw->dev, "Out of range of firmware\n"); + return false; +} + +void rtl8168_fw_write_firmware(struct rtl8168_private *tp, struct rtl8168_fw *rtl_fw) +{ + struct rtl8168_fw_phy_action *pa = &rtl_fw->phy_action; + rtl8168_fw_write_t fw_write = rtl_fw->phy_write; + rtl8168_fw_read_t fw_read = rtl_fw->phy_read; + int predata = 0, 
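/* Editor's note: `predata` caches the most recent PHY_READ result so
   that PHY_DATA_OR / PHY_DATA_AND can modify it and PHY_WRITE_PREVIOUS
   can write it back; `count` tallies reads since the last
   PHY_CLEAR_READCOUNT and is what PHY_READCOUNT_EQ_SKIP compares
   against in the opcode loop below. */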
count = 0; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + enum rtl_fw_opcode opcode = action >> 28; + + if (!action) + break; + + switch (opcode) { + case PHY_READ: + predata = fw_read(tp, regno); + count++; + break; + case PHY_DATA_OR: + predata |= data; + break; + case PHY_DATA_AND: + predata &= data; + break; + case PHY_BJMPN: + index -= (regno + 1); + break; + case PHY_MDIO_CHG: + if (data) { + fw_write = rtl_fw->mac_mcu_write; + fw_read = rtl_fw->mac_mcu_read; + } else { + fw_write = rtl_fw->phy_write; + fw_read = rtl_fw->phy_read; + } + + break; + case PHY_CLEAR_READCOUNT: + count = 0; + break; + case PHY_WRITE: + fw_write(tp, regno, data); + break; + case PHY_READCOUNT_EQ_SKIP: + if (count == data) + index++; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + break; + case PHY_WRITE_PREVIOUS: + fw_write(tp, regno, predata); + break; + case PHY_SKIPN: + index += regno; + break; + case PHY_DELAY_MS: + mdelay(data); + break; + } + } +} + +void rtl8168_fw_release_firmware(struct rtl8168_fw *rtl_fw) +{ + release_firmware(rtl_fw->fw); +} + +int rtl8168_fw_request_firmware(struct rtl8168_fw *rtl_fw) +{ + int rc; + + rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev); + if (rc < 0) + goto out; + + if (!rtl8168_fw_format_ok(rtl_fw) || !rtl8168_fw_data_ok(rtl_fw)) { + release_firmware(rtl_fw->fw); + rc = -EINVAL; + goto out; + } + + return 0; +out: + dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n", + rtl_fw->fw_name, rc); + return rc; +} diff --git a/addons/r8168/src/3.10.108/r8168_firmware.h b/addons/r8168/src/3.10.108/r8168_firmware.h new file mode 100644 index 00000000..563280ff --- /dev/null +++ b/addons/r8168/src/3.10.108/r8168_firmware.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek 2.5Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. 
+ ***********************************************************************************/ + +#ifndef _LINUX_RTL8168_FIRMWARE_H +#define _LINUX_RTL8168_FIRMWARE_H + +#include +#include + +struct rtl8168_private; +typedef void (*rtl8168_fw_write_t)(struct rtl8168_private *tp, u16 reg, u16 val); +typedef u32 (*rtl8168_fw_read_t)(struct rtl8168_private *tp, u16 reg); + +#define RTL8168_VER_SIZE 32 + +struct rtl8168_fw { + rtl8168_fw_write_t phy_write; + rtl8168_fw_read_t phy_read; + rtl8168_fw_write_t mac_mcu_write; + rtl8168_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + + char version[RTL8168_VER_SIZE]; + + struct rtl8168_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; +}; + +int rtl8168_fw_request_firmware(struct rtl8168_fw *rtl_fw); +void rtl8168_fw_release_firmware(struct rtl8168_fw *rtl_fw); +void rtl8168_fw_write_firmware(struct rtl8168_private *tp, struct rtl8168_fw *rtl_fw); + +#endif /* _LINUX_RTL8168_FIRMWARE_H */ diff --git a/addons/r8168/src/3.10.108/r8168_n.c b/addons/r8168/src/3.10.108/r8168_n.c new file mode 100644 index 00000000..a44352f3 --- /dev/null +++ b/addons/r8168/src/3.10.108/r8168_n.c @@ -0,0 +1,28743 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +/* + * This driver is modified from r8169.c in Linux kernel 2.6.18 + */ + +/* In Linux 5.4 asm_inline was introduced, but it's not supported by clang. + * Redefine it to just asm to enable successful compilation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) +#include +#include +#endif +#include +#include +#include +#include + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) +#include +#endif +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,37) +#include +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) +#define dev_printk(A,B,fmt,args...) 
printk(A fmt,##args) +#else +#include +#include +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) +#include +#endif + +#include +#include + +#include "r8168.h" +#include "r8168_asf.h" +#include "rtl_eeprom.h" +#include "rtltool.h" +#include "r8168_firmware.h" + +#ifdef ENABLE_R8168_PROCFS +#include +#include +#endif + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168E_4 "rtl_nic/rtl8168e-4.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168EP_1 "rtl_nic/rtl8168ep-1.fw" +#define FIRMWARE_8168EP_2 "rtl_nic/rtl8168ep-2.fw" +#define FIRMWARE_8168EP_3 "rtl_nic/rtl8168ep-3.fw" +#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw" +#define FIRMWARE_8168FP_4 "rtl_nic/rtl8168fp-4.fw" + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +static const int multicast_filter_limit = 32; + +static const struct { + const char *name; + const char *fw_name; +} rtl_chip_fw_infos[] = { + /* PCI-E devices. */ + [CFG_METHOD_1] = {"RTL8168B/8111", }, + [CFG_METHOD_2] = {"RTL8168B/8111", }, + [CFG_METHOD_3] = {"RTL8168B/8111", }, + [CFG_METHOD_4] = {"RTL8168C/8111C", }, + [CFG_METHOD_5] = {"RTL8168C/8111C", }, + [CFG_METHOD_6] = {"RTL8168C/8111C", }, + [CFG_METHOD_7] = {"RTL8168CP/8111CP", }, + [CFG_METHOD_8] = {"RTL8168CP/8111CP", }, + [CFG_METHOD_9] = {"RTL8168D/8111D", FIRMWARE_8168D_1}, + [CFG_METHOD_10] = {"RTL8168D/8111D", FIRMWARE_8168D_2}, + [CFG_METHOD_11] = {"RTL8168DP/8111DP", }, + [CFG_METHOD_12] = {"RTL8168DP/8111DP", }, + [CFG_METHOD_13] = {"RTL8168DP/8111DP", }, + [CFG_METHOD_14] = {"RTL8168E/8111E", FIRMWARE_8168E_1}, + [CFG_METHOD_15] = {"RTL8168E/8111E", FIRMWARE_8168E_2}, + [CFG_METHOD_16] = {"RTL8168E-VL/8111E-VL", FIRMWARE_8168E_3}, + [CFG_METHOD_17] = {"RTL8168E-VL/8111E-VL", FIRMWARE_8168E_4}, + [CFG_METHOD_18] = {"RTL8168F/8111F", FIRMWARE_8168F_1}, + [CFG_METHOD_19] = {"RTL8168F/8111F", FIRMWARE_8168F_2}, + [CFG_METHOD_20] = {"RTL8411", FIRMWARE_8411_1}, + [CFG_METHOD_21] = {"RTL8168G/8111G", FIRMWARE_8168G_2}, + [CFG_METHOD_22] = {"RTL8168G/8111G", }, + [CFG_METHOD_23] = {"RTL8168EP/8111EP", FIRMWARE_8168EP_1}, + [CFG_METHOD_24] = {"RTL8168GU/8111GU", }, + [CFG_METHOD_25] = {"RTL8168GU/8111GU", FIRMWARE_8168G_3}, + [CFG_METHOD_26] = {"8411B", FIRMWARE_8411_2}, + [CFG_METHOD_27] = {"RTL8168EP/8111EP", FIRMWARE_8168EP_2}, + [CFG_METHOD_28] = {"RTL8168EP/8111EP", FIRMWARE_8168EP_3}, + [CFG_METHOD_29] = {"RTL8168H/8111H", FIRMWARE_8168H_1}, + [CFG_METHOD_30] = {"RTL8168H/8111H", FIRMWARE_8168H_2}, + [CFG_METHOD_31] = {"RTL8168FP/8111FP", }, + [CFG_METHOD_32] = {"RTL8168FP/8111FP", FIRMWARE_8168FP_3}, + [CFG_METHOD_33] = {"RTL8168FP/8111FP", FIRMWARE_8168FP_4}, + [CFG_METHOD_DEFAULT] = {"Unknown", }, +}; + +#define _R(NAME,MAC,RCR,MASK, JumFrameSz) \ + { .name = NAME, .mcfg = MAC, .RCR_Cfg = RCR, .RxConfigMask = MASK, .jumbo_frame_sz = JumFrameSz } + +static const struct { + const char *name; + u8 mcfg; + 
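/* Editor's note: mcfg is the CFG_METHOD_* index this row describes;
   the following fields hold the chip-specific RxConfig register bits,
   the mask used to clear the RxConfig bits the chip supports, and the
   largest jumbo frame size the chip accepts. */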
u32 RCR_Cfg; + u32 RxConfigMask; /* Clears the bits supported by this chip */ + u32 jumbo_frame_sz; +} rtl_chip_info[] = { + _R("RTL8168B/8111B", + CFG_METHOD_1, + (Reserved2_data << Reserved2_shift) | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_4k), + + _R("RTL8168B/8111B", + CFG_METHOD_2, + (Reserved2_data << Reserved2_shift) | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_4k), + + _R("RTL8168B/8111B", + CFG_METHOD_3, + (Reserved2_data << Reserved2_shift) | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_4k), + + _R("RTL8168C/8111C", + CFG_METHOD_4, + RxCfg_128_int_en | RxCfg_fet_multi_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_6k), + + _R("RTL8168C/8111C", + CFG_METHOD_5, + RxCfg_128_int_en | RxCfg_fet_multi_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_6k), + + _R("RTL8168C/8111C", + CFG_METHOD_6, + RxCfg_128_int_en | RxCfg_fet_multi_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_6k), + + _R("RTL8168CP/8111CP", + CFG_METHOD_7, + RxCfg_128_int_en | RxCfg_fet_multi_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_6k), + + _R("RTL8168CP/8111CP", + CFG_METHOD_8, + RxCfg_128_int_en | RxCfg_fet_multi_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_6k), + + _R("RTL8168D/8111D", + CFG_METHOD_9, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168D/8111D", + CFG_METHOD_10, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168DP/8111DP", + CFG_METHOD_11, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168DP/8111DP", + CFG_METHOD_12, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168DP/8111DP", + CFG_METHOD_13, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168E/8111E", + CFG_METHOD_14, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168E/8111E", + CFG_METHOD_15, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168E-VL/8111E-VL", + CFG_METHOD_16, + RxCfg_128_int_en | RxEarly_off_V1 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e0080, + Jumbo_Frame_9k), + + _R("RTL8168E-VL/8111E-VL", + CFG_METHOD_17, + RxCfg_128_int_en | RxEarly_off_V1 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168F/8111F", + CFG_METHOD_18, + RxCfg_128_int_en | RxEarly_off_V1 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168F/8111F", + CFG_METHOD_19, + RxCfg_128_int_en | RxEarly_off_V1 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8411", + CFG_METHOD_20, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168G/8111G", + CFG_METHOD_21, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168G/8111G", + CFG_METHOD_22, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168EP/8111EP", + CFG_METHOD_23, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168GU/8111GU", + CFG_METHOD_24, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | 
(RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168GU/8111GU", + CFG_METHOD_25, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("8411B", + CFG_METHOD_26, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168EP/8111EP", + CFG_METHOD_27, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168EP/8111EP", + CFG_METHOD_28, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168H/8111H", + CFG_METHOD_29, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168H/8111H", + CFG_METHOD_30, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168FP/8111FP", + CFG_METHOD_31, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168FP/8111FP", + CFG_METHOD_32, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168FP/8111FP", + CFG_METHOD_33, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("Unknown", + CFG_METHOD_DEFAULT, + (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_1k) +}; +#undef _R + +#ifndef PCI_VENDOR_ID_DLINK +#define PCI_VENDOR_ID_DLINK 0x1186 +#endif + +static struct pci_device_id rtl8168_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x2502), }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x2600), }, + { PCI_VENDOR_ID_DLINK, 0x4300, 0x1186, 0x4b10,}, + {0,}, +}; + +MODULE_DEVICE_TABLE(pci, rtl8168_pci_tbl); + +static int rx_copybreak = 0; +static int use_dac = 1; +static int timer_count = 0x2600; +static int dynamic_aspm_packet_threshold = 10; + +static struct { + u32 msg_enable; +} debug = { -1 }; + +static unsigned int speed_mode = SPEED_1000; +static unsigned int duplex_mode = DUPLEX_FULL; +static unsigned int autoneg_mode = AUTONEG_ENABLE; +static unsigned int advertising_mode = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; +#ifdef CONFIG_ASPM +static int aspm = 1; +#else +static int aspm = 0; +#endif +#ifdef CONFIG_DYNAMIC_ASPM +static int dynamic_aspm = 1; +#else +static int dynamic_aspm = 0; +#endif +#ifdef ENABLE_S5WOL +static int s5wol = 1; +#else +static int s5wol = 0; +#endif +#ifdef ENABLE_S5_KEEP_CURR_MAC +static int s5_keep_curr_mac = 1; +#else +static int s5_keep_curr_mac = 0; +#endif +#ifdef ENABLE_EEE +static int eee_enable = 1; +#else +static int eee_enable = 0; +#endif +#ifdef CONFIG_SOC_LAN +static ulong hwoptimize = HW_PATCH_SOC_LAN; +#else +static ulong hwoptimize = 0; +#endif +#ifdef ENABLE_S0_MAGIC_PACKET +static int s0_magic_packet = 1; +#else +static int s0_magic_packet = 0; +#endif + +MODULE_AUTHOR("Realtek and the Linux r8168 crew "); +MODULE_DESCRIPTION("RealTek RTL-8168 Gigabit Ethernet driver"); + +module_param(speed_mode, uint, 0); 
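/* Editor's note: each tunable above is exported with module_param()
   and documented by the MODULE_PARM_DESC() lines that follow, so the
   compile-time defaults picked by the ENABLE_x/CONFIG_x ifdefs can be
   overridden at load time. A hypothetical invocation (values chosen
   purely for illustration):

       modprobe r8168 aspm=0 eee_enable=0 timer_count=0x1000

   `modinfo r8168` lists the full parameter set with these
   descriptions. */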
+MODULE_PARM_DESC(speed_mode, "force phy operation. Deprecated by ethtool (8)."); + +module_param(duplex_mode, uint, 0); +MODULE_PARM_DESC(duplex_mode, "force phy operation. Deprecated by ethtool (8)."); + +module_param(autoneg_mode, uint, 0); +MODULE_PARM_DESC(autoneg_mode, "force phy operation. Deprecated by ethtool (8)."); + +module_param(advertising_mode, uint, 0); +MODULE_PARM_DESC(advertising_mode, "force phy operation. Deprecated by ethtool (8)."); + +module_param(aspm, int, 0); +MODULE_PARM_DESC(aspm, "Enable ASPM."); + +module_param(dynamic_aspm, int, 0); +MODULE_PARM_DESC(dynamic_aspm, "Enable Software Dynamic ASPM."); + +module_param(s5wol, int, 0); +MODULE_PARM_DESC(s5wol, "Enable Shutdown Wake On Lan."); + +module_param(s5_keep_curr_mac, int, 0); +MODULE_PARM_DESC(s5_keep_curr_mac, "Enable Shutdown Keep Current MAC Address."); + +module_param(rx_copybreak, int, 0); +MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); + +module_param(use_dac, int, 0); +MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); + +module_param(timer_count, int, 0); +MODULE_PARM_DESC(timer_count, "Timer Interrupt Interval."); + +module_param(eee_enable, int, 0); +MODULE_PARM_DESC(eee_enable, "Enable Energy Efficient Ethernet."); + +module_param(hwoptimize, ulong, 0); +MODULE_PARM_DESC(hwoptimize, "Enable HW optimization function."); + +module_param(s0_magic_packet, int, 0); +MODULE_PARM_DESC(s0_magic_packet, "Enable S0 Magic Packet."); + +module_param(dynamic_aspm_packet_threshold, int, 0); +MODULE_PARM_DESC(dynamic_aspm_packet_threshold, "Dynamic ASPM packet threshold."); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) +module_param_named(debug, debug.msg_enable, int, 0); +MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); +#endif//LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + +MODULE_LICENSE("GPL"); +#ifdef ENABLE_USE_FIRMWARE_FILE +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8168E_4); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168EP_1); +MODULE_FIRMWARE(FIRMWARE_8168EP_2); +MODULE_FIRMWARE(FIRMWARE_8168EP_3); +MODULE_FIRMWARE(FIRMWARE_8168H_1); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8168FP_3); +MODULE_FIRMWARE(FIRMWARE_8168FP_4); +#endif + +MODULE_VERSION(RTL8168_VERSION); + +static void rtl8168_sleep_rx_enable(struct net_device *dev); +static void rtl8168_dsm(struct net_device *dev, int dev_state); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) +static void rtl8168_esd_timer(unsigned long __opaque); +#else +static void rtl8168_esd_timer(struct timer_list *t); +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) +static void rtl8168_link_timer(unsigned long __opaque); +#else +static void rtl8168_link_timer(struct timer_list *t); +#endif +static void rtl8168_tx_clear(struct rtl8168_private *tp); +static void rtl8168_rx_clear(struct rtl8168_private *tp); + +static int rtl8168_open(struct net_device *dev); +static netdev_tx_t rtl8168_start_xmit(struct sk_buff *skb, struct net_device *dev); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) +static irqreturn_t rtl8168_interrupt(int irq, void *dev_instance, struct pt_regs *regs); +#else +static irqreturn_t
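/* Editor's note: the split prototypes above exist because the struct
   pt_regs argument was removed from interrupt handlers in kernel
   2.6.19, just as the timer callbacks further up switched from an
   unsigned long cookie to a struct timer_list pointer in 4.14. */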
rtl8168_interrupt(int irq, void *dev_instance); +#endif +static void rtl8168_rx_desc_offset0_init(struct rtl8168_private *, int); +static int rtl8168_init_ring(struct net_device *dev); +static void rtl8168_hw_config(struct net_device *dev); +static void rtl8168_hw_start(struct net_device *dev); +static int rtl8168_close(struct net_device *dev); +static void rtl8168_set_rx_mode(struct net_device *dev); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) +static void rtl8168_tx_timeout(struct net_device *dev, unsigned int txqueue); +#else +static void rtl8168_tx_timeout(struct net_device *dev); +#endif +static struct net_device_stats *rtl8168_get_stats(struct net_device *dev); +static int rtl8168_rx_interrupt(struct net_device *, struct rtl8168_private *, napi_budget); +static int rtl8168_change_mtu(struct net_device *dev, int new_mtu); +static void rtl8168_down(struct net_device *dev); + +static int rtl8168_set_mac_address(struct net_device *dev, void *p); +void rtl8168_rar_set(struct rtl8168_private *tp, uint8_t *addr); +static void rtl8168_desc_addr_fill(struct rtl8168_private *); +static void rtl8168_tx_desc_init(struct rtl8168_private *tp); +static void rtl8168_rx_desc_init(struct rtl8168_private *tp); + +static u16 rtl8168_get_hw_phy_mcu_code_ver(struct rtl8168_private *tp); + +static void rtl8168_hw_reset(struct net_device *dev); + +static void rtl8168_phy_power_up(struct net_device *dev); +static void rtl8168_phy_power_down(struct net_device *dev); +static int rtl8168_set_speed(struct net_device *dev, u8 autoneg, u32 speed, u8 duplex, u32 adv); + +static int rtl8168_set_phy_mcu_patch_request(struct rtl8168_private *tp); +static int rtl8168_clear_phy_mcu_patch_request(struct rtl8168_private *tp); + +#ifdef CONFIG_R8168_NAPI +static int rtl8168_poll(napi_ptr napi, napi_budget budget); +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) +static void rtl8168_reset_task(void *_data); +#else +static void rtl8168_reset_task(struct work_struct *work); +#endif + +static inline struct device *tp_to_dev(struct rtl8168_private *tp) +{ + return &tp->pci_dev->dev; +} + +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,00))) +void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, + u32 legacy_u32) +{ + bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); + dst[0] = legacy_u32; +} + +bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, + const unsigned long *src) +{ + bool retval = true; + + /* TODO: following test will soon always be true */ + if (__ETHTOOL_LINK_MODE_MASK_NBITS > 32) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(ext); + + bitmap_zero(ext, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_fill(ext, 32); + bitmap_complement(ext, ext, __ETHTOOL_LINK_MODE_MASK_NBITS); + if (bitmap_intersects(ext, src, + __ETHTOOL_LINK_MODE_MASK_NBITS)) { + /* src mask goes beyond bit 31 */ + retval = false; + } + } + *legacy_u32 = src[0]; + return retval; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) + +#ifndef LPA_1000FULL +#define LPA_1000FULL 0x0800 +#endif + +#ifndef LPA_1000HALF +#define LPA_1000HALF 0x0400 +#endif + +static inline u32 mii_adv_to_ethtool_adv_t(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_10HALF) + result |= ADVERTISED_10baseT_Half; + if (adv & ADVERTISE_10FULL) + result |= ADVERTISED_10baseT_Full; + if (adv & ADVERTISE_100HALF) + result |= ADVERTISED_100baseT_Half; + if (adv & ADVERTISE_100FULL) + result |= ADVERTISED_100baseT_Full; + if (adv & ADVERTISE_PAUSE_CAP) + result |= ADVERTISED_Pause; + if (adv & 
ADVERTISE_PAUSE_ASYM) + result |= ADVERTISED_Asym_Pause; + + return result; +} + +static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_LPACK) + result |= ADVERTISED_Autoneg; + + return result | mii_adv_to_ethtool_adv_t(lpa); +} + +static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_1000HALF) + result |= ADVERTISED_1000baseT_Half; + if (lpa & LPA_1000FULL) + result |= ADVERTISED_1000baseT_Full; + + return result; +} + +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) +static inline void eth_hw_addr_random(struct net_device *dev) +{ + random_ether_addr(dev->dev_addr); +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) +#undef ethtool_ops +#define ethtool_ops _kc_ethtool_ops + +struct _kc_ethtool_ops { + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + int (*set_settings)(struct net_device *, struct ethtool_cmd *); + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); + void (*get_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + int (*set_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + u32 (*get_rx_csum)(struct net_device *); + int (*set_rx_csum)(struct net_device *, u32); + u32 (*get_tx_csum)(struct net_device *); + int (*set_tx_csum)(struct net_device *, u32); + u32 (*get_sg)(struct net_device *); + int (*set_sg)(struct net_device *, u32); + u32 (*get_tso)(struct net_device *); + int (*set_tso)(struct net_device *, u32); + int (*self_test_count)(struct net_device *); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32 stringset, u8 *); + int (*phys_id)(struct net_device *, u32); + int (*get_stats_count)(struct net_device *); + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, + u64 *); +} *ethtool_ops = NULL; + +#undef SET_ETHTOOL_OPS +#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops)) + +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0) +#ifndef SET_ETHTOOL_OPS +#define SET_ETHTOOL_OPS(netdev,ops) \ + ( (netdev)->ethtool_ops = (ops) ) +#endif //SET_ETHTOOL_OPS +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0) + +//#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) +#ifndef netif_msg_init +#define netif_msg_init _kc_netif_msg_init +/* copied from linux kernel 2.6.20 include/linux/netdevice.h */ +static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) 
* 8)) + return default_msg_enable_bits; + if (debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1 << debug_value) - 1; +} + +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) +static inline void eth_copy_and_sum (struct sk_buff *dest, + const unsigned char *src, + int len, int base) +{ + memcpy (dest->data, src, len); +} +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) +/* copied from linux kernel 2.6.20 /include/linux/time.h */ +/* Parameters used to convert the timespec values: */ +#define MSEC_PER_SEC 1000L + +/* copied from linux kernel 2.6.20 /include/linux/jiffies.h */ +/* + * Change timeval to jiffies, trying to avoid the + * most obvious overflows.. + * + * And some not so obvious. + * + * Note that we don't want to return MAX_LONG, because + * for various timeout reasons we often end up having + * to wait "jiffies+1" in order to guarantee that we wait + * at _least_ "jiffies" - so "jiffies+1" had better still + * be positive. + */ +#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1) + +/* + * Convert jiffies to milliseconds and back. + * + * Avoid unnecessary multiplications/divisions in the + * two most common HZ cases: + */ +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); +#else + return (j * MSEC_PER_SEC) / HZ; +#endif +} + +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return m * (HZ / MSEC_PER_SEC); +#else + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; +#endif +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) + + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) + +/* copied from linux kernel 2.6.12.6 /include/linux/pm.h */ +typedef int __bitwise pci_power_t; + +/* copied from linux kernel 2.6.12.6 /include/linux/pci.h */ +typedef u32 __bitwise pm_message_t; + +#define PCI_D0 ((pci_power_t __force) 0) +#define PCI_D1 ((pci_power_t __force) 1) +#define PCI_D2 ((pci_power_t __force) 2) +#define PCI_D3hot ((pci_power_t __force) 3) +#define PCI_D3cold ((pci_power_t __force) 4) +#define PCI_POWER_ERROR ((pci_power_t __force) -1) + +/* copied from linux kernel 2.6.12.6 /drivers/pci/pci.c */ +/** + * pci_choose_state - Choose the power state of a PCI device + * @dev: PCI device to be suspended + * @state: target sleep state for the whole system. This is the value + * that is passed to suspend() function. + * + * Returns PCI power state suitable for given device and given system + * message. 
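+ *
+ * A worked example from the fallback below: a device with no PCI PM
+ * capability is always reported as PCI_D0; system state 0 maps to
+ * PCI_D0 and state 3 (suspend) to PCI_D3hot, while any other value is
+ * logged and falls back to PCI_D0.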
+ */ + +pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) +{ + if (!pci_find_capability(dev, PCI_CAP_ID_PM)) + return PCI_D0; + + switch (state) { + case 0: + return PCI_D0; + case 3: + return PCI_D3hot; + default: + printk("They asked me for state %d\n", state); +// BUG(); + } + return PCI_D0; +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) +/** + * msleep_interruptible - sleep waiting for waitqueue interruptions + * @msecs: Time in milliseconds to sleep for + */ +#define msleep_interruptible _kc_msleep_interruptible +unsigned long _kc_msleep_interruptible(unsigned int msecs) +{ + unsigned long timeout = _kc_msecs_to_jiffies(msecs); + + while (timeout && !signal_pending(current)) { + set_current_state(TASK_INTERRUPTIBLE); + timeout = schedule_timeout(timeout); + } + return _kc_jiffies_to_msecs(timeout); +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) +/* copied from linux kernel 2.6.20 include/linux/sched.h */ +#ifndef __sched +#define __sched __attribute__((__section__(".sched.text"))) +#endif + +/* copied from linux kernel 2.6.20 kernel/timer.c */ +signed long __sched schedule_timeout_uninterruptible(signed long timeout) +{ + __set_current_state(TASK_UNINTERRUPTIBLE); + return schedule_timeout(timeout); +} + +/* copied from linux kernel 2.6.20 include/linux/mii.h */ +#undef if_mii +#define if_mii _kc_if_mii +static inline struct mii_ioctl_data *if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) + +struct rtl8168_counters { + u64 tx_packets; + u64 rx_packets; + u64 tx_errors; + u32 rx_errors; + u16 rx_missed; + u16 align_errors; + u32 tx_one_collision; + u32 tx_multi_collision; + u64 rx_unicast; + u64 rx_broadcast; + u32 rx_multicast; + u16 tx_aborted; + u16 tx_underrun; +}; + +#ifdef ENABLE_R8168_PROCFS +/**************************************************************************** +* -----------------------------PROCFS STUFF------------------------- +***************************************************************************** +*/ + +static struct proc_dir_entry *rtl8168_proc; +static int proc_init_num = 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) +static int proc_get_driver_variable(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + seq_puts(m, "\nDump Driver Variable\n"); + + spin_lock_irqsave(&tp->lock, flags); + seq_puts(m, "Variable\tValue\n----------\t-----\n"); + seq_printf(m, "MODULENAME\t%s\n", MODULENAME); + seq_printf(m, "driver version\t%s\n", RTL8168_VERSION); + seq_printf(m, "chipset\t%d\n", tp->chipset); + seq_printf(m, "chipset_name\t%s\n", rtl_chip_info[tp->chipset].name); + seq_printf(m, "mtu\t%d\n", dev->mtu); + seq_printf(m, "NUM_RX_DESC\t0x%x\n", NUM_RX_DESC); + seq_printf(m, "cur_rx\t0x%x\n", tp->cur_rx); + seq_printf(m, "dirty_rx\t0x%x\n", tp->dirty_rx); + seq_printf(m, "NUM_TX_DESC\t0x%x\n", NUM_TX_DESC); + seq_printf(m, "cur_tx\t0x%x\n", tp->cur_tx); + seq_printf(m, "dirty_tx\t0x%x\n", tp->dirty_tx); + seq_printf(m, "rx_buf_sz\t0x%x\n", tp->rx_buf_sz); + seq_printf(m, "esd_flag\t0x%x\n", tp->esd_flag); + seq_printf(m, "pci_cfg_is_read\t0x%x\n", tp->pci_cfg_is_read); + seq_printf(m, "rtl8168_rx_config\t0x%x\n", tp->rtl8168_rx_config); + seq_printf(m, "cp_cmd\t0x%x\n", tp->cp_cmd); + seq_printf(m, "intr_mask\t0x%x\n", tp->intr_mask); + 
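+ /* Still under tp->lock with IRQs disabled (taken above), so this
+  * whole table is one consistent snapshot of the driver state. */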
seq_printf(m, "timer_intr_mask\t0x%x\n", tp->timer_intr_mask); + seq_printf(m, "wol_enabled\t0x%x\n", tp->wol_enabled); + seq_printf(m, "wol_opts\t0x%x\n", tp->wol_opts); + seq_printf(m, "efuse_ver\t0x%x\n", tp->efuse_ver); + seq_printf(m, "eeprom_type\t0x%x\n", tp->eeprom_type); + seq_printf(m, "autoneg\t0x%x\n", tp->autoneg); + seq_printf(m, "duplex\t0x%x\n", tp->duplex); + seq_printf(m, "speed\t%d\n", tp->speed); + seq_printf(m, "advertising\t0x%x\n", tp->advertising); + seq_printf(m, "eeprom_len\t0x%x\n", tp->eeprom_len); + seq_printf(m, "cur_page\t0x%x\n", tp->cur_page); + seq_printf(m, "bios_setting\t0x%x\n", tp->bios_setting); + seq_printf(m, "features\t0x%x\n", tp->features); + seq_printf(m, "org_pci_offset_99\t0x%x\n", tp->org_pci_offset_99); + seq_printf(m, "org_pci_offset_180\t0x%x\n", tp->org_pci_offset_180); + seq_printf(m, "issue_offset_99_event\t0x%x\n", tp->issue_offset_99_event); + seq_printf(m, "org_pci_offset_80\t0x%x\n", tp->org_pci_offset_80); + seq_printf(m, "org_pci_offset_81\t0x%x\n", tp->org_pci_offset_81); + seq_printf(m, "use_timer_interrrupt\t0x%x\n", tp->use_timer_interrrupt); + seq_printf(m, "HwIcVerUnknown\t0x%x\n", tp->HwIcVerUnknown); + seq_printf(m, "NotWrRamCodeToMicroP\t0x%x\n", tp->NotWrRamCodeToMicroP); + seq_printf(m, "NotWrMcuPatchCode\t0x%x\n", tp->NotWrMcuPatchCode); + seq_printf(m, "HwHasWrRamCodeToMicroP\t0x%x\n", tp->HwHasWrRamCodeToMicroP); + seq_printf(m, "sw_ram_code_ver\t0x%x\n", tp->sw_ram_code_ver); + seq_printf(m, "hw_ram_code_ver\t0x%x\n", tp->hw_ram_code_ver); + seq_printf(m, "rtk_enable_diag\t0x%x\n", tp->rtk_enable_diag); + seq_printf(m, "ShortPacketSwChecksum\t0x%x\n", tp->ShortPacketSwChecksum); + seq_printf(m, "UseSwPaddingShortPkt\t0x%x\n", tp->UseSwPaddingShortPkt); + seq_printf(m, "RequireAdcBiasPatch\t0x%x\n", tp->RequireAdcBiasPatch); + seq_printf(m, "AdcBiasPatchIoffset\t0x%x\n", tp->AdcBiasPatchIoffset); + seq_printf(m, "RequireAdjustUpsTxLinkPulseTiming\t0x%x\n", tp->RequireAdjustUpsTxLinkPulseTiming); + seq_printf(m, "SwrCnt1msIni\t0x%x\n", tp->SwrCnt1msIni); + seq_printf(m, "HwSuppNowIsOobVer\t0x%x\n", tp->HwSuppNowIsOobVer); + seq_printf(m, "HwFiberModeVer\t0x%x\n", tp->HwFiberModeVer); + seq_printf(m, "HwFiberStat\t0x%x\n", tp->HwFiberStat); + seq_printf(m, "HwSwitchMdiToFiber\t0x%x\n", tp->HwSwitchMdiToFiber); + seq_printf(m, "HwSuppSerDesPhyVer\t0x%x\n", tp->HwSuppSerDesPhyVer); + seq_printf(m, "NicCustLedValue\t0x%x\n", tp->NicCustLedValue); + seq_printf(m, "RequiredSecLanDonglePatch\t0x%x\n", tp->RequiredSecLanDonglePatch); + seq_printf(m, "HwSuppDashVer\t0x%x\n", tp->HwSuppDashVer); + seq_printf(m, "DASH\t0x%x\n", tp->DASH); + seq_printf(m, "dash_printer_enabled\t0x%x\n", tp->dash_printer_enabled); + seq_printf(m, "HwSuppKCPOffloadVer\t0x%x\n", tp->HwSuppKCPOffloadVer); + seq_printf(m, "speed_mode\t0x%x\n", speed_mode); + seq_printf(m, "duplex_mode\t0x%x\n", duplex_mode); + seq_printf(m, "autoneg_mode\t0x%x\n", autoneg_mode); + seq_printf(m, "advertising_mode\t0x%x\n", advertising_mode); + seq_printf(m, "aspm\t0x%x\n", aspm); + seq_printf(m, "s5wol\t0x%x\n", s5wol); + seq_printf(m, "s5_keep_curr_mac\t0x%x\n", s5_keep_curr_mac); + seq_printf(m, "eee_enable\t0x%x\n", tp->eee_enabled); + seq_printf(m, "hwoptimize\t0x%lx\n", hwoptimize); + seq_printf(m, "proc_init_num\t0x%x\n", proc_init_num); + seq_printf(m, "s0_magic_packet\t0x%x\n", s0_magic_packet); + seq_printf(m, "HwSuppMagicPktVer\t0x%x\n", tp->HwSuppMagicPktVer); + seq_printf(m, "HwSuppUpsVer\t0x%x\n", tp->HwSuppUpsVer); + seq_printf(m, "HwSuppEsdVer\t0x%x\n", 
tp->HwSuppEsdVer); + seq_printf(m, "HwSuppCheckPhyDisableModeVer\t0x%x\n", tp->HwSuppCheckPhyDisableModeVer); + seq_printf(m, "HwPkgDet\t0x%x\n", tp->HwPkgDet); + seq_printf(m, "random_mac\t0x%x\n", tp->random_mac); + seq_printf(m, "org_mac_addr\t%pM\n", tp->org_mac_addr); +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) + seq_printf(m, "perm_addr\t%pM\n", dev->perm_addr); +#endif + seq_printf(m, "dev_addr\t%pM\n", dev->dev_addr); + spin_unlock_irqrestore(&tp->lock, flags); + + seq_putc(m, '\n'); + return 0; +} + +static int proc_get_tally_counter(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + struct rtl8168_private *tp = netdev_priv(dev); + struct rtl8168_counters *counters; + dma_addr_t paddr; + u32 cmd; + u32 WaitCnt; + unsigned long flags; + + seq_puts(m, "\nDump Tally Counter\n"); + + //ASSERT_RTNL(); + + counters = tp->tally_vaddr; + paddr = tp->tally_paddr; + if (!counters) { + seq_puts(m, "\nDump Tally Counter Fail\n"); + return 0; + } + + spin_lock_irqsave(&tp->lock, flags); + RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | CounterDump); + + WaitCnt = 0; + while (RTL_R32(tp, CounterAddrLow) & CounterDump) { + udelay(10); + + WaitCnt++; + if (WaitCnt > 20) + break; + } + spin_unlock_irqrestore(&tp->lock, flags); + + seq_puts(m, "Statistics\tValue\n----------\t-----\n"); + seq_printf(m, "tx_packets\t%lld\n", le64_to_cpu(counters->tx_packets)); + seq_printf(m, "rx_packets\t%lld\n", le64_to_cpu(counters->rx_packets)); + seq_printf(m, "tx_errors\t%lld\n", le64_to_cpu(counters->tx_errors)); + seq_printf(m, "rx_errors\t%d\n", le32_to_cpu(counters->rx_errors)); + seq_printf(m, "rx_missed\t%d\n", le16_to_cpu(counters->rx_missed)); + seq_printf(m, "align_errors\t%d\n", le16_to_cpu(counters->align_errors)); + seq_printf(m, "tx_one_collision\t%d\n", le32_to_cpu(counters->tx_one_collision)); + seq_printf(m, "tx_multi_collision\t%d\n", le32_to_cpu(counters->tx_multi_collision)); + seq_printf(m, "rx_unicast\t%lld\n", le64_to_cpu(counters->rx_unicast)); + seq_printf(m, "rx_broadcast\t%lld\n", le64_to_cpu(counters->rx_broadcast)); + seq_printf(m, "rx_multicast\t%d\n", le32_to_cpu(counters->rx_multicast)); + seq_printf(m, "tx_aborted\t%d\n", le16_to_cpu(counters->tx_aborted)); + seq_printf(m, "tx_underrun\t%d\n", le16_to_cpu(counters->tx_underrun)); + + seq_putc(m, '\n'); + return 0; +} + +static int proc_get_registers(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + int i, n, max = R8168_MAC_REGS_SIZE; + u8 byte_rd; + struct rtl8168_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + + seq_puts(m, "\nDump MAC Registers\n"); + seq_puts(m, "Offset\tValue\n------\t-----\n"); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + seq_printf(m, "\n0x%02x:\t", n); + + for (i = 0; i < 16 && n < max; i++, n++) { + byte_rd = readb(ioaddr + n); + seq_printf(m, "%02x ", byte_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + seq_putc(m, '\n'); + return 0; +} + +static int proc_get_pcie_phy(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + int i, n, max = R8168_EPHY_REGS_SIZE/2; + u16 word_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + seq_puts(m, "\nDump PCIE PHY\n"); + seq_puts(m, "\nOffset\tValue\n------\t-----\n "); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + seq_printf(m, "\n0x%02x:\t", n); + + for 
(i = 0; i < 8 && n < max; i++, n++) { + word_rd = rtl8168_ephy_read(tp, n); + seq_printf(m, "%04x ", word_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + seq_putc(m, '\n'); + return 0; +} + +static int proc_get_eth_phy(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + int i, n, max = R8168_PHY_REGS_SIZE/2; + u16 word_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + seq_puts(m, "\nDump Ethernet PHY\n"); + seq_puts(m, "\nOffset\tValue\n------\t-----\n "); + + spin_lock_irqsave(&tp->lock, flags); + seq_puts(m, "\n####################page 0##################\n "); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + for (n = 0; n < max;) { + seq_printf(m, "\n0x%02x:\t", n); + + for (i = 0; i < 8 && n < max; i++, n++) { + word_rd = rtl8168_mdio_read(tp, n); + seq_printf(m, "%04x ", word_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + seq_putc(m, '\n'); + return 0; +} + +static int proc_get_extended_registers(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + int i, n, max = R8168_ERI_REGS_SIZE; + u32 dword_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + /* RTL8168B does not support Extend GMAC */ + seq_puts(m, "\nNot Support Dump Extended Registers\n"); + return 0; + } + + seq_puts(m, "\nDump Extended Registers\n"); + seq_puts(m, "\nOffset\tValue\n------\t-----\n "); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + seq_printf(m, "\n0x%02x:\t", n); + + for (i = 0; i < 4 && n < max; i++, n+=4) { + dword_rd = rtl8168_eri_read(tp, n, 4, ERIAR_ExGMAC); + seq_printf(m, "%08x ", dword_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + seq_putc(m, '\n'); + return 0; +} + +static int proc_get_pci_registers(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + int i, n, max = R8168_PCI_REGS_SIZE; + u32 dword_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + seq_puts(m, "\nDump PCI Registers\n"); + seq_puts(m, "\nOffset\tValue\n------\t-----\n "); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + seq_printf(m, "\n0x%03x:\t", n); + + for (i = 0; i < 4 && n < max; i++, n+=4) { + pci_read_config_dword(tp->pci_dev, n, &dword_rd); + seq_printf(m, "%08x ", dword_rd); + } + } + + n = 0x110; + pci_read_config_dword(tp->pci_dev, n, &dword_rd); + seq_printf(m, "\n0x%03x:\t%08x ", n, dword_rd); + n = 0x70c; + pci_read_config_dword(tp->pci_dev, n, &dword_rd); + seq_printf(m, "\n0x%03x:\t%08x ", n, dword_rd); + + spin_unlock_irqrestore(&tp->lock, flags); + + seq_putc(m, '\n'); + return 0; +} +#else + +static int proc_get_driver_variable(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + int len = 0; + + len += snprintf(page + len, count - len, + "\nDump Driver Driver\n"); + + spin_lock_irqsave(&tp->lock, flags); + len += snprintf(page + len, count - len, + "Variable\tValue\n----------\t-----\n"); + + len += snprintf(page + len, count - len, + "MODULENAME\t%s\n" + "driver version\t%s\n" + "chipset\t%d\n" + "chipset_name\t%s\n" + "mtu\t%d\n" + "NUM_RX_DESC\t0x%x\n" + "cur_rx\t0x%x\n" + "dirty_rx\t0x%x\n" + "NUM_TX_DESC\t0x%x\n" + "cur_tx\t0x%x\n" + "dirty_tx\t0x%x\n" + "rx_buf_sz\t0x%x\n" + "esd_flag\t0x%x\n" + "pci_cfg_is_read\t0x%x\n" + "rtl8168_rx_config\t0x%x\n" + "cp_cmd\t0x%x\n" + 
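+ /* The concatenated format string continues below; it has to stay in
+  * lockstep, one specifier per entry, with the long argument list
+  * that follows it. */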
"intr_mask\t0x%x\n" + "timer_intr_mask\t0x%x\n" + "wol_enabled\t0x%x\n" + "wol_opts\t0x%x\n" + "efuse_ver\t0x%x\n" + "eeprom_type\t0x%x\n" + "autoneg\t0x%x\n" + "duplex\t0x%x\n" + "speed\t%d\n" + "advertising\t0x%x\n" + "eeprom_len\t0x%x\n" + "cur_page\t0x%x\n" + "bios_setting\t0x%x\n" + "features\t0x%x\n" + "org_pci_offset_99\t0x%x\n" + "org_pci_offset_180\t0x%x\n" + "issue_offset_99_event\t0x%x\n" + "org_pci_offset_80\t0x%x\n" + "org_pci_offset_81\t0x%x\n" + "use_timer_interrrupt\t0x%x\n" + "HwIcVerUnknown\t0x%x\n" + "NotWrRamCodeToMicroP\t0x%x\n" + "NotWrMcuPatchCode\t0x%x\n" + "HwHasWrRamCodeToMicroP\t0x%x\n" + "sw_ram_code_ver\t0x%x\n" + "hw_ram_code_ver\t0x%x\n" + "rtk_enable_diag\t0x%x\n" + "ShortPacketSwChecksum\t0x%x\n" + "UseSwPaddingShortPkt\t0x%x\n" + "RequireAdcBiasPatch\t0x%x\n" + "AdcBiasPatchIoffset\t0x%x\n" + "RequireAdjustUpsTxLinkPulseTiming\t0x%x\n" + "SwrCnt1msIni\t0x%x\n" + "HwSuppNowIsOobVer\t0x%x\n" + "HwFiberModeVer\t0x%x\n" + "HwFiberStat\t0x%x\n" + "HwSwitchMdiToFiber\t0x%x\n" + "HwSuppSerDesPhyVer\t0x%x\n" + "NicCustLedValue\t0x%x\n" + "RequiredSecLanDonglePatch\t0x%x\n" + "HwSuppDashVer\t0x%x\n" + "DASH\t0x%x\n" + "dash_printer_enabled\t0x%x\n" + "HwSuppKCPOffloadVer\t0x%x\n" + "speed_mode\t0x%x\n" + "duplex_mode\t0x%x\n" + "autoneg_mode\t0x%x\n" + "advertising_mode\t0x%x\n" + "aspm\t0x%x\n" + "s5wol\t0x%x\n" + "s5_keep_curr_mac\t0x%x\n" + "eee_enable\t0x%x\n" + "hwoptimize\t0x%lx\n" + "proc_init_num\t0x%x\n" + "s0_magic_packet\t0x%x\n" + "HwSuppMagicPktVer\t0x%x\n" + "HwSuppUpsVer\t0x%x\n" + "HwSuppEsdVer\t0x%x\n" + "HwSuppCheckPhyDisableModeVer\t0x%x\n" + "HwPkgDet\t0x%x\n" + "random_mac\t0x%x\n" + "org_mac_addr\t%pM\n" +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) + "perm_addr\t%pM\n" +#endif + "dev_addr\t%pM\n", + MODULENAME, + RTL8168_VERSION, + tp->chipset, + rtl_chip_info[tp->chipset].name, + dev->mtu, + NUM_RX_DESC, + tp->cur_rx, + tp->dirty_rx, + NUM_TX_DESC, + tp->cur_tx, + tp->dirty_tx, + tp->rx_buf_sz, + tp->esd_flag, + tp->pci_cfg_is_read, + tp->rtl8168_rx_config, + tp->cp_cmd, + tp->intr_mask, + tp->timer_intr_mask, + tp->wol_enabled, + tp->wol_opts, + tp->efuse_ver, + tp->eeprom_type, + tp->autoneg, + tp->duplex, + tp->speed, + tp->advertising, + tp->eeprom_len, + tp->cur_page, + tp->bios_setting, + tp->features, + tp->org_pci_offset_99, + tp->org_pci_offset_180, + tp->issue_offset_99_event, + tp->org_pci_offset_80, + tp->org_pci_offset_81, + tp->use_timer_interrrupt, + tp->HwIcVerUnknown, + tp->NotWrRamCodeToMicroP, + tp->NotWrMcuPatchCode, + tp->HwHasWrRamCodeToMicroP, + tp->sw_ram_code_ver, + tp->hw_ram_code_ver, + tp->rtk_enable_diag, + tp->ShortPacketSwChecksum, + tp->UseSwPaddingShortPkt, + tp->RequireAdcBiasPatch, + tp->AdcBiasPatchIoffset, + tp->RequireAdjustUpsTxLinkPulseTiming, + tp->SwrCnt1msIni, + tp->HwSuppNowIsOobVer, + tp->HwFiberModeVer, + tp->HwFiberStat, + tp->HwSwitchMdiToFiber, + tp->HwSuppSerDesPhyVer, + tp->NicCustLedValue, + tp->RequiredSecLanDonglePatch, + tp->HwSuppDashVer, + tp->DASH, + tp->dash_printer_enabled, + tp->HwSuppKCPOffloadVer, + speed_mode, + duplex_mode, + autoneg_mode, + advertising_mode, + aspm, + s5wol, + s5_keep_curr_mac, + tp->eee_enabled, + hwoptimize, + proc_init_num, + s0_magic_packet, + tp->HwSuppMagicPktVer, + tp->HwSuppUpsVer, + tp->HwSuppEsdVer, + tp->HwSuppCheckPhyDisableModeVer, + tp->HwPkgDet, + tp->random_mac, + tp->org_mac_addr, +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) + dev->perm_addr, +#endif + dev->dev_addr + ); + spin_unlock_irqrestore(&tp->lock, flags); + + len += 
snprintf(page + len, count - len, "\n"); + + *eof = 1; + return len; +} + +static int proc_get_tally_counter(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + struct rtl8168_private *tp = netdev_priv(dev); + struct rtl8168_counters *counters; + dma_addr_t paddr; + u32 cmd; + u32 WaitCnt; + unsigned long flags; + int len = 0; + + len += snprintf(page + len, count - len, + "\nDump Tally Counter\n"); + + //ASSERT_RTNL(); + + counters = tp->tally_vaddr; + paddr = tp->tally_paddr; + if (!counters) { + len += snprintf(page + len, count - len, + "\nDump Tally Counter Fail\n"); + goto out; + } + + spin_lock_irqsave(&tp->lock, flags); + RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | CounterDump); + + WaitCnt = 0; + while (RTL_R32(tp, CounterAddrLow) & CounterDump) { + udelay(10); + + WaitCnt++; + if (WaitCnt > 20) + break; + } + spin_unlock_irqrestore(&tp->lock, flags); + + len += snprintf(page + len, count - len, + "Statistics\tValue\n----------\t-----\n"); + + len += snprintf(page + len, count - len, + "tx_packets\t%lld\n" + "rx_packets\t%lld\n" + "tx_errors\t%lld\n" + "rx_errors\t%d\n" + "rx_missed\t%d\n" + "align_errors\t%d\n" + "tx_one_collision\t%d\n" + "tx_multi_collision\t%d\n" + "rx_unicast\t%lld\n" + "rx_broadcast\t%lld\n" + "rx_multicast\t%d\n" + "tx_aborted\t%d\n" + "tx_underrun\t%d\n", + le64_to_cpu(counters->tx_packets), + le64_to_cpu(counters->rx_packets), + le64_to_cpu(counters->tx_errors), + le32_to_cpu(counters->rx_errors), + le16_to_cpu(counters->rx_missed), + le16_to_cpu(counters->align_errors), + le32_to_cpu(counters->tx_one_collision), + le32_to_cpu(counters->tx_multi_collision), + le64_to_cpu(counters->rx_unicast), + le64_to_cpu(counters->rx_broadcast), + le32_to_cpu(counters->rx_multicast), + le16_to_cpu(counters->tx_aborted), + le16_to_cpu(counters->tx_underrun) + ); + + len += snprintf(page + len, count - len, "\n"); +out: + *eof = 1; + return len; +} + +static int proc_get_registers(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + int i, n, max = R8168_MAC_REGS_SIZE; + u8 byte_rd; + struct rtl8168_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + int len = 0; + + len += snprintf(page + len, count - len, + "\nDump MAC Registers\n" + "Offset\tValue\n------\t-----\n"); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + len += snprintf(page + len, count - len, + "\n0x%02x:\t", + n); + + for (i = 0; i < 16 && n < max; i++, n++) { + byte_rd = readb(ioaddr + n); + len += snprintf(page + len, count - len, + "%02x ", + byte_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + len += snprintf(page + len, count - len, "\n"); + + *eof = 1; + return len; +} + +static int proc_get_pcie_phy(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + int i, n, max = R8168_EPHY_REGS_SIZE/2; + u16 word_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + int len = 0; + + len += snprintf(page + len, count - len, + "\nDump PCIE PHY\n" + "Offset\tValue\n------\t-----\n"); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + len += snprintf(page + len, count - len, + "\n0x%02x:\t", + n); + + for (i = 0; i < 8 && n < max; i++, n++) { + word_rd = rtl8168_ephy_read(tp, n); + len += 
snprintf(page + len, count - len, + "%04x ", + word_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + len += snprintf(page + len, count - len, "\n"); + + *eof = 1; + return len; +} + +static int proc_get_eth_phy(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + int i, n, max = R8168_PHY_REGS_SIZE/2; + u16 word_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + int len = 0; + + len += snprintf(page + len, count - len, + "\nDump Ethernet PHY\n" + "Offset\tValue\n------\t-----\n"); + + spin_lock_irqsave(&tp->lock, flags); + len += snprintf(page + len, count - len, + "\n####################page 0##################\n"); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + for (n = 0; n < max;) { + len += snprintf(page + len, count - len, + "\n0x%02x:\t", + n); + + for (i = 0; i < 8 && n < max; i++, n++) { + word_rd = rtl8168_mdio_read(tp, n); + len += snprintf(page + len, count - len, + "%04x ", + word_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + len += snprintf(page + len, count - len, "\n"); + + *eof = 1; + return len; +} + +static int proc_get_extended_registers(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + int i, n, max = R8168_ERI_REGS_SIZE; + u32 dword_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + int len = 0; + + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + /* RTL8168B does not support Extend GMAC */ + len += snprintf(page + len, count - len, + "\nNot Support Dump Extended Registers\n"); + + goto out; + } + + len += snprintf(page + len, count - len, + "\nDump Extended Registers\n" + "Offset\tValue\n------\t-----\n"); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + len += snprintf(page + len, count - len, + "\n0x%02x:\t", + n); + + for (i = 0; i < 4 && n < max; i++, n+=4) { + dword_rd = rtl8168_eri_read(tp, n, 4, ERIAR_ExGMAC); + len += snprintf(page + len, count - len, + "%08x ", + dword_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + len += snprintf(page + len, count - len, "\n"); +out: + *eof = 1; + return len; +} + +static int proc_get_pci_registers(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + int i, n, max = R8168_PCI_REGS_SIZE; + u32 dword_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + int len = 0; + + len += snprintf(page + len, count - len, + "\nDump PCI Registers\n" + "Offset\tValue\n------\t-----\n"); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + len += snprintf(page + len, count - len, + "\n0x%03x:\t", + n); + + for (i = 0; i < 4 && n < max; i++, n+=4) { + pci_read_config_dword(tp->pci_dev, n, &dword_rd); + len += snprintf(page + len, count - len, + "%08x ", + dword_rd); + } + } + + n = 0x110; + pci_read_config_dword(tp->pci_dev, n, &dword_rd); + len += snprintf(page + len, count - len, + "\n0x%03x:\t%08x ", + n, + dword_rd); + n = 0x70c; + pci_read_config_dword(tp->pci_dev, n, &dword_rd); + len += snprintf(page + len, count - len, + "\n0x%03x:\t%08x ", + n, + dword_rd); + spin_unlock_irqrestore(&tp->lock, flags); + + len += snprintf(page + len, count - len, "\n"); + + *eof = 1; + return len; +} +#endif +static void rtl8168_proc_module_init(void) +{ + //create /proc/net/r8168 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) + rtl8168_proc = proc_mkdir(MODULENAME, 
init_net.proc_net); +#else + rtl8168_proc = proc_mkdir(MODULENAME, proc_net); +#endif + if (!rtl8168_proc) + dprintk("cannot create %s proc entry \n", MODULENAME); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) +/* + * seq_file wrappers for procfile show routines. + */ +static int rtl8168_proc_open(struct inode *inode, struct file *file) +{ + struct net_device *dev = proc_get_parent_data(inode); + int (*show)(struct seq_file *, void *) = PDE_DATA(inode); + + return single_open(file, show, dev); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) +static const struct proc_ops rtl8168_proc_fops = { + .proc_open = rtl8168_proc_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; +#else +static const struct file_operations rtl8168_proc_fops = { + .open = rtl8168_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +#endif + +/* + * Table of proc files we need to create. + */ +struct rtl8168_proc_file { + char name[12]; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) + int (*show)(struct seq_file *, void *); +#else + int (*show)(char *, char **, off_t, int, int *, void *); +#endif +}; + +static const struct rtl8168_proc_file rtl8168_proc_files[] = { + { "driver_var", &proc_get_driver_variable }, + { "tally", &proc_get_tally_counter }, + { "registers", &proc_get_registers }, + { "pcie_phy", &proc_get_pcie_phy }, + { "eth_phy", &proc_get_eth_phy }, + { "ext_regs", &proc_get_extended_registers }, + { "pci_regs", &proc_get_pci_registers }, + { "" } +}; + +static void rtl8168_proc_init(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + const struct rtl8168_proc_file *f; + struct proc_dir_entry *dir; + + if (rtl8168_proc && !tp->proc_dir) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) + dir = proc_mkdir_data(dev->name, 0, rtl8168_proc, dev); + if (!dir) { + printk("Unable to initialize /proc/net/%s/%s\n", + MODULENAME, dev->name); + return; + } + + tp->proc_dir = dir; + proc_init_num++; + + for (f = rtl8168_proc_files; f->name[0]; f++) { + if (!proc_create_data(f->name, S_IFREG | S_IRUGO, dir, + &rtl8168_proc_fops, f->show)) { + printk("Unable to initialize " + "/proc/net/%s/%s/%s\n", + MODULENAME, dev->name, f->name); + return; + } + } +#else + dir = proc_mkdir(dev->name, rtl8168_proc); + if (!dir) { + printk("Unable to initialize /proc/net/%s/%s\n", + MODULENAME, dev->name); + return; + } + + tp->proc_dir = dir; + proc_init_num++; + + for (f = rtl8168_proc_files; f->name[0]; f++) { + if (!create_proc_read_entry(f->name, S_IFREG | S_IRUGO, + dir, f->show, dev)) { + printk("Unable to initialize " + "/proc/net/%s/%s/%s\n", + MODULENAME, dev->name, f->name); + return; + } + } +#endif + } +} + +static void rtl8168_proc_remove(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (tp->proc_dir) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) + remove_proc_subtree(dev->name, rtl8168_proc); + proc_init_num--; + +#else + const struct rtl8168_proc_file *f; + struct rtl8168_private *tp = netdev_priv(dev); + + for (f = rtl8168_proc_files; f->name[0]; f++) + remove_proc_entry(f->name, tp->proc_dir); + + remove_proc_entry(dev->name, rtl8168_proc); + proc_init_num--; +#endif + tp->proc_dir = NULL; + } +} + +#endif //ENABLE_R8168_PROCFS + +static inline u16 map_phy_ocp_addr(u16 PageNum, u8 RegNum) +{ + u16 OcpPageNum = 0; + u8 OcpRegNum = 0; + u16 OcpPhyAddress = 0; + + if ( PageNum == 0 ) { + OcpPageNum = OCP_STD_PHY_BASE_PAGE + ( RegNum / 8 ); + 
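+ /*
+  * Standard MII page 0 registers are folded into the OCP address
+  * space eight at a time.  As a worked example (assuming the usual
+  * OCP_STD_PHY_BASE_PAGE value of 0x0A40), page 0 register 5 gives
+  * OcpPageNum = 0x0A40 and OcpRegNum = 0x15; after the shift and
+  * rebase below that becomes OCP address 0xA40A.
+  */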
OcpRegNum = 0x10 + ( RegNum % 8 ); + } else { + OcpPageNum = PageNum; + OcpRegNum = RegNum; + } + + OcpPageNum <<= 4; + + if ( OcpRegNum < 16 ) { + OcpPhyAddress = 0; + } else { + OcpRegNum -= 16; + OcpRegNum <<= 1; + + OcpPhyAddress = OcpPageNum + OcpRegNum; + } + + + return OcpPhyAddress; +} + +static void mdio_real_direct_write_phy_ocp(struct rtl8168_private *tp, + u32 RegAddr, + u32 value) +{ + u32 data32; + int i; + + if (tp->HwSuppPhyOcpVer == 0) goto out; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) + WARN_ON_ONCE(RegAddr % 2); +#endif + data32 = RegAddr/2; + data32 <<= OCPR_Addr_Reg_shift; + data32 |= OCPR_Write | value; + + RTL_W32(tp, PHYOCP, data32); + for (i = 0; i < 100; i++) { + udelay(1); + + if (!(RTL_R32(tp, PHYOCP) & OCPR_Flag)) + break; + } +out: + return; +} + +static void mdio_direct_write_phy_ocp(struct rtl8168_private *tp, + u16 RegAddr, + u16 value) +{ + if (tp->rtk_enable_diag) return; + + mdio_real_direct_write_phy_ocp(tp, RegAddr, value); +} + +static void rtl8168_mdio_write_phy_ocp(struct rtl8168_private *tp, + u16 PageNum, + u32 RegAddr, + u32 value) +{ + u16 ocp_addr; + + if (tp->rtk_enable_diag) return; + + ocp_addr = map_phy_ocp_addr(PageNum, RegAddr); + + mdio_direct_write_phy_ocp(tp, ocp_addr, value); +} + +static void rtl8168_mdio_real_write_phy_ocp(struct rtl8168_private *tp, + u16 PageNum, + u32 RegAddr, + u32 value) +{ + u16 ocp_addr; + + ocp_addr = map_phy_ocp_addr(PageNum, RegAddr); + + mdio_real_direct_write_phy_ocp(tp, ocp_addr, value); +} + +static void mdio_real_write(struct rtl8168_private *tp, + u32 RegAddr, + u32 value) +{ + int i; + + if (RegAddr == 0x1F) { + tp->cur_page = value; + } + + if (tp->mcfg == CFG_METHOD_11) { + RTL_W32(tp, OCPDR, OCPDR_Write | + (RegAddr & OCPDR_Reg_Mask) << OCPDR_GPHY_Reg_shift | + (value & OCPDR_Data_Mask)); + RTL_W32(tp, OCPAR, OCPAR_GPHY_Write); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + for (i = 0; i < 100; i++) { + mdelay(1); + if (!(RTL_R32(tp, OCPAR) & OCPAR_Flag)) + break; + } + } else { + if (tp->HwSuppPhyOcpVer > 0) { + if (RegAddr == 0x1F) { + return; + } + rtl8168_mdio_real_write_phy_ocp(tp, tp->cur_page, RegAddr, value); + } else { + if (tp->mcfg == CFG_METHOD_12 || tp->mcfg == CFG_METHOD_13) + RTL_W32(tp, 0xD0, RTL_R32(tp, 0xD0) & ~0x00020000); + + RTL_W32(tp, PHYAR, PHYAR_Write | + (RegAddr & PHYAR_Reg_Mask) << PHYAR_Reg_shift | + (value & PHYAR_Data_Mask)); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed writing to the specified MII register */ + if (!(RTL_R32(tp, PHYAR) & PHYAR_Flag)) { + udelay(20); + break; + } + } + + if (tp->mcfg == CFG_METHOD_12 || tp->mcfg == CFG_METHOD_13) + RTL_W32(tp, 0xD0, RTL_R32(tp, 0xD0) | 0x00020000); + } + } +} + +void rtl8168_mdio_write(struct rtl8168_private *tp, + u16 RegAddr, + u16 value) +{ + if (tp->rtk_enable_diag) return; + + mdio_real_write(tp, RegAddr, value); +} + +void rtl8168_mdio_prot_write(struct rtl8168_private *tp, + u32 RegAddr, + u32 value) +{ + mdio_real_write(tp, RegAddr, value); +} + +void rtl8168_mdio_prot_direct_write_phy_ocp(struct rtl8168_private *tp, + u32 RegAddr, + u32 value) +{ + mdio_real_direct_write_phy_ocp(tp, RegAddr, value); +} + +static u32 mdio_real_direct_read_phy_ocp(struct rtl8168_private *tp, + u32 RegAddr) +{ + u32 data32; + int i, value = 0; + + if (tp->HwSuppPhyOcpVer == 0) goto out; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) + WARN_ON_ONCE(RegAddr % 2); +#endif + data32 = RegAddr/2; + data32 <<= OCPR_Addr_Reg_shift; + + RTL_W32(tp, PHYOCP, data32); + for (i = 0; i < 100; 
i++) { + udelay(1); + + if (RTL_R32(tp, PHYOCP) & OCPR_Flag) + break; + } + value = RTL_R32(tp, PHYOCP) & OCPDR_Data_Mask; + +out: + return value; +} + +static u32 mdio_direct_read_phy_ocp(struct rtl8168_private *tp, + u16 RegAddr) +{ + if (tp->rtk_enable_diag) return 0xffffffff; + + return mdio_real_direct_read_phy_ocp(tp, RegAddr); +} + +static u32 rtl8168_mdio_read_phy_ocp(struct rtl8168_private *tp, + u16 PageNum, + u32 RegAddr) +{ + u16 ocp_addr; + + if (tp->rtk_enable_diag) return 0xffffffff; + + ocp_addr = map_phy_ocp_addr(PageNum, RegAddr); + + return mdio_direct_read_phy_ocp(tp, ocp_addr); +} + +static u32 rtl8168_mdio_real_read_phy_ocp(struct rtl8168_private *tp, + u16 PageNum, + u32 RegAddr) +{ + u16 ocp_addr; + + ocp_addr = map_phy_ocp_addr(PageNum, RegAddr); + + return mdio_real_direct_read_phy_ocp(tp, ocp_addr); +} + +u32 mdio_real_read(struct rtl8168_private *tp, + u32 RegAddr) +{ + int i, value = 0; + + if (tp->mcfg==CFG_METHOD_11) { + RTL_W32(tp, OCPDR, OCPDR_Read | + (RegAddr & OCPDR_Reg_Mask) << OCPDR_GPHY_Reg_shift); + RTL_W32(tp, OCPAR, OCPAR_GPHY_Write); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + for (i = 0; i < 100; i++) { + mdelay(1); + if (!(RTL_R32(tp, OCPAR) & OCPAR_Flag)) + break; + } + + mdelay(1); + RTL_W32(tp, OCPAR, OCPAR_GPHY_Read); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + for (i = 0; i < 100; i++) { + mdelay(1); + if (RTL_R32(tp, OCPAR) & OCPAR_Flag) + break; + } + + value = RTL_R32(tp, OCPDR) & OCPDR_Data_Mask; + } else { + if (tp->HwSuppPhyOcpVer > 0) { + value = rtl8168_mdio_real_read_phy_ocp(tp, tp->cur_page, RegAddr); + } else { + if (tp->mcfg == CFG_METHOD_12 || tp->mcfg == CFG_METHOD_13) + RTL_W32(tp, 0xD0, RTL_R32(tp, 0xD0) & ~0x00020000); + + RTL_W32(tp, PHYAR, + PHYAR_Read | (RegAddr & PHYAR_Reg_Mask) << PHYAR_Reg_shift); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed retrieving data from the specified MII register */ + if (RTL_R32(tp, PHYAR) & PHYAR_Flag) { + value = RTL_R32(tp, PHYAR) & PHYAR_Data_Mask; + udelay(20); + break; + } + } + + if (tp->mcfg == CFG_METHOD_12 || tp->mcfg == CFG_METHOD_13) + RTL_W32(tp, 0xD0, RTL_R32(tp, 0xD0) | 0x00020000); + } + } + + return value; +} + +u32 rtl8168_mdio_read(struct rtl8168_private *tp, + u16 RegAddr) +{ + if (tp->rtk_enable_diag) return 0xffffffff; + + return mdio_real_read(tp, RegAddr); +} + +u32 rtl8168_mdio_prot_read(struct rtl8168_private *tp, + u32 RegAddr) +{ + return mdio_real_read(tp, RegAddr); +} + +u32 rtl8168_mdio_prot_direct_read_phy_ocp(struct rtl8168_private *tp, + u32 RegAddr) +{ + return mdio_real_direct_read_phy_ocp(tp, RegAddr); +} + +static void ClearAndSetEthPhyBit(struct rtl8168_private *tp, u8 addr, u16 clearmask, u16 setmask) +{ + u16 PhyRegValue; + + + PhyRegValue = rtl8168_mdio_read(tp, addr); + PhyRegValue &= ~clearmask; + PhyRegValue |= setmask; + rtl8168_mdio_write(tp, addr, PhyRegValue); +} + +void rtl8168_clear_eth_phy_bit(struct rtl8168_private *tp, u8 addr, u16 mask) +{ + ClearAndSetEthPhyBit(tp, + addr, + mask, + 0 + ); +} + +void rtl8168_set_eth_phy_bit(struct rtl8168_private *tp, u8 addr, u16 mask) +{ + ClearAndSetEthPhyBit(tp, + addr, + 0, + mask + ); +} + +void rtl8168_mac_ocp_write(struct rtl8168_private *tp, u16 reg_addr, u16 value) +{ + u32 data32; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) + WARN_ON_ONCE(reg_addr % 2); +#endif + + data32 = reg_addr/2; + data32 <<= OCPR_Addr_Reg_shift; + data32 += value; + data32 |= OCPR_Write; + + RTL_W32(tp, MACOCP, data32); +} + +u16 rtl8168_mac_ocp_read(struct rtl8168_private *tp, u16 
reg_addr) +{ + u32 data32; + u16 data16 = 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) + WARN_ON_ONCE(reg_addr % 2); +#endif + + data32 = reg_addr/2; + data32 <<= OCPR_Addr_Reg_shift; + + RTL_W32(tp, MACOCP, data32); + data16 = (u16)RTL_R32(tp, MACOCP); + + return data16; +} + +#ifdef ENABLE_USE_FIRMWARE_FILE +static void mac_mcu_write(struct rtl8168_private *tp, u16 reg, u16 value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + rtl8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static u32 mac_mcu_read(struct rtl8168_private *tp, u16 reg) +{ + return rtl8168_mac_ocp_read(tp, tp->ocp_base + reg); +} +#endif + +static void +rtl8168_clear_and_set_mcu_ocp_bit( + struct rtl8168_private *tp, + u16 addr, + u16 clearmask, + u16 setmask +) +{ + u16 RegValue; + + RegValue = rtl8168_mac_ocp_read(tp, addr); + RegValue &= ~clearmask; + RegValue |= setmask; + rtl8168_mac_ocp_write(tp, addr, RegValue); +} + +/* +static void +rtl8168_clear_mcu_ocp_bit( + struct rtl8168_private *tp, + u16 addr, + u16 mask +) +{ + rtl8168_clear_and_set_mcu_ocp_bit(tp, + addr, + mask, + 0 + ); +} +*/ + +static void +rtl8168_set_mcu_ocp_bit( + struct rtl8168_private *tp, + u16 addr, + u16 mask +) +{ + rtl8168_clear_and_set_mcu_ocp_bit(tp, + addr, + 0, + mask + ); +} + +static u32 real_ocp_read(struct rtl8168_private *tp, u16 addr, u8 len) +{ + int i, val_shift, shift = 0; + u32 value1 = 0, value2 = 0, mask; + + if (len > 4 || len <= 0) + return -1; + + while (len > 0) { + val_shift = addr % 4; + addr = addr & ~0x3; + + RTL_W32(tp, OCPAR, (0x0F<<12) | (addr&0xFFF)); + + for (i = 0; i < 20; i++) { + udelay(100); + if (RTL_R32(tp, OCPAR) & OCPAR_Flag) + break; + } + + if (len == 1) mask = (0xFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 2) mask = (0xFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 3) mask = (0xFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else mask = (0xFFFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + + value1 = RTL_R32(tp, OCPDR) & mask; + value2 |= (value1 >> val_shift * 8) << shift * 8; + + if (len <= 4 - val_shift) { + len = 0; + } else { + len -= (4 - val_shift); + shift = 4 - val_shift; + addr += 4; + } + } + + udelay(20); + + return value2; +} + +u32 rtl8168_ocp_read_with_oob_base_address(struct rtl8168_private *tp, u16 addr, u8 len, const u32 base_address) +{ + return rtl8168_eri_read_with_oob_base_address(tp, addr, len, ERIAR_OOB, base_address); +} + +u32 rtl8168_ocp_read(struct rtl8168_private *tp, u16 addr, u8 len) +{ + u32 value = 0; + + if (HW_DASH_SUPPORT_TYPE_2(tp)) + value = rtl8168_ocp_read_with_oob_base_address(tp, addr, len, NO_BASE_ADDRESS); + else if (HW_DASH_SUPPORT_TYPE_3(tp)) + value = rtl8168_ocp_read_with_oob_base_address(tp, addr, len, RTL8168FP_OOBMAC_BASE); + else + value = real_ocp_read(tp, addr, len); + + return value; +} + +static int real_ocp_write(struct rtl8168_private *tp, u16 addr, u8 len, u32 value) +{ + int i, val_shift, shift = 0; + u32 value1 = 0, mask; + + if (len > 4 || len <= 0) + return -1; + + while (len > 0) { + val_shift = addr % 4; + addr = addr & ~0x3; + + if (len == 1) mask = (0xFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 2) mask = (0xFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 3) mask = (0xFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else mask = (0xFFFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + + value1 = rtl8168_ocp_read(tp, addr, 4) & ~mask; + value1 |= ((value << val_shift * 8) >> shift * 8); + + RTL_W32(tp, OCPDR, value1); + RTL_W32(tp, OCPAR, OCPAR_Flag | (0x0F<<12) | (addr&0xFFF)); + 
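+ /*
+  * Indirect OCP write: the dword just built goes out through OCPDR,
+  * and OCPAR is armed with OCPAR_Flag, what appears to be a
+  * byte-enable field (0x0F << 12) and the low 12 address bits.  The
+  * loop below then polls until the hardware clears OCPAR_Flag,
+  * giving up after 10 x 100us.
+  */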
+ for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed ERI write */ + if (!(RTL_R32(tp, OCPAR) & OCPAR_Flag)) + break; + } + + if (len <= 4 - val_shift) { + len = 0; + } else { + len -= (4 - val_shift); + shift = 4 - val_shift; + addr += 4; + } + } + + udelay(20); + + return 0; +} + +u32 rtl8168_ocp_write_with_oob_base_address(struct rtl8168_private *tp, u16 addr, u8 len, u32 value, const u32 base_address) +{ + return rtl8168_eri_write_with_oob_base_address(tp, addr, len, value, ERIAR_OOB, base_address); +} + +void rtl8168_ocp_write(struct rtl8168_private *tp, u16 addr, u8 len, u32 value) +{ + if (HW_DASH_SUPPORT_TYPE_2(tp)) + rtl8168_ocp_write_with_oob_base_address(tp, addr, len, value, NO_BASE_ADDRESS); + else if (HW_DASH_SUPPORT_TYPE_3(tp)) + rtl8168_ocp_write_with_oob_base_address(tp, addr, len, value, RTL8168FP_OOBMAC_BASE); + else + real_ocp_write(tp, addr, len, value); +} + +void rtl8168_oob_mutex_lock(struct rtl8168_private *tp) +{ + u8 reg_16, reg_a0; + u32 wait_cnt_0, wait_Cnt_1; + u16 ocp_reg_mutex_ib; + u16 ocp_reg_mutex_oob; + u16 ocp_reg_mutex_prio; + + if (!tp->DASH) return; + + switch (tp->mcfg) { + case CFG_METHOD_11: + case CFG_METHOD_12: + ocp_reg_mutex_oob = 0x16; + ocp_reg_mutex_ib = 0x17; + ocp_reg_mutex_prio = 0x9C; + break; + case CFG_METHOD_13: + ocp_reg_mutex_oob = 0x06; + ocp_reg_mutex_ib = 0x07; + ocp_reg_mutex_prio = 0x9C; + break; + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + default: + ocp_reg_mutex_oob = 0x110; + ocp_reg_mutex_ib = 0x114; + ocp_reg_mutex_prio = 0x11C; + break; + } + + rtl8168_ocp_write(tp, ocp_reg_mutex_ib, 1, BIT_0); + reg_16 = rtl8168_ocp_read(tp, ocp_reg_mutex_oob, 1); + wait_cnt_0 = 0; + while(reg_16) { + reg_a0 = rtl8168_ocp_read(tp, ocp_reg_mutex_prio, 1); + if (reg_a0) { + rtl8168_ocp_write(tp, ocp_reg_mutex_ib, 1, 0x00); + reg_a0 = rtl8168_ocp_read(tp, ocp_reg_mutex_prio, 1); + wait_Cnt_1 = 0; + while(reg_a0) { + reg_a0 = rtl8168_ocp_read(tp, ocp_reg_mutex_prio, 1); + + wait_Cnt_1++; + + if (wait_Cnt_1 > 2000) + break; + }; + rtl8168_ocp_write(tp, ocp_reg_mutex_ib, 1, BIT_0); + + } + reg_16 = rtl8168_ocp_read(tp, ocp_reg_mutex_oob, 1); + + wait_cnt_0++; + + if (wait_cnt_0 > 2000) + break; + }; +} + +void rtl8168_oob_mutex_unlock(struct rtl8168_private *tp) +{ + u16 ocp_reg_mutex_ib; + u16 ocp_reg_mutex_oob; + u16 ocp_reg_mutex_prio; + + if (!tp->DASH) return; + + switch (tp->mcfg) { + case CFG_METHOD_11: + case CFG_METHOD_12: + ocp_reg_mutex_oob = 0x16; + ocp_reg_mutex_ib = 0x17; + ocp_reg_mutex_prio = 0x9C; + break; + case CFG_METHOD_13: + ocp_reg_mutex_oob = 0x06; + ocp_reg_mutex_ib = 0x07; + ocp_reg_mutex_prio = 0x9C; + break; + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + default: + ocp_reg_mutex_oob = 0x110; + ocp_reg_mutex_ib = 0x114; + ocp_reg_mutex_prio = 0x11C; + break; + } + + rtl8168_ocp_write(tp, ocp_reg_mutex_prio, 1, BIT_0); + rtl8168_ocp_write(tp, ocp_reg_mutex_ib, 1, 0x00); +} + +void rtl8168_oob_notify(struct rtl8168_private *tp, u8 cmd) +{ + rtl8168_eri_write(tp, 0xE8, 1, cmd, ERIAR_ExGMAC); + + rtl8168_ocp_write(tp, 0x30, 1, 0x01); +} + +static int rtl8168_check_dash(struct rtl8168_private *tp) +{ + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + if (rtl8168_ocp_read(tp, 0x128, 1) & BIT_0) + return 1; + else + return 0; + } else { + u32 reg; + + if (tp->mcfg == CFG_METHOD_13) + reg = 0xb8; + 
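+ /* CFG_METHOD_13 exposes its DASH status word at OCP 0xb8; the other
+  * non-type-2/3 chips use OCP 0x10, with bit 15 set when DASH
+  * firmware is present. */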
else + reg = 0x10; + + if (rtl8168_ocp_read(tp, reg, 2) & 0x00008000) + return 1; + else + return 0; + } +} + +void rtl8168_dash2_disable_tx(struct rtl8168_private *tp) +{ + if (!tp->DASH) return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + u16 WaitCnt; + u8 TmpUchar; + + //Disable oob Tx + RTL_CMAC_W8(tp, CMAC_IBCR2, RTL_CMAC_R8(tp, CMAC_IBCR2) & ~( BIT_0 )); + WaitCnt = 0; + + //wait oob tx disable + do { + TmpUchar = RTL_CMAC_R8(tp, CMAC_IBISR0); + + if ( TmpUchar & ISRIMR_DASH_TYPE2_TX_DISABLE_IDLE ) { + break; + } + + udelay( 50 ); + WaitCnt++; + } while(WaitCnt < 2000); + + //Clear ISRIMR_DASH_TYPE2_TX_DISABLE_IDLE + RTL_CMAC_W8(tp, CMAC_IBISR0, RTL_CMAC_R8(tp, CMAC_IBISR0) | ISRIMR_DASH_TYPE2_TX_DISABLE_IDLE); + } +} + +void rtl8168_dash2_enable_tx(struct rtl8168_private *tp) +{ + if (!tp->DASH) return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + RTL_CMAC_W8(tp, CMAC_IBCR2, RTL_CMAC_R8(tp, CMAC_IBCR2) | BIT_0); + } +} + +void rtl8168_dash2_disable_rx(struct rtl8168_private *tp) +{ + if (!tp->DASH) return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + RTL_CMAC_W8(tp, CMAC_IBCR0, RTL_CMAC_R8(tp, CMAC_IBCR0) & ~( BIT_0 )); + } +} + +void rtl8168_dash2_enable_rx(struct rtl8168_private *tp) +{ + if (!tp->DASH) return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + RTL_CMAC_W8(tp, CMAC_IBCR0, RTL_CMAC_R8(tp, CMAC_IBCR0) | BIT_0); + } +} + +static void rtl8168_dash2_disable_txrx(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + rtl8168_dash2_disable_tx( tp ); + rtl8168_dash2_disable_rx( tp ); + } +} + +void rtl8168_ephy_write(struct rtl8168_private *tp, int RegAddr, int value) +{ + int i; + + RTL_W32(tp, EPHYAR, + EPHYAR_Write | + (RegAddr & EPHYAR_Reg_Mask) << EPHYAR_Reg_shift | + (value & EPHYAR_Data_Mask)); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed EPHY write */ + if (!(RTL_R32(tp, EPHYAR) & EPHYAR_Flag)) + break; + } + + udelay(20); +} + +u16 rtl8168_ephy_read(struct rtl8168_private *tp, int RegAddr) +{ + int i; + u16 value = 0xffff; + + RTL_W32(tp, EPHYAR, + EPHYAR_Read | (RegAddr & EPHYAR_Reg_Mask) << EPHYAR_Reg_shift); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed EPHY read */ + if (RTL_R32(tp, EPHYAR) & EPHYAR_Flag) { + value = (u16) (RTL_R32(tp, EPHYAR) & EPHYAR_Data_Mask); + break; + } + } + + udelay(20); + + return value; +} + +static void ClearAndSetPCIePhyBit(struct rtl8168_private *tp, u8 addr, u16 clearmask, u16 setmask) +{ + u16 EphyValue; + + EphyValue = rtl8168_ephy_read(tp, addr); + EphyValue &= ~clearmask; + EphyValue |= setmask; + rtl8168_ephy_write(tp, addr, EphyValue); +} + +static void ClearPCIePhyBit(struct rtl8168_private *tp, u8 addr, u16 mask) +{ + ClearAndSetPCIePhyBit( tp, + addr, + mask, + 0 + ); +} + +static void SetPCIePhyBit( struct rtl8168_private *tp, u8 addr, u16 mask) +{ + ClearAndSetPCIePhyBit( tp, + addr, + 0, + mask + ); +} + +static u32 +rtl8168_csi_other_fun_read(struct rtl8168_private *tp, + u8 multi_fun_sel_bit, + u32 addr) +{ + u32 cmd; + int i; + u32 value = 0; + + cmd = CSIAR_Read | CSIAR_ByteEn << CSIAR_ByteEn_shift | (addr & CSIAR_Addr_Mask); + + if (tp->mcfg != CFG_METHOD_20 && tp->mcfg != CFG_METHOD_23 && + tp->mcfg != CFG_METHOD_26 && tp->mcfg != CFG_METHOD_27 && + tp->mcfg != CFG_METHOD_28 && tp->mcfg != CFG_METHOD_31 && + tp->mcfg != CFG_METHOD_32 && 
tp->mcfg != CFG_METHOD_33) { + multi_fun_sel_bit = 0; + } + + if ( multi_fun_sel_bit > 7 ) { + return 0xffffffff; + } + + cmd |= multi_fun_sel_bit << 16; + + RTL_W32(tp, CSIAR, cmd); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed CSI read */ + if (RTL_R32(tp, CSIAR) & CSIAR_Flag) { + value = (u32)RTL_R32(tp, CSIDR); + break; + } + } + + udelay(20); + + return value; +} + +static void +rtl8168_csi_other_fun_write(struct rtl8168_private *tp, + u8 multi_fun_sel_bit, + u32 addr, + u32 value) +{ + u32 cmd; + int i; + + RTL_W32(tp, CSIDR, value); + cmd = CSIAR_Write | CSIAR_ByteEn << CSIAR_ByteEn_shift | (addr & CSIAR_Addr_Mask); + if (tp->mcfg != CFG_METHOD_20 && tp->mcfg != CFG_METHOD_23 && + tp->mcfg != CFG_METHOD_26 && tp->mcfg != CFG_METHOD_27 && + tp->mcfg != CFG_METHOD_28 && tp->mcfg != CFG_METHOD_31 && + tp->mcfg != CFG_METHOD_32 && tp->mcfg != CFG_METHOD_33) { + multi_fun_sel_bit = 0; + } + + if ( multi_fun_sel_bit > 7 ) { + return; + } + + cmd |= multi_fun_sel_bit << 16; + + RTL_W32(tp, CSIAR, cmd); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed CSI write */ + if (!(RTL_R32(tp, CSIAR) & CSIAR_Flag)) + break; + } + + udelay(20); +} + +static u32 +rtl8168_csi_read(struct rtl8168_private *tp, + u32 addr) +{ + u8 multi_fun_sel_bit; + + if (tp->mcfg == CFG_METHOD_20) + multi_fun_sel_bit = 2; + else if (tp->mcfg == CFG_METHOD_26 || tp->mcfg == CFG_METHOD_31 || + tp->mcfg == CFG_METHOD_32 || tp->mcfg == CFG_METHOD_33) + multi_fun_sel_bit = 1; + else + multi_fun_sel_bit = 0; + + return rtl8168_csi_other_fun_read(tp, multi_fun_sel_bit, addr); +} + +static void +rtl8168_csi_write(struct rtl8168_private *tp, + u32 addr, + u32 value) +{ + u8 multi_fun_sel_bit; + + if (tp->mcfg == CFG_METHOD_20) + multi_fun_sel_bit = 2; + else if (tp->mcfg == CFG_METHOD_26 || tp->mcfg == CFG_METHOD_31 || + tp->mcfg == CFG_METHOD_32 || tp->mcfg == CFG_METHOD_33) + multi_fun_sel_bit = 1; + else + multi_fun_sel_bit = 0; + + rtl8168_csi_other_fun_write(tp, multi_fun_sel_bit, addr, value); +} + +static u8 +rtl8168_csi_fun0_read_byte(struct rtl8168_private *tp, + u32 addr) +{ + u8 RetVal = 0; + + if (tp->mcfg == CFG_METHOD_20 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + u32 TmpUlong; + u16 RegAlignAddr; + u8 ShiftByte; + + RegAlignAddr = addr & ~(0x3); + ShiftByte = addr & (0x3); + TmpUlong = rtl8168_csi_other_fun_read(tp, 0, addr); + TmpUlong >>= (8*ShiftByte); + RetVal = (u8)TmpUlong; + } else { + struct pci_dev *pdev = tp->pci_dev; + + pci_read_config_byte(pdev, addr, &RetVal); + } + + udelay(20); + + return RetVal; +} + +static void +rtl8168_csi_fun0_write_byte(struct rtl8168_private *tp, + u32 addr, + u8 value) +{ + if (tp->mcfg == CFG_METHOD_20 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + u32 TmpUlong; + u16 RegAlignAddr; + u8 ShiftByte; + + RegAlignAddr = addr & ~(0x3); + ShiftByte = addr & (0x3); + TmpUlong = rtl8168_csi_other_fun_read(tp, 0, RegAlignAddr); + TmpUlong &= ~(0xFF << (8*ShiftByte)); + TmpUlong |= (value << (8*ShiftByte)); + rtl8168_csi_other_fun_write( tp, 0, RegAlignAddr, TmpUlong ); + } else { + struct pci_dev *pdev = tp->pci_dev; + + pci_write_config_byte(pdev, addr, value); + } + + udelay(20); +} + +static void +rtl8168_clear_and_set_other_fun_pci_bit(struct rtl8168_private *tp, + u8 multi_fun_sel_bit, + u32 addr, + u32 clearmask, + u32 setmask) +{ + u32 
TmpUlong; + + TmpUlong = rtl8168_csi_other_fun_read(tp, multi_fun_sel_bit, addr); + TmpUlong &= ~clearmask; + TmpUlong |= setmask; + rtl8168_csi_other_fun_write(tp, multi_fun_sel_bit, addr, TmpUlong); +} + +static void +rtl8168_other_fun_dev_pci_setting(struct rtl8168_private *tp, + u32 addr, + u32 clearmask, + u32 setmask, + u8 multi_fun_sel_bit) +{ + u32 TmpUlong; + u8 i; + u8 FunBit; + + for (i = 0; i < 8; i++) { + FunBit = (1 << i); + if (FunBit & multi_fun_sel_bit) { + u8 set_other_fun = TRUE; + + switch(tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + //0: UMAC, 1: TCR1, 2: TCR2, 3: KCS, 4: EHCI(Control by EHCI Driver) + if (i < 5) { + TmpUlong = rtl8168_csi_other_fun_read(tp, i, 0x00); + if (TmpUlong == 0xFFFFFFFF) + set_other_fun = TRUE; + else + set_other_fun = FALSE; + } + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + //0: BMC, 1: NIC, 2: TCR, 3: VGA/PCIE_TO_USB, 4: EHCI, 5: WIFI, 6: WIFI, 7: KCS + if (i == 5 || i == 6) { + if (tp->DASH) { + TmpUlong = rtl8168_ocp_read(tp, 0x184, 4); + if (TmpUlong & BIT_26) + set_other_fun = FALSE; + else + set_other_fun = TRUE; + } + } else { //function 0/1/2/3/4/7 + TmpUlong = rtl8168_csi_other_fun_read(tp, i, 0x00); + if (TmpUlong == 0xFFFFFFFF) + set_other_fun = TRUE; + else + set_other_fun = FALSE; + } + break; + default: + return; + } + + if (set_other_fun) + rtl8168_clear_and_set_other_fun_pci_bit(tp, i, addr, clearmask, setmask); + } + } +} + +static void +rtl8168_set_dash_other_fun_dev_state_change(struct rtl8168_private *tp, + u8 dev_state, + u8 multi_fun_sel_bit) +{ + u32 clearmask; + u32 setmask; + + if (dev_state == 0) { + // + // goto D0 + // + clearmask = (BIT_0 | BIT_1); + setmask = 0; + + rtl8168_other_fun_dev_pci_setting(tp, 0x44, clearmask, setmask, multi_fun_sel_bit); + } else { + // + // goto D3 + // + clearmask = 0; + setmask = (BIT_0 | BIT_1); + + rtl8168_other_fun_dev_pci_setting(tp, 0x44, clearmask, setmask, multi_fun_sel_bit); + } +} + +static void +rtl8168_set_dash_other_fun_dev_aspm_clkreq(struct rtl8168_private *tp, + u8 aspm_val, + u8 clkreq_en, + u8 multi_fun_sel_bit) +{ + u32 clearmask; + u32 setmask; + + aspm_val &= (BIT_0 | BIT_1); + clearmask = (BIT_0 | BIT_1 | BIT_8); + setmask = aspm_val; + if (clkreq_en) + setmask |= BIT_8; + + rtl8168_other_fun_dev_pci_setting(tp, 0x80, clearmask, setmask, multi_fun_sel_bit); +} + +/* +static void +rtl8168_set_dash_other_fun_dev_pci_cmd_register(struct rtl8168_private *tp, + u8 pci_cmd_reg, + u8 multi_fun_sel_bit) +{ + u32 clearmask; + u32 setmask; + + pci_cmd_reg &= (BIT_0 | BIT_1 | BIT_2); + + clearmask = (BIT_0 | BIT_1 | BIT_2); + setmask = pci_cmd_reg; + + rtl8168_other_fun_dev_pci_setting(tp, 0x04, clearmask, setmask, multi_fun_sel_bit); +} +*/ + +u32 rtl8168_eri_read_with_oob_base_address(struct rtl8168_private *tp, int addr, int len, int type, const u32 base_address) +{ + int i, val_shift, shift = 0; + u32 value1 = 0, value2 = 0, mask; + u32 eri_cmd; + const u32 transformed_base_address = ((base_address & 0x00FFF000) << 6) | (base_address & 0x000FFF); + + if (len > 4 || len <= 0) + return -1; + + while (len > 0) { + val_shift = addr % ERIAR_Addr_Align; + addr = addr & ~0x3; + + eri_cmd = ERIAR_Read | + transformed_base_address | + type << ERIAR_Type_shift | + ERIAR_ByteEn << ERIAR_ByteEn_shift | + (addr & 0x0FFF); + if (addr & 0xF000) { + u32 tmp; + + tmp = addr & 0xF000; + tmp >>= 12; + eri_cmd |= (tmp << 20) & 0x00F00000; + } + + RTL_W32(tp, ERIAR, eri_cmd); + + for (i = 0; i < 10; i++) { + udelay(100); + 
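+ /*
+  * Same indirect-window pattern as the OCP accessors: poll until the
+  * hardware signals completion, then extract the requested bytes from
+  * the ERIDR dword with the mask built below.  E.g. a 2-byte read at
+  * addr 0x02 (val_shift 2, assuming ERIAR_Addr_Align is 4, as the
+  * addr & ~0x3 alignment suggests) masks with 0xFFFF0000 and shifts
+  * the value back down.
+  */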
+ /* Check if the RTL8168 has completed ERI read */ + if (RTL_R32(tp, ERIAR) & ERIAR_Flag) + break; + } + + if (len == 1) mask = (0xFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 2) mask = (0xFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 3) mask = (0xFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else mask = (0xFFFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + + value1 = RTL_R32(tp, ERIDR) & mask; + value2 |= (value1 >> val_shift * 8) << shift * 8; + + if (len <= 4 - val_shift) { + len = 0; + } else { + len -= (4 - val_shift); + shift = 4 - val_shift; + addr += 4; + } + } + + udelay(20); + + return value2; +} + +u32 rtl8168_eri_read(struct rtl8168_private *tp, int addr, int len, int type) +{ + return rtl8168_eri_read_with_oob_base_address(tp, addr, len, type, 0); +} + +int rtl8168_eri_write_with_oob_base_address(struct rtl8168_private *tp, int addr, int len, u32 value, int type, const u32 base_address) +{ + int i, val_shift, shift = 0; + u32 value1 = 0, mask; + u32 eri_cmd; + const u32 transformed_base_address = ((base_address & 0x00FFF000) << 6) | (base_address & 0x000FFF); + + if (len > 4 || len <= 0) + return -1; + + while (len > 0) { + val_shift = addr % ERIAR_Addr_Align; + addr = addr & ~0x3; + + if (len == 1) mask = (0xFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 2) mask = (0xFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 3) mask = (0xFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else mask = (0xFFFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + + value1 = rtl8168_eri_read_with_oob_base_address(tp, addr, 4, type, base_address) & ~mask; + value1 |= ((value << val_shift * 8) >> shift * 8); + + RTL_W32(tp, ERIDR, value1); + + eri_cmd = ERIAR_Write | + transformed_base_address | + type << ERIAR_Type_shift | + ERIAR_ByteEn << ERIAR_ByteEn_shift | + (addr & 0x0FFF); + if (addr & 0xF000) { + u32 tmp; + + tmp = addr & 0xF000; + tmp >>= 12; + eri_cmd |= (tmp << 20) & 0x00F00000; + } + + RTL_W32(tp, ERIAR, eri_cmd); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed ERI write */ + if (!(RTL_R32(tp, ERIAR) & ERIAR_Flag)) + break; + } + + if (len <= 4 - val_shift) { + len = 0; + } else { + len -= (4 - val_shift); + shift = 4 - val_shift; + addr += 4; + } + } + + udelay(20); + + return 0; +} + +int rtl8168_eri_write(struct rtl8168_private *tp, int addr, int len, u32 value, int type) +{ + return rtl8168_eri_write_with_oob_base_address(tp, addr, len, value, type, NO_BASE_ADDRESS); +} + +static void +rtl8168_enable_rxdvgate(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_3); + mdelay(2); + break; + } +} + +static void +rtl8168_disable_rxdvgate(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) & ~BIT_3); + mdelay(2); + break; + } +} + +static u8 +rtl8168_is_gpio_low(struct net_device *dev) +{ + struct 
rtl8168_private *tp = netdev_priv(dev); + u8 gpio_low = FALSE; + + switch (tp->HwSuppCheckPhyDisableModeVer) { + case 1: + case 2: + if (!(rtl8168_mac_ocp_read(tp, 0xDC04) & BIT_9)) + gpio_low = TRUE; + break; + case 3: + if (!(rtl8168_mac_ocp_read(tp, 0xDC04) & BIT_13)) + gpio_low = TRUE; + break; + } + + if (gpio_low) + dprintk("gpio is low.\n"); + + return gpio_low; +} + +static u8 +rtl8168_is_phy_disable_mode_enabled(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u8 phy_disable_mode_enabled = FALSE; + + switch (tp->HwSuppCheckPhyDisableModeVer) { + case 1: + if (rtl8168_mac_ocp_read(tp, 0xDC20) & BIT_1) + phy_disable_mode_enabled = TRUE; + break; + case 2: + case 3: + if (RTL_R8(tp, 0xF2) & BIT_5) + phy_disable_mode_enabled = TRUE; + break; + } + + if (phy_disable_mode_enabled) + dprintk("phy disable mode enabled.\n"); + + return phy_disable_mode_enabled; +} + +static u8 +rtl8168_is_in_phy_disable_mode(struct net_device *dev) +{ + u8 in_phy_disable_mode = FALSE; + + if (rtl8168_is_phy_disable_mode_enabled(dev) && rtl8168_is_gpio_low(dev)) + in_phy_disable_mode = TRUE; + + if (in_phy_disable_mode) + dprintk("Hardware is in phy disable mode.\n"); + + return in_phy_disable_mode; +} + +void +rtl8168_wait_txrx_fifo_empty(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int i; + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + for (i = 0; i < 10; i++) { + udelay(100); + if (RTL_R32(tp, TxConfig) & BIT_11) + break; + } + + for (i = 0; i < 10; i++) { + udelay(100); + if ((RTL_R8(tp, MCUCmd_reg) & (Txfifo_empty | Rxfifo_empty)) == (Txfifo_empty | Rxfifo_empty)) + break; + } + + mdelay(1); + break; + } +} + +static void rtl8168_driver_start(struct rtl8168_private *tp) +{ + //change other device state to D0. 
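+ // The 0x1E / 0xFC masks below pick which of the eight sibling PCI
+ // functions to touch; rtl8168_other_fun_dev_pci_setting() additionally
+ // leaves a function alone when its config space reads back as
+ // populated, and defers to the DASH firmware for the WiFi functions
+ // where present.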
+ switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + rtl8168_set_dash_other_fun_dev_aspm_clkreq(tp, 3, 1, 0x1E); + rtl8168_set_dash_other_fun_dev_state_change(tp, 3, 0x1E); + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_set_dash_other_fun_dev_aspm_clkreq(tp, 3, 1, 0xFC); + rtl8168_set_dash_other_fun_dev_state_change(tp, 3, 0xFC); + break; + } + + if (!tp->DASH) + return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + int timeout; + u32 tmp_value; + + rtl8168_ocp_write(tp, 0x180, 1, OOB_CMD_DRIVER_START); + tmp_value = rtl8168_ocp_read(tp, 0x30, 1); + tmp_value |= BIT_0; + rtl8168_ocp_write(tp, 0x30, 1, tmp_value); + + for (timeout = 0; timeout < 10; timeout++) { + mdelay(10); + if (rtl8168_ocp_read(tp, 0x124, 1) & BIT_0) + break; + } + } else { + int timeout; + u32 reg; + + if (tp->mcfg == CFG_METHOD_13) { + RTL_W8(tp, TwiCmdReg, RTL_R8(tp, TwiCmdReg) | ( BIT_7 )); + } + + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); + + if (tp->mcfg == CFG_METHOD_13) + reg = 0xB8; + else + reg = 0x10; + + for (timeout = 0; timeout < 10; timeout++) { + mdelay(10); + if (rtl8168_ocp_read(tp, reg, 2) & BIT_11) + break; + } + } +} + +static void rtl8168_driver_stop(struct rtl8168_private *tp) +{ + if (!tp->DASH) + goto update_device_state; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + struct net_device *dev = tp->dev; + int timeout; + u32 tmp_value; + + rtl8168_dash2_disable_txrx(dev); + + rtl8168_ocp_write(tp, 0x180, 1, OOB_CMD_DRIVER_STOP); + tmp_value = rtl8168_ocp_read(tp, 0x30, 1); + tmp_value |= BIT_0; + rtl8168_ocp_write(tp, 0x30, 1, tmp_value); + + for (timeout = 0; timeout < 10; timeout++) { + mdelay(10); + if (!(rtl8168_ocp_read(tp, 0x124, 1) & BIT_0)) + break; + } + } else { + int timeout; + u32 reg; + + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); + + if (tp->mcfg == CFG_METHOD_13) + reg = 0xB8; + else + reg = 0x10; + + for (timeout = 0; timeout < 10; timeout++) { + mdelay(10); + if ((rtl8168_ocp_read(tp, reg, 2) & BIT_11) == 0) + break; + } + + if (tp->mcfg == CFG_METHOD_13) { + RTL_W8(tp, TwiCmdReg, RTL_R8(tp, TwiCmdReg) & ~( BIT_7 )); + } + } + +update_device_state: + //change other device state to D3. 
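+ // Counterpart of the D0 restore in rtl8168_driver_start(), using
+ // slightly different function masks (0x0E / 0xFD vs. 0x1E / 0xFC).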
+ switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + rtl8168_set_dash_other_fun_dev_state_change(tp, 3, 0x0E); + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_set_dash_other_fun_dev_state_change(tp, 3, 0xFD); + break; + } +} + +#ifdef ENABLE_DASH_SUPPORT + +inline void +rtl8168_enable_dash2_interrupt(struct rtl8168_private *tp) +{ + if (!tp->DASH) return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + RTL_CMAC_W8(tp, CMAC_IBIMR0, ( ISRIMR_DASH_TYPE2_ROK | ISRIMR_DASH_TYPE2_TOK | ISRIMR_DASH_TYPE2_TDU | ISRIMR_DASH_TYPE2_RDU | ISRIMR_DASH_TYPE2_RX_DISABLE_IDLE )); + } +} + +static inline void +rtl8168_disable_dash2_interrupt(struct rtl8168_private *tp) +{ + if (!tp->DASH) return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + RTL_CMAC_W8(tp, CMAC_IBIMR0, 0); + } +} +#endif + +static inline void +rtl8168_enable_hw_interrupt(struct rtl8168_private *tp) +{ + RTL_W16(tp, IntrMask, tp->intr_mask); + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) + rtl8168_enable_dash2_interrupt(tp); +#endif +} + +static inline void +rtl8168_disable_hw_interrupt(struct rtl8168_private *tp) +{ + RTL_W16(tp, IntrMask, 0x0000); + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) + rtl8168_disable_dash2_interrupt(tp); +#endif +} + + +static inline void +rtl8168_switch_to_hw_interrupt(struct rtl8168_private *tp) +{ + RTL_W32(tp, TimeInt0, 0x0000); + + rtl8168_enable_hw_interrupt(tp); +} + +static inline void +rtl8168_switch_to_timer_interrupt(struct rtl8168_private *tp) +{ + if (tp->use_timer_interrrupt) { + RTL_W32(tp, TimeInt0, timer_count); + RTL_W32(tp, TCTR, timer_count); + RTL_W16(tp, IntrMask, tp->timer_intr_mask); + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) + rtl8168_enable_dash2_interrupt(tp); +#endif + } else { + rtl8168_switch_to_hw_interrupt(tp); + } +} + +static void +rtl8168_irq_mask_and_ack(struct rtl8168_private *tp) +{ + rtl8168_disable_hw_interrupt(tp); +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) { + if (tp->dash_printer_enabled) { + RTL_W16(tp, IntrStatus, RTL_R16(tp, IntrStatus) & + ~(ISRIMR_DASH_INTR_EN | ISRIMR_DASH_INTR_CMAC_RESET)); + } else { + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + RTL_CMAC_W8(tp, CMAC_IBISR0, RTL_CMAC_R8(tp, CMAC_IBISR0)); + } + } + } else { + RTL_W16(tp, IntrStatus, RTL_R16(tp, IntrStatus)); + } +#else + RTL_W16(tp, IntrStatus, RTL_R16(tp, IntrStatus)); +#endif +} + +static void +rtl8168_nic_reset(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int i; + + RTL_W32(tp, RxConfig, (RX_DMA_BURST << RxCfgDMAShift)); + + rtl8168_enable_rxdvgate(dev); + + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + mdelay(10); + break; + case CFG_METHOD_4: + case CFG_METHOD_5: + case CFG_METHOD_6: + case CFG_METHOD_7: + case CFG_METHOD_8: + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_14: + case CFG_METHOD_15: + RTL_W8(tp, ChipCmd, StopReq | CmdRxEnb | CmdTxEnb); + udelay(100); + break; + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + for (i = 0; i < 2000; i++) { + if (!(RTL_R8(tp, TxPoll) & NPQ)) break; + udelay(100); + } + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + mdelay(2); + break; + default: + mdelay(10); 
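+ // unrecognized chips fall back to the same conservative settle
+ // time as CFG_METHOD_1..3 before the soft reset below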
+ break; + } + + rtl8168_wait_txrx_fifo_empty(dev); + + /* Soft reset the chip. */ + RTL_W8(tp, ChipCmd, CmdReset); + + /* Check that the chip has finished the reset. */ + for (i = 100; i > 0; i--) { + udelay(100); + if ((RTL_R8(tp, ChipCmd) & CmdReset) == 0) + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_11: + rtl8168_oob_mutex_lock(tp); + rtl8168_ocp_write(tp, 0x10, 2, rtl8168_ocp_read(tp, 0x010, 2)&~0x00004000); + rtl8168_oob_mutex_unlock(tp); + + rtl8168_oob_notify(tp, OOB_CMD_RESET); + + for (i = 0; i < 10; i++) { + mdelay(10); + if (rtl8168_ocp_read(tp, 0x010, 2)&0x00004000) + break; + } + + for (i = 0; i < 5; i++) { + if ( rtl8168_ocp_read(tp, 0x034, 1) == 0) + break; + } + break; + } +} + +static void +rtl8168_hw_clear_timer_int(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + RTL_W32(tp, TimeInt0, 0x0000); + + switch (tp->mcfg) { + case CFG_METHOD_4: + case CFG_METHOD_5: + case CFG_METHOD_6: + case CFG_METHOD_7: + case CFG_METHOD_8: + RTL_W32(tp, TimeInt1, 0x0000); + break; + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W32(tp, TimeInt1, 0x0000); + RTL_W32(tp, TimeInt2, 0x0000); + RTL_W32(tp, TimeInt3, 0x0000); + break; + } +} + +static void +rtl8168_hw_reset(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + /* Disable interrupts */ + rtl8168_irq_mask_and_ack(tp); + + rtl8168_hw_clear_timer_int(dev); + + rtl8168_nic_reset(dev); +} + +static void rtl8168_mac_loopback_test(struct rtl8168_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct net_device *dev = tp->dev; + struct sk_buff *skb, *rx_skb; + dma_addr_t mapping; + struct TxDesc *txd; + struct RxDesc *rxd; + void *tmpAddr; + u32 len, rx_len, rx_cmd = 0; + u16 type; + u8 pattern; + int i; + + if (tp->DASH) + return; + + pattern = 0x5A; + len = 60; + type = htons(ETH_P_IP); + txd = tp->TxDescArray; + rxd = tp->RxDescArray; + rx_skb = tp->Rx_skbuff[0]; + RTL_W32(tp, TxConfig, (RTL_R32(tp, TxConfig) & ~0x00060000) | 0x00020000); + + do { + skb = dev_alloc_skb(len + RTK_RX_ALIGN); + if (unlikely(!skb)) + dev_printk(KERN_NOTICE, tp_to_dev(tp), "-ENOMEM;\n"); + } while (unlikely(skb == NULL)); + skb_reserve(skb, RTK_RX_ALIGN); + + memcpy(skb_put(skb, dev->addr_len), dev->dev_addr, dev->addr_len); + memcpy(skb_put(skb, dev->addr_len), dev->dev_addr, dev->addr_len); + memcpy(skb_put(skb, sizeof(type)), &type, sizeof(type)); + tmpAddr = skb_put(skb, len - 14); + + mapping = dma_map_single(tp_to_dev(tp), skb->data, len, DMA_TO_DEVICE); + dma_sync_single_for_cpu(tp_to_dev(tp), le64_to_cpu(mapping), + len, DMA_TO_DEVICE); + txd->addr = cpu_to_le64(mapping); + txd->opts2 = 0; + while (1) { + memset(tmpAddr, pattern++, len - 14); + pci_dma_sync_single_for_device(tp->pci_dev, + le64_to_cpu(mapping), + len, DMA_TO_DEVICE); + txd->opts1 = cpu_to_le32(DescOwn | FirstFrag | LastFrag | len); + + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptMyPhys); + + smp_wmb(); + RTL_W8(tp, TxPoll, NPQ); /* set polling bit */ + + for (i = 0; i < 50; i++) { + udelay(200); + rx_cmd 
= le32_to_cpu(rxd->opts1); + if ((rx_cmd & DescOwn) == 0) + break; + } + + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~(AcceptErr | AcceptRunt | AcceptBroadcast | AcceptMulticast | AcceptMyPhys | AcceptAllPhys)); + + rx_len = rx_cmd & 0x3FFF; + rx_len -= 4; + rxd->opts1 = cpu_to_le32(DescOwn | tp->rx_buf_sz); + + dma_sync_single_for_cpu(tp_to_dev(tp), le64_to_cpu(mapping), len, DMA_TO_DEVICE); + + if (rx_len == len) { + dma_sync_single_for_cpu(tp_to_dev(tp), le64_to_cpu(rxd->addr), tp->rx_buf_sz, DMA_FROM_DEVICE); + i = memcmp(skb->data, rx_skb->data, rx_len); + pci_dma_sync_single_for_device(tp->pci_dev, le64_to_cpu(rxd->addr), tp->rx_buf_sz, DMA_FROM_DEVICE); + if (i == 0) { +// dev_printk(KERN_INFO, tp_to_dev(tp), "loopback test finished\n",rx_len,len); + break; + } + } + + rtl8168_hw_reset(dev); + rtl8168_disable_rxdvgate(dev); + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + } + tp->dirty_tx++; + tp->dirty_rx++; + tp->cur_tx++; + tp->cur_rx++; + dma_unmap_single(&pdev->dev, le64_to_cpu(mapping), + len, DMA_TO_DEVICE); + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) & ~0x00060000); + dev_kfree_skb_any(skb); + RTL_W16(tp, IntrStatus, 0xFFBF); +} + +static unsigned int +rtl8168_xmii_reset_pending(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int retval; + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + retval = rtl8168_mdio_read(tp, MII_BMCR) & BMCR_RESET; + + return retval; +} + +static unsigned int +rtl8168_xmii_link_ok(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int retval; + + retval = (RTL_R8(tp, PHYstatus) & LinkStatus) ? 1 : 0; + + return retval; +} + +static int +rtl8168_wait_phy_reset_complete(struct rtl8168_private *tp) +{ + int i, val; + + for (i = 0; i < 2500; i++) { + val = rtl8168_mdio_read(tp, MII_BMCR) & BMCR_RESET; + if (!val) + return 0; + + mdelay(1); + } + + return -1; +} + +static void +rtl8168_xmii_reset_enable(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (rtl8168_is_in_phy_disable_mode(dev)) + return; + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, MII_ADVERTISE, rtl8168_mdio_read(tp, MII_ADVERTISE) & + ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL)); + rtl8168_mdio_write(tp, MII_CTRL1000, rtl8168_mdio_read(tp, MII_CTRL1000) & + ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL)); + rtl8168_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE); + + if (rtl8168_wait_phy_reset_complete(tp) == 0) return; + + if (netif_msg_link(tp)) + printk(KERN_ERR "%s: PHY reset failed.\n", dev->name); +} + +static void +rtl8168dp_10mbps_gphy_para(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u8 status = RTL_R8(tp, PHYstatus); + + if ((status & LinkStatus) && (status & _10bps)) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x10, 0x04EE); + } else { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x10, 0x01EE); + } +} + +void rtl8168_init_ring_indexes(struct rtl8168_private *tp) +{ + tp->dirty_tx = 0; + tp->dirty_rx = 0; + tp->cur_tx = 0; + tp->cur_rx = 0; +} + +static void +rtl8168_issue_offset_99_event(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_27: + case CFG_METHOD_28: + if (tp->mcfg == CFG_METHOD_24 || tp->mcfg == CFG_METHOD_25 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28) { + 
rtl8168_eri_write(tp, 0x3FC, 4, 0x00000000, ERIAR_ExGMAC); + } else { + rtl8168_eri_write(tp, 0x3FC, 4, 0x083C083C, ERIAR_ExGMAC); + } + csi_tmp = rtl8168_eri_read(tp, 0x3F8, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0x3F8, 1, csi_tmp, ERIAR_ExGMAC); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x1EA, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0x1EA, 1, csi_tmp, ERIAR_ExGMAC); + break; + } +} + +static void +rtl8168_enable_cfg9346_write(struct rtl8168_private *tp) +{ + RTL_W8(tp, Cfg9346, RTL_R8(tp, Cfg9346) | Cfg9346_Unlock); +} + +static void +rtl8168_disable_cfg9346_write(struct rtl8168_private *tp) +{ + RTL_W8(tp, Cfg9346, RTL_R8(tp, Cfg9346) & ~Cfg9346_Unlock); +} + +static void +rtl8168_enable_exit_l1_mask(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp |= (BIT_8 | BIT_9 | BIT_10 | BIT_11 | BIT_12); + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + break; + case CFG_METHOD_20: + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp |= (BIT_10 | BIT_11); + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + break; + case CFG_METHOD_21 ... CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp |= (BIT_7 | BIT_8 | BIT_9 | BIT_10 | BIT_11 | BIT_12); + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + break; + } +} + +static void +rtl8168_disable_exit_l1_mask(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_8 | BIT_9 | BIT_10 | BIT_11 | BIT_12); + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + break; + case CFG_METHOD_20: + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_10 | BIT_11); + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + break; + case CFG_METHOD_21 ... 
CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_7 | BIT_8 | BIT_9 | BIT_10 | BIT_11 | BIT_12); + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + break; + } +} + +static void +rtl8168_hw_aspm_clkreq_enable(struct rtl8168_private *tp, bool enable) +{ + if (!tp->HwSuppAspmClkIntrLock) return; + + if (enable && aspm) { + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + } else { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } + + udelay(10); +} + +#ifdef ENABLE_DASH_SUPPORT +static void +NICChkTypeEnableDashInterrupt(struct rtl8168_private *tp) +{ + if (tp->DASH) { + // + // even disconnected, enable 3 dash interrupt mask bits for in-band/out-band communication + // + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + rtl8168_enable_dash2_interrupt(tp); + RTL_W16(tp, IntrMask, (ISRIMR_DASH_INTR_EN | ISRIMR_DASH_INTR_CMAC_RESET)); + } else { + RTL_W16(tp, IntrMask, (ISRIMR_DP_DASH_OK | ISRIMR_DP_HOST_OK | ISRIMR_DP_REQSYS_OK)); + } + } +} +#endif + +static void +rtl8168_check_link_status(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int link_status_on; + +#ifdef ENABLE_FIBER_SUPPORT + rtl8168_check_fiber_link_status(dev); +#endif //ENABLE_FIBER_SUPPORT + + link_status_on = tp->link_ok(dev); + + if (tp->mcfg == CFG_METHOD_11) + rtl8168dp_10mbps_gphy_para(dev); + + if (netif_carrier_ok(dev) != link_status_on) { + if (link_status_on) { + rtl8168_hw_config(dev); + + if (tp->mcfg == CFG_METHOD_18 || tp->mcfg == CFG_METHOD_19 || tp->mcfg == CFG_METHOD_20) { + if (RTL_R8(tp, PHYstatus) & _1000bpsF) { + rtl8168_eri_write(tp, 0x1bc, 4, 0x00000011, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x0000001f, ERIAR_ExGMAC); + } else if (RTL_R8(tp, PHYstatus) & _100bps) { + rtl8168_eri_write(tp, 0x1bc, 4, 0x0000001f, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x0000001f, ERIAR_ExGMAC); + } else { + rtl8168_eri_write(tp, 0x1bc, 4, 0x0000001f, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x0000002d, ERIAR_ExGMAC); + } + } else if ((tp->mcfg == CFG_METHOD_16 || tp->mcfg == CFG_METHOD_17) && netif_running(dev)) { + if (tp->mcfg == CFG_METHOD_16 && (RTL_R8(tp, PHYstatus) & _10bps)) { + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptAllPhys); + } else if (tp->mcfg == CFG_METHOD_17) { + if (RTL_R8(tp, PHYstatus) & _1000bpsF) { + rtl8168_eri_write(tp, 0x1bc, 4, 0x00000011, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x00000005, ERIAR_ExGMAC); + } else if (RTL_R8(tp, PHYstatus) & _100bps) { + rtl8168_eri_write(tp, 0x1bc, 4, 0x0000001f, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x00000005, ERIAR_ExGMAC); + } else { + rtl8168_eri_write(tp, 0x1bc, 4, 0x0000001f, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x0000003f, ERIAR_ExGMAC); + } + } + } else if ((tp->mcfg == CFG_METHOD_14 || tp->mcfg == CFG_METHOD_15) && tp->eee_enabled == 1) { + /*Full -Duplex mode*/ + if (RTL_R8(tp, PHYstatus)&FullDup) { + rtl8168_mdio_write(tp, 0x1F, 0x0006); + rtl8168_mdio_write(tp, 0x00, 0x5a30); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + if (RTL_R8(tp, PHYstatus) & (_10bps | _100bps)) + RTL_W32(tp, TxConfig, (RTL_R32(tp, TxConfig) & ~BIT_19) | BIT_25); + + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0006); + rtl8168_mdio_write(tp, 0x00, 0x5a00); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + if (RTL_R8(tp, PHYstatus) & (_10bps | _100bps)) + RTL_W32(tp, TxConfig, (RTL_R32(tp, TxConfig) & 
~BIT_19) | (InterFrameGap << TxInterFrameGapShift)); + } + } else if ((tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_23 || tp->mcfg == CFG_METHOD_24 || + tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) && netif_running(dev)) { + if (RTL_R8(tp, PHYstatus)&FullDup) + RTL_W32(tp, TxConfig, (RTL_R32(tp, TxConfig) | (BIT_24 | BIT_25)) & ~BIT_19); + else + RTL_W32(tp, TxConfig, (RTL_R32(tp, TxConfig) | BIT_25) & ~(BIT_19 | BIT_24)); + } + + if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + /*half mode*/ + if (!(RTL_R8(tp, PHYstatus)&FullDup)) { + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, MII_ADVERTISE, rtl8168_mdio_read(tp, MII_ADVERTISE)&~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM)); + } + } + + if ((tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) && (RTL_R8(tp, PHYstatus) & _10bps)) { + u32 csi_tmp; + + csi_tmp = rtl8168_eri_read(tp, 0x1D0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_1; + rtl8168_eri_write(tp, 0x1D0, 1, csi_tmp, ERIAR_ExGMAC); + } + + rtl8168_hw_start(dev); + + netif_carrier_on(dev); + + netif_wake_queue(dev); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + tp->phy_reg_aner = rtl8168_mdio_read(tp, MII_EXPANSION); + tp->phy_reg_anlpar = rtl8168_mdio_read(tp, MII_LPA); + tp->phy_reg_gbsr = rtl8168_mdio_read(tp, MII_STAT1000); + + if (netif_msg_ifup(tp)) + printk(KERN_INFO PFX "%s: link up\n", dev->name); + } else { + if (netif_msg_ifdown(tp)) + printk(KERN_INFO PFX "%s: link down\n", dev->name); + + tp->phy_reg_aner = 0; + tp->phy_reg_anlpar = 0; + tp->phy_reg_gbsr = 0; + + netif_stop_queue(dev); + + netif_carrier_off(dev); + + rtl8168_hw_reset(dev); + + rtl8168_tx_clear(tp); + + rtl8168_rx_clear(tp); + + rtl8168_init_ring(dev); + + if (dynamic_aspm) { + rtl8168_enable_cfg9346_write(tp); + rtl8168_hw_aspm_clkreq_enable(tp, true); + rtl8168_disable_cfg9346_write(tp); + } + + rtl8168_set_speed(dev, tp->autoneg, tp->speed, tp->duplex, tp->advertising); + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_27: + case CFG_METHOD_28: + if (tp->org_pci_offset_99 & BIT_2) + tp->issue_offset_99_event = TRUE; + break; + } + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) { + NICChkTypeEnableDashInterrupt(tp); + } +#endif + } + } + + if (!link_status_on) { + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_27: + case CFG_METHOD_28: + if (tp->issue_offset_99_event) { + if (!(RTL_R8(tp, PHYstatus) & PowerSaveStatus)) { + tp->issue_offset_99_event = FALSE; + rtl8168_issue_offset_99_event(tp); + } + } + break; + } + } else { + if (dynamic_aspm) { + bool enable_hw_aspm_clkreq = true; + if (tp->dynamic_aspm_packet_count > dynamic_aspm_packet_threshold) + enable_hw_aspm_clkreq = false; + + rtl8168_enable_cfg9346_write(tp); + rtl8168_hw_aspm_clkreq_enable(tp, enable_hw_aspm_clkreq); + rtl8168_disable_cfg9346_write(tp); + } + tp->dynamic_aspm_packet_count = 0; + } +} + +static void +rtl8168_link_option(u8 *aut, + u32 *spd, + u8 *dup, + u32 *adv) +{ + 
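+ /* Sanitize the user-supplied link options: out-of-range values fall
+ back to autoneg, 1000 Mbps, full duplex, and a full 10/100/1000
+ advertisement mask. */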
if ((*spd != SPEED_1000) && (*spd != SPEED_100) && (*spd != SPEED_10)) + *spd = SPEED_1000; + + if ((*dup != DUPLEX_FULL) && (*dup != DUPLEX_HALF)) + *dup = DUPLEX_FULL; + + if ((*aut != AUTONEG_ENABLE) && (*aut != AUTONEG_DISABLE)) + *aut = AUTONEG_ENABLE; + + *adv &= (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + if (*adv == 0) + *adv = (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); +} + +static void +rtl8168_enable_ocp_phy_power_saving(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 val; + + if (tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + val = rtl8168_mdio_read_phy_ocp(tp, 0x0C41, 0x13); + if (val != 0x0050) { + rtl8168_set_phy_mcu_patch_request(tp); + rtl8168_mdio_write_phy_ocp(tp, 0x0C41, 0x13, 0x0000); + rtl8168_mdio_write_phy_ocp(tp, 0x0C41, 0x13, 0x0050); + rtl8168_clear_phy_mcu_patch_request(tp); + } + } +} + +static void +rtl8168_disable_ocp_phy_power_saving(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 val; + + if (tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + val = rtl8168_mdio_read_phy_ocp(tp, 0x0C41, 0x13); + if (val != 0x0500) { + rtl8168_set_phy_mcu_patch_request(tp); + rtl8168_mdio_write_phy_ocp(tp, 0x0C41, 0x13, 0x0000); + rtl8168_mdio_write_phy_ocp(tp, 0x0C41, 0x13, 0x0500); + rtl8168_clear_phy_mcu_patch_request(tp); + } + } +} + +void +rtl8168_wait_ll_share_fifo_ready(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int i; + + for (i = 0; i < 10; i++) { + udelay(100); + if (RTL_R16(tp, 0xD2) & BIT_9) + break; + } +} + +static void +rtl8168_disable_pci_offset_99(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x3F2, 2, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_0 | BIT_1); + rtl8168_eri_write(tp, 0x3F2, 2, csi_tmp, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_csi_fun0_write_byte(tp, 0x99, 0x00); + break; + } +} + +static void +rtl8168_enable_pci_offset_99(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_csi_fun0_write_byte(tp, 0x99, tp->org_pci_offset_99); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + 
case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x3F2, 2, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_0 | BIT_1); + if (tp->org_pci_offset_99 & (BIT_5 | BIT_6)) + csi_tmp |= BIT_1; + if (tp->org_pci_offset_99 & BIT_2) + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0x3F2, 2, csi_tmp, ERIAR_ExGMAC); + break; + } +} + +static void +rtl8168_init_pci_offset_99(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_26: + if (tp->org_pci_offset_99 & BIT_2) { + csi_tmp = rtl8168_eri_read(tp, 0x5C2, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_1; + rtl8168_eri_write(tp, 0x5C2, 1, csi_tmp, ERIAR_ExGMAC); + } + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x3F2, 2, ERIAR_ExGMAC); + csi_tmp &= ~( BIT_8 | BIT_9 | BIT_10 | BIT_11 | BIT_12 | BIT_13 | BIT_14 | BIT_15 ); + csi_tmp |= ( BIT_9 | BIT_10 | BIT_13 | BIT_14 | BIT_15 ); + rtl8168_eri_write(tp, 0x3F2, 2, csi_tmp, ERIAR_ExGMAC); + csi_tmp = rtl8168_eri_read(tp, 0x3F5, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_6 | BIT_7; + rtl8168_eri_write(tp, 0x3F5, 1, csi_tmp, ERIAR_ExGMAC); + rtl8168_mac_ocp_write(tp, 0xE02C, 0x1880); + rtl8168_mac_ocp_write(tp, 0xE02E, 0x4880); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_26: + rtl8168_eri_write(tp, 0x5C0, 1, 0xFA, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_26: + if (tp->org_pci_offset_99 & BIT_2) { + csi_tmp = rtl8168_eri_read(tp, 0x5C8, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0x5C8, 1, csi_tmp, ERIAR_ExGMAC); + } + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + if (tp->org_pci_offset_99 & BIT_2) + rtl8168_mac_ocp_write(tp, 0xE0A2, rtl8168_mac_ocp_read(tp, 0xE0A2) | BIT_0); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_23: + rtl8168_eri_write(tp, 0x2E8, 2, 0x883C, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2EA, 2, 0x8C12, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2EC, 2, 0x9003, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2E2, 2, 0x883C, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2E4, 2, 0x8C12, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2E6, 2, 0x9003, ERIAR_ExGMAC); + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_eri_write(tp, 0x2E8, 2, 0x9003, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2EA, 2, 0x9003, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2EC, 2, 0x9003, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2E2, 2, 0x883C, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2E4, 2, 0x8C12, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2E6, 2, 0x9003, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + csi_tmp = rtl8168_eri_read(tp, 0x3FA, 2, ERIAR_ExGMAC); + csi_tmp |= BIT_14; + rtl8168_eri_write(tp, 0x3FA, 2, 
csi_tmp, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + if (tp->org_pci_offset_99 & BIT_2) + RTL_W8(tp, 0xB6, RTL_R8(tp, 0xB6) | BIT_0); + break; + } + + rtl8168_enable_pci_offset_99(tp); +} + +static void +rtl8168_disable_pci_offset_180(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x1E2, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_2; + rtl8168_eri_write(tp, 0x1E2, 1, csi_tmp, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_26: + rtl8168_eri_write(tp, 0x1E9, 1, 0x0A, ERIAR_ExGMAC); + break; + } +} + +static void +rtl8168_enable_pci_offset_180(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_25: + case CFG_METHOD_28: + csi_tmp = rtl8168_eri_read(tp, 0x1E8, 4, ERIAR_ExGMAC); + csi_tmp &= ~(0x0000FF00); + csi_tmp |= (0x00006400); + rtl8168_eri_write(tp, 0x1E8, 4, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x1E4, 4, ERIAR_ExGMAC); + csi_tmp &= ~(0x0000FF00); + rtl8168_eri_write(tp, 0x1E4, 4, csi_tmp, ERIAR_ExGMAC); + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x1E8, 4, ERIAR_ExGMAC); + csi_tmp &= ~(0x0000FFF0); + csi_tmp |= (0x00000640); + rtl8168_eri_write(tp, 0x1E8, 4, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x1E4, 4, ERIAR_ExGMAC); + csi_tmp &= ~(0x0000FF00); + rtl8168_eri_write(tp, 0x1E4, 4, csi_tmp, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + csi_tmp = rtl8168_eri_read(tp, 0x1E2, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_2; + rtl8168_eri_write(tp, 0x1E2, 1, csi_tmp, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_26: + rtl8168_eri_write(tp, 0x1E9, 1, 0x64, ERIAR_ExGMAC); + break; + } + + rtl8168_mac_ocp_write(tp, 0xE094, 0x0000); +} + +static void +rtl8168_init_pci_offset_180(struct rtl8168_private *tp) +{ + if (tp->org_pci_offset_180 & (BIT_0|BIT_1)) + rtl8168_enable_pci_offset_180(tp); + else + rtl8168_disable_pci_offset_180(tp); +} + +static void +rtl8168_set_pci_99_180_exit_driver_para(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_issue_offset_99_event(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_disable_pci_offset_99(tp); + break; + } + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + 
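+ /* the offset-0x180 power-saving control (ERI 0x1E2 bit 2) is only
+ handled for these chips, so it is torn down here before exit */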
rtl8168_disable_pci_offset_180(tp); + break; + } +} + +static void +rtl8168_hw_d3_para(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + RTL_W16(tp, RxMaxSize, RX_BUF_SIZE); + + if (tp->HwSuppAspmClkIntrLock) { + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) & ~BIT_7); + rtl8168_enable_cfg9346_write(tp); + rtl8168_hw_aspm_clkreq_enable(tp, false); + rtl8168_disable_cfg9346_write(tp); + } + + rtl8168_disable_exit_l1_mask(tp); + +#ifdef ENABLE_REALWOW_SUPPORT + rtl8168_set_realwow_d3_para(dev); +#endif + + if (tp->mcfg == CFG_METHOD_18 || tp->mcfg == CFG_METHOD_19 || tp->mcfg == CFG_METHOD_20) { + rtl8168_eri_write(tp, 0x1bc, 4, 0x0000001f, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x0000002d, ERIAR_ExGMAC); + } else if (tp->mcfg == CFG_METHOD_16) { + rtl8168_eri_write(tp, 0x1bc, 4, 0x0000001f, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x0000003f, ERIAR_ExGMAC); + } + + if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_23 || tp->mcfg == CFG_METHOD_24 || + tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + rtl8168_eri_write(tp, 0x2F8, 2, 0x0064, ERIAR_ExGMAC); + } + + if (tp->bios_setting & BIT_28) { + if (tp->mcfg == CFG_METHOD_18 || tp->mcfg == CFG_METHOD_19 || + tp->mcfg == CFG_METHOD_20) { + u32 gphy_val; + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x04, 0x0061); + rtl8168_mdio_write(tp, 0x09, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val &= ~BIT_7; + rtl8168_mdio_write(tp, 0x06, gphy_val); + mdelay(1); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + gphy_val = rtl8168_mdio_read(tp, 0x16); + gphy_val &= ~BIT_10; + rtl8168_mdio_write(tp, 0x16, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + } + + rtl8168_set_pci_99_180_exit_driver_para(dev); + + /*disable ocp phy power saving*/ + if (tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) + if (!tp->dash_printer_enabled) + rtl8168_disable_ocp_phy_power_saving(dev); + + rtl8168_disable_rxdvgate(dev); +} + +static void +rtl8168_enable_magic_packet(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 csi_tmp; + + switch (tp->HwSuppMagicPktVer) { + case WAKEUP_MAGIC_PACKET_V1: + rtl8168_enable_cfg9346_write(tp); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | MagicPacket); + rtl8168_disable_cfg9346_write(tp); + break; + case WAKEUP_MAGIC_PACKET_V2: + csi_tmp = rtl8168_eri_read(tp, 0xDE, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0xDE, 1, csi_tmp, ERIAR_ExGMAC); + break; + } +} +static void +rtl8168_disable_magic_packet(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 csi_tmp; + + switch (tp->HwSuppMagicPktVer) { + case WAKEUP_MAGIC_PACKET_V1: + rtl8168_enable_cfg9346_write(tp); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~MagicPacket); + rtl8168_disable_cfg9346_write(tp); + break; + case WAKEUP_MAGIC_PACKET_V2: + csi_tmp = rtl8168_eri_read(tp, 0xDE, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDE, 1, csi_tmp, 
ERIAR_ExGMAC); + break; + } +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static void +rtl8168_get_hw_wol(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u8 options; + u32 csi_tmp; + unsigned long flags; + + + spin_lock_irqsave(&tp->lock, flags); + + tp->wol_opts = 0; + options = RTL_R8(tp, Config1); + if (!(options & PMEnable)) + goto out_unlock; + + options = RTL_R8(tp, Config3); + if (options & LinkUp) + tp->wol_opts |= WAKE_PHY; + + switch (tp->HwSuppMagicPktVer) { + case WAKEUP_MAGIC_PACKET_V2: + csi_tmp = rtl8168_eri_read(tp, 0xDE, 1, ERIAR_ExGMAC); + if (csi_tmp & BIT_0) + tp->wol_opts |= WAKE_MAGIC; + break; + default: + if (options & MagicPacket) + tp->wol_opts |= WAKE_MAGIC; + break; + } + + options = RTL_R8(tp, Config5); + if (options & UWF) + tp->wol_opts |= WAKE_UCAST; + if (options & BWF) + tp->wol_opts |= WAKE_BCAST; + if (options & MWF) + tp->wol_opts |= WAKE_MCAST; + +out_unlock: + tp->wol_enabled = (tp->wol_opts || tp->dash_printer_enabled) ? WOL_ENABLED : WOL_DISABLED; + + spin_unlock_irqrestore(&tp->lock, flags); +} + +static void +rtl8168_set_hw_wol(struct net_device *dev, u32 wolopts) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int i,tmp; + static struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket }, + }; + + switch (tp->HwSuppMagicPktVer) { + case WAKEUP_MAGIC_PACKET_V2: + tmp = ARRAY_SIZE(cfg) - 1; + + if (wolopts & WAKE_MAGIC) + rtl8168_enable_magic_packet(dev); + else + rtl8168_disable_magic_packet(dev); + break; + default: + tmp = ARRAY_SIZE(cfg); + break; + } + + rtl8168_enable_cfg9346_write(tp); + + for (i = 0; i < tmp; i++) { + u8 options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(tp, cfg[i].reg, options); + } + + if (tp->dash_printer_enabled) + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | LanWake); + + rtl8168_disable_cfg9346_write(tp); +} + +static void +rtl8168_phy_restart_nway(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (rtl8168_is_in_phy_disable_mode(dev)) return; + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART); +} + +static void +rtl8168_phy_setup_force_mode(struct net_device *dev, u32 speed, u8 duplex) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 bmcr_true_force = 0; + + if (rtl8168_is_in_phy_disable_mode(dev)) return; + + if ((speed == SPEED_10) && (duplex == DUPLEX_HALF)) { + bmcr_true_force = BMCR_SPEED10; + } else if ((speed == SPEED_10) && (duplex == DUPLEX_FULL)) { + bmcr_true_force = BMCR_SPEED10 | BMCR_FULLDPLX; + } else if ((speed == SPEED_100) && (duplex == DUPLEX_HALF)) { + bmcr_true_force = BMCR_SPEED100; + } else if ((speed == SPEED_100) && (duplex == DUPLEX_FULL)) { + bmcr_true_force = BMCR_SPEED100 | BMCR_FULLDPLX; + } else { + netif_err(tp, drv, dev, "Failed to set phy force mode!\n"); + return; + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, MII_BMCR, bmcr_true_force); +} + +static void +rtl8168_set_pci_pme(struct rtl8168_private *tp, int set) +{ + struct pci_dev *pdev = tp->pci_dev; + u16 pmc; + + if (!pdev->pm_cap) + return; + + pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc); + pmc |= PCI_PM_CTRL_PME_STATUS; + if (set) + pmc |= 
PCI_PM_CTRL_PME_ENABLE; + else + pmc &= ~PCI_PM_CTRL_PME_ENABLE; + pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc); +} + +static void +rtl8168_set_wol_link_speed(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int auto_nego; + int giga_ctrl; + u32 adv; + u16 anlpar; + u16 gbsr; + u16 aner; + + if (tp->autoneg != AUTONEG_ENABLE) + goto exit; + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + auto_nego = rtl8168_mdio_read(tp, MII_ADVERTISE); + auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL + | ADVERTISE_100HALF | ADVERTISE_100FULL); + + giga_ctrl = rtl8168_mdio_read(tp, MII_CTRL1000); + giga_ctrl &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); + + aner = anlpar = gbsr = 0; + if (tp->link_ok(dev)) { + aner = rtl8168_mdio_read(tp, MII_EXPANSION); + anlpar = rtl8168_mdio_read(tp, MII_LPA); + gbsr = rtl8168_mdio_read(tp, MII_STAT1000); + } else { + if (netif_running(dev)) { + aner = tp->phy_reg_aner; + anlpar = tp->phy_reg_anlpar; + gbsr = tp->phy_reg_gbsr; + } + } + + if ((aner | anlpar | gbsr) == 0) { + int auto_nego_tmp = 0; + adv = tp->advertising; + if ((adv & ADVERTISED_10baseT_Half) && (anlpar & LPA_10HALF)) + auto_nego_tmp |= ADVERTISE_10HALF; + if ((adv & ADVERTISED_10baseT_Full) && (anlpar & LPA_10FULL)) + auto_nego_tmp |= ADVERTISE_10FULL; + if ((adv & ADVERTISED_100baseT_Half) && (anlpar & LPA_100HALF)) + auto_nego_tmp |= ADVERTISE_100HALF; + if ((adv & ADVERTISED_100baseT_Full) && (anlpar & LPA_100FULL)) + auto_nego_tmp |= ADVERTISE_100FULL; + + if (auto_nego_tmp == 0) goto exit; + + auto_nego |= auto_nego_tmp; + goto skip_check_lpa; + } + if (!(aner & EXPANSION_NWAY)) goto exit; + + adv = tp->advertising; + if ((adv & ADVERTISED_10baseT_Half) && (anlpar & LPA_10HALF)) + auto_nego |= ADVERTISE_10HALF; + else if ((adv & ADVERTISED_10baseT_Full) && (anlpar & LPA_10FULL)) + auto_nego |= ADVERTISE_10FULL; + else if ((adv & ADVERTISED_100baseT_Half) && (anlpar & LPA_100HALF)) + auto_nego |= ADVERTISE_100HALF; + else if ((adv & ADVERTISED_100baseT_Full) && (anlpar & LPA_100FULL)) + auto_nego |= ADVERTISE_100FULL; + else if (adv & ADVERTISED_1000baseT_Half && (gbsr & LPA_1000HALF)) + giga_ctrl |= ADVERTISE_1000HALF; + else if (adv & ADVERTISED_1000baseT_Full && (gbsr & LPA_1000FULL)) + giga_ctrl |= ADVERTISE_1000FULL; + else + goto exit; + +skip_check_lpa: + if (tp->DASH) + auto_nego |= (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10HALF | ADVERTISE_10FULL); + + if (((tp->mcfg == CFG_METHOD_7) || (tp->mcfg == CFG_METHOD_8)) && (RTL_R16(tp, CPlusCmd) & ASF)) + auto_nego |= (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10HALF | ADVERTISE_10FULL); + +#ifdef CONFIG_DOWN_SPEED_100 + auto_nego |= (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10HALF | ADVERTISE_10FULL); +#endif + + rtl8168_mdio_write(tp, MII_ADVERTISE, auto_nego); + rtl8168_mdio_write(tp, MII_CTRL1000, giga_ctrl); + + rtl8168_phy_restart_nway(dev); + +exit: + return; +} + +static void +rtl8168_powerdown_pll(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + +#ifdef ENABLE_FIBER_SUPPORT + if (HW_FIBER_MODE_ENABLED(tp)) + return; +#endif //ENABLE_FIBER_SUPPORT + + if (tp->wol_enabled == WOL_ENABLED || tp->DASH || tp->EnableKCPOffload) { + rtl8168_set_hw_wol(dev, tp->wol_opts); + + if (tp->mcfg == CFG_METHOD_16 || tp->mcfg == CFG_METHOD_17 || + tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_24 || tp->mcfg == CFG_METHOD_25 || + tp->mcfg == CFG_METHOD_26 || tp->mcfg == CFG_METHOD_23 || + tp->mcfg == CFG_METHOD_27 || 
tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + rtl8168_enable_cfg9346_write(tp); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | PMSTS_En); + rtl8168_disable_cfg9346_write(tp); + } + + /* Enable the PME and clear the status */ + rtl8168_set_pci_pme(tp, 1); + + if (HW_SUPP_SERDES_PHY(tp)) + return; + + rtl8168_set_wol_link_speed(dev); + + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + + return; + } + + if (tp->DASH) + return; + + if (((tp->mcfg == CFG_METHOD_7) || (tp->mcfg == CFG_METHOD_8)) && (RTL_R16(tp, CPlusCmd) & ASF)) + return; + + rtl8168_phy_power_down(dev); + + if (!tp->HwIcVerUnknown) { + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + //case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~BIT_7); + break; + } + } + + switch (tp->mcfg) { + case CFG_METHOD_14 ... CFG_METHOD_15: + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) & ~BIT_6); + break; + case CFG_METHOD_16 ... CFG_METHOD_33: + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) & ~BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) & ~BIT_6); + break; + } +} + +static void rtl8168_powerup_pll(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | BIT_7 | BIT_6); + break; + } + + rtl8168_phy_power_up(dev); +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +static void +rtl8168_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u8 options; + unsigned long flags; + + wol->wolopts = 0; + + if (tp->mcfg == CFG_METHOD_DEFAULT) { + wol->supported = 0; + return; + } else { + wol->supported = WAKE_ANY; + } + + spin_lock_irqsave(&tp->lock, flags); + + options = RTL_R8(tp, Config1); + if (!(options & PMEnable)) + goto out_unlock; + + wol->wolopts = tp->wol_opts; + +out_unlock: + spin_unlock_irqrestore(&tp->lock, flags); +} + +static int +rtl8168_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + if (tp->mcfg == CFG_METHOD_DEFAULT) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + + tp->wol_opts = wol->wolopts; + + tp->wol_enabled = (tp->wol_opts || tp->dash_printer_enabled) ? 
WOL_ENABLED : WOL_DISABLED; + + spin_unlock_irqrestore(&tp->lock, flags); + + device_set_wakeup_enable(tp_to_dev(tp), tp->wol_enabled); + + return 0; +} + +static void +rtl8168_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct rtl8168_fw *rtl_fw = tp->rtl_fw; + + strcpy(info->driver, MODULENAME); + strcpy(info->version, RTL8168_VERSION); + strcpy(info->bus_info, pci_name(tp->pci_dev)); + info->regdump_len = R8168_REGS_DUMP_SIZE; + info->eedump_len = tp->eeprom_len; + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (rtl_fw) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int +rtl8168_get_regs_len(struct net_device *dev) +{ + return R8168_REGS_DUMP_SIZE; +} +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) + +static int +rtl8168_set_speed_xmii(struct net_device *dev, + u8 autoneg, + u32 speed, + u8 duplex, + u32 adv) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int auto_nego = 0; + int giga_ctrl = 0; + int rc = -EINVAL; + + if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + //Disable Giga Lite + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + rtl8168_clear_eth_phy_bit(tp, 0x14, BIT_9); + if (tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) + rtl8168_clear_eth_phy_bit(tp, 0x14, BIT_7); + rtl8168_mdio_write(tp, 0x1F, 0x0A40); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + if ((speed != SPEED_1000) && + (speed != SPEED_100) && + (speed != SPEED_10)) { + speed = SPEED_1000; + duplex = DUPLEX_FULL; + } + + giga_ctrl = rtl8168_mdio_read(tp, MII_CTRL1000); + giga_ctrl &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); + + if (autoneg == AUTONEG_ENABLE) { + /*n-way force*/ + auto_nego = rtl8168_mdio_read(tp, MII_ADVERTISE); + auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL | + ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + if (adv & ADVERTISED_10baseT_Half) + auto_nego |= ADVERTISE_10HALF; + if (adv & ADVERTISED_10baseT_Full) + auto_nego |= ADVERTISE_10FULL; + if (adv & ADVERTISED_100baseT_Half) + auto_nego |= ADVERTISE_100HALF; + if (adv & ADVERTISED_100baseT_Full) + auto_nego |= ADVERTISE_100FULL; + if (adv & ADVERTISED_1000baseT_Half) + giga_ctrl |= ADVERTISE_1000HALF; + if (adv & ADVERTISED_1000baseT_Full) + giga_ctrl |= ADVERTISE_1000FULL; + + //flow control + if (dev->mtu <= ETH_DATA_LEN) + auto_nego |= ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM; + + tp->phy_auto_nego_reg = auto_nego; + tp->phy_1000_ctrl_reg = giga_ctrl; + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, MII_ADVERTISE, auto_nego); + rtl8168_mdio_write(tp, MII_CTRL1000, giga_ctrl); + rtl8168_phy_restart_nway(dev); + mdelay(20); + } else { + /*true force*/ + if (speed == SPEED_10 || speed == SPEED_100) + rtl8168_phy_setup_force_mode(dev, speed, duplex); + else + goto out; + } + + tp->autoneg = autoneg; + tp->speed = speed; + tp->duplex = duplex; + tp->advertising = adv; + + if (tp->mcfg == CFG_METHOD_11) + rtl8168dp_10mbps_gphy_para(dev); + + rc = 0; +out: + return rc; +} + +static int +rtl8168_set_speed(struct net_device *dev, + u8 autoneg, + u32 speed, + u8 duplex, + u32 adv) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int ret; + + ret = tp->set_speed(dev, autoneg, speed, duplex, adv); + + return ret; +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +static int 
+rtl8168_set_settings(struct net_device *dev, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + struct ethtool_cmd *cmd +#else + const struct ethtool_link_ksettings *cmd +#endif + ) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int ret; + unsigned long flags; + u8 autoneg; + u32 speed; + u8 duplex; + u32 supported, advertising; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + autoneg = cmd->autoneg; + speed = cmd->speed; + duplex = cmd->duplex; + supported = cmd->supported; + advertising = cmd->advertising; +#else + const struct ethtool_link_settings *base = &cmd->base; + autoneg = base->autoneg; + speed = base->speed; + duplex = base->duplex; + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); +#endif + if (advertising & ~supported) + return -EINVAL; + + spin_lock_irqsave(&tp->lock, flags); + ret = rtl8168_set_speed(dev, autoneg, speed, duplex, advertising); + spin_unlock_irqrestore(&tp->lock, flags); + + return ret; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) +static u32 +rtl8168_get_tx_csum(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 ret; + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + ret = ((dev->features & NETIF_F_IP_CSUM) != 0); +#else + ret = ((dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) != 0); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + spin_unlock_irqrestore(&tp->lock, flags); + + return ret; +} + +static u32 +rtl8168_get_rx_csum(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 ret; + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + ret = tp->cp_cmd & RxChkSum; + spin_unlock_irqrestore(&tp->lock, flags); + + return ret; +} + +static int +rtl8168_set_tx_csum(struct net_device *dev, + u32 data) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + if (tp->mcfg == CFG_METHOD_DEFAULT) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + if (data) + dev->features |= NETIF_F_IP_CSUM; + else + dev->features &= ~NETIF_F_IP_CSUM; +#else + if (data) + if ((tp->mcfg == CFG_METHOD_1) || (tp->mcfg == CFG_METHOD_2) || (tp->mcfg == CFG_METHOD_3)) + dev->features |= NETIF_F_IP_CSUM; + else + dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + else + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} + +static int +rtl8168_set_rx_csum(struct net_device *dev, + u32 data) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + if (tp->mcfg == CFG_METHOD_DEFAULT) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + + if (data) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) + +#ifdef CONFIG_R8168_VLAN + +static inline u32 +rtl8168_tx_vlan_tag(struct rtl8168_private *tp, + struct sk_buff *skb) +{ + u32 tag; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + tag = (tp->vlgrp && vlan_tx_tag_present(skb)) ? 
+ TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) + tag = (vlan_tx_tag_present(skb)) ? + TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; +#else + tag = (skb_vlan_tag_present(skb)) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +#endif + + return tag; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + +static void +rtl8168_vlan_rx_register(struct net_device *dev, + struct vlan_group *grp) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + tp->vlgrp = grp; + if (tp->vlgrp) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + RTL_R16(tp, CPlusCmd); + spin_unlock_irqrestore(&tp->lock, flags); +} + +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) +static void +rtl8168_vlan_rx_kill_vid(struct net_device *dev, + unsigned short vid) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) + if (tp->vlgrp) + tp->vlgrp->vlan_devices[vid] = NULL; +#else + vlan_group_set_device(tp->vlgrp, vid, NULL); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) + spin_unlock_irqrestore(&tp->lock, flags); +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + +static int +rtl8168_rx_vlan_skb(struct rtl8168_private *tp, + struct RxDesc *desc, + struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + int ret = -1; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + if (tp->vlgrp && (opts2 & RxVlanTag)) { + rtl8168_rx_hwaccel_skb(skb, tp->vlgrp, + swab16(opts2 & 0xffff)); + ret = 0; + } +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); +#else + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +#endif + + desc->opts2 = 0; + return ret; +} + +#else /* !CONFIG_R8168_VLAN */ + +static inline u32 +rtl8168_tx_vlan_tag(struct rtl8168_private *tp, + struct sk_buff *skb) +{ + return 0; +} + +static int +rtl8168_rx_vlan_skb(struct rtl8168_private *tp, + struct RxDesc *desc, + struct sk_buff *skb) +{ + return -1; +} + +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) + +static netdev_features_t rtl8168_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + if (dev->mtu > MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + if (dev->mtu > ETH_DATA_LEN) { + features &= ~NETIF_F_ALL_TSO; + features &= ~NETIF_F_ALL_CSUM; + } + spin_unlock_irqrestore(&tp->lock, flags); + + return features; +} + +static int rtl8168_hw_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 rx_config; + + rx_config = RTL_R32(tp, RxConfig); + if (features & NETIF_F_RXALL) + rx_config |= (AcceptErr | AcceptRunt); + else + rx_config &= ~(AcceptErr | AcceptRunt); + + RTL_W32(tp, RxConfig, rx_config); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if (dev->features & NETIF_F_HW_VLAN_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + RTL_R16(tp, CPlusCmd); + + return 0; +} + +static int rtl8168_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; 
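+ /* Only the user-toggleable bits (RXALL, RXCSUM, HW VLAN RX) are kept below; the XOR against dev->features means the NIC is reprogrammed only when one of them actually changes. */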
+ + features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX; + + spin_lock_irqsave(&tp->lock, flags); + if (features ^ dev->features) + rtl8168_hw_set_features(dev, features); + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} + +#endif + +static void rtl8168_gset_xmii(struct net_device *dev, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + struct ethtool_cmd *cmd +#else + struct ethtool_link_ksettings *cmd +#endif + ) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u8 status; + u8 autoneg, duplex; + u32 speed = 0; + u16 bmcr, bmsr, anlpar, ctrl1000 = 0, stat1000 = 0; + u32 supported, advertising, lp_advertising; + unsigned long flags; + + supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause; + + advertising = ADVERTISED_TP; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + bmcr = rtl8168_mdio_read(tp, MII_BMCR); + bmsr = rtl8168_mdio_read(tp, MII_BMSR); + anlpar = rtl8168_mdio_read(tp, MII_LPA); + ctrl1000 = rtl8168_mdio_read(tp, MII_CTRL1000); + stat1000 = rtl8168_mdio_read(tp, MII_STAT1000); + spin_unlock_irqrestore(&tp->lock, flags); + + if (bmcr & BMCR_ANENABLE) { + advertising |= ADVERTISED_Autoneg; + autoneg = AUTONEG_ENABLE; + + if (bmsr & BMSR_ANEGCOMPLETE) { + lp_advertising = mii_lpa_to_ethtool_lpa_t(anlpar); + lp_advertising |= + mii_stat1000_to_ethtool_lpa_t(stat1000); + } else { + lp_advertising = 0; + } + + if (tp->phy_auto_nego_reg & ADVERTISE_10HALF) + advertising |= ADVERTISED_10baseT_Half; + if (tp->phy_auto_nego_reg & ADVERTISE_10FULL) + advertising |= ADVERTISED_10baseT_Full; + if (tp->phy_auto_nego_reg & ADVERTISE_100HALF) + advertising |= ADVERTISED_100baseT_Half; + if (tp->phy_auto_nego_reg & ADVERTISE_100FULL) + advertising |= ADVERTISED_100baseT_Full; + if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL) + advertising |= ADVERTISED_1000baseT_Full; + } else { + autoneg = AUTONEG_DISABLE; + lp_advertising = 0; + } + + status = RTL_R8(tp, PHYstatus); + + if (status & LinkStatus) { + /*link on*/ + if (status & _1000bpsF) + speed = SPEED_1000; + else if (status & _100bps) + speed = SPEED_100; + else if (status & _10bps) + speed = SPEED_10; + + if (status & TxFlowCtrl) + advertising |= ADVERTISED_Asym_Pause; + + if (status & RxFlowCtrl) + advertising |= ADVERTISED_Pause; + + duplex = ((status & _1000bpsF) || (status & FullDup)) ? 
+ DUPLEX_FULL : DUPLEX_HALF; + } else { + /*link down*/ + speed = SPEED_UNKNOWN; + duplex = DUPLEX_UNKNOWN; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + cmd->supported = supported; + cmd->advertising = advertising; +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30) + cmd->lp_advertising = lp_advertising; +#endif + cmd->autoneg = autoneg; + cmd->speed = speed; + cmd->duplex = duplex; + cmd->port = PORT_TP; +#else + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); + cmd->base.autoneg = autoneg; + cmd->base.speed = speed; + cmd->base.duplex = duplex; + cmd->base.port = PORT_TP; +#endif +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +static int +rtl8168_get_settings(struct net_device *dev, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + struct ethtool_cmd *cmd +#else + struct ethtool_link_ksettings *cmd +#endif + ) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + tp->get_settings(dev, cmd); + + return 0; +} + +static void rtl8168_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8168_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned int i; + u8 *data = p; + unsigned long flags; + + if (regs->len < R8168_REGS_DUMP_SIZE) + return /* -EINVAL */; + + memset(p, 0, regs->len); + + spin_lock_irqsave(&tp->lock, flags); + for (i = 0; i < R8168_MAC_REGS_SIZE; i++) + *data++ = readb(ioaddr + i); + data = (u8*)p + 256; + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + for (i = 0; i < R8168_PHY_REGS_SIZE/2; i++) { + *(u16*)data = rtl8168_mdio_read(tp, i); + data += 2; + } + data = (u8*)p + 256 * 2; + + for (i = 0; i < R8168_EPHY_REGS_SIZE/2; i++) { + *(u16*)data = rtl8168_ephy_read(tp, i); + data += 2; + } + data = (u8*)p + 256 * 3; + + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + /* RTL8168B does not support Extend GMAC */ + break; + default: + for (i = 0; i < R8168_ERI_REGS_SIZE; i+=4) { + *(u32*)data = rtl8168_eri_read(tp, i , 4, ERIAR_ExGMAC); + data += 4; + } + break; + } + spin_unlock_irqrestore(&tp->lock, flags); +} + +static u32 +rtl8168_get_msglevel(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + return tp->msg_enable; +} + +static void +rtl8168_set_msglevel(struct net_device *dev, + u32 value) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + tp->msg_enable = value; +} + +static const char rtl8168_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; +#endif //#LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +static int rtl8168_get_stats_count(struct net_device *dev) +{ + return ARRAY_SIZE(rtl8168_gstrings); +} +#endif //#LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +#else +static int rtl8168_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8168_gstrings); + default: + return -EOPNOTSUPP; + } +} +#endif + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +static void +rtl8168_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + 
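/* A minimal reading of the flow below: the NIC DMAs its hardware tally counters into tally_vaddr once CounterDump is set, and the wait loop polls (10us x up to 20 tries) for the dump to finish before the counters are copied out. */ +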
struct rtl8168_private *tp = netdev_priv(dev); + struct rtl8168_counters *counters; + dma_addr_t paddr; + u32 cmd; + u32 WaitCnt; + unsigned long flags; + + ASSERT_RTNL(); + + counters = tp->tally_vaddr; + paddr = tp->tally_paddr; + if (!counters) + return; + + spin_lock_irqsave(&tp->lock, flags); + RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | CounterDump); + + WaitCnt = 0; + while (RTL_R32(tp, CounterAddrLow) & CounterDump) { + udelay(10); + + WaitCnt++; + if (WaitCnt > 20) + break; + } + spin_unlock_irqrestore(&tp->lock, flags); + + data[0] = le64_to_cpu(counters->tx_packets); + data[1] = le64_to_cpu(counters->rx_packets); + data[2] = le64_to_cpu(counters->tx_errors); + data[3] = le32_to_cpu(counters->rx_errors); + data[4] = le16_to_cpu(counters->rx_missed); + data[5] = le16_to_cpu(counters->align_errors); + data[6] = le32_to_cpu(counters->tx_one_collision); + data[7] = le32_to_cpu(counters->tx_multi_collision); + data[8] = le64_to_cpu(counters->rx_unicast); + data[9] = le64_to_cpu(counters->rx_broadcast); + data[10] = le32_to_cpu(counters->rx_multicast); + data[11] = le16_to_cpu(counters->tx_aborted); + data[12] = le16_to_cpu(counters->tx_underrun); +} + +static void +rtl8168_get_strings(struct net_device *dev, + u32 stringset, + u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, *rtl8168_gstrings, sizeof(rtl8168_gstrings)); + break; + } +} +#endif //#LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) + +static int rtl_get_eeprom_len(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + return tp->eeprom_len; +} + +static int rtl_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *buf) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int i,j,ret; + int start_w, end_w; + int VPD_addr, VPD_data; + u32 *eeprom_buff; + u16 tmp; + + if (tp->eeprom_type == EEPROM_TYPE_NONE) { + dev_printk(KERN_DEBUG, tp_to_dev(tp), "Detect none EEPROM\n"); + return -EOPNOTSUPP; + } else if (eeprom->len == 0 || (eeprom->offset+eeprom->len) > tp->eeprom_len) { + dev_printk(KERN_DEBUG, tp_to_dev(tp), "Invalid parameter\n"); + return -EINVAL; + } + + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + VPD_addr = 0xCE; + VPD_data = 0xD0; + break; + + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + return -EOPNOTSUPP; + default: + VPD_addr = 0xD2; + VPD_data = 0xD4; + break; + } + + start_w = eeprom->offset >> 2; + end_w = (eeprom->offset + eeprom->len - 1) >> 2; + + eeprom_buff = kmalloc(sizeof(u32)*(end_w - start_w + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + rtl8168_enable_cfg9346_write(tp); + ret = -EFAULT; + for (i=start_w; i<=end_w; i++) { + pci_write_config_word(tp->pci_dev, VPD_addr, (u16)i*4); + ret = -EFAULT; + for (j = 0; j < 10; j++) { + udelay(400); + pci_read_config_word(tp->pci_dev, VPD_addr, &tmp); + if (tmp&0x8000) { + ret = 0; + break; + } + } + + if (ret) + break; + + pci_read_config_dword(tp->pci_dev, VPD_data, &eeprom_buff[i-start_w]); + } + rtl8168_disable_cfg9346_write(tp); + + if (!ret) + memcpy(buf, (u8 *)eeprom_buff + (eeprom->offset & 3), eeprom->len); + + kfree(eeprom_buff); + + return ret; +} + +#undef ethtool_op_get_link +#define ethtool_op_get_link _kc_ethtool_op_get_link +static u32 _kc_ethtool_op_get_link(struct net_device *dev) +{ + return netif_carrier_ok(dev) ? 
1 : 0; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) +#undef ethtool_op_get_sg +#define ethtool_op_get_sg _kc_ethtool_op_get_sg +static u32 _kc_ethtool_op_get_sg(struct net_device *dev) +{ +#ifdef NETIF_F_SG + return (dev->features & NETIF_F_SG) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_sg +#define ethtool_op_set_sg _kc_ethtool_op_set_sg +static int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (tp->mcfg == CFG_METHOD_DEFAULT) + return -EOPNOTSUPP; + +#ifdef NETIF_F_SG + if (data) + dev->features |= NETIF_F_SG; + else + dev->features &= ~NETIF_F_SG; +#endif + + return 0; +} +#endif + +static int rtl8168_enable_EEE(struct rtl8168_private *tp) +{ + int ret; + u16 data; + u32 csi_tmp; + + ret = 0; + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0020); + data = rtl8168_mdio_read(tp, 0x15) | 0x0100; + rtl8168_mdio_write(tp, 0x15, data); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + data = rtl8168_mdio_read(tp, 0x06) | 0x2000; + rtl8168_mdio_write(tp, 0x06, data); + rtl8168_mdio_write(tp, 0x1F, 0x0006); + rtl8168_mdio_write(tp, 0x00, 0x5A30); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0007); + rtl8168_mdio_write(tp, 0x0E, 0x003C); + rtl8168_mdio_write(tp, 0x0D, 0x4007); + rtl8168_mdio_write(tp, 0x0E, 0x0006); + rtl8168_mdio_write(tp, 0x0D, 0x0000); + if ((RTL_R8(tp, Config4)&0x40) && (RTL_R8(tp, 0x6D) & BIT_7)) { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8AC8); + rtl8168_mdio_write(tp, 0x06, RTL_R16(tp, tp->NicCustLedValue)); + rtl8168_mdio_write(tp, 0x05, 0x8B82); + data = rtl8168_mdio_read(tp, 0x06) | 0x0010; + rtl8168_mdio_write(tp, 0x05, 0x8B82); + rtl8168_mdio_write(tp, 0x06, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + break; + + case CFG_METHOD_16: + case CFG_METHOD_17: + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC) | 0x0003; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + rtl8168_mdio_write(tp,0x1F , 0x0004); + rtl8168_mdio_write(tp,0x1F , 0x0007); + rtl8168_mdio_write(tp,0x1E , 0x0020); + data = rtl8168_mdio_read(tp, 0x15)|0x0100; + rtl8168_mdio_write(tp,0x15 , data); + rtl8168_mdio_write(tp,0x1F , 0x0002); + rtl8168_mdio_write(tp,0x1F , 0x0005); + rtl8168_mdio_write(tp,0x05 , 0x8B85); + data = rtl8168_mdio_read(tp, 0x06)|0x2000; + rtl8168_mdio_write(tp,0x06 , data); + rtl8168_mdio_write(tp,0x1F , 0x0000); + rtl8168_mdio_write(tp,0x0D , 0x0007); + rtl8168_mdio_write(tp,0x0E , 0x003C); + rtl8168_mdio_write(tp,0x0D , 0x4007); + rtl8168_mdio_write(tp,0x0E , 0x0006); + rtl8168_mdio_write(tp,0x0D , 0x0000); + break; + + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp |= BIT_1 | BIT_0; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0020); + data = rtl8168_mdio_read(tp, 0x15); + data |= BIT_8; + rtl8168_mdio_write(tp, 0x15, data); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + data = rtl8168_mdio_read(tp, 0x06); + data |= BIT_13; + rtl8168_mdio_write(tp, 0x06, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0007); + rtl8168_mdio_write(tp, 0x0E, 0x003C); + rtl8168_mdio_write(tp, 0x0D, 0x4007); + rtl8168_mdio_write(tp, 0x0E, 0x0006); + rtl8168_mdio_write(tp, 
0x0D, 0x0000); + break; + + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp |= BIT_1 | BIT_0; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x11); + rtl8168_mdio_write(tp, 0x11, data | BIT_4); + rtl8168_mdio_write(tp, 0x1F, 0x0A5D); + rtl8168_mdio_write(tp, 0x10, tp->eee_adv_t); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + + default: +// dev_printk(KERN_DEBUG, tp_to_dev(tp), "Not Support EEE\n"); + ret = -EOPNOTSUPP; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mdio_write(tp, 0x1F, 0x0A4A); + rtl8168_set_eth_phy_bit(tp, 0x11, BIT_9); + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_7); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + } + + /*Advanced EEE*/ + switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_oob_mutex_lock(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_set_phy_mcu_patch_request(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_25: + rtl8168_eri_write(tp, 0x1EA, 1, 0xFA, ERIAR_ExGMAC); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x10); + if (data & BIT_10) { + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + data = rtl8168_mdio_read(tp, 0x16); + data &= ~(BIT_1); + rtl8168_mdio_write(tp, 0x16, data); + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + data = rtl8168_mdio_read(tp, 0x16); + data |= BIT_1; + rtl8168_mdio_write(tp, 0x16, data); + } + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_26: + data = rtl8168_mac_ocp_read(tp, 0xE052); + data |= BIT_0; + rtl8168_mac_ocp_write(tp, 0xE052, data); + data = rtl8168_mac_ocp_read(tp, 0xE056); + data &= 0xFF0F; + data |= (BIT_4 | BIT_5 | BIT_6); + rtl8168_mac_ocp_write(tp, 0xE056, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x10); + if (data & BIT_10) { + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + data = rtl8168_mdio_read(tp, 0x16); + data &= ~(BIT_1); + rtl8168_mdio_write(tp, 0x16, data); + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + data = rtl8168_mdio_read(tp, 0x16); + data |= BIT_1; + rtl8168_mdio_write(tp, 0x16, data); + } + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_27: + case CFG_METHOD_28: + data = rtl8168_mac_ocp_read(tp, 0xE052); + data &= ~BIT_0; + rtl8168_mac_ocp_write(tp, 0xE052, data); + data = rtl8168_mac_ocp_read(tp, 0xE056); + data &= 0xFF0F; + data |= (BIT_4 | BIT_5 | BIT_6); + rtl8168_mac_ocp_write(tp, 0xE056, data); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + data = rtl8168_mac_ocp_read(tp, 0xE052); + data &= ~(BIT_0); + rtl8168_mac_ocp_write(tp, 0xE052, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x10) | BIT_15; + rtl8168_mdio_write(tp, 0x10, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + data = rtl8168_mdio_read(tp, 0x11) | BIT_13 | 
BIT_14; + data &= ~(BIT_12); + rtl8168_mdio_write(tp, 0x11, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + /* + data = rtl8168_mac_ocp_read(tp, 0xE052); + data |= BIT_0; + rtl8168_mac_ocp_write(tp, 0xE052, data); + */ + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x10) | BIT_15; + rtl8168_mdio_write(tp, 0x10, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + data = rtl8168_mdio_read(tp, 0x11) | BIT_13 | BIT_14; + data &= ~(BIT_12); + rtl8168_mdio_write(tp, 0x11, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_clear_phy_mcu_patch_request(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_oob_mutex_unlock(tp); + break; + } + + return ret; +} + +static int rtl8168_disable_EEE(struct rtl8168_private *tp) +{ + int ret; + u16 data; + u32 csi_tmp; + + ret = 0; + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + data = rtl8168_mdio_read(tp, 0x06) & ~0x2000; + rtl8168_mdio_write(tp, 0x06, data); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0020); + data = rtl8168_mdio_read(tp, 0x15) & ~0x0100; + rtl8168_mdio_write(tp, 0x15, data); + rtl8168_mdio_write(tp, 0x1F, 0x0006); + rtl8168_mdio_write(tp, 0x00, 0x5A00); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0007); + rtl8168_mdio_write(tp, 0x0E, 0x003C); + rtl8168_mdio_write(tp, 0x0D, 0x4007); + rtl8168_mdio_write(tp, 0x0E, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + if (RTL_R8(tp, Config4) & 0x40) { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B82); + data = rtl8168_mdio_read(tp, 0x06) & ~0x0010; + rtl8168_mdio_write(tp, 0x05, 0x8B82); + rtl8168_mdio_write(tp, 0x06, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + break; + + case CFG_METHOD_16: + case CFG_METHOD_17: + csi_tmp = rtl8168_eri_read(tp, 0x1B0,4, ERIAR_ExGMAC)& ~0x0003; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + data = rtl8168_mdio_read(tp, 0x06) & ~0x2000; + rtl8168_mdio_write(tp, 0x06, data); + rtl8168_mdio_write(tp, 0x1F, 0x0004); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0020); + data = rtl8168_mdio_read(tp, 0x15) & ~0x0100; + rtl8168_mdio_write(tp,0x15 , data); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0007); + rtl8168_mdio_write(tp, 0x0E, 0x003C); + rtl8168_mdio_write(tp, 0x0D, 0x4007); + rtl8168_mdio_write(tp, 0x0E, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_1 | BIT_0); + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + data = rtl8168_mdio_read(tp, 0x06); + data &= ~BIT_13; + rtl8168_mdio_write(tp, 0x06, data); + 
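/* Presumably the mirror of the enable path for these chips: the vendor PHY bits set there (BIT_13 via 0x8B85, BIT_8 in the 0x0020 page) are cleared again here. */ +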
rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0020); + data = rtl8168_mdio_read(tp, 0x15); + data &= ~BIT_8; + rtl8168_mdio_write(tp, 0x15, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0007); + rtl8168_mdio_write(tp, 0x0E, 0x003C); + rtl8168_mdio_write(tp, 0x0D, 0x4007); + rtl8168_mdio_write(tp, 0x0E, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_1 | BIT_0); + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x11); + rtl8168_mdio_write(tp, 0x11, data & ~BIT_4); + rtl8168_mdio_write(tp, 0x1F, 0x0A5D); + rtl8168_mdio_write(tp, 0x10, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + + default: +// dev_printk(KERN_DEBUG, tp_to_dev(tp), "Not Support EEE\n"); + ret = -EOPNOTSUPP; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + rtl8168_clear_eth_phy_bit(tp, 0x14, BIT_7); + rtl8168_mdio_write(tp, 0x1F, 0x0A4A); + rtl8168_clear_eth_phy_bit(tp, 0x11, BIT_9); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + } + + /*Advanced EEE*/ + switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_oob_mutex_lock(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_set_phy_mcu_patch_request(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_25: + rtl8168_eri_write(tp, 0x1EA, 1, 0x00, ERIAR_ExGMAC); + + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + data = rtl8168_mdio_read(tp, 0x16); + data &= ~(BIT_1); + rtl8168_mdio_write(tp, 0x16, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_26: + data = rtl8168_mac_ocp_read(tp, 0xE052); + data &= ~(BIT_0); + rtl8168_mac_ocp_write(tp, 0xE052, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + data = rtl8168_mdio_read(tp, 0x16); + data &= ~(BIT_1); + rtl8168_mdio_write(tp, 0x16, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_27: + case CFG_METHOD_28: + data = rtl8168_mac_ocp_read(tp, 0xE052); + data &= ~(BIT_0); + rtl8168_mac_ocp_write(tp, 0xE052, data); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + data = rtl8168_mac_ocp_read(tp, 0xE052); + data &= ~(BIT_0); + rtl8168_mac_ocp_write(tp, 0xE052, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x10) & ~(BIT_15); + rtl8168_mdio_write(tp, 0x10, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + data = rtl8168_mdio_read(tp, 0x11) & ~(BIT_12 | BIT_13 | BIT_14); + rtl8168_mdio_write(tp, 0x11, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + 
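/* Release the PHY MCU patch request taken before the advanced-EEE register writes above. */ +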
rtl8168_clear_phy_mcu_patch_request(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_oob_mutex_unlock(tp); + break; + } + + return ret; +} + +static int rtl_nway_reset(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + int ret, bmcr; + + spin_lock_irqsave(&tp->lock, flags); + + if (unlikely(tp->rtk_enable_diag)) { + spin_unlock_irqrestore(&tp->lock, flags); + return -EBUSY; + } + + /* if autoneg is off, it's an error */ + rtl8168_mdio_write(tp, 0x1F, 0x0000); + bmcr = rtl8168_mdio_read(tp, MII_BMCR); + + if (bmcr & BMCR_ANENABLE) { + bmcr |= BMCR_ANRESTART; + rtl8168_mdio_write(tp, MII_BMCR, bmcr); + ret = 0; + } else { + ret = -EINVAL; + } + + spin_unlock_irqrestore(&tp->lock, flags); + + return ret; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) +static int +rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *eee) +{ + struct rtl8168_private *tp = netdev_priv(net); + u32 lp, adv, supported = 0; + unsigned long flags; + u16 val; + + switch (tp->mcfg) { + case CFG_METHOD_21 ... CFG_METHOD_33: + break; + default: + return -EOPNOTSUPP; + } + + spin_lock_irqsave(&tp->lock, flags); + + if (unlikely(tp->rtk_enable_diag)) { + spin_unlock_irqrestore(&tp->lock, flags); + return -EBUSY; + } + + rtl8168_mdio_write(tp, 0x1F, 0x0A5C); + val = rtl8168_mdio_read(tp, 0x12); + supported = mmd_eee_cap_to_ethtool_sup_t(val); + + rtl8168_mdio_write(tp, 0x1F, 0x0A5D); + val = rtl8168_mdio_read(tp, 0x10); + adv = mmd_eee_adv_to_ethtool_adv_t(val); + + val = rtl8168_mdio_read(tp, 0x11); + lp = mmd_eee_adv_to_ethtool_adv_t(val); + + val = rtl8168_eri_read(tp, 0x1B0, 2, ERIAR_ExGMAC); + val &= BIT_1 | BIT_0; + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + spin_unlock_irqrestore(&tp->lock, flags); + + eee->eee_enabled = !!val; + eee->eee_active = !!(supported & adv & lp); + eee->supported = supported; + eee->advertised = adv; + eee->lp_advertised = lp; + + return 0; +} + +static int +rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *eee) +{ + struct rtl8168_private *tp = netdev_priv(net); + unsigned long flags; + + switch (tp->mcfg) { + case CFG_METHOD_21 ... 
CFG_METHOD_33: + break; + default: + return -EOPNOTSUPP; + } + + if (HW_SUPP_SERDES_PHY(tp) || + !HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp) || + tp->DASH) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + + if (unlikely(tp->rtk_enable_diag)) { + spin_unlock_irqrestore(&tp->lock, flags); + return -EBUSY; + } + + tp->eee_enabled = eee->eee_enabled; + tp->eee_adv_t = ethtool_adv_to_mmd_eee_adv_t(eee->advertised); + + if (tp->eee_enabled) + rtl8168_enable_EEE(tp); + else + rtl8168_disable_EEE(tp); + + spin_unlock_irqrestore(&tp->lock, flags); + + rtl_nway_reset(net); + + return 0; +} +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +static const struct ethtool_ops rtl8168_ethtool_ops = { + .get_drvinfo = rtl8168_get_drvinfo, + .get_regs_len = rtl8168_get_regs_len, + .get_link = ethtool_op_get_link, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + .get_settings = rtl8168_get_settings, + .set_settings = rtl8168_set_settings, +#else + .get_link_ksettings = rtl8168_get_settings, + .set_link_ksettings = rtl8168_set_settings, +#endif + .get_msglevel = rtl8168_get_msglevel, + .set_msglevel = rtl8168_set_msglevel, +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) + .get_rx_csum = rtl8168_get_rx_csum, + .set_rx_csum = rtl8168_set_rx_csum, + .get_tx_csum = rtl8168_get_tx_csum, + .set_tx_csum = rtl8168_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = ethtool_op_set_tso, +#endif +#endif + .get_regs = rtl8168_get_regs, + .get_wol = rtl8168_get_wol, + .set_wol = rtl8168_set_wol, + .get_strings = rtl8168_get_strings, +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) + .get_stats_count = rtl8168_get_stats_count, +#else + .get_sset_count = rtl8168_get_sset_count, +#endif + .get_ethtool_stats = rtl8168_get_ethtool_stats, +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) +#ifdef ETHTOOL_GPERMADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) + .get_eeprom = rtl_get_eeprom, + .get_eeprom_len = rtl_get_eeprom_len, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) + .get_ts_info = ethtool_op_get_ts_info, +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) + .get_eee = rtl_ethtool_get_eee, + .set_eee = rtl_ethtool_set_eee, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ + .nway_reset = rtl_nway_reset, +}; +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) + +#if 0 + +static int rtl8168_enable_green_feature(struct rtl8168_private *tp) +{ + u16 gphy_val; + unsigned long flags; + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + rtl8168_mdio_write(tp, 0x1F, 0x0003); + gphy_val = rtl8168_mdio_read(tp, 0x10) | 0x0400; + rtl8168_mdio_write(tp, 0x10, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x19) | 0x0001; + rtl8168_mdio_write(tp, 0x19, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + gphy_val = rtl8168_mdio_read(tp, 0x01) & ~0x0100; + rtl8168_mdio_write(tp, 0x01, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); + mdelay(20); + break; + + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + rtl8168_mdio_write(tp, 0x1f, 0x0003); + gphy_val = rtl8168_mdio_read(tp, 0x10); + gphy_val |= BIT_10; + rtl8168_mdio_write(tp, 0x10, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x19); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x19, gphy_val); + 
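/* Undocumented vendor green-feature sequence; note this helper pair is compiled out under #if 0. */ +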
rtl8168_mdio_write(tp, 0x1F, 0x0005); + gphy_val = rtl8168_mdio_read(tp, 0x01); + gphy_val |= BIT_8; + rtl8168_mdio_write(tp, 0x01, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); + break; + case CFG_METHOD_21: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_set_eth_phy_bit( tp, 0x14, BIT_14 ); + rtl8168_mdio_write(tp, 0x1F, 0x0A40); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8045); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0x804d); + rtl8168_mdio_write(tp, 0x14, 0x1222); + rtl8168_mdio_write(tp, 0x13, 0x805d); + rtl8168_mdio_write(tp, 0x14, 0x0022); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_set_eth_phy_bit( tp, 0x14, BIT_15 ); + rtl8168_mdio_write(tp, 0x1F, 0x0A40); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); + break; + default: + dev_printk(KERN_DEBUG, tp_to_dev(tp), "Not Support Green Feature\n"); + break; + } + + return 0; +} + +static int rtl8168_disable_green_feature(struct rtl8168_private *tp) +{ + u16 gphy_val; + unsigned long flags; + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + rtl8168_mdio_write(tp, 0x1F, 0x0005); + gphy_val = rtl8168_mdio_read(tp, 0x01) | 0x0100; + rtl8168_mdio_write(tp, 0x01, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + gphy_val = rtl8168_mdio_read(tp, 0x10) & ~0x0400; + rtl8168_mdio_write(tp, 0x10, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x19) & ~0x0001; + rtl8168_mdio_write(tp, 0x19, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x06) & ~0x7000; + gphy_val |= 0x3000; + rtl8168_mdio_write(tp, 0x06, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x0D) & 0x0700; + gphy_val |= 0x0500; + rtl8168_mdio_write(tp, 0x0D, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + rtl8168_mdio_write(tp, 0x1f, 0x0003); + gphy_val = rtl8168_mdio_read(tp, 0x19); + gphy_val &= ~BIT_0; + rtl8168_mdio_write(tp, 0x19, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x10); + gphy_val &= ~BIT_10; + rtl8168_mdio_write(tp, 0x10, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + break; + case CFG_METHOD_21: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_clear_eth_phy_bit( tp, 0x14, BIT_14 ); + rtl8168_mdio_write(tp, 0x1F, 0x0A40); + rtl8168_mdio_write(tp, 0x00, 0x9200); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8045); + rtl8168_mdio_write(tp, 0x14, 0x2444); + rtl8168_mdio_write(tp, 0x13, 0x804d); + rtl8168_mdio_write(tp, 0x14, 0x2444); + rtl8168_mdio_write(tp, 0x13, 0x805d); + rtl8168_mdio_write(tp, 0x14, 0x2444); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_set_eth_phy_bit( tp, 0x14, BIT_15 ); + rtl8168_mdio_write(tp, 0x1F, 0x0A40); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + 
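/* Writing 0x9200 to MII register 0 sets BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART, soft-resetting the PHY so the change takes effect. */ +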
rtl8168_mdio_write(tp, 0x00, 0x9200); + break; + default: + dev_printk(KERN_DEBUG, tp_to_dev(tp), "Not Support Green Feature\n"); + break; + } + + return 0; +} + +#endif + +static void rtl8168_get_mac_version(struct rtl8168_private *tp) +{ + u32 reg,val32; + u32 ICVerID; + + val32 = RTL_R32(tp, TxConfig); + reg = val32 & 0x7c800000; + ICVerID = val32 & 0x00700000; + + switch (reg) { + case 0x30000000: + tp->mcfg = CFG_METHOD_1; + tp->efuse_ver = EFUSE_NOT_SUPPORT; + break; + case 0x38000000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_2; + } else if (ICVerID == 0x00500000) { + tp->mcfg = CFG_METHOD_3; + } else { + tp->mcfg = CFG_METHOD_3; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_NOT_SUPPORT; + break; + case 0x3C000000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_4; + } else if (ICVerID == 0x00200000) { + tp->mcfg = CFG_METHOD_5; + } else if (ICVerID == 0x00400000) { + tp->mcfg = CFG_METHOD_6; + } else { + tp->mcfg = CFG_METHOD_6; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_NOT_SUPPORT; + break; + case 0x3C800000: + if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_7; + } else if (ICVerID == 0x00300000) { + tp->mcfg = CFG_METHOD_8; + } else { + tp->mcfg = CFG_METHOD_8; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_NOT_SUPPORT; + break; + case 0x28000000: + if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_9; + } else if (ICVerID == 0x00300000) { + tp->mcfg = CFG_METHOD_10; + } else { + tp->mcfg = CFG_METHOD_10; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V1; + break; + case 0x28800000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_11; + } else if (ICVerID == 0x00200000) { + tp->mcfg = CFG_METHOD_12; + RTL_W32(tp, 0xD0, RTL_R32(tp, 0xD0) | 0x00020000); + } else if (ICVerID == 0x00300000) { + tp->mcfg = CFG_METHOD_13; + } else { + tp->mcfg = CFG_METHOD_13; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V1; + break; + case 0x2C000000: + if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_14; + } else if (ICVerID == 0x00200000) { + tp->mcfg = CFG_METHOD_15; + } else { + tp->mcfg = CFG_METHOD_15; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V2; + break; + case 0x2C800000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_16; + } else if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_17; + } else { + tp->mcfg = CFG_METHOD_17; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + case 0x48000000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_18; + } else if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_19; + } else { + tp->mcfg = CFG_METHOD_19; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + case 0x48800000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_20; + } else { + tp->mcfg = CFG_METHOD_20; + tp->HwIcVerUnknown = TRUE; + } + + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + case 0x4C000000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_21; + } else if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_22; + } else { + tp->mcfg = CFG_METHOD_22; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + case 0x50000000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_23; + } else if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_27; + } else if (ICVerID == 0x00200000) { + tp->mcfg = CFG_METHOD_28; + } else { + tp->mcfg = CFG_METHOD_28; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + case 0x50800000: + if 
(ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_24; + } else if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_25; + } else { + tp->mcfg = CFG_METHOD_25; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + case 0x5C800000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_26; + } else { + tp->mcfg = CFG_METHOD_26; + tp->HwIcVerUnknown = TRUE; + } + + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + case 0x54000000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_29; + } else if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_30; + } else { + tp->mcfg = CFG_METHOD_30; + tp->HwIcVerUnknown = TRUE; + } + + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + case 0x54800000: + if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_31; + } else if (ICVerID == 0x00200000) { + tp->mcfg = CFG_METHOD_32; + } else if (ICVerID == 0x00300000) { + tp->mcfg = CFG_METHOD_33; + } else { + tp->mcfg = CFG_METHOD_33; + tp->HwIcVerUnknown = TRUE; + } + + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + default: + printk("unknown chip version (%x)\n",reg); + tp->mcfg = CFG_METHOD_DEFAULT; + tp->HwIcVerUnknown = TRUE; + tp->efuse_ver = EFUSE_NOT_SUPPORT; + break; + } +} + +static void +rtl8168_print_mac_version(struct rtl8168_private *tp) +{ + int i; + for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) { + if (tp->mcfg == rtl_chip_info[i].mcfg) { + dprintk("Realtek PCIe GbE Family Controller mcfg = %04d\n", + rtl_chip_info[i].mcfg); + return; + } + } + + dprintk("mac_version == Unknown\n"); +} + +static u8 rtl8168_calc_efuse_dummy_bit(u16 reg) +{ + int s,a,b; + u8 dummyBitPos = 0; + + + s=reg% 32; + a=s % 16; + b=s/16; + + if (s/16) { + dummyBitPos = (u8)(16-a); + } else { + dummyBitPos = (u8)a; + } + + return dummyBitPos; +} + +/* Each 32-bit efuse word carries one dummy bit at a register-dependent position; the decode below strips it and re-packs the remaining bits. */ +static u32 rtl8168_decode_efuse_cmd(struct rtl8168_private *tp, u32 DwCmd) +{ + u16 reg = (u16)((DwCmd & 0x00FE0000) >> 17); + u32 DummyPos = rtl8168_calc_efuse_dummy_bit(reg); + u32 DeCodeDwCmd = DwCmd; + u32 Dw17BitData; + + + if (tp->efuse_ver < 3) { + DeCodeDwCmd = (DwCmd>>(DummyPos+1))<<DummyPos; + if (DummyPos > 0) { + DeCodeDwCmd |= ((DwCmd<<(32-DummyPos))>>(32-DummyPos)); + } + } else { + reg = (u16)((DwCmd & 0x007F0000) >> 16); + DummyPos = rtl8168_calc_efuse_dummy_bit(reg); + Dw17BitData = ((DwCmd & BIT_23) >> 23); + Dw17BitData <<= 16; + Dw17BitData |= (DwCmd & 0x0000FFFF); + DeCodeDwCmd = (Dw17BitData>>(DummyPos+1))<<DummyPos; + if (DummyPos > 0) { + DeCodeDwCmd |= ((Dw17BitData<<(32-DummyPos))>>(32-DummyPos)); + } + } + + return DeCodeDwCmd; +} + +static u8 rtl8168_efuse_read(struct rtl8168_private *tp, u16 reg) +{ + u8 efuse_data = 0; + u32 temp; + int cnt; + + if (tp->efuse_ver == EFUSE_NOT_SUPPORT) + return EFUSE_READ_FAIL; + + if (tp->efuse_ver == EFUSE_SUPPORT_V1) { + temp = EFUSE_READ | ((reg & EFUSE_Reg_Mask) << EFUSE_Reg_Shift); + RTL_W32(tp, EFUSEAR, temp); + + cnt = 0; + do { + udelay(100); + temp = RTL_R32(tp, EFUSEAR); + cnt++; + } while (!(temp & EFUSE_READ_OK) && (cnt < EFUSE_Check_Cnt)); + + if (cnt == EFUSE_Check_Cnt) + efuse_data = EFUSE_READ_FAIL; + else + efuse_data = (u8)(RTL_R32(tp, EFUSEAR) & EFUSE_Data_Mask); + } else if (tp->efuse_ver == EFUSE_SUPPORT_V2) { + temp = (reg/2) & 0x03ff; + temp <<= 17; + temp |= EFUSE_READ; + RTL_W32(tp, EFUSEAR, temp); + + cnt = 0; + do { + udelay(100); + temp = RTL_R32(tp, EFUSEAR); + cnt++; + } while (!(temp & EFUSE_READ_OK) && (cnt < EFUSE_Check_Cnt)); + + if (cnt == EFUSE_Check_Cnt) { + efuse_data = EFUSE_READ_FAIL; + } else { + temp = RTL_R32(tp, EFUSEAR); + temp = rtl8168_decode_efuse_cmd(tp, temp); + + if (reg%2) { + temp >>= 8; + 
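/* Odd efuse byte addresses sit in the high byte of the decoded word. */ +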
efuse_data = (u8)temp; + } else { + efuse_data = (u8)temp; + } + } + } else if (tp->efuse_ver == EFUSE_SUPPORT_V3) { + temp = (reg/2) & 0x03ff; + temp <<= 16; + temp |= EFUSE_READ_V3; + RTL_W32(tp, EFUSEAR, temp); + + cnt = 0; + do { + udelay(100); + temp = RTL_R32(tp, EFUSEAR); + cnt++; + } while ((temp & BIT_31) && (cnt < EFUSE_Check_Cnt)); + + if (cnt == EFUSE_Check_Cnt) { + efuse_data = EFUSE_READ_FAIL; + } else { + temp = RTL_R32(tp, EFUSEAR); + temp = rtl8168_decode_efuse_cmd(tp, temp); + + if (reg%2) { + temp >>= 8; + efuse_data = (u8)temp; + } else { + efuse_data = (u8)temp; + } + } + } + + udelay(20); + + return efuse_data; +} + +static void +rtl8168_tally_counter_addr_fill(struct rtl8168_private *tp) +{ + if (!tp->tally_paddr) + return; + + RTL_W32(tp, CounterAddrHigh, (u64)tp->tally_paddr >> 32); + RTL_W32(tp, CounterAddrLow, (u64)tp->tally_paddr & (DMA_BIT_MASK(32))); +} + +static void +rtl8168_tally_counter_clear(struct rtl8168_private *tp) +{ + if (tp->mcfg == CFG_METHOD_1 || tp->mcfg == CFG_METHOD_2 || + tp->mcfg == CFG_METHOD_3 ) + return; + + if (!tp->tally_paddr) + return; + + RTL_W32(tp, CounterAddrHigh, (u64)tp->tally_paddr >> 32); + RTL_W32(tp, CounterAddrLow, ((u64)tp->tally_paddr & (DMA_BIT_MASK(32))) | CounterReset); +} + +static +u16 +rtl8168_get_phy_state(struct rtl8168_private *tp) +{ + u16 PhyState = 0xFF; + + if (HW_SUPPORT_UPS_MODE(tp) == FALSE) goto exit; + + switch (tp->HwSuppUpsVer) { + case 1: + PhyState = rtl8168_mdio_read_phy_ocp(tp, 0x0A42, 0x10); + PhyState &= 0x7; //bit[2:0] + break; + } + +exit: + return PhyState; +} + +static +bool +rtl8168_wait_phy_state_ready(struct rtl8168_private *tp, + u16 PhyState, + u32 MicroSecondTimeout + ) +{ + u16 TmpPhyState; + u32 WaitCount; + u32 i = 0; + bool PhyStateReady = TRUE; + + if (HW_SUPPORT_UPS_MODE(tp) == FALSE) goto exit; + + WaitCount = MicroSecondTimeout / 1000; + if (WaitCount == 0) WaitCount = 100; + + do { + TmpPhyState = rtl8168_get_phy_state(tp); + mdelay(1); + i++; + } while ((i < WaitCount) && (TmpPhyState != PhyState)); + + PhyStateReady = (i == WaitCount && TmpPhyState != PhyState) ? 
FALSE : TRUE; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) + WARN_ON_ONCE(i == WaitCount); +#endif + +exit: + return PhyStateReady; +} + +static +bool +rtl8168_test_phy_ocp(struct rtl8168_private *tp) +{ + bool RestorePhyOcpReg = FALSE; + + if (tp->TestPhyOcpReg == FALSE) goto exit; + + if (tp->HwSuppEsdVer == 2) { + u16 PhyRegValue; + u8 ResetPhyType = 0; + + if (HW_PHY_STATUS_INI == rtl8168_get_phy_state(tp)) { + ResetPhyType = 1; + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0C40); + PhyRegValue = rtl8168_mdio_read(tp, 0x12); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + if ((PhyRegValue & 0x03) != 0x00) { + ResetPhyType = 2; + } + } + + if (ResetPhyType > 0) { + u32 WaitCnt; + struct net_device *dev = tp->dev; + + printk(KERN_ERR "%s: test_phy_ocp ResetPhyType = 0x%02x\n.\n", dev->name, ResetPhyType); + + rtl8168_mdio_write(tp, 0x1F, 0x0C41); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + mdelay(24); //24ms + + rtl8168_mdio_write(tp, 0x1F, 0x0C40); + PhyRegValue = rtl8168_mdio_read(tp, 0x12); + if ((PhyRegValue & 0x03) != 0x00) { + WaitCnt = 0; + while ((PhyRegValue & 0x03) != 0x00 && WaitCnt < 5) { + rtl8168_mdio_write(tp, 0x1F, 0x0C40); + rtl8168_set_eth_phy_bit(tp, 0x11, (BIT_15 | BIT_14)); + rtl8168_clear_eth_phy_bit(tp, 0x11, (BIT_15 | BIT_14)); + mdelay(100); + rtl8168_mdio_write(tp, 0x1F, 0x0C40); + PhyRegValue = rtl8168_mdio_read(tp, 0x12); + WaitCnt++; + } + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A46); + rtl8168_mdio_write(tp, 0x10, tp->BackupPhyFuseDout_15_0); + rtl8168_mdio_write(tp, 0x12, tp->BackupPhyFuseDout_47_32); + rtl8168_mdio_write(tp, 0x13, tp->BackupPhyFuseDout_63_48); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_wait_phy_state_ready(tp, HW_PHY_STATUS_INI, 5000000); + rtl8168_mdio_write(tp, 0x1F, 0x0A46); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_wait_phy_state_ready(tp, HW_PHY_STATUS_LAN_ON, 500000); + + tp->HwHasWrRamCodeToMicroP = FALSE; + + RestorePhyOcpReg = TRUE; + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + +exit: + return RestorePhyOcpReg; +} + +static int +rtl8168_is_ups_resume(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + return (rtl8168_mac_ocp_read(tp, 0xD408) & BIT_0); +} + +static void +rtl8168_clear_ups_resume_bit(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + rtl8168_mac_ocp_write(tp, 0xD408, rtl8168_mac_ocp_read(tp, 0xD408) & ~(BIT_0)); +} + +static void +rtl8168_wait_phy_ups_resume(struct net_device *dev, u16 PhyState) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 TmpPhyState; + int i = 0; + + do { + TmpPhyState = rtl8168_mdio_read_phy_ocp(tp, 0x0A42, 0x10); + TmpPhyState &= 0x7; + mdelay(1); + i++; + } while ((i < 100) && (TmpPhyState != PhyState)); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) + WARN_ON_ONCE(i == 100); +#endif +} + +void +rtl8168_enable_now_is_oob(struct rtl8168_private *tp) +{ + if ( tp->HwSuppNowIsOobVer == 1 ) { + RTL_W8(tp, MCUCmd_reg, RTL_R8(tp, MCUCmd_reg) | Now_is_oob); + } +} + +void +rtl8168_disable_now_is_oob(struct rtl8168_private *tp) +{ + if ( tp->HwSuppNowIsOobVer == 1 ) { + RTL_W8(tp, MCUCmd_reg, RTL_R8(tp, MCUCmd_reg) & ~Now_is_oob); + } +} + +static void +rtl8168_switch_to_sgmii_mode( + struct rtl8168_private *tp +) +{ + if (FALSE == HW_SUPP_SERDES_PHY(tp)) return; + + switch (tp->HwSuppSerDesPhyVer) { + case 1: + rtl8168_mac_ocp_write(tp, 0xEB00, 0x2); + rtl8168_set_mcu_ocp_bit(tp, 
0xEB16, BIT_1); + break; + } +} + +static void +rtl8168_exit_oob(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 data16; + + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~(AcceptErr | AcceptRunt | AcceptBroadcast | AcceptMulticast | AcceptMyPhys | AcceptAllPhys)); + + if (HW_SUPP_SERDES_PHY(tp)) { + if (tp->HwSuppSerDesPhyVer == 1) { + rtl8168_switch_to_sgmii_mode(tp); + } + } + + if (HW_DASH_SUPPORT_DASH(tp)) { + rtl8168_driver_start(tp); + rtl8168_dash2_disable_txrx(dev); +#ifdef ENABLE_DASH_SUPPORT + DashHwInit(dev); +#endif + } + + //Disable realwow function + switch (tp->mcfg) { + case CFG_METHOD_18: + case CFG_METHOD_19: + RTL_W32(tp, MACOCP, 0xE5A90000); + RTL_W32(tp, MACOCP, 0xF2100010); + break; + case CFG_METHOD_20: + RTL_W32(tp, MACOCP, 0xE5A90000); + RTL_W32(tp, MACOCP, 0xE4640000); + RTL_W32(tp, MACOCP, 0xF2100010); + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + RTL_W32(tp, MACOCP, 0x605E0000); + RTL_W32(tp, MACOCP, (0xE05E << 16) | (RTL_R32(tp, MACOCP) & 0xFFFE)); + RTL_W32(tp, MACOCP, 0xE9720000); + RTL_W32(tp, MACOCP, 0xF2140010); + break; + case CFG_METHOD_26: + RTL_W32(tp, MACOCP, 0xE05E00FF); + RTL_W32(tp, MACOCP, 0xE9720000); + rtl8168_mac_ocp_write(tp, 0xE428, 0x0010); + break; + } + +#ifdef ENABLE_REALWOW_SUPPORT + rtl8168_realwow_hw_init(dev); +#else + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + rtl8168_eri_write(tp, 0x174, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_mac_ocp_write(tp, 0xE428, 0x0010); + break; + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_eri_write(tp, 0x174, 2, 0x00FF, ERIAR_ExGMAC); + rtl8168_mac_ocp_write(tp, 0xE428, 0x0010); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: { + u32 csi_tmp; + csi_tmp = rtl8168_eri_read(tp, 0x174, 2, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_8); + csi_tmp |= (BIT_15); + rtl8168_eri_write(tp, 0x174, 2, csi_tmp, ERIAR_ExGMAC); + rtl8168_mac_ocp_write(tp, 0xE428, 0x0010); + } + break; + } +#endif //ENABLE_REALWOW_SUPPORT + + rtl8168_nic_reset(dev); + + switch (tp->mcfg) { + case CFG_METHOD_20: + rtl8168_wait_ll_share_fifo_ready(dev); + + data16 = rtl8168_mac_ocp_read(tp, 0xD4DE) | BIT_15; + rtl8168_mac_ocp_write(tp, 0xD4DE, data16); + + rtl8168_wait_ll_share_fifo_ready(dev); + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_disable_now_is_oob(tp); + + data16 = rtl8168_mac_ocp_read(tp, 0xE8DE) & ~BIT_14; + rtl8168_mac_ocp_write(tp, 0xE8DE, data16); + rtl8168_wait_ll_share_fifo_ready(dev); + + data16 = rtl8168_mac_ocp_read(tp, 0xE8DE) | BIT_15; + rtl8168_mac_ocp_write(tp, 0xE8DE, data16); + + rtl8168_wait_ll_share_fifo_ready(dev); + break; + } + + //wait ups resume (phy state 2) + if (HW_SUPPORT_UPS_MODE(tp)) + if (rtl8168_is_ups_resume(dev)) { + rtl8168_wait_phy_ups_resume(dev, HW_PHY_STATUS_EXT_INI); + rtl8168_clear_ups_resume_bit(dev); + } + +#ifdef ENABLE_FIBER_SUPPORT + if (HW_FIBER_MODE_ENABLED(tp)) + rtl8168_hw_init_fiber_nic(dev); +#endif //ENABLE_FIBER_SUPPORT +} + +void +rtl8168_hw_disable_mac_mcu_bps(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (tp->HwSuppAspmClkIntrLock) { + rtl8168_enable_cfg9346_write(tp); + rtl8168_hw_aspm_clkreq_enable(tp, false); + 
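/* ASPM/ClkReq is switched off inside the Cfg9346 write-unlock window before the MAC MCU breakpoints are cleared below. */ +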
rtl8168_disable_cfg9346_write(tp); + } + + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mac_ocp_write(tp, 0xFC38, 0x0000); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mac_ocp_write(tp, 0xFC28, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC30, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC32, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC34, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC36, 0x0000); + mdelay(3); + rtl8168_mac_ocp_write(tp, 0xFC26, 0x0000); + break; + } +} + +#ifndef ENABLE_USE_FIRMWARE_FILE +static void +rtl8168_set_mac_mcu_8168g_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + rtl8168_mac_ocp_write(tp, 0xE43C, 0x0000); + rtl8168_mac_ocp_write(tp, 0xE43E, 0x0000); + + rtl8168_mac_ocp_write(tp, 0xE434, 0x0004); + rtl8168_mac_ocp_write(tp, 0xE43C, 0x0004); + + rtl8168_hw_disable_mac_mcu_bps(dev); + + rtl8168_mac_ocp_write( tp, 0xF800, 0xE008 ); + rtl8168_mac_ocp_write( tp, 0xF802, 0xE01B ); + rtl8168_mac_ocp_write( tp, 0xF804, 0xE022 ); + rtl8168_mac_ocp_write( tp, 0xF806, 0xE094 ); + rtl8168_mac_ocp_write( tp, 0xF808, 0xE097 ); + rtl8168_mac_ocp_write( tp, 0xF80A, 0xE09A ); + rtl8168_mac_ocp_write( tp, 0xF80C, 0xE0B3 ); + rtl8168_mac_ocp_write( tp, 0xF80E, 0xE0BA ); + rtl8168_mac_ocp_write( tp, 0xF810, 0x49D2 ); + rtl8168_mac_ocp_write( tp, 0xF812, 0xF10D ); + rtl8168_mac_ocp_write( tp, 0xF814, 0x766C ); + rtl8168_mac_ocp_write( tp, 0xF816, 0x49E2 ); + rtl8168_mac_ocp_write( tp, 0xF818, 0xF00A ); + rtl8168_mac_ocp_write( tp, 0xF81A, 0x1EC0 ); + rtl8168_mac_ocp_write( tp, 0xF81C, 0x8EE1 ); + rtl8168_mac_ocp_write( tp, 0xF81E, 0xC60A ); + rtl8168_mac_ocp_write( tp, 0xF820, 0x77C0 ); + rtl8168_mac_ocp_write( tp, 0xF822, 0x4870 ); + rtl8168_mac_ocp_write( tp, 0xF824, 0x9FC0 ); + rtl8168_mac_ocp_write( tp, 0xF826, 0x1EA0 ); + rtl8168_mac_ocp_write( tp, 0xF828, 0xC707 ); + rtl8168_mac_ocp_write( tp, 0xF82A, 0x8EE1 ); + rtl8168_mac_ocp_write( tp, 0xF82C, 0x9D6C ); + rtl8168_mac_ocp_write( tp, 0xF82E, 0xC603 ); + rtl8168_mac_ocp_write( tp, 0xF830, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF832, 0xB416 ); + rtl8168_mac_ocp_write( tp, 0xF834, 0x0076 ); + rtl8168_mac_ocp_write( tp, 0xF836, 0xE86C ); + rtl8168_mac_ocp_write( tp, 0xF838, 0xC406 ); + rtl8168_mac_ocp_write( tp, 0xF83A, 0x7580 ); + rtl8168_mac_ocp_write( tp, 0xF83C, 0x4852 ); + rtl8168_mac_ocp_write( tp, 0xF83E, 0x8D80 ); + rtl8168_mac_ocp_write( tp, 0xF840, 0xC403 ); + rtl8168_mac_ocp_write( tp, 0xF842, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF844, 0xD3E0 ); + rtl8168_mac_ocp_write( tp, 0xF846, 0x02C8 ); + rtl8168_mac_ocp_write( tp, 0xF848, 0x8918 ); + rtl8168_mac_ocp_write( tp, 0xF84A, 0xE815 ); + rtl8168_mac_ocp_write( tp, 0xF84C, 0x1100 ); + rtl8168_mac_ocp_write( tp, 0xF84E, 0xF011 ); + rtl8168_mac_ocp_write( tp, 0xF850, 0xE812 ); + rtl8168_mac_ocp_write( tp, 0xF852, 0x4990 ); + rtl8168_mac_ocp_write( tp, 0xF854, 0xF002 ); + rtl8168_mac_ocp_write( tp, 0xF856, 0xE817 ); + rtl8168_mac_ocp_write( tp, 0xF858, 0xE80E ); + rtl8168_mac_ocp_write( tp, 0xF85A, 0x4992 ); + rtl8168_mac_ocp_write( tp, 0xF85C, 
0xF002 ); + rtl8168_mac_ocp_write( tp, 0xF85E, 0xE80E ); + rtl8168_mac_ocp_write( tp, 0xF860, 0xE80A ); + rtl8168_mac_ocp_write( tp, 0xF862, 0x4993 ); + rtl8168_mac_ocp_write( tp, 0xF864, 0xF002 ); + rtl8168_mac_ocp_write( tp, 0xF866, 0xE818 ); + rtl8168_mac_ocp_write( tp, 0xF868, 0xE806 ); + rtl8168_mac_ocp_write( tp, 0xF86A, 0x4991 ); + rtl8168_mac_ocp_write( tp, 0xF86C, 0xF002 ); + rtl8168_mac_ocp_write( tp, 0xF86E, 0xE838 ); + rtl8168_mac_ocp_write( tp, 0xF870, 0xC25E ); + rtl8168_mac_ocp_write( tp, 0xF872, 0xBA00 ); + rtl8168_mac_ocp_write( tp, 0xF874, 0xC056 ); + rtl8168_mac_ocp_write( tp, 0xF876, 0x7100 ); + rtl8168_mac_ocp_write( tp, 0xF878, 0xFF80 ); + rtl8168_mac_ocp_write( tp, 0xF87A, 0x7100 ); + rtl8168_mac_ocp_write( tp, 0xF87C, 0x4892 ); + rtl8168_mac_ocp_write( tp, 0xF87E, 0x4813 ); + rtl8168_mac_ocp_write( tp, 0xF880, 0x8900 ); + rtl8168_mac_ocp_write( tp, 0xF882, 0xE00A ); + rtl8168_mac_ocp_write( tp, 0xF884, 0x7100 ); + rtl8168_mac_ocp_write( tp, 0xF886, 0x4890 ); + rtl8168_mac_ocp_write( tp, 0xF888, 0x4813 ); + rtl8168_mac_ocp_write( tp, 0xF88A, 0x8900 ); + rtl8168_mac_ocp_write( tp, 0xF88C, 0xC74B ); + rtl8168_mac_ocp_write( tp, 0xF88E, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF890, 0x48C2 ); + rtl8168_mac_ocp_write( tp, 0xF892, 0x4841 ); + rtl8168_mac_ocp_write( tp, 0xF894, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF896, 0xC746 ); + rtl8168_mac_ocp_write( tp, 0xF898, 0x74FC ); + rtl8168_mac_ocp_write( tp, 0xF89A, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF89C, 0xF120 ); + rtl8168_mac_ocp_write( tp, 0xF89E, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF8A0, 0xF11E ); + rtl8168_mac_ocp_write( tp, 0xF8A2, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF8A4, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF8A6, 0xF01B ); + rtl8168_mac_ocp_write( tp, 0xF8A8, 0x49C6 ); + rtl8168_mac_ocp_write( tp, 0xF8AA, 0xF119 ); + rtl8168_mac_ocp_write( tp, 0xF8AC, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF8AE, 0x49C4 ); + rtl8168_mac_ocp_write( tp, 0xF8B0, 0xF013 ); + rtl8168_mac_ocp_write( tp, 0xF8B2, 0xC536 ); + rtl8168_mac_ocp_write( tp, 0xF8B4, 0x74B0 ); + rtl8168_mac_ocp_write( tp, 0xF8B6, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF8B8, 0xF1FD ); + rtl8168_mac_ocp_write( tp, 0xF8BA, 0xC537 ); + rtl8168_mac_ocp_write( tp, 0xF8BC, 0xC434 ); + rtl8168_mac_ocp_write( tp, 0xF8BE, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF8C0, 0xC435 ); + rtl8168_mac_ocp_write( tp, 0xF8C2, 0x1C13 ); + rtl8168_mac_ocp_write( tp, 0xF8C4, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xF8C6, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xF8C8, 0xC52B ); + rtl8168_mac_ocp_write( tp, 0xF8CA, 0x74B0 ); + rtl8168_mac_ocp_write( tp, 0xF8CC, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF8CE, 0xF1FD ); + rtl8168_mac_ocp_write( tp, 0xF8D0, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF8D2, 0x48C4 ); + rtl8168_mac_ocp_write( tp, 0xF8D4, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF8D6, 0x7100 ); + rtl8168_mac_ocp_write( tp, 0xF8D8, 0x4893 ); + rtl8168_mac_ocp_write( tp, 0xF8DA, 0x8900 ); + rtl8168_mac_ocp_write( tp, 0xF8DC, 0xFF80 ); + rtl8168_mac_ocp_write( tp, 0xF8DE, 0xC520 ); + rtl8168_mac_ocp_write( tp, 0xF8E0, 0x74B0 ); + rtl8168_mac_ocp_write( tp, 0xF8E2, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF8E4, 0xF11C ); + rtl8168_mac_ocp_write( tp, 0xF8E6, 0xC71E ); + rtl8168_mac_ocp_write( tp, 0xF8E8, 0x74FC ); + rtl8168_mac_ocp_write( tp, 0xF8EA, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF8EC, 0xF118 ); + rtl8168_mac_ocp_write( tp, 0xF8EE, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF8F0, 0xF116 ); + rtl8168_mac_ocp_write( tp, 0xF8F2, 0x74F8 ); + 
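/* Opaque MAC MCU patch opcodes from Realtek, written verbatim into the 0xF800 instruction region; judging from rtl8168_hw_disable_mac_mcu_bps() above, the FC26/FC28-FC36 writes at the end of this helper install the patch entry points. */ +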
rtl8168_mac_ocp_write( tp, 0xF8F4, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF8F6, 0xF013 ); + rtl8168_mac_ocp_write( tp, 0xF8F8, 0x48C3 ); + rtl8168_mac_ocp_write( tp, 0xF8FA, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF8FC, 0xC516 ); + rtl8168_mac_ocp_write( tp, 0xF8FE, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xF900, 0x49CE ); + rtl8168_mac_ocp_write( tp, 0xF902, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xF904, 0xC411 ); + rtl8168_mac_ocp_write( tp, 0xF906, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF908, 0xC411 ); + rtl8168_mac_ocp_write( tp, 0xF90A, 0x1C13 ); + rtl8168_mac_ocp_write( tp, 0xF90C, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xF90E, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xF910, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xF912, 0x49CF ); + rtl8168_mac_ocp_write( tp, 0xF914, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xF916, 0x7100 ); + rtl8168_mac_ocp_write( tp, 0xF918, 0x4891 ); + rtl8168_mac_ocp_write( tp, 0xF91A, 0x8900 ); + rtl8168_mac_ocp_write( tp, 0xF91C, 0xFF80 ); + rtl8168_mac_ocp_write( tp, 0xF91E, 0xE400 ); + rtl8168_mac_ocp_write( tp, 0xF920, 0xD3E0 ); + rtl8168_mac_ocp_write( tp, 0xF922, 0xE000 ); + rtl8168_mac_ocp_write( tp, 0xF924, 0x0481 ); + rtl8168_mac_ocp_write( tp, 0xF926, 0x0C81 ); + rtl8168_mac_ocp_write( tp, 0xF928, 0xDE20 ); + rtl8168_mac_ocp_write( tp, 0xF92A, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xF92C, 0x0992 ); + rtl8168_mac_ocp_write( tp, 0xF92E, 0x1B76 ); + rtl8168_mac_ocp_write( tp, 0xF930, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF932, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF934, 0x059C ); + rtl8168_mac_ocp_write( tp, 0xF936, 0x1B76 ); + rtl8168_mac_ocp_write( tp, 0xF938, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF93A, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF93C, 0x065A ); + rtl8168_mac_ocp_write( tp, 0xF93E, 0xB400 ); + rtl8168_mac_ocp_write( tp, 0xF940, 0x18DE ); + rtl8168_mac_ocp_write( tp, 0xF942, 0x2008 ); + rtl8168_mac_ocp_write( tp, 0xF944, 0x4001 ); + rtl8168_mac_ocp_write( tp, 0xF946, 0xF10F ); + rtl8168_mac_ocp_write( tp, 0xF948, 0x7342 ); + rtl8168_mac_ocp_write( tp, 0xF94A, 0x1880 ); + rtl8168_mac_ocp_write( tp, 0xF94C, 0x2008 ); + rtl8168_mac_ocp_write( tp, 0xF94E, 0x0009 ); + rtl8168_mac_ocp_write( tp, 0xF950, 0x4018 ); + rtl8168_mac_ocp_write( tp, 0xF952, 0xF109 ); + rtl8168_mac_ocp_write( tp, 0xF954, 0x7340 ); + rtl8168_mac_ocp_write( tp, 0xF956, 0x25BC ); + rtl8168_mac_ocp_write( tp, 0xF958, 0x130F ); + rtl8168_mac_ocp_write( tp, 0xF95A, 0xF105 ); + rtl8168_mac_ocp_write( tp, 0xF95C, 0xC00A ); + rtl8168_mac_ocp_write( tp, 0xF95E, 0x7300 ); + rtl8168_mac_ocp_write( tp, 0xF960, 0x4831 ); + rtl8168_mac_ocp_write( tp, 0xF962, 0x9B00 ); + rtl8168_mac_ocp_write( tp, 0xF964, 0xB000 ); + rtl8168_mac_ocp_write( tp, 0xF966, 0x7340 ); + rtl8168_mac_ocp_write( tp, 0xF968, 0x8320 ); + rtl8168_mac_ocp_write( tp, 0xF96A, 0xC302 ); + rtl8168_mac_ocp_write( tp, 0xF96C, 0xBB00 ); + rtl8168_mac_ocp_write( tp, 0xF96E, 0x0C12 ); + rtl8168_mac_ocp_write( tp, 0xF970, 0xE860 ); + rtl8168_mac_ocp_write( tp, 0xF972, 0xC406 ); + rtl8168_mac_ocp_write( tp, 0xF974, 0x7580 ); + rtl8168_mac_ocp_write( tp, 0xF976, 0x4851 ); + rtl8168_mac_ocp_write( tp, 0xF978, 0x8D80 ); + rtl8168_mac_ocp_write( tp, 0xF97A, 0xC403 ); + rtl8168_mac_ocp_write( tp, 0xF97C, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF97E, 0xD3E0 ); + rtl8168_mac_ocp_write( tp, 0xF980, 0x02C8 ); + rtl8168_mac_ocp_write( tp, 0xF982, 0xC406 ); + rtl8168_mac_ocp_write( tp, 0xF984, 0x7580 ); + rtl8168_mac_ocp_write( tp, 0xF986, 0x4850 ); + rtl8168_mac_ocp_write( tp, 0xF988, 0x8D80 ); + rtl8168_mac_ocp_write( tp, 
0xF98A, 0xC403 ); + rtl8168_mac_ocp_write( tp, 0xF98C, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF98E, 0xD3E0 ); + rtl8168_mac_ocp_write( tp, 0xF990, 0x0298 ); + + rtl8168_mac_ocp_write( tp, 0xDE30, 0x0080 ); + + rtl8168_mac_ocp_write( tp, 0xFC26, 0x8000 ); + + rtl8168_mac_ocp_write( tp, 0xFC28, 0x0075 ); + rtl8168_mac_ocp_write( tp, 0xFC2A, 0x02B1 ); + rtl8168_mac_ocp_write( tp, 0xFC2C, 0x0991 ); + rtl8168_mac_ocp_write( tp, 0xFC2E, 0x059B ); + rtl8168_mac_ocp_write( tp, 0xFC30, 0x0659 ); + rtl8168_mac_ocp_write( tp, 0xFC32, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFC34, 0x02C7 ); + rtl8168_mac_ocp_write( tp, 0xFC36, 0x0279 ); +} + +static void +rtl8168_set_mac_mcu_8168gu_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + rtl8168_hw_disable_mac_mcu_bps(dev); + + rtl8168_mac_ocp_write( tp, 0xF800, 0xE008 ); + rtl8168_mac_ocp_write( tp, 0xF802, 0xE011 ); + rtl8168_mac_ocp_write( tp, 0xF804, 0xE015 ); + rtl8168_mac_ocp_write( tp, 0xF806, 0xE018 ); + rtl8168_mac_ocp_write( tp, 0xF808, 0xE01B ); + rtl8168_mac_ocp_write( tp, 0xF80A, 0xE027 ); + rtl8168_mac_ocp_write( tp, 0xF80C, 0xE043 ); + rtl8168_mac_ocp_write( tp, 0xF80E, 0xE065 ); + rtl8168_mac_ocp_write( tp, 0xF810, 0x49E2 ); + rtl8168_mac_ocp_write( tp, 0xF812, 0xF005 ); + rtl8168_mac_ocp_write( tp, 0xF814, 0x49EA ); + rtl8168_mac_ocp_write( tp, 0xF816, 0xF003 ); + rtl8168_mac_ocp_write( tp, 0xF818, 0xC404 ); + rtl8168_mac_ocp_write( tp, 0xF81A, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF81C, 0xC403 ); + rtl8168_mac_ocp_write( tp, 0xF81E, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF820, 0x0496 ); + rtl8168_mac_ocp_write( tp, 0xF822, 0x051A ); + rtl8168_mac_ocp_write( tp, 0xF824, 0x1D01 ); + rtl8168_mac_ocp_write( tp, 0xF826, 0x8DE8 ); + rtl8168_mac_ocp_write( tp, 0xF828, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF82A, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF82C, 0x0206 ); + rtl8168_mac_ocp_write( tp, 0xF82E, 0x1B76 ); + rtl8168_mac_ocp_write( tp, 0xF830, 0xC202 ); + rtl8168_mac_ocp_write( tp, 0xF832, 0xBA00 ); + rtl8168_mac_ocp_write( tp, 0xF834, 0x058A ); + rtl8168_mac_ocp_write( tp, 0xF836, 0x1B76 ); + rtl8168_mac_ocp_write( tp, 0xF838, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF83A, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF83C, 0x0648 ); + rtl8168_mac_ocp_write( tp, 0xF83E, 0x74E6 ); + rtl8168_mac_ocp_write( tp, 0xF840, 0x1B78 ); + rtl8168_mac_ocp_write( tp, 0xF842, 0x46DC ); + rtl8168_mac_ocp_write( tp, 0xF844, 0x1300 ); + rtl8168_mac_ocp_write( tp, 0xF846, 0xF005 ); + rtl8168_mac_ocp_write( tp, 0xF848, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF84A, 0x48C3 ); + rtl8168_mac_ocp_write( tp, 0xF84C, 0x48C4 ); + rtl8168_mac_ocp_write( tp, 0xF84E, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF850, 0x64E7 ); + rtl8168_mac_ocp_write( tp, 0xF852, 0xC302 ); + rtl8168_mac_ocp_write( tp, 0xF854, 0xBB00 ); + rtl8168_mac_ocp_write( tp, 0xF856, 0x068E ); + rtl8168_mac_ocp_write( tp, 0xF858, 0x74E4 ); + rtl8168_mac_ocp_write( tp, 0xF85A, 0x49C5 ); + rtl8168_mac_ocp_write( tp, 0xF85C, 0xF106 ); + rtl8168_mac_ocp_write( tp, 0xF85E, 0x49C6 ); + rtl8168_mac_ocp_write( tp, 0xF860, 0xF107 ); + rtl8168_mac_ocp_write( tp, 0xF862, 0x48C8 ); + rtl8168_mac_ocp_write( tp, 0xF864, 0x48C9 ); + rtl8168_mac_ocp_write( tp, 0xF866, 0xE011 ); + rtl8168_mac_ocp_write( tp, 0xF868, 0x48C9 ); + rtl8168_mac_ocp_write( tp, 0xF86A, 0x4848 ); + rtl8168_mac_ocp_write( tp, 0xF86C, 0xE00E ); + rtl8168_mac_ocp_write( tp, 0xF86E, 0x4848 ); + rtl8168_mac_ocp_write( tp, 0xF870, 0x49C7 ); + rtl8168_mac_ocp_write( tp, 0xF872, 0xF00A ); + rtl8168_mac_ocp_write( 
tp, 0xF874, 0x48C9 ); + rtl8168_mac_ocp_write( tp, 0xF876, 0xC60D ); + rtl8168_mac_ocp_write( tp, 0xF878, 0x1D1F ); + rtl8168_mac_ocp_write( tp, 0xF87A, 0x8DC2 ); + rtl8168_mac_ocp_write( tp, 0xF87C, 0x1D00 ); + rtl8168_mac_ocp_write( tp, 0xF87E, 0x8DC3 ); + rtl8168_mac_ocp_write( tp, 0xF880, 0x1D11 ); + rtl8168_mac_ocp_write( tp, 0xF882, 0x8DC0 ); + rtl8168_mac_ocp_write( tp, 0xF884, 0xE002 ); + rtl8168_mac_ocp_write( tp, 0xF886, 0x4849 ); + rtl8168_mac_ocp_write( tp, 0xF888, 0x94E5 ); + rtl8168_mac_ocp_write( tp, 0xF88A, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF88C, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF88E, 0x0238 ); + rtl8168_mac_ocp_write( tp, 0xF890, 0xE434 ); + rtl8168_mac_ocp_write( tp, 0xF892, 0x49D9 ); + rtl8168_mac_ocp_write( tp, 0xF894, 0xF01B ); + rtl8168_mac_ocp_write( tp, 0xF896, 0xC31E ); + rtl8168_mac_ocp_write( tp, 0xF898, 0x7464 ); + rtl8168_mac_ocp_write( tp, 0xF89A, 0x49C4 ); + rtl8168_mac_ocp_write( tp, 0xF89C, 0xF114 ); + rtl8168_mac_ocp_write( tp, 0xF89E, 0xC31B ); + rtl8168_mac_ocp_write( tp, 0xF8A0, 0x6460 ); + rtl8168_mac_ocp_write( tp, 0xF8A2, 0x14FA ); + rtl8168_mac_ocp_write( tp, 0xF8A4, 0xFA02 ); + rtl8168_mac_ocp_write( tp, 0xF8A6, 0xE00F ); + rtl8168_mac_ocp_write( tp, 0xF8A8, 0xC317 ); + rtl8168_mac_ocp_write( tp, 0xF8AA, 0x7460 ); + rtl8168_mac_ocp_write( tp, 0xF8AC, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF8AE, 0xF10B ); + rtl8168_mac_ocp_write( tp, 0xF8B0, 0xC311 ); + rtl8168_mac_ocp_write( tp, 0xF8B2, 0x7462 ); + rtl8168_mac_ocp_write( tp, 0xF8B4, 0x48C1 ); + rtl8168_mac_ocp_write( tp, 0xF8B6, 0x9C62 ); + rtl8168_mac_ocp_write( tp, 0xF8B8, 0x4841 ); + rtl8168_mac_ocp_write( tp, 0xF8BA, 0x9C62 ); + rtl8168_mac_ocp_write( tp, 0xF8BC, 0xC30A ); + rtl8168_mac_ocp_write( tp, 0xF8BE, 0x1C04 ); + rtl8168_mac_ocp_write( tp, 0xF8C0, 0x8C60 ); + rtl8168_mac_ocp_write( tp, 0xF8C2, 0xE004 ); + rtl8168_mac_ocp_write( tp, 0xF8C4, 0x1C15 ); + rtl8168_mac_ocp_write( tp, 0xF8C6, 0xC305 ); + rtl8168_mac_ocp_write( tp, 0xF8C8, 0x8C60 ); + rtl8168_mac_ocp_write( tp, 0xF8CA, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF8CC, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF8CE, 0x0374 ); + rtl8168_mac_ocp_write( tp, 0xF8D0, 0xE434 ); + rtl8168_mac_ocp_write( tp, 0xF8D2, 0xE030 ); + rtl8168_mac_ocp_write( tp, 0xF8D4, 0xE61C ); + rtl8168_mac_ocp_write( tp, 0xF8D6, 0xE906 ); + rtl8168_mac_ocp_write( tp, 0xF8D8, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF8DA, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF8DC, 0x0000 ); + + rtl8168_mac_ocp_write( tp, 0xFC26, 0x8000 ); + + rtl8168_mac_ocp_write( tp, 0xFC28, 0x0493 ); + rtl8168_mac_ocp_write( tp, 0xFC2A, 0x0205 ); + rtl8168_mac_ocp_write( tp, 0xFC2C, 0x0589 ); + rtl8168_mac_ocp_write( tp, 0xFC2E, 0x0647 ); + rtl8168_mac_ocp_write( tp, 0xFC30, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFC32, 0x0215 ); + rtl8168_mac_ocp_write( tp, 0xFC34, 0x0285 ); +} + +static void +rtl8168_set_mac_mcu_8168gu_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + rtl8168_hw_disable_mac_mcu_bps(dev); + + rtl8168_mac_ocp_write( tp, 0xF800, 0xE008 ); + rtl8168_mac_ocp_write( tp, 0xF802, 0xE00A ); + rtl8168_mac_ocp_write( tp, 0xF804, 0xE00D ); + rtl8168_mac_ocp_write( tp, 0xF806, 0xE02F ); + rtl8168_mac_ocp_write( tp, 0xF808, 0xE031 ); + rtl8168_mac_ocp_write( tp, 0xF80A, 0xE038 ); + rtl8168_mac_ocp_write( tp, 0xF80C, 0xE03A ); + rtl8168_mac_ocp_write( tp, 0xF80E, 0xE051 ); + rtl8168_mac_ocp_write( tp, 0xF810, 0xC202 ); + rtl8168_mac_ocp_write( tp, 0xF812, 0xBA00 ); + rtl8168_mac_ocp_write( tp, 0xF814, 0x0DFC ); + rtl8168_mac_ocp_write( 
tp, 0xF816, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xF818, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF81A, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF81C, 0x0A30 ); + rtl8168_mac_ocp_write( tp, 0xF81E, 0x49D9 ); + rtl8168_mac_ocp_write( tp, 0xF820, 0xF019 ); + rtl8168_mac_ocp_write( tp, 0xF822, 0xC520 ); + rtl8168_mac_ocp_write( tp, 0xF824, 0x64A5 ); + rtl8168_mac_ocp_write( tp, 0xF826, 0x1400 ); + rtl8168_mac_ocp_write( tp, 0xF828, 0xF007 ); + rtl8168_mac_ocp_write( tp, 0xF82A, 0x0C01 ); + rtl8168_mac_ocp_write( tp, 0xF82C, 0x8CA5 ); + rtl8168_mac_ocp_write( tp, 0xF82E, 0x1C15 ); + rtl8168_mac_ocp_write( tp, 0xF830, 0xC515 ); + rtl8168_mac_ocp_write( tp, 0xF832, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF834, 0xE00F ); + rtl8168_mac_ocp_write( tp, 0xF836, 0xC513 ); + rtl8168_mac_ocp_write( tp, 0xF838, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF83A, 0x48C8 ); + rtl8168_mac_ocp_write( tp, 0xF83C, 0x48CA ); + rtl8168_mac_ocp_write( tp, 0xF83E, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF840, 0xC510 ); + rtl8168_mac_ocp_write( tp, 0xF842, 0x1B00 ); + rtl8168_mac_ocp_write( tp, 0xF844, 0x9BA0 ); + rtl8168_mac_ocp_write( tp, 0xF846, 0x1B1C ); + rtl8168_mac_ocp_write( tp, 0xF848, 0x483F ); + rtl8168_mac_ocp_write( tp, 0xF84A, 0x9BA2 ); + rtl8168_mac_ocp_write( tp, 0xF84C, 0x1B04 ); + rtl8168_mac_ocp_write( tp, 0xF84E, 0xC506 ); + rtl8168_mac_ocp_write( tp, 0xF850, 0x9BA0 ); + rtl8168_mac_ocp_write( tp, 0xF852, 0xC603 ); + rtl8168_mac_ocp_write( tp, 0xF854, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF856, 0x0298 ); + rtl8168_mac_ocp_write( tp, 0xF858, 0x03DE ); + rtl8168_mac_ocp_write( tp, 0xF85A, 0xE434 ); + rtl8168_mac_ocp_write( tp, 0xF85C, 0xE096 ); + rtl8168_mac_ocp_write( tp, 0xF85E, 0xE860 ); + rtl8168_mac_ocp_write( tp, 0xF860, 0xDE20 ); + rtl8168_mac_ocp_write( tp, 0xF862, 0xD3C0 ); + rtl8168_mac_ocp_write( tp, 0xF864, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF866, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF868, 0x0A64 ); + rtl8168_mac_ocp_write( tp, 0xF86A, 0xC707 ); + rtl8168_mac_ocp_write( tp, 0xF86C, 0x1D00 ); + rtl8168_mac_ocp_write( tp, 0xF86E, 0x8DE2 ); + rtl8168_mac_ocp_write( tp, 0xF870, 0x48C1 ); + rtl8168_mac_ocp_write( tp, 0xF872, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF874, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF876, 0x00AA ); + rtl8168_mac_ocp_write( tp, 0xF878, 0xE0C0 ); + rtl8168_mac_ocp_write( tp, 0xF87A, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF87C, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF87E, 0x0132 ); + rtl8168_mac_ocp_write( tp, 0xF880, 0xC50C ); + rtl8168_mac_ocp_write( tp, 0xF882, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xF884, 0x49CE ); + rtl8168_mac_ocp_write( tp, 0xF886, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xF888, 0x1C00 ); + rtl8168_mac_ocp_write( tp, 0xF88A, 0x9EA0 ); + rtl8168_mac_ocp_write( tp, 0xF88C, 0x1C1C ); + rtl8168_mac_ocp_write( tp, 0xF88E, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xF890, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xF892, 0xC402 ); + rtl8168_mac_ocp_write( tp, 0xF894, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF896, 0x0AFA ); + rtl8168_mac_ocp_write( tp, 0xF898, 0xDE20 ); + rtl8168_mac_ocp_write( tp, 0xF89A, 0xE000 ); + rtl8168_mac_ocp_write( tp, 0xF89C, 0xE092 ); + rtl8168_mac_ocp_write( tp, 0xF89E, 0xE430 ); + rtl8168_mac_ocp_write( tp, 0xF8A0, 0xDE20 ); + rtl8168_mac_ocp_write( tp, 0xF8A2, 0xE0C0 ); + rtl8168_mac_ocp_write( tp, 0xF8A4, 0xE860 ); + rtl8168_mac_ocp_write( tp, 0xF8A6, 0xE84C ); + rtl8168_mac_ocp_write( tp, 0xF8A8, 0xB400 ); + rtl8168_mac_ocp_write( tp, 0xF8AA, 0xB430 ); + rtl8168_mac_ocp_write( tp, 0xF8AC, 0xE410 ); + 
rtl8168_mac_ocp_write( tp, 0xF8AE, 0xC0AE ); + rtl8168_mac_ocp_write( tp, 0xF8B0, 0xB407 ); + rtl8168_mac_ocp_write( tp, 0xF8B2, 0xB406 ); + rtl8168_mac_ocp_write( tp, 0xF8B4, 0xB405 ); + rtl8168_mac_ocp_write( tp, 0xF8B6, 0xB404 ); + rtl8168_mac_ocp_write( tp, 0xF8B8, 0xB403 ); + rtl8168_mac_ocp_write( tp, 0xF8BA, 0xB402 ); + rtl8168_mac_ocp_write( tp, 0xF8BC, 0xB401 ); + rtl8168_mac_ocp_write( tp, 0xF8BE, 0xC7EE ); + rtl8168_mac_ocp_write( tp, 0xF8C0, 0x76F4 ); + rtl8168_mac_ocp_write( tp, 0xF8C2, 0xC2ED ); + rtl8168_mac_ocp_write( tp, 0xF8C4, 0xC3ED ); + rtl8168_mac_ocp_write( tp, 0xF8C6, 0xC1EF ); + rtl8168_mac_ocp_write( tp, 0xF8C8, 0xC5F3 ); + rtl8168_mac_ocp_write( tp, 0xF8CA, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF8CC, 0x49CD ); + rtl8168_mac_ocp_write( tp, 0xF8CE, 0xF001 ); + rtl8168_mac_ocp_write( tp, 0xF8D0, 0xC5EE ); + rtl8168_mac_ocp_write( tp, 0xF8D2, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF8D4, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF8D6, 0xF105 ); + rtl8168_mac_ocp_write( tp, 0xF8D8, 0xC5E4 ); + rtl8168_mac_ocp_write( tp, 0xF8DA, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xF8DC, 0x49CE ); + rtl8168_mac_ocp_write( tp, 0xF8DE, 0xF00B ); + rtl8168_mac_ocp_write( tp, 0xF8E0, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xF8E2, 0x484B ); + rtl8168_mac_ocp_write( tp, 0xF8E4, 0x9C44 ); + rtl8168_mac_ocp_write( tp, 0xF8E6, 0x1C10 ); + rtl8168_mac_ocp_write( tp, 0xF8E8, 0x9C62 ); + rtl8168_mac_ocp_write( tp, 0xF8EA, 0x1C11 ); + rtl8168_mac_ocp_write( tp, 0xF8EC, 0x8C60 ); + rtl8168_mac_ocp_write( tp, 0xF8EE, 0x1C00 ); + rtl8168_mac_ocp_write( tp, 0xF8F0, 0x9CF6 ); + rtl8168_mac_ocp_write( tp, 0xF8F2, 0xE0EC ); + rtl8168_mac_ocp_write( tp, 0xF8F4, 0x49E7 ); + rtl8168_mac_ocp_write( tp, 0xF8F6, 0xF016 ); + rtl8168_mac_ocp_write( tp, 0xF8F8, 0x1D80 ); + rtl8168_mac_ocp_write( tp, 0xF8FA, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xF8FC, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF8FE, 0x4843 ); + rtl8168_mac_ocp_write( tp, 0xF900, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF902, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF904, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF906, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xF908, 0x48C8 ); + rtl8168_mac_ocp_write( tp, 0xF90A, 0x48C9 ); + rtl8168_mac_ocp_write( tp, 0xF90C, 0x48CA ); + rtl8168_mac_ocp_write( tp, 0xF90E, 0x9C44 ); + rtl8168_mac_ocp_write( tp, 0xF910, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF912, 0x4844 ); + rtl8168_mac_ocp_write( tp, 0xF914, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF916, 0x1E01 ); + rtl8168_mac_ocp_write( tp, 0xF918, 0xE8DB ); + rtl8168_mac_ocp_write( tp, 0xF91A, 0x7420 ); + rtl8168_mac_ocp_write( tp, 0xF91C, 0x48C1 ); + rtl8168_mac_ocp_write( tp, 0xF91E, 0x9C20 ); + rtl8168_mac_ocp_write( tp, 0xF920, 0xE0D5 ); + rtl8168_mac_ocp_write( tp, 0xF922, 0x49E6 ); + rtl8168_mac_ocp_write( tp, 0xF924, 0xF02A ); + rtl8168_mac_ocp_write( tp, 0xF926, 0x1D40 ); + rtl8168_mac_ocp_write( tp, 0xF928, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xF92A, 0x74FC ); + rtl8168_mac_ocp_write( tp, 0xF92C, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF92E, 0xF124 ); + rtl8168_mac_ocp_write( tp, 0xF930, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF932, 0xF122 ); + rtl8168_mac_ocp_write( tp, 0xF934, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF936, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF938, 0xF01F ); + rtl8168_mac_ocp_write( tp, 0xF93A, 0xE8D3 ); + rtl8168_mac_ocp_write( tp, 0xF93C, 0x48C4 ); + rtl8168_mac_ocp_write( tp, 0xF93E, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF940, 0x1E00 ); + rtl8168_mac_ocp_write( tp, 0xF942, 0xE8C6 ); + rtl8168_mac_ocp_write( tp, 
0xF944, 0xC5B1 ); + rtl8168_mac_ocp_write( tp, 0xF946, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF948, 0x49C3 ); + rtl8168_mac_ocp_write( tp, 0xF94A, 0xF016 ); + rtl8168_mac_ocp_write( tp, 0xF94C, 0xC5AF ); + rtl8168_mac_ocp_write( tp, 0xF94E, 0x74A4 ); + rtl8168_mac_ocp_write( tp, 0xF950, 0x49C2 ); + rtl8168_mac_ocp_write( tp, 0xF952, 0xF005 ); + rtl8168_mac_ocp_write( tp, 0xF954, 0xC5AA ); + rtl8168_mac_ocp_write( tp, 0xF956, 0x74B2 ); + rtl8168_mac_ocp_write( tp, 0xF958, 0x49C9 ); + rtl8168_mac_ocp_write( tp, 0xF95A, 0xF10E ); + rtl8168_mac_ocp_write( tp, 0xF95C, 0xC5A6 ); + rtl8168_mac_ocp_write( tp, 0xF95E, 0x74A8 ); + rtl8168_mac_ocp_write( tp, 0xF960, 0x4845 ); + rtl8168_mac_ocp_write( tp, 0xF962, 0x4846 ); + rtl8168_mac_ocp_write( tp, 0xF964, 0x4847 ); + rtl8168_mac_ocp_write( tp, 0xF966, 0x4848 ); + rtl8168_mac_ocp_write( tp, 0xF968, 0x9CA8 ); + rtl8168_mac_ocp_write( tp, 0xF96A, 0x74B2 ); + rtl8168_mac_ocp_write( tp, 0xF96C, 0x4849 ); + rtl8168_mac_ocp_write( tp, 0xF96E, 0x9CB2 ); + rtl8168_mac_ocp_write( tp, 0xF970, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF972, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xF974, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF976, 0xE0AA ); + rtl8168_mac_ocp_write( tp, 0xF978, 0x49E4 ); + rtl8168_mac_ocp_write( tp, 0xF97A, 0xF018 ); + rtl8168_mac_ocp_write( tp, 0xF97C, 0x1D10 ); + rtl8168_mac_ocp_write( tp, 0xF97E, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xF980, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF982, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF984, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF986, 0x4843 ); + rtl8168_mac_ocp_write( tp, 0xF988, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF98A, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF98C, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF98E, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF990, 0x4844 ); + rtl8168_mac_ocp_write( tp, 0xF992, 0x4842 ); + rtl8168_mac_ocp_write( tp, 0xF994, 0x4841 ); + rtl8168_mac_ocp_write( tp, 0xF996, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF998, 0x1E01 ); + rtl8168_mac_ocp_write( tp, 0xF99A, 0xE89A ); + rtl8168_mac_ocp_write( tp, 0xF99C, 0x7420 ); + rtl8168_mac_ocp_write( tp, 0xF99E, 0x4841 ); + rtl8168_mac_ocp_write( tp, 0xF9A0, 0x9C20 ); + rtl8168_mac_ocp_write( tp, 0xF9A2, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xF9A4, 0x4848 ); + rtl8168_mac_ocp_write( tp, 0xF9A6, 0x9C44 ); + rtl8168_mac_ocp_write( tp, 0xF9A8, 0xE091 ); + rtl8168_mac_ocp_write( tp, 0xF9AA, 0x49E5 ); + rtl8168_mac_ocp_write( tp, 0xF9AC, 0xF03E ); + rtl8168_mac_ocp_write( tp, 0xF9AE, 0x1D20 ); + rtl8168_mac_ocp_write( tp, 0xF9B0, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xF9B2, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF9B4, 0x48C2 ); + rtl8168_mac_ocp_write( tp, 0xF9B6, 0x4841 ); + rtl8168_mac_ocp_write( tp, 0xF9B8, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF9BA, 0x1E01 ); + rtl8168_mac_ocp_write( tp, 0xF9BC, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xF9BE, 0x49CA ); + rtl8168_mac_ocp_write( tp, 0xF9C0, 0xF103 ); + rtl8168_mac_ocp_write( tp, 0xF9C2, 0x49C2 ); + rtl8168_mac_ocp_write( tp, 0xF9C4, 0xF00C ); + rtl8168_mac_ocp_write( tp, 0xF9C6, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF9C8, 0xF004 ); + rtl8168_mac_ocp_write( tp, 0xF9CA, 0x6447 ); + rtl8168_mac_ocp_write( tp, 0xF9CC, 0x2244 ); + rtl8168_mac_ocp_write( tp, 0xF9CE, 0xE002 ); + rtl8168_mac_ocp_write( tp, 0xF9D0, 0x1C01 ); + rtl8168_mac_ocp_write( tp, 0xF9D2, 0x9C62 ); + rtl8168_mac_ocp_write( tp, 0xF9D4, 0x1C11 ); + rtl8168_mac_ocp_write( tp, 0xF9D6, 0x8C60 ); + rtl8168_mac_ocp_write( tp, 0xF9D8, 0x1C00 ); + rtl8168_mac_ocp_write( tp, 0xF9DA, 0x9CF6 ); + 
rtl8168_mac_ocp_write( tp, 0xF9DC, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xF9DE, 0x49C8 ); + rtl8168_mac_ocp_write( tp, 0xF9E0, 0xF01D ); + rtl8168_mac_ocp_write( tp, 0xF9E2, 0x74FC ); + rtl8168_mac_ocp_write( tp, 0xF9E4, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF9E6, 0xF11A ); + rtl8168_mac_ocp_write( tp, 0xF9E8, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF9EA, 0xF118 ); + rtl8168_mac_ocp_write( tp, 0xF9EC, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF9EE, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF9F0, 0xF015 ); + rtl8168_mac_ocp_write( tp, 0xF9F2, 0x49C6 ); + rtl8168_mac_ocp_write( tp, 0xF9F4, 0xF113 ); + rtl8168_mac_ocp_write( tp, 0xF9F6, 0xE875 ); + rtl8168_mac_ocp_write( tp, 0xF9F8, 0x48C4 ); + rtl8168_mac_ocp_write( tp, 0xF9FA, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF9FC, 0x7420 ); + rtl8168_mac_ocp_write( tp, 0xF9FE, 0x48C1 ); + rtl8168_mac_ocp_write( tp, 0xFA00, 0x9C20 ); + rtl8168_mac_ocp_write( tp, 0xFA02, 0xC50A ); + rtl8168_mac_ocp_write( tp, 0xFA04, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFA06, 0x8CA5 ); + rtl8168_mac_ocp_write( tp, 0xFA08, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xFA0A, 0xC505 ); + rtl8168_mac_ocp_write( tp, 0xFA0C, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xFA0E, 0x1C11 ); + rtl8168_mac_ocp_write( tp, 0xFA10, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xFA12, 0xE00A ); + rtl8168_mac_ocp_write( tp, 0xFA14, 0xE434 ); + rtl8168_mac_ocp_write( tp, 0xFA16, 0xD3C0 ); + rtl8168_mac_ocp_write( tp, 0xFA18, 0xDC00 ); + rtl8168_mac_ocp_write( tp, 0xFA1A, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xFA1C, 0x49CA ); + rtl8168_mac_ocp_write( tp, 0xFA1E, 0xF004 ); + rtl8168_mac_ocp_write( tp, 0xFA20, 0x48CA ); + rtl8168_mac_ocp_write( tp, 0xFA22, 0x9C44 ); + rtl8168_mac_ocp_write( tp, 0xFA24, 0xE855 ); + rtl8168_mac_ocp_write( tp, 0xFA26, 0xE052 ); + rtl8168_mac_ocp_write( tp, 0xFA28, 0x49E8 ); + rtl8168_mac_ocp_write( tp, 0xFA2A, 0xF024 ); + rtl8168_mac_ocp_write( tp, 0xFA2C, 0x1D01 ); + rtl8168_mac_ocp_write( tp, 0xFA2E, 0x8DF5 ); + rtl8168_mac_ocp_write( tp, 0xFA30, 0x7440 ); + rtl8168_mac_ocp_write( tp, 0xFA32, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xFA34, 0xF11E ); + rtl8168_mac_ocp_write( tp, 0xFA36, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xFA38, 0x49C8 ); + rtl8168_mac_ocp_write( tp, 0xFA3A, 0xF01B ); + rtl8168_mac_ocp_write( tp, 0xFA3C, 0x49CA ); + rtl8168_mac_ocp_write( tp, 0xFA3E, 0xF119 ); + rtl8168_mac_ocp_write( tp, 0xFA40, 0xC5EC ); + rtl8168_mac_ocp_write( tp, 0xFA42, 0x76A4 ); + rtl8168_mac_ocp_write( tp, 0xFA44, 0x49E3 ); + rtl8168_mac_ocp_write( tp, 0xFA46, 0xF015 ); + rtl8168_mac_ocp_write( tp, 0xFA48, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xFA4A, 0xF103 ); + rtl8168_mac_ocp_write( tp, 0xFA4C, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xFA4E, 0xF011 ); + rtl8168_mac_ocp_write( tp, 0xFA50, 0x4849 ); + rtl8168_mac_ocp_write( tp, 0xFA52, 0x9C44 ); + rtl8168_mac_ocp_write( tp, 0xFA54, 0x1C00 ); + rtl8168_mac_ocp_write( tp, 0xFA56, 0x9CF6 ); + rtl8168_mac_ocp_write( tp, 0xFA58, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xFA5A, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xFA5C, 0xF004 ); + rtl8168_mac_ocp_write( tp, 0xFA5E, 0x6446 ); + rtl8168_mac_ocp_write( tp, 0xFA60, 0x1E07 ); + rtl8168_mac_ocp_write( tp, 0xFA62, 0xE003 ); + rtl8168_mac_ocp_write( tp, 0xFA64, 0x1C01 ); + rtl8168_mac_ocp_write( tp, 0xFA66, 0x1E03 ); + rtl8168_mac_ocp_write( tp, 0xFA68, 0x9C62 ); + rtl8168_mac_ocp_write( tp, 0xFA6A, 0x1C11 ); + rtl8168_mac_ocp_write( tp, 0xFA6C, 0x8C60 ); + rtl8168_mac_ocp_write( tp, 0xFA6E, 0xE830 ); + rtl8168_mac_ocp_write( tp, 0xFA70, 0xE02D ); + rtl8168_mac_ocp_write( tp, 
0xFA72, 0x49E9 ); + rtl8168_mac_ocp_write( tp, 0xFA74, 0xF004 ); + rtl8168_mac_ocp_write( tp, 0xFA76, 0x1D02 ); + rtl8168_mac_ocp_write( tp, 0xFA78, 0x8DF5 ); + rtl8168_mac_ocp_write( tp, 0xFA7A, 0xE79C ); + rtl8168_mac_ocp_write( tp, 0xFA7C, 0x49E3 ); + rtl8168_mac_ocp_write( tp, 0xFA7E, 0xF006 ); + rtl8168_mac_ocp_write( tp, 0xFA80, 0x1D08 ); + rtl8168_mac_ocp_write( tp, 0xFA82, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xFA84, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xFA86, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xFA88, 0xE73A ); + rtl8168_mac_ocp_write( tp, 0xFA8A, 0x49E1 ); + rtl8168_mac_ocp_write( tp, 0xFA8C, 0xF007 ); + rtl8168_mac_ocp_write( tp, 0xFA8E, 0x1D02 ); + rtl8168_mac_ocp_write( tp, 0xFA90, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xFA92, 0x1E01 ); + rtl8168_mac_ocp_write( tp, 0xFA94, 0xE7A7 ); + rtl8168_mac_ocp_write( tp, 0xFA96, 0xDE20 ); + rtl8168_mac_ocp_write( tp, 0xFA98, 0xE410 ); + rtl8168_mac_ocp_write( tp, 0xFA9A, 0x49E0 ); + rtl8168_mac_ocp_write( tp, 0xFA9C, 0xF017 ); + rtl8168_mac_ocp_write( tp, 0xFA9E, 0x1D01 ); + rtl8168_mac_ocp_write( tp, 0xFAA0, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xFAA2, 0xC5FA ); + rtl8168_mac_ocp_write( tp, 0xFAA4, 0x1C00 ); + rtl8168_mac_ocp_write( tp, 0xFAA6, 0x8CA0 ); + rtl8168_mac_ocp_write( tp, 0xFAA8, 0x1C1B ); + rtl8168_mac_ocp_write( tp, 0xFAAA, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xFAAC, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFAAE, 0x49CF ); + rtl8168_mac_ocp_write( tp, 0xFAB0, 0xF0FE ); + rtl8168_mac_ocp_write( tp, 0xFAB2, 0xC5F3 ); + rtl8168_mac_ocp_write( tp, 0xFAB4, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xFAB6, 0x4849 ); + rtl8168_mac_ocp_write( tp, 0xFAB8, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xFABA, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xFABC, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xFABE, 0xF006 ); + rtl8168_mac_ocp_write( tp, 0xFAC0, 0x48C3 ); + rtl8168_mac_ocp_write( tp, 0xFAC2, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xFAC4, 0xE820 ); + rtl8168_mac_ocp_write( tp, 0xFAC6, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xFAC8, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xFACA, 0xC432 ); + rtl8168_mac_ocp_write( tp, 0xFACC, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xFACE, 0xC5E4 ); + rtl8168_mac_ocp_write( tp, 0xFAD0, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFAD2, 0x49CE ); + rtl8168_mac_ocp_write( tp, 0xFAD4, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xFAD6, 0x9EA0 ); + rtl8168_mac_ocp_write( tp, 0xFAD8, 0x1C1C ); + rtl8168_mac_ocp_write( tp, 0xFADA, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xFADC, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xFADE, 0xFF80 ); + rtl8168_mac_ocp_write( tp, 0xFAE0, 0xB404 ); + rtl8168_mac_ocp_write( tp, 0xFAE2, 0xB405 ); + rtl8168_mac_ocp_write( tp, 0xFAE4, 0xC5D9 ); + rtl8168_mac_ocp_write( tp, 0xFAE6, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFAE8, 0x49CE ); + rtl8168_mac_ocp_write( tp, 0xFAEA, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xFAEC, 0xC41F ); + rtl8168_mac_ocp_write( tp, 0xFAEE, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xFAF0, 0xC41C ); + rtl8168_mac_ocp_write( tp, 0xFAF2, 0x1C13 ); + rtl8168_mac_ocp_write( tp, 0xFAF4, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xFAF6, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xFAF8, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFAFA, 0x49CF ); + rtl8168_mac_ocp_write( tp, 0xFAFC, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xFAFE, 0xB005 ); + rtl8168_mac_ocp_write( tp, 0xFB00, 0xB004 ); + rtl8168_mac_ocp_write( tp, 0xFB02, 0xFF80 ); + rtl8168_mac_ocp_write( tp, 0xFB04, 0xB404 ); + rtl8168_mac_ocp_write( tp, 0xFB06, 0xB405 ); + rtl8168_mac_ocp_write( tp, 0xFB08, 0xC5C7 ); + 
rtl8168_mac_ocp_write( tp, 0xFB0A, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFB0C, 0x49CE ); + rtl8168_mac_ocp_write( tp, 0xFB0E, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xFB10, 0xC40E ); + rtl8168_mac_ocp_write( tp, 0xFB12, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xFB14, 0xC40A ); + rtl8168_mac_ocp_write( tp, 0xFB16, 0x1C13 ); + rtl8168_mac_ocp_write( tp, 0xFB18, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xFB1A, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xFB1C, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFB1E, 0x49CF ); + rtl8168_mac_ocp_write( tp, 0xFB20, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xFB22, 0xB005 ); + rtl8168_mac_ocp_write( tp, 0xFB24, 0xB004 ); + rtl8168_mac_ocp_write( tp, 0xFB26, 0xFF80 ); + rtl8168_mac_ocp_write( tp, 0xFB28, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFB2A, 0x0481 ); + rtl8168_mac_ocp_write( tp, 0xFB2C, 0x0C81 ); + rtl8168_mac_ocp_write( tp, 0xFB2E, 0x0AE0 ); + + + rtl8168_mac_ocp_write( tp, 0xFC26, 0x8000 ); + + rtl8168_mac_ocp_write( tp, 0xFC28, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFC2A, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFC2C, 0x0297 ); + rtl8168_mac_ocp_write( tp, 0xFC2E, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFC30, 0x00A9 ); + rtl8168_mac_ocp_write( tp, 0xFC32, 0x012D ); + rtl8168_mac_ocp_write( tp, 0xFC34, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFC36, 0x08DF ); +} + +static void +rtl8168_set_mac_mcu_8411b_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + rtl8168_hw_disable_mac_mcu_bps(dev); + + rtl8168_mac_ocp_write( tp, 0xF800, 0xE008 ); + rtl8168_mac_ocp_write( tp, 0xF802, 0xE00A ); + rtl8168_mac_ocp_write( tp, 0xF804, 0xE00C ); + rtl8168_mac_ocp_write( tp, 0xF806, 0xE00E ); + rtl8168_mac_ocp_write( tp, 0xF808, 0xE027 ); + rtl8168_mac_ocp_write( tp, 0xF80A, 0xE04F ); + rtl8168_mac_ocp_write( tp, 0xF80C, 0xE05E ); + rtl8168_mac_ocp_write( tp, 0xF80E, 0xE065 ); + rtl8168_mac_ocp_write( tp, 0xF810, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF812, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF814, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xF816, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF818, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF81A, 0x074C ); + rtl8168_mac_ocp_write( tp, 0xF81C, 0xC302 ); + rtl8168_mac_ocp_write( tp, 0xF81E, 0xBB00 ); + rtl8168_mac_ocp_write( tp, 0xF820, 0x080A ); + rtl8168_mac_ocp_write( tp, 0xF822, 0x6420 ); + rtl8168_mac_ocp_write( tp, 0xF824, 0x48C2 ); + rtl8168_mac_ocp_write( tp, 0xF826, 0x8C20 ); + rtl8168_mac_ocp_write( tp, 0xF828, 0xC516 ); + rtl8168_mac_ocp_write( tp, 0xF82A, 0x64A4 ); + rtl8168_mac_ocp_write( tp, 0xF82C, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF82E, 0xF009 ); + rtl8168_mac_ocp_write( tp, 0xF830, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xF832, 0x8CA5 ); + rtl8168_mac_ocp_write( tp, 0xF834, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF836, 0xC50E ); + rtl8168_mac_ocp_write( tp, 0xF838, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xF83A, 0x1C11 ); + rtl8168_mac_ocp_write( tp, 0xF83C, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF83E, 0xE006 ); + rtl8168_mac_ocp_write( tp, 0xF840, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF842, 0x48C4 ); + rtl8168_mac_ocp_write( tp, 0xF844, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF846, 0xC404 ); + rtl8168_mac_ocp_write( tp, 0xF848, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF84A, 0xC403 ); + rtl8168_mac_ocp_write( tp, 0xF84C, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF84E, 0x0BF2 ); + rtl8168_mac_ocp_write( tp, 0xF850, 0x0C0A ); + rtl8168_mac_ocp_write( tp, 0xF852, 0xE434 ); + rtl8168_mac_ocp_write( tp, 0xF854, 0xD3C0 ); + rtl8168_mac_ocp_write( tp, 0xF856, 0x49D9 ); 
+ rtl8168_mac_ocp_write( tp, 0xF858, 0xF01F ); + rtl8168_mac_ocp_write( tp, 0xF85A, 0xC526 ); + rtl8168_mac_ocp_write( tp, 0xF85C, 0x64A5 ); + rtl8168_mac_ocp_write( tp, 0xF85E, 0x1400 ); + rtl8168_mac_ocp_write( tp, 0xF860, 0xF007 ); + rtl8168_mac_ocp_write( tp, 0xF862, 0x0C01 ); + rtl8168_mac_ocp_write( tp, 0xF864, 0x8CA5 ); + rtl8168_mac_ocp_write( tp, 0xF866, 0x1C15 ); + rtl8168_mac_ocp_write( tp, 0xF868, 0xC51B ); + rtl8168_mac_ocp_write( tp, 0xF86A, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF86C, 0xE013 ); + rtl8168_mac_ocp_write( tp, 0xF86E, 0xC519 ); + rtl8168_mac_ocp_write( tp, 0xF870, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF872, 0x48C4 ); + rtl8168_mac_ocp_write( tp, 0xF874, 0x8CA0 ); + rtl8168_mac_ocp_write( tp, 0xF876, 0xC516 ); + rtl8168_mac_ocp_write( tp, 0xF878, 0x74A4 ); + rtl8168_mac_ocp_write( tp, 0xF87A, 0x48C8 ); + rtl8168_mac_ocp_write( tp, 0xF87C, 0x48CA ); + rtl8168_mac_ocp_write( tp, 0xF87E, 0x9CA4 ); + rtl8168_mac_ocp_write( tp, 0xF880, 0xC512 ); + rtl8168_mac_ocp_write( tp, 0xF882, 0x1B00 ); + rtl8168_mac_ocp_write( tp, 0xF884, 0x9BA0 ); + rtl8168_mac_ocp_write( tp, 0xF886, 0x1B1C ); + rtl8168_mac_ocp_write( tp, 0xF888, 0x483F ); + rtl8168_mac_ocp_write( tp, 0xF88A, 0x9BA2 ); + rtl8168_mac_ocp_write( tp, 0xF88C, 0x1B04 ); + rtl8168_mac_ocp_write( tp, 0xF88E, 0xC508 ); + rtl8168_mac_ocp_write( tp, 0xF890, 0x9BA0 ); + rtl8168_mac_ocp_write( tp, 0xF892, 0xC505 ); + rtl8168_mac_ocp_write( tp, 0xF894, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF896, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF898, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF89A, 0x0300 ); + rtl8168_mac_ocp_write( tp, 0xF89C, 0x051E ); + rtl8168_mac_ocp_write( tp, 0xF89E, 0xE434 ); + rtl8168_mac_ocp_write( tp, 0xF8A0, 0xE018 ); + rtl8168_mac_ocp_write( tp, 0xF8A2, 0xE092 ); + rtl8168_mac_ocp_write( tp, 0xF8A4, 0xDE20 ); + rtl8168_mac_ocp_write( tp, 0xF8A6, 0xD3C0 ); + rtl8168_mac_ocp_write( tp, 0xF8A8, 0xC50F ); + rtl8168_mac_ocp_write( tp, 0xF8AA, 0x76A4 ); + rtl8168_mac_ocp_write( tp, 0xF8AC, 0x49E3 ); + rtl8168_mac_ocp_write( tp, 0xF8AE, 0xF007 ); + rtl8168_mac_ocp_write( tp, 0xF8B0, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF8B2, 0xF103 ); + rtl8168_mac_ocp_write( tp, 0xF8B4, 0xC607 ); + rtl8168_mac_ocp_write( tp, 0xF8B6, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF8B8, 0xC606 ); + rtl8168_mac_ocp_write( tp, 0xF8BA, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF8BC, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF8BE, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF8C0, 0x0C4C ); + rtl8168_mac_ocp_write( tp, 0xF8C2, 0x0C28 ); + rtl8168_mac_ocp_write( tp, 0xF8C4, 0x0C2C ); + rtl8168_mac_ocp_write( tp, 0xF8C6, 0xDC00 ); + rtl8168_mac_ocp_write( tp, 0xF8C8, 0xC707 ); + rtl8168_mac_ocp_write( tp, 0xF8CA, 0x1D00 ); + rtl8168_mac_ocp_write( tp, 0xF8CC, 0x8DE2 ); + rtl8168_mac_ocp_write( tp, 0xF8CE, 0x48C1 ); + rtl8168_mac_ocp_write( tp, 0xF8D0, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF8D2, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF8D4, 0x00AA ); + rtl8168_mac_ocp_write( tp, 0xF8D6, 0xE0C0 ); + rtl8168_mac_ocp_write( tp, 0xF8D8, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF8DA, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF8DC, 0x0132 ); + + rtl8168_mac_ocp_write( tp, 0xFC26, 0x8000 ); + + rtl8168_mac_ocp_write( tp, 0xFC2A, 0x0743 ); + rtl8168_mac_ocp_write( tp, 0xFC2C, 0x0801 ); + rtl8168_mac_ocp_write( tp, 0xFC2E, 0x0BE9 ); + rtl8168_mac_ocp_write( tp, 0xFC30, 0x02FD ); + rtl8168_mac_ocp_write( tp, 0xFC32, 0x0C25 ); + rtl8168_mac_ocp_write( tp, 0xFC34, 0x00A9 ); + rtl8168_mac_ocp_write( tp, 0xFC36, 0x012D ); +} + +static void 
+rtl8168_set_mac_mcu_8168ep_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 i; + static const u16 mcu_patch_code_8168ep_1[] = { + 0xE008, 0xE0D3, 0xE0D6, 0xE0D9, 0xE0DB, 0xE0DD, 0xE0DF, 0xE0E1, 0xC251, + 0x7340, 0x49B1, 0xF010, 0x1D02, 0x8D40, 0xC202, 0xBA00, 0x2C3A, 0xC0F0, + 0xE8DE, 0x2000, 0x8000, 0xC0B6, 0x268C, 0x752C, 0x49D4, 0xF112, 0xE025, + 0xC2F6, 0x7146, 0xC2F5, 0x7340, 0x49BE, 0xF103, 0xC7F2, 0xE002, 0xC7F1, + 0x304F, 0x6226, 0x49A1, 0xF1F0, 0x7222, 0x49A0, 0xF1ED, 0x2525, 0x1F28, + 0x3097, 0x3091, 0x9A36, 0x752C, 0x21DC, 0x25BC, 0xC6E2, 0x77C0, 0x1304, + 0xF014, 0x1303, 0xF014, 0x1302, 0xF014, 0x1301, 0xF014, 0x49D4, 0xF103, + 0xC3D7, 0xBB00, 0xC618, 0x67C6, 0x752E, 0x22D7, 0x26DD, 0x1505, 0xF013, + 0xC60A, 0xBE00, 0xC309, 0xBB00, 0xC308, 0xBB00, 0xC307, 0xBB00, 0xC306, + 0xBB00, 0x25C8, 0x25A6, 0x25AC, 0x25B2, 0x25B8, 0xCD08, 0x0000, 0xC0BC, + 0xC2FF, 0x7340, 0x49B0, 0xF04E, 0x1F46, 0x308F, 0xC3F7, 0x1C04, 0xE84D, + 0x1401, 0xF147, 0x7226, 0x49A7, 0xF044, 0x7222, 0x2525, 0x1F30, 0x3097, + 0x3091, 0x7340, 0xC4EA, 0x401C, 0xF006, 0xC6E8, 0x75C0, 0x49D7, 0xF105, + 0xE036, 0x1D08, 0x8DC1, 0x0208, 0x6640, 0x2764, 0x1606, 0xF12F, 0x6346, + 0x133B, 0xF12C, 0x9B34, 0x1B18, 0x3093, 0xC32A, 0x1C10, 0xE82A, 0x1401, + 0xF124, 0x1A36, 0x308A, 0x7322, 0x25B5, 0x0B0E, 0x1C00, 0xE82C, 0xC71F, + 0x4027, 0xF11A, 0xE838, 0x1F42, 0x308F, 0x1B08, 0xE824, 0x7236, 0x7746, + 0x1700, 0xF00D, 0xC313, 0x401F, 0xF103, 0x1F00, 0x9F46, 0x7744, 0x449F, + 0x445F, 0xE817, 0xC70A, 0x4027, 0xF105, 0xC302, 0xBB00, 0x2E08, 0x2DC2, + 0xC7FF, 0xBF00, 0xCDB8, 0xFFFF, 0x0C02, 0xA554, 0xA5DC, 0x402F, 0xF105, + 0x1400, 0xF1FA, 0x1C01, 0xE002, 0x1C00, 0xFF80, 0x49B0, 0xF004, 0x0B01, + 0xA1D3, 0xE003, 0x0B02, 0xA5D3, 0x3127, 0x3720, 0x0B02, 0xA5D3, 0x3127, + 0x3720, 0x1300, 0xF1FB, 0xFF80, 0x7322, 0x25B5, 0x1E28, 0x30DE, 0x30D9, + 0x7264, 0x1E11, 0x2368, 0x3116, 0xFF80, 0x1B7E, 0xC602, 0xBE00, 0x06A6, + 0x1B7E, 0xC602, 0xBE00, 0x0764, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, + 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, + 0x0000 + }; + + rtl8168_hw_disable_mac_mcu_bps(dev); + + for (i = 0; i < ARRAY_SIZE(mcu_patch_code_8168ep_1); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168ep_1[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x2549); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x06A5); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x0763); +} + +static void +rtl8168_set_mac_mcu_8168ep_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 i; + static const u16 mcu_patch_code_8168ep_2[] = { + 0xE008, 0xE017, 0xE052, 0xE056, 0xE058, 0xE05A, 0xE05C, 0xE05E, 0xC50F, + 0x76A4, 0x49E3, 0xF007, 0x49C0, 0xF103, 0xC607, 0xBE00, 0xC606, 0xBE00, + 0xC602, 0xBE00, 0x0BDA, 0x0BB6, 0x0BBA, 0xDC00, 0xB400, 0xB401, 0xB402, + 0xB403, 0xB404, 0xC02E, 0x7206, 0x49AE, 0xF1FE, 0xC12B, 0x9904, 0xC12A, + 0x9906, 0x7206, 0x49AE, 0xF1FE, 0x7200, 0x49A0, 0xF117, 0xC123, 0xC223, + 0xC323, 0xE808, 0xC322, 0xE806, 0xC321, 0xE804, 0xC320, 0xE802, 0xE00C, + 0x740E, 0x49CE, 0xF1FE, 0x9908, 0x990A, 0x9A0C, 0x9B0E, 0x740E, 0x49CE, + 0xF1FE, 0xFF80, 0xB004, 0xB003, 0xB002, 0xB001, 0xB000, 0xC604, 0xC002, + 0xB800, 0x1FC8, 0xE000, 0xE8E0, 0xF128, 0x0002, 0xFFFF, 0xF000, 0x8001, + 0x8002, 0x8003, 0x8004, 0x48C1, 0x48C2, 0xC502, 0xBD00, 0x0490, 0xC602, + 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, + 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, + }; + + rtl8168_hw_disable_mac_mcu_bps(dev); + 
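/*
+ * [Editorial sketch] This and the remaining table-driven loaders follow
+ * the same three steps: disable the MCU break points, stream the patch
+ * words into patch RAM at OCP offset 0xF800 (one u16 per slot), then
+ * set the per-slot entry points in 0xFC28..0xFC36 and write 0x8000 to
+ * 0xFC26, which appears to enable the patch (cf.
+ * rtl8168_hw_disable_mac_mcu_bps(), which clears these registers).
+ * A hypothetical helper for the load step, shown for illustration
+ * only -- this driver does not define it:
+ *
+ *   static void rtl8168_load_mcu_patch(struct rtl8168_private *tp,
+ *                                      const u16 *code, u16 nwords)
+ *   {
+ *           u16 i;
+ *           // patch RAM is addressed in 16-bit words from 0xF800
+ *           for (i = 0; i < nwords; i++)
+ *                   rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, code[i]);
+ *   }
+ */
+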
+ for (i = 0; i < ARRAY_SIZE(mcu_patch_code_8168ep_2); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168ep_2[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x0BB3); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x1FC7); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x0485); +} + +static void +rtl8168_set_mac_mcu_8168h_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 i; + static const u16 mcu_patch_code_8168h_1[] = { + 0xE008, 0xE00F, 0xE011, 0xE047, 0xE049, 0xE073, 0xE075, 0xE079, 0xC707, + 0x1D00, 0x8DE2, 0x48C1, 0xC502, 0xBD00, 0x00E4, 0xE0C0, 0xC502, 0xBD00, + 0x0216, 0xC634, 0x75C0, 0x49D3, 0xF027, 0xC631, 0x75C0, 0x49D3, 0xF123, + 0xC627, 0x75C0, 0xB405, 0xC525, 0x9DC0, 0xC621, 0x75C8, 0x49D5, 0xF00A, + 0x49D6, 0xF008, 0x49D7, 0xF006, 0x49D8, 0xF004, 0x75D2, 0x49D9, 0xF111, + 0xC517, 0x9DC8, 0xC516, 0x9DD2, 0xC618, 0x75C0, 0x49D4, 0xF003, 0x49D0, + 0xF104, 0xC60A, 0xC50E, 0x9DC0, 0xB005, 0xC607, 0x9DC0, 0xB007, 0xC602, + 0xBE00, 0x1A06, 0xB400, 0xE86C, 0xA000, 0x01E1, 0x0200, 0x9200, 0xE84C, + 0xE004, 0xE908, 0xC502, 0xBD00, 0x0B58, 0xB407, 0xB404, 0x2195, 0x25BD, + 0x9BE0, 0x1C1C, 0x484F, 0x9CE2, 0x72E2, 0x49AE, 0xF1FE, 0x0B00, 0xF116, + 0xC71C, 0xC419, 0x9CE0, 0x1C13, 0x484F, 0x9CE2, 0x74E2, 0x49CE, 0xF1FE, + 0xC412, 0x9CE0, 0x1C13, 0x484F, 0x9CE2, 0x74E2, 0x49CE, 0xF1FE, 0xC70C, + 0x74F8, 0x48C3, 0x8CF8, 0xB004, 0xB007, 0xC502, 0xBD00, 0x0F24, 0x0481, + 0x0C81, 0xDE24, 0xE000, 0xC602, 0xBE00, 0x0CA4, 0x48C1, 0x48C2, 0xC502, + 0xBD00, 0x0578, 0xC602, 0xBE00, 0x0000 + }; + + rtl8168_hw_disable_mac_mcu_bps(dev); + + for (i = 0; i < ARRAY_SIZE(mcu_patch_code_8168h_1); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168h_1[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x00E2); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x0210); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x1A04); + rtl8168_mac_ocp_write(tp, 0xFC2E, 0x0B26); + rtl8168_mac_ocp_write(tp, 0xFC30, 0x0F02); + rtl8168_mac_ocp_write(tp, 0xFC32, 0x0CA0); + rtl8168_mac_ocp_write(tp, 0xFC34, 0x056C); + + rtl8168_mac_ocp_write(tp, 0xFC38, 0x007F); +} + +static void +rtl8168_set_mac_mcu_8168fp_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 i; + u16 breakPointEnabled = 0; + + rtl8168_hw_disable_mac_mcu_bps(dev); + + if(tp->HwPkgDet == 0x00 || tp->HwPkgDet == 0x0F) { + static const u16 mcu_patch_code_8168fp_1_1[] = { + 0xE00A, 0xE0C1, 0xE104, 0xE108, 0xE10D, 0xE112, 0xE11C, 0xE121, 0xE000, + 0xE0C8, 0xB400, 0xC1FE, 0x49E2, 0xF04C, 0x49EA, 0xF04A, 0x74E6, 0xC246, + 0x7542, 0x73EC, 0x1800, 0x49C0, 0xF10D, 0x49C1, 0xF10B, 0x49C2, 0xF109, + 0x49B0, 0xF107, 0x49B1, 0xF105, 0x7220, 0x49A2, 0xF102, 0xE002, 0x4800, + 0x49D0, 0xF10A, 0x49D1, 0xF108, 0x49D2, 0xF106, 0x49D3, 0xF104, 0x49DF, + 0xF102, 0xE00C, 0x4801, 0x72E4, 0x49AD, 0xF108, 0xC225, 0x6741, 0x48F0, + 0x8F41, 0x4870, 0x8F41, 0xC7CF, 0x49B5, 0xF01F, 0x49B2, 0xF00B, 0x4980, + 0xF003, 0x484E, 0x94E7, 0x4981, 0xF004, 0x485E, 0xC212, 0x9543, 0xE071, + 0x49B6, 0xF003, 0x49B3, 0xF10F, 0x4980, 0xF003, 0x484E, 0x94E7, 0x4981, + 0xF004, 0x485E, 0xC204, 0x9543, 0xE005, 0xE000, 0xE0FC, 0xE0FA, 0xE065, + 0x49B7, 0xF007, 0x4980, 0xF005, 0x1A38, 0x46D4, 0x1200, 0xF109, 0x4981, + 0xF055, 0x49C3, 0xF105, 0x1A30, 0x46D5, 0x1200, 0xF04F, 0x7220, 0x49A2, + 0xF130, 0x49C1, 0xF12E, 0x49B0, 0xF12C, 0xC2E6, 0x7240, 0x49A8, 0xF003, + 0x49D0, 0xF126, 0x49A9, 0xF003, 0x49D1, 0xF122, 0x49AA, 0xF003, 0x49D2, + 0xF11E, 0x49AB, 0xF003, 0x49DF, 
0xF11A, 0x49AC, 0xF003, 0x49D3, 0xF116, + 0x4980, 0xF003, 0x49C7, 0xF105, 0x4981, 0xF02C, 0x49D7, 0xF02A, 0x49C0, + 0xF00C, 0xC721, 0x62F4, 0x49A0, 0xF008, 0x49A4, 0xF106, 0x4824, 0x8AF4, + 0xC71A, 0x1A40, 0x9AE0, 0x49B6, 0xF017, 0x200E, 0xC7B8, 0x72E0, 0x4710, + 0x92E1, 0xC70E, 0x77E0, 0x49F0, 0xF112, 0xC70B, 0x77E0, 0x27FE, 0x1AFA, + 0x4317, 0xC705, 0x9AE2, 0x1A11, 0x8AE0, 0xE008, 0xE41C, 0xC0AE, 0xD23A, + 0xC7A2, 0x74E6, 0x484F, 0x94E7, 0xC79E, 0x8CE6, 0x8BEC, 0xC29C, 0x8D42, + 0x7220, 0xB000, 0xC502, 0xBD00, 0x0932, 0xB400, 0xC240, 0xC340, 0x7060, + 0x498F, 0xF014, 0x488F, 0x9061, 0x744C, 0x49C3, 0xF004, 0x7562, 0x485E, + 0x9563, 0x7446, 0x49C3, 0xF106, 0x7562, 0x1C30, 0x46E5, 0x1200, 0xF004, + 0x7446, 0x484F, 0x9447, 0xC32A, 0x7466, 0x49C0, 0xF00F, 0x48C0, 0x9C66, + 0x7446, 0x4840, 0x4841, 0x4842, 0x9C46, 0x744C, 0x4840, 0x9C4C, 0x744A, + 0x484A, 0x9C4A, 0xE013, 0x498E, 0xF011, 0x488E, 0x9061, 0x744C, 0x49C3, + 0xF004, 0x7446, 0x484E, 0x9447, 0x7446, 0x1D38, 0x46EC, 0x1500, 0xF004, + 0x7446, 0x484F, 0x9447, 0xB000, 0xC502, 0xBD00, 0x074C, 0xE000, 0xE0FC, + 0xE0C0, 0x4830, 0x4837, 0xC502, 0xBD00, 0x0978, 0x63E2, 0x4830, 0x4837, + 0xC502, 0xBD00, 0x09FE, 0x73E2, 0x4830, 0x8BE2, 0xC302, 0xBB00, 0x0A12, + 0x73E2, 0x48B0, 0x48B3, 0x48B4, 0x48B5, 0x48B6, 0x48B7, 0x8BE2, 0xC302, + 0xBB00, 0x0A5A, 0x73E2, 0x4830, 0x8BE2, 0xC302, 0xBB00, 0x0A6C, 0x73E2, + 0x4830, 0x4837, 0xC502, 0xBD00, 0x0A86 + }; + + for (i = 0; i < ARRAY_SIZE(mcu_patch_code_8168fp_1_1); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168fp_1_1[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x0890); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x0712); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x0974); + rtl8168_mac_ocp_write(tp, 0xFC2E, 0x09FC); + rtl8168_mac_ocp_write(tp, 0xFC30, 0x0A0E); + rtl8168_mac_ocp_write(tp, 0xFC32, 0x0A56); + rtl8168_mac_ocp_write(tp, 0xFC34, 0x0A68); + rtl8168_mac_ocp_write(tp, 0xFC36, 0x0A84); + + } else if (tp->HwPkgDet == 0x6) { + static const u16 mcu_patch_code_8168fp_1_2[] = { + 0xE008, 0xE00A, 0xE031, 0xE033, 0xE035, 0xE144, 0xE166, 0xE168, 0xC502, + 0xBD00, 0x0000, 0xC725, 0x75E0, 0x48D0, 0x9DE0, 0xC722, 0x75E0, 0x1C78, + 0x416C, 0x1530, 0xF111, 0xC71D, 0x75F6, 0x49D1, 0xF00D, 0x75E0, 0x1C1F, + 0x416C, 0x1502, 0xF108, 0x75FA, 0x49D3, 0xF005, 0x75EC, 0x9DE4, 0x4853, + 0x9DFA, 0xC70B, 0x75E0, 0x4852, 0x4850, 0x9DE0, 0xC602, 0xBE00, 0x04B8, + 0xE420, 0xE000, 0xE0FC, 0xE43C, 0xDC00, 0xEB00, 0xC202, 0xBA00, 0x0000, + 0xC002, 0xB800, 0x0000, 0xB401, 0xB402, 0xB403, 0xB404, 0xB405, 0xB406, + 0xC44D, 0xC54D, 0x1867, 0xE8A2, 0x2318, 0x276E, 0x1601, 0xF106, 0x1A07, + 0xE861, 0xE86B, 0xE873, 0xE037, 0x231E, 0x276E, 0x1602, 0xF10B, 0x1A07, + 0xE858, 0xE862, 0xC247, 0xC344, 0xE8E3, 0xC73B, 0x66E0, 0xE8B5, 0xE029, + 0x231A, 0x276C, 0xC733, 0x9EE0, 0x1866, 0xE885, 0x251C, 0x120F, 0xF011, + 0x1209, 0xF011, 0x2014, 0x240E, 0x1000, 0xF007, 0x120C, 0xF00D, 0x1203, + 0xF00D, 0x1200, 0xF00D, 0x120C, 0xF00D, 0x1203, 0xF00D, 0x1A03, 0xE00C, + 0x1A07, 0xE00A, 0x1A00, 0xE008, 0x1A01, 0xE006, 0x1A02, 0xE004, 0x1A04, + 0xE002, 0x1A05, 0xE829, 0xE833, 0xB006, 0xB005, 0xB004, 0xB003, 0xB002, + 0xB001, 0x60C4, 0xC702, 0xBF00, 0x2786, 0xDD00, 0xD030, 0xE0C4, 0xE0F8, + 0xDC42, 0xD3F0, 0x0000, 0x0004, 0x0007, 0x0014, 0x0090, 0x1000, 0x0F00, + 0x1004, 0x1008, 0x3000, 0x3004, 0x3008, 0x4000, 0x7777, 0x8000, 0x8001, + 0x8008, 0x8003, 0x8004, 0xC000, 0xC004, 0xF004, 0xFFFF, 0xB406, 0xB407, + 0xC6E5, 0x77C0, 0x27F3, 0x23F3, 0x47FA, 0x9FC0, 0xB007, 0xB006, 0xFF80, + 0xB405, 
0xB407, 0xC7D8, 0x75E0, 0x48D0, 0x9DE0, 0xB007, 0xB005, 0xFF80, + 0xB401, 0xC0EA, 0xC2DC, 0xC3D8, 0xE865, 0xC0D3, 0xC1E0, 0xC2E3, 0xE861, + 0xE817, 0xC0CD, 0xC2CF, 0xE85D, 0xC0C9, 0xC1D6, 0xC2DB, 0xE859, 0xE80F, + 0xC1C7, 0xC2CE, 0xE855, 0xC0C0, 0xC1D1, 0xC2D3, 0xE851, 0xE807, 0xC0BE, + 0xC2C2, 0xE84D, 0xE803, 0xB001, 0xFF80, 0xB402, 0xC2C6, 0xE859, 0x499F, + 0xF1FE, 0xB002, 0xFF80, 0xB402, 0xB403, 0xB407, 0xE821, 0x8882, 0x1980, + 0x8983, 0xE81D, 0x7180, 0x218B, 0x25BB, 0x1310, 0xF014, 0x1310, 0xFB03, + 0x1F20, 0x38FB, 0x3288, 0x434B, 0x2491, 0x430B, 0x1F0F, 0x38FB, 0x4313, + 0x2121, 0x4353, 0x2521, 0x418A, 0x6282, 0x2527, 0x212F, 0x418A, 0xB007, + 0xB003, 0xB002, 0xFF80, 0x6183, 0x2496, 0x1100, 0xF1FD, 0xFF80, 0x4800, + 0x4801, 0xC213, 0xC313, 0xE815, 0x4860, 0x8EE0, 0xC210, 0xC310, 0xE822, + 0x481E, 0xC20C, 0xC30C, 0xE80C, 0xC206, 0x7358, 0x483A, 0x9B58, 0xFF80, + 0xE8E0, 0xE000, 0x1008, 0x0F00, 0x800C, 0x0F00, 0xB407, 0xB406, 0xB403, + 0xC7F7, 0x98E0, 0x99E2, 0x9AE4, 0x21B2, 0x4831, 0x483F, 0x9BE6, 0x66E7, + 0x49E6, 0xF1FE, 0xB003, 0xB006, 0xB007, 0xFF80, 0xB407, 0xB406, 0xB403, + 0xC7E5, 0x9AE4, 0x21B2, 0x4831, 0x9BE6, 0x66E7, 0x49E6, 0xF1FE, 0x70E0, + 0x71E2, 0xB003, 0xB006, 0xB007, 0xFF80, 0x4882, 0xB406, 0xB405, 0xC71E, + 0x76E0, 0x1D78, 0x4175, 0x1630, 0xF10C, 0xC715, 0x76E0, 0x4861, 0x9EE0, + 0xC713, 0x1EFF, 0x9EE2, 0x75E0, 0x4850, 0x9DE0, 0xE005, 0xC70B, 0x76E0, + 0x4865, 0x9EE0, 0xB005, 0xB006, 0xC708, 0xC102, 0xB900, 0x279E, 0xEB16, + 0xEB00, 0xE43C, 0xDC00, 0xD3EC, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, + 0x0000 + }; + + for (i = 0; i < ARRAY_SIZE(mcu_patch_code_8168fp_1_2); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168fp_1_2[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x04b4); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC30, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC32, 0x279C); + rtl8168_mac_ocp_write(tp, 0xFC34, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC36, 0x0000); + } + + if (tp->HwPkgDet == 0x00) + breakPointEnabled = 0x00FC; + else if (tp->HwPkgDet == 0x0F) + breakPointEnabled = 0x00FF; + else if (tp->HwPkgDet == 0x06) + breakPointEnabled = 0x0022; + + rtl8168_mac_ocp_write(tp, 0xFC38, breakPointEnabled); +} + +static void +rtl8168_set_mac_mcu_8168fp_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 i; + static const u16 mcu_patch_code_8168fp_2[] = { + 0xE008, 0xE00A, 0xE00F, 0xE014, 0xE05F, 0xE063, 0xE065, 0xE067, 0xC602, + 0xBE00, 0x2AB2, 0x1BC0, 0x46EB, 0x1BFE, 0xC102, 0xB900, 0x0B1A, 0x1BC0, + 0x46EB, 0x1B7E, 0xC102, 0xB900, 0x0BEA, 0xB400, 0xB401, 0xB402, 0xB403, + 0xB404, 0xB405, 0xC03A, 0x7206, 0x49AE, 0xF1FE, 0xC137, 0x9904, 0xC136, + 0x9906, 0x7206, 0x49AE, 0xF1FE, 0x7200, 0x49A0, 0xF10B, 0xC52F, 0xC12E, + 0xC232, 0xC332, 0xE812, 0xC331, 0xE810, 0xC330, 0xE80E, 0xE018, 0xC126, + 0xC229, 0xC525, 0xC328, 0xE808, 0xC523, 0xC326, 0xE805, 0xC521, 0xC324, + 0xE802, 0xE00C, 0x740E, 0x49CE, 0xF1FE, 0x9908, 0x9D0A, 0x9A0C, 0x9B0E, + 0x740E, 0x49CE, 0xF1FE, 0xFF80, 0xB005, 0xB004, 0xB003, 0xB002, 0xB001, + 0xB000, 0xC604, 0xC002, 0xB800, 0x2A5E, 0xE000, 0xE8E0, 0xF128, 0x3DC2, + 0xFFFF, 0x10EC, 0x816A, 0x816D, 0x816C, 0xF000, 0x8002, 0x8004, 0x8007, + 0x48C1, 0x48C2, 0xC502, 0xBD00, 0x07BC, 0xC602, 0xBE00, 0x0000, 0xC602, + 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000 + }; + + rtl8168_hw_disable_mac_mcu_bps(dev); + + for (i = 0; i < 
ARRAY_SIZE(mcu_patch_code_8168fp_2); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168fp_2[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x2AAC); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x0B14); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x0BE4); + rtl8168_mac_ocp_write(tp, 0xFC2E, 0x2A5C); + rtl8168_mac_ocp_write(tp, 0xFC30, 0x07B0); + + if (tp->HwSuppSerDesPhyVer == 1) + rtl8168_mac_ocp_write(tp, 0xFC38, 0x001F); + else + rtl8168_mac_ocp_write(tp, 0xFC38, 0x001E); + +} + +static void +rtl8168_set_mac_mcu_8168fp_3(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 i; + static const u16 mcu_patch_code_8168fp_3[] = { + 0xE008, 0xE053, 0xE057, 0xE059, 0xE05B, 0xE05D, 0xE05F, 0xE061, 0xB400, + 0xB401, 0xB402, 0xB403, 0xB404, 0xB405, 0xC03A, 0x7206, 0x49AE, 0xF1FE, + 0xC137, 0x9904, 0xC136, 0x9906, 0x7206, 0x49AE, 0xF1FE, 0x7200, 0x49A0, + 0xF10B, 0xC52F, 0xC12E, 0xC232, 0xC332, 0xE812, 0xC331, 0xE810, 0xC330, + 0xE80E, 0xE018, 0xC126, 0xC229, 0xC525, 0xC328, 0xE808, 0xC523, 0xC326, + 0xE805, 0xC521, 0xC324, 0xE802, 0xE00C, 0x740E, 0x49CE, 0xF1FE, 0x9908, + 0x9D0A, 0x9A0C, 0x9B0E, 0x740E, 0x49CE, 0xF1FE, 0xFF80, 0xB005, 0xB004, + 0xB003, 0xB002, 0xB001, 0xB000, 0xC604, 0xC002, 0xB800, 0x2B16, 0xE000, + 0xE8E0, 0xF128, 0x3DC2, 0xFFFF, 0x10EC, 0x816A, 0x816D, 0x816C, 0xF000, + 0x8002, 0x8004, 0x8007, 0x48C1, 0x48C2, 0xC502, 0xBD00, 0x07BC, 0xC602, + 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, + 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000 + }; + + rtl8168_hw_disable_mac_mcu_bps(dev); + + for (i = 0; i < ARRAY_SIZE(mcu_patch_code_8168fp_3); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168fp_3[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x2B14); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x07B0); + + rtl8168_mac_ocp_write(tp, 0xFC38, 0x0003); +} + +static void +rtl8168_hw_mac_mcu_config(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (tp->NotWrMcuPatchCode == TRUE) return; + + switch (tp->mcfg) { + case CFG_METHOD_21: + rtl8168_set_mac_mcu_8168g_1(dev); + break; + case CFG_METHOD_24: + rtl8168_set_mac_mcu_8168gu_1(dev); + break; + case CFG_METHOD_25: + rtl8168_set_mac_mcu_8168gu_2(dev); + break; + case CFG_METHOD_26: + rtl8168_set_mac_mcu_8411b_1(dev); + break; + case CFG_METHOD_27: + rtl8168_set_mac_mcu_8168ep_1(dev); + break; + case CFG_METHOD_28: + rtl8168_set_mac_mcu_8168ep_2(dev); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + rtl8168_set_mac_mcu_8168h_1(dev); + break; + case CFG_METHOD_31: + rtl8168_set_mac_mcu_8168fp_1(dev); + break; + case CFG_METHOD_32: + rtl8168_set_mac_mcu_8168fp_2(dev); + break; + case CFG_METHOD_33: + rtl8168_set_mac_mcu_8168fp_3(dev); + break; + } +} +#endif + +#ifdef ENABLE_USE_FIRMWARE_FILE +static void rtl8168_release_firmware(struct rtl8168_private *tp) +{ + if (tp->rtl_fw) { + rtl8168_fw_release_firmware(tp->rtl_fw); + kfree(tp->rtl_fw); + tp->rtl_fw = NULL; + } +} + +void rtl8168_apply_firmware(struct rtl8168_private *tp) +{ + /* TODO: release firmware if rtl_fw_write_firmware signals failure. */ + if (tp->rtl_fw) { + rtl8168_fw_write_firmware(tp, tp->rtl_fw); + /* At least one firmware doesn't reset tp->ocp_base. 
*/ + tp->ocp_base = OCP_STD_PHY_BASE; + + /* PHY soft reset may still be in progress */ + //phy_read_poll_timeout(tp->phydev, MII_BMCR, val, + // !(val & BMCR_RESET), + // 50000, 600000, true); + rtl8168_wait_phy_reset_complete(tp); + + tp->hw_ram_code_ver = rtl8168_get_hw_phy_mcu_code_ver(tp); + tp->sw_ram_code_ver = tp->hw_ram_code_ver; + tp->HwHasWrRamCodeToMicroP = TRUE; + } +} +#endif + +static void +rtl8168_hw_init(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 csi_tmp; + + if (tp->HwSuppAspmClkIntrLock) { + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) & ~BIT_7); + rtl8168_enable_cfg9346_write(tp); + rtl8168_hw_aspm_clkreq_enable(tp, false); + rtl8168_disable_cfg9346_write(tp); + } + + //Disable UPS + if (HW_SUPPORT_UPS_MODE(tp)) + rtl8168_mac_ocp_write(tp, 0xD400, rtl8168_mac_ocp_read( tp, 0xD400) & ~(BIT_0)); + + //Disable DMA Aggregation + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mac_ocp_write(tp, 0xE63E, rtl8168_mac_ocp_read( tp, 0xE63E) & ~(BIT_3 | BIT_2 | BIT_1)); + rtl8168_mac_ocp_write(tp, 0xE63E, rtl8168_mac_ocp_read( tp, 0xE63E) | (BIT_0)); + rtl8168_mac_ocp_write(tp, 0xE63E, rtl8168_mac_ocp_read( tp, 0xE63E) & ~(BIT_0)); + rtl8168_mac_ocp_write(tp, 0xC094, 0x0); + rtl8168_mac_ocp_write(tp, 0xC09E, 0x0); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + RTL_W8(tp, DBG_reg, RTL_R8(tp, DBG_reg) | BIT_1 | BIT_7); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + RTL_W8(tp, 0xF2, (RTL_R8(tp, 0xF2) & ~(BIT_2 | BIT_1 | BIT_0))); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + if (aspm) { + RTL_W8(tp, 0x6E, RTL_R8(tp, 0x6E) | BIT_6); + rtl8168_eri_write(tp, 0x1AE, 2, 0x0403, ERIAR_ExGMAC); + } + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_29: + case CFG_METHOD_30: + if (aspm) { + if ((rtl8168_mac_ocp_read(tp, 0xDC00) & BIT_3) || (RTL_R8(tp, Config0) & 0x07)) { + RTL_W8(tp, 0x6E, RTL_R8(tp, 0x6E) | BIT_6); + rtl8168_eri_write(tp, 0x1AE, 2, 0x0403, ERIAR_ExGMAC); + } + } + break; + } + + if (tp->mcfg == CFG_METHOD_10 || tp->mcfg == CFG_METHOD_14 || tp->mcfg == CFG_METHOD_15) + RTL_W8(tp, 0xF3, RTL_R8(tp, 0xF3) | BIT_2); + +#ifndef ENABLE_USE_FIRMWARE_FILE + if (!tp->rtl_fw) + rtl8168_hw_mac_mcu_config(dev); +#endif + + /*disable ocp phy power saving*/ + if (tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) + if (!tp->dash_printer_enabled) + rtl8168_disable_ocp_phy_power_saving(dev); + + //Set PCIE uncorrectable error status mask pcie 0x108 + csi_tmp = rtl8168_csi_read(tp, 0x108); + csi_tmp |= BIT_20; + rtl8168_csi_write(tp, 0x108, csi_tmp); + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + csi_tmp = rtl8168_eri_read(tp, 0x1AB, 1, ERIAR_ExGMAC); + csi_tmp |= ( BIT_2 | BIT_3 | BIT_4 | BIT_5 | BIT_6 | BIT_7 ); + rtl8168_eri_write(tp, 0x1AB, 1, csi_tmp, ERIAR_ExGMAC); + break; + } + + rtl8168_set_pci_pme(tp, 0); + + if (s0_magic_packet == 1) + rtl8168_enable_magic_packet(dev); + +#ifdef ENABLE_USE_FIRMWARE_FILE + if 
(tp->rtl_fw && + !(HW_DASH_SUPPORT_TYPE_3(tp) && + tp->HwPkgDet == 0x06)) + rtl8168_apply_firmware(tp); +#endif +} + +static void +rtl8168_hw_ephy_config(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 ephy_data; + + switch (tp->mcfg) { + case CFG_METHOD_4: + /*Set EPHY registers begin*/ + /*Set EPHY register offset 0x02 bit 11 to 0 and bit 12 to 1*/ + ephy_data = rtl8168_ephy_read(tp, 0x02); + ephy_data &= ~BIT_11; + ephy_data |= BIT_12; + rtl8168_ephy_write(tp, 0x02, ephy_data); + + /*Set EPHY register offset 0x03 bit 1 to 1*/ + ephy_data = rtl8168_ephy_read(tp, 0x03); + ephy_data |= (1 << 1); + rtl8168_ephy_write(tp, 0x03, ephy_data); + + /*Set EPHY register offset 0x06 bit 7 to 0*/ + ephy_data = rtl8168_ephy_read(tp, 0x06); + ephy_data &= ~(1 << 7); + rtl8168_ephy_write(tp, 0x06, ephy_data); + /*Set EPHY registers end*/ + + break; + case CFG_METHOD_5: + /* set EPHY registers */ + SetPCIePhyBit(tp, 0x01, BIT_0); + + ClearAndSetPCIePhyBit(tp, + 0x03, + BIT_10, + BIT_5 + ); + + break; + case CFG_METHOD_9: + /* set EPHY registers */ + rtl8168_ephy_write(tp, 0x01, 0x7C7F); + rtl8168_ephy_write(tp, 0x02, 0x011F); + if (tp->eeprom_type != EEPROM_TYPE_NONE) { + ClearAndSetPCIePhyBit(tp, + 0x03, + 0xFFB0, + 0x05B0 + ); + } else { + ClearAndSetPCIePhyBit(tp, + 0x03, + 0xFFF0, + 0x05F0 + ); + } + rtl8168_ephy_write(tp, 0x06, 0xB271); + rtl8168_ephy_write(tp, 0x07, 0xCE00); + + break; + case CFG_METHOD_10: + /* set EPHY registers */ + rtl8168_ephy_write(tp, 0x01, 0x6C7F); + rtl8168_ephy_write(tp, 0x02, 0x011F); + ClearAndSetPCIePhyBit(tp, + 0x03, + 0xFFF0, + 0x01B0 + ); + rtl8168_ephy_write(tp, 0x1A, 0x0546); + rtl8168_ephy_write(tp, 0x1C, 0x80C4); + rtl8168_ephy_write(tp, 0x1D, 0x78E5); + rtl8168_ephy_write(tp, 0x0A, 0x8100); + + break; + case CFG_METHOD_12: + case CFG_METHOD_13: + ephy_data = rtl8168_ephy_read(tp, 0x0B); + rtl8168_ephy_write(tp, 0x0B, ephy_data|0x48); + ephy_data = rtl8168_ephy_read(tp, 0x19); + ephy_data &= ~0x20; + rtl8168_ephy_write(tp, 0x19, ephy_data|0x50); + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= ~0x100; + rtl8168_ephy_write(tp, 0x0C, ephy_data|0x20); + ephy_data = rtl8168_ephy_read(tp, 0x10); + ephy_data &= ~0x04; + rtl8168_ephy_write(tp, 0x10, ephy_data); + + break; + case CFG_METHOD_14: + case CFG_METHOD_15: + /* set EPHY registers */ + ephy_data = rtl8168_ephy_read(tp, 0x00) & ~0x0200; + ephy_data |= 0x0100; + rtl8168_ephy_write(tp, 0x00, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data |= 0x0004; + rtl8168_ephy_write(tp, 0x00, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x06) & ~0x0002; + ephy_data |= 0x0001; + rtl8168_ephy_write(tp, 0x06, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x06); + ephy_data |= 0x0030; + rtl8168_ephy_write(tp, 0x06, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x07); + ephy_data |= 0x2000; + rtl8168_ephy_write(tp, 0x07, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data |= 0x0020; + rtl8168_ephy_write(tp, 0x00, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x03) & ~0x5800; + ephy_data |= 0x2000; + rtl8168_ephy_write(tp, 0x03, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x03); + ephy_data |= 0x0001; + rtl8168_ephy_write(tp, 0x03, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x01) & ~0x0800; + ephy_data |= 0x1000; + rtl8168_ephy_write(tp, 0x01, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x07); + ephy_data |= 0x4000; + rtl8168_ephy_write(tp, 0x07, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x1E); + ephy_data |= 
0x2000; + rtl8168_ephy_write(tp, 0x1E, ephy_data); + + rtl8168_ephy_write(tp, 0x19, 0xFE6C); + + ephy_data = rtl8168_ephy_read(tp, 0x0A); + ephy_data |= 0x0040; + rtl8168_ephy_write(tp, 0x0A, ephy_data); + + break; + case CFG_METHOD_16: + case CFG_METHOD_17: + if (tp->mcfg == CFG_METHOD_16) { + rtl8168_ephy_write(tp, 0x06, 0xF020); + rtl8168_ephy_write(tp, 0x07, 0x01FF); + rtl8168_ephy_write(tp, 0x00, 0x5027); + rtl8168_ephy_write(tp, 0x01, 0x0003); + rtl8168_ephy_write(tp, 0x02, 0x2D16); + rtl8168_ephy_write(tp, 0x03, 0x6D49); + rtl8168_ephy_write(tp, 0x08, 0x0006); + rtl8168_ephy_write(tp, 0x0A, 0x00C8); + } + + ephy_data = rtl8168_ephy_read(tp, 0x09); + ephy_data |= BIT_7; + rtl8168_ephy_write(tp, 0x09, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x19); + ephy_data |= (BIT_2 | BIT_5 | BIT_9); + rtl8168_ephy_write(tp, 0x19, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data |= BIT_3; + rtl8168_ephy_write(tp, 0x00, ephy_data); + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= ~(BIT_13 | BIT_12 | BIT_11 | BIT_10 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4); + ephy_data |= BIT_9; + rtl8168_ephy_write(tp, 0x0C, ephy_data); + + break; + case CFG_METHOD_18: + case CFG_METHOD_19: + if (tp->mcfg == CFG_METHOD_18) { + ephy_data = rtl8168_ephy_read(tp, 0x06); + ephy_data |= BIT_5; + ephy_data &= ~(BIT_7 | BIT_6); + rtl8168_ephy_write(tp, 0x06, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x08); + ephy_data |= BIT_1; + ephy_data &= ~BIT_0; + rtl8168_ephy_write(tp, 0x08, ephy_data); + } + + ephy_data = rtl8168_ephy_read(tp, 0x09); + ephy_data |= BIT_7; + rtl8168_ephy_write(tp, 0x09, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x19); + ephy_data |= (BIT_2 | BIT_5 | BIT_9); + rtl8168_ephy_write(tp, 0x19, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data |= BIT_3; + rtl8168_ephy_write(tp, 0x00, ephy_data); + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= ~(BIT_13 | BIT_12 | BIT_11 | BIT_10 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4); + ephy_data |= BIT_9; + rtl8168_ephy_write(tp, 0x0C, ephy_data); + + break; + case CFG_METHOD_20: + ephy_data = rtl8168_ephy_read(tp, 0x06); + ephy_data |= BIT_5; + ephy_data &= ~(BIT_7 | BIT_6); + rtl8168_ephy_write(tp, 0x06, ephy_data); + + rtl8168_ephy_write(tp, 0x0f, 0x5200); + + ephy_data = rtl8168_ephy_read(tp, 0x19); + ephy_data |= (BIT_2 | BIT_5 | BIT_9); + rtl8168_ephy_write(tp, 0x19, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data |= BIT_3; + rtl8168_ephy_write(tp, 0x00, ephy_data); + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= ~(BIT_13 | BIT_12 | BIT_11 | BIT_10 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4); + ephy_data |= BIT_9; + rtl8168_ephy_write(tp, 0x0C, ephy_data); + + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data &= ~(BIT_3); + rtl8168_ephy_write(tp, 0x00, ephy_data); + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= ~(BIT_13 | BIT_12 | BIT_11 | BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4); + ephy_data |= (BIT_5 | BIT_11); + rtl8168_ephy_write(tp, 0x0C, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x1E); + ephy_data |= (BIT_0); + rtl8168_ephy_write(tp, 0x1E, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x19); + ephy_data &= ~(BIT_15); + rtl8168_ephy_write(tp, 0x19, ephy_data); + + break; + case CFG_METHOD_25: + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data &= ~BIT_3; + rtl8168_ephy_write(tp, 0x00, ephy_data); + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= 
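+ /* the mask below clears EPHY 0x0C bits 13..4 before OR-ing BIT_5 | BIT_11 back in */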
~(BIT_13 | BIT_12 | BIT_11 | BIT_10| BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4); + ephy_data |= (BIT_5 | BIT_11); + rtl8168_ephy_write(tp, 0x0C, ephy_data); + + rtl8168_ephy_write(tp, 0x19, 0x7C00); + rtl8168_ephy_write(tp, 0x1E, 0x20EB); + rtl8168_ephy_write(tp, 0x0D, 0x1666); + rtl8168_ephy_write(tp, 0x00, 0x10A3); + rtl8168_ephy_write(tp, 0x06, 0xF050); + + SetPCIePhyBit(tp, 0x04, BIT_4); + ClearPCIePhyBit(tp, 0x1D, BIT_14); + + break; + case CFG_METHOD_26: + ClearPCIePhyBit(tp, 0x00, BIT_3); + ClearAndSetPCIePhyBit( tp, + 0x0C, + (BIT_13 | BIT_12 | BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_4), + (BIT_5 | BIT_11) + ); + SetPCIePhyBit(tp, 0x1E, BIT_0); + ClearPCIePhyBit(tp, 0x19, BIT_15); + + ClearPCIePhyBit(tp, 0x19, (BIT_5 | BIT_0)); + + SetPCIePhyBit(tp, 0x1E, BIT_13); + ClearPCIePhyBit(tp, 0x0D, BIT_8); + SetPCIePhyBit(tp, 0x0D, BIT_9); + SetPCIePhyBit(tp, 0x00, BIT_7); + + SetPCIePhyBit(tp, 0x06, BIT_4); + + SetPCIePhyBit(tp, 0x04, BIT_4); + SetPCIePhyBit(tp, 0x1D, BIT_14); + + break; + case CFG_METHOD_23: + rtl8168_ephy_write(tp, 0x00, 0x10AB); + rtl8168_ephy_write(tp, 0x06, 0xf030); + rtl8168_ephy_write(tp, 0x08, 0x2006); + rtl8168_ephy_write(tp, 0x0D, 0x1666); + + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= ~(BIT_13 | BIT_12 | BIT_11 | BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4); + rtl8168_ephy_write(tp, 0x0C, ephy_data); + + break; + case CFG_METHOD_27: + rtl8168_ephy_write(tp, 0x00, 0x10A3); + rtl8168_ephy_write(tp, 0x19, 0xFC00); + rtl8168_ephy_write(tp, 0x1E, 0x20EA); + + break; + case CFG_METHOD_28: + rtl8168_ephy_write(tp, 0x00, 0x10AB); + rtl8168_ephy_write(tp, 0x19, 0xFC00); + rtl8168_ephy_write(tp, 0x1E, 0x20EB); + rtl8168_ephy_write(tp, 0x0D, 0x1666); + ClearPCIePhyBit(tp, 0x0B, BIT_0); + SetPCIePhyBit(tp, 0x1D, BIT_14); + ClearAndSetPCIePhyBit(tp, + 0x0C, + BIT_13 | BIT_12 | BIT_11 | BIT_10 | BIT_8 | BIT_7 | BIT_6 | BIT_5, + BIT_9 | BIT_4 + ); + + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + ClearPCIePhyBit(tp, 0x1E, BIT_11); + + SetPCIePhyBit(tp, 0x1E, BIT_0); + SetPCIePhyBit(tp, 0x1D, BIT_11); + + rtl8168_ephy_write(tp, 0x05, 0x2089); + rtl8168_ephy_write(tp, 0x06, 0x5881); + + rtl8168_ephy_write(tp, 0x04, 0x854A); + rtl8168_ephy_write(tp, 0x01, 0x068B); + + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + ClearAndSetPCIePhyBit(tp, + 0x19, + BIT_6, + (BIT_12| BIT_8) + ); + ClearAndSetPCIePhyBit(tp, + 0x59, + BIT_6, + (BIT_12| BIT_8) + ); + + ClearPCIePhyBit(tp, 0x0C, BIT_4); + ClearPCIePhyBit(tp, 0x4C, BIT_4); + ClearPCIePhyBit(tp, 0x0B, BIT_0); + + break; + } +} + +static int +rtl8168_set_phy_mcu_patch_request(struct rtl8168_private *tp) +{ + u16 PhyRegValue; + u32 WaitCnt; + int retval = TRUE; + + switch (tp->mcfg) { + case CFG_METHOD_21 ... CFG_METHOD_33: + rtl8168_mdio_write(tp,0x1f, 0x0B82); + rtl8168_set_eth_phy_bit(tp, 0x10, BIT_4); + + rtl8168_mdio_write(tp,0x1f, 0x0B80); + WaitCnt = 0; + do { + PhyRegValue = rtl8168_mdio_read(tp, 0x10); + udelay(100); + WaitCnt++; + } while (!(PhyRegValue & BIT_6) && (WaitCnt < 1000)); + + if (!(PhyRegValue & BIT_6) && (WaitCnt == 1000)) retval = FALSE; + + rtl8168_mdio_write(tp,0x1f, 0x0000); + break; + } + + return retval; +} + +static int +rtl8168_clear_phy_mcu_patch_request(struct rtl8168_private *tp) +{ + u16 PhyRegValue; + u32 WaitCnt; + int retval = TRUE; + + switch (tp->mcfg) { + case CFG_METHOD_21 ... 
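+ /* GCC case range, ending at CFG_METHOD_33 below. Inverse of rtl8168_set_phy_mcu_patch_request(): clear BIT_4 of page 0x0B82 reg 0x10, then poll BIT_6 of page 0x0B80 reg 0x10 until it drops, giving up after 1000 * 100 us (~100 ms) */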
CFG_METHOD_33: + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + rtl8168_clear_eth_phy_bit(tp, 0x10, BIT_4); + + rtl8168_mdio_write(tp,0x1f, 0x0B80); + WaitCnt = 0; + do { + PhyRegValue = rtl8168_mdio_read(tp, 0x10); + udelay(100); + WaitCnt++; + } while ((PhyRegValue & BIT_6) && (WaitCnt < 1000)); + + if ((PhyRegValue & BIT_6) && (WaitCnt == 1000)) retval = FALSE; + + rtl8168_mdio_write(tp,0x1f, 0x0000); + break; + } + + return retval; +} + +static u16 +rtl8168_get_hw_phy_mcu_code_ver(struct rtl8168_private *tp) +{ + u16 hw_ram_code_ver = ~0; + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B60); + hw_ram_code_ver = rtl8168_mdio_read(tp, 0x06); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B30); + hw_ram_code_ver = rtl8168_mdio_read(tp, 0x06); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x801E); + hw_ram_code_ver = rtl8168_mdio_read(tp, 0x14); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + default: + tp->hw_ram_code_ver = ~0; + break; + } + + return hw_ram_code_ver; +} + +#ifndef ENABLE_USE_FIRMWARE_FILE +static void +rtl8168_enable_phy_disable_mode(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->HwSuppCheckPhyDisableModeVer) { + case 1: + rtl8168_mac_ocp_write(tp, 0xDC20, rtl8168_mac_ocp_read(tp, 0xDC20) | BIT_1); + break; + case 2: + case 3: + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_5); + break; + } + + dprintk("enable phy disable mode.\n"); +} + +static void +rtl8168_disable_phy_disable_mode(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->HwSuppCheckPhyDisableModeVer) { + case 1: + rtl8168_mac_ocp_write(tp, 0xDC20, rtl8168_mac_ocp_read(tp, 0xDC20) & ~BIT_1); + break; + case 2: + case 3: + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) & ~BIT_5); + break; + } + + mdelay(1); + + dprintk("disable phy disable mode.\n"); +} + +static int +rtl8168_check_hw_phy_mcu_code_ver(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int ram_code_ver_match = 0; + + tp->hw_ram_code_ver = rtl8168_get_hw_phy_mcu_code_ver(tp); + + if ( tp->hw_ram_code_ver == tp->sw_ram_code_ver) { + ram_code_ver_match = 1; + tp->HwHasWrRamCodeToMicroP = TRUE; + } + + return ram_code_ver_match; +} + +static void +rtl8168_write_hw_phy_mcu_code_ver(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B60); + rtl8168_mdio_write(tp, 0x06, tp->sw_ram_code_ver); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + tp->hw_ram_code_ver = tp->sw_ram_code_ver; + break; + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B30); + rtl8168_mdio_write(tp, 0x06, tp->sw_ram_code_ver); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + 
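+ /* cache the version just programmed so rtl8168_check_hw_phy_mcu_code_ver() will see hw == sw */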
tp->hw_ram_code_ver = tp->sw_ram_code_ver; + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x801E); + rtl8168_mdio_write(tp, 0x14, tp->sw_ram_code_ver); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + tp->hw_ram_code_ver = tp->sw_ram_code_ver; + break; + } +} +static int +rtl8168_phy_ram_code_check(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 PhyRegValue; + int retval = TRUE; + + switch(tp->mcfg) { + case CFG_METHOD_21: + rtl8168_mdio_write(tp, 0x1f, 0x0A40); + PhyRegValue = rtl8168_mdio_read(tp, 0x10); + PhyRegValue &= ~(BIT_11); + rtl8168_mdio_write(tp, 0x10, PhyRegValue); + + + rtl8168_mdio_write(tp, 0x1f, 0x0A00); + PhyRegValue = rtl8168_mdio_read(tp, 0x10); + PhyRegValue &= ~(BIT_12 | BIT_13 | BIT_14 | BIT_15); + rtl8168_mdio_write(tp, 0x10, PhyRegValue); + + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8010); + PhyRegValue = rtl8168_mdio_read(tp, 0x14); + PhyRegValue &= ~(BIT_11); + rtl8168_mdio_write(tp, 0x14, PhyRegValue); + + retval = rtl8168_set_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp, 0x1f, 0x0A40); + rtl8168_mdio_write(tp, 0x10, 0x0140); + + rtl8168_mdio_write(tp, 0x1f, 0x0A4A); + PhyRegValue = rtl8168_mdio_read(tp, 0x13); + PhyRegValue &= ~(BIT_6); + PhyRegValue |= (BIT_7); + rtl8168_mdio_write(tp, 0x13, PhyRegValue); + + rtl8168_mdio_write(tp, 0x1f, 0x0A44); + PhyRegValue = rtl8168_mdio_read(tp, 0x14); + PhyRegValue |= (BIT_2); + rtl8168_mdio_write(tp, 0x14, PhyRegValue); + + rtl8168_mdio_write(tp, 0x1f, 0x0A50); + PhyRegValue = rtl8168_mdio_read(tp, 0x11); + PhyRegValue |= (BIT_11|BIT_12); + rtl8168_mdio_write(tp, 0x11, PhyRegValue); + + retval = rtl8168_clear_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp, 0x1f, 0x0A40); + rtl8168_mdio_write(tp, 0x10, 0x1040); + + rtl8168_mdio_write(tp, 0x1f, 0x0A4A); + PhyRegValue = rtl8168_mdio_read(tp, 0x13); + PhyRegValue &= ~(BIT_6|BIT_7); + rtl8168_mdio_write(tp, 0x13, PhyRegValue); + + rtl8168_mdio_write(tp, 0x1f, 0x0A44); + PhyRegValue = rtl8168_mdio_read(tp, 0x14); + PhyRegValue &= ~(BIT_2); + rtl8168_mdio_write(tp, 0x14, PhyRegValue); + + rtl8168_mdio_write(tp, 0x1f, 0x0A50); + PhyRegValue = rtl8168_mdio_read(tp, 0x11); + PhyRegValue &= ~(BIT_11|BIT_12); + rtl8168_mdio_write(tp, 0x11, PhyRegValue); + + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8010); + PhyRegValue = rtl8168_mdio_read(tp, 0x14); + PhyRegValue |= (BIT_11); + rtl8168_mdio_write(tp, 0x14, PhyRegValue); + + retval = rtl8168_set_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp, 0x1f, 0x0A20); + PhyRegValue = rtl8168_mdio_read(tp, 0x13); + if (PhyRegValue & BIT_11) { + if (PhyRegValue & BIT_10) { + retval = FALSE; + } + } + + retval = rtl8168_clear_phy_mcu_patch_request(tp); + + mdelay(2); + break; + default: + break; + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + return retval; +} + +static void +rtl8168_set_phy_ram_code_check_fail_flag(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 TmpUshort; + + switch(tp->mcfg) { + case CFG_METHOD_21: + TmpUshort = rtl8168_mac_ocp_read(tp, 0xD3C0); + TmpUshort |= BIT_0; + rtl8168_mac_ocp_write(tp, 0xD3C0, TmpUshort); + break; + } +} + +static void 
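+ /* PHY MCU patch loader for the 8168E rev. 1 (per the function name). It apparently quiesces the PHY first (BMCR = 0x1800: autoneg enable + power-down, then polling loops until the part settles) before pouring in a vendor-supplied magic table over the paged MDIO interface; mainline r8169 ships an equivalent payload as a firmware file instead */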
+rtl8168_set_phy_mcu_8168e_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val,i; + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x1800); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x17, 0x0117); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + rtl8168_mdio_write(tp, 0x1B, 0x5000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x4104); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1E); + gphy_val &= 0x03FF; + if (gphy_val == 0x000C) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x07); + if ((gphy_val & BIT_5) == 0) + break; + } + gphy_val = rtl8168_mdio_read(tp, 0x07); + if (gphy_val & BIT_5) { + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x00a1); + rtl8168_mdio_write(tp, 0x17, 0x1000); + rtl8168_mdio_write(tp, 0x17, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2000); + rtl8168_mdio_write(tp, 0x1e, 0x002f); + rtl8168_mdio_write(tp, 0x18, 0x9bfb); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x07, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x08); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x08, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + rtl8168_mdio_write(tp, 0x15, 0x000e); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15, 0x0010); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x0018); + rtl8168_mdio_write(tp, 0x19, 0x4801); + rtl8168_mdio_write(tp, 0x15, 0x0019); + rtl8168_mdio_write(tp, 0x19, 0x6801); + rtl8168_mdio_write(tp, 0x15, 0x001a); + rtl8168_mdio_write(tp, 0x19, 0x66a1); + rtl8168_mdio_write(tp, 0x15, 0x001f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0020); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0021); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0022); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0023); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0024); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0025); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0026); + rtl8168_mdio_write(tp, 0x19, 0x40ea); + rtl8168_mdio_write(tp, 0x15, 0x0027); + rtl8168_mdio_write(tp, 0x19, 0x4503); + rtl8168_mdio_write(tp, 0x15, 0x0028); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0029); + rtl8168_mdio_write(tp, 0x19, 0xa631); + rtl8168_mdio_write(tp, 0x15, 0x002a); + rtl8168_mdio_write(tp, 0x19, 0x9717); + rtl8168_mdio_write(tp, 0x15, 0x002b); + rtl8168_mdio_write(tp, 0x19, 0x302c); + rtl8168_mdio_write(tp, 0x15, 0x002c); + rtl8168_mdio_write(tp, 0x19, 0x4802); + rtl8168_mdio_write(tp, 0x15, 0x002d); + rtl8168_mdio_write(tp, 0x19, 0x58da); + rtl8168_mdio_write(tp, 0x15, 0x002e); + 
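+ /* the 0x15/0x19 pairs running through here look like address/data stores into the PHY MCU patch RAM (inferred from the monotonically rising 0x15 values); the order is presumably significant, so keep it intact */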
rtl8168_mdio_write(tp, 0x19, 0x400d); + rtl8168_mdio_write(tp, 0x15, 0x002f); + rtl8168_mdio_write(tp, 0x19, 0x4488); + rtl8168_mdio_write(tp, 0x15, 0x0030); + rtl8168_mdio_write(tp, 0x19, 0x9e00); + rtl8168_mdio_write(tp, 0x15, 0x0031); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0032); + rtl8168_mdio_write(tp, 0x19, 0x6481); + rtl8168_mdio_write(tp, 0x15, 0x0033); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0034); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0035); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0036); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0037); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0038); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0039); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x003a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x003b); + rtl8168_mdio_write(tp, 0x19, 0x63e8); + rtl8168_mdio_write(tp, 0x15, 0x003c); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x003d); + rtl8168_mdio_write(tp, 0x19, 0x59d4); + rtl8168_mdio_write(tp, 0x15, 0x003e); + rtl8168_mdio_write(tp, 0x19, 0x63f8); + rtl8168_mdio_write(tp, 0x15, 0x0040); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0041); + rtl8168_mdio_write(tp, 0x19, 0x30de); + rtl8168_mdio_write(tp, 0x15, 0x0044); + rtl8168_mdio_write(tp, 0x19, 0x480f); + rtl8168_mdio_write(tp, 0x15, 0x0045); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x0046); + rtl8168_mdio_write(tp, 0x19, 0x6680); + rtl8168_mdio_write(tp, 0x15, 0x0047); + rtl8168_mdio_write(tp, 0x19, 0x7c10); + rtl8168_mdio_write(tp, 0x15, 0x0048); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0049); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004f); + rtl8168_mdio_write(tp, 0x19, 0x40ea); + rtl8168_mdio_write(tp, 0x15, 0x0050); + rtl8168_mdio_write(tp, 0x19, 0x4503); + rtl8168_mdio_write(tp, 0x15, 0x0051); + rtl8168_mdio_write(tp, 0x19, 0x58ca); + rtl8168_mdio_write(tp, 0x15, 0x0052); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0053); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x0054); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x0055); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0056); + rtl8168_mdio_write(tp, 0x19, 0x3000); + rtl8168_mdio_write(tp, 0x15, 0x006E); + rtl8168_mdio_write(tp, 0x19, 0x9afa); + rtl8168_mdio_write(tp, 0x15, 0x00a1); + rtl8168_mdio_write(tp, 0x19, 0x3044); + rtl8168_mdio_write(tp, 0x15, 0x00ab); + rtl8168_mdio_write(tp, 0x19, 0x5820); + rtl8168_mdio_write(tp, 0x15, 0x00ac); + rtl8168_mdio_write(tp, 0x19, 0x5e04); + rtl8168_mdio_write(tp, 0x15, 0x00ad); + rtl8168_mdio_write(tp, 0x19, 0xb60c); + rtl8168_mdio_write(tp, 0x15, 0x00af); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15, 0x00b2); + rtl8168_mdio_write(tp, 0x19, 
0x30b9); + rtl8168_mdio_write(tp, 0x15, 0x00b9); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x00ba); + rtl8168_mdio_write(tp, 0x19, 0x480b); + rtl8168_mdio_write(tp, 0x15, 0x00bb); + rtl8168_mdio_write(tp, 0x19, 0x5e00); + rtl8168_mdio_write(tp, 0x15, 0x00bc); + rtl8168_mdio_write(tp, 0x19, 0x405f); + rtl8168_mdio_write(tp, 0x15, 0x00bd); + rtl8168_mdio_write(tp, 0x19, 0x4448); + rtl8168_mdio_write(tp, 0x15, 0x00be); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x00bf); + rtl8168_mdio_write(tp, 0x19, 0x4468); + rtl8168_mdio_write(tp, 0x15, 0x00c0); + rtl8168_mdio_write(tp, 0x19, 0x9c02); + rtl8168_mdio_write(tp, 0x15, 0x00c1); + rtl8168_mdio_write(tp, 0x19, 0x58a0); + rtl8168_mdio_write(tp, 0x15, 0x00c2); + rtl8168_mdio_write(tp, 0x19, 0xb605); + rtl8168_mdio_write(tp, 0x15, 0x00c3); + rtl8168_mdio_write(tp, 0x19, 0xc0d3); + rtl8168_mdio_write(tp, 0x15, 0x00c4); + rtl8168_mdio_write(tp, 0x19, 0x00e6); + rtl8168_mdio_write(tp, 0x15, 0x00c5); + rtl8168_mdio_write(tp, 0x19, 0xdaec); + rtl8168_mdio_write(tp, 0x15, 0x00c6); + rtl8168_mdio_write(tp, 0x19, 0x00fa); + rtl8168_mdio_write(tp, 0x15, 0x00c7); + rtl8168_mdio_write(tp, 0x19, 0x9df9); + rtl8168_mdio_write(tp, 0x15, 0x00c8); + rtl8168_mdio_write(tp, 0x19, 0x307a); + rtl8168_mdio_write(tp, 0x15, 0x0112); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0113); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0114); + rtl8168_mdio_write(tp, 0x19, 0x63f0); + rtl8168_mdio_write(tp, 0x15, 0x0115); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0116); + rtl8168_mdio_write(tp, 0x19, 0x4418); + rtl8168_mdio_write(tp, 0x15, 0x0117); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x15, 0x0118); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x0119); + rtl8168_mdio_write(tp, 0x19, 0x64e1); + rtl8168_mdio_write(tp, 0x15, 0x011a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0150); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0151); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x0152); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0153); + rtl8168_mdio_write(tp, 0x19, 0x4540); + rtl8168_mdio_write(tp, 0x15, 0x0154); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0155); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x15, 0x0156); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0157); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0158); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0159); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x015a); + rtl8168_mdio_write(tp, 0x19, 0x30fe); + rtl8168_mdio_write(tp, 0x15, 0x021e); + rtl8168_mdio_write(tp, 0x19, 0x5410); + rtl8168_mdio_write(tp, 0x15, 0x0225); + rtl8168_mdio_write(tp, 0x19, 0x5400); + rtl8168_mdio_write(tp, 0x15, 0x023D); + rtl8168_mdio_write(tp, 0x19, 0x4050); + rtl8168_mdio_write(tp, 0x15, 0x0295); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x02bd); + rtl8168_mdio_write(tp, 0x19, 0xa523); + rtl8168_mdio_write(tp, 0x15, 0x02be); + rtl8168_mdio_write(tp, 0x19, 0x32ca); + rtl8168_mdio_write(tp, 0x15, 0x02ca); + rtl8168_mdio_write(tp, 0x19, 0x48b3); + rtl8168_mdio_write(tp, 0x15, 0x02cb); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 
0x15, 0x02cc); + rtl8168_mdio_write(tp, 0x19, 0x4823); + rtl8168_mdio_write(tp, 0x15, 0x02cd); + rtl8168_mdio_write(tp, 0x19, 0x4510); + rtl8168_mdio_write(tp, 0x15, 0x02ce); + rtl8168_mdio_write(tp, 0x19, 0xb63a); + rtl8168_mdio_write(tp, 0x15, 0x02cf); + rtl8168_mdio_write(tp, 0x19, 0x7dc8); + rtl8168_mdio_write(tp, 0x15, 0x02d6); + rtl8168_mdio_write(tp, 0x19, 0x9bf8); + rtl8168_mdio_write(tp, 0x15, 0x02d8); + rtl8168_mdio_write(tp, 0x19, 0x85f6); + rtl8168_mdio_write(tp, 0x15, 0x02d9); + rtl8168_mdio_write(tp, 0x19, 0x32e0); + rtl8168_mdio_write(tp, 0x15, 0x02e0); + rtl8168_mdio_write(tp, 0x19, 0x4834); + rtl8168_mdio_write(tp, 0x15, 0x02e1); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x02e2); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x02e3); + rtl8168_mdio_write(tp, 0x19, 0x4824); + rtl8168_mdio_write(tp, 0x15, 0x02e4); + rtl8168_mdio_write(tp, 0x19, 0x4520); + rtl8168_mdio_write(tp, 0x15, 0x02e5); + rtl8168_mdio_write(tp, 0x19, 0x4008); + rtl8168_mdio_write(tp, 0x15, 0x02e6); + rtl8168_mdio_write(tp, 0x19, 0x4560); + rtl8168_mdio_write(tp, 0x15, 0x02e7); + rtl8168_mdio_write(tp, 0x19, 0x9d04); + rtl8168_mdio_write(tp, 0x15, 0x02e8); + rtl8168_mdio_write(tp, 0x19, 0x48c4); + rtl8168_mdio_write(tp, 0x15, 0x02e9); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02ea); + rtl8168_mdio_write(tp, 0x19, 0x4844); + rtl8168_mdio_write(tp, 0x15, 0x02eb); + rtl8168_mdio_write(tp, 0x19, 0x7dc8); + rtl8168_mdio_write(tp, 0x15, 0x02f0); + rtl8168_mdio_write(tp, 0x19, 0x9cf7); + rtl8168_mdio_write(tp, 0x15, 0x02f1); + rtl8168_mdio_write(tp, 0x19, 0xdf94); + rtl8168_mdio_write(tp, 0x15, 0x02f2); + rtl8168_mdio_write(tp, 0x19, 0x0002); + rtl8168_mdio_write(tp, 0x15, 0x02f3); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x02f4); + rtl8168_mdio_write(tp, 0x19, 0xb614); + rtl8168_mdio_write(tp, 0x15, 0x02f5); + rtl8168_mdio_write(tp, 0x19, 0xc42b); + rtl8168_mdio_write(tp, 0x15, 0x02f6); + rtl8168_mdio_write(tp, 0x19, 0x00d4); + rtl8168_mdio_write(tp, 0x15, 0x02f7); + rtl8168_mdio_write(tp, 0x19, 0xc455); + rtl8168_mdio_write(tp, 0x15, 0x02f8); + rtl8168_mdio_write(tp, 0x19, 0x0093); + rtl8168_mdio_write(tp, 0x15, 0x02f9); + rtl8168_mdio_write(tp, 0x19, 0x92ee); + rtl8168_mdio_write(tp, 0x15, 0x02fa); + rtl8168_mdio_write(tp, 0x19, 0xefed); + rtl8168_mdio_write(tp, 0x15, 0x02fb); + rtl8168_mdio_write(tp, 0x19, 0x3312); + rtl8168_mdio_write(tp, 0x15, 0x0312); + rtl8168_mdio_write(tp, 0x19, 0x49b5); + rtl8168_mdio_write(tp, 0x15, 0x0313); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x0314); + rtl8168_mdio_write(tp, 0x19, 0x4d00); + rtl8168_mdio_write(tp, 0x15, 0x0315); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x031e); + rtl8168_mdio_write(tp, 0x19, 0x404f); + rtl8168_mdio_write(tp, 0x15, 0x031f); + rtl8168_mdio_write(tp, 0x19, 0x44c8); + rtl8168_mdio_write(tp, 0x15, 0x0320); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0321); + rtl8168_mdio_write(tp, 0x19, 0x00e7); + rtl8168_mdio_write(tp, 0x15, 0x0322); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0323); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x0324); + rtl8168_mdio_write(tp, 0x19, 0x4d48); + rtl8168_mdio_write(tp, 0x15, 0x0325); + rtl8168_mdio_write(tp, 0x19, 0x3327); + rtl8168_mdio_write(tp, 0x15, 0x0326); + rtl8168_mdio_write(tp, 0x19, 0x4d40); + rtl8168_mdio_write(tp, 0x15, 0x0327); + 
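+ /* data words of the form 0x3nnn mirror nearby 0x15 addresses (e.g. 0x3327 written just above address 0x0327), hinting at branch targets inside the patch -- a speculative reading; the table itself is opaque vendor magic */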
rtl8168_mdio_write(tp, 0x19, 0xc8d7); + rtl8168_mdio_write(tp, 0x15, 0x0328); + rtl8168_mdio_write(tp, 0x19, 0x0003); + rtl8168_mdio_write(tp, 0x15, 0x0329); + rtl8168_mdio_write(tp, 0x19, 0x7c20); + rtl8168_mdio_write(tp, 0x15, 0x032a); + rtl8168_mdio_write(tp, 0x19, 0x4c20); + rtl8168_mdio_write(tp, 0x15, 0x032b); + rtl8168_mdio_write(tp, 0x19, 0xc8ed); + rtl8168_mdio_write(tp, 0x15, 0x032c); + rtl8168_mdio_write(tp, 0x19, 0x00f4); + rtl8168_mdio_write(tp, 0x15, 0x032d); + rtl8168_mdio_write(tp, 0x19, 0x82b3); + rtl8168_mdio_write(tp, 0x15, 0x032e); + rtl8168_mdio_write(tp, 0x19, 0xd11d); + rtl8168_mdio_write(tp, 0x15, 0x032f); + rtl8168_mdio_write(tp, 0x19, 0x00b1); + rtl8168_mdio_write(tp, 0x15, 0x0330); + rtl8168_mdio_write(tp, 0x19, 0xde18); + rtl8168_mdio_write(tp, 0x15, 0x0331); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x0332); + rtl8168_mdio_write(tp, 0x19, 0x91ee); + rtl8168_mdio_write(tp, 0x15, 0x0333); + rtl8168_mdio_write(tp, 0x19, 0x3339); + rtl8168_mdio_write(tp, 0x15, 0x033a); + rtl8168_mdio_write(tp, 0x19, 0x4064); + rtl8168_mdio_write(tp, 0x15, 0x0340); + rtl8168_mdio_write(tp, 0x19, 0x9e06); + rtl8168_mdio_write(tp, 0x15, 0x0341); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0342); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x0343); + rtl8168_mdio_write(tp, 0x19, 0x4d48); + rtl8168_mdio_write(tp, 0x15, 0x0344); + rtl8168_mdio_write(tp, 0x19, 0x3346); + rtl8168_mdio_write(tp, 0x15, 0x0345); + rtl8168_mdio_write(tp, 0x19, 0x4d40); + rtl8168_mdio_write(tp, 0x15, 0x0346); + rtl8168_mdio_write(tp, 0x19, 0xd11d); + rtl8168_mdio_write(tp, 0x15, 0x0347); + rtl8168_mdio_write(tp, 0x19, 0x0099); + rtl8168_mdio_write(tp, 0x15, 0x0348); + rtl8168_mdio_write(tp, 0x19, 0xbb17); + rtl8168_mdio_write(tp, 0x15, 0x0349); + rtl8168_mdio_write(tp, 0x19, 0x8102); + rtl8168_mdio_write(tp, 0x15, 0x034a); + rtl8168_mdio_write(tp, 0x19, 0x334d); + rtl8168_mdio_write(tp, 0x15, 0x034b); + rtl8168_mdio_write(tp, 0x19, 0xa22c); + rtl8168_mdio_write(tp, 0x15, 0x034c); + rtl8168_mdio_write(tp, 0x19, 0x3397); + rtl8168_mdio_write(tp, 0x15, 0x034d); + rtl8168_mdio_write(tp, 0x19, 0x91f2); + rtl8168_mdio_write(tp, 0x15, 0x034e); + rtl8168_mdio_write(tp, 0x19, 0xc218); + rtl8168_mdio_write(tp, 0x15, 0x034f); + rtl8168_mdio_write(tp, 0x19, 0x00f0); + rtl8168_mdio_write(tp, 0x15, 0x0350); + rtl8168_mdio_write(tp, 0x19, 0x3397); + rtl8168_mdio_write(tp, 0x15, 0x0351); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0364); + rtl8168_mdio_write(tp, 0x19, 0xbc05); + rtl8168_mdio_write(tp, 0x15, 0x0367); + rtl8168_mdio_write(tp, 0x19, 0xa1fc); + rtl8168_mdio_write(tp, 0x15, 0x0368); + rtl8168_mdio_write(tp, 0x19, 0x3377); + rtl8168_mdio_write(tp, 0x15, 0x0369); + rtl8168_mdio_write(tp, 0x19, 0x328b); + rtl8168_mdio_write(tp, 0x15, 0x036a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0377); + rtl8168_mdio_write(tp, 0x19, 0x4b97); + rtl8168_mdio_write(tp, 0x15, 0x0378); + rtl8168_mdio_write(tp, 0x19, 0x6818); + rtl8168_mdio_write(tp, 0x15, 0x0379); + rtl8168_mdio_write(tp, 0x19, 0x4b07); + rtl8168_mdio_write(tp, 0x15, 0x037a); + rtl8168_mdio_write(tp, 0x19, 0x40ac); + rtl8168_mdio_write(tp, 0x15, 0x037b); + rtl8168_mdio_write(tp, 0x19, 0x4445); + rtl8168_mdio_write(tp, 0x15, 0x037c); + rtl8168_mdio_write(tp, 0x19, 0x404e); + rtl8168_mdio_write(tp, 0x15, 0x037d); + rtl8168_mdio_write(tp, 0x19, 0x4461); + rtl8168_mdio_write(tp, 0x15, 0x037e); + rtl8168_mdio_write(tp, 0x19, 
0x9c09); + rtl8168_mdio_write(tp, 0x15, 0x037f); + rtl8168_mdio_write(tp, 0x19, 0x63da); + rtl8168_mdio_write(tp, 0x15, 0x0380); + rtl8168_mdio_write(tp, 0x19, 0x5440); + rtl8168_mdio_write(tp, 0x15, 0x0381); + rtl8168_mdio_write(tp, 0x19, 0x4b98); + rtl8168_mdio_write(tp, 0x15, 0x0382); + rtl8168_mdio_write(tp, 0x19, 0x7c60); + rtl8168_mdio_write(tp, 0x15, 0x0383); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x0384); + rtl8168_mdio_write(tp, 0x19, 0x4b08); + rtl8168_mdio_write(tp, 0x15, 0x0385); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x0386); + rtl8168_mdio_write(tp, 0x19, 0x338d); + rtl8168_mdio_write(tp, 0x15, 0x0387); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0388); + rtl8168_mdio_write(tp, 0x19, 0x0080); + rtl8168_mdio_write(tp, 0x15, 0x0389); + rtl8168_mdio_write(tp, 0x19, 0x820c); + rtl8168_mdio_write(tp, 0x15, 0x038a); + rtl8168_mdio_write(tp, 0x19, 0xa10b); + rtl8168_mdio_write(tp, 0x15, 0x038b); + rtl8168_mdio_write(tp, 0x19, 0x9df3); + rtl8168_mdio_write(tp, 0x15, 0x038c); + rtl8168_mdio_write(tp, 0x19, 0x3395); + rtl8168_mdio_write(tp, 0x15, 0x038d); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x038e); + rtl8168_mdio_write(tp, 0x19, 0x00f9); + rtl8168_mdio_write(tp, 0x15, 0x038f); + rtl8168_mdio_write(tp, 0x19, 0xc017); + rtl8168_mdio_write(tp, 0x15, 0x0390); + rtl8168_mdio_write(tp, 0x19, 0x0005); + rtl8168_mdio_write(tp, 0x15, 0x0391); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x0392); + rtl8168_mdio_write(tp, 0x19, 0xa103); + rtl8168_mdio_write(tp, 0x15, 0x0393); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x0394); + rtl8168_mdio_write(tp, 0x19, 0x9df9); + rtl8168_mdio_write(tp, 0x15, 0x0395); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x0396); + rtl8168_mdio_write(tp, 0x19, 0x3397); + rtl8168_mdio_write(tp, 0x15, 0x0399); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x03a4); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x03a5); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x03a6); + rtl8168_mdio_write(tp, 0x19, 0x4d08); + rtl8168_mdio_write(tp, 0x15, 0x03a7); + rtl8168_mdio_write(tp, 0x19, 0x33a9); + rtl8168_mdio_write(tp, 0x15, 0x03a8); + rtl8168_mdio_write(tp, 0x19, 0x4d00); + rtl8168_mdio_write(tp, 0x15, 0x03a9); + rtl8168_mdio_write(tp, 0x19, 0x9bfa); + rtl8168_mdio_write(tp, 0x15, 0x03aa); + rtl8168_mdio_write(tp, 0x19, 0x33b6); + rtl8168_mdio_write(tp, 0x15, 0x03bb); + rtl8168_mdio_write(tp, 0x19, 0x4056); + rtl8168_mdio_write(tp, 0x15, 0x03bc); + rtl8168_mdio_write(tp, 0x19, 0x44e9); + rtl8168_mdio_write(tp, 0x15, 0x03bd); + rtl8168_mdio_write(tp, 0x19, 0x405e); + rtl8168_mdio_write(tp, 0x15, 0x03be); + rtl8168_mdio_write(tp, 0x19, 0x44f8); + rtl8168_mdio_write(tp, 0x15, 0x03bf); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x03c0); + rtl8168_mdio_write(tp, 0x19, 0x0037); + rtl8168_mdio_write(tp, 0x15, 0x03c1); + rtl8168_mdio_write(tp, 0x19, 0xbd37); + rtl8168_mdio_write(tp, 0x15, 0x03c2); + rtl8168_mdio_write(tp, 0x19, 0x9cfd); + rtl8168_mdio_write(tp, 0x15, 0x03c3); + rtl8168_mdio_write(tp, 0x19, 0xc639); + rtl8168_mdio_write(tp, 0x15, 0x03c4); + rtl8168_mdio_write(tp, 0x19, 0x0011); + rtl8168_mdio_write(tp, 0x15, 0x03c5); + rtl8168_mdio_write(tp, 0x19, 0x9b03); + rtl8168_mdio_write(tp, 0x15, 0x03c6); + rtl8168_mdio_write(tp, 0x19, 0x7c01); + rtl8168_mdio_write(tp, 
0x15, 0x03c7); + rtl8168_mdio_write(tp, 0x19, 0x4c01); + rtl8168_mdio_write(tp, 0x15, 0x03c8); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x03c9); + rtl8168_mdio_write(tp, 0x19, 0x7c20); + rtl8168_mdio_write(tp, 0x15, 0x03ca); + rtl8168_mdio_write(tp, 0x19, 0x4c20); + rtl8168_mdio_write(tp, 0x15, 0x03cb); + rtl8168_mdio_write(tp, 0x19, 0x9af4); + rtl8168_mdio_write(tp, 0x15, 0x03cc); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03cd); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03ce); + rtl8168_mdio_write(tp, 0x19, 0x4470); + rtl8168_mdio_write(tp, 0x15, 0x03cf); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03d0); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03d1); + rtl8168_mdio_write(tp, 0x19, 0x33bf); + rtl8168_mdio_write(tp, 0x15, 0x03d6); + rtl8168_mdio_write(tp, 0x19, 0x4047); + rtl8168_mdio_write(tp, 0x15, 0x03d7); + rtl8168_mdio_write(tp, 0x19, 0x4469); + rtl8168_mdio_write(tp, 0x15, 0x03d8); + rtl8168_mdio_write(tp, 0x19, 0x492b); + rtl8168_mdio_write(tp, 0x15, 0x03d9); + rtl8168_mdio_write(tp, 0x19, 0x4479); + rtl8168_mdio_write(tp, 0x15, 0x03da); + rtl8168_mdio_write(tp, 0x19, 0x7c09); + rtl8168_mdio_write(tp, 0x15, 0x03db); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x03dc); + rtl8168_mdio_write(tp, 0x19, 0x4d48); + rtl8168_mdio_write(tp, 0x15, 0x03dd); + rtl8168_mdio_write(tp, 0x19, 0x33df); + rtl8168_mdio_write(tp, 0x15, 0x03de); + rtl8168_mdio_write(tp, 0x19, 0x4d40); + rtl8168_mdio_write(tp, 0x15, 0x03df); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x03e0); + rtl8168_mdio_write(tp, 0x19, 0x0017); + rtl8168_mdio_write(tp, 0x15, 0x03e1); + rtl8168_mdio_write(tp, 0x19, 0xbd17); + rtl8168_mdio_write(tp, 0x15, 0x03e2); + rtl8168_mdio_write(tp, 0x19, 0x9b03); + rtl8168_mdio_write(tp, 0x15, 0x03e3); + rtl8168_mdio_write(tp, 0x19, 0x7c20); + rtl8168_mdio_write(tp, 0x15, 0x03e4); + rtl8168_mdio_write(tp, 0x19, 0x4c20); + rtl8168_mdio_write(tp, 0x15, 0x03e5); + rtl8168_mdio_write(tp, 0x19, 0x88f5); + rtl8168_mdio_write(tp, 0x15, 0x03e6); + rtl8168_mdio_write(tp, 0x19, 0xc428); + rtl8168_mdio_write(tp, 0x15, 0x03e7); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x03e8); + rtl8168_mdio_write(tp, 0x19, 0x9af2); + rtl8168_mdio_write(tp, 0x15, 0x03e9); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03ea); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03eb); + rtl8168_mdio_write(tp, 0x19, 0x4470); + rtl8168_mdio_write(tp, 0x15, 0x03ec); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03ed); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03ee); + rtl8168_mdio_write(tp, 0x19, 0x33da); + rtl8168_mdio_write(tp, 0x15, 0x03ef); + rtl8168_mdio_write(tp, 0x19, 0x3312); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2179); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0040); + rtl8168_mdio_write(tp, 0x18, 0x0645); + rtl8168_mdio_write(tp, 0x19, 0xe200); + rtl8168_mdio_write(tp, 0x18, 0x0655); + rtl8168_mdio_write(tp, 0x19, 0x9000); + rtl8168_mdio_write(tp, 0x18, 0x0d05); + rtl8168_mdio_write(tp, 0x19, 0xbe00); + rtl8168_mdio_write(tp, 0x18, 0x0d15); + rtl8168_mdio_write(tp, 0x19, 0xd300); + rtl8168_mdio_write(tp, 0x18, 0x0d25); + 
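+ /* new sub-table on page 0x0007 / 0x1e = 0x0040: reg 0x18 acts as a selector stepping by 0x10, reg 0x19 carries the 16-bit value (pattern inference) */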
rtl8168_mdio_write(tp, 0x19, 0xfe00); + rtl8168_mdio_write(tp, 0x18, 0x0d35); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x0d45); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x0d55); + rtl8168_mdio_write(tp, 0x19, 0x1000); + rtl8168_mdio_write(tp, 0x18, 0x0d65); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x0d75); + rtl8168_mdio_write(tp, 0x19, 0x8200); + rtl8168_mdio_write(tp, 0x18, 0x0d85); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x0d95); + rtl8168_mdio_write(tp, 0x19, 0x7000); + rtl8168_mdio_write(tp, 0x18, 0x0da5); + rtl8168_mdio_write(tp, 0x19, 0x0f00); + rtl8168_mdio_write(tp, 0x18, 0x0db5); + rtl8168_mdio_write(tp, 0x19, 0x0100); + rtl8168_mdio_write(tp, 0x18, 0x0dc5); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x18, 0x0dd5); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x0de5); + rtl8168_mdio_write(tp, 0x19, 0xe000); + rtl8168_mdio_write(tp, 0x18, 0x0df5); + rtl8168_mdio_write(tp, 0x19, 0xef00); + rtl8168_mdio_write(tp, 0x18, 0x16d5); + rtl8168_mdio_write(tp, 0x19, 0xe200); + rtl8168_mdio_write(tp, 0x18, 0x16e5); + rtl8168_mdio_write(tp, 0x19, 0xab00); + rtl8168_mdio_write(tp, 0x18, 0x2904); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x2914); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x2924); + rtl8168_mdio_write(tp, 0x19, 0x0100); + rtl8168_mdio_write(tp, 0x18, 0x2934); + rtl8168_mdio_write(tp, 0x19, 0x2000); + rtl8168_mdio_write(tp, 0x18, 0x2944); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2954); + rtl8168_mdio_write(tp, 0x19, 0x4600); + rtl8168_mdio_write(tp, 0x18, 0x2964); + rtl8168_mdio_write(tp, 0x19, 0xfc00); + rtl8168_mdio_write(tp, 0x18, 0x2974); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2984); + rtl8168_mdio_write(tp, 0x19, 0x5000); + rtl8168_mdio_write(tp, 0x18, 0x2994); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x18, 0x29a4); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x29b4); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x29c4); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x29d4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x29e4); + rtl8168_mdio_write(tp, 0x19, 0x2000); + rtl8168_mdio_write(tp, 0x18, 0x29f4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2a04); + rtl8168_mdio_write(tp, 0x19, 0xe600); + rtl8168_mdio_write(tp, 0x18, 0x2a14); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x2a24); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2a34); + rtl8168_mdio_write(tp, 0x19, 0x5000); + rtl8168_mdio_write(tp, 0x18, 0x2a44); + rtl8168_mdio_write(tp, 0x19, 0x8500); + rtl8168_mdio_write(tp, 0x18, 0x2a54); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x2a64); + rtl8168_mdio_write(tp, 0x19, 0xac00); + rtl8168_mdio_write(tp, 0x18, 0x2a74); + rtl8168_mdio_write(tp, 0x19, 0x0800); + rtl8168_mdio_write(tp, 0x18, 0x2a84); + rtl8168_mdio_write(tp, 0x19, 0xfc00); + rtl8168_mdio_write(tp, 0x18, 0x2a94); + rtl8168_mdio_write(tp, 0x19, 0xe000); + rtl8168_mdio_write(tp, 0x18, 0x2aa4); + rtl8168_mdio_write(tp, 0x19, 0x7400); + rtl8168_mdio_write(tp, 0x18, 0x2ab4); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x2ac4); + rtl8168_mdio_write(tp, 0x19, 
0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x2ad4); + rtl8168_mdio_write(tp, 0x19, 0x0100); + rtl8168_mdio_write(tp, 0x18, 0x2ae4); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x2af4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2b04); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x18, 0x2b14); + rtl8168_mdio_write(tp, 0x19, 0xfc00); + rtl8168_mdio_write(tp, 0x18, 0x2b24); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2b34); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x2b44); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x18, 0x2b54); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x2b64); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x2b74); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x2b84); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2b94); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x2ba4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2bb4); + rtl8168_mdio_write(tp, 0x19, 0xfc00); + rtl8168_mdio_write(tp, 0x18, 0x2bc4); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x2bd4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2be4); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x2bf4); + rtl8168_mdio_write(tp, 0x19, 0x8900); + rtl8168_mdio_write(tp, 0x18, 0x2c04); + rtl8168_mdio_write(tp, 0x19, 0x8300); + rtl8168_mdio_write(tp, 0x18, 0x2c14); + rtl8168_mdio_write(tp, 0x19, 0xe000); + rtl8168_mdio_write(tp, 0x18, 0x2c24); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2c34); + rtl8168_mdio_write(tp, 0x19, 0xac00); + rtl8168_mdio_write(tp, 0x18, 0x2c44); + rtl8168_mdio_write(tp, 0x19, 0x0800); + rtl8168_mdio_write(tp, 0x18, 0x2c54); + rtl8168_mdio_write(tp, 0x19, 0xfa00); + rtl8168_mdio_write(tp, 0x18, 0x2c64); + rtl8168_mdio_write(tp, 0x19, 0xe100); + rtl8168_mdio_write(tp, 0x18, 0x2c74); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x0001); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2100); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8b88); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0xd480); + rtl8168_mdio_write(tp, 0x06, 0xc1e4); + rtl8168_mdio_write(tp, 0x06, 0x8b9a); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x9bee); + rtl8168_mdio_write(tp, 0x06, 0x8b83); + rtl8168_mdio_write(tp, 0x06, 0x41bf); + rtl8168_mdio_write(tp, 0x06, 0x8b88); + rtl8168_mdio_write(tp, 0x06, 0xec00); + rtl8168_mdio_write(tp, 0x06, 0x19a9); + rtl8168_mdio_write(tp, 0x06, 0x8b90); + rtl8168_mdio_write(tp, 0x06, 0xf9ee); + rtl8168_mdio_write(tp, 0x06, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x41f7); + rtl8168_mdio_write(tp, 0x06, 0x2ff6); + rtl8168_mdio_write(tp, 0x06, 0x28e4); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 
0x06, 0xe5e1); + rtl8168_mdio_write(tp, 0x06, 0x41f7); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x020c); + rtl8168_mdio_write(tp, 0x06, 0x0202); + rtl8168_mdio_write(tp, 0x06, 0x1d02); + rtl8168_mdio_write(tp, 0x06, 0x0230); + rtl8168_mdio_write(tp, 0x06, 0x0202); + rtl8168_mdio_write(tp, 0x06, 0x4002); + rtl8168_mdio_write(tp, 0x06, 0x028b); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x6c02); + rtl8168_mdio_write(tp, 0x06, 0x8085); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaec3); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x10ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x1310); + rtl8168_mdio_write(tp, 0x06, 0x021f); + rtl8168_mdio_write(tp, 0x06, 0x9d02); + rtl8168_mdio_write(tp, 0x06, 0x1f0c); + rtl8168_mdio_write(tp, 0x06, 0x0227); + rtl8168_mdio_write(tp, 0x06, 0x49fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x200b); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x830e); + rtl8168_mdio_write(tp, 0x06, 0x021b); + rtl8168_mdio_write(tp, 0x06, 0x67ad); + rtl8168_mdio_write(tp, 0x06, 0x2211); + rtl8168_mdio_write(tp, 0x06, 0xf622); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x2ba5); + rtl8168_mdio_write(tp, 0x06, 0x022a); + rtl8168_mdio_write(tp, 0x06, 0x2402); + rtl8168_mdio_write(tp, 0x06, 0x80c6); + rtl8168_mdio_write(tp, 0x06, 0x022a); + rtl8168_mdio_write(tp, 0x06, 0xf0ad); + rtl8168_mdio_write(tp, 0x06, 0x2511); + rtl8168_mdio_write(tp, 0x06, 0xf625); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x8226); + rtl8168_mdio_write(tp, 0x06, 0x0204); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x19cc); + rtl8168_mdio_write(tp, 0x06, 0x022b); + rtl8168_mdio_write(tp, 0x06, 0x5bfc); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x0105); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b83); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x44e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x23ad); + rtl8168_mdio_write(tp, 0x06, 0x223b); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xbea0); + rtl8168_mdio_write(tp, 0x06, 0x0005); + rtl8168_mdio_write(tp, 0x06, 0x0228); + 
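+ /* still streaming patch code: after reg 0x05 selected address 0x8000, each write to reg 0x06 delivers the next 16 bits of MCU bytecode; r8169 distributes a comparable blob through the firmware loader */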
rtl8168_mdio_write(tp, 0x06, 0xdeae); + rtl8168_mdio_write(tp, 0x06, 0x42a0); + rtl8168_mdio_write(tp, 0x06, 0x0105); + rtl8168_mdio_write(tp, 0x06, 0x0228); + rtl8168_mdio_write(tp, 0x06, 0xf1ae); + rtl8168_mdio_write(tp, 0x06, 0x3aa0); + rtl8168_mdio_write(tp, 0x06, 0x0205); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x25ae); + rtl8168_mdio_write(tp, 0x06, 0x32a0); + rtl8168_mdio_write(tp, 0x06, 0x0305); + rtl8168_mdio_write(tp, 0x06, 0x0229); + rtl8168_mdio_write(tp, 0x06, 0x9aae); + rtl8168_mdio_write(tp, 0x06, 0x2aa0); + rtl8168_mdio_write(tp, 0x06, 0x0405); + rtl8168_mdio_write(tp, 0x06, 0x0229); + rtl8168_mdio_write(tp, 0x06, 0xaeae); + rtl8168_mdio_write(tp, 0x06, 0x22a0); + rtl8168_mdio_write(tp, 0x06, 0x0505); + rtl8168_mdio_write(tp, 0x06, 0x0229); + rtl8168_mdio_write(tp, 0x06, 0xd7ae); + rtl8168_mdio_write(tp, 0x06, 0x1aa0); + rtl8168_mdio_write(tp, 0x06, 0x0605); + rtl8168_mdio_write(tp, 0x06, 0x0229); + rtl8168_mdio_write(tp, 0x06, 0xfeae); + rtl8168_mdio_write(tp, 0x06, 0x12ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac0); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac1); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x00fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0x022a); + rtl8168_mdio_write(tp, 0x06, 0x67e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x230d); + rtl8168_mdio_write(tp, 0x06, 0x0658); + rtl8168_mdio_write(tp, 0x06, 0x03a0); + rtl8168_mdio_write(tp, 0x06, 0x0202); + rtl8168_mdio_write(tp, 0x06, 0xae2d); + rtl8168_mdio_write(tp, 0x06, 0xa001); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x2da0); + rtl8168_mdio_write(tp, 0x06, 0x004d); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe201); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x44e0); + rtl8168_mdio_write(tp, 0x06, 0x8ac2); + rtl8168_mdio_write(tp, 0x06, 0xe48a); + rtl8168_mdio_write(tp, 0x06, 0xc4e0); + rtl8168_mdio_write(tp, 0x06, 0x8ac3); + rtl8168_mdio_write(tp, 0x06, 0xe48a); + rtl8168_mdio_write(tp, 0x06, 0xc5ee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x03e0); + rtl8168_mdio_write(tp, 0x06, 0x8b83); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x3aee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x05ae); + rtl8168_mdio_write(tp, 0x06, 0x34e0); + rtl8168_mdio_write(tp, 0x06, 0x8ace); + rtl8168_mdio_write(tp, 0x06, 0xae03); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xcfe1); + rtl8168_mdio_write(tp, 0x06, 0x8ac2); + rtl8168_mdio_write(tp, 0x06, 0x4905); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xc4e1); + rtl8168_mdio_write(tp, 0x06, 0x8ac3); + rtl8168_mdio_write(tp, 0x06, 0x4905); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xc5ee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x2ab6); + rtl8168_mdio_write(tp, 0x06, 0xac20); + rtl8168_mdio_write(tp, 0x06, 0x1202); + rtl8168_mdio_write(tp, 0x06, 0x819b); + rtl8168_mdio_write(tp, 0x06, 0xac20); + rtl8168_mdio_write(tp, 0x06, 0x0cee); + rtl8168_mdio_write(tp, 0x06, 
0x8ac1); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x02fc); + rtl8168_mdio_write(tp, 0x06, 0x04d0); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x590f); + rtl8168_mdio_write(tp, 0x06, 0x3902); + rtl8168_mdio_write(tp, 0x06, 0xaa04); + rtl8168_mdio_write(tp, 0x06, 0xd001); + rtl8168_mdio_write(tp, 0x06, 0xae02); + rtl8168_mdio_write(tp, 0x06, 0xd000); + rtl8168_mdio_write(tp, 0x06, 0x04f9); + rtl8168_mdio_write(tp, 0x06, 0xfae2); + rtl8168_mdio_write(tp, 0x06, 0xe2d2); + rtl8168_mdio_write(tp, 0x06, 0xe3e2); + rtl8168_mdio_write(tp, 0x06, 0xd3f9); + rtl8168_mdio_write(tp, 0x06, 0x5af7); + rtl8168_mdio_write(tp, 0x06, 0xe6e2); + rtl8168_mdio_write(tp, 0x06, 0xd2e7); + rtl8168_mdio_write(tp, 0x06, 0xe2d3); + rtl8168_mdio_write(tp, 0x06, 0xe2e0); + rtl8168_mdio_write(tp, 0x06, 0x2ce3); + rtl8168_mdio_write(tp, 0x06, 0xe02d); + rtl8168_mdio_write(tp, 0x06, 0xf95b); + rtl8168_mdio_write(tp, 0x06, 0xe01e); + rtl8168_mdio_write(tp, 0x06, 0x30e6); + rtl8168_mdio_write(tp, 0x06, 0xe02c); + rtl8168_mdio_write(tp, 0x06, 0xe7e0); + rtl8168_mdio_write(tp, 0x06, 0x2de2); + rtl8168_mdio_write(tp, 0x06, 0xe2cc); + rtl8168_mdio_write(tp, 0x06, 0xe3e2); + rtl8168_mdio_write(tp, 0x06, 0xcdf9); + rtl8168_mdio_write(tp, 0x06, 0x5a0f); + rtl8168_mdio_write(tp, 0x06, 0x6a50); + rtl8168_mdio_write(tp, 0x06, 0xe6e2); + rtl8168_mdio_write(tp, 0x06, 0xcce7); + rtl8168_mdio_write(tp, 0x06, 0xe2cd); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x3ce1); + rtl8168_mdio_write(tp, 0x06, 0xe03d); + rtl8168_mdio_write(tp, 0x06, 0xef64); + rtl8168_mdio_write(tp, 0x06, 0xfde0); + rtl8168_mdio_write(tp, 0x06, 0xe2cc); + rtl8168_mdio_write(tp, 0x06, 0xe1e2); + rtl8168_mdio_write(tp, 0x06, 0xcd58); + rtl8168_mdio_write(tp, 0x06, 0x0f5a); + rtl8168_mdio_write(tp, 0x06, 0xf01e); + rtl8168_mdio_write(tp, 0x06, 0x02e4); + rtl8168_mdio_write(tp, 0x06, 0xe2cc); + rtl8168_mdio_write(tp, 0x06, 0xe5e2); + rtl8168_mdio_write(tp, 0x06, 0xcdfd); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x2ce1); + rtl8168_mdio_write(tp, 0x06, 0xe02d); + rtl8168_mdio_write(tp, 0x06, 0x59e0); + rtl8168_mdio_write(tp, 0x06, 0x5b1f); + rtl8168_mdio_write(tp, 0x06, 0x1e13); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x2ce5); + rtl8168_mdio_write(tp, 0x06, 0xe02d); + rtl8168_mdio_write(tp, 0x06, 0xfde0); + rtl8168_mdio_write(tp, 0x06, 0xe2d2); + rtl8168_mdio_write(tp, 0x06, 0xe1e2); + rtl8168_mdio_write(tp, 0x06, 0xd358); + rtl8168_mdio_write(tp, 0x06, 0xf75a); + rtl8168_mdio_write(tp, 0x06, 0x081e); + rtl8168_mdio_write(tp, 0x06, 0x02e4); + rtl8168_mdio_write(tp, 0x06, 0xe2d2); + rtl8168_mdio_write(tp, 0x06, 0xe5e2); + rtl8168_mdio_write(tp, 0x06, 0xd3ef); + rtl8168_mdio_write(tp, 0x06, 0x46fe); + rtl8168_mdio_write(tp, 0x06, 0xfd04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xc4e1); + rtl8168_mdio_write(tp, 0x06, 0x8b6e); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e58); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x6ead); + rtl8168_mdio_write(tp, 0x06, 0x2222); + rtl8168_mdio_write(tp, 
0x06, 0xac27); + rtl8168_mdio_write(tp, 0x06, 0x55ac); + rtl8168_mdio_write(tp, 0x06, 0x2602); + rtl8168_mdio_write(tp, 0x06, 0xae1a); + rtl8168_mdio_write(tp, 0x06, 0xd106); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xba02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd107); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xbd02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd107); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xc002); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xae30); + rtl8168_mdio_write(tp, 0x06, 0xd103); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xc302); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xc602); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xca02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd10f); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xba02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xbd02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xc002); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xc302); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd011); + rtl8168_mdio_write(tp, 0x06, 0x022b); + rtl8168_mdio_write(tp, 0x06, 0xfb59); + rtl8168_mdio_write(tp, 0x06, 0x03ef); + rtl8168_mdio_write(tp, 0x06, 0x01d1); + rtl8168_mdio_write(tp, 0x06, 0x00a0); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xc602); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd111); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x020c); + rtl8168_mdio_write(tp, 0x06, 0x11ad); + rtl8168_mdio_write(tp, 0x06, 0x2102); + rtl8168_mdio_write(tp, 0x06, 0x0c12); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xca02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xaec8); + rtl8168_mdio_write(tp, 0x06, 0x70e4); + rtl8168_mdio_write(tp, 0x06, 0x2602); + rtl8168_mdio_write(tp, 0x06, 0x82d1); + rtl8168_mdio_write(tp, 0x06, 0x05f8); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0xe2fe); + rtl8168_mdio_write(tp, 0x06, 0xe1e2); + rtl8168_mdio_write(tp, 0x06, 0xffad); + rtl8168_mdio_write(tp, 0x06, 0x2d1a); + rtl8168_mdio_write(tp, 0x06, 0xe0e1); + rtl8168_mdio_write(tp, 0x06, 0x4ee1); + rtl8168_mdio_write(tp, 0x06, 0xe14f); + rtl8168_mdio_write(tp, 0x06, 0xac2d); + rtl8168_mdio_write(tp, 0x06, 0x22f6); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x033b); + rtl8168_mdio_write(tp, 0x06, 0xf703); + rtl8168_mdio_write(tp, 0x06, 0xf706); + rtl8168_mdio_write(tp, 0x06, 0xbf84); + 
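+ /* the recurring d1xx / bf3bxx / 022dc1 byte motif visible here reads like a load-register / load-pointer / call idiom in the bytecode -- again speculative, preserved verbatim from the vendor drop */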
rtl8168_mdio_write(tp, 0x06, 0x4402); + rtl8168_mdio_write(tp, 0x06, 0x2d21); + rtl8168_mdio_write(tp, 0x06, 0xae11); + rtl8168_mdio_write(tp, 0x06, 0xe0e1); + rtl8168_mdio_write(tp, 0x06, 0x4ee1); + rtl8168_mdio_write(tp, 0x06, 0xe14f); + rtl8168_mdio_write(tp, 0x06, 0xad2d); + rtl8168_mdio_write(tp, 0x06, 0x08bf); + rtl8168_mdio_write(tp, 0x06, 0x844f); + rtl8168_mdio_write(tp, 0x06, 0x022d); + rtl8168_mdio_write(tp, 0x06, 0x21f6); + rtl8168_mdio_write(tp, 0x06, 0x06ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0x4502); + rtl8168_mdio_write(tp, 0x06, 0x83a2); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe001); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x1fd1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x843b); + rtl8168_mdio_write(tp, 0x06, 0x022d); + rtl8168_mdio_write(tp, 0x06, 0xc1e0); + rtl8168_mdio_write(tp, 0x06, 0xe020); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x21ad); + rtl8168_mdio_write(tp, 0x06, 0x200e); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf84); + rtl8168_mdio_write(tp, 0x06, 0x3b02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0x9602); + rtl8168_mdio_write(tp, 0x06, 0x2d21); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x204c); + rtl8168_mdio_write(tp, 0x06, 0xd200); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x0058); + rtl8168_mdio_write(tp, 0x06, 0x010c); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 0x5810); + rtl8168_mdio_write(tp, 0x06, 0x1e20); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x3658); + rtl8168_mdio_write(tp, 0x06, 0x031e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xe01e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0x8b64); + rtl8168_mdio_write(tp, 0x06, 0x1f02); + rtl8168_mdio_write(tp, 0x06, 0x9e22); + rtl8168_mdio_write(tp, 0x06, 0xe68b); + rtl8168_mdio_write(tp, 0x06, 0x64ad); + rtl8168_mdio_write(tp, 0x06, 0x3214); + rtl8168_mdio_write(tp, 0x06, 0xad34); + rtl8168_mdio_write(tp, 0x06, 0x11ef); + rtl8168_mdio_write(tp, 0x06, 0x0258); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x07ad); + rtl8168_mdio_write(tp, 0x06, 0x3508); + rtl8168_mdio_write(tp, 0x06, 0x5ac0); + rtl8168_mdio_write(tp, 0x06, 0x9f04); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xae02); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf84); + rtl8168_mdio_write(tp, 0x06, 0x3e02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 
0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfbe0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x22e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x23e2); + rtl8168_mdio_write(tp, 0x06, 0xe036); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0x375a); + rtl8168_mdio_write(tp, 0x06, 0xc40d); + rtl8168_mdio_write(tp, 0x06, 0x0158); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x20e3); + rtl8168_mdio_write(tp, 0x06, 0x8ae7); + rtl8168_mdio_write(tp, 0x06, 0xac31); + rtl8168_mdio_write(tp, 0x06, 0x60ac); + rtl8168_mdio_write(tp, 0x06, 0x3a08); + rtl8168_mdio_write(tp, 0x06, 0xac3e); + rtl8168_mdio_write(tp, 0x06, 0x26ae); + rtl8168_mdio_write(tp, 0x06, 0x67af); + rtl8168_mdio_write(tp, 0x06, 0x8437); + rtl8168_mdio_write(tp, 0x06, 0xad37); + rtl8168_mdio_write(tp, 0x06, 0x61e0); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xe91b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x51d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x8441); + rtl8168_mdio_write(tp, 0x06, 0x022d); + rtl8168_mdio_write(tp, 0x06, 0xc1ee); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x43ad); + rtl8168_mdio_write(tp, 0x06, 0x3627); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xeee1); + rtl8168_mdio_write(tp, 0x06, 0x8aef); + rtl8168_mdio_write(tp, 0x06, 0xef74); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xeae1); + rtl8168_mdio_write(tp, 0x06, 0x8aeb); + rtl8168_mdio_write(tp, 0x06, 0x1b74); + rtl8168_mdio_write(tp, 0x06, 0x9e2e); + rtl8168_mdio_write(tp, 0x06, 0x14e4); + rtl8168_mdio_write(tp, 0x06, 0x8aea); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xebef); + rtl8168_mdio_write(tp, 0x06, 0x74e0); + rtl8168_mdio_write(tp, 0x06, 0x8aee); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xef1b); + rtl8168_mdio_write(tp, 0x06, 0x479e); + rtl8168_mdio_write(tp, 0x06, 0x0fae); + rtl8168_mdio_write(tp, 0x06, 0x19ee); + rtl8168_mdio_write(tp, 0x06, 0x8aea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8aeb); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x0fac); + rtl8168_mdio_write(tp, 0x06, 0x390c); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf84); + rtl8168_mdio_write(tp, 0x06, 0x4102); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe800); + rtl8168_mdio_write(tp, 0x06, 0xe68a); + rtl8168_mdio_write(tp, 0x06, 0xe7ff); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x0400); + rtl8168_mdio_write(tp, 0x06, 0xe234); + rtl8168_mdio_write(tp, 0x06, 0xcce2); + rtl8168_mdio_write(tp, 0x06, 0x0088); + rtl8168_mdio_write(tp, 0x06, 0xe200); + rtl8168_mdio_write(tp, 0x06, 0xa725); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x1de5); + rtl8168_mdio_write(tp, 0x06, 0x0a2c); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x6de5); + rtl8168_mdio_write(tp, 0x06, 0x0a1d); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 
0x06, 0x1ce5); + rtl8168_mdio_write(tp, 0x06, 0x0a2d); + rtl8168_mdio_write(tp, 0x06, 0xa755); + rtl8168_mdio_write(tp, 0x05, 0x8b64); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8b94); + rtl8168_mdio_write(tp, 0x06, 0x82cd); + rtl8168_mdio_write(tp, 0x05, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0x2000); + rtl8168_mdio_write(tp, 0x05, 0x8aee); + rtl8168_mdio_write(tp, 0x06, 0x03b8); + rtl8168_mdio_write(tp, 0x05, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x01); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x01, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~(BIT_2); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0028); + rtl8168_mdio_write(tp, 0x15, 0x0010); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0041); + rtl8168_mdio_write(tp, 0x15, 0x0802); + rtl8168_mdio_write(tp, 0x16, 0x2185); + rtl8168_mdio_write(tp, 0x1f, 0x0000); +} + +static void +rtl8168_set_phy_mcu_8168e_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val, i; + + if (rtl8168_efuse_read(tp, 0x22) == 0x0c) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x1800); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x17, 0x0117); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + rtl8168_mdio_write(tp, 0x1B, 0x5000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x4104); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1E); + gphy_val &= 0x03FF; + if (gphy_val == 0x000C) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x07); + if ((gphy_val & BIT_5) == 0) + break; + } + gphy_val = rtl8168_mdio_read(tp, 0x07); + if (gphy_val & BIT_5) { + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x00a1); + rtl8168_mdio_write(tp, 0x17, 0x1000); + rtl8168_mdio_write(tp, 0x17, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2000); + rtl8168_mdio_write(tp, 0x1e, 0x002f); + rtl8168_mdio_write(tp, 0x18, 0x9bfb); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x07, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x08); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x08, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + rtl8168_mdio_write(tp, 0x15, 0x000e); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15,
0x0010); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x0018); + rtl8168_mdio_write(tp, 0x19, 0x4801); + rtl8168_mdio_write(tp, 0x15, 0x0019); + rtl8168_mdio_write(tp, 0x19, 0x6801); + rtl8168_mdio_write(tp, 0x15, 0x001a); + rtl8168_mdio_write(tp, 0x19, 0x66a1); + rtl8168_mdio_write(tp, 0x15, 0x001f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0020); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0021); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0022); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0023); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0024); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0025); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0026); + rtl8168_mdio_write(tp, 0x19, 0x40ea); + rtl8168_mdio_write(tp, 0x15, 0x0027); + rtl8168_mdio_write(tp, 0x19, 0x4503); + rtl8168_mdio_write(tp, 0x15, 0x0028); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0029); + rtl8168_mdio_write(tp, 0x19, 0xa631); + rtl8168_mdio_write(tp, 0x15, 0x002a); + rtl8168_mdio_write(tp, 0x19, 0x9717); + rtl8168_mdio_write(tp, 0x15, 0x002b); + rtl8168_mdio_write(tp, 0x19, 0x302c); + rtl8168_mdio_write(tp, 0x15, 0x002c); + rtl8168_mdio_write(tp, 0x19, 0x4802); + rtl8168_mdio_write(tp, 0x15, 0x002d); + rtl8168_mdio_write(tp, 0x19, 0x58da); + rtl8168_mdio_write(tp, 0x15, 0x002e); + rtl8168_mdio_write(tp, 0x19, 0x400d); + rtl8168_mdio_write(tp, 0x15, 0x002f); + rtl8168_mdio_write(tp, 0x19, 0x4488); + rtl8168_mdio_write(tp, 0x15, 0x0030); + rtl8168_mdio_write(tp, 0x19, 0x9e00); + rtl8168_mdio_write(tp, 0x15, 0x0031); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0032); + rtl8168_mdio_write(tp, 0x19, 0x6481); + rtl8168_mdio_write(tp, 0x15, 0x0033); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0034); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0035); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0036); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0037); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0038); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0039); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x003a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x003b); + rtl8168_mdio_write(tp, 0x19, 0x63e8); + rtl8168_mdio_write(tp, 0x15, 0x003c); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x003d); + rtl8168_mdio_write(tp, 0x19, 0x59d4); + rtl8168_mdio_write(tp, 0x15, 0x003e); + rtl8168_mdio_write(tp, 0x19, 0x63f8); + rtl8168_mdio_write(tp, 0x15, 0x0040); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0041); + rtl8168_mdio_write(tp, 0x19, 0x30de); + rtl8168_mdio_write(tp, 0x15, 0x0044); + rtl8168_mdio_write(tp, 0x19, 0x480f); + rtl8168_mdio_write(tp, 0x15, 0x0045); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x0046); + rtl8168_mdio_write(tp, 0x19, 0x6680); + rtl8168_mdio_write(tp, 0x15, 0x0047); + rtl8168_mdio_write(tp, 0x19, 0x7c10); + rtl8168_mdio_write(tp, 0x15, 0x0048); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0049); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004a); + rtl8168_mdio_write(tp, 
0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004f); + rtl8168_mdio_write(tp, 0x19, 0x40ea); + rtl8168_mdio_write(tp, 0x15, 0x0050); + rtl8168_mdio_write(tp, 0x19, 0x4503); + rtl8168_mdio_write(tp, 0x15, 0x0051); + rtl8168_mdio_write(tp, 0x19, 0x58ca); + rtl8168_mdio_write(tp, 0x15, 0x0052); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0053); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x0054); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x0055); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0056); + rtl8168_mdio_write(tp, 0x19, 0x3000); + rtl8168_mdio_write(tp, 0x15, 0x00a1); + rtl8168_mdio_write(tp, 0x19, 0x3044); + rtl8168_mdio_write(tp, 0x15, 0x00ab); + rtl8168_mdio_write(tp, 0x19, 0x5820); + rtl8168_mdio_write(tp, 0x15, 0x00ac); + rtl8168_mdio_write(tp, 0x19, 0x5e04); + rtl8168_mdio_write(tp, 0x15, 0x00ad); + rtl8168_mdio_write(tp, 0x19, 0xb60c); + rtl8168_mdio_write(tp, 0x15, 0x00af); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15, 0x00b2); + rtl8168_mdio_write(tp, 0x19, 0x30b9); + rtl8168_mdio_write(tp, 0x15, 0x00b9); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x00ba); + rtl8168_mdio_write(tp, 0x19, 0x480b); + rtl8168_mdio_write(tp, 0x15, 0x00bb); + rtl8168_mdio_write(tp, 0x19, 0x5e00); + rtl8168_mdio_write(tp, 0x15, 0x00bc); + rtl8168_mdio_write(tp, 0x19, 0x405f); + rtl8168_mdio_write(tp, 0x15, 0x00bd); + rtl8168_mdio_write(tp, 0x19, 0x4448); + rtl8168_mdio_write(tp, 0x15, 0x00be); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x00bf); + rtl8168_mdio_write(tp, 0x19, 0x4468); + rtl8168_mdio_write(tp, 0x15, 0x00c0); + rtl8168_mdio_write(tp, 0x19, 0x9c02); + rtl8168_mdio_write(tp, 0x15, 0x00c1); + rtl8168_mdio_write(tp, 0x19, 0x58a0); + rtl8168_mdio_write(tp, 0x15, 0x00c2); + rtl8168_mdio_write(tp, 0x19, 0xb605); + rtl8168_mdio_write(tp, 0x15, 0x00c3); + rtl8168_mdio_write(tp, 0x19, 0xc0d3); + rtl8168_mdio_write(tp, 0x15, 0x00c4); + rtl8168_mdio_write(tp, 0x19, 0x00e6); + rtl8168_mdio_write(tp, 0x15, 0x00c5); + rtl8168_mdio_write(tp, 0x19, 0xdaec); + rtl8168_mdio_write(tp, 0x15, 0x00c6); + rtl8168_mdio_write(tp, 0x19, 0x00fa); + rtl8168_mdio_write(tp, 0x15, 0x00c7); + rtl8168_mdio_write(tp, 0x19, 0x9df9); + rtl8168_mdio_write(tp, 0x15, 0x0112); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0113); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0114); + rtl8168_mdio_write(tp, 0x19, 0x63f0); + rtl8168_mdio_write(tp, 0x15, 0x0115); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0116); + rtl8168_mdio_write(tp, 0x19, 0x4418); + rtl8168_mdio_write(tp, 0x15, 0x0117); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x15, 0x0118); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x0119); + rtl8168_mdio_write(tp, 0x19, 0x64e1); + rtl8168_mdio_write(tp, 0x15, 0x011a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0150); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0151); + rtl8168_mdio_write(tp, 0x19, 0x6461); + 
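+ /* Each 0x15/0x19 pair in this block behaves as an address/data write into a vendor patch table on PHY extended page 0x0023 (selected above via reg 0x1f = 0x0007, reg 0x1e = 0x0023): reg 0x15 picks the entry, reg 0x19 supplies its value. */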
rtl8168_mdio_write(tp, 0x15, 0x0152); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0153); + rtl8168_mdio_write(tp, 0x19, 0x4540); + rtl8168_mdio_write(tp, 0x15, 0x0154); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0155); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x15, 0x0156); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0157); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0158); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0159); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x015a); + rtl8168_mdio_write(tp, 0x19, 0x30fe); + rtl8168_mdio_write(tp, 0x15, 0x029c); + rtl8168_mdio_write(tp, 0x19, 0x0070); + rtl8168_mdio_write(tp, 0x15, 0x02b2); + rtl8168_mdio_write(tp, 0x19, 0x005a); + rtl8168_mdio_write(tp, 0x15, 0x02bd); + rtl8168_mdio_write(tp, 0x19, 0xa522); + rtl8168_mdio_write(tp, 0x15, 0x02ce); + rtl8168_mdio_write(tp, 0x19, 0xb63e); + rtl8168_mdio_write(tp, 0x15, 0x02d9); + rtl8168_mdio_write(tp, 0x19, 0x32df); + rtl8168_mdio_write(tp, 0x15, 0x02df); + rtl8168_mdio_write(tp, 0x19, 0x4500); + rtl8168_mdio_write(tp, 0x15, 0x02e7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02f4); + rtl8168_mdio_write(tp, 0x19, 0xb618); + rtl8168_mdio_write(tp, 0x15, 0x02fb); + rtl8168_mdio_write(tp, 0x19, 0xb900); + rtl8168_mdio_write(tp, 0x15, 0x02fc); + rtl8168_mdio_write(tp, 0x19, 0x49b5); + rtl8168_mdio_write(tp, 0x15, 0x02fd); + rtl8168_mdio_write(tp, 0x19, 0x6812); + rtl8168_mdio_write(tp, 0x15, 0x02fe); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x02ff); + rtl8168_mdio_write(tp, 0x19, 0x9900); + rtl8168_mdio_write(tp, 0x15, 0x0300); + rtl8168_mdio_write(tp, 0x19, 0x64a0); + rtl8168_mdio_write(tp, 0x15, 0x0301); + rtl8168_mdio_write(tp, 0x19, 0x3316); + rtl8168_mdio_write(tp, 0x15, 0x0308); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030c); + rtl8168_mdio_write(tp, 0x19, 0x3000); + rtl8168_mdio_write(tp, 0x15, 0x0312); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0313); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0314); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0315); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0316); + rtl8168_mdio_write(tp, 0x19, 0x49b5); + rtl8168_mdio_write(tp, 0x15, 0x0317); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x0318); + rtl8168_mdio_write(tp, 0x19, 0x4d00); + rtl8168_mdio_write(tp, 0x15, 0x0319); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x031a); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x031b); + rtl8168_mdio_write(tp, 0x19, 0x4925); + rtl8168_mdio_write(tp, 0x15, 0x031c); + rtl8168_mdio_write(tp, 0x19, 0x403b); + rtl8168_mdio_write(tp, 0x15, 0x031d); + rtl8168_mdio_write(tp, 0x19, 0xa602); + rtl8168_mdio_write(tp, 0x15, 0x031e); + rtl8168_mdio_write(tp, 0x19, 0x402f); + rtl8168_mdio_write(tp, 0x15, 0x031f); + rtl8168_mdio_write(tp, 0x19, 0x4484); + rtl8168_mdio_write(tp, 0x15, 0x0320); + rtl8168_mdio_write(tp, 0x19, 0x40c8); + rtl8168_mdio_write(tp, 0x15, 0x0321); + rtl8168_mdio_write(tp, 0x19, 0x44c4); + rtl8168_mdio_write(tp, 0x15, 0x0322); + rtl8168_mdio_write(tp, 0x19, 0x404f); + rtl8168_mdio_write(tp, 0x15, 0x0323); + rtl8168_mdio_write(tp, 0x19, 0x44c8); + rtl8168_mdio_write(tp, 0x15, 
0x0324); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0325); + rtl8168_mdio_write(tp, 0x19, 0x00e7); + rtl8168_mdio_write(tp, 0x15, 0x0326); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0327); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x0328); + rtl8168_mdio_write(tp, 0x19, 0x4d48); + rtl8168_mdio_write(tp, 0x15, 0x0329); + rtl8168_mdio_write(tp, 0x19, 0x332b); + rtl8168_mdio_write(tp, 0x15, 0x032a); + rtl8168_mdio_write(tp, 0x19, 0x4d40); + rtl8168_mdio_write(tp, 0x15, 0x032c); + rtl8168_mdio_write(tp, 0x19, 0x00f8); + rtl8168_mdio_write(tp, 0x15, 0x032d); + rtl8168_mdio_write(tp, 0x19, 0x82b2); + rtl8168_mdio_write(tp, 0x15, 0x032f); + rtl8168_mdio_write(tp, 0x19, 0x00b0); + rtl8168_mdio_write(tp, 0x15, 0x0332); + rtl8168_mdio_write(tp, 0x19, 0x91f2); + rtl8168_mdio_write(tp, 0x15, 0x033f); + rtl8168_mdio_write(tp, 0x19, 0xb6cd); + rtl8168_mdio_write(tp, 0x15, 0x0340); + rtl8168_mdio_write(tp, 0x19, 0x9e01); + rtl8168_mdio_write(tp, 0x15, 0x0341); + rtl8168_mdio_write(tp, 0x19, 0xd11d); + rtl8168_mdio_write(tp, 0x15, 0x0342); + rtl8168_mdio_write(tp, 0x19, 0x009d); + rtl8168_mdio_write(tp, 0x15, 0x0343); + rtl8168_mdio_write(tp, 0x19, 0xbb1c); + rtl8168_mdio_write(tp, 0x15, 0x0344); + rtl8168_mdio_write(tp, 0x19, 0x8102); + rtl8168_mdio_write(tp, 0x15, 0x0345); + rtl8168_mdio_write(tp, 0x19, 0x3348); + rtl8168_mdio_write(tp, 0x15, 0x0346); + rtl8168_mdio_write(tp, 0x19, 0xa231); + rtl8168_mdio_write(tp, 0x15, 0x0347); + rtl8168_mdio_write(tp, 0x19, 0x335b); + rtl8168_mdio_write(tp, 0x15, 0x0348); + rtl8168_mdio_write(tp, 0x19, 0x91f7); + rtl8168_mdio_write(tp, 0x15, 0x0349); + rtl8168_mdio_write(tp, 0x19, 0xc218); + rtl8168_mdio_write(tp, 0x15, 0x034a); + rtl8168_mdio_write(tp, 0x19, 0x00f5); + rtl8168_mdio_write(tp, 0x15, 0x034b); + rtl8168_mdio_write(tp, 0x19, 0x335b); + rtl8168_mdio_write(tp, 0x15, 0x034c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x034d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x034e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x034f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0350); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x035b); + rtl8168_mdio_write(tp, 0x19, 0xa23c); + rtl8168_mdio_write(tp, 0x15, 0x035c); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x035d); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x035e); + rtl8168_mdio_write(tp, 0x19, 0x3397); + rtl8168_mdio_write(tp, 0x15, 0x0363); + rtl8168_mdio_write(tp, 0x19, 0xb6a9); + rtl8168_mdio_write(tp, 0x15, 0x0366); + rtl8168_mdio_write(tp, 0x19, 0x00f5); + rtl8168_mdio_write(tp, 0x15, 0x0382); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0388); + rtl8168_mdio_write(tp, 0x19, 0x0084); + rtl8168_mdio_write(tp, 0x15, 0x0389); + rtl8168_mdio_write(tp, 0x19, 0xdd17); + rtl8168_mdio_write(tp, 0x15, 0x038a); + rtl8168_mdio_write(tp, 0x19, 0x000b); + rtl8168_mdio_write(tp, 0x15, 0x038b); + rtl8168_mdio_write(tp, 0x19, 0xa10a); + rtl8168_mdio_write(tp, 0x15, 0x038c); + rtl8168_mdio_write(tp, 0x19, 0x337e); + rtl8168_mdio_write(tp, 0x15, 0x038d); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x038e); + rtl8168_mdio_write(tp, 0x19, 0xa107); + rtl8168_mdio_write(tp, 0x15, 0x038f); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x0390); + rtl8168_mdio_write(tp, 
0x19, 0xc017); + rtl8168_mdio_write(tp, 0x15, 0x0391); + rtl8168_mdio_write(tp, 0x19, 0x0004); + rtl8168_mdio_write(tp, 0x15, 0x0392); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0393); + rtl8168_mdio_write(tp, 0x19, 0x00f4); + rtl8168_mdio_write(tp, 0x15, 0x0397); + rtl8168_mdio_write(tp, 0x19, 0x4098); + rtl8168_mdio_write(tp, 0x15, 0x0398); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x0399); + rtl8168_mdio_write(tp, 0x19, 0x55bf); + rtl8168_mdio_write(tp, 0x15, 0x039a); + rtl8168_mdio_write(tp, 0x19, 0x4bb9); + rtl8168_mdio_write(tp, 0x15, 0x039b); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x039c); + rtl8168_mdio_write(tp, 0x19, 0x4b29); + rtl8168_mdio_write(tp, 0x15, 0x039d); + rtl8168_mdio_write(tp, 0x19, 0x4041); + rtl8168_mdio_write(tp, 0x15, 0x039e); + rtl8168_mdio_write(tp, 0x19, 0x442a); + rtl8168_mdio_write(tp, 0x15, 0x039f); + rtl8168_mdio_write(tp, 0x19, 0x4029); + rtl8168_mdio_write(tp, 0x15, 0x03aa); + rtl8168_mdio_write(tp, 0x19, 0x33b8); + rtl8168_mdio_write(tp, 0x15, 0x03b6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b8); + rtl8168_mdio_write(tp, 0x19, 0x543f); + rtl8168_mdio_write(tp, 0x15, 0x03b9); + rtl8168_mdio_write(tp, 0x19, 0x499a); + rtl8168_mdio_write(tp, 0x15, 0x03ba); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x03bb); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03bc); + rtl8168_mdio_write(tp, 0x19, 0x490a); + rtl8168_mdio_write(tp, 0x15, 0x03bd); + rtl8168_mdio_write(tp, 0x19, 0x405e); + rtl8168_mdio_write(tp, 0x15, 0x03c2); + rtl8168_mdio_write(tp, 0x19, 0x9a03); + rtl8168_mdio_write(tp, 0x15, 0x03c4); + rtl8168_mdio_write(tp, 0x19, 0x0015); + rtl8168_mdio_write(tp, 0x15, 0x03c5); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x03c8); + rtl8168_mdio_write(tp, 0x19, 0x9cf7); + rtl8168_mdio_write(tp, 0x15, 0x03c9); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03ca); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03cb); + rtl8168_mdio_write(tp, 0x19, 0x4458); + rtl8168_mdio_write(tp, 0x15, 0x03cd); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03ce); + rtl8168_mdio_write(tp, 0x19, 0x33bf); + rtl8168_mdio_write(tp, 0x15, 0x03cf); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d0); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d1); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d9); + rtl8168_mdio_write(tp, 0x19, 0x49bb); + rtl8168_mdio_write(tp, 0x15, 0x03da); + rtl8168_mdio_write(tp, 0x19, 0x4478); + rtl8168_mdio_write(tp, 0x15, 0x03db); + rtl8168_mdio_write(tp, 0x19, 0x492b); + rtl8168_mdio_write(tp, 0x15, 0x03dc); + rtl8168_mdio_write(tp, 0x19, 0x7c01); + rtl8168_mdio_write(tp, 0x15, 0x03dd); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x03de); + rtl8168_mdio_write(tp, 0x19, 0xbd1a); + rtl8168_mdio_write(tp, 0x15, 0x03df); + rtl8168_mdio_write(tp, 0x19, 0xc428); + 
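+ /* The zeroed 0x19 values above presumably clear unused table entries; the 0x0306/0x0300 writes to reg 0x16 a little further down appear to close out the table update, mirroring the 0x0306/0x0307 writes that opened it. */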
rtl8168_mdio_write(tp, 0x15, 0x03e0); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x03e1); + rtl8168_mdio_write(tp, 0x19, 0x9cfd); + rtl8168_mdio_write(tp, 0x15, 0x03e2); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03e3); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03e4); + rtl8168_mdio_write(tp, 0x19, 0x4458); + rtl8168_mdio_write(tp, 0x15, 0x03e5); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03e6); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03e7); + rtl8168_mdio_write(tp, 0x19, 0x33de); + rtl8168_mdio_write(tp, 0x15, 0x03e8); + rtl8168_mdio_write(tp, 0x19, 0xc218); + rtl8168_mdio_write(tp, 0x15, 0x03e9); + rtl8168_mdio_write(tp, 0x19, 0x0002); + rtl8168_mdio_write(tp, 0x15, 0x03ea); + rtl8168_mdio_write(tp, 0x19, 0x32df); + rtl8168_mdio_write(tp, 0x15, 0x03eb); + rtl8168_mdio_write(tp, 0x19, 0x3316); + rtl8168_mdio_write(tp, 0x15, 0x03ec); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ed); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ee); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ef); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03f7); + rtl8168_mdio_write(tp, 0x19, 0x330c); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0xf602); + rtl8168_mdio_write(tp, 0x06, 0x0200); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x9002); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x0202); + rtl8168_mdio_write(tp, 0x06, 0x3402); + rtl8168_mdio_write(tp, 0x06, 0x027f); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0xa602); + rtl8168_mdio_write(tp, 0x06, 0x80bf); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe600); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xee03); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xefb8); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe902); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8285); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8520); + rtl8168_mdio_write(tp, 0x06, 
0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8701); + rtl8168_mdio_write(tp, 0x06, 0xd481); + rtl8168_mdio_write(tp, 0x06, 0x35e4); + rtl8168_mdio_write(tp, 0x06, 0x8b94); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x95bf); + rtl8168_mdio_write(tp, 0x06, 0x8b88); + rtl8168_mdio_write(tp, 0x06, 0xec00); + rtl8168_mdio_write(tp, 0x06, 0x19a9); + rtl8168_mdio_write(tp, 0x06, 0x8b90); + rtl8168_mdio_write(tp, 0x06, 0xf9ee); + rtl8168_mdio_write(tp, 0x06, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x41f7); + rtl8168_mdio_write(tp, 0x06, 0x2ff6); + rtl8168_mdio_write(tp, 0x06, 0x28e4); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xe5e1); + rtl8168_mdio_write(tp, 0x06, 0x4104); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x0dee); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x82f4); + rtl8168_mdio_write(tp, 0x06, 0x021f); + rtl8168_mdio_write(tp, 0x06, 0x4102); + rtl8168_mdio_write(tp, 0x06, 0x2812); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x10ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x139d); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xd602); + rtl8168_mdio_write(tp, 0x06, 0x1f99); + rtl8168_mdio_write(tp, 0x06, 0x0227); + rtl8168_mdio_write(tp, 0x06, 0xeafc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2014); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x8104); + rtl8168_mdio_write(tp, 0x06, 0x021b); + rtl8168_mdio_write(tp, 0x06, 0xf402); + rtl8168_mdio_write(tp, 0x06, 0x2c9c); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x7902); + rtl8168_mdio_write(tp, 0x06, 0x8443); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x11f6); + rtl8168_mdio_write(tp, 0x06, 0x22e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x022c); + rtl8168_mdio_write(tp, 0x06, 0x4602); + rtl8168_mdio_write(tp, 0x06, 0x2ac5); + rtl8168_mdio_write(tp, 0x06, 0x0229); + rtl8168_mdio_write(tp, 0x06, 0x2002); + rtl8168_mdio_write(tp, 0x06, 0x2b91); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x11f6); + rtl8168_mdio_write(tp, 0x06, 0x25e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0xe202); + rtl8168_mdio_write(tp, 0x06, 0x043a); + rtl8168_mdio_write(tp, 0x06, 0x021a); + rtl8168_mdio_write(tp, 0x06, 0x5902); + rtl8168_mdio_write(tp, 0x06, 0x2bfc); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe001); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x1fd1); + rtl8168_mdio_write(tp, 
0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8638); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50e0); + rtl8168_mdio_write(tp, 0x06, 0xe020); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x21ad); + rtl8168_mdio_write(tp, 0x06, 0x200e); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xbf3d); + rtl8168_mdio_write(tp, 0x06, 0x3902); + rtl8168_mdio_write(tp, 0x06, 0x2eb0); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x0402); + rtl8168_mdio_write(tp, 0x06, 0x8591); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x3c05); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xfee1); + rtl8168_mdio_write(tp, 0x06, 0xe2ff); + rtl8168_mdio_write(tp, 0x06, 0xad2d); + rtl8168_mdio_write(tp, 0x06, 0x1ae0); + rtl8168_mdio_write(tp, 0x06, 0xe14e); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x4fac); + rtl8168_mdio_write(tp, 0x06, 0x2d22); + rtl8168_mdio_write(tp, 0x06, 0xf603); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x36f7); + rtl8168_mdio_write(tp, 0x06, 0x03f7); + rtl8168_mdio_write(tp, 0x06, 0x06bf); + rtl8168_mdio_write(tp, 0x06, 0x8622); + rtl8168_mdio_write(tp, 0x06, 0x022e); + rtl8168_mdio_write(tp, 0x06, 0xb0ae); + rtl8168_mdio_write(tp, 0x06, 0x11e0); + rtl8168_mdio_write(tp, 0x06, 0xe14e); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x4fad); + rtl8168_mdio_write(tp, 0x06, 0x2d08); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x2d02); + rtl8168_mdio_write(tp, 0x06, 0x2eb0); + rtl8168_mdio_write(tp, 0x06, 0xf606); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x204c); + rtl8168_mdio_write(tp, 0x06, 0xd200); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x0058); + rtl8168_mdio_write(tp, 0x06, 0x010c); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 0x5810); + rtl8168_mdio_write(tp, 0x06, 0x1e20); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x3658); + rtl8168_mdio_write(tp, 0x06, 0x031e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xe01e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0x8ae6); + rtl8168_mdio_write(tp, 0x06, 0x1f02); + rtl8168_mdio_write(tp, 0x06, 0x9e22); + rtl8168_mdio_write(tp, 0x06, 0xe68a); + rtl8168_mdio_write(tp, 0x06, 0xe6ad); + rtl8168_mdio_write(tp, 0x06, 0x3214); + rtl8168_mdio_write(tp, 0x06, 0xad34); + rtl8168_mdio_write(tp, 0x06, 0x11ef); + rtl8168_mdio_write(tp, 0x06, 0x0258); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x07ad); + rtl8168_mdio_write(tp, 0x06, 0x3508); + rtl8168_mdio_write(tp, 0x06, 0x5ac0); + rtl8168_mdio_write(tp, 0x06, 0x9f04); + 
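+ /* Still inside the reg-0x06 stream: these words form the body of the MCU patch program itself, so individual values carry no MDIO-level meaning (Realtek does not publicly document the patch encoding). */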
rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xae02); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x3e02); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfae0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac26); + rtl8168_mdio_write(tp, 0x06, 0x0ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xac24); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x6bee); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xe0eb); + rtl8168_mdio_write(tp, 0x06, 0x00e2); + rtl8168_mdio_write(tp, 0x06, 0xe07c); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0x7da5); + rtl8168_mdio_write(tp, 0x06, 0x1111); + rtl8168_mdio_write(tp, 0x06, 0x15d2); + rtl8168_mdio_write(tp, 0x06, 0x60d6); + rtl8168_mdio_write(tp, 0x06, 0x6666); + rtl8168_mdio_write(tp, 0x06, 0x0207); + rtl8168_mdio_write(tp, 0x06, 0xf9d2); + rtl8168_mdio_write(tp, 0x06, 0xa0d6); + rtl8168_mdio_write(tp, 0x06, 0xaaaa); + rtl8168_mdio_write(tp, 0x06, 0x0207); + rtl8168_mdio_write(tp, 0x06, 0xf902); + rtl8168_mdio_write(tp, 0x06, 0x825c); + rtl8168_mdio_write(tp, 0x06, 0xae44); + rtl8168_mdio_write(tp, 0x06, 0xa566); + rtl8168_mdio_write(tp, 0x06, 0x6602); + rtl8168_mdio_write(tp, 0x06, 0xae38); + rtl8168_mdio_write(tp, 0x06, 0xa5aa); + rtl8168_mdio_write(tp, 0x06, 0xaa02); + rtl8168_mdio_write(tp, 0x06, 0xae32); + rtl8168_mdio_write(tp, 0x06, 0xeee0); + rtl8168_mdio_write(tp, 0x06, 0xea04); + rtl8168_mdio_write(tp, 0x06, 0xeee0); + rtl8168_mdio_write(tp, 0x06, 0xeb06); + rtl8168_mdio_write(tp, 0x06, 0xe2e0); + rtl8168_mdio_write(tp, 0x06, 0x7ce3); + rtl8168_mdio_write(tp, 0x06, 0xe07d); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x38e1); + rtl8168_mdio_write(tp, 0x06, 0xe039); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x21ad); + rtl8168_mdio_write(tp, 0x06, 0x3f13); + rtl8168_mdio_write(tp, 0x06, 0xe0e4); + rtl8168_mdio_write(tp, 0x06, 0x14e1); + rtl8168_mdio_write(tp, 0x06, 0xe415); + rtl8168_mdio_write(tp, 0x06, 0x6880); + rtl8168_mdio_write(tp, 0x06, 0xe4e4); + rtl8168_mdio_write(tp, 0x06, 0x14e5); + rtl8168_mdio_write(tp, 0x06, 0xe415); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x5cae); + rtl8168_mdio_write(tp, 0x06, 0x0bac); + rtl8168_mdio_write(tp, 0x06, 0x3e02); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x8602); + rtl8168_mdio_write(tp, 0x06, 0x82b0); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2605); + rtl8168_mdio_write(tp, 0x06, 0x0221); + rtl8168_mdio_write(tp, 0x06, 0xf3f7); + rtl8168_mdio_write(tp, 0x06, 0x28e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xad21); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x22f8); + rtl8168_mdio_write(tp, 0x06, 
0xf729); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2405); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xebf7); + rtl8168_mdio_write(tp, 0x06, 0x2ae5); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x2134); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2109); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x2eac); + rtl8168_mdio_write(tp, 0x06, 0x2003); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0x52e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x09e0); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x8337); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2608); + rtl8168_mdio_write(tp, 0x06, 0xe085); + rtl8168_mdio_write(tp, 0x06, 0xd2ad); + rtl8168_mdio_write(tp, 0x06, 0x2502); + rtl8168_mdio_write(tp, 0x06, 0xf628); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x210a); + rtl8168_mdio_write(tp, 0x06, 0xe086); + rtl8168_mdio_write(tp, 0x06, 0x0af6); + rtl8168_mdio_write(tp, 0x06, 0x27a0); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0xf629); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2408); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xedad); + rtl8168_mdio_write(tp, 0x06, 0x2002); + rtl8168_mdio_write(tp, 0x06, 0xf62a); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x2ea1); + rtl8168_mdio_write(tp, 0x06, 0x0003); + rtl8168_mdio_write(tp, 0x06, 0x0221); + rtl8168_mdio_write(tp, 0x06, 0x11fc); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x8aed); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8aec); + rtl8168_mdio_write(tp, 0x06, 0x0004); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x3ae0); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xeb58); + rtl8168_mdio_write(tp, 0x06, 0xf8d1); + rtl8168_mdio_write(tp, 0x06, 0x01e4); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0xebe0); + rtl8168_mdio_write(tp, 0x06, 0xe07c); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x7d5c); + rtl8168_mdio_write(tp, 0x06, 0x00ff); + rtl8168_mdio_write(tp, 0x06, 0x3c00); + rtl8168_mdio_write(tp, 0x06, 0x1eab); + rtl8168_mdio_write(tp, 0x06, 0x1ce0); + rtl8168_mdio_write(tp, 0x06, 0xe04c); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x4d58); + rtl8168_mdio_write(tp, 0x06, 0xc1e4); + rtl8168_mdio_write(tp, 0x06, 0xe04c); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 
0x06, 0x4de0); + rtl8168_mdio_write(tp, 0x06, 0xe0ee); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0x3ce4); + rtl8168_mdio_write(tp, 0x06, 0xe0ee); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0xeffc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2412); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0xeee1); + rtl8168_mdio_write(tp, 0x06, 0xe0ef); + rtl8168_mdio_write(tp, 0x06, 0x59c3); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0xeee5); + rtl8168_mdio_write(tp, 0x06, 0xe0ef); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xed01); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac25); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x8363); + rtl8168_mdio_write(tp, 0x06, 0xae03); + rtl8168_mdio_write(tp, 0x06, 0x0225); + rtl8168_mdio_write(tp, 0x06, 0x16fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfae0); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0xa000); + rtl8168_mdio_write(tp, 0x06, 0x19e0); + rtl8168_mdio_write(tp, 0x06, 0x860b); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x331b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x04aa); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x06ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0xe602); + rtl8168_mdio_write(tp, 0x06, 0x241e); + rtl8168_mdio_write(tp, 0x06, 0xae14); + rtl8168_mdio_write(tp, 0x06, 0xa001); + rtl8168_mdio_write(tp, 0x06, 0x1402); + rtl8168_mdio_write(tp, 0x06, 0x2426); + rtl8168_mdio_write(tp, 0x06, 0xbf26); + rtl8168_mdio_write(tp, 0x06, 0x6d02); + rtl8168_mdio_write(tp, 0x06, 0x2eb0); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0b00); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0a02); + rtl8168_mdio_write(tp, 0x06, 0xaf84); + rtl8168_mdio_write(tp, 0x06, 0x3ca0); + rtl8168_mdio_write(tp, 0x06, 0x0252); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0400); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0500); + rtl8168_mdio_write(tp, 0x06, 0xe086); + rtl8168_mdio_write(tp, 0x06, 0x0be1); + rtl8168_mdio_write(tp, 0x06, 0x8b32); + rtl8168_mdio_write(tp, 0x06, 0x1b10); + rtl8168_mdio_write(tp, 0x06, 0x9e04); + rtl8168_mdio_write(tp, 0x06, 0xaa02); + rtl8168_mdio_write(tp, 0x06, 0xaecb); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0b00); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x3ae2); + rtl8168_mdio_write(tp, 0x06, 0x8604); + rtl8168_mdio_write(tp, 0x06, 0xe386); + rtl8168_mdio_write(tp, 0x06, 0x05ef); + rtl8168_mdio_write(tp, 0x06, 0x65e2); + rtl8168_mdio_write(tp, 0x06, 0x8606); + rtl8168_mdio_write(tp, 0x06, 0xe386); + rtl8168_mdio_write(tp, 0x06, 0x071b); + rtl8168_mdio_write(tp, 0x06, 0x56aa); + rtl8168_mdio_write(tp, 0x06, 0x0eef); + rtl8168_mdio_write(tp, 0x06, 0x56e6); + rtl8168_mdio_write(tp, 0x06, 0x8606); + rtl8168_mdio_write(tp, 0x06, 0xe786); + 
rtl8168_mdio_write(tp, 0x06, 0x07e2); + rtl8168_mdio_write(tp, 0x06, 0x8609); + rtl8168_mdio_write(tp, 0x06, 0xe686); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8609); + rtl8168_mdio_write(tp, 0x06, 0xa000); + rtl8168_mdio_write(tp, 0x06, 0x07ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x03af); + rtl8168_mdio_write(tp, 0x06, 0x8369); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x2426); + rtl8168_mdio_write(tp, 0x06, 0xae48); + rtl8168_mdio_write(tp, 0x06, 0xa003); + rtl8168_mdio_write(tp, 0x06, 0x21e0); + rtl8168_mdio_write(tp, 0x06, 0x8608); + rtl8168_mdio_write(tp, 0x06, 0xe186); + rtl8168_mdio_write(tp, 0x06, 0x091b); + rtl8168_mdio_write(tp, 0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x0caa); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x249d); + rtl8168_mdio_write(tp, 0x06, 0xaee7); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x8eae); + rtl8168_mdio_write(tp, 0x06, 0xe2ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x860b); + rtl8168_mdio_write(tp, 0x06, 0x00af); + rtl8168_mdio_write(tp, 0x06, 0x8369); + rtl8168_mdio_write(tp, 0x06, 0xa004); + rtl8168_mdio_write(tp, 0x06, 0x15e0); + rtl8168_mdio_write(tp, 0x06, 0x860b); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x341b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x05aa); + rtl8168_mdio_write(tp, 0x06, 0x03af); + rtl8168_mdio_write(tp, 0x06, 0x8383); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0a05); + rtl8168_mdio_write(tp, 0x06, 0xae0c); + rtl8168_mdio_write(tp, 0x06, 0xa005); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x0702); + rtl8168_mdio_write(tp, 0x06, 0x2309); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0a00); + rtl8168_mdio_write(tp, 0x06, 0xfeef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfbe0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x22e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x23e2); + rtl8168_mdio_write(tp, 0x06, 0xe036); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0x375a); + rtl8168_mdio_write(tp, 0x06, 0xc40d); + rtl8168_mdio_write(tp, 0x06, 0x0158); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x20e3); + rtl8168_mdio_write(tp, 0x06, 0x8ae7); + rtl8168_mdio_write(tp, 0x06, 0xac31); + rtl8168_mdio_write(tp, 0x06, 0x60ac); + rtl8168_mdio_write(tp, 0x06, 0x3a08); + rtl8168_mdio_write(tp, 0x06, 0xac3e); + rtl8168_mdio_write(tp, 0x06, 0x26ae); + rtl8168_mdio_write(tp, 0x06, 0x67af); + rtl8168_mdio_write(tp, 0x06, 0x84db); + rtl8168_mdio_write(tp, 0x06, 0xad37); + rtl8168_mdio_write(tp, 0x06, 0x61e0); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xe91b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x51d1); + rtl8168_mdio_write(tp, 0x06, 
0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x863b); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50ee); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x43ad); + rtl8168_mdio_write(tp, 0x06, 0x3627); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xeee1); + rtl8168_mdio_write(tp, 0x06, 0x8aef); + rtl8168_mdio_write(tp, 0x06, 0xef74); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xeae1); + rtl8168_mdio_write(tp, 0x06, 0x8aeb); + rtl8168_mdio_write(tp, 0x06, 0x1b74); + rtl8168_mdio_write(tp, 0x06, 0x9e2e); + rtl8168_mdio_write(tp, 0x06, 0x14e4); + rtl8168_mdio_write(tp, 0x06, 0x8aea); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xebef); + rtl8168_mdio_write(tp, 0x06, 0x74e0); + rtl8168_mdio_write(tp, 0x06, 0x8aee); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xef1b); + rtl8168_mdio_write(tp, 0x06, 0x479e); + rtl8168_mdio_write(tp, 0x06, 0x0fae); + rtl8168_mdio_write(tp, 0x06, 0x19ee); + rtl8168_mdio_write(tp, 0x06, 0x8aea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8aeb); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x0fac); + rtl8168_mdio_write(tp, 0x06, 0x390c); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x3b02); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe800); + rtl8168_mdio_write(tp, 0x06, 0xe68a); + rtl8168_mdio_write(tp, 0x06, 0xe7ff); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xc4e1); + rtl8168_mdio_write(tp, 0x06, 0x8b6e); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e24); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x6ead); + rtl8168_mdio_write(tp, 0x06, 0x2218); + rtl8168_mdio_write(tp, 0x06, 0xac27); + rtl8168_mdio_write(tp, 0x06, 0x0dac); + rtl8168_mdio_write(tp, 0x06, 0x2605); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x8fae); + rtl8168_mdio_write(tp, 0x06, 0x1302); + rtl8168_mdio_write(tp, 0x06, 0x03c8); + rtl8168_mdio_write(tp, 0x06, 0xae0e); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0xe102); + rtl8168_mdio_write(tp, 0x06, 0x8520); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x8f02); + rtl8168_mdio_write(tp, 0x06, 0x8566); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x82ad); + rtl8168_mdio_write(tp, 0x06, 0x2737); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4402); + rtl8168_mdio_write(tp, 0x06, 0x2f23); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x2ed1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8647); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 
0x06, 0x50bf); + rtl8168_mdio_write(tp, 0x06, 0x8641); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x23e5); + rtl8168_mdio_write(tp, 0x06, 0x8af0); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x22e1); + rtl8168_mdio_write(tp, 0x06, 0xe023); + rtl8168_mdio_write(tp, 0x06, 0xac2e); + rtl8168_mdio_write(tp, 0x06, 0x04d1); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0x02d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x8641); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50d1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8644); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4702); + rtl8168_mdio_write(tp, 0x06, 0x2f23); + rtl8168_mdio_write(tp, 0x06, 0xad28); + rtl8168_mdio_write(tp, 0x06, 0x19d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x8644); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50e1); + rtl8168_mdio_write(tp, 0x06, 0x8af0); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4102); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4702); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xfee1); + rtl8168_mdio_write(tp, 0x06, 0xe2ff); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x63e0); + rtl8168_mdio_write(tp, 0x06, 0xe038); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x39ad); + rtl8168_mdio_write(tp, 0x06, 0x2f10); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xf726); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xae0e); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e1); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xf728); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e5); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xf72b); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xd07d); + rtl8168_mdio_write(tp, 0x06, 0xb0fe); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xf62b); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xf626); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + 
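+ /* The patch image ends shortly below; the block of sixteen 0x0000 writes appears to pad it to a fixed length, after which BIT_0 is set in page-0x0005 regs 0x01 and 0x00, presumably releasing the patched MCU to run. */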
rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e1); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xf628); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e5); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xae20); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0xa725); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x1de5); + rtl8168_mdio_write(tp, 0x06, 0x0a2c); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x6de5); + rtl8168_mdio_write(tp, 0x06, 0x0a1d); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x1ce5); + rtl8168_mdio_write(tp, 0x06, 0x0a2d); + rtl8168_mdio_write(tp, 0x06, 0xa755); + rtl8168_mdio_write(tp, 0x06, 0x00e2); + rtl8168_mdio_write(tp, 0x06, 0x3488); + rtl8168_mdio_write(tp, 0x06, 0xe200); + rtl8168_mdio_write(tp, 0x06, 0xcce2); + rtl8168_mdio_write(tp, 0x06, 0x0055); + rtl8168_mdio_write(tp, 0x06, 0xe020); + rtl8168_mdio_write(tp, 0x06, 0x55e2); + rtl8168_mdio_write(tp, 0x06, 0xd600); + rtl8168_mdio_write(tp, 0x06, 0xe24a); + gphy_val = rtl8168_mdio_read(tp, 0x01); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x01, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2179); + rtl8168_mdio_write(tp, 0x1f, 0x0001); + rtl8168_mdio_write(tp, 0x10, 0xf274); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0042); + rtl8168_mdio_write(tp, 0x15, 0x0f00); + rtl8168_mdio_write(tp, 0x15, 0x0f00); + rtl8168_mdio_write(tp, 0x16, 0x7408); + rtl8168_mdio_write(tp, 0x15, 0x0e00); + rtl8168_mdio_write(tp, 0x15, 0x0f00); + rtl8168_mdio_write(tp, 0x15, 0x0f01); + rtl8168_mdio_write(tp, 0x16, 0x4000); + rtl8168_mdio_write(tp, 0x15, 0x0e01); + rtl8168_mdio_write(tp, 0x15, 0x0f01); + rtl8168_mdio_write(tp, 0x15, 0x0f02); + rtl8168_mdio_write(tp, 0x16, 0x9400); + rtl8168_mdio_write(tp, 0x15, 0x0e02); + rtl8168_mdio_write(tp, 0x15, 0x0f02); + rtl8168_mdio_write(tp, 0x15, 0x0f03); + rtl8168_mdio_write(tp, 0x16, 0x7408); + rtl8168_mdio_write(tp, 0x15, 0x0e03); + rtl8168_mdio_write(tp, 0x15, 0x0f03); + rtl8168_mdio_write(tp, 0x15, 0x0f04); + rtl8168_mdio_write(tp, 0x16, 0x4008); + rtl8168_mdio_write(tp, 0x15, 0x0e04); + rtl8168_mdio_write(tp, 0x15, 0x0f04); + rtl8168_mdio_write(tp, 0x15, 0x0f05); + rtl8168_mdio_write(tp, 0x16, 0x9400); + rtl8168_mdio_write(tp, 0x15, 0x0e05); + rtl8168_mdio_write(tp, 0x15, 0x0f05); + rtl8168_mdio_write(tp, 0x15, 0x0f06); + rtl8168_mdio_write(tp, 0x16, 0x0803); + rtl8168_mdio_write(tp, 0x15, 0x0e06); + rtl8168_mdio_write(tp, 0x15, 0x0f06); + rtl8168_mdio_write(tp, 
0x15, 0x0d00); + rtl8168_mdio_write(tp, 0x15, 0x0100); + rtl8168_mdio_write(tp, 0x1f, 0x0001); + rtl8168_mdio_write(tp, 0x10, 0xf074); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2149); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~(BIT_2); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val |= BIT_14; + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1e, 0x0020); + gphy_val = rtl8168_mdio_read(tp, 0x1b); + gphy_val |= BIT_7; + rtl8168_mdio_write(tp, 0x1b, gphy_val); + rtl8168_mdio_write(tp, 0x1e, 0x0041); + rtl8168_mdio_write(tp, 0x15, 0x0e02); + rtl8168_mdio_write(tp, 0x1e, 0x0028); + gphy_val = rtl8168_mdio_read(tp, 0x19); + gphy_val |= BIT_15; + rtl8168_mdio_write(tp, 0x19, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } else { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x1800); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x17, 0x0117); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + rtl8168_mdio_write(tp, 0x1B, 0x5000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x4104); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1E); + gphy_val &= 0x03FF; + if (gphy_val==0x000C) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x07); + if ((gphy_val & BIT_5) == 0) + break; + } + gphy_val = rtl8168_mdio_read(tp, 0x07); + if (gphy_val & BIT_5) { + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x00a1); + rtl8168_mdio_write(tp, 0x17, 0x1000); + rtl8168_mdio_write(tp, 0x17, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2000); + rtl8168_mdio_write(tp, 0x1e, 0x002f); + rtl8168_mdio_write(tp, 0x18, 0x9bfb); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x07, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x08); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x08, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + rtl8168_mdio_write(tp, 0x15, 0x000e); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15, 0x0010); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x0018); + rtl8168_mdio_write(tp, 0x19, 0x4801); + rtl8168_mdio_write(tp, 0x15, 0x0019); + rtl8168_mdio_write(tp, 0x19, 0x6801); + rtl8168_mdio_write(tp, 0x15, 0x001a); + rtl8168_mdio_write(tp, 0x19, 0x66a1); + rtl8168_mdio_write(tp, 0x15, 0x001f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 
0x15, 0x0020); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0021); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0022); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0023); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0024); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0025); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0026); + rtl8168_mdio_write(tp, 0x19, 0x40ea); + rtl8168_mdio_write(tp, 0x15, 0x0027); + rtl8168_mdio_write(tp, 0x19, 0x4503); + rtl8168_mdio_write(tp, 0x15, 0x0028); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0029); + rtl8168_mdio_write(tp, 0x19, 0xa631); + rtl8168_mdio_write(tp, 0x15, 0x002a); + rtl8168_mdio_write(tp, 0x19, 0x9717); + rtl8168_mdio_write(tp, 0x15, 0x002b); + rtl8168_mdio_write(tp, 0x19, 0x302c); + rtl8168_mdio_write(tp, 0x15, 0x002c); + rtl8168_mdio_write(tp, 0x19, 0x4802); + rtl8168_mdio_write(tp, 0x15, 0x002d); + rtl8168_mdio_write(tp, 0x19, 0x58da); + rtl8168_mdio_write(tp, 0x15, 0x002e); + rtl8168_mdio_write(tp, 0x19, 0x400d); + rtl8168_mdio_write(tp, 0x15, 0x002f); + rtl8168_mdio_write(tp, 0x19, 0x4488); + rtl8168_mdio_write(tp, 0x15, 0x0030); + rtl8168_mdio_write(tp, 0x19, 0x9e00); + rtl8168_mdio_write(tp, 0x15, 0x0031); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0032); + rtl8168_mdio_write(tp, 0x19, 0x6481); + rtl8168_mdio_write(tp, 0x15, 0x0033); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0034); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0035); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0036); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0037); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0038); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0039); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x003a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x003b); + rtl8168_mdio_write(tp, 0x19, 0x63e8); + rtl8168_mdio_write(tp, 0x15, 0x003c); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x003d); + rtl8168_mdio_write(tp, 0x19, 0x59d4); + rtl8168_mdio_write(tp, 0x15, 0x003e); + rtl8168_mdio_write(tp, 0x19, 0x63f8); + rtl8168_mdio_write(tp, 0x15, 0x0040); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0041); + rtl8168_mdio_write(tp, 0x19, 0x30de); + rtl8168_mdio_write(tp, 0x15, 0x0044); + rtl8168_mdio_write(tp, 0x19, 0x480f); + rtl8168_mdio_write(tp, 0x15, 0x0045); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x0046); + rtl8168_mdio_write(tp, 0x19, 0x6680); + rtl8168_mdio_write(tp, 0x15, 0x0047); + rtl8168_mdio_write(tp, 0x19, 0x7c10); + rtl8168_mdio_write(tp, 0x15, 0x0048); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0049); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004f); + 
rtl8168_mdio_write(tp, 0x19, 0x40ea); + rtl8168_mdio_write(tp, 0x15, 0x0050); + rtl8168_mdio_write(tp, 0x19, 0x4503); + rtl8168_mdio_write(tp, 0x15, 0x0051); + rtl8168_mdio_write(tp, 0x19, 0x58ca); + rtl8168_mdio_write(tp, 0x15, 0x0052); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0053); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x0054); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x0055); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0056); + rtl8168_mdio_write(tp, 0x19, 0x3000); + rtl8168_mdio_write(tp, 0x15, 0x00a1); + rtl8168_mdio_write(tp, 0x19, 0x3044); + rtl8168_mdio_write(tp, 0x15, 0x00ab); + rtl8168_mdio_write(tp, 0x19, 0x5820); + rtl8168_mdio_write(tp, 0x15, 0x00ac); + rtl8168_mdio_write(tp, 0x19, 0x5e04); + rtl8168_mdio_write(tp, 0x15, 0x00ad); + rtl8168_mdio_write(tp, 0x19, 0xb60c); + rtl8168_mdio_write(tp, 0x15, 0x00af); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15, 0x00b2); + rtl8168_mdio_write(tp, 0x19, 0x30b9); + rtl8168_mdio_write(tp, 0x15, 0x00b9); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x00ba); + rtl8168_mdio_write(tp, 0x19, 0x480b); + rtl8168_mdio_write(tp, 0x15, 0x00bb); + rtl8168_mdio_write(tp, 0x19, 0x5e00); + rtl8168_mdio_write(tp, 0x15, 0x00bc); + rtl8168_mdio_write(tp, 0x19, 0x405f); + rtl8168_mdio_write(tp, 0x15, 0x00bd); + rtl8168_mdio_write(tp, 0x19, 0x4448); + rtl8168_mdio_write(tp, 0x15, 0x00be); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x00bf); + rtl8168_mdio_write(tp, 0x19, 0x4468); + rtl8168_mdio_write(tp, 0x15, 0x00c0); + rtl8168_mdio_write(tp, 0x19, 0x9c02); + rtl8168_mdio_write(tp, 0x15, 0x00c1); + rtl8168_mdio_write(tp, 0x19, 0x58a0); + rtl8168_mdio_write(tp, 0x15, 0x00c2); + rtl8168_mdio_write(tp, 0x19, 0xb605); + rtl8168_mdio_write(tp, 0x15, 0x00c3); + rtl8168_mdio_write(tp, 0x19, 0xc0d3); + rtl8168_mdio_write(tp, 0x15, 0x00c4); + rtl8168_mdio_write(tp, 0x19, 0x00e6); + rtl8168_mdio_write(tp, 0x15, 0x00c5); + rtl8168_mdio_write(tp, 0x19, 0xdaec); + rtl8168_mdio_write(tp, 0x15, 0x00c6); + rtl8168_mdio_write(tp, 0x19, 0x00fa); + rtl8168_mdio_write(tp, 0x15, 0x00c7); + rtl8168_mdio_write(tp, 0x19, 0x9df9); + rtl8168_mdio_write(tp, 0x15, 0x0112); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0113); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0114); + rtl8168_mdio_write(tp, 0x19, 0x63f0); + rtl8168_mdio_write(tp, 0x15, 0x0115); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0116); + rtl8168_mdio_write(tp, 0x19, 0x4418); + rtl8168_mdio_write(tp, 0x15, 0x0117); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x15, 0x0118); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x0119); + rtl8168_mdio_write(tp, 0x19, 0x64e1); + rtl8168_mdio_write(tp, 0x15, 0x011a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0150); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0151); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x0152); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0153); + rtl8168_mdio_write(tp, 0x19, 0x4540); + rtl8168_mdio_write(tp, 0x15, 0x0154); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0155); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x15, 0x0156); + rtl8168_mdio_write(tp, 0x19, 
0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0157); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0158); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0159); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x015a); + rtl8168_mdio_write(tp, 0x19, 0x30fe); + rtl8168_mdio_write(tp, 0x15, 0x029c); + rtl8168_mdio_write(tp, 0x19, 0x0070); + rtl8168_mdio_write(tp, 0x15, 0x02b2); + rtl8168_mdio_write(tp, 0x19, 0x005a); + rtl8168_mdio_write(tp, 0x15, 0x02bd); + rtl8168_mdio_write(tp, 0x19, 0xa522); + rtl8168_mdio_write(tp, 0x15, 0x02ce); + rtl8168_mdio_write(tp, 0x19, 0xb63e); + rtl8168_mdio_write(tp, 0x15, 0x02d9); + rtl8168_mdio_write(tp, 0x19, 0x32df); + rtl8168_mdio_write(tp, 0x15, 0x02df); + rtl8168_mdio_write(tp, 0x19, 0x4500); + rtl8168_mdio_write(tp, 0x15, 0x02f4); + rtl8168_mdio_write(tp, 0x19, 0xb618); + rtl8168_mdio_write(tp, 0x15, 0x02fb); + rtl8168_mdio_write(tp, 0x19, 0xb900); + rtl8168_mdio_write(tp, 0x15, 0x02fc); + rtl8168_mdio_write(tp, 0x19, 0x49b5); + rtl8168_mdio_write(tp, 0x15, 0x02fd); + rtl8168_mdio_write(tp, 0x19, 0x6812); + rtl8168_mdio_write(tp, 0x15, 0x02fe); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x02ff); + rtl8168_mdio_write(tp, 0x19, 0x9900); + rtl8168_mdio_write(tp, 0x15, 0x0300); + rtl8168_mdio_write(tp, 0x19, 0x64a0); + rtl8168_mdio_write(tp, 0x15, 0x0301); + rtl8168_mdio_write(tp, 0x19, 0x3316); + rtl8168_mdio_write(tp, 0x15, 0x0308); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030c); + rtl8168_mdio_write(tp, 0x19, 0x3000); + rtl8168_mdio_write(tp, 0x15, 0x0312); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0313); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0314); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0315); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0316); + rtl8168_mdio_write(tp, 0x19, 0x49b5); + rtl8168_mdio_write(tp, 0x15, 0x0317); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x0318); + rtl8168_mdio_write(tp, 0x19, 0x4d00); + rtl8168_mdio_write(tp, 0x15, 0x0319); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x031a); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x031b); + rtl8168_mdio_write(tp, 0x19, 0x4925); + rtl8168_mdio_write(tp, 0x15, 0x031c); + rtl8168_mdio_write(tp, 0x19, 0x403b); + rtl8168_mdio_write(tp, 0x15, 0x031d); + rtl8168_mdio_write(tp, 0x19, 0xa602); + rtl8168_mdio_write(tp, 0x15, 0x031e); + rtl8168_mdio_write(tp, 0x19, 0x402f); + rtl8168_mdio_write(tp, 0x15, 0x031f); + rtl8168_mdio_write(tp, 0x19, 0x4484); + rtl8168_mdio_write(tp, 0x15, 0x0320); + rtl8168_mdio_write(tp, 0x19, 0x40c8); + rtl8168_mdio_write(tp, 0x15, 0x0321); + rtl8168_mdio_write(tp, 0x19, 0x44c4); + rtl8168_mdio_write(tp, 0x15, 0x0322); + rtl8168_mdio_write(tp, 0x19, 0x404f); + rtl8168_mdio_write(tp, 0x15, 0x0323); + rtl8168_mdio_write(tp, 0x19, 0x44c8); + rtl8168_mdio_write(tp, 0x15, 0x0324); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0325); + rtl8168_mdio_write(tp, 0x19, 0x00e7); + rtl8168_mdio_write(tp, 0x15, 0x0326); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0327); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x0328); + rtl8168_mdio_write(tp, 0x19, 0x4d48); + rtl8168_mdio_write(tp, 0x15, 0x0329); + rtl8168_mdio_write(tp, 0x19, 0x332b); + rtl8168_mdio_write(tp, 
0x15, 0x032a); + rtl8168_mdio_write(tp, 0x19, 0x4d40); + rtl8168_mdio_write(tp, 0x15, 0x032c); + rtl8168_mdio_write(tp, 0x19, 0x00f8); + rtl8168_mdio_write(tp, 0x15, 0x032d); + rtl8168_mdio_write(tp, 0x19, 0x82b2); + rtl8168_mdio_write(tp, 0x15, 0x032f); + rtl8168_mdio_write(tp, 0x19, 0x00b0); + rtl8168_mdio_write(tp, 0x15, 0x0332); + rtl8168_mdio_write(tp, 0x19, 0x91f2); + rtl8168_mdio_write(tp, 0x15, 0x033f); + rtl8168_mdio_write(tp, 0x19, 0xb6cd); + rtl8168_mdio_write(tp, 0x15, 0x0340); + rtl8168_mdio_write(tp, 0x19, 0x9e01); + rtl8168_mdio_write(tp, 0x15, 0x0341); + rtl8168_mdio_write(tp, 0x19, 0xd11d); + rtl8168_mdio_write(tp, 0x15, 0x0342); + rtl8168_mdio_write(tp, 0x19, 0x009d); + rtl8168_mdio_write(tp, 0x15, 0x0343); + rtl8168_mdio_write(tp, 0x19, 0xbb1c); + rtl8168_mdio_write(tp, 0x15, 0x0344); + rtl8168_mdio_write(tp, 0x19, 0x8102); + rtl8168_mdio_write(tp, 0x15, 0x0345); + rtl8168_mdio_write(tp, 0x19, 0x3348); + rtl8168_mdio_write(tp, 0x15, 0x0346); + rtl8168_mdio_write(tp, 0x19, 0xa231); + rtl8168_mdio_write(tp, 0x15, 0x0347); + rtl8168_mdio_write(tp, 0x19, 0x335b); + rtl8168_mdio_write(tp, 0x15, 0x0348); + rtl8168_mdio_write(tp, 0x19, 0x91f7); + rtl8168_mdio_write(tp, 0x15, 0x0349); + rtl8168_mdio_write(tp, 0x19, 0xc218); + rtl8168_mdio_write(tp, 0x15, 0x034a); + rtl8168_mdio_write(tp, 0x19, 0x00f5); + rtl8168_mdio_write(tp, 0x15, 0x034b); + rtl8168_mdio_write(tp, 0x19, 0x335b); + rtl8168_mdio_write(tp, 0x15, 0x034c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x034d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x034e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x034f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0350); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x035b); + rtl8168_mdio_write(tp, 0x19, 0xa23c); + rtl8168_mdio_write(tp, 0x15, 0x035c); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x035d); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x035e); + rtl8168_mdio_write(tp, 0x19, 0x3397); + rtl8168_mdio_write(tp, 0x15, 0x0363); + rtl8168_mdio_write(tp, 0x19, 0xb6a9); + rtl8168_mdio_write(tp, 0x15, 0x0366); + rtl8168_mdio_write(tp, 0x19, 0x00f5); + rtl8168_mdio_write(tp, 0x15, 0x0382); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0388); + rtl8168_mdio_write(tp, 0x19, 0x0084); + rtl8168_mdio_write(tp, 0x15, 0x0389); + rtl8168_mdio_write(tp, 0x19, 0xdd17); + rtl8168_mdio_write(tp, 0x15, 0x038a); + rtl8168_mdio_write(tp, 0x19, 0x000b); + rtl8168_mdio_write(tp, 0x15, 0x038b); + rtl8168_mdio_write(tp, 0x19, 0xa10a); + rtl8168_mdio_write(tp, 0x15, 0x038c); + rtl8168_mdio_write(tp, 0x19, 0x337e); + rtl8168_mdio_write(tp, 0x15, 0x038d); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x038e); + rtl8168_mdio_write(tp, 0x19, 0xa107); + rtl8168_mdio_write(tp, 0x15, 0x038f); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x0390); + rtl8168_mdio_write(tp, 0x19, 0xc017); + rtl8168_mdio_write(tp, 0x15, 0x0391); + rtl8168_mdio_write(tp, 0x19, 0x0004); + rtl8168_mdio_write(tp, 0x15, 0x0392); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0393); + rtl8168_mdio_write(tp, 0x19, 0x00f4); + rtl8168_mdio_write(tp, 0x15, 0x0397); + rtl8168_mdio_write(tp, 0x19, 0x4098); + rtl8168_mdio_write(tp, 0x15, 0x0398); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x0399); + 
rtl8168_mdio_write(tp, 0x19, 0x55bf); + rtl8168_mdio_write(tp, 0x15, 0x039a); + rtl8168_mdio_write(tp, 0x19, 0x4bb9); + rtl8168_mdio_write(tp, 0x15, 0x039b); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x039c); + rtl8168_mdio_write(tp, 0x19, 0x4b29); + rtl8168_mdio_write(tp, 0x15, 0x039d); + rtl8168_mdio_write(tp, 0x19, 0x4041); + rtl8168_mdio_write(tp, 0x15, 0x039e); + rtl8168_mdio_write(tp, 0x19, 0x442a); + rtl8168_mdio_write(tp, 0x15, 0x039f); + rtl8168_mdio_write(tp, 0x19, 0x4029); + rtl8168_mdio_write(tp, 0x15, 0x03aa); + rtl8168_mdio_write(tp, 0x19, 0x33b8); + rtl8168_mdio_write(tp, 0x15, 0x03b6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b8); + rtl8168_mdio_write(tp, 0x19, 0x543f); + rtl8168_mdio_write(tp, 0x15, 0x03b9); + rtl8168_mdio_write(tp, 0x19, 0x499a); + rtl8168_mdio_write(tp, 0x15, 0x03ba); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x03bb); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03bc); + rtl8168_mdio_write(tp, 0x19, 0x490a); + rtl8168_mdio_write(tp, 0x15, 0x03bd); + rtl8168_mdio_write(tp, 0x19, 0x405e); + rtl8168_mdio_write(tp, 0x15, 0x03c2); + rtl8168_mdio_write(tp, 0x19, 0x9a03); + rtl8168_mdio_write(tp, 0x15, 0x03c4); + rtl8168_mdio_write(tp, 0x19, 0x0015); + rtl8168_mdio_write(tp, 0x15, 0x03c5); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x03c8); + rtl8168_mdio_write(tp, 0x19, 0x9cf7); + rtl8168_mdio_write(tp, 0x15, 0x03c9); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03ca); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03cb); + rtl8168_mdio_write(tp, 0x19, 0x4458); + rtl8168_mdio_write(tp, 0x15, 0x03cd); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03ce); + rtl8168_mdio_write(tp, 0x19, 0x33bf); + rtl8168_mdio_write(tp, 0x15, 0x03cf); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d0); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d1); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d9); + rtl8168_mdio_write(tp, 0x19, 0x49bb); + rtl8168_mdio_write(tp, 0x15, 0x03da); + rtl8168_mdio_write(tp, 0x19, 0x4478); + rtl8168_mdio_write(tp, 0x15, 0x03db); + rtl8168_mdio_write(tp, 0x19, 0x492b); + rtl8168_mdio_write(tp, 0x15, 0x03dc); + rtl8168_mdio_write(tp, 0x19, 0x7c01); + rtl8168_mdio_write(tp, 0x15, 0x03dd); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x03de); + rtl8168_mdio_write(tp, 0x19, 0xbd1a); + rtl8168_mdio_write(tp, 0x15, 0x03df); + rtl8168_mdio_write(tp, 0x19, 0xc428); + rtl8168_mdio_write(tp, 0x15, 0x03e0); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x03e1); + rtl8168_mdio_write(tp, 0x19, 0x9cfd); + rtl8168_mdio_write(tp, 0x15, 0x03e2); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03e3); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03e4); + rtl8168_mdio_write(tp, 0x19, 0x4458); + rtl8168_mdio_write(tp, 0x15, 0x03e5); + rtl8168_mdio_write(tp, 0x19, 
0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03e6); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03e7); + rtl8168_mdio_write(tp, 0x19, 0x33de); + rtl8168_mdio_write(tp, 0x15, 0x03e8); + rtl8168_mdio_write(tp, 0x19, 0xc218); + rtl8168_mdio_write(tp, 0x15, 0x03e9); + rtl8168_mdio_write(tp, 0x19, 0x0002); + rtl8168_mdio_write(tp, 0x15, 0x03ea); + rtl8168_mdio_write(tp, 0x19, 0x32df); + rtl8168_mdio_write(tp, 0x15, 0x03eb); + rtl8168_mdio_write(tp, 0x19, 0x3316); + rtl8168_mdio_write(tp, 0x15, 0x03ec); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ed); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ee); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ef); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03f7); + rtl8168_mdio_write(tp, 0x19, 0x330c); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0xf602); + rtl8168_mdio_write(tp, 0x06, 0x0200); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x9002); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x0202); + rtl8168_mdio_write(tp, 0x06, 0x3402); + rtl8168_mdio_write(tp, 0x06, 0x027f); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0xa602); + rtl8168_mdio_write(tp, 0x06, 0x80bf); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe600); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xee03); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xefb8); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe902); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8285); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8520); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8701); + rtl8168_mdio_write(tp, 0x06, 0xd481); + rtl8168_mdio_write(tp, 0x06, 0x35e4); + rtl8168_mdio_write(tp, 0x06, 0x8b94); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x95bf); + rtl8168_mdio_write(tp, 0x06, 0x8b88); + rtl8168_mdio_write(tp, 0x06, 0xec00); + rtl8168_mdio_write(tp, 0x06, 0x19a9); + rtl8168_mdio_write(tp, 0x06, 0x8b90); + rtl8168_mdio_write(tp, 0x06, 0xf9ee); + 
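+ /* Reg 0x05 was set to 0x8000 just above; the reg 0x06 writes that follow presumably stream the patch body starting at that address. */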
rtl8168_mdio_write(tp, 0x06, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x41f7); + rtl8168_mdio_write(tp, 0x06, 0x2ff6); + rtl8168_mdio_write(tp, 0x06, 0x28e4); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xe5e1); + rtl8168_mdio_write(tp, 0x06, 0x4104); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x0dee); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x82f4); + rtl8168_mdio_write(tp, 0x06, 0x021f); + rtl8168_mdio_write(tp, 0x06, 0x4102); + rtl8168_mdio_write(tp, 0x06, 0x2812); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x10ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x139d); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xd602); + rtl8168_mdio_write(tp, 0x06, 0x1f99); + rtl8168_mdio_write(tp, 0x06, 0x0227); + rtl8168_mdio_write(tp, 0x06, 0xeafc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2014); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x8104); + rtl8168_mdio_write(tp, 0x06, 0x021b); + rtl8168_mdio_write(tp, 0x06, 0xf402); + rtl8168_mdio_write(tp, 0x06, 0x2c9c); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x7902); + rtl8168_mdio_write(tp, 0x06, 0x8443); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x11f6); + rtl8168_mdio_write(tp, 0x06, 0x22e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x022c); + rtl8168_mdio_write(tp, 0x06, 0x4602); + rtl8168_mdio_write(tp, 0x06, 0x2ac5); + rtl8168_mdio_write(tp, 0x06, 0x0229); + rtl8168_mdio_write(tp, 0x06, 0x2002); + rtl8168_mdio_write(tp, 0x06, 0x2b91); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x11f6); + rtl8168_mdio_write(tp, 0x06, 0x25e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0xe202); + rtl8168_mdio_write(tp, 0x06, 0x043a); + rtl8168_mdio_write(tp, 0x06, 0x021a); + rtl8168_mdio_write(tp, 0x06, 0x5902); + rtl8168_mdio_write(tp, 0x06, 0x2bfc); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe001); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x1fd1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8638); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50e0); + rtl8168_mdio_write(tp, 0x06, 0xe020); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x21ad); + rtl8168_mdio_write(tp, 0x06, 0x200e); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 
0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xbf3d); + rtl8168_mdio_write(tp, 0x06, 0x3902); + rtl8168_mdio_write(tp, 0x06, 0x2eb0); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x0402); + rtl8168_mdio_write(tp, 0x06, 0x8591); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x3c05); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xfee1); + rtl8168_mdio_write(tp, 0x06, 0xe2ff); + rtl8168_mdio_write(tp, 0x06, 0xad2d); + rtl8168_mdio_write(tp, 0x06, 0x1ae0); + rtl8168_mdio_write(tp, 0x06, 0xe14e); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x4fac); + rtl8168_mdio_write(tp, 0x06, 0x2d22); + rtl8168_mdio_write(tp, 0x06, 0xf603); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x36f7); + rtl8168_mdio_write(tp, 0x06, 0x03f7); + rtl8168_mdio_write(tp, 0x06, 0x06bf); + rtl8168_mdio_write(tp, 0x06, 0x8622); + rtl8168_mdio_write(tp, 0x06, 0x022e); + rtl8168_mdio_write(tp, 0x06, 0xb0ae); + rtl8168_mdio_write(tp, 0x06, 0x11e0); + rtl8168_mdio_write(tp, 0x06, 0xe14e); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x4fad); + rtl8168_mdio_write(tp, 0x06, 0x2d08); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x2d02); + rtl8168_mdio_write(tp, 0x06, 0x2eb0); + rtl8168_mdio_write(tp, 0x06, 0xf606); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x204c); + rtl8168_mdio_write(tp, 0x06, 0xd200); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x0058); + rtl8168_mdio_write(tp, 0x06, 0x010c); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 0x5810); + rtl8168_mdio_write(tp, 0x06, 0x1e20); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x3658); + rtl8168_mdio_write(tp, 0x06, 0x031e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xe01e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0x8ae6); + rtl8168_mdio_write(tp, 0x06, 0x1f02); + rtl8168_mdio_write(tp, 0x06, 0x9e22); + rtl8168_mdio_write(tp, 0x06, 0xe68a); + rtl8168_mdio_write(tp, 0x06, 0xe6ad); + rtl8168_mdio_write(tp, 0x06, 0x3214); + rtl8168_mdio_write(tp, 0x06, 0xad34); + rtl8168_mdio_write(tp, 0x06, 0x11ef); + rtl8168_mdio_write(tp, 0x06, 0x0258); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x07ad); + rtl8168_mdio_write(tp, 0x06, 0x3508); + rtl8168_mdio_write(tp, 0x06, 0x5ac0); + rtl8168_mdio_write(tp, 0x06, 0x9f04); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xae02); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x3e02); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfae0); + rtl8168_mdio_write(tp, 
0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac26); + rtl8168_mdio_write(tp, 0x06, 0x0ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xac24); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x6bee); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xe0eb); + rtl8168_mdio_write(tp, 0x06, 0x00e2); + rtl8168_mdio_write(tp, 0x06, 0xe07c); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0x7da5); + rtl8168_mdio_write(tp, 0x06, 0x1111); + rtl8168_mdio_write(tp, 0x06, 0x15d2); + rtl8168_mdio_write(tp, 0x06, 0x60d6); + rtl8168_mdio_write(tp, 0x06, 0x6666); + rtl8168_mdio_write(tp, 0x06, 0x0207); + rtl8168_mdio_write(tp, 0x06, 0xf9d2); + rtl8168_mdio_write(tp, 0x06, 0xa0d6); + rtl8168_mdio_write(tp, 0x06, 0xaaaa); + rtl8168_mdio_write(tp, 0x06, 0x0207); + rtl8168_mdio_write(tp, 0x06, 0xf902); + rtl8168_mdio_write(tp, 0x06, 0x825c); + rtl8168_mdio_write(tp, 0x06, 0xae44); + rtl8168_mdio_write(tp, 0x06, 0xa566); + rtl8168_mdio_write(tp, 0x06, 0x6602); + rtl8168_mdio_write(tp, 0x06, 0xae38); + rtl8168_mdio_write(tp, 0x06, 0xa5aa); + rtl8168_mdio_write(tp, 0x06, 0xaa02); + rtl8168_mdio_write(tp, 0x06, 0xae32); + rtl8168_mdio_write(tp, 0x06, 0xeee0); + rtl8168_mdio_write(tp, 0x06, 0xea04); + rtl8168_mdio_write(tp, 0x06, 0xeee0); + rtl8168_mdio_write(tp, 0x06, 0xeb06); + rtl8168_mdio_write(tp, 0x06, 0xe2e0); + rtl8168_mdio_write(tp, 0x06, 0x7ce3); + rtl8168_mdio_write(tp, 0x06, 0xe07d); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x38e1); + rtl8168_mdio_write(tp, 0x06, 0xe039); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x21ad); + rtl8168_mdio_write(tp, 0x06, 0x3f13); + rtl8168_mdio_write(tp, 0x06, 0xe0e4); + rtl8168_mdio_write(tp, 0x06, 0x14e1); + rtl8168_mdio_write(tp, 0x06, 0xe415); + rtl8168_mdio_write(tp, 0x06, 0x6880); + rtl8168_mdio_write(tp, 0x06, 0xe4e4); + rtl8168_mdio_write(tp, 0x06, 0x14e5); + rtl8168_mdio_write(tp, 0x06, 0xe415); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x5cae); + rtl8168_mdio_write(tp, 0x06, 0x0bac); + rtl8168_mdio_write(tp, 0x06, 0x3e02); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x8602); + rtl8168_mdio_write(tp, 0x06, 0x82b0); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2605); + rtl8168_mdio_write(tp, 0x06, 0x0221); + rtl8168_mdio_write(tp, 0x06, 0xf3f7); + rtl8168_mdio_write(tp, 0x06, 0x28e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xad21); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x22f8); + rtl8168_mdio_write(tp, 0x06, 0xf729); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2405); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xebf7); + rtl8168_mdio_write(tp, 0x06, 0x2ae5); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xad26); + 
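+ /* The recurring 0xf8 ... 0xfc, 0x04 framing in this stream looks like subroutine prologue/epilogue markers in the patch bytecode; since the encoding is undocumented, the bytes are kept verbatim from the vendor driver. */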
rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x2134); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2109); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x2eac); + rtl8168_mdio_write(tp, 0x06, 0x2003); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0x52e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x09e0); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x8337); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2608); + rtl8168_mdio_write(tp, 0x06, 0xe085); + rtl8168_mdio_write(tp, 0x06, 0xd2ad); + rtl8168_mdio_write(tp, 0x06, 0x2502); + rtl8168_mdio_write(tp, 0x06, 0xf628); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x210a); + rtl8168_mdio_write(tp, 0x06, 0xe086); + rtl8168_mdio_write(tp, 0x06, 0x0af6); + rtl8168_mdio_write(tp, 0x06, 0x27a0); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0xf629); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2408); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xedad); + rtl8168_mdio_write(tp, 0x06, 0x2002); + rtl8168_mdio_write(tp, 0x06, 0xf62a); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x2ea1); + rtl8168_mdio_write(tp, 0x06, 0x0003); + rtl8168_mdio_write(tp, 0x06, 0x0221); + rtl8168_mdio_write(tp, 0x06, 0x11fc); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x8aed); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8aec); + rtl8168_mdio_write(tp, 0x06, 0x0004); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x3ae0); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xeb58); + rtl8168_mdio_write(tp, 0x06, 0xf8d1); + rtl8168_mdio_write(tp, 0x06, 0x01e4); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0xebe0); + rtl8168_mdio_write(tp, 0x06, 0xe07c); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x7d5c); + rtl8168_mdio_write(tp, 0x06, 0x00ff); + rtl8168_mdio_write(tp, 0x06, 0x3c00); + rtl8168_mdio_write(tp, 0x06, 0x1eab); + rtl8168_mdio_write(tp, 0x06, 0x1ce0); + rtl8168_mdio_write(tp, 0x06, 0xe04c); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x4d58); + rtl8168_mdio_write(tp, 0x06, 0xc1e4); + rtl8168_mdio_write(tp, 0x06, 0xe04c); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0x4de0); + rtl8168_mdio_write(tp, 0x06, 0xe0ee); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0x3ce4); + rtl8168_mdio_write(tp, 0x06, 0xe0ee); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0xeffc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 
0x2412); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0xeee1); + rtl8168_mdio_write(tp, 0x06, 0xe0ef); + rtl8168_mdio_write(tp, 0x06, 0x59c3); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0xeee5); + rtl8168_mdio_write(tp, 0x06, 0xe0ef); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xed01); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac25); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x8363); + rtl8168_mdio_write(tp, 0x06, 0xae03); + rtl8168_mdio_write(tp, 0x06, 0x0225); + rtl8168_mdio_write(tp, 0x06, 0x16fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfae0); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0xa000); + rtl8168_mdio_write(tp, 0x06, 0x19e0); + rtl8168_mdio_write(tp, 0x06, 0x860b); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x331b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x04aa); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x06ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0xe602); + rtl8168_mdio_write(tp, 0x06, 0x241e); + rtl8168_mdio_write(tp, 0x06, 0xae14); + rtl8168_mdio_write(tp, 0x06, 0xa001); + rtl8168_mdio_write(tp, 0x06, 0x1402); + rtl8168_mdio_write(tp, 0x06, 0x2426); + rtl8168_mdio_write(tp, 0x06, 0xbf26); + rtl8168_mdio_write(tp, 0x06, 0x6d02); + rtl8168_mdio_write(tp, 0x06, 0x2eb0); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0b00); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0a02); + rtl8168_mdio_write(tp, 0x06, 0xaf84); + rtl8168_mdio_write(tp, 0x06, 0x3ca0); + rtl8168_mdio_write(tp, 0x06, 0x0252); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0400); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0500); + rtl8168_mdio_write(tp, 0x06, 0xe086); + rtl8168_mdio_write(tp, 0x06, 0x0be1); + rtl8168_mdio_write(tp, 0x06, 0x8b32); + rtl8168_mdio_write(tp, 0x06, 0x1b10); + rtl8168_mdio_write(tp, 0x06, 0x9e04); + rtl8168_mdio_write(tp, 0x06, 0xaa02); + rtl8168_mdio_write(tp, 0x06, 0xaecb); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0b00); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x3ae2); + rtl8168_mdio_write(tp, 0x06, 0x8604); + rtl8168_mdio_write(tp, 0x06, 0xe386); + rtl8168_mdio_write(tp, 0x06, 0x05ef); + rtl8168_mdio_write(tp, 0x06, 0x65e2); + rtl8168_mdio_write(tp, 0x06, 0x8606); + rtl8168_mdio_write(tp, 0x06, 0xe386); + rtl8168_mdio_write(tp, 0x06, 0x071b); + rtl8168_mdio_write(tp, 0x06, 0x56aa); + rtl8168_mdio_write(tp, 0x06, 0x0eef); + rtl8168_mdio_write(tp, 0x06, 0x56e6); + rtl8168_mdio_write(tp, 0x06, 0x8606); + rtl8168_mdio_write(tp, 0x06, 0xe786); + rtl8168_mdio_write(tp, 0x06, 0x07e2); + rtl8168_mdio_write(tp, 0x06, 0x8609); + rtl8168_mdio_write(tp, 0x06, 0xe686); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8609); + rtl8168_mdio_write(tp, 0x06, 0xa000); + rtl8168_mdio_write(tp, 0x06, 0x07ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x03af); + rtl8168_mdio_write(tp, 0x06, 0x8369); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 
0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x2426); + rtl8168_mdio_write(tp, 0x06, 0xae48); + rtl8168_mdio_write(tp, 0x06, 0xa003); + rtl8168_mdio_write(tp, 0x06, 0x21e0); + rtl8168_mdio_write(tp, 0x06, 0x8608); + rtl8168_mdio_write(tp, 0x06, 0xe186); + rtl8168_mdio_write(tp, 0x06, 0x091b); + rtl8168_mdio_write(tp, 0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x0caa); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x249d); + rtl8168_mdio_write(tp, 0x06, 0xaee7); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x8eae); + rtl8168_mdio_write(tp, 0x06, 0xe2ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x860b); + rtl8168_mdio_write(tp, 0x06, 0x00af); + rtl8168_mdio_write(tp, 0x06, 0x8369); + rtl8168_mdio_write(tp, 0x06, 0xa004); + rtl8168_mdio_write(tp, 0x06, 0x15e0); + rtl8168_mdio_write(tp, 0x06, 0x860b); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x341b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x05aa); + rtl8168_mdio_write(tp, 0x06, 0x03af); + rtl8168_mdio_write(tp, 0x06, 0x8383); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0a05); + rtl8168_mdio_write(tp, 0x06, 0xae0c); + rtl8168_mdio_write(tp, 0x06, 0xa005); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x0702); + rtl8168_mdio_write(tp, 0x06, 0x2309); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0a00); + rtl8168_mdio_write(tp, 0x06, 0xfeef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfbe0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x22e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x23e2); + rtl8168_mdio_write(tp, 0x06, 0xe036); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0x375a); + rtl8168_mdio_write(tp, 0x06, 0xc40d); + rtl8168_mdio_write(tp, 0x06, 0x0158); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x20e3); + rtl8168_mdio_write(tp, 0x06, 0x8ae7); + rtl8168_mdio_write(tp, 0x06, 0xac31); + rtl8168_mdio_write(tp, 0x06, 0x60ac); + rtl8168_mdio_write(tp, 0x06, 0x3a08); + rtl8168_mdio_write(tp, 0x06, 0xac3e); + rtl8168_mdio_write(tp, 0x06, 0x26ae); + rtl8168_mdio_write(tp, 0x06, 0x67af); + rtl8168_mdio_write(tp, 0x06, 0x84db); + rtl8168_mdio_write(tp, 0x06, 0xad37); + rtl8168_mdio_write(tp, 0x06, 0x61e0); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xe91b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x51d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x863b); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50ee); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x43ad); + rtl8168_mdio_write(tp, 0x06, 0x3627); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xeee1); + rtl8168_mdio_write(tp, 0x06, 0x8aef); + rtl8168_mdio_write(tp, 0x06, 0xef74); + 
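+ /* From here the stream largely repeats the byte sequence loaded by the other chip-revision branch earlier in this function; both branches install essentially the same MCU patch. */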
rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xeae1); + rtl8168_mdio_write(tp, 0x06, 0x8aeb); + rtl8168_mdio_write(tp, 0x06, 0x1b74); + rtl8168_mdio_write(tp, 0x06, 0x9e2e); + rtl8168_mdio_write(tp, 0x06, 0x14e4); + rtl8168_mdio_write(tp, 0x06, 0x8aea); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xebef); + rtl8168_mdio_write(tp, 0x06, 0x74e0); + rtl8168_mdio_write(tp, 0x06, 0x8aee); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xef1b); + rtl8168_mdio_write(tp, 0x06, 0x479e); + rtl8168_mdio_write(tp, 0x06, 0x0fae); + rtl8168_mdio_write(tp, 0x06, 0x19ee); + rtl8168_mdio_write(tp, 0x06, 0x8aea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8aeb); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x0fac); + rtl8168_mdio_write(tp, 0x06, 0x390c); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x3b02); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe800); + rtl8168_mdio_write(tp, 0x06, 0xe68a); + rtl8168_mdio_write(tp, 0x06, 0xe7ff); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xc4e1); + rtl8168_mdio_write(tp, 0x06, 0x8b6e); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e24); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x6ead); + rtl8168_mdio_write(tp, 0x06, 0x2218); + rtl8168_mdio_write(tp, 0x06, 0xac27); + rtl8168_mdio_write(tp, 0x06, 0x0dac); + rtl8168_mdio_write(tp, 0x06, 0x2605); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x8fae); + rtl8168_mdio_write(tp, 0x06, 0x1302); + rtl8168_mdio_write(tp, 0x06, 0x03c8); + rtl8168_mdio_write(tp, 0x06, 0xae0e); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0xe102); + rtl8168_mdio_write(tp, 0x06, 0x8520); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x8f02); + rtl8168_mdio_write(tp, 0x06, 0x8566); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x82ad); + rtl8168_mdio_write(tp, 0x06, 0x2737); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4402); + rtl8168_mdio_write(tp, 0x06, 0x2f23); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x2ed1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8647); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50bf); + rtl8168_mdio_write(tp, 0x06, 0x8641); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x23e5); + rtl8168_mdio_write(tp, 0x06, 0x8af0); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x22e1); + rtl8168_mdio_write(tp, 0x06, 0xe023); + rtl8168_mdio_write(tp, 0x06, 0xac2e); + rtl8168_mdio_write(tp, 0x06, 0x04d1); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 
0x02d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x8641); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50d1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8644); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4702); + rtl8168_mdio_write(tp, 0x06, 0x2f23); + rtl8168_mdio_write(tp, 0x06, 0xad28); + rtl8168_mdio_write(tp, 0x06, 0x19d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x8644); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50e1); + rtl8168_mdio_write(tp, 0x06, 0x8af0); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4102); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4702); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xfee1); + rtl8168_mdio_write(tp, 0x06, 0xe2ff); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x63e0); + rtl8168_mdio_write(tp, 0x06, 0xe038); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x39ad); + rtl8168_mdio_write(tp, 0x06, 0x2f10); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xf726); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xae0e); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e1); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xf728); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e5); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xf72b); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xd07d); + rtl8168_mdio_write(tp, 0x06, 0xb0fe); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xf62b); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xf626); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e1); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xf628); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e5); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xae20); + rtl8168_mdio_write(tp, 
0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0xa725); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x1de5); + rtl8168_mdio_write(tp, 0x06, 0x0a2c); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x6de5); + rtl8168_mdio_write(tp, 0x06, 0x0a1d); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x1ce5); + rtl8168_mdio_write(tp, 0x06, 0x0a2d); + rtl8168_mdio_write(tp, 0x06, 0xa755); + rtl8168_mdio_write(tp, 0x06, 0x00e2); + rtl8168_mdio_write(tp, 0x06, 0x3488); + rtl8168_mdio_write(tp, 0x06, 0xe200); + rtl8168_mdio_write(tp, 0x06, 0xcce2); + rtl8168_mdio_write(tp, 0x06, 0x0055); + rtl8168_mdio_write(tp, 0x06, 0xe020); + rtl8168_mdio_write(tp, 0x06, 0x55e2); + rtl8168_mdio_write(tp, 0x06, 0xd600); + rtl8168_mdio_write(tp, 0x06, 0xe24a); + gphy_val = rtl8168_mdio_read(tp, 0x01); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x01, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~(BIT_2); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } +} + +static void +rtl8168_set_phy_mcu_8168evl_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val,i; + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x1800); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val &= ~(BIT_12); + rtl8168_mdio_write(tp, 0x15, gphy_val); + mdelay(20); + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + if ((gphy_val & BIT_11) == 0x0000) { + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x17, gphy_val); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x17); + if (gphy_val & BIT_11) + break; + } + } + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + rtl8168_mdio_write(tp, 0x1B, 0x5000); + rtl8168_mdio_write(tp, 0x1E, 0x002d); + rtl8168_mdio_write(tp, 0x19, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1E); + if ((gphy_val & 0x03FF) == 0x0014) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = 
rtl8168_mdio_read(tp, 0x07); + if ((gphy_val & BIT_5) == 0) + break; + } + gphy_val = rtl8168_mdio_read(tp, 0x07); + if (gphy_val & BIT_5) { + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x00a1); + rtl8168_mdio_write(tp, 0x17, 0x1000); + rtl8168_mdio_write(tp, 0x17, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2000); + rtl8168_mdio_write(tp, 0x1e, 0x002f); + rtl8168_mdio_write(tp, 0x18, 0x9bfb); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x07, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + rtl8168_mdio_write(tp, 0x15, 0x0000); + rtl8168_mdio_write(tp, 0x19, 0x407d); + rtl8168_mdio_write(tp, 0x15, 0x0001); + rtl8168_mdio_write(tp, 0x19, 0x440f); + rtl8168_mdio_write(tp, 0x15, 0x0002); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0003); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x0004); + rtl8168_mdio_write(tp, 0x19, 0xc4d5); + rtl8168_mdio_write(tp, 0x15, 0x0005); + rtl8168_mdio_write(tp, 0x19, 0x00ff); + rtl8168_mdio_write(tp, 0x15, 0x0006); + rtl8168_mdio_write(tp, 0x19, 0x74f0); + rtl8168_mdio_write(tp, 0x15, 0x0007); + rtl8168_mdio_write(tp, 0x19, 0x4880); + rtl8168_mdio_write(tp, 0x15, 0x0008); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x0009); + rtl8168_mdio_write(tp, 0x19, 0x4800); + rtl8168_mdio_write(tp, 0x15, 0x000a); + rtl8168_mdio_write(tp, 0x19, 0x5000); + rtl8168_mdio_write(tp, 0x15, 0x000b); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x000c); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x000d); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x15, 0x000e); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x000f); + rtl8168_mdio_write(tp, 0x19, 0x7010); + rtl8168_mdio_write(tp, 0x15, 0x0010); + rtl8168_mdio_write(tp, 0x19, 0x6804); + rtl8168_mdio_write(tp, 0x15, 0x0011); + rtl8168_mdio_write(tp, 0x19, 0x64a0); + rtl8168_mdio_write(tp, 0x15, 0x0012); + rtl8168_mdio_write(tp, 0x19, 0x63da); + rtl8168_mdio_write(tp, 0x15, 0x0013); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x0014); + rtl8168_mdio_write(tp, 0x19, 0x6f05); + rtl8168_mdio_write(tp, 0x15, 0x0015); + rtl8168_mdio_write(tp, 0x19, 0x5420); + rtl8168_mdio_write(tp, 0x15, 0x0016); + rtl8168_mdio_write(tp, 0x19, 0x58ce); + rtl8168_mdio_write(tp, 0x15, 0x0017); + rtl8168_mdio_write(tp, 0x19, 0x5cf3); + rtl8168_mdio_write(tp, 0x15, 0x0018); + rtl8168_mdio_write(tp, 0x19, 0xb600); + rtl8168_mdio_write(tp, 0x15, 0x0019); + rtl8168_mdio_write(tp, 0x19, 0xc659); + rtl8168_mdio_write(tp, 0x15, 0x001a); + rtl8168_mdio_write(tp, 0x19, 0x0018); + rtl8168_mdio_write(tp, 0x15, 0x001b); + rtl8168_mdio_write(tp, 0x19, 0xc403); + rtl8168_mdio_write(tp, 0x15, 0x001c); + rtl8168_mdio_write(tp, 0x19, 0x0016); + rtl8168_mdio_write(tp, 0x15, 0x001d); + rtl8168_mdio_write(tp, 0x19, 0xaa05); + rtl8168_mdio_write(tp, 0x15, 0x001e); + rtl8168_mdio_write(tp, 0x19, 0xc503); + 
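+ /* The paired writes above and below appear to program the PHY's internal MCU patch RAM one slot at a time: a write to reg 0x15 selects the slot address (ascending from 0x0000) and the following write to reg 0x19 supplies the 16-bit word for that slot. The words themselves are opaque Realtek microcode; this reading is inferred from the access pattern, not taken from vendor documentation. */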
rtl8168_mdio_write(tp, 0x15, 0x001f); + rtl8168_mdio_write(tp, 0x19, 0x0003); + rtl8168_mdio_write(tp, 0x15, 0x0020); + rtl8168_mdio_write(tp, 0x19, 0x89f8); + rtl8168_mdio_write(tp, 0x15, 0x0021); + rtl8168_mdio_write(tp, 0x19, 0x32ae); + rtl8168_mdio_write(tp, 0x15, 0x0022); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0023); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x0024); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0025); + rtl8168_mdio_write(tp, 0x19, 0x6801); + rtl8168_mdio_write(tp, 0x15, 0x0026); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x0027); + rtl8168_mdio_write(tp, 0x19, 0xa300); + rtl8168_mdio_write(tp, 0x15, 0x0028); + rtl8168_mdio_write(tp, 0x19, 0x64a0); + rtl8168_mdio_write(tp, 0x15, 0x0029); + rtl8168_mdio_write(tp, 0x19, 0x76f0); + rtl8168_mdio_write(tp, 0x15, 0x002a); + rtl8168_mdio_write(tp, 0x19, 0x7670); + rtl8168_mdio_write(tp, 0x15, 0x002b); + rtl8168_mdio_write(tp, 0x19, 0x7630); + rtl8168_mdio_write(tp, 0x15, 0x002c); + rtl8168_mdio_write(tp, 0x19, 0x31a6); + rtl8168_mdio_write(tp, 0x15, 0x002d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x002e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x002f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0030); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0031); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0032); + rtl8168_mdio_write(tp, 0x19, 0x4801); + rtl8168_mdio_write(tp, 0x15, 0x0033); + rtl8168_mdio_write(tp, 0x19, 0x6803); + rtl8168_mdio_write(tp, 0x15, 0x0034); + rtl8168_mdio_write(tp, 0x19, 0x66a1); + rtl8168_mdio_write(tp, 0x15, 0x0035); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0036); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x0037); + rtl8168_mdio_write(tp, 0x19, 0xa300); + rtl8168_mdio_write(tp, 0x15, 0x0038); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0039); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x003a); + rtl8168_mdio_write(tp, 0x19, 0x74f8); + rtl8168_mdio_write(tp, 0x15, 0x003b); + rtl8168_mdio_write(tp, 0x19, 0x63d0); + rtl8168_mdio_write(tp, 0x15, 0x003c); + rtl8168_mdio_write(tp, 0x19, 0x7ff0); + rtl8168_mdio_write(tp, 0x15, 0x003d); + rtl8168_mdio_write(tp, 0x19, 0x77f0); + rtl8168_mdio_write(tp, 0x15, 0x003e); + rtl8168_mdio_write(tp, 0x19, 0x7ff0); + rtl8168_mdio_write(tp, 0x15, 0x003f); + rtl8168_mdio_write(tp, 0x19, 0x7750); + rtl8168_mdio_write(tp, 0x15, 0x0040); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x0041); + rtl8168_mdio_write(tp, 0x19, 0x7cf0); + rtl8168_mdio_write(tp, 0x15, 0x0042); + rtl8168_mdio_write(tp, 0x19, 0x7708); + rtl8168_mdio_write(tp, 0x15, 0x0043); + rtl8168_mdio_write(tp, 0x19, 0xa654); + rtl8168_mdio_write(tp, 0x15, 0x0044); + rtl8168_mdio_write(tp, 0x19, 0x304a); + rtl8168_mdio_write(tp, 0x15, 0x0045); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0046); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0047); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0048); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0049); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004a); + rtl8168_mdio_write(tp, 0x19, 0x4802); + rtl8168_mdio_write(tp, 0x15, 
0x004b); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x004c); + rtl8168_mdio_write(tp, 0x19, 0x4440); + rtl8168_mdio_write(tp, 0x15, 0x004d); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x004e); + rtl8168_mdio_write(tp, 0x19, 0x6481); + rtl8168_mdio_write(tp, 0x15, 0x004f); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x15, 0x0050); + rtl8168_mdio_write(tp, 0x19, 0x63e8); + rtl8168_mdio_write(tp, 0x15, 0x0051); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x0052); + rtl8168_mdio_write(tp, 0x19, 0x5900); + rtl8168_mdio_write(tp, 0x15, 0x0053); + rtl8168_mdio_write(tp, 0x19, 0x63f8); + rtl8168_mdio_write(tp, 0x15, 0x0054); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0055); + rtl8168_mdio_write(tp, 0x19, 0x3116); + rtl8168_mdio_write(tp, 0x15, 0x0056); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0057); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0058); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0059); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x005a); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x005b); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x005c); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x005d); + rtl8168_mdio_write(tp, 0x19, 0x6000); + rtl8168_mdio_write(tp, 0x15, 0x005e); + rtl8168_mdio_write(tp, 0x19, 0x59ce); + rtl8168_mdio_write(tp, 0x15, 0x005f); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x0060); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x0061); + rtl8168_mdio_write(tp, 0x19, 0x72b0); + rtl8168_mdio_write(tp, 0x15, 0x0062); + rtl8168_mdio_write(tp, 0x19, 0x400e); + rtl8168_mdio_write(tp, 0x15, 0x0063); + rtl8168_mdio_write(tp, 0x19, 0x4440); + rtl8168_mdio_write(tp, 0x15, 0x0064); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x15, 0x0065); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x15, 0x0066); + rtl8168_mdio_write(tp, 0x19, 0x70b0); + rtl8168_mdio_write(tp, 0x15, 0x0067); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0068); + rtl8168_mdio_write(tp, 0x19, 0x6008); + rtl8168_mdio_write(tp, 0x15, 0x0069); + rtl8168_mdio_write(tp, 0x19, 0x7cf0); + rtl8168_mdio_write(tp, 0x15, 0x006a); + rtl8168_mdio_write(tp, 0x19, 0x7750); + rtl8168_mdio_write(tp, 0x15, 0x006b); + rtl8168_mdio_write(tp, 0x19, 0x4007); + rtl8168_mdio_write(tp, 0x15, 0x006c); + rtl8168_mdio_write(tp, 0x19, 0x4500); + rtl8168_mdio_write(tp, 0x15, 0x006d); + rtl8168_mdio_write(tp, 0x19, 0x4023); + rtl8168_mdio_write(tp, 0x15, 0x006e); + rtl8168_mdio_write(tp, 0x19, 0x4580); + rtl8168_mdio_write(tp, 0x15, 0x006f); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0070); + rtl8168_mdio_write(tp, 0x19, 0xcd78); + rtl8168_mdio_write(tp, 0x15, 0x0071); + rtl8168_mdio_write(tp, 0x19, 0x0003); + rtl8168_mdio_write(tp, 0x15, 0x0072); + rtl8168_mdio_write(tp, 0x19, 0xbe02); + rtl8168_mdio_write(tp, 0x15, 0x0073); + rtl8168_mdio_write(tp, 0x19, 0x3070); + rtl8168_mdio_write(tp, 0x15, 0x0074); + rtl8168_mdio_write(tp, 0x19, 0x7cf0); + rtl8168_mdio_write(tp, 0x15, 0x0075); + rtl8168_mdio_write(tp, 0x19, 0x77f0); + rtl8168_mdio_write(tp, 0x15, 0x0076); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x0077); + rtl8168_mdio_write(tp, 
0x19, 0x4007); + rtl8168_mdio_write(tp, 0x15, 0x0078); + rtl8168_mdio_write(tp, 0x19, 0x4500); + rtl8168_mdio_write(tp, 0x15, 0x0079); + rtl8168_mdio_write(tp, 0x19, 0x4023); + rtl8168_mdio_write(tp, 0x15, 0x007a); + rtl8168_mdio_write(tp, 0x19, 0x4580); + rtl8168_mdio_write(tp, 0x15, 0x007b); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x007c); + rtl8168_mdio_write(tp, 0x19, 0xce80); + rtl8168_mdio_write(tp, 0x15, 0x007d); + rtl8168_mdio_write(tp, 0x19, 0x0004); + rtl8168_mdio_write(tp, 0x15, 0x007e); + rtl8168_mdio_write(tp, 0x19, 0xce80); + rtl8168_mdio_write(tp, 0x15, 0x007f); + rtl8168_mdio_write(tp, 0x19, 0x0002); + rtl8168_mdio_write(tp, 0x15, 0x0080); + rtl8168_mdio_write(tp, 0x19, 0x307c); + rtl8168_mdio_write(tp, 0x15, 0x0081); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x0082); + rtl8168_mdio_write(tp, 0x19, 0x480f); + rtl8168_mdio_write(tp, 0x15, 0x0083); + rtl8168_mdio_write(tp, 0x19, 0x6802); + rtl8168_mdio_write(tp, 0x15, 0x0084); + rtl8168_mdio_write(tp, 0x19, 0x6680); + rtl8168_mdio_write(tp, 0x15, 0x0085); + rtl8168_mdio_write(tp, 0x19, 0x7c10); + rtl8168_mdio_write(tp, 0x15, 0x0086); + rtl8168_mdio_write(tp, 0x19, 0x6010); + rtl8168_mdio_write(tp, 0x15, 0x0087); + rtl8168_mdio_write(tp, 0x19, 0x400a); + rtl8168_mdio_write(tp, 0x15, 0x0088); + rtl8168_mdio_write(tp, 0x19, 0x4580); + rtl8168_mdio_write(tp, 0x15, 0x0089); + rtl8168_mdio_write(tp, 0x19, 0x9e00); + rtl8168_mdio_write(tp, 0x15, 0x008a); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x008b); + rtl8168_mdio_write(tp, 0x19, 0x5800); + rtl8168_mdio_write(tp, 0x15, 0x008c); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x008d); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x008e); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x008f); + rtl8168_mdio_write(tp, 0x19, 0x8300); + rtl8168_mdio_write(tp, 0x15, 0x0090); + rtl8168_mdio_write(tp, 0x19, 0x7ff0); + rtl8168_mdio_write(tp, 0x15, 0x0091); + rtl8168_mdio_write(tp, 0x19, 0x74f0); + rtl8168_mdio_write(tp, 0x15, 0x0092); + rtl8168_mdio_write(tp, 0x19, 0x3006); + rtl8168_mdio_write(tp, 0x15, 0x0093); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0094); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0095); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0096); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0097); + rtl8168_mdio_write(tp, 0x19, 0x4803); + rtl8168_mdio_write(tp, 0x15, 0x0098); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0099); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x009a); + rtl8168_mdio_write(tp, 0x19, 0xa203); + rtl8168_mdio_write(tp, 0x15, 0x009b); + rtl8168_mdio_write(tp, 0x19, 0x64b1); + rtl8168_mdio_write(tp, 0x15, 0x009c); + rtl8168_mdio_write(tp, 0x19, 0x309e); + rtl8168_mdio_write(tp, 0x15, 0x009d); + rtl8168_mdio_write(tp, 0x19, 0x64b3); + rtl8168_mdio_write(tp, 0x15, 0x009e); + rtl8168_mdio_write(tp, 0x19, 0x4030); + rtl8168_mdio_write(tp, 0x15, 0x009f); + rtl8168_mdio_write(tp, 0x19, 0x440e); + rtl8168_mdio_write(tp, 0x15, 0x00a0); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x00a1); + rtl8168_mdio_write(tp, 0x19, 0x4419); + rtl8168_mdio_write(tp, 0x15, 0x00a2); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x00a3); + rtl8168_mdio_write(tp, 0x19, 0xc520); + 
rtl8168_mdio_write(tp, 0x15, 0x00a4); + rtl8168_mdio_write(tp, 0x19, 0x000b); + rtl8168_mdio_write(tp, 0x15, 0x00a5); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x00a6); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x00a7); + rtl8168_mdio_write(tp, 0x19, 0x58a4); + rtl8168_mdio_write(tp, 0x15, 0x00a8); + rtl8168_mdio_write(tp, 0x19, 0x63da); + rtl8168_mdio_write(tp, 0x15, 0x00a9); + rtl8168_mdio_write(tp, 0x19, 0x5cb0); + rtl8168_mdio_write(tp, 0x15, 0x00aa); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x00ab); + rtl8168_mdio_write(tp, 0x19, 0x72b0); + rtl8168_mdio_write(tp, 0x15, 0x00ac); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x15, 0x00ad); + rtl8168_mdio_write(tp, 0x19, 0x70b0); + rtl8168_mdio_write(tp, 0x15, 0x00ae); + rtl8168_mdio_write(tp, 0x19, 0x30b8); + rtl8168_mdio_write(tp, 0x15, 0x00af); + rtl8168_mdio_write(tp, 0x19, 0x4060); + rtl8168_mdio_write(tp, 0x15, 0x00b0); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x00b1); + rtl8168_mdio_write(tp, 0x19, 0x7e00); + rtl8168_mdio_write(tp, 0x15, 0x00b2); + rtl8168_mdio_write(tp, 0x19, 0x72b0); + rtl8168_mdio_write(tp, 0x15, 0x00b3); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x15, 0x00b4); + rtl8168_mdio_write(tp, 0x19, 0x73b0); + rtl8168_mdio_write(tp, 0x15, 0x00b5); + rtl8168_mdio_write(tp, 0x19, 0x58a0); + rtl8168_mdio_write(tp, 0x15, 0x00b6); + rtl8168_mdio_write(tp, 0x19, 0x63d2); + rtl8168_mdio_write(tp, 0x15, 0x00b7); + rtl8168_mdio_write(tp, 0x19, 0x5c00); + rtl8168_mdio_write(tp, 0x15, 0x00b8); + rtl8168_mdio_write(tp, 0x19, 0x5780); + rtl8168_mdio_write(tp, 0x15, 0x00b9); + rtl8168_mdio_write(tp, 0x19, 0xb60d); + rtl8168_mdio_write(tp, 0x15, 0x00ba); + rtl8168_mdio_write(tp, 0x19, 0x9bff); + rtl8168_mdio_write(tp, 0x15, 0x00bb); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x00bc); + rtl8168_mdio_write(tp, 0x19, 0x6001); + rtl8168_mdio_write(tp, 0x15, 0x00bd); + rtl8168_mdio_write(tp, 0x19, 0xc020); + rtl8168_mdio_write(tp, 0x15, 0x00be); + rtl8168_mdio_write(tp, 0x19, 0x002b); + rtl8168_mdio_write(tp, 0x15, 0x00bf); + rtl8168_mdio_write(tp, 0x19, 0xc137); + rtl8168_mdio_write(tp, 0x15, 0x00c0); + rtl8168_mdio_write(tp, 0x19, 0x0006); + rtl8168_mdio_write(tp, 0x15, 0x00c1); + rtl8168_mdio_write(tp, 0x19, 0x9af8); + rtl8168_mdio_write(tp, 0x15, 0x00c2); + rtl8168_mdio_write(tp, 0x19, 0x30c6); + rtl8168_mdio_write(tp, 0x15, 0x00c3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00c4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00c5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00c6); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x00c7); + rtl8168_mdio_write(tp, 0x19, 0x70b0); + rtl8168_mdio_write(tp, 0x15, 0x00c8); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x00c9); + rtl8168_mdio_write(tp, 0x19, 0x4804); + rtl8168_mdio_write(tp, 0x15, 0x00ca); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x00cb); + rtl8168_mdio_write(tp, 0x19, 0x5c80); + rtl8168_mdio_write(tp, 0x15, 0x00cc); + rtl8168_mdio_write(tp, 0x19, 0x4010); + rtl8168_mdio_write(tp, 0x15, 0x00cd); + rtl8168_mdio_write(tp, 0x19, 0x4415); + rtl8168_mdio_write(tp, 0x15, 0x00ce); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x15, 0x00cf); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x15, 
0x00d0); + rtl8168_mdio_write(tp, 0x19, 0x70b0); + rtl8168_mdio_write(tp, 0x15, 0x00d1); + rtl8168_mdio_write(tp, 0x19, 0x3177); + rtl8168_mdio_write(tp, 0x15, 0x00d2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00d3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00d4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00d5); + rtl8168_mdio_write(tp, 0x19, 0x4808); + rtl8168_mdio_write(tp, 0x15, 0x00d6); + rtl8168_mdio_write(tp, 0x19, 0x4007); + rtl8168_mdio_write(tp, 0x15, 0x00d7); + rtl8168_mdio_write(tp, 0x19, 0x4420); + rtl8168_mdio_write(tp, 0x15, 0x00d8); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x00d9); + rtl8168_mdio_write(tp, 0x19, 0xb608); + rtl8168_mdio_write(tp, 0x15, 0x00da); + rtl8168_mdio_write(tp, 0x19, 0xbcbd); + rtl8168_mdio_write(tp, 0x15, 0x00db); + rtl8168_mdio_write(tp, 0x19, 0xc60b); + rtl8168_mdio_write(tp, 0x15, 0x00dc); + rtl8168_mdio_write(tp, 0x19, 0x00fd); + rtl8168_mdio_write(tp, 0x15, 0x00dd); + rtl8168_mdio_write(tp, 0x19, 0x30e1); + rtl8168_mdio_write(tp, 0x15, 0x00de); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00df); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00e0); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00e1); + rtl8168_mdio_write(tp, 0x19, 0x4809); + rtl8168_mdio_write(tp, 0x15, 0x00e2); + rtl8168_mdio_write(tp, 0x19, 0x7e40); + rtl8168_mdio_write(tp, 0x15, 0x00e3); + rtl8168_mdio_write(tp, 0x19, 0x5a40); + rtl8168_mdio_write(tp, 0x15, 0x00e4); + rtl8168_mdio_write(tp, 0x19, 0x305a); + rtl8168_mdio_write(tp, 0x15, 0x00e5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00e6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00e7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00e8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00e9); + rtl8168_mdio_write(tp, 0x19, 0x480a); + rtl8168_mdio_write(tp, 0x15, 0x00ea); + rtl8168_mdio_write(tp, 0x19, 0x5820); + rtl8168_mdio_write(tp, 0x15, 0x00eb); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x00ec); + rtl8168_mdio_write(tp, 0x19, 0xb60a); + rtl8168_mdio_write(tp, 0x15, 0x00ed); + rtl8168_mdio_write(tp, 0x19, 0xda07); + rtl8168_mdio_write(tp, 0x15, 0x00ee); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x00ef); + rtl8168_mdio_write(tp, 0x19, 0xc60b); + rtl8168_mdio_write(tp, 0x15, 0x00f0); + rtl8168_mdio_write(tp, 0x19, 0x00fc); + rtl8168_mdio_write(tp, 0x15, 0x00f1); + rtl8168_mdio_write(tp, 0x19, 0x30f6); + rtl8168_mdio_write(tp, 0x15, 0x00f2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00f3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00f4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00f5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00f6); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x00f7); + rtl8168_mdio_write(tp, 0x19, 0x480b); + rtl8168_mdio_write(tp, 0x15, 0x00f8); + rtl8168_mdio_write(tp, 0x19, 0x6f03); + rtl8168_mdio_write(tp, 0x15, 0x00f9); + rtl8168_mdio_write(tp, 0x19, 0x405f); + rtl8168_mdio_write(tp, 0x15, 0x00fa); + rtl8168_mdio_write(tp, 0x19, 0x4448); + rtl8168_mdio_write(tp, 0x15, 0x00fb); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x00fc); + rtl8168_mdio_write(tp, 
0x19, 0x4468); + rtl8168_mdio_write(tp, 0x15, 0x00fd); + rtl8168_mdio_write(tp, 0x19, 0x9c03); + rtl8168_mdio_write(tp, 0x15, 0x00fe); + rtl8168_mdio_write(tp, 0x19, 0x6f07); + rtl8168_mdio_write(tp, 0x15, 0x00ff); + rtl8168_mdio_write(tp, 0x19, 0x58a0); + rtl8168_mdio_write(tp, 0x15, 0x0100); + rtl8168_mdio_write(tp, 0x19, 0xd6d1); + rtl8168_mdio_write(tp, 0x15, 0x0101); + rtl8168_mdio_write(tp, 0x19, 0x0004); + rtl8168_mdio_write(tp, 0x15, 0x0102); + rtl8168_mdio_write(tp, 0x19, 0xc137); + rtl8168_mdio_write(tp, 0x15, 0x0103); + rtl8168_mdio_write(tp, 0x19, 0x0002); + rtl8168_mdio_write(tp, 0x15, 0x0104); + rtl8168_mdio_write(tp, 0x19, 0xa0e5); + rtl8168_mdio_write(tp, 0x15, 0x0105); + rtl8168_mdio_write(tp, 0x19, 0x9df8); + rtl8168_mdio_write(tp, 0x15, 0x0106); + rtl8168_mdio_write(tp, 0x19, 0x30c6); + rtl8168_mdio_write(tp, 0x15, 0x0107); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0108); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0109); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x010a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x010b); + rtl8168_mdio_write(tp, 0x19, 0x4808); + rtl8168_mdio_write(tp, 0x15, 0x010c); + rtl8168_mdio_write(tp, 0x19, 0xc32d); + rtl8168_mdio_write(tp, 0x15, 0x010d); + rtl8168_mdio_write(tp, 0x19, 0x0003); + rtl8168_mdio_write(tp, 0x15, 0x010e); + rtl8168_mdio_write(tp, 0x19, 0xc8b3); + rtl8168_mdio_write(tp, 0x15, 0x010f); + rtl8168_mdio_write(tp, 0x19, 0x00fc); + rtl8168_mdio_write(tp, 0x15, 0x0110); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x0111); + rtl8168_mdio_write(tp, 0x19, 0x3116); + rtl8168_mdio_write(tp, 0x15, 0x0112); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0113); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0114); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0115); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0116); + rtl8168_mdio_write(tp, 0x19, 0x4803); + rtl8168_mdio_write(tp, 0x15, 0x0117); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0118); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x0119); + rtl8168_mdio_write(tp, 0x19, 0x7c04); + rtl8168_mdio_write(tp, 0x15, 0x011a); + rtl8168_mdio_write(tp, 0x19, 0x6000); + rtl8168_mdio_write(tp, 0x15, 0x011b); + rtl8168_mdio_write(tp, 0x19, 0x5cf7); + rtl8168_mdio_write(tp, 0x15, 0x011c); + rtl8168_mdio_write(tp, 0x19, 0x7c2a); + rtl8168_mdio_write(tp, 0x15, 0x011d); + rtl8168_mdio_write(tp, 0x19, 0x5800); + rtl8168_mdio_write(tp, 0x15, 0x011e); + rtl8168_mdio_write(tp, 0x19, 0x5400); + rtl8168_mdio_write(tp, 0x15, 0x011f); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0120); + rtl8168_mdio_write(tp, 0x19, 0x74f0); + rtl8168_mdio_write(tp, 0x15, 0x0121); + rtl8168_mdio_write(tp, 0x19, 0x4019); + rtl8168_mdio_write(tp, 0x15, 0x0122); + rtl8168_mdio_write(tp, 0x19, 0x440d); + rtl8168_mdio_write(tp, 0x15, 0x0123); + rtl8168_mdio_write(tp, 0x19, 0xb6c1); + rtl8168_mdio_write(tp, 0x15, 0x0124); + rtl8168_mdio_write(tp, 0x19, 0xc05b); + rtl8168_mdio_write(tp, 0x15, 0x0125); + rtl8168_mdio_write(tp, 0x19, 0x00bf); + rtl8168_mdio_write(tp, 0x15, 0x0126); + rtl8168_mdio_write(tp, 0x19, 0xc025); + rtl8168_mdio_write(tp, 0x15, 0x0127); + rtl8168_mdio_write(tp, 0x19, 0x00bd); + rtl8168_mdio_write(tp, 0x15, 0x0128); + rtl8168_mdio_write(tp, 0x19, 0xc603); + 
rtl8168_mdio_write(tp, 0x15, 0x0129); + rtl8168_mdio_write(tp, 0x19, 0x00bb); + rtl8168_mdio_write(tp, 0x15, 0x012a); + rtl8168_mdio_write(tp, 0x19, 0x8805); + rtl8168_mdio_write(tp, 0x15, 0x012b); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x012c); + rtl8168_mdio_write(tp, 0x19, 0x4001); + rtl8168_mdio_write(tp, 0x15, 0x012d); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x012e); + rtl8168_mdio_write(tp, 0x19, 0xa3dd); + rtl8168_mdio_write(tp, 0x15, 0x012f); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0130); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x0131); + rtl8168_mdio_write(tp, 0x19, 0x8407); + rtl8168_mdio_write(tp, 0x15, 0x0132); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0133); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x0134); + rtl8168_mdio_write(tp, 0x19, 0xd9b8); + rtl8168_mdio_write(tp, 0x15, 0x0135); + rtl8168_mdio_write(tp, 0x19, 0x0003); + rtl8168_mdio_write(tp, 0x15, 0x0136); + rtl8168_mdio_write(tp, 0x19, 0xc240); + rtl8168_mdio_write(tp, 0x15, 0x0137); + rtl8168_mdio_write(tp, 0x19, 0x0015); + rtl8168_mdio_write(tp, 0x15, 0x0138); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0139); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x013a); + rtl8168_mdio_write(tp, 0x19, 0x9ae9); + rtl8168_mdio_write(tp, 0x15, 0x013b); + rtl8168_mdio_write(tp, 0x19, 0x3140); + rtl8168_mdio_write(tp, 0x15, 0x013c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x013d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x013e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x013f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0140); + rtl8168_mdio_write(tp, 0x19, 0x4807); + rtl8168_mdio_write(tp, 0x15, 0x0141); + rtl8168_mdio_write(tp, 0x19, 0x4004); + rtl8168_mdio_write(tp, 0x15, 0x0142); + rtl8168_mdio_write(tp, 0x19, 0x4410); + rtl8168_mdio_write(tp, 0x15, 0x0143); + rtl8168_mdio_write(tp, 0x19, 0x7c0c); + rtl8168_mdio_write(tp, 0x15, 0x0144); + rtl8168_mdio_write(tp, 0x19, 0x600c); + rtl8168_mdio_write(tp, 0x15, 0x0145); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x15, 0x0146); + rtl8168_mdio_write(tp, 0x19, 0xa68f); + rtl8168_mdio_write(tp, 0x15, 0x0147); + rtl8168_mdio_write(tp, 0x19, 0x3116); + rtl8168_mdio_write(tp, 0x15, 0x0148); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0149); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x014a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x014b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x014c); + rtl8168_mdio_write(tp, 0x19, 0x4804); + rtl8168_mdio_write(tp, 0x15, 0x014d); + rtl8168_mdio_write(tp, 0x19, 0x54c0); + rtl8168_mdio_write(tp, 0x15, 0x014e); + rtl8168_mdio_write(tp, 0x19, 0xb703); + rtl8168_mdio_write(tp, 0x15, 0x014f); + rtl8168_mdio_write(tp, 0x19, 0x5cff); + rtl8168_mdio_write(tp, 0x15, 0x0150); + rtl8168_mdio_write(tp, 0x19, 0x315f); + rtl8168_mdio_write(tp, 0x15, 0x0151); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0152); + rtl8168_mdio_write(tp, 0x19, 0x74f8); + rtl8168_mdio_write(tp, 0x15, 0x0153); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0154); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 
0x0155); + rtl8168_mdio_write(tp, 0x19, 0x6000); + rtl8168_mdio_write(tp, 0x15, 0x0156); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0157); + rtl8168_mdio_write(tp, 0x19, 0x4418); + rtl8168_mdio_write(tp, 0x15, 0x0158); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x15, 0x0159); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x015a); + rtl8168_mdio_write(tp, 0x19, 0x64e1); + rtl8168_mdio_write(tp, 0x15, 0x015b); + rtl8168_mdio_write(tp, 0x19, 0x7c20); + rtl8168_mdio_write(tp, 0x15, 0x015c); + rtl8168_mdio_write(tp, 0x19, 0x5820); + rtl8168_mdio_write(tp, 0x15, 0x015d); + rtl8168_mdio_write(tp, 0x19, 0x5ccf); + rtl8168_mdio_write(tp, 0x15, 0x015e); + rtl8168_mdio_write(tp, 0x19, 0x7050); + rtl8168_mdio_write(tp, 0x15, 0x015f); + rtl8168_mdio_write(tp, 0x19, 0xd9b8); + rtl8168_mdio_write(tp, 0x15, 0x0160); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x0161); + rtl8168_mdio_write(tp, 0x19, 0xdab1); + rtl8168_mdio_write(tp, 0x15, 0x0162); + rtl8168_mdio_write(tp, 0x19, 0x0015); + rtl8168_mdio_write(tp, 0x15, 0x0163); + rtl8168_mdio_write(tp, 0x19, 0xc244); + rtl8168_mdio_write(tp, 0x15, 0x0164); + rtl8168_mdio_write(tp, 0x19, 0x0013); + rtl8168_mdio_write(tp, 0x15, 0x0165); + rtl8168_mdio_write(tp, 0x19, 0xc021); + rtl8168_mdio_write(tp, 0x15, 0x0166); + rtl8168_mdio_write(tp, 0x19, 0x00f9); + rtl8168_mdio_write(tp, 0x15, 0x0167); + rtl8168_mdio_write(tp, 0x19, 0x3177); + rtl8168_mdio_write(tp, 0x15, 0x0168); + rtl8168_mdio_write(tp, 0x19, 0x5cf7); + rtl8168_mdio_write(tp, 0x15, 0x0169); + rtl8168_mdio_write(tp, 0x19, 0x4010); + rtl8168_mdio_write(tp, 0x15, 0x016a); + rtl8168_mdio_write(tp, 0x19, 0x4428); + rtl8168_mdio_write(tp, 0x15, 0x016b); + rtl8168_mdio_write(tp, 0x19, 0x9c00); + rtl8168_mdio_write(tp, 0x15, 0x016c); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x016d); + rtl8168_mdio_write(tp, 0x19, 0x6008); + rtl8168_mdio_write(tp, 0x15, 0x016e); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x016f); + rtl8168_mdio_write(tp, 0x19, 0x74f0); + rtl8168_mdio_write(tp, 0x15, 0x0170); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x0171); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0172); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0173); + rtl8168_mdio_write(tp, 0x19, 0x3116); + rtl8168_mdio_write(tp, 0x15, 0x0174); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0175); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0176); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0177); + rtl8168_mdio_write(tp, 0x19, 0x4805); + rtl8168_mdio_write(tp, 0x15, 0x0178); + rtl8168_mdio_write(tp, 0x19, 0xa103); + rtl8168_mdio_write(tp, 0x15, 0x0179); + rtl8168_mdio_write(tp, 0x19, 0x7c02); + rtl8168_mdio_write(tp, 0x15, 0x017a); + rtl8168_mdio_write(tp, 0x19, 0x6002); + rtl8168_mdio_write(tp, 0x15, 0x017b); + rtl8168_mdio_write(tp, 0x19, 0x7e00); + rtl8168_mdio_write(tp, 0x15, 0x017c); + rtl8168_mdio_write(tp, 0x19, 0x5400); + rtl8168_mdio_write(tp, 0x15, 0x017d); + rtl8168_mdio_write(tp, 0x19, 0x7c6b); + rtl8168_mdio_write(tp, 0x15, 0x017e); + rtl8168_mdio_write(tp, 0x19, 0x5c63); + rtl8168_mdio_write(tp, 0x15, 0x017f); + rtl8168_mdio_write(tp, 0x19, 0x407d); + rtl8168_mdio_write(tp, 0x15, 0x0180); + rtl8168_mdio_write(tp, 0x19, 0xa602); + rtl8168_mdio_write(tp, 0x15, 0x0181); + rtl8168_mdio_write(tp, 
0x19, 0x4001); + rtl8168_mdio_write(tp, 0x15, 0x0182); + rtl8168_mdio_write(tp, 0x19, 0x4420); + rtl8168_mdio_write(tp, 0x15, 0x0183); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x0184); + rtl8168_mdio_write(tp, 0x19, 0x44a1); + rtl8168_mdio_write(tp, 0x15, 0x0185); + rtl8168_mdio_write(tp, 0x19, 0xd6e0); + rtl8168_mdio_write(tp, 0x15, 0x0186); + rtl8168_mdio_write(tp, 0x19, 0x0009); + rtl8168_mdio_write(tp, 0x15, 0x0187); + rtl8168_mdio_write(tp, 0x19, 0x9efe); + rtl8168_mdio_write(tp, 0x15, 0x0188); + rtl8168_mdio_write(tp, 0x19, 0x7c02); + rtl8168_mdio_write(tp, 0x15, 0x0189); + rtl8168_mdio_write(tp, 0x19, 0x6000); + rtl8168_mdio_write(tp, 0x15, 0x018a); + rtl8168_mdio_write(tp, 0x19, 0x9c00); + rtl8168_mdio_write(tp, 0x15, 0x018b); + rtl8168_mdio_write(tp, 0x19, 0x318f); + rtl8168_mdio_write(tp, 0x15, 0x018c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x018d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x018e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x018f); + rtl8168_mdio_write(tp, 0x19, 0x4806); + rtl8168_mdio_write(tp, 0x15, 0x0190); + rtl8168_mdio_write(tp, 0x19, 0x7c10); + rtl8168_mdio_write(tp, 0x15, 0x0191); + rtl8168_mdio_write(tp, 0x19, 0x5c10); + rtl8168_mdio_write(tp, 0x15, 0x0192); + rtl8168_mdio_write(tp, 0x19, 0x40fa); + rtl8168_mdio_write(tp, 0x15, 0x0193); + rtl8168_mdio_write(tp, 0x19, 0xa602); + rtl8168_mdio_write(tp, 0x15, 0x0194); + rtl8168_mdio_write(tp, 0x19, 0x4010); + rtl8168_mdio_write(tp, 0x15, 0x0195); + rtl8168_mdio_write(tp, 0x19, 0x4440); + rtl8168_mdio_write(tp, 0x15, 0x0196); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x15, 0x0197); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0198); + rtl8168_mdio_write(tp, 0x19, 0x6400); + rtl8168_mdio_write(tp, 0x15, 0x0199); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x019a); + rtl8168_mdio_write(tp, 0x19, 0x4540); + rtl8168_mdio_write(tp, 0x15, 0x019b); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x019c); + rtl8168_mdio_write(tp, 0x19, 0x6008); + rtl8168_mdio_write(tp, 0x15, 0x019d); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x019e); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x019f); + rtl8168_mdio_write(tp, 0x19, 0x6400); + rtl8168_mdio_write(tp, 0x15, 0x01a0); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x01a1); + rtl8168_mdio_write(tp, 0x19, 0x6480); + rtl8168_mdio_write(tp, 0x15, 0x01a2); + rtl8168_mdio_write(tp, 0x19, 0x3140); + rtl8168_mdio_write(tp, 0x15, 0x01a3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01a4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01a5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01a6); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x01a7); + rtl8168_mdio_write(tp, 0x19, 0x7c0b); + rtl8168_mdio_write(tp, 0x15, 0x01a8); + rtl8168_mdio_write(tp, 0x19, 0x6c01); + rtl8168_mdio_write(tp, 0x15, 0x01a9); + rtl8168_mdio_write(tp, 0x19, 0x64a8); + rtl8168_mdio_write(tp, 0x15, 0x01aa); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x01ab); + rtl8168_mdio_write(tp, 0x19, 0x5cf0); + rtl8168_mdio_write(tp, 0x15, 0x01ac); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x01ad); + rtl8168_mdio_write(tp, 0x19, 0xb628); + 
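+ /* Slots whose reg 0x19 word is 0x0000 (such as 0x01b6 through 0x01b9 just below) look like unused padding entries; the sequence still writes each address explicitly rather than skipping it. */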
rtl8168_mdio_write(tp, 0x15, 0x01ae); + rtl8168_mdio_write(tp, 0x19, 0xc053); + rtl8168_mdio_write(tp, 0x15, 0x01af); + rtl8168_mdio_write(tp, 0x19, 0x0026); + rtl8168_mdio_write(tp, 0x15, 0x01b0); + rtl8168_mdio_write(tp, 0x19, 0xc02d); + rtl8168_mdio_write(tp, 0x15, 0x01b1); + rtl8168_mdio_write(tp, 0x19, 0x0024); + rtl8168_mdio_write(tp, 0x15, 0x01b2); + rtl8168_mdio_write(tp, 0x19, 0xc603); + rtl8168_mdio_write(tp, 0x15, 0x01b3); + rtl8168_mdio_write(tp, 0x19, 0x0022); + rtl8168_mdio_write(tp, 0x15, 0x01b4); + rtl8168_mdio_write(tp, 0x19, 0x8cf9); + rtl8168_mdio_write(tp, 0x15, 0x01b5); + rtl8168_mdio_write(tp, 0x19, 0x31ba); + rtl8168_mdio_write(tp, 0x15, 0x01b6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01b7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01b8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01b9); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01ba); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x01bb); + rtl8168_mdio_write(tp, 0x19, 0x5420); + rtl8168_mdio_write(tp, 0x15, 0x01bc); + rtl8168_mdio_write(tp, 0x19, 0x4811); + rtl8168_mdio_write(tp, 0x15, 0x01bd); + rtl8168_mdio_write(tp, 0x19, 0x5000); + rtl8168_mdio_write(tp, 0x15, 0x01be); + rtl8168_mdio_write(tp, 0x19, 0x4801); + rtl8168_mdio_write(tp, 0x15, 0x01bf); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x01c0); + rtl8168_mdio_write(tp, 0x19, 0x31f5); + rtl8168_mdio_write(tp, 0x15, 0x01c1); + rtl8168_mdio_write(tp, 0x19, 0xb614); + rtl8168_mdio_write(tp, 0x15, 0x01c2); + rtl8168_mdio_write(tp, 0x19, 0x8ce4); + rtl8168_mdio_write(tp, 0x15, 0x01c3); + rtl8168_mdio_write(tp, 0x19, 0xb30c); + rtl8168_mdio_write(tp, 0x15, 0x01c4); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x01c5); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x01c6); + rtl8168_mdio_write(tp, 0x19, 0x8206); + rtl8168_mdio_write(tp, 0x15, 0x01c7); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x01c8); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x01c9); + rtl8168_mdio_write(tp, 0x19, 0x7c04); + rtl8168_mdio_write(tp, 0x15, 0x01ca); + rtl8168_mdio_write(tp, 0x19, 0x7404); + rtl8168_mdio_write(tp, 0x15, 0x01cb); + rtl8168_mdio_write(tp, 0x19, 0x31c0); + rtl8168_mdio_write(tp, 0x15, 0x01cc); + rtl8168_mdio_write(tp, 0x19, 0x7c04); + rtl8168_mdio_write(tp, 0x15, 0x01cd); + rtl8168_mdio_write(tp, 0x19, 0x7400); + rtl8168_mdio_write(tp, 0x15, 0x01ce); + rtl8168_mdio_write(tp, 0x19, 0x31c0); + rtl8168_mdio_write(tp, 0x15, 0x01cf); + rtl8168_mdio_write(tp, 0x19, 0x8df1); + rtl8168_mdio_write(tp, 0x15, 0x01d0); + rtl8168_mdio_write(tp, 0x19, 0x3248); + rtl8168_mdio_write(tp, 0x15, 0x01d1); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01d2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01d3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01d4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01d5); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x01d6); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x01d7); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x01d8); + rtl8168_mdio_write(tp, 0x19, 0x7670); + rtl8168_mdio_write(tp, 0x15, 0x01d9); + rtl8168_mdio_write(tp, 0x19, 0x4023); + rtl8168_mdio_write(tp, 0x15, 
0x01da); + rtl8168_mdio_write(tp, 0x19, 0x4500); + rtl8168_mdio_write(tp, 0x15, 0x01db); + rtl8168_mdio_write(tp, 0x19, 0x4069); + rtl8168_mdio_write(tp, 0x15, 0x01dc); + rtl8168_mdio_write(tp, 0x19, 0x4580); + rtl8168_mdio_write(tp, 0x15, 0x01dd); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x01de); + rtl8168_mdio_write(tp, 0x19, 0xcff5); + rtl8168_mdio_write(tp, 0x15, 0x01df); + rtl8168_mdio_write(tp, 0x19, 0x00ff); + rtl8168_mdio_write(tp, 0x15, 0x01e0); + rtl8168_mdio_write(tp, 0x19, 0x76f0); + rtl8168_mdio_write(tp, 0x15, 0x01e1); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x01e2); + rtl8168_mdio_write(tp, 0x19, 0x4023); + rtl8168_mdio_write(tp, 0x15, 0x01e3); + rtl8168_mdio_write(tp, 0x19, 0x4500); + rtl8168_mdio_write(tp, 0x15, 0x01e4); + rtl8168_mdio_write(tp, 0x19, 0x4069); + rtl8168_mdio_write(tp, 0x15, 0x01e5); + rtl8168_mdio_write(tp, 0x19, 0x4580); + rtl8168_mdio_write(tp, 0x15, 0x01e6); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x01e7); + rtl8168_mdio_write(tp, 0x19, 0xd0f5); + rtl8168_mdio_write(tp, 0x15, 0x01e8); + rtl8168_mdio_write(tp, 0x19, 0x00ff); + rtl8168_mdio_write(tp, 0x15, 0x01e9); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x01ea); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x01eb); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x01ec); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x01ed); + rtl8168_mdio_write(tp, 0x19, 0x8300); + rtl8168_mdio_write(tp, 0x15, 0x01ee); + rtl8168_mdio_write(tp, 0x19, 0x74f0); + rtl8168_mdio_write(tp, 0x15, 0x01ef); + rtl8168_mdio_write(tp, 0x19, 0x3006); + rtl8168_mdio_write(tp, 0x15, 0x01f0); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01f1); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01f2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01f3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01f4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01f5); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x01f6); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x01f7); + rtl8168_mdio_write(tp, 0x19, 0x409d); + rtl8168_mdio_write(tp, 0x15, 0x01f8); + rtl8168_mdio_write(tp, 0x19, 0x7c87); + rtl8168_mdio_write(tp, 0x15, 0x01f9); + rtl8168_mdio_write(tp, 0x19, 0xae14); + rtl8168_mdio_write(tp, 0x15, 0x01fa); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x01fb); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x01fc); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x01fd); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x01fe); + rtl8168_mdio_write(tp, 0x19, 0x980e); + rtl8168_mdio_write(tp, 0x15, 0x01ff); + rtl8168_mdio_write(tp, 0x19, 0x930c); + rtl8168_mdio_write(tp, 0x15, 0x0200); + rtl8168_mdio_write(tp, 0x19, 0x9206); + rtl8168_mdio_write(tp, 0x15, 0x0201); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0202); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0203); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x0204); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0205); + rtl8168_mdio_write(tp, 0x19, 0x320c); + rtl8168_mdio_write(tp, 0x15, 0x0206); + rtl8168_mdio_write(tp, 
0x19, 0x4000); + rtl8168_mdio_write(tp, 0x15, 0x0207); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0208); + rtl8168_mdio_write(tp, 0x19, 0x588d); + rtl8168_mdio_write(tp, 0x15, 0x0209); + rtl8168_mdio_write(tp, 0x19, 0x5500); + rtl8168_mdio_write(tp, 0x15, 0x020a); + rtl8168_mdio_write(tp, 0x19, 0x320c); + rtl8168_mdio_write(tp, 0x15, 0x020b); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x020c); + rtl8168_mdio_write(tp, 0x19, 0x3220); + rtl8168_mdio_write(tp, 0x15, 0x020d); + rtl8168_mdio_write(tp, 0x19, 0x4480); + rtl8168_mdio_write(tp, 0x15, 0x020e); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x020f); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0210); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x0211); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x0212); + rtl8168_mdio_write(tp, 0x19, 0x980e); + rtl8168_mdio_write(tp, 0x15, 0x0213); + rtl8168_mdio_write(tp, 0x19, 0x930c); + rtl8168_mdio_write(tp, 0x15, 0x0214); + rtl8168_mdio_write(tp, 0x19, 0x9206); + rtl8168_mdio_write(tp, 0x15, 0x0215); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x15, 0x0216); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0217); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x0218); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0219); + rtl8168_mdio_write(tp, 0x19, 0x3220); + rtl8168_mdio_write(tp, 0x15, 0x021a); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x021b); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x021c); + rtl8168_mdio_write(tp, 0x19, 0x588d); + rtl8168_mdio_write(tp, 0x15, 0x021d); + rtl8168_mdio_write(tp, 0x19, 0x5540); + rtl8168_mdio_write(tp, 0x15, 0x021e); + rtl8168_mdio_write(tp, 0x19, 0x3220); + rtl8168_mdio_write(tp, 0x15, 0x021f); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x15, 0x0220); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0221); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0222); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0223); + rtl8168_mdio_write(tp, 0x19, 0x3231); + rtl8168_mdio_write(tp, 0x15, 0x0224); + rtl8168_mdio_write(tp, 0x19, 0xab06); + rtl8168_mdio_write(tp, 0x15, 0x0225); + rtl8168_mdio_write(tp, 0x19, 0xbf08); + rtl8168_mdio_write(tp, 0x15, 0x0226); + rtl8168_mdio_write(tp, 0x19, 0x4076); + rtl8168_mdio_write(tp, 0x15, 0x0227); + rtl8168_mdio_write(tp, 0x19, 0x7d07); + rtl8168_mdio_write(tp, 0x15, 0x0228); + rtl8168_mdio_write(tp, 0x19, 0x4502); + rtl8168_mdio_write(tp, 0x15, 0x0229); + rtl8168_mdio_write(tp, 0x19, 0x3231); + rtl8168_mdio_write(tp, 0x15, 0x022a); + rtl8168_mdio_write(tp, 0x19, 0x7d80); + rtl8168_mdio_write(tp, 0x15, 0x022b); + rtl8168_mdio_write(tp, 0x19, 0x5180); + rtl8168_mdio_write(tp, 0x15, 0x022c); + rtl8168_mdio_write(tp, 0x19, 0x322f); + rtl8168_mdio_write(tp, 0x15, 0x022d); + rtl8168_mdio_write(tp, 0x19, 0x7d80); + rtl8168_mdio_write(tp, 0x15, 0x022e); + rtl8168_mdio_write(tp, 0x19, 0x5000); + rtl8168_mdio_write(tp, 0x15, 0x022f); + rtl8168_mdio_write(tp, 0x19, 0x7d07); + rtl8168_mdio_write(tp, 0x15, 0x0230); + rtl8168_mdio_write(tp, 0x19, 0x4402); + rtl8168_mdio_write(tp, 0x15, 0x0231); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0232); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + 
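+ /* Data words in the 0x31xx-0x33xx range here (for example 0x3231 and 0x322f above) carry low bits that match other slot addresses in this same table, which hints that they encode jumps between patch locations; that is an inference from the values alone, not vendor documentation. */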
rtl8168_mdio_write(tp, 0x15, 0x0233); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0234); + rtl8168_mdio_write(tp, 0x19, 0xb309); + rtl8168_mdio_write(tp, 0x15, 0x0235); + rtl8168_mdio_write(tp, 0x19, 0xb204); + rtl8168_mdio_write(tp, 0x15, 0x0236); + rtl8168_mdio_write(tp, 0x19, 0xb105); + rtl8168_mdio_write(tp, 0x15, 0x0237); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0238); + rtl8168_mdio_write(tp, 0x19, 0x31c1); + rtl8168_mdio_write(tp, 0x15, 0x0239); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x023a); + rtl8168_mdio_write(tp, 0x19, 0x3261); + rtl8168_mdio_write(tp, 0x15, 0x023b); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x023c); + rtl8168_mdio_write(tp, 0x19, 0x3250); + rtl8168_mdio_write(tp, 0x15, 0x023d); + rtl8168_mdio_write(tp, 0x19, 0xb203); + rtl8168_mdio_write(tp, 0x15, 0x023e); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x023f); + rtl8168_mdio_write(tp, 0x19, 0x327a); + rtl8168_mdio_write(tp, 0x15, 0x0240); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0241); + rtl8168_mdio_write(tp, 0x19, 0x3293); + rtl8168_mdio_write(tp, 0x15, 0x0242); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0243); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0244); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0245); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0246); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0247); + rtl8168_mdio_write(tp, 0x19, 0x32a3); + rtl8168_mdio_write(tp, 0x15, 0x0248); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0249); + rtl8168_mdio_write(tp, 0x19, 0x403d); + rtl8168_mdio_write(tp, 0x15, 0x024a); + rtl8168_mdio_write(tp, 0x19, 0x440c); + rtl8168_mdio_write(tp, 0x15, 0x024b); + rtl8168_mdio_write(tp, 0x19, 0x4812); + rtl8168_mdio_write(tp, 0x15, 0x024c); + rtl8168_mdio_write(tp, 0x19, 0x5001); + rtl8168_mdio_write(tp, 0x15, 0x024d); + rtl8168_mdio_write(tp, 0x19, 0x4802); + rtl8168_mdio_write(tp, 0x15, 0x024e); + rtl8168_mdio_write(tp, 0x19, 0x6880); + rtl8168_mdio_write(tp, 0x15, 0x024f); + rtl8168_mdio_write(tp, 0x19, 0x31f5); + rtl8168_mdio_write(tp, 0x15, 0x0250); + rtl8168_mdio_write(tp, 0x19, 0xb685); + rtl8168_mdio_write(tp, 0x15, 0x0251); + rtl8168_mdio_write(tp, 0x19, 0x801c); + rtl8168_mdio_write(tp, 0x15, 0x0252); + rtl8168_mdio_write(tp, 0x19, 0xbaf5); + rtl8168_mdio_write(tp, 0x15, 0x0253); + rtl8168_mdio_write(tp, 0x19, 0xc07c); + rtl8168_mdio_write(tp, 0x15, 0x0254); + rtl8168_mdio_write(tp, 0x19, 0x00fb); + rtl8168_mdio_write(tp, 0x15, 0x0255); + rtl8168_mdio_write(tp, 0x19, 0x325a); + rtl8168_mdio_write(tp, 0x15, 0x0256); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0257); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0258); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0259); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x025a); + rtl8168_mdio_write(tp, 0x19, 0x481a); + rtl8168_mdio_write(tp, 0x15, 0x025b); + rtl8168_mdio_write(tp, 0x19, 0x5001); + rtl8168_mdio_write(tp, 0x15, 0x025c); + rtl8168_mdio_write(tp, 0x19, 0x401b); + rtl8168_mdio_write(tp, 0x15, 0x025d); + rtl8168_mdio_write(tp, 0x19, 0x480a); + rtl8168_mdio_write(tp, 0x15, 0x025e); + rtl8168_mdio_write(tp, 0x19, 0x4418); + rtl8168_mdio_write(tp, 0x15, 
0x025f); + rtl8168_mdio_write(tp, 0x19, 0x6900); + rtl8168_mdio_write(tp, 0x15, 0x0260); + rtl8168_mdio_write(tp, 0x19, 0x31f5); + rtl8168_mdio_write(tp, 0x15, 0x0261); + rtl8168_mdio_write(tp, 0x19, 0xb64b); + rtl8168_mdio_write(tp, 0x15, 0x0262); + rtl8168_mdio_write(tp, 0x19, 0xdb00); + rtl8168_mdio_write(tp, 0x15, 0x0263); + rtl8168_mdio_write(tp, 0x19, 0x0048); + rtl8168_mdio_write(tp, 0x15, 0x0264); + rtl8168_mdio_write(tp, 0x19, 0xdb7d); + rtl8168_mdio_write(tp, 0x15, 0x0265); + rtl8168_mdio_write(tp, 0x19, 0x0002); + rtl8168_mdio_write(tp, 0x15, 0x0266); + rtl8168_mdio_write(tp, 0x19, 0xa0fa); + rtl8168_mdio_write(tp, 0x15, 0x0267); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x0268); + rtl8168_mdio_write(tp, 0x19, 0x3248); + rtl8168_mdio_write(tp, 0x15, 0x0269); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x026a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x026b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x026c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x026d); + rtl8168_mdio_write(tp, 0x19, 0xb806); + rtl8168_mdio_write(tp, 0x15, 0x026e); + rtl8168_mdio_write(tp, 0x19, 0x588d); + rtl8168_mdio_write(tp, 0x15, 0x026f); + rtl8168_mdio_write(tp, 0x19, 0x5500); + rtl8168_mdio_write(tp, 0x15, 0x0270); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x0271); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0272); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0273); + rtl8168_mdio_write(tp, 0x19, 0x4814); + rtl8168_mdio_write(tp, 0x15, 0x0274); + rtl8168_mdio_write(tp, 0x19, 0x500b); + rtl8168_mdio_write(tp, 0x15, 0x0275); + rtl8168_mdio_write(tp, 0x19, 0x4804); + rtl8168_mdio_write(tp, 0x15, 0x0276); + rtl8168_mdio_write(tp, 0x19, 0x40c4); + rtl8168_mdio_write(tp, 0x15, 0x0277); + rtl8168_mdio_write(tp, 0x19, 0x4425); + rtl8168_mdio_write(tp, 0x15, 0x0278); + rtl8168_mdio_write(tp, 0x19, 0x6a00); + rtl8168_mdio_write(tp, 0x15, 0x0279); + rtl8168_mdio_write(tp, 0x19, 0x31f5); + rtl8168_mdio_write(tp, 0x15, 0x027a); + rtl8168_mdio_write(tp, 0x19, 0xb632); + rtl8168_mdio_write(tp, 0x15, 0x027b); + rtl8168_mdio_write(tp, 0x19, 0xdc03); + rtl8168_mdio_write(tp, 0x15, 0x027c); + rtl8168_mdio_write(tp, 0x19, 0x0027); + rtl8168_mdio_write(tp, 0x15, 0x027d); + rtl8168_mdio_write(tp, 0x19, 0x80fc); + rtl8168_mdio_write(tp, 0x15, 0x027e); + rtl8168_mdio_write(tp, 0x19, 0x3283); + rtl8168_mdio_write(tp, 0x15, 0x027f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0280); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0281); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0282); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0283); + rtl8168_mdio_write(tp, 0x19, 0xb806); + rtl8168_mdio_write(tp, 0x15, 0x0284); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x0285); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0286); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x0287); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x15, 0x0288); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0289); + rtl8168_mdio_write(tp, 0x19, 0x4818); + rtl8168_mdio_write(tp, 0x15, 0x028a); + rtl8168_mdio_write(tp, 0x19, 0x5051); + rtl8168_mdio_write(tp, 0x15, 0x028b); + rtl8168_mdio_write(tp, 
0x19, 0x4808); + rtl8168_mdio_write(tp, 0x15, 0x028c); + rtl8168_mdio_write(tp, 0x19, 0x4050); + rtl8168_mdio_write(tp, 0x15, 0x028d); + rtl8168_mdio_write(tp, 0x19, 0x4462); + rtl8168_mdio_write(tp, 0x15, 0x028e); + rtl8168_mdio_write(tp, 0x19, 0x40c4); + rtl8168_mdio_write(tp, 0x15, 0x028f); + rtl8168_mdio_write(tp, 0x19, 0x4473); + rtl8168_mdio_write(tp, 0x15, 0x0290); + rtl8168_mdio_write(tp, 0x19, 0x5041); + rtl8168_mdio_write(tp, 0x15, 0x0291); + rtl8168_mdio_write(tp, 0x19, 0x6b00); + rtl8168_mdio_write(tp, 0x15, 0x0292); + rtl8168_mdio_write(tp, 0x19, 0x31f5); + rtl8168_mdio_write(tp, 0x15, 0x0293); + rtl8168_mdio_write(tp, 0x19, 0xb619); + rtl8168_mdio_write(tp, 0x15, 0x0294); + rtl8168_mdio_write(tp, 0x19, 0x80d9); + rtl8168_mdio_write(tp, 0x15, 0x0295); + rtl8168_mdio_write(tp, 0x19, 0xbd06); + rtl8168_mdio_write(tp, 0x15, 0x0296); + rtl8168_mdio_write(tp, 0x19, 0xbb0d); + rtl8168_mdio_write(tp, 0x15, 0x0297); + rtl8168_mdio_write(tp, 0x19, 0xaf14); + rtl8168_mdio_write(tp, 0x15, 0x0298); + rtl8168_mdio_write(tp, 0x19, 0x8efa); + rtl8168_mdio_write(tp, 0x15, 0x0299); + rtl8168_mdio_write(tp, 0x19, 0x5049); + rtl8168_mdio_write(tp, 0x15, 0x029a); + rtl8168_mdio_write(tp, 0x19, 0x3248); + rtl8168_mdio_write(tp, 0x15, 0x029b); + rtl8168_mdio_write(tp, 0x19, 0x4c10); + rtl8168_mdio_write(tp, 0x15, 0x029c); + rtl8168_mdio_write(tp, 0x19, 0x44b0); + rtl8168_mdio_write(tp, 0x15, 0x029d); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x029e); + rtl8168_mdio_write(tp, 0x19, 0x3292); + rtl8168_mdio_write(tp, 0x15, 0x029f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02a0); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02a1); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02a2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02a3); + rtl8168_mdio_write(tp, 0x19, 0x481f); + rtl8168_mdio_write(tp, 0x15, 0x02a4); + rtl8168_mdio_write(tp, 0x19, 0x5005); + rtl8168_mdio_write(tp, 0x15, 0x02a5); + rtl8168_mdio_write(tp, 0x19, 0x480f); + rtl8168_mdio_write(tp, 0x15, 0x02a6); + rtl8168_mdio_write(tp, 0x19, 0xac00); + rtl8168_mdio_write(tp, 0x15, 0x02a7); + rtl8168_mdio_write(tp, 0x19, 0x31a6); + rtl8168_mdio_write(tp, 0x15, 0x02a8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02a9); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02aa); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02ab); + rtl8168_mdio_write(tp, 0x19, 0x31ba); + rtl8168_mdio_write(tp, 0x15, 0x02ac); + rtl8168_mdio_write(tp, 0x19, 0x31d5); + rtl8168_mdio_write(tp, 0x15, 0x02ad); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02ae); + rtl8168_mdio_write(tp, 0x19, 0x5cf0); + rtl8168_mdio_write(tp, 0x15, 0x02af); + rtl8168_mdio_write(tp, 0x19, 0x588c); + rtl8168_mdio_write(tp, 0x15, 0x02b0); + rtl8168_mdio_write(tp, 0x19, 0x542f); + rtl8168_mdio_write(tp, 0x15, 0x02b1); + rtl8168_mdio_write(tp, 0x19, 0x7ffb); + rtl8168_mdio_write(tp, 0x15, 0x02b2); + rtl8168_mdio_write(tp, 0x19, 0x6ff8); + rtl8168_mdio_write(tp, 0x15, 0x02b3); + rtl8168_mdio_write(tp, 0x19, 0x64a4); + rtl8168_mdio_write(tp, 0x15, 0x02b4); + rtl8168_mdio_write(tp, 0x19, 0x64a0); + rtl8168_mdio_write(tp, 0x15, 0x02b5); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x02b6); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x02b7); + rtl8168_mdio_write(tp, 0x19, 0x4020); + 
rtl8168_mdio_write(tp, 0x15, 0x02b8); + rtl8168_mdio_write(tp, 0x19, 0x4480); + rtl8168_mdio_write(tp, 0x15, 0x02b9); + rtl8168_mdio_write(tp, 0x19, 0x9e00); + rtl8168_mdio_write(tp, 0x15, 0x02ba); + rtl8168_mdio_write(tp, 0x19, 0x4891); + rtl8168_mdio_write(tp, 0x15, 0x02bb); + rtl8168_mdio_write(tp, 0x19, 0x4cc0); + rtl8168_mdio_write(tp, 0x15, 0x02bc); + rtl8168_mdio_write(tp, 0x19, 0x4801); + rtl8168_mdio_write(tp, 0x15, 0x02bd); + rtl8168_mdio_write(tp, 0x19, 0xa609); + rtl8168_mdio_write(tp, 0x15, 0x02be); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x02bf); + rtl8168_mdio_write(tp, 0x19, 0x004e); + rtl8168_mdio_write(tp, 0x15, 0x02c0); + rtl8168_mdio_write(tp, 0x19, 0x87fe); + rtl8168_mdio_write(tp, 0x15, 0x02c1); + rtl8168_mdio_write(tp, 0x19, 0x32c6); + rtl8168_mdio_write(tp, 0x15, 0x02c2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02c3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02c4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02c5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02c6); + rtl8168_mdio_write(tp, 0x19, 0x48b2); + rtl8168_mdio_write(tp, 0x15, 0x02c7); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x02c8); + rtl8168_mdio_write(tp, 0x19, 0x4822); + rtl8168_mdio_write(tp, 0x15, 0x02c9); + rtl8168_mdio_write(tp, 0x19, 0x4488); + rtl8168_mdio_write(tp, 0x15, 0x02ca); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x02cb); + rtl8168_mdio_write(tp, 0x19, 0x0042); + rtl8168_mdio_write(tp, 0x15, 0x02cc); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x02cd); + rtl8168_mdio_write(tp, 0x19, 0x4cc8); + rtl8168_mdio_write(tp, 0x15, 0x02ce); + rtl8168_mdio_write(tp, 0x19, 0x32d0); + rtl8168_mdio_write(tp, 0x15, 0x02cf); + rtl8168_mdio_write(tp, 0x19, 0x4cc0); + rtl8168_mdio_write(tp, 0x15, 0x02d0); + rtl8168_mdio_write(tp, 0x19, 0xc4d4); + rtl8168_mdio_write(tp, 0x15, 0x02d1); + rtl8168_mdio_write(tp, 0x19, 0x00f9); + rtl8168_mdio_write(tp, 0x15, 0x02d2); + rtl8168_mdio_write(tp, 0x19, 0xa51a); + rtl8168_mdio_write(tp, 0x15, 0x02d3); + rtl8168_mdio_write(tp, 0x19, 0x32d9); + rtl8168_mdio_write(tp, 0x15, 0x02d4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02d5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02d6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02d7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02d8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02d9); + rtl8168_mdio_write(tp, 0x19, 0x48b3); + rtl8168_mdio_write(tp, 0x15, 0x02da); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x02db); + rtl8168_mdio_write(tp, 0x19, 0x4823); + rtl8168_mdio_write(tp, 0x15, 0x02dc); + rtl8168_mdio_write(tp, 0x19, 0x4410); + rtl8168_mdio_write(tp, 0x15, 0x02dd); + rtl8168_mdio_write(tp, 0x19, 0xb630); + rtl8168_mdio_write(tp, 0x15, 0x02de); + rtl8168_mdio_write(tp, 0x19, 0x7dc8); + rtl8168_mdio_write(tp, 0x15, 0x02df); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x02e0); + rtl8168_mdio_write(tp, 0x19, 0x4c48); + rtl8168_mdio_write(tp, 0x15, 0x02e1); + rtl8168_mdio_write(tp, 0x19, 0x32e3); + rtl8168_mdio_write(tp, 0x15, 0x02e2); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x02e3); + rtl8168_mdio_write(tp, 0x19, 0x9bfa); + rtl8168_mdio_write(tp, 0x15, 
0x02e4); + rtl8168_mdio_write(tp, 0x19, 0x84ca); + rtl8168_mdio_write(tp, 0x15, 0x02e5); + rtl8168_mdio_write(tp, 0x19, 0x85f8); + rtl8168_mdio_write(tp, 0x15, 0x02e6); + rtl8168_mdio_write(tp, 0x19, 0x32ec); + rtl8168_mdio_write(tp, 0x15, 0x02e7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02e8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02e9); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02ea); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02eb); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02ec); + rtl8168_mdio_write(tp, 0x19, 0x48d4); + rtl8168_mdio_write(tp, 0x15, 0x02ed); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x02ee); + rtl8168_mdio_write(tp, 0x19, 0x4844); + rtl8168_mdio_write(tp, 0x15, 0x02ef); + rtl8168_mdio_write(tp, 0x19, 0x4420); + rtl8168_mdio_write(tp, 0x15, 0x02f0); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x02f1); + rtl8168_mdio_write(tp, 0x19, 0x7dc0); + rtl8168_mdio_write(tp, 0x15, 0x02f2); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x02f3); + rtl8168_mdio_write(tp, 0x19, 0x7c0b); + rtl8168_mdio_write(tp, 0x15, 0x02f4); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x02f5); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x02f6); + rtl8168_mdio_write(tp, 0x19, 0x9cfd); + rtl8168_mdio_write(tp, 0x15, 0x02f7); + rtl8168_mdio_write(tp, 0x19, 0xb616); + rtl8168_mdio_write(tp, 0x15, 0x02f8); + rtl8168_mdio_write(tp, 0x19, 0xc42b); + rtl8168_mdio_write(tp, 0x15, 0x02f9); + rtl8168_mdio_write(tp, 0x19, 0x00e0); + rtl8168_mdio_write(tp, 0x15, 0x02fa); + rtl8168_mdio_write(tp, 0x19, 0xc455); + rtl8168_mdio_write(tp, 0x15, 0x02fb); + rtl8168_mdio_write(tp, 0x19, 0x00b3); + rtl8168_mdio_write(tp, 0x15, 0x02fc); + rtl8168_mdio_write(tp, 0x19, 0xb20a); + rtl8168_mdio_write(tp, 0x15, 0x02fd); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x02fe); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x02ff); + rtl8168_mdio_write(tp, 0x19, 0x8204); + rtl8168_mdio_write(tp, 0x15, 0x0300); + rtl8168_mdio_write(tp, 0x19, 0x7c04); + rtl8168_mdio_write(tp, 0x15, 0x0301); + rtl8168_mdio_write(tp, 0x19, 0x7404); + rtl8168_mdio_write(tp, 0x15, 0x0302); + rtl8168_mdio_write(tp, 0x19, 0x32f3); + rtl8168_mdio_write(tp, 0x15, 0x0303); + rtl8168_mdio_write(tp, 0x19, 0x7c04); + rtl8168_mdio_write(tp, 0x15, 0x0304); + rtl8168_mdio_write(tp, 0x19, 0x7400); + rtl8168_mdio_write(tp, 0x15, 0x0305); + rtl8168_mdio_write(tp, 0x19, 0x32f3); + rtl8168_mdio_write(tp, 0x15, 0x0306); + rtl8168_mdio_write(tp, 0x19, 0xefed); + rtl8168_mdio_write(tp, 0x15, 0x0307); + rtl8168_mdio_write(tp, 0x19, 0x3342); + rtl8168_mdio_write(tp, 0x15, 0x0308); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0309); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030d); + rtl8168_mdio_write(tp, 0x19, 0x3006); + rtl8168_mdio_write(tp, 0x15, 0x030e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0310); + rtl8168_mdio_write(tp, 
0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0311); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0312); + rtl8168_mdio_write(tp, 0x19, 0xa207); + rtl8168_mdio_write(tp, 0x15, 0x0313); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x0314); + rtl8168_mdio_write(tp, 0x19, 0x3322); + rtl8168_mdio_write(tp, 0x15, 0x0315); + rtl8168_mdio_write(tp, 0x19, 0x4041); + rtl8168_mdio_write(tp, 0x15, 0x0316); + rtl8168_mdio_write(tp, 0x19, 0x7d07); + rtl8168_mdio_write(tp, 0x15, 0x0317); + rtl8168_mdio_write(tp, 0x19, 0x4502); + rtl8168_mdio_write(tp, 0x15, 0x0318); + rtl8168_mdio_write(tp, 0x19, 0x3322); + rtl8168_mdio_write(tp, 0x15, 0x0319); + rtl8168_mdio_write(tp, 0x19, 0x4c08); + rtl8168_mdio_write(tp, 0x15, 0x031a); + rtl8168_mdio_write(tp, 0x19, 0x3322); + rtl8168_mdio_write(tp, 0x15, 0x031b); + rtl8168_mdio_write(tp, 0x19, 0x7d80); + rtl8168_mdio_write(tp, 0x15, 0x031c); + rtl8168_mdio_write(tp, 0x19, 0x5180); + rtl8168_mdio_write(tp, 0x15, 0x031d); + rtl8168_mdio_write(tp, 0x19, 0x3320); + rtl8168_mdio_write(tp, 0x15, 0x031e); + rtl8168_mdio_write(tp, 0x19, 0x7d80); + rtl8168_mdio_write(tp, 0x15, 0x031f); + rtl8168_mdio_write(tp, 0x19, 0x5000); + rtl8168_mdio_write(tp, 0x15, 0x0320); + rtl8168_mdio_write(tp, 0x19, 0x7d07); + rtl8168_mdio_write(tp, 0x15, 0x0321); + rtl8168_mdio_write(tp, 0x19, 0x4402); + rtl8168_mdio_write(tp, 0x15, 0x0322); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0323); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x0324); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0325); + rtl8168_mdio_write(tp, 0x19, 0xb30c); + rtl8168_mdio_write(tp, 0x15, 0x0326); + rtl8168_mdio_write(tp, 0x19, 0xb206); + rtl8168_mdio_write(tp, 0x15, 0x0327); + rtl8168_mdio_write(tp, 0x19, 0xb103); + rtl8168_mdio_write(tp, 0x15, 0x0328); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0329); + rtl8168_mdio_write(tp, 0x19, 0x32f6); + rtl8168_mdio_write(tp, 0x15, 0x032a); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x032b); + rtl8168_mdio_write(tp, 0x19, 0x3352); + rtl8168_mdio_write(tp, 0x15, 0x032c); + rtl8168_mdio_write(tp, 0x19, 0xb103); + rtl8168_mdio_write(tp, 0x15, 0x032d); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x032e); + rtl8168_mdio_write(tp, 0x19, 0x336a); + rtl8168_mdio_write(tp, 0x15, 0x032f); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0330); + rtl8168_mdio_write(tp, 0x19, 0x3382); + rtl8168_mdio_write(tp, 0x15, 0x0331); + rtl8168_mdio_write(tp, 0x19, 0xb206); + rtl8168_mdio_write(tp, 0x15, 0x0332); + rtl8168_mdio_write(tp, 0x19, 0xb103); + rtl8168_mdio_write(tp, 0x15, 0x0333); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0334); + rtl8168_mdio_write(tp, 0x19, 0x3395); + rtl8168_mdio_write(tp, 0x15, 0x0335); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0336); + rtl8168_mdio_write(tp, 0x19, 0x33c6); + rtl8168_mdio_write(tp, 0x15, 0x0337); + rtl8168_mdio_write(tp, 0x19, 0xb103); + rtl8168_mdio_write(tp, 0x15, 0x0338); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0339); + rtl8168_mdio_write(tp, 0x19, 0x33d7); + rtl8168_mdio_write(tp, 0x15, 0x033a); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x033b); + rtl8168_mdio_write(tp, 0x19, 0x33f2); + rtl8168_mdio_write(tp, 0x15, 0x033c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + 
rtl8168_mdio_write(tp, 0x15, 0x033d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x033e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x033f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0340); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0341); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0342); + rtl8168_mdio_write(tp, 0x19, 0x49b5); + rtl8168_mdio_write(tp, 0x15, 0x0343); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x0344); + rtl8168_mdio_write(tp, 0x19, 0x4d00); + rtl8168_mdio_write(tp, 0x15, 0x0345); + rtl8168_mdio_write(tp, 0x19, 0x6880); + rtl8168_mdio_write(tp, 0x15, 0x0346); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0347); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x0348); + rtl8168_mdio_write(tp, 0x19, 0x4925); + rtl8168_mdio_write(tp, 0x15, 0x0349); + rtl8168_mdio_write(tp, 0x19, 0x403b); + rtl8168_mdio_write(tp, 0x15, 0x034a); + rtl8168_mdio_write(tp, 0x19, 0xa602); + rtl8168_mdio_write(tp, 0x15, 0x034b); + rtl8168_mdio_write(tp, 0x19, 0x402f); + rtl8168_mdio_write(tp, 0x15, 0x034c); + rtl8168_mdio_write(tp, 0x19, 0x4484); + rtl8168_mdio_write(tp, 0x15, 0x034d); + rtl8168_mdio_write(tp, 0x19, 0x40c8); + rtl8168_mdio_write(tp, 0x15, 0x034e); + rtl8168_mdio_write(tp, 0x19, 0x44c4); + rtl8168_mdio_write(tp, 0x15, 0x034f); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0350); + rtl8168_mdio_write(tp, 0x19, 0x00bd); + rtl8168_mdio_write(tp, 0x15, 0x0351); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x0352); + rtl8168_mdio_write(tp, 0x19, 0xc8ed); + rtl8168_mdio_write(tp, 0x15, 0x0353); + rtl8168_mdio_write(tp, 0x19, 0x00fc); + rtl8168_mdio_write(tp, 0x15, 0x0354); + rtl8168_mdio_write(tp, 0x19, 0x8221); + rtl8168_mdio_write(tp, 0x15, 0x0355); + rtl8168_mdio_write(tp, 0x19, 0xd11d); + rtl8168_mdio_write(tp, 0x15, 0x0356); + rtl8168_mdio_write(tp, 0x19, 0x001f); + rtl8168_mdio_write(tp, 0x15, 0x0357); + rtl8168_mdio_write(tp, 0x19, 0xde18); + rtl8168_mdio_write(tp, 0x15, 0x0358); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x0359); + rtl8168_mdio_write(tp, 0x19, 0x91f6); + rtl8168_mdio_write(tp, 0x15, 0x035a); + rtl8168_mdio_write(tp, 0x19, 0x3360); + rtl8168_mdio_write(tp, 0x15, 0x035b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x035c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x035d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x035e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x035f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0360); + rtl8168_mdio_write(tp, 0x19, 0x4bb6); + rtl8168_mdio_write(tp, 0x15, 0x0361); + rtl8168_mdio_write(tp, 0x19, 0x4064); + rtl8168_mdio_write(tp, 0x15, 0x0362); + rtl8168_mdio_write(tp, 0x19, 0x4b26); + rtl8168_mdio_write(tp, 0x15, 0x0363); + rtl8168_mdio_write(tp, 0x19, 0x4410); + rtl8168_mdio_write(tp, 0x15, 0x0364); + rtl8168_mdio_write(tp, 0x19, 0x4006); + rtl8168_mdio_write(tp, 0x15, 0x0365); + rtl8168_mdio_write(tp, 0x19, 0x4490); + rtl8168_mdio_write(tp, 0x15, 0x0366); + rtl8168_mdio_write(tp, 0x19, 0x6900); + rtl8168_mdio_write(tp, 0x15, 0x0367); + rtl8168_mdio_write(tp, 0x19, 0xb6a6); + rtl8168_mdio_write(tp, 0x15, 0x0368); + rtl8168_mdio_write(tp, 0x19, 0x9e02); + rtl8168_mdio_write(tp, 0x15, 
0x0369); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x036a); + rtl8168_mdio_write(tp, 0x19, 0xd11d); + rtl8168_mdio_write(tp, 0x15, 0x036b); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15, 0x036c); + rtl8168_mdio_write(tp, 0x19, 0xbb0f); + rtl8168_mdio_write(tp, 0x15, 0x036d); + rtl8168_mdio_write(tp, 0x19, 0x8102); + rtl8168_mdio_write(tp, 0x15, 0x036e); + rtl8168_mdio_write(tp, 0x19, 0x3371); + rtl8168_mdio_write(tp, 0x15, 0x036f); + rtl8168_mdio_write(tp, 0x19, 0xa21e); + rtl8168_mdio_write(tp, 0x15, 0x0370); + rtl8168_mdio_write(tp, 0x19, 0x33b6); + rtl8168_mdio_write(tp, 0x15, 0x0371); + rtl8168_mdio_write(tp, 0x19, 0x91f6); + rtl8168_mdio_write(tp, 0x15, 0x0372); + rtl8168_mdio_write(tp, 0x19, 0xc218); + rtl8168_mdio_write(tp, 0x15, 0x0373); + rtl8168_mdio_write(tp, 0x19, 0x00f4); + rtl8168_mdio_write(tp, 0x15, 0x0374); + rtl8168_mdio_write(tp, 0x19, 0x33b6); + rtl8168_mdio_write(tp, 0x15, 0x0375); + rtl8168_mdio_write(tp, 0x19, 0x32ec); + rtl8168_mdio_write(tp, 0x15, 0x0376); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0377); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0378); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0379); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x037a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x037b); + rtl8168_mdio_write(tp, 0x19, 0x4b97); + rtl8168_mdio_write(tp, 0x15, 0x037c); + rtl8168_mdio_write(tp, 0x19, 0x402b); + rtl8168_mdio_write(tp, 0x15, 0x037d); + rtl8168_mdio_write(tp, 0x19, 0x4b07); + rtl8168_mdio_write(tp, 0x15, 0x037e); + rtl8168_mdio_write(tp, 0x19, 0x4422); + rtl8168_mdio_write(tp, 0x15, 0x037f); + rtl8168_mdio_write(tp, 0x19, 0x6980); + rtl8168_mdio_write(tp, 0x15, 0x0380); + rtl8168_mdio_write(tp, 0x19, 0xb608); + rtl8168_mdio_write(tp, 0x15, 0x0381); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x0382); + rtl8168_mdio_write(tp, 0x19, 0xbc05); + rtl8168_mdio_write(tp, 0x15, 0x0383); + rtl8168_mdio_write(tp, 0x19, 0xc21c); + rtl8168_mdio_write(tp, 0x15, 0x0384); + rtl8168_mdio_write(tp, 0x19, 0x0032); + rtl8168_mdio_write(tp, 0x15, 0x0385); + rtl8168_mdio_write(tp, 0x19, 0xa1fb); + rtl8168_mdio_write(tp, 0x15, 0x0386); + rtl8168_mdio_write(tp, 0x19, 0x338d); + rtl8168_mdio_write(tp, 0x15, 0x0387); + rtl8168_mdio_write(tp, 0x19, 0x32ae); + rtl8168_mdio_write(tp, 0x15, 0x0388); + rtl8168_mdio_write(tp, 0x19, 0x330d); + rtl8168_mdio_write(tp, 0x15, 0x0389); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x038a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x038b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x038c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x038d); + rtl8168_mdio_write(tp, 0x19, 0x4b97); + rtl8168_mdio_write(tp, 0x15, 0x038e); + rtl8168_mdio_write(tp, 0x19, 0x6a08); + rtl8168_mdio_write(tp, 0x15, 0x038f); + rtl8168_mdio_write(tp, 0x19, 0x4b07); + rtl8168_mdio_write(tp, 0x15, 0x0390); + rtl8168_mdio_write(tp, 0x19, 0x40ac); + rtl8168_mdio_write(tp, 0x15, 0x0391); + rtl8168_mdio_write(tp, 0x19, 0x4445); + rtl8168_mdio_write(tp, 0x15, 0x0392); + rtl8168_mdio_write(tp, 0x19, 0x404e); + rtl8168_mdio_write(tp, 0x15, 0x0393); + rtl8168_mdio_write(tp, 0x19, 0x4461); + rtl8168_mdio_write(tp, 0x15, 0x0394); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x0395); + rtl8168_mdio_write(tp, 
0x19, 0x9c0a); + rtl8168_mdio_write(tp, 0x15, 0x0396); + rtl8168_mdio_write(tp, 0x19, 0x63da); + rtl8168_mdio_write(tp, 0x15, 0x0397); + rtl8168_mdio_write(tp, 0x19, 0x6f0c); + rtl8168_mdio_write(tp, 0x15, 0x0398); + rtl8168_mdio_write(tp, 0x19, 0x5440); + rtl8168_mdio_write(tp, 0x15, 0x0399); + rtl8168_mdio_write(tp, 0x19, 0x4b98); + rtl8168_mdio_write(tp, 0x15, 0x039a); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x039b); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x039c); + rtl8168_mdio_write(tp, 0x19, 0x4b08); + rtl8168_mdio_write(tp, 0x15, 0x039d); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x039e); + rtl8168_mdio_write(tp, 0x19, 0x33a5); + rtl8168_mdio_write(tp, 0x15, 0x039f); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x03a0); + rtl8168_mdio_write(tp, 0x19, 0x00e8); + rtl8168_mdio_write(tp, 0x15, 0x03a1); + rtl8168_mdio_write(tp, 0x19, 0x820e); + rtl8168_mdio_write(tp, 0x15, 0x03a2); + rtl8168_mdio_write(tp, 0x19, 0xa10d); + rtl8168_mdio_write(tp, 0x15, 0x03a3); + rtl8168_mdio_write(tp, 0x19, 0x9df1); + rtl8168_mdio_write(tp, 0x15, 0x03a4); + rtl8168_mdio_write(tp, 0x19, 0x33af); + rtl8168_mdio_write(tp, 0x15, 0x03a5); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x03a6); + rtl8168_mdio_write(tp, 0x19, 0x00f9); + rtl8168_mdio_write(tp, 0x15, 0x03a7); + rtl8168_mdio_write(tp, 0x19, 0xc017); + rtl8168_mdio_write(tp, 0x15, 0x03a8); + rtl8168_mdio_write(tp, 0x19, 0x0007); + rtl8168_mdio_write(tp, 0x15, 0x03a9); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x03aa); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x03ab); + rtl8168_mdio_write(tp, 0x19, 0xa104); + rtl8168_mdio_write(tp, 0x15, 0x03ac); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x03ad); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x03ae); + rtl8168_mdio_write(tp, 0x19, 0x9df7); + rtl8168_mdio_write(tp, 0x15, 0x03af); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x03b0); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x03b1); + rtl8168_mdio_write(tp, 0x19, 0x33b6); + rtl8168_mdio_write(tp, 0x15, 0x03b2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b6); + rtl8168_mdio_write(tp, 0x19, 0x55af); + rtl8168_mdio_write(tp, 0x15, 0x03b7); + rtl8168_mdio_write(tp, 0x19, 0x7ff0); + rtl8168_mdio_write(tp, 0x15, 0x03b8); + rtl8168_mdio_write(tp, 0x19, 0x6ff0); + rtl8168_mdio_write(tp, 0x15, 0x03b9); + rtl8168_mdio_write(tp, 0x19, 0x4bb9); + rtl8168_mdio_write(tp, 0x15, 0x03ba); + rtl8168_mdio_write(tp, 0x19, 0x6a80); + rtl8168_mdio_write(tp, 0x15, 0x03bb); + rtl8168_mdio_write(tp, 0x19, 0x4b29); + rtl8168_mdio_write(tp, 0x15, 0x03bc); + rtl8168_mdio_write(tp, 0x19, 0x4041); + rtl8168_mdio_write(tp, 0x15, 0x03bd); + rtl8168_mdio_write(tp, 0x19, 0x440a); + rtl8168_mdio_write(tp, 0x15, 0x03be); + rtl8168_mdio_write(tp, 0x19, 0x4029); + rtl8168_mdio_write(tp, 0x15, 0x03bf); + rtl8168_mdio_write(tp, 0x19, 0x4418); + rtl8168_mdio_write(tp, 0x15, 0x03c0); + rtl8168_mdio_write(tp, 0x19, 0x4090); + rtl8168_mdio_write(tp, 0x15, 0x03c1); + rtl8168_mdio_write(tp, 0x19, 0x4438); + 
rtl8168_mdio_write(tp, 0x15, 0x03c2); + rtl8168_mdio_write(tp, 0x19, 0x40c4); + rtl8168_mdio_write(tp, 0x15, 0x03c3); + rtl8168_mdio_write(tp, 0x19, 0x447b); + rtl8168_mdio_write(tp, 0x15, 0x03c4); + rtl8168_mdio_write(tp, 0x19, 0xb6c4); + rtl8168_mdio_write(tp, 0x15, 0x03c5); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x03c6); + rtl8168_mdio_write(tp, 0x19, 0x9bfe); + rtl8168_mdio_write(tp, 0x15, 0x03c7); + rtl8168_mdio_write(tp, 0x19, 0x33cc); + rtl8168_mdio_write(tp, 0x15, 0x03c8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03c9); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ca); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03cb); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03cc); + rtl8168_mdio_write(tp, 0x19, 0x542f); + rtl8168_mdio_write(tp, 0x15, 0x03cd); + rtl8168_mdio_write(tp, 0x19, 0x499a); + rtl8168_mdio_write(tp, 0x15, 0x03ce); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x03cf); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03d0); + rtl8168_mdio_write(tp, 0x19, 0x490a); + rtl8168_mdio_write(tp, 0x15, 0x03d1); + rtl8168_mdio_write(tp, 0x19, 0x405e); + rtl8168_mdio_write(tp, 0x15, 0x03d2); + rtl8168_mdio_write(tp, 0x19, 0x44f8); + rtl8168_mdio_write(tp, 0x15, 0x03d3); + rtl8168_mdio_write(tp, 0x19, 0x6b00); + rtl8168_mdio_write(tp, 0x15, 0x03d4); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x03d5); + rtl8168_mdio_write(tp, 0x19, 0x0028); + rtl8168_mdio_write(tp, 0x15, 0x03d6); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x03d7); + rtl8168_mdio_write(tp, 0x19, 0xbd27); + rtl8168_mdio_write(tp, 0x15, 0x03d8); + rtl8168_mdio_write(tp, 0x19, 0x9cfc); + rtl8168_mdio_write(tp, 0x15, 0x03d9); + rtl8168_mdio_write(tp, 0x19, 0xc639); + rtl8168_mdio_write(tp, 0x15, 0x03da); + rtl8168_mdio_write(tp, 0x19, 0x000f); + rtl8168_mdio_write(tp, 0x15, 0x03db); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x03dc); + rtl8168_mdio_write(tp, 0x19, 0x7c01); + rtl8168_mdio_write(tp, 0x15, 0x03dd); + rtl8168_mdio_write(tp, 0x19, 0x4c01); + rtl8168_mdio_write(tp, 0x15, 0x03de); + rtl8168_mdio_write(tp, 0x19, 0x9af6); + rtl8168_mdio_write(tp, 0x15, 0x03df); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03e0); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03e1); + rtl8168_mdio_write(tp, 0x19, 0x4470); + rtl8168_mdio_write(tp, 0x15, 0x03e2); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03e3); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03e4); + rtl8168_mdio_write(tp, 0x19, 0x33d4); + rtl8168_mdio_write(tp, 0x15, 0x03e5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03e6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03e7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03e8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03e9); + rtl8168_mdio_write(tp, 0x19, 0x49bb); + rtl8168_mdio_write(tp, 0x15, 0x03ea); + rtl8168_mdio_write(tp, 0x19, 0x4478); + rtl8168_mdio_write(tp, 0x15, 0x03eb); + rtl8168_mdio_write(tp, 0x19, 0x492b); + rtl8168_mdio_write(tp, 0x15, 0x03ec); + rtl8168_mdio_write(tp, 0x19, 0x6b80); + rtl8168_mdio_write(tp, 0x15, 0x03ed); + rtl8168_mdio_write(tp, 0x19, 0x7c01); + rtl8168_mdio_write(tp, 0x15, 
0x03ee); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x03ef); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x03f0); + rtl8168_mdio_write(tp, 0x19, 0x000d); + rtl8168_mdio_write(tp, 0x15, 0x03f1); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x03f2); + rtl8168_mdio_write(tp, 0x19, 0xbd0c); + rtl8168_mdio_write(tp, 0x15, 0x03f3); + rtl8168_mdio_write(tp, 0x19, 0xc428); + rtl8168_mdio_write(tp, 0x15, 0x03f4); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x03f5); + rtl8168_mdio_write(tp, 0x19, 0x9afa); + rtl8168_mdio_write(tp, 0x15, 0x03f6); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03f7); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03f8); + rtl8168_mdio_write(tp, 0x19, 0x4470); + rtl8168_mdio_write(tp, 0x15, 0x03f9); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03fa); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03fb); + rtl8168_mdio_write(tp, 0x19, 0x33ef); + rtl8168_mdio_write(tp, 0x15, 0x03fc); + rtl8168_mdio_write(tp, 0x19, 0x3342); + rtl8168_mdio_write(tp, 0x15, 0x03fd); + rtl8168_mdio_write(tp, 0x19, 0x330d); + rtl8168_mdio_write(tp, 0x15, 0x03fe); + rtl8168_mdio_write(tp, 0x19, 0x32ae); + rtl8168_mdio_write(tp, 0x15, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0xf602); + rtl8168_mdio_write(tp, 0x06, 0x0112); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x1f02); + rtl8168_mdio_write(tp, 0x06, 0x012c); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x3c02); + rtl8168_mdio_write(tp, 0x06, 0x0156); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x6d02); + rtl8168_mdio_write(tp, 0x06, 0x809d); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xc702); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd105); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xcd02); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 
0x06, 0xca02); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd105); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xd002); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd481); + rtl8168_mdio_write(tp, 0x06, 0xc9e4); + rtl8168_mdio_write(tp, 0x06, 0x8b90); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x91d4); + rtl8168_mdio_write(tp, 0x06, 0x81b8); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x92e5); + rtl8168_mdio_write(tp, 0x06, 0x8b93); + rtl8168_mdio_write(tp, 0x06, 0xbf8b); + rtl8168_mdio_write(tp, 0x06, 0x88ec); + rtl8168_mdio_write(tp, 0x06, 0x0019); + rtl8168_mdio_write(tp, 0x06, 0xa98b); + rtl8168_mdio_write(tp, 0x06, 0x90f9); + rtl8168_mdio_write(tp, 0x06, 0xeeff); + rtl8168_mdio_write(tp, 0x06, 0xf600); + rtl8168_mdio_write(tp, 0x06, 0xeeff); + rtl8168_mdio_write(tp, 0x06, 0xf7fc); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xc102); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xc402); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x201a); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x824b); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x1902); + rtl8168_mdio_write(tp, 0x06, 0x2c9d); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x9602); + rtl8168_mdio_write(tp, 0x06, 0x0473); + rtl8168_mdio_write(tp, 0x06, 0x022e); + rtl8168_mdio_write(tp, 0x06, 0x3902); + rtl8168_mdio_write(tp, 0x06, 0x044d); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x210b); + rtl8168_mdio_write(tp, 0x06, 0xf621); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x0416); + rtl8168_mdio_write(tp, 0x06, 0x021b); + rtl8168_mdio_write(tp, 0x06, 0xa4e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x22e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2305); + rtl8168_mdio_write(tp, 0x06, 0xf623); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x24e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2505); + rtl8168_mdio_write(tp, 0x06, 0xf625); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x26e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xdae0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad27); + 
rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x27e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x5cfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad21); + rtl8168_mdio_write(tp, 0x06, 0x57e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xc059); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b3c); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e44); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x3cad); + rtl8168_mdio_write(tp, 0x06, 0x211d); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x84f7); + rtl8168_mdio_write(tp, 0x06, 0x29e5); + rtl8168_mdio_write(tp, 0x06, 0x8b84); + rtl8168_mdio_write(tp, 0x06, 0xac27); + rtl8168_mdio_write(tp, 0x06, 0x0dac); + rtl8168_mdio_write(tp, 0x06, 0x2605); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x7fae); + rtl8168_mdio_write(tp, 0x06, 0x2b02); + rtl8168_mdio_write(tp, 0x06, 0x2c23); + rtl8168_mdio_write(tp, 0x06, 0xae26); + rtl8168_mdio_write(tp, 0x06, 0x022c); + rtl8168_mdio_write(tp, 0x06, 0x41ae); + rtl8168_mdio_write(tp, 0x06, 0x21e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x18e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0x58fc); + rtl8168_mdio_write(tp, 0x06, 0xe4ff); + rtl8168_mdio_write(tp, 0x06, 0xf7d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x2eee); + rtl8168_mdio_write(tp, 0x06, 0x0232); + rtl8168_mdio_write(tp, 0x06, 0x0ad1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x82e8); + rtl8168_mdio_write(tp, 0x06, 0x0232); + rtl8168_mdio_write(tp, 0x06, 0x0a02); + rtl8168_mdio_write(tp, 0x06, 0x2bdf); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x04d0); + rtl8168_mdio_write(tp, 0x06, 0x0202); + rtl8168_mdio_write(tp, 0x06, 0x1e97); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2228); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xd302); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd10c); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xd602); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd104); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xd902); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xe802); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xe0ff); + rtl8168_mdio_write(tp, 0x06, 0xf768); + rtl8168_mdio_write(tp, 0x06, 0x03e4); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xd004); + rtl8168_mdio_write(tp, 0x06, 0x0228); + rtl8168_mdio_write(tp, 0x06, 0x7a04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0xe234); + rtl8168_mdio_write(tp, 0x06, 0xe1e2); + rtl8168_mdio_write(tp, 0x06, 
0x35f6); + rtl8168_mdio_write(tp, 0x06, 0x2be4); + rtl8168_mdio_write(tp, 0x06, 0xe234); + rtl8168_mdio_write(tp, 0x06, 0xe5e2); + rtl8168_mdio_write(tp, 0x06, 0x35fc); + rtl8168_mdio_write(tp, 0x06, 0x05f8); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xf72b); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xfc05); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69ac); + rtl8168_mdio_write(tp, 0x06, 0x1b4c); + rtl8168_mdio_write(tp, 0x06, 0xbf2e); + rtl8168_mdio_write(tp, 0x06, 0x3002); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 0x06, 0xef01); + rtl8168_mdio_write(tp, 0x06, 0xe28a); + rtl8168_mdio_write(tp, 0x06, 0x76e4); + rtl8168_mdio_write(tp, 0x06, 0x8a76); + rtl8168_mdio_write(tp, 0x06, 0x1f12); + rtl8168_mdio_write(tp, 0x06, 0x9e3a); + rtl8168_mdio_write(tp, 0x06, 0xef12); + rtl8168_mdio_write(tp, 0x06, 0x5907); + rtl8168_mdio_write(tp, 0x06, 0x9f12); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf721); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40d0); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x287a); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x34fc); + rtl8168_mdio_write(tp, 0x06, 0xa000); + rtl8168_mdio_write(tp, 0x06, 0x1002); + rtl8168_mdio_write(tp, 0x06, 0x2dc3); + rtl8168_mdio_write(tp, 0x06, 0x022e); + rtl8168_mdio_write(tp, 0x06, 0x21e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf621); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40ae); + rtl8168_mdio_write(tp, 0x06, 0x0fbf); + rtl8168_mdio_write(tp, 0x06, 0x3fa5); + rtl8168_mdio_write(tp, 0x06, 0x0231); + rtl8168_mdio_write(tp, 0x06, 0x6cbf); + rtl8168_mdio_write(tp, 0x06, 0x3fa2); + rtl8168_mdio_write(tp, 0x06, 0x0231); + rtl8168_mdio_write(tp, 0x06, 0x6c02); + rtl8168_mdio_write(tp, 0x06, 0x2dc3); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0xe2f4); + rtl8168_mdio_write(tp, 0x06, 0xe1e2); + rtl8168_mdio_write(tp, 0x06, 0xf5e4); + rtl8168_mdio_write(tp, 0x06, 0x8a78); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0x79ee); + rtl8168_mdio_write(tp, 0x06, 0xe2f4); + rtl8168_mdio_write(tp, 0x06, 0xd8ee); + rtl8168_mdio_write(tp, 0x06, 0xe2f5); + rtl8168_mdio_write(tp, 0x06, 0x20fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2065); + rtl8168_mdio_write(tp, 0x06, 0xd200); + rtl8168_mdio_write(tp, 0x06, 0xbf2e); + rtl8168_mdio_write(tp, 0x06, 0xe802); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 0x06, 0x1e21); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xdf02); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 0x06, 0x0c11); + rtl8168_mdio_write(tp, 0x06, 0x1e21); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xe202); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 
0x06, 0x0c12); + rtl8168_mdio_write(tp, 0x06, 0x1e21); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xe502); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 0x06, 0x0c13); + rtl8168_mdio_write(tp, 0x06, 0x1e21); + rtl8168_mdio_write(tp, 0x06, 0xbf1f); + rtl8168_mdio_write(tp, 0x06, 0x5302); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 0x06, 0x0c14); + rtl8168_mdio_write(tp, 0x06, 0x1e21); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xeb02); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 0x06, 0x0c16); + rtl8168_mdio_write(tp, 0x06, 0x1e21); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0xe01f); + rtl8168_mdio_write(tp, 0x06, 0x029e); + rtl8168_mdio_write(tp, 0x06, 0x22e6); + rtl8168_mdio_write(tp, 0x06, 0x83e0); + rtl8168_mdio_write(tp, 0x06, 0xad31); + rtl8168_mdio_write(tp, 0x06, 0x14ad); + rtl8168_mdio_write(tp, 0x06, 0x3011); + rtl8168_mdio_write(tp, 0x06, 0xef02); + rtl8168_mdio_write(tp, 0x06, 0x580c); + rtl8168_mdio_write(tp, 0x06, 0x9e07); + rtl8168_mdio_write(tp, 0x06, 0xad36); + rtl8168_mdio_write(tp, 0x06, 0x085a); + rtl8168_mdio_write(tp, 0x06, 0x309f); + rtl8168_mdio_write(tp, 0x06, 0x04d1); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0x02d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x82dc); + rtl8168_mdio_write(tp, 0x06, 0x0232); + rtl8168_mdio_write(tp, 0x06, 0x0aef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x0400); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0x77e1); + rtl8168_mdio_write(tp, 0x06, 0x4010); + rtl8168_mdio_write(tp, 0x06, 0xe150); + rtl8168_mdio_write(tp, 0x06, 0x32e1); + rtl8168_mdio_write(tp, 0x06, 0x5030); + rtl8168_mdio_write(tp, 0x06, 0xe144); + rtl8168_mdio_write(tp, 0x06, 0x74e1); + rtl8168_mdio_write(tp, 0x06, 0x44bb); + rtl8168_mdio_write(tp, 0x06, 0xe2d2); + rtl8168_mdio_write(tp, 0x06, 0x40e0); + rtl8168_mdio_write(tp, 0x06, 0x2cfc); + rtl8168_mdio_write(tp, 0x06, 0xe2cc); + rtl8168_mdio_write(tp, 0x06, 0xcce2); + rtl8168_mdio_write(tp, 0x06, 0x00cc); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 0x99e0); + rtl8168_mdio_write(tp, 0x06, 0x3688); + rtl8168_mdio_write(tp, 0x06, 0xe036); + rtl8168_mdio_write(tp, 0x06, 0x99e1); + rtl8168_mdio_write(tp, 0x06, 0x40dd); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x05, 0xe142); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x05, 0xe140); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + gphy_val |= BIT_2; + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + rtl8168_mdio_write(tp, 0x1f, 0x0000); +} + +static void +rtl8168_set_phy_mcu_8168evl_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + unsigned int gphy_val,i; + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + 
+    rtl8168_mdio_write(tp, 0x00, 0x1800);
+    gphy_val = rtl8168_mdio_read(tp, 0x15);
+    gphy_val &= ~(BIT_12);
+    rtl8168_mdio_write(tp, 0x15, gphy_val);
+    rtl8168_mdio_write(tp, 0x00, 0x4800);
+    rtl8168_mdio_write(tp, 0x1f, 0x0007);
+    rtl8168_mdio_write(tp, 0x1e, 0x002f);
+    for (i = 0; i < 1000; i++) {
+        udelay(100);
+        gphy_val = rtl8168_mdio_read(tp, 0x1c);
+        if ((gphy_val & 0x0080) == 0x0080)
+            break;
+    }
+    rtl8168_mdio_write(tp, 0x1f, 0x0000);
+    rtl8168_mdio_write(tp, 0x00, 0x1800);
+    rtl8168_mdio_write(tp, 0x1f, 0x0007);
+    rtl8168_mdio_write(tp, 0x1e, 0x0023);
+    for (i = 0; i < 200; i++) {
+        udelay(100);
+        gphy_val = rtl8168_mdio_read(tp, 0x17);
+        if (!(gphy_val & 0x0001))
+            break;
+    }
+    rtl8168_mdio_write(tp, 0x1f, 0x0005);
+    rtl8168_mdio_write(tp, 0x05, 0xfff6);
+    rtl8168_mdio_write(tp, 0x06, 0x0080);
+    rtl8168_mdio_write(tp, 0x1f, 0x0007);
+    rtl8168_mdio_write(tp, 0x1e, 0x0023);
+    rtl8168_mdio_write(tp, 0x16, 0x0306);
+    rtl8168_mdio_write(tp, 0x16, 0x0307);
+    rtl8168_mdio_write(tp, 0x15, 0x00AF);
+    rtl8168_mdio_write(tp, 0x19, 0x4060);
+    rtl8168_mdio_write(tp, 0x15, 0x00B0);
+    rtl8168_mdio_write(tp, 0x19, 0x7800);
+    rtl8168_mdio_write(tp, 0x15, 0x00B1);
+    rtl8168_mdio_write(tp, 0x19, 0x7e00);
+    rtl8168_mdio_write(tp, 0x15, 0x00B2);
+    rtl8168_mdio_write(tp, 0x19, 0x72B0);
+    rtl8168_mdio_write(tp, 0x15, 0x00B3);
+    rtl8168_mdio_write(tp, 0x19, 0x7F00);
+    rtl8168_mdio_write(tp, 0x15, 0x00B4);
+    rtl8168_mdio_write(tp, 0x19, 0x73B0);
+    rtl8168_mdio_write(tp, 0x15, 0x0101);
+    rtl8168_mdio_write(tp, 0x19, 0x0005);
+    rtl8168_mdio_write(tp, 0x15, 0x0103);
+    rtl8168_mdio_write(tp, 0x19, 0x0003);
+    rtl8168_mdio_write(tp, 0x15, 0x0105);
+    rtl8168_mdio_write(tp, 0x19, 0x30FD);
+    rtl8168_mdio_write(tp, 0x15, 0x0106);
+    rtl8168_mdio_write(tp, 0x19, 0x9DF7);
+    rtl8168_mdio_write(tp, 0x15, 0x0107);
+    rtl8168_mdio_write(tp, 0x19, 0x30C6);
+    rtl8168_mdio_write(tp, 0x15, 0x0098);
+    rtl8168_mdio_write(tp, 0x19, 0x7c0b);
+    rtl8168_mdio_write(tp, 0x15, 0x0099);
+    rtl8168_mdio_write(tp, 0x19, 0x6c0b);
+    rtl8168_mdio_write(tp, 0x15, 0x00eb);
+    rtl8168_mdio_write(tp, 0x19, 0x6c0b);
+    rtl8168_mdio_write(tp, 0x15, 0x00f8);
+    rtl8168_mdio_write(tp, 0x19, 0x6f0b);
+    rtl8168_mdio_write(tp, 0x15, 0x00fe);
+    rtl8168_mdio_write(tp, 0x19, 0x6f0f);
+    rtl8168_mdio_write(tp, 0x15, 0x00db);
+    rtl8168_mdio_write(tp, 0x19, 0x6f09);
+    rtl8168_mdio_write(tp, 0x15, 0x00dc);
+    rtl8168_mdio_write(tp, 0x19, 0xaefd);
+    rtl8168_mdio_write(tp, 0x15, 0x00dd);
+    rtl8168_mdio_write(tp, 0x19, 0x6f0b);
+    rtl8168_mdio_write(tp, 0x15, 0x00de);
+    rtl8168_mdio_write(tp, 0x19, 0xc60b);
+    rtl8168_mdio_write(tp, 0x15, 0x00df);
+    rtl8168_mdio_write(tp, 0x19, 0x00fa);
+    rtl8168_mdio_write(tp, 0x15, 0x00e0);
+    rtl8168_mdio_write(tp, 0x19, 0x30e1);
+    rtl8168_mdio_write(tp, 0x15, 0x020c);
+    rtl8168_mdio_write(tp, 0x19, 0x3224);
+    rtl8168_mdio_write(tp, 0x15, 0x020e);
+    rtl8168_mdio_write(tp, 0x19, 0x9813);
+    rtl8168_mdio_write(tp, 0x15, 0x020f);
+    rtl8168_mdio_write(tp, 0x19, 0x7801);
+    rtl8168_mdio_write(tp, 0x15, 0x0210);
+    rtl8168_mdio_write(tp, 0x19, 0x930f);
+    rtl8168_mdio_write(tp, 0x15, 0x0211);
+    rtl8168_mdio_write(tp, 0x19, 0x9206);
+    rtl8168_mdio_write(tp, 0x15, 0x0212);
+    rtl8168_mdio_write(tp, 0x19, 0x4002);
+    rtl8168_mdio_write(tp, 0x15, 0x0213);
+    rtl8168_mdio_write(tp, 0x19, 0x7800);
+    rtl8168_mdio_write(tp, 0x15, 0x0214);
+    rtl8168_mdio_write(tp, 0x19, 0x588f);
+    rtl8168_mdio_write(tp, 0x15, 0x0215);
+    rtl8168_mdio_write(tp, 0x19, 0x5520);
+    rtl8168_mdio_write(tp, 0x15, 0x0216);
+    rtl8168_mdio_write(tp, 0x19, 0x3224);
+    rtl8168_mdio_write(tp, 0x15, 0x0217);
+    rtl8168_mdio_write(tp, 0x19, 0x4002);
+    rtl8168_mdio_write(tp, 0x15, 0x0218);
+    rtl8168_mdio_write(tp, 0x19, 0x7800);
+    rtl8168_mdio_write(tp, 0x15, 0x0219);
+    rtl8168_mdio_write(tp, 0x19, 0x588d);
+    rtl8168_mdio_write(tp, 0x15, 0x021a);
+    rtl8168_mdio_write(tp, 0x19, 0x5540);
+    rtl8168_mdio_write(tp, 0x15, 0x021b);
+    rtl8168_mdio_write(tp, 0x19, 0x9e03);
+    rtl8168_mdio_write(tp, 0x15, 0x021c);
+    rtl8168_mdio_write(tp, 0x19, 0x7c40);
+    rtl8168_mdio_write(tp, 0x15, 0x021d);
+    rtl8168_mdio_write(tp, 0x19, 0x6840);
+    rtl8168_mdio_write(tp, 0x15, 0x021e);
+    rtl8168_mdio_write(tp, 0x19, 0x3224);
+    rtl8168_mdio_write(tp, 0x15, 0x021f);
+    rtl8168_mdio_write(tp, 0x19, 0x4002);
+    rtl8168_mdio_write(tp, 0x15, 0x0220);
+    rtl8168_mdio_write(tp, 0x19, 0x3224);
+    rtl8168_mdio_write(tp, 0x15, 0x0221);
+    rtl8168_mdio_write(tp, 0x19, 0x9e03);
+    rtl8168_mdio_write(tp, 0x15, 0x0222);
+    rtl8168_mdio_write(tp, 0x19, 0x7c40);
+    rtl8168_mdio_write(tp, 0x15, 0x0223);
+    rtl8168_mdio_write(tp, 0x19, 0x6840);
+    rtl8168_mdio_write(tp, 0x15, 0x0224);
+    rtl8168_mdio_write(tp, 0x19, 0x7800);
+    rtl8168_mdio_write(tp, 0x15, 0x0225);
+    rtl8168_mdio_write(tp, 0x19, 0x3231);
+    rtl8168_mdio_write(tp, 0x15, 0x0000);
+    rtl8168_mdio_write(tp, 0x16, 0x0306);
+    rtl8168_mdio_write(tp, 0x16, 0x0300);
+    rtl8168_mdio_write(tp, 0x1f, 0x0002);
+    rtl8168_mdio_write(tp, 0x1f, 0x0000);
+    rtl8168_mdio_write(tp, 0x1f, 0x0000);
+    rtl8168_mdio_write(tp, 0x17, 0x2160);
+    rtl8168_mdio_write(tp, 0x1f, 0x0007);
+    rtl8168_mdio_write(tp, 0x1e, 0x0040);
+    rtl8168_mdio_write(tp, 0x18, 0x0004);
+    if (pdev->subsystem_vendor == 0x144d &&
+        pdev->subsystem_device == 0xc0a6) {
+        rtl8168_mdio_write(tp, 0x18, 0x0724);
+        rtl8168_mdio_write(tp, 0x19, 0xfe00);
+        rtl8168_mdio_write(tp, 0x18, 0x0734);
+        rtl8168_mdio_write(tp, 0x19, 0xfd00);
+        rtl8168_mdio_write(tp, 0x18, 0x1824);
+        rtl8168_mdio_write(tp, 0x19, 0xfc00);
+        rtl8168_mdio_write(tp, 0x18, 0x1834);
+        rtl8168_mdio_write(tp, 0x19, 0xfd00);
+    }
+    rtl8168_mdio_write(tp, 0x18, 0x09d4);
+    rtl8168_mdio_write(tp, 0x19, 0x4000);
+    rtl8168_mdio_write(tp, 0x18, 0x09e4);
+    rtl8168_mdio_write(tp, 0x19, 0x0800);
+    rtl8168_mdio_write(tp, 0x18, 0x09f4);
+    rtl8168_mdio_write(tp, 0x19, 0xff00);
+    rtl8168_mdio_write(tp, 0x18, 0x0a04);
+    rtl8168_mdio_write(tp, 0x19, 0x4000);
+    rtl8168_mdio_write(tp, 0x18, 0x0a14);
+    rtl8168_mdio_write(tp, 0x19, 0x0c00);
+    rtl8168_mdio_write(tp, 0x18, 0x0a24);
+    rtl8168_mdio_write(tp, 0x19, 0xff00);
+    rtl8168_mdio_write(tp, 0x18, 0x0a74);
+    rtl8168_mdio_write(tp, 0x19, 0xf600);
+    rtl8168_mdio_write(tp, 0x18, 0x1a24);
+    rtl8168_mdio_write(tp, 0x19, 0x7d00);
+    rtl8168_mdio_write(tp, 0x18, 0x1a64);
+    rtl8168_mdio_write(tp, 0x19, 0x0500);
+    rtl8168_mdio_write(tp, 0x18, 0x1a74);
+    rtl8168_mdio_write(tp, 0x19, 0x9500);
+    rtl8168_mdio_write(tp, 0x18, 0x1a84);
+    rtl8168_mdio_write(tp, 0x19, 0x8000);
+    rtl8168_mdio_write(tp, 0x18, 0x1a94);
+    rtl8168_mdio_write(tp, 0x19, 0x7d00);
+    rtl8168_mdio_write(tp, 0x18, 0x1aa4);
+    rtl8168_mdio_write(tp, 0x19, 0x9600);
+    rtl8168_mdio_write(tp, 0x18, 0x1ac4);
+    rtl8168_mdio_write(tp, 0x19, 0x4000);
+    rtl8168_mdio_write(tp, 0x18, 0x1ad4);
+    rtl8168_mdio_write(tp, 0x19, 0x0800);
+    rtl8168_mdio_write(tp, 0x18, 0x1af4);
+    rtl8168_mdio_write(tp, 0x19, 0xc400);
+    rtl8168_mdio_write(tp, 0x18, 0x1b04);
+    rtl8168_mdio_write(tp, 0x19, 0x4000);
+    rtl8168_mdio_write(tp, 0x18, 0x1b14);
+    rtl8168_mdio_write(tp, 0x19, 0x0800);
+    rtl8168_mdio_write(tp, 0x18, 0x1b24);
+    rtl8168_mdio_write(tp, 0x19, 0xfd00);
+    rtl8168_mdio_write(tp, 0x18, 
0x1b34); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x1b44); + rtl8168_mdio_write(tp, 0x19, 0x0400); + rtl8168_mdio_write(tp, 0x18, 0x1b94); + rtl8168_mdio_write(tp, 0x19, 0xf100); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2100); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0040); + rtl8168_mdio_write(tp, 0x18, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0xf602); + rtl8168_mdio_write(tp, 0x06, 0x0115); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x2202); + rtl8168_mdio_write(tp, 0x06, 0x80a0); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x3f02); + rtl8168_mdio_write(tp, 0x06, 0x0159); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0xbd02); + rtl8168_mdio_write(tp, 0x06, 0x80da); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xd481); + rtl8168_mdio_write(tp, 0x06, 0xd2e4); + rtl8168_mdio_write(tp, 0x06, 0x8b92); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x93d1); + rtl8168_mdio_write(tp, 0x06, 0x03bf); + rtl8168_mdio_write(tp, 0x06, 0x859e); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23d1); + rtl8168_mdio_write(tp, 0x06, 0x02bf); + rtl8168_mdio_write(tp, 0x06, 0x85a1); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23ee); + rtl8168_mdio_write(tp, 0x06, 0x8608); + rtl8168_mdio_write(tp, 0x06, 0x03ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x60ee); + rtl8168_mdio_write(tp, 0x06, 0x8610); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8611); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x07ee); + rtl8168_mdio_write(tp, 0x06, 0x8abf); + rtl8168_mdio_write(tp, 0x06, 0x73ee); + rtl8168_mdio_write(tp, 0x06, 0x8a95); + rtl8168_mdio_write(tp, 0x06, 0x02bf); + rtl8168_mdio_write(tp, 0x06, 0x8b88); + rtl8168_mdio_write(tp, 0x06, 0xec00); + rtl8168_mdio_write(tp, 0x06, 0x19a9); + rtl8168_mdio_write(tp, 0x06, 0x8b90); + rtl8168_mdio_write(tp, 0x06, 0xf9ee); + rtl8168_mdio_write(tp, 0x06, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xfed1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 
0x06, 0x8595); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23d1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8598); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x2304); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8a); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x14ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8a); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x1f9a); + rtl8168_mdio_write(tp, 0x06, 0xe0e4); + rtl8168_mdio_write(tp, 0x06, 0x26e1); + rtl8168_mdio_write(tp, 0x06, 0xe427); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x2623); + rtl8168_mdio_write(tp, 0x06, 0xe5e4); + rtl8168_mdio_write(tp, 0x06, 0x27fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8dad); + rtl8168_mdio_write(tp, 0x06, 0x2014); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8d00); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0x5a78); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x0902); + rtl8168_mdio_write(tp, 0x06, 0x05db); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x7b02); + rtl8168_mdio_write(tp, 0x06, 0x3231); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x1df6); + rtl8168_mdio_write(tp, 0x06, 0x20e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x5c02); + rtl8168_mdio_write(tp, 0x06, 0x2bcb); + rtl8168_mdio_write(tp, 0x06, 0x022d); + rtl8168_mdio_write(tp, 0x06, 0x2902); + rtl8168_mdio_write(tp, 0x06, 0x03b4); + rtl8168_mdio_write(tp, 0x06, 0x0285); + rtl8168_mdio_write(tp, 0x06, 0x6402); + rtl8168_mdio_write(tp, 0x06, 0x2eca); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0xcd02); + rtl8168_mdio_write(tp, 0x06, 0x046f); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x210b); + rtl8168_mdio_write(tp, 0x06, 0xf621); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x8520); + rtl8168_mdio_write(tp, 0x06, 0x021b); + rtl8168_mdio_write(tp, 0x06, 0xe8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x22e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2308); + rtl8168_mdio_write(tp, 0x06, 0xf623); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x311c); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2405); + rtl8168_mdio_write(tp, 0x06, 0xf624); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x25e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2608); + 
rtl8168_mdio_write(tp, 0x06, 0xf626); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x2df5); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2705); + rtl8168_mdio_write(tp, 0x06, 0xf627); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x037a); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x65d2); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x2fe9); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf61e); + rtl8168_mdio_write(tp, 0x06, 0x21bf); + rtl8168_mdio_write(tp, 0x06, 0x2ff5); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf60c); + rtl8168_mdio_write(tp, 0x06, 0x111e); + rtl8168_mdio_write(tp, 0x06, 0x21bf); + rtl8168_mdio_write(tp, 0x06, 0x2ff8); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf60c); + rtl8168_mdio_write(tp, 0x06, 0x121e); + rtl8168_mdio_write(tp, 0x06, 0x21bf); + rtl8168_mdio_write(tp, 0x06, 0x2ffb); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf60c); + rtl8168_mdio_write(tp, 0x06, 0x131e); + rtl8168_mdio_write(tp, 0x06, 0x21bf); + rtl8168_mdio_write(tp, 0x06, 0x1f97); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf60c); + rtl8168_mdio_write(tp, 0x06, 0x141e); + rtl8168_mdio_write(tp, 0x06, 0x21bf); + rtl8168_mdio_write(tp, 0x06, 0x859b); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf60c); + rtl8168_mdio_write(tp, 0x06, 0x161e); + rtl8168_mdio_write(tp, 0x06, 0x21e0); + rtl8168_mdio_write(tp, 0x06, 0x8a8c); + rtl8168_mdio_write(tp, 0x06, 0x1f02); + rtl8168_mdio_write(tp, 0x06, 0x9e22); + rtl8168_mdio_write(tp, 0x06, 0xe68a); + rtl8168_mdio_write(tp, 0x06, 0x8cad); + rtl8168_mdio_write(tp, 0x06, 0x3114); + rtl8168_mdio_write(tp, 0x06, 0xad30); + rtl8168_mdio_write(tp, 0x06, 0x11ef); + rtl8168_mdio_write(tp, 0x06, 0x0258); + rtl8168_mdio_write(tp, 0x06, 0x0c9e); + rtl8168_mdio_write(tp, 0x06, 0x07ad); + rtl8168_mdio_write(tp, 0x06, 0x3608); + rtl8168_mdio_write(tp, 0x06, 0x5a30); + rtl8168_mdio_write(tp, 0x06, 0x9f04); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xae02); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf2f); + rtl8168_mdio_write(tp, 0x06, 0xf202); + rtl8168_mdio_write(tp, 0x06, 0x3723); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xface); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69fa); + rtl8168_mdio_write(tp, 0x06, 0xd401); + rtl8168_mdio_write(tp, 0x06, 0x55b4); + rtl8168_mdio_write(tp, 0x06, 0xfebf); + rtl8168_mdio_write(tp, 0x06, 0x85a7); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf6ac); + rtl8168_mdio_write(tp, 0x06, 0x280b); + rtl8168_mdio_write(tp, 0x06, 0xbf85); + rtl8168_mdio_write(tp, 0x06, 0xa402); + rtl8168_mdio_write(tp, 0x06, 0x36f6); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x49ae); + rtl8168_mdio_write(tp, 0x06, 0x64bf); + rtl8168_mdio_write(tp, 0x06, 
0x85a4); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf6ac); + rtl8168_mdio_write(tp, 0x06, 0x285b); + rtl8168_mdio_write(tp, 0x06, 0xd000); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x60ac); + rtl8168_mdio_write(tp, 0x06, 0x2105); + rtl8168_mdio_write(tp, 0x06, 0xac22); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x4ebf); + rtl8168_mdio_write(tp, 0x06, 0xe0c4); + rtl8168_mdio_write(tp, 0x06, 0xbe86); + rtl8168_mdio_write(tp, 0x06, 0x14d2); + rtl8168_mdio_write(tp, 0x06, 0x04d8); + rtl8168_mdio_write(tp, 0x06, 0x19d9); + rtl8168_mdio_write(tp, 0x06, 0x1907); + rtl8168_mdio_write(tp, 0x06, 0xdc19); + rtl8168_mdio_write(tp, 0x06, 0xdd19); + rtl8168_mdio_write(tp, 0x06, 0x0789); + rtl8168_mdio_write(tp, 0x06, 0x89ef); + rtl8168_mdio_write(tp, 0x06, 0x645e); + rtl8168_mdio_write(tp, 0x06, 0x07ff); + rtl8168_mdio_write(tp, 0x06, 0x0d65); + rtl8168_mdio_write(tp, 0x06, 0x5cf8); + rtl8168_mdio_write(tp, 0x06, 0x001e); + rtl8168_mdio_write(tp, 0x06, 0x46dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0x19b2); + rtl8168_mdio_write(tp, 0x06, 0xe2d4); + rtl8168_mdio_write(tp, 0x06, 0x0001); + rtl8168_mdio_write(tp, 0x06, 0xbf85); + rtl8168_mdio_write(tp, 0x06, 0xa402); + rtl8168_mdio_write(tp, 0x06, 0x3723); + rtl8168_mdio_write(tp, 0x06, 0xae1d); + rtl8168_mdio_write(tp, 0x06, 0xbee0); + rtl8168_mdio_write(tp, 0x06, 0xc4bf); + rtl8168_mdio_write(tp, 0x06, 0x8614); + rtl8168_mdio_write(tp, 0x06, 0xd204); + rtl8168_mdio_write(tp, 0x06, 0xd819); + rtl8168_mdio_write(tp, 0x06, 0xd919); + rtl8168_mdio_write(tp, 0x06, 0x07dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0x1907); + rtl8168_mdio_write(tp, 0x06, 0xb2f4); + rtl8168_mdio_write(tp, 0x06, 0xd400); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x85a4); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23fe); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfec6); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc05); + rtl8168_mdio_write(tp, 0x06, 0xf9e2); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0xeb5a); + rtl8168_mdio_write(tp, 0x06, 0x070c); + rtl8168_mdio_write(tp, 0x06, 0x031e); + rtl8168_mdio_write(tp, 0x06, 0x20e6); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe7e0); + rtl8168_mdio_write(tp, 0x06, 0xebe0); + rtl8168_mdio_write(tp, 0x06, 0xe0fc); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xfdfd); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac26); + rtl8168_mdio_write(tp, 0x06, 0x1ae0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x14e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xac20); + rtl8168_mdio_write(tp, 0x06, 0x0ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xac23); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xac24); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0x1ab5); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x1c04); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 
0x06, 0x1d04); + rtl8168_mdio_write(tp, 0x06, 0xe2e0); + rtl8168_mdio_write(tp, 0x06, 0x7ce3); + rtl8168_mdio_write(tp, 0x06, 0xe07d); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x38e1); + rtl8168_mdio_write(tp, 0x06, 0xe039); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x1bad); + rtl8168_mdio_write(tp, 0x06, 0x390d); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf21); + rtl8168_mdio_write(tp, 0x06, 0xd502); + rtl8168_mdio_write(tp, 0x06, 0x3723); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xd8ae); + rtl8168_mdio_write(tp, 0x06, 0x0bac); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0x1802); + rtl8168_mdio_write(tp, 0x06, 0x8360); + rtl8168_mdio_write(tp, 0x06, 0x021a); + rtl8168_mdio_write(tp, 0x06, 0xc6fd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2605); + rtl8168_mdio_write(tp, 0x06, 0x0222); + rtl8168_mdio_write(tp, 0x06, 0xa4f7); + rtl8168_mdio_write(tp, 0x06, 0x28e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xad21); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x23a9); + rtl8168_mdio_write(tp, 0x06, 0xf729); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2005); + rtl8168_mdio_write(tp, 0x06, 0x0214); + rtl8168_mdio_write(tp, 0x06, 0xabf7); + rtl8168_mdio_write(tp, 0x06, 0x2ae0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad23); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x12e7); + rtl8168_mdio_write(tp, 0x06, 0xf72b); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2405); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0xbcf7); + rtl8168_mdio_write(tp, 0x06, 0x2ce5); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x21e5); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2109); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xf4ac); + rtl8168_mdio_write(tp, 0x06, 0x2003); + rtl8168_mdio_write(tp, 0x06, 0x0223); + rtl8168_mdio_write(tp, 0x06, 0x98e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x09e0); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x13fb); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2309); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xf4ac); + rtl8168_mdio_write(tp, 0x06, 0x2203); + rtl8168_mdio_write(tp, 0x06, 0x0212); + rtl8168_mdio_write(tp, 0x06, 0xfae0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x09e0); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + 
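/* Editorial note: the long run of writes to register 0x06 above and below
 * is a vendor-supplied PHY MCU patch being streamed into the chip; the
 * 16-bit words are opaque microcode/data, not individually meaningful
 * register settings, so they are reproduced here verbatim. */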
rtl8168_mdio_write(tp, 0x06, 0xac23); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x83c1); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2608); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0xd2ad); + rtl8168_mdio_write(tp, 0x06, 0x2502); + rtl8168_mdio_write(tp, 0x06, 0xf628); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x210a); + rtl8168_mdio_write(tp, 0x06, 0xe084); + rtl8168_mdio_write(tp, 0x06, 0x0af6); + rtl8168_mdio_write(tp, 0x06, 0x27a0); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0xf629); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2008); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xe8ad); + rtl8168_mdio_write(tp, 0x06, 0x2102); + rtl8168_mdio_write(tp, 0x06, 0xf62a); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2308); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x20a0); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0xf62b); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2408); + rtl8168_mdio_write(tp, 0x06, 0xe086); + rtl8168_mdio_write(tp, 0x06, 0x02a0); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0xf62c); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xf4a1); + rtl8168_mdio_write(tp, 0x06, 0x0008); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf21); + rtl8168_mdio_write(tp, 0x06, 0xd502); + rtl8168_mdio_write(tp, 0x06, 0x3723); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0200); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x241e); + rtl8168_mdio_write(tp, 0x06, 0xe086); + rtl8168_mdio_write(tp, 0x06, 0x02a0); + rtl8168_mdio_write(tp, 0x06, 0x0005); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0xe8ae); + rtl8168_mdio_write(tp, 0x06, 0xf5a0); + rtl8168_mdio_write(tp, 0x06, 0x0105); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0xf8ae); + rtl8168_mdio_write(tp, 0x06, 0x0ba0); + rtl8168_mdio_write(tp, 0x06, 0x0205); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0x14ae); + rtl8168_mdio_write(tp, 0x06, 0x03a0); + rtl8168_mdio_write(tp, 0x06, 0x0300); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0x2bee); + rtl8168_mdio_write(tp, 0x06, 0x8602); + rtl8168_mdio_write(tp, 0x06, 0x01ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8ee); + rtl8168_mdio_write(tp, 0x06, 0x8609); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x8461); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xae10); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 
0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8608); + rtl8168_mdio_write(tp, 0x06, 0xe186); + rtl8168_mdio_write(tp, 0x06, 0x091f); + rtl8168_mdio_write(tp, 0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x0611); + rtl8168_mdio_write(tp, 0x06, 0xe586); + rtl8168_mdio_write(tp, 0x06, 0x09ae); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x8602); + rtl8168_mdio_write(tp, 0x06, 0x01fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfbbf); + rtl8168_mdio_write(tp, 0x06, 0x8604); + rtl8168_mdio_write(tp, 0x06, 0xef79); + rtl8168_mdio_write(tp, 0x06, 0xd200); + rtl8168_mdio_write(tp, 0x06, 0xd400); + rtl8168_mdio_write(tp, 0x06, 0x221e); + rtl8168_mdio_write(tp, 0x06, 0x02bf); + rtl8168_mdio_write(tp, 0x06, 0x2fec); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23bf); + rtl8168_mdio_write(tp, 0x06, 0x13f2); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf60d); + rtl8168_mdio_write(tp, 0x06, 0x4559); + rtl8168_mdio_write(tp, 0x06, 0x1fef); + rtl8168_mdio_write(tp, 0x06, 0x97dd); + rtl8168_mdio_write(tp, 0x06, 0xd308); + rtl8168_mdio_write(tp, 0x06, 0x1a93); + rtl8168_mdio_write(tp, 0x06, 0xdd12); + rtl8168_mdio_write(tp, 0x06, 0x17a2); + rtl8168_mdio_write(tp, 0x06, 0x04de); + rtl8168_mdio_write(tp, 0x06, 0xffef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfbee); + rtl8168_mdio_write(tp, 0x06, 0x8602); + rtl8168_mdio_write(tp, 0x06, 0x03d5); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x04ef); + rtl8168_mdio_write(tp, 0x06, 0x79ef); + rtl8168_mdio_write(tp, 0x06, 0x45bf); + rtl8168_mdio_write(tp, 0x06, 0x2fec); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23bf); + rtl8168_mdio_write(tp, 0x06, 0x13f2); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf6ad); + rtl8168_mdio_write(tp, 0x06, 0x2702); + rtl8168_mdio_write(tp, 0x06, 0x78ff); + rtl8168_mdio_write(tp, 0x06, 0xe186); + rtl8168_mdio_write(tp, 0x06, 0x0a1b); + rtl8168_mdio_write(tp, 0x06, 0x01aa); + rtl8168_mdio_write(tp, 0x06, 0x2eef); + rtl8168_mdio_write(tp, 0x06, 0x97d9); + rtl8168_mdio_write(tp, 0x06, 0x7900); + rtl8168_mdio_write(tp, 0x06, 0x9e2b); + rtl8168_mdio_write(tp, 0x06, 0x81dd); + rtl8168_mdio_write(tp, 0x06, 0xbf85); + rtl8168_mdio_write(tp, 0x06, 0xad02); + rtl8168_mdio_write(tp, 0x06, 0x3723); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xef02); + rtl8168_mdio_write(tp, 0x06, 0x100c); + rtl8168_mdio_write(tp, 0x06, 0x11b0); + rtl8168_mdio_write(tp, 0x06, 0xfc0d); + rtl8168_mdio_write(tp, 0x06, 0x11bf); + rtl8168_mdio_write(tp, 0x06, 0x85aa); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x85aa); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23ee); + rtl8168_mdio_write(tp, 0x06, 0x8602); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 
0x06, 0x0413); + rtl8168_mdio_write(tp, 0x06, 0xa38b); + rtl8168_mdio_write(tp, 0x06, 0xb4d3); + rtl8168_mdio_write(tp, 0x06, 0x8012); + rtl8168_mdio_write(tp, 0x06, 0x17a2); + rtl8168_mdio_write(tp, 0x06, 0x04ad); + rtl8168_mdio_write(tp, 0x06, 0xffef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x48e0); + rtl8168_mdio_write(tp, 0x06, 0x8a96); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0x977c); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x9e35); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x9600); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x9700); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xbee1); + rtl8168_mdio_write(tp, 0x06, 0x8abf); + rtl8168_mdio_write(tp, 0x06, 0xe286); + rtl8168_mdio_write(tp, 0x06, 0x10e3); + rtl8168_mdio_write(tp, 0x06, 0x8611); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0x1aad); + rtl8168_mdio_write(tp, 0x06, 0x2012); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x9603); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x97b7); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x1000); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x1100); + rtl8168_mdio_write(tp, 0x06, 0xae11); + rtl8168_mdio_write(tp, 0x06, 0x15e6); + rtl8168_mdio_write(tp, 0x06, 0x8610); + rtl8168_mdio_write(tp, 0x06, 0xe786); + rtl8168_mdio_write(tp, 0x06, 0x11ae); + rtl8168_mdio_write(tp, 0x06, 0x08ee); + rtl8168_mdio_write(tp, 0x06, 0x8610); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8611); + rtl8168_mdio_write(tp, 0x06, 0x00fd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe001); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x32e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf720); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40bf); + rtl8168_mdio_write(tp, 0x06, 0x31f5); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf6ad); + rtl8168_mdio_write(tp, 0x06, 0x2821); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x20e1); + rtl8168_mdio_write(tp, 0x06, 0xe021); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x18e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40ee); + rtl8168_mdio_write(tp, 0x06, 0x8b3b); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0x8a8a); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0x8be4); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0x01ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x80ad); + 
rtl8168_mdio_write(tp, 0x06, 0x2722); + rtl8168_mdio_write(tp, 0x06, 0xbf44); + rtl8168_mdio_write(tp, 0x06, 0xfc02); + rtl8168_mdio_write(tp, 0x06, 0x36f6); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x441f); + rtl8168_mdio_write(tp, 0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x15e5); + rtl8168_mdio_write(tp, 0x06, 0x8b44); + rtl8168_mdio_write(tp, 0x06, 0xad29); + rtl8168_mdio_write(tp, 0x06, 0x07ac); + rtl8168_mdio_write(tp, 0x06, 0x2804); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xae02); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf85); + rtl8168_mdio_write(tp, 0x06, 0xb002); + rtl8168_mdio_write(tp, 0x06, 0x3723); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x0400); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0x77e1); + rtl8168_mdio_write(tp, 0x06, 0x40dd); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0x32e1); + rtl8168_mdio_write(tp, 0x06, 0x5074); + rtl8168_mdio_write(tp, 0x06, 0xe144); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0xdaff); + rtl8168_mdio_write(tp, 0x06, 0xe0c0); + rtl8168_mdio_write(tp, 0x06, 0x52e0); + rtl8168_mdio_write(tp, 0x06, 0xeed9); + rtl8168_mdio_write(tp, 0x06, 0xe04c); + rtl8168_mdio_write(tp, 0x06, 0xbbe0); + rtl8168_mdio_write(tp, 0x06, 0x2a00); + rtl8168_mdio_write(tp, 0x05, 0xe142); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x05, 0xe140); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0042); + rtl8168_mdio_write(tp, 0x18, 0x2300); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + if (tp->RequiredSecLanDonglePatch) { + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~BIT_2; + rtl8168_mdio_write(tp, 0x17, gphy_val); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); +} + +static void +rtl8168_set_phy_mcu_8168f_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val,i; + + rtl8168_mdio_write(tp,0x1f, 0x0000); + rtl8168_mdio_write(tp,0x00, 0x1800); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val &= ~(BIT_12); + rtl8168_mdio_write(tp,0x15, gphy_val); + rtl8168_mdio_write(tp,0x00, 0x4800); + rtl8168_mdio_write(tp,0x1f, 0x0007); + rtl8168_mdio_write(tp,0x1e, 0x002f); + for (i = 0; i < 1000; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1c); + if (gphy_val & 0x0080) + break; + } + rtl8168_mdio_write(tp,0x1f, 0x0000); + rtl8168_mdio_write(tp,0x00, 0x1800); + rtl8168_mdio_write(tp,0x1f, 0x0007); + rtl8168_mdio_write(tp,0x1e, 0x0023); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x18); + if (!(gphy_val & 0x0001)) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + 
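The preamble of rtl8168_set_phy_mcu_8168f_1 just above (repeated near-verbatim by the _8168f_2 and _8411_1 variants further down) leans on one idiom: a bounded busy-wait that re-reads a PHY register every 100 us until a ready bit flips, giving up after a fixed number of tries. Below is a minimal sketch of that idiom as a standalone helper; the helper name and signature are invented here, while rtl8168_mdio_read(), udelay(), and struct rtl8168_private are the driver's own.

/* Hypothetical helper (not in this driver): poll a PHY register until
 * (value & mask) == want, sleeping 100 us between reads, for at most
 * `tries` attempts.  Returns true on success, false on timeout. */
static bool rtl8168_wait_phy_bits(struct rtl8168_private *tp, int reg,
                                  unsigned int mask, unsigned int want,
                                  int tries)
{
        while (tries-- > 0) {
                udelay(100);
                if ((rtl8168_mdio_read(tp, reg) & mask) == want)
                        return true;
        }
        return false;
}

With it, the first poll above would read rtl8168_wait_phy_bits(tp, 0x1c, 0x0080, 0x0080, 1000) and the second rtl8168_wait_phy_bits(tp, 0x18, 0x0001, 0x0000, 200).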
rtl8168_mdio_write(tp, 0x15, 0x0194); + rtl8168_mdio_write(tp, 0x19, 0x407D); + rtl8168_mdio_write(tp, 0x15, 0x0098); + rtl8168_mdio_write(tp, 0x19, 0x7c0b); + rtl8168_mdio_write(tp, 0x15, 0x0099); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00eb); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00f8); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00fe); + rtl8168_mdio_write(tp, 0x19, 0x6f0f); + rtl8168_mdio_write(tp, 0x15, 0x00db); + rtl8168_mdio_write(tp, 0x19, 0x6f09); + rtl8168_mdio_write(tp, 0x15, 0x00dc); + rtl8168_mdio_write(tp, 0x19, 0xaefd); + rtl8168_mdio_write(tp, 0x15, 0x00dd); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00de); + rtl8168_mdio_write(tp, 0x19, 0xc60b); + rtl8168_mdio_write(tp, 0x15, 0x00df); + rtl8168_mdio_write(tp, 0x19, 0x00fa); + rtl8168_mdio_write(tp, 0x15, 0x00e0); + rtl8168_mdio_write(tp, 0x19, 0x30e1); + rtl8168_mdio_write(tp, 0x15, 0x020c); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x020e); + rtl8168_mdio_write(tp, 0x19, 0x9813); + rtl8168_mdio_write(tp, 0x15, 0x020f); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x0210); + rtl8168_mdio_write(tp, 0x19, 0x930f); + rtl8168_mdio_write(tp, 0x15, 0x0211); + rtl8168_mdio_write(tp, 0x19, 0x9206); + rtl8168_mdio_write(tp, 0x15, 0x0212); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0213); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0214); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x0215); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0216); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x0217); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0218); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0219); + rtl8168_mdio_write(tp, 0x19, 0x588d); + rtl8168_mdio_write(tp, 0x15, 0x021a); + rtl8168_mdio_write(tp, 0x19, 0x5540); + rtl8168_mdio_write(tp, 0x15, 0x021b); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x021c); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x021d); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x021e); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x021f); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0220); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x0221); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x0222); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0223); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x0224); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0225); + rtl8168_mdio_write(tp, 0x19, 0x3231); + rtl8168_mdio_write(tp, 0x15, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 
0xf602); + rtl8168_mdio_write(tp, 0x06, 0x0118); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x2502); + rtl8168_mdio_write(tp, 0x06, 0x8090); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x4202); + rtl8168_mdio_write(tp, 0x06, 0x015c); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0xad02); + rtl8168_mdio_write(tp, 0x06, 0x80ca); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xd484); + rtl8168_mdio_write(tp, 0x06, 0x3ce4); + rtl8168_mdio_write(tp, 0x06, 0x8b92); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x93ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac8); + rtl8168_mdio_write(tp, 0x06, 0x03ee); + rtl8168_mdio_write(tp, 0x06, 0x8aca); + rtl8168_mdio_write(tp, 0x06, 0x60ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac0); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac1); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x07ee); + rtl8168_mdio_write(tp, 0x06, 0x8abf); + rtl8168_mdio_write(tp, 0x06, 0x73ee); + rtl8168_mdio_write(tp, 0x06, 0x8a95); + rtl8168_mdio_write(tp, 0x06, 0x02bf); + rtl8168_mdio_write(tp, 0x06, 0x8b88); + rtl8168_mdio_write(tp, 0x06, 0xec00); + rtl8168_mdio_write(tp, 0x06, 0x19a9); + rtl8168_mdio_write(tp, 0x06, 0x8b90); + rtl8168_mdio_write(tp, 0x06, 0xf9ee); + rtl8168_mdio_write(tp, 0x06, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xfed1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x85a4); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7dd1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x85a7); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7d04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8a); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x14ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8a); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x204b); + rtl8168_mdio_write(tp, 0x06, 0xe0e4); + rtl8168_mdio_write(tp, 0x06, 0x26e1); + rtl8168_mdio_write(tp, 0x06, 0xe427); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x2623); + rtl8168_mdio_write(tp, 0x06, 0xe5e4); + rtl8168_mdio_write(tp, 0x06, 0x27fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8dad); + rtl8168_mdio_write(tp, 0x06, 0x2014); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8d00); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0x5a78); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 
0x06, 0x0902); + rtl8168_mdio_write(tp, 0x06, 0x05e8); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x4f02); + rtl8168_mdio_write(tp, 0x06, 0x326c); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x1df6); + rtl8168_mdio_write(tp, 0x06, 0x20e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x0902); + rtl8168_mdio_write(tp, 0x06, 0x2ab0); + rtl8168_mdio_write(tp, 0x06, 0x0285); + rtl8168_mdio_write(tp, 0x06, 0x1602); + rtl8168_mdio_write(tp, 0x06, 0x03ba); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0xe502); + rtl8168_mdio_write(tp, 0x06, 0x2df1); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0x8302); + rtl8168_mdio_write(tp, 0x06, 0x0475); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x210b); + rtl8168_mdio_write(tp, 0x06, 0xf621); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x83f8); + rtl8168_mdio_write(tp, 0x06, 0x021c); + rtl8168_mdio_write(tp, 0x06, 0x99e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x22e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0235); + rtl8168_mdio_write(tp, 0x06, 0x63e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad23); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x23e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0231); + rtl8168_mdio_write(tp, 0x06, 0x57e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x24e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2505); + rtl8168_mdio_write(tp, 0x06, 0xf625); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x26e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x022d); + rtl8168_mdio_write(tp, 0x06, 0x1ce0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x27e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x80fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac26); + rtl8168_mdio_write(tp, 0x06, 0x1ae0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x14e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xac20); + rtl8168_mdio_write(tp, 0x06, 0x0ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xac23); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xac24); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + 
rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0x1ac2); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x1c04); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x1d04); + rtl8168_mdio_write(tp, 0x06, 0xe2e0); + rtl8168_mdio_write(tp, 0x06, 0x7ce3); + rtl8168_mdio_write(tp, 0x06, 0xe07d); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x38e1); + rtl8168_mdio_write(tp, 0x06, 0xe039); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x1bad); + rtl8168_mdio_write(tp, 0x06, 0x390d); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf22); + rtl8168_mdio_write(tp, 0x06, 0x7a02); + rtl8168_mdio_write(tp, 0x06, 0x387d); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xacae); + rtl8168_mdio_write(tp, 0x06, 0x0bac); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xe902); + rtl8168_mdio_write(tp, 0x06, 0x822e); + rtl8168_mdio_write(tp, 0x06, 0x021a); + rtl8168_mdio_write(tp, 0x06, 0xd3fd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2602); + rtl8168_mdio_write(tp, 0x06, 0xf728); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2105); + rtl8168_mdio_write(tp, 0x06, 0x0222); + rtl8168_mdio_write(tp, 0x06, 0x8ef7); + rtl8168_mdio_write(tp, 0x06, 0x29e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x14b8); + rtl8168_mdio_write(tp, 0x06, 0xf72a); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2305); + rtl8168_mdio_write(tp, 0x06, 0x0212); + rtl8168_mdio_write(tp, 0x06, 0xf4f7); + rtl8168_mdio_write(tp, 0x06, 0x2be0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x8284); + rtl8168_mdio_write(tp, 0x06, 0xf72c); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xf4fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2600); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2109); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xf4ac); + rtl8168_mdio_write(tp, 0x06, 0x2003); + rtl8168_mdio_write(tp, 0x06, 0x0222); + rtl8168_mdio_write(tp, 0x06, 0x7de0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x09e0); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x1408); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2309); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xf4ac); + rtl8168_mdio_write(tp, 0x06, 0x2203); + rtl8168_mdio_write(tp, 0x06, 0x0213); + rtl8168_mdio_write(tp, 0x06, 0x07e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 
0xad24); + rtl8168_mdio_write(tp, 0x06, 0x09e0); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xac23); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x8289); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2602); + rtl8168_mdio_write(tp, 0x06, 0xf628); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x210a); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0xecf6); + rtl8168_mdio_write(tp, 0x06, 0x27a0); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0xf629); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2008); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xe8ad); + rtl8168_mdio_write(tp, 0x06, 0x2102); + rtl8168_mdio_write(tp, 0x06, 0xf62a); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2308); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x20a0); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0xf62b); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2408); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xc2a0); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0xf62c); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xf4a1); + rtl8168_mdio_write(tp, 0x06, 0x0008); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf22); + rtl8168_mdio_write(tp, 0x06, 0x7a02); + rtl8168_mdio_write(tp, 0x06, 0x387d); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xc200); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x241e); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xc2a0); + rtl8168_mdio_write(tp, 0x06, 0x0005); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xb0ae); + rtl8168_mdio_write(tp, 0x06, 0xf5a0); + rtl8168_mdio_write(tp, 0x06, 0x0105); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xc0ae); + rtl8168_mdio_write(tp, 0x06, 0x0ba0); + rtl8168_mdio_write(tp, 0x06, 0x0205); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xcaae); + rtl8168_mdio_write(tp, 0x06, 0x03a0); + rtl8168_mdio_write(tp, 0x06, 0x0300); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xe1ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac2); + rtl8168_mdio_write(tp, 0x06, 0x01ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac9); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x8317); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8ac8); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xc91f); + rtl8168_mdio_write(tp, 
0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x0611); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xc9ae); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac2); + rtl8168_mdio_write(tp, 0x06, 0x01fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfbbf); + rtl8168_mdio_write(tp, 0x06, 0x8ac4); + rtl8168_mdio_write(tp, 0x06, 0xef79); + rtl8168_mdio_write(tp, 0x06, 0xd200); + rtl8168_mdio_write(tp, 0x06, 0xd400); + rtl8168_mdio_write(tp, 0x06, 0x221e); + rtl8168_mdio_write(tp, 0x06, 0x02bf); + rtl8168_mdio_write(tp, 0x06, 0x3024); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7dbf); + rtl8168_mdio_write(tp, 0x06, 0x13ff); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x500d); + rtl8168_mdio_write(tp, 0x06, 0x4559); + rtl8168_mdio_write(tp, 0x06, 0x1fef); + rtl8168_mdio_write(tp, 0x06, 0x97dd); + rtl8168_mdio_write(tp, 0x06, 0xd308); + rtl8168_mdio_write(tp, 0x06, 0x1a93); + rtl8168_mdio_write(tp, 0x06, 0xdd12); + rtl8168_mdio_write(tp, 0x06, 0x17a2); + rtl8168_mdio_write(tp, 0x06, 0x04de); + rtl8168_mdio_write(tp, 0x06, 0xffef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfbee); + rtl8168_mdio_write(tp, 0x06, 0x8ac2); + rtl8168_mdio_write(tp, 0x06, 0x03d5); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x06, 0xbf8a); + rtl8168_mdio_write(tp, 0x06, 0xc4ef); + rtl8168_mdio_write(tp, 0x06, 0x79ef); + rtl8168_mdio_write(tp, 0x06, 0x45bf); + rtl8168_mdio_write(tp, 0x06, 0x3024); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7dbf); + rtl8168_mdio_write(tp, 0x06, 0x13ff); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x50ad); + rtl8168_mdio_write(tp, 0x06, 0x2702); + rtl8168_mdio_write(tp, 0x06, 0x78ff); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xca1b); + rtl8168_mdio_write(tp, 0x06, 0x01aa); + rtl8168_mdio_write(tp, 0x06, 0x2eef); + rtl8168_mdio_write(tp, 0x06, 0x97d9); + rtl8168_mdio_write(tp, 0x06, 0x7900); + rtl8168_mdio_write(tp, 0x06, 0x9e2b); + rtl8168_mdio_write(tp, 0x06, 0x81dd); + rtl8168_mdio_write(tp, 0x06, 0xbf85); + rtl8168_mdio_write(tp, 0x06, 0xad02); + rtl8168_mdio_write(tp, 0x06, 0x387d); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xef02); + rtl8168_mdio_write(tp, 0x06, 0x100c); + rtl8168_mdio_write(tp, 0x06, 0x11b0); + rtl8168_mdio_write(tp, 0x06, 0xfc0d); + rtl8168_mdio_write(tp, 0x06, 0x11bf); + rtl8168_mdio_write(tp, 0x06, 0x85aa); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7dd1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x85aa); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7dee); + rtl8168_mdio_write(tp, 0x06, 0x8ac2); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x0413); + rtl8168_mdio_write(tp, 0x06, 0xa38b); + rtl8168_mdio_write(tp, 0x06, 0xb4d3); + rtl8168_mdio_write(tp, 0x06, 0x8012); + rtl8168_mdio_write(tp, 0x06, 0x17a2); + rtl8168_mdio_write(tp, 0x06, 0x04ad); + rtl8168_mdio_write(tp, 0x06, 0xffef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + 
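For orientation inside this run of constants: the writes to register 0x06 follow the driver's indirect access pattern. Register 0x1f = 0x0005 selects the extension page, register 0x05 takes a PHY-internal start address (0x8000 for the patch body), and successive writes to register 0x06 stream the payload, which the PHY appears to consume at auto-incrementing addresses. A hedged sketch of how such a payload could be factored into a table plus a loop; rtl8168_phy_sram_write() is a name invented for illustration, not a driver function.

/* Illustrative only: stream `len` payload words into the PHY starting at
 * internal address `addr`, via the page-5 indirect access registers. */
static void rtl8168_phy_sram_write(struct rtl8168_private *tp,
                                   unsigned int addr,
                                   const unsigned short *data, int len)
{
        int i;

        rtl8168_mdio_write(tp, 0x1f, 0x0005);  /* select extension page 5 */
        rtl8168_mdio_write(tp, 0x05, addr);    /* internal start address  */
        for (i = 0; i < len; i++)
                rtl8168_mdio_write(tp, 0x06, data[i]);  /* payload words  */
        rtl8168_mdio_write(tp, 0x1f, 0x0000);  /* back to standard page   */
}

The driver inlines every write instead, which keeps each patch table literally comparable against Realtek's vendor releases.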
rtl8168_mdio_write(tp, 0x06, 0xf9e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x48e0); + rtl8168_mdio_write(tp, 0x06, 0x8a96); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0x977c); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x9e35); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x9600); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x9700); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xbee1); + rtl8168_mdio_write(tp, 0x06, 0x8abf); + rtl8168_mdio_write(tp, 0x06, 0xe28a); + rtl8168_mdio_write(tp, 0x06, 0xc0e3); + rtl8168_mdio_write(tp, 0x06, 0x8ac1); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x74ad); + rtl8168_mdio_write(tp, 0x06, 0x2012); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x9603); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x97b7); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xc000); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xc100); + rtl8168_mdio_write(tp, 0x06, 0xae11); + rtl8168_mdio_write(tp, 0x06, 0x15e6); + rtl8168_mdio_write(tp, 0x06, 0x8ac0); + rtl8168_mdio_write(tp, 0x06, 0xe78a); + rtl8168_mdio_write(tp, 0x06, 0xc1ae); + rtl8168_mdio_write(tp, 0x06, 0x08ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac0); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac1); + rtl8168_mdio_write(tp, 0x06, 0x00fd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xae20); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe001); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x32e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf720); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40bf); + rtl8168_mdio_write(tp, 0x06, 0x3230); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x50ad); + rtl8168_mdio_write(tp, 0x06, 0x2821); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x20e1); + rtl8168_mdio_write(tp, 0x06, 0xe021); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x18e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40ee); + rtl8168_mdio_write(tp, 0x06, 0x8b3b); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0x8a8a); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0x8be4); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 
0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0x01ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xface); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69fa); + rtl8168_mdio_write(tp, 0x06, 0xd401); + rtl8168_mdio_write(tp, 0x06, 0x55b4); + rtl8168_mdio_write(tp, 0x06, 0xfebf); + rtl8168_mdio_write(tp, 0x06, 0x1c1e); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x50ac); + rtl8168_mdio_write(tp, 0x06, 0x280b); + rtl8168_mdio_write(tp, 0x06, 0xbf1c); + rtl8168_mdio_write(tp, 0x06, 0x1b02); + rtl8168_mdio_write(tp, 0x06, 0x3850); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x49ae); + rtl8168_mdio_write(tp, 0x06, 0x64bf); + rtl8168_mdio_write(tp, 0x06, 0x1c1b); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x50ac); + rtl8168_mdio_write(tp, 0x06, 0x285b); + rtl8168_mdio_write(tp, 0x06, 0xd000); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0xcaac); + rtl8168_mdio_write(tp, 0x06, 0x2105); + rtl8168_mdio_write(tp, 0x06, 0xac22); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x4ebf); + rtl8168_mdio_write(tp, 0x06, 0xe0c4); + rtl8168_mdio_write(tp, 0x06, 0xbe85); + rtl8168_mdio_write(tp, 0x06, 0xf6d2); + rtl8168_mdio_write(tp, 0x06, 0x04d8); + rtl8168_mdio_write(tp, 0x06, 0x19d9); + rtl8168_mdio_write(tp, 0x06, 0x1907); + rtl8168_mdio_write(tp, 0x06, 0xdc19); + rtl8168_mdio_write(tp, 0x06, 0xdd19); + rtl8168_mdio_write(tp, 0x06, 0x0789); + rtl8168_mdio_write(tp, 0x06, 0x89ef); + rtl8168_mdio_write(tp, 0x06, 0x645e); + rtl8168_mdio_write(tp, 0x06, 0x07ff); + rtl8168_mdio_write(tp, 0x06, 0x0d65); + rtl8168_mdio_write(tp, 0x06, 0x5cf8); + rtl8168_mdio_write(tp, 0x06, 0x001e); + rtl8168_mdio_write(tp, 0x06, 0x46dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0x19b2); + rtl8168_mdio_write(tp, 0x06, 0xe2d4); + rtl8168_mdio_write(tp, 0x06, 0x0001); + rtl8168_mdio_write(tp, 0x06, 0xbf1c); + rtl8168_mdio_write(tp, 0x06, 0x1b02); + rtl8168_mdio_write(tp, 0x06, 0x387d); + rtl8168_mdio_write(tp, 0x06, 0xae1d); + rtl8168_mdio_write(tp, 0x06, 0xbee0); + rtl8168_mdio_write(tp, 0x06, 0xc4bf); + rtl8168_mdio_write(tp, 0x06, 0x85f6); + rtl8168_mdio_write(tp, 0x06, 0xd204); + rtl8168_mdio_write(tp, 0x06, 0xd819); + rtl8168_mdio_write(tp, 0x06, 0xd919); + rtl8168_mdio_write(tp, 0x06, 0x07dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0x1907); + rtl8168_mdio_write(tp, 0x06, 0xb2f4); + rtl8168_mdio_write(tp, 0x06, 0xd400); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x1c1b); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7dfe); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfec6); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc05); + rtl8168_mdio_write(tp, 0x06, 0xf9e2); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0xeb5a); + rtl8168_mdio_write(tp, 0x06, 0x070c); + rtl8168_mdio_write(tp, 0x06, 0x031e); + rtl8168_mdio_write(tp, 0x06, 0x20e6); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe7e0); + rtl8168_mdio_write(tp, 0x06, 0xebe0); + rtl8168_mdio_write(tp, 0x06, 0xe0fc); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xfdfd); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 
0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0x8b80); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x22bf); + rtl8168_mdio_write(tp, 0x06, 0x4616); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x50e0); + rtl8168_mdio_write(tp, 0x06, 0x8b44); + rtl8168_mdio_write(tp, 0x06, 0x1f01); + rtl8168_mdio_write(tp, 0x06, 0x9e15); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x44ad); + rtl8168_mdio_write(tp, 0x06, 0x2907); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x04d1); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0x02d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x85b0); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7def); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x30e0); + rtl8168_mdio_write(tp, 0x06, 0xe036); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x37e1); + rtl8168_mdio_write(tp, 0x06, 0x8b3f); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e23); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x3fac); + rtl8168_mdio_write(tp, 0x06, 0x200b); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x0dac); + rtl8168_mdio_write(tp, 0x06, 0x250f); + rtl8168_mdio_write(tp, 0x06, 0xac27); + rtl8168_mdio_write(tp, 0x06, 0x11ae); + rtl8168_mdio_write(tp, 0x06, 0x1202); + rtl8168_mdio_write(tp, 0x06, 0x2c47); + rtl8168_mdio_write(tp, 0x06, 0xae0d); + rtl8168_mdio_write(tp, 0x06, 0x0285); + rtl8168_mdio_write(tp, 0x06, 0x4fae); + rtl8168_mdio_write(tp, 0x06, 0x0802); + rtl8168_mdio_write(tp, 0x06, 0x2c69); + rtl8168_mdio_write(tp, 0x06, 0xae03); + rtl8168_mdio_write(tp, 0x06, 0x022c); + rtl8168_mdio_write(tp, 0x06, 0x7cfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x6902); + rtl8168_mdio_write(tp, 0x06, 0x856c); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x14e1); + rtl8168_mdio_write(tp, 0x06, 0xe015); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x08d1); + rtl8168_mdio_write(tp, 0x06, 0x1ebf); + rtl8168_mdio_write(tp, 0x06, 0x2cd9); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7def); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x2fd0); + rtl8168_mdio_write(tp, 0x06, 0x0b02); + rtl8168_mdio_write(tp, 0x06, 0x3682); + rtl8168_mdio_write(tp, 0x06, 0x5882); + rtl8168_mdio_write(tp, 0x06, 0x7882); + rtl8168_mdio_write(tp, 0x06, 0x9f24); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x32e1); + rtl8168_mdio_write(tp, 0x06, 0x8b33); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e1a); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x8b32); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x28e1); + rtl8168_mdio_write(tp, 0x06, 0xe029); + rtl8168_mdio_write(tp, 0x06, 0xf72c); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x28e5); + 
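Once the payload has been streamed, the function arms the patch (visible a little further down): register 0x05 selects PHY-internal registers 0xe142 and then 0xe140, register 0x06 is read back, BIT_0 is OR-ed in, and the value is rewritten. That is a read-modify-write, so the remaining bits survive. A sketch under the same caveat that the helper name is hypothetical:

/* Hypothetical helper for the arming step: set bits in a PHY-internal
 * register addressed through reg 0x05 on extension page 5. */
static void rtl8168_phy_sram_set_bits(struct rtl8168_private *tp,
                                      unsigned int addr, unsigned int bits)
{
        unsigned int val;

        rtl8168_mdio_write(tp, 0x05, addr);        /* select internal reg */
        val = rtl8168_mdio_read(tp, 0x06);         /* read current value  */
        rtl8168_mdio_write(tp, 0x06, val | bits);  /* write back with the
                                                    * extra bits set      */
}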
rtl8168_mdio_write(tp, 0x06, 0xe029); + rtl8168_mdio_write(tp, 0x06, 0xf62c); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x28e5); + rtl8168_mdio_write(tp, 0x06, 0xe029); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0x4077); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0x52e0); + rtl8168_mdio_write(tp, 0x06, 0xeed9); + rtl8168_mdio_write(tp, 0x06, 0xe04c); + rtl8168_mdio_write(tp, 0x06, 0xbbe0); + rtl8168_mdio_write(tp, 0x06, 0x2a00); + rtl8168_mdio_write(tp, 0x05, 0xe142); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp,0x06, gphy_val); + rtl8168_mdio_write(tp, 0x05, 0xe140); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp,0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp,0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val |= BIT_1; + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~BIT_2; + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x09, 0xA20F); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0003); + rtl8168_mdio_write(tp, 0x01, 0x328A); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); +} + +static void +rtl8168_set_phy_mcu_8168f_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val,i; + + rtl8168_mdio_write(tp,0x1f, 0x0000); + rtl8168_mdio_write(tp,0x00, 0x1800); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val &= ~(BIT_12); + rtl8168_mdio_write(tp,0x15, gphy_val); + rtl8168_mdio_write(tp,0x00, 0x4800); + rtl8168_mdio_write(tp,0x1f, 0x0007); + rtl8168_mdio_write(tp,0x1e, 0x002f); + for (i = 0; i < 1000; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1c); + if (gphy_val & 0x0080) + break; + } + rtl8168_mdio_write(tp,0x1f, 0x0000); + rtl8168_mdio_write(tp,0x00, 0x1800); + rtl8168_mdio_write(tp,0x1f, 0x0007); + rtl8168_mdio_write(tp,0x1e, 0x0023); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x18); + if (!(gphy_val & 0x0001)) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + rtl8168_mdio_write(tp, 0x15, 0x0098); + rtl8168_mdio_write(tp, 0x19, 0x7c0b); + rtl8168_mdio_write(tp, 0x15, 0x0099); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00eb); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00f8); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00fe); + rtl8168_mdio_write(tp, 0x19, 0x6f0f); + rtl8168_mdio_write(tp, 0x15, 0x00db); + rtl8168_mdio_write(tp, 0x19, 0x6f09); + rtl8168_mdio_write(tp, 0x15, 0x00dc); + rtl8168_mdio_write(tp, 0x19, 0xaefd); + rtl8168_mdio_write(tp, 0x15, 0x00dd); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00de); + rtl8168_mdio_write(tp, 0x19, 0xc60b); + rtl8168_mdio_write(tp, 0x15, 
0x00df); + rtl8168_mdio_write(tp, 0x19, 0x00fa); + rtl8168_mdio_write(tp, 0x15, 0x00e0); + rtl8168_mdio_write(tp, 0x19, 0x30e1); + rtl8168_mdio_write(tp, 0x15, 0x020c); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x020e); + rtl8168_mdio_write(tp, 0x19, 0x9813); + rtl8168_mdio_write(tp, 0x15, 0x020f); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x0210); + rtl8168_mdio_write(tp, 0x19, 0x930f); + rtl8168_mdio_write(tp, 0x15, 0x0211); + rtl8168_mdio_write(tp, 0x19, 0x9206); + rtl8168_mdio_write(tp, 0x15, 0x0212); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0213); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0214); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x0215); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0216); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x0217); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0218); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0219); + rtl8168_mdio_write(tp, 0x19, 0x588d); + rtl8168_mdio_write(tp, 0x15, 0x021a); + rtl8168_mdio_write(tp, 0x19, 0x5540); + rtl8168_mdio_write(tp, 0x15, 0x021b); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x021c); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x021d); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x021e); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x021f); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0220); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x0221); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x0222); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0223); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x0224); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0225); + rtl8168_mdio_write(tp, 0x19, 0x3231); + rtl8168_mdio_write(tp, 0x15, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0xf602); + rtl8168_mdio_write(tp, 0x06, 0x011b); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x2802); + rtl8168_mdio_write(tp, 0x06, 0x0135); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x4502); + rtl8168_mdio_write(tp, 0x06, 0x015f); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x6b02); + rtl8168_mdio_write(tp, 0x06, 0x80e5); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 
0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xbf8b); + rtl8168_mdio_write(tp, 0x06, 0x88ec); + rtl8168_mdio_write(tp, 0x06, 0x0019); + rtl8168_mdio_write(tp, 0x06, 0xa98b); + rtl8168_mdio_write(tp, 0x06, 0x90f9); + rtl8168_mdio_write(tp, 0x06, 0xeeff); + rtl8168_mdio_write(tp, 0x06, 0xf600); + rtl8168_mdio_write(tp, 0x06, 0xeeff); + rtl8168_mdio_write(tp, 0x06, 0xf7fe); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf81); + rtl8168_mdio_write(tp, 0x06, 0x9802); + rtl8168_mdio_write(tp, 0x06, 0x39f3); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf81); + rtl8168_mdio_write(tp, 0x06, 0x9b02); + rtl8168_mdio_write(tp, 0x06, 0x39f3); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8dad); + rtl8168_mdio_write(tp, 0x06, 0x2014); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8d00); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0x5a78); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x0902); + rtl8168_mdio_write(tp, 0x06, 0x05fc); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x8802); + rtl8168_mdio_write(tp, 0x06, 0x32dd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ac); + rtl8168_mdio_write(tp, 0x06, 0x261a); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ac); + rtl8168_mdio_write(tp, 0x06, 0x2114); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ac); + rtl8168_mdio_write(tp, 0x06, 0x200e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ac); + rtl8168_mdio_write(tp, 0x06, 0x2308); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ac); + rtl8168_mdio_write(tp, 0x06, 0x2402); + rtl8168_mdio_write(tp, 0x06, 0xae38); + rtl8168_mdio_write(tp, 0x06, 0x021a); + rtl8168_mdio_write(tp, 0x06, 0xd6ee); + rtl8168_mdio_write(tp, 0x06, 0xe41c); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0xe41d); + rtl8168_mdio_write(tp, 0x06, 0x04e2); + rtl8168_mdio_write(tp, 0x06, 0xe07c); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0x7de0); + rtl8168_mdio_write(tp, 0x06, 0xe038); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x39ad); + rtl8168_mdio_write(tp, 0x06, 0x2e1b); + rtl8168_mdio_write(tp, 0x06, 0xad39); + rtl8168_mdio_write(tp, 0x06, 0x0dd1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x22c8); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xf302); + rtl8168_mdio_write(tp, 0x06, 0x21f0); + rtl8168_mdio_write(tp, 0x06, 0xae0b); + rtl8168_mdio_write(tp, 0x06, 0xac38); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x0602); + rtl8168_mdio_write(tp, 0x06, 0x222d); + rtl8168_mdio_write(tp, 0x06, 0x0222); + rtl8168_mdio_write(tp, 0x06, 0x7202); + rtl8168_mdio_write(tp, 0x06, 0x1ae7); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + 
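Editorial note: rtl8168_set_phy_mcu_8168f_2, the function in progress here, follows the 8168f_1 skeleton (unlock, patch request, payload stream, BIT_0 arming of 0xe140/0xe142, relock, final 0x00 = 0x9200 reset with autonegotiation enabled) but with a shorter payload and without 8168f_1's trailing page-3 fixups (0x09 = 0xA20F, 0x01 = 0x328A). rtl8168_set_phy_mcu_8411_1, which begins below, opens with the identical unlock-and-poll preamble.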
rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x201a); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x2afe); + rtl8168_mdio_write(tp, 0x06, 0x022c); + rtl8168_mdio_write(tp, 0x06, 0x5c02); + rtl8168_mdio_write(tp, 0x06, 0x03c5); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x6702); + rtl8168_mdio_write(tp, 0x06, 0x2e4f); + rtl8168_mdio_write(tp, 0x06, 0x0204); + rtl8168_mdio_write(tp, 0x06, 0x8902); + rtl8168_mdio_write(tp, 0x06, 0x2f7a); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x210b); + rtl8168_mdio_write(tp, 0x06, 0xf621); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x0445); + rtl8168_mdio_write(tp, 0x06, 0x021c); + rtl8168_mdio_write(tp, 0x06, 0xb8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x22e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0235); + rtl8168_mdio_write(tp, 0x06, 0xd4e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad23); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x23e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0231); + rtl8168_mdio_write(tp, 0x06, 0xc8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x24e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2505); + rtl8168_mdio_write(tp, 0x06, 0xf625); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x26e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x022d); + rtl8168_mdio_write(tp, 0x06, 0x6ae0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x27e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x8bfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0x8b80); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x22bf); + rtl8168_mdio_write(tp, 0x06, 0x479a); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xc6e0); + rtl8168_mdio_write(tp, 0x06, 0x8b44); + rtl8168_mdio_write(tp, 0x06, 0x1f01); + rtl8168_mdio_write(tp, 0x06, 0x9e15); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x44ad); + rtl8168_mdio_write(tp, 0x06, 0x2907); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x04d1); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0x02d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x819e); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xf3ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 
0x00e1); + rtl8168_mdio_write(tp, 0x06, 0x4077); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xbbe0); + rtl8168_mdio_write(tp, 0x06, 0x2a00); + rtl8168_mdio_write(tp, 0x05, 0xe142); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x05, 0xe140); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val |= BIT_1; + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~BIT_2; + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); +} + +static void +rtl8168_set_phy_mcu_8411_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val, i; + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x1800); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val &= ~(BIT_12); + rtl8168_mdio_write(tp, 0x15, gphy_val); + rtl8168_mdio_write(tp, 0x00, 0x4800); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x002f); + for (i = 0; i < 1000; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1c); + if (gphy_val & 0x0080) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x1800); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x18); + if (!(gphy_val & 0x0001)) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + rtl8168_mdio_write(tp, 0x15, 0x0098); + rtl8168_mdio_write(tp, 0x19, 0x7c0b); + rtl8168_mdio_write(tp, 0x15, 0x0099); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00eb); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00f8); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00fe); + rtl8168_mdio_write(tp, 0x19, 0x6f0f); + rtl8168_mdio_write(tp, 0x15, 0x00db); + rtl8168_mdio_write(tp, 0x19, 0x6f09); + rtl8168_mdio_write(tp, 0x15, 0x00dc); + rtl8168_mdio_write(tp, 0x19, 0xaefd); + rtl8168_mdio_write(tp, 0x15, 0x00dd); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00de); + rtl8168_mdio_write(tp, 0x19, 0xc60b); + rtl8168_mdio_write(tp, 0x15, 0x00df); + rtl8168_mdio_write(tp, 0x19, 0x00fa); + rtl8168_mdio_write(tp, 0x15, 0x00e0); + rtl8168_mdio_write(tp, 0x19, 0x30e1); + rtl8168_mdio_write(tp, 0x15, 0x020c); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x020e); + rtl8168_mdio_write(tp, 0x19, 0x9813); + rtl8168_mdio_write(tp, 0x15, 0x020f); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x0210); + rtl8168_mdio_write(tp, 0x19, 0x930f); + rtl8168_mdio_write(tp, 0x15, 0x0211); + rtl8168_mdio_write(tp, 0x19, 0x9206); + rtl8168_mdio_write(tp, 0x15, 0x0212); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 
0x15, 0x0213); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0214); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x0215); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0216); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x0217); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0218); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0219); + rtl8168_mdio_write(tp, 0x19, 0x588d); + rtl8168_mdio_write(tp, 0x15, 0x021a); + rtl8168_mdio_write(tp, 0x19, 0x5540); + rtl8168_mdio_write(tp, 0x15, 0x021b); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x021c); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x021d); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x021e); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x021f); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0220); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x0221); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x0222); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0223); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x0224); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0225); + rtl8168_mdio_write(tp, 0x19, 0x3231); + rtl8168_mdio_write(tp, 0x15, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0xf602); + rtl8168_mdio_write(tp, 0x06, 0x011e); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x2b02); + rtl8168_mdio_write(tp, 0x06, 0x8077); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x4802); + rtl8168_mdio_write(tp, 0x06, 0x0162); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x9402); + rtl8168_mdio_write(tp, 0x06, 0x810e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xd481); + rtl8168_mdio_write(tp, 0x06, 0xd4e4); + rtl8168_mdio_write(tp, 0x06, 0x8b92); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x9302); + rtl8168_mdio_write(tp, 0x06, 0x2e5a); + rtl8168_mdio_write(tp, 0x06, 0xbf8b); + 
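/* Each 16-bit 0x06 write carries two consecutive firmware bytes, so instruction-looking byte groups (e.g. the recurring 0x02 xx xx call-style and 0xbf xx load-style runs) straddle adjacent writes and do not align with statement boundaries. This reading is inferred from the pattern only; none of these payload values are MDIO register fields in their own right. */ +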
rtl8168_mdio_write(tp, 0x06, 0x88ec); + rtl8168_mdio_write(tp, 0x06, 0x0019); + rtl8168_mdio_write(tp, 0x06, 0xa98b); + rtl8168_mdio_write(tp, 0x06, 0x90f9); + rtl8168_mdio_write(tp, 0x06, 0xeeff); + rtl8168_mdio_write(tp, 0x06, 0xf600); + rtl8168_mdio_write(tp, 0x06, 0xeeff); + rtl8168_mdio_write(tp, 0x06, 0xf7fc); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf83); + rtl8168_mdio_write(tp, 0x06, 0x3c02); + rtl8168_mdio_write(tp, 0x06, 0x3a21); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf83); + rtl8168_mdio_write(tp, 0x06, 0x3f02); + rtl8168_mdio_write(tp, 0x06, 0x3a21); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8aad); + rtl8168_mdio_write(tp, 0x06, 0x2014); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8a00); + rtl8168_mdio_write(tp, 0x06, 0x0220); + rtl8168_mdio_write(tp, 0x06, 0x8be0); + rtl8168_mdio_write(tp, 0x06, 0xe426); + rtl8168_mdio_write(tp, 0x06, 0xe1e4); + rtl8168_mdio_write(tp, 0x06, 0x27ee); + rtl8168_mdio_write(tp, 0x06, 0xe426); + rtl8168_mdio_write(tp, 0x06, 0x23e5); + rtl8168_mdio_write(tp, 0x06, 0xe427); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x14ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0x8a5a); + rtl8168_mdio_write(tp, 0x06, 0x7803); + rtl8168_mdio_write(tp, 0x06, 0x9e09); + rtl8168_mdio_write(tp, 0x06, 0x0206); + rtl8168_mdio_write(tp, 0x06, 0x2802); + rtl8168_mdio_write(tp, 0x06, 0x80b1); + rtl8168_mdio_write(tp, 0x06, 0x0232); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac26); + rtl8168_mdio_write(tp, 0x06, 0x1ae0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x14e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xac20); + rtl8168_mdio_write(tp, 0x06, 0x0ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xac23); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xac24); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0x1b02); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x1c04); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x1d04); + rtl8168_mdio_write(tp, 0x06, 0xe2e0); + rtl8168_mdio_write(tp, 0x06, 0x7ce3); + rtl8168_mdio_write(tp, 0x06, 0xe07d); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x38e1); + rtl8168_mdio_write(tp, 0x06, 0xe039); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x1bad); + rtl8168_mdio_write(tp, 0x06, 0x390d); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf22); + rtl8168_mdio_write(tp, 0x06, 0xe802); + rtl8168_mdio_write(tp, 0x06, 0x3a21); + rtl8168_mdio_write(tp, 0x06, 0x0222); + rtl8168_mdio_write(tp, 0x06, 0x10ae); + rtl8168_mdio_write(tp, 0x06, 0x0bac); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0222); + rtl8168_mdio_write(tp, 0x06, 0x4d02); + rtl8168_mdio_write(tp, 0x06, 
0x2292); + rtl8168_mdio_write(tp, 0x06, 0x021b); + rtl8168_mdio_write(tp, 0x06, 0x13fd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x1af6); + rtl8168_mdio_write(tp, 0x06, 0x20e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x022b); + rtl8168_mdio_write(tp, 0x06, 0x1e02); + rtl8168_mdio_write(tp, 0x06, 0x82ae); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0xc002); + rtl8168_mdio_write(tp, 0x06, 0x827d); + rtl8168_mdio_write(tp, 0x06, 0x022e); + rtl8168_mdio_write(tp, 0x06, 0x6f02); + rtl8168_mdio_write(tp, 0x06, 0x047b); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x9ae0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad21); + rtl8168_mdio_write(tp, 0x06, 0x0bf6); + rtl8168_mdio_write(tp, 0x06, 0x21e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x9002); + rtl8168_mdio_write(tp, 0x06, 0x1cd9); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2208); + rtl8168_mdio_write(tp, 0x06, 0xf622); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x35f4); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2308); + rtl8168_mdio_write(tp, 0x06, 0xf623); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x31e8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2405); + rtl8168_mdio_write(tp, 0x06, 0xf624); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x25e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2608); + rtl8168_mdio_write(tp, 0x06, 0xf626); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x2d8a); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2705); + rtl8168_mdio_write(tp, 0x06, 0xf627); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x0386); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe001); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x32e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf720); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40bf); + rtl8168_mdio_write(tp, 0x06, 0x32c1); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xf4ad); + rtl8168_mdio_write(tp, 0x06, 0x2821); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x20e1); + rtl8168_mdio_write(tp, 0x06, 0xe021); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x18e0); + rtl8168_mdio_write(tp, 
0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40ee); + rtl8168_mdio_write(tp, 0x06, 0x8b3b); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0x8a8a); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0x8be4); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0x01ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xface); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69fa); + rtl8168_mdio_write(tp, 0x06, 0xd401); + rtl8168_mdio_write(tp, 0x06, 0x55b4); + rtl8168_mdio_write(tp, 0x06, 0xfebf); + rtl8168_mdio_write(tp, 0x06, 0x1c5e); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xf4ac); + rtl8168_mdio_write(tp, 0x06, 0x280b); + rtl8168_mdio_write(tp, 0x06, 0xbf1c); + rtl8168_mdio_write(tp, 0x06, 0x5b02); + rtl8168_mdio_write(tp, 0x06, 0x39f4); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x49ae); + rtl8168_mdio_write(tp, 0x06, 0x64bf); + rtl8168_mdio_write(tp, 0x06, 0x1c5b); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xf4ac); + rtl8168_mdio_write(tp, 0x06, 0x285b); + rtl8168_mdio_write(tp, 0x06, 0xd000); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x62ac); + rtl8168_mdio_write(tp, 0x06, 0x2105); + rtl8168_mdio_write(tp, 0x06, 0xac22); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x4ebf); + rtl8168_mdio_write(tp, 0x06, 0xe0c4); + rtl8168_mdio_write(tp, 0x06, 0xbe85); + rtl8168_mdio_write(tp, 0x06, 0xecd2); + rtl8168_mdio_write(tp, 0x06, 0x04d8); + rtl8168_mdio_write(tp, 0x06, 0x19d9); + rtl8168_mdio_write(tp, 0x06, 0x1907); + rtl8168_mdio_write(tp, 0x06, 0xdc19); + rtl8168_mdio_write(tp, 0x06, 0xdd19); + rtl8168_mdio_write(tp, 0x06, 0x0789); + rtl8168_mdio_write(tp, 0x06, 0x89ef); + rtl8168_mdio_write(tp, 0x06, 0x645e); + rtl8168_mdio_write(tp, 0x06, 0x07ff); + rtl8168_mdio_write(tp, 0x06, 0x0d65); + rtl8168_mdio_write(tp, 0x06, 0x5cf8); + rtl8168_mdio_write(tp, 0x06, 0x001e); + rtl8168_mdio_write(tp, 0x06, 0x46dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0x19b2); + rtl8168_mdio_write(tp, 0x06, 0xe2d4); + rtl8168_mdio_write(tp, 0x06, 0x0001); + rtl8168_mdio_write(tp, 0x06, 0xbf1c); + rtl8168_mdio_write(tp, 0x06, 0x5b02); + rtl8168_mdio_write(tp, 0x06, 0x3a21); + rtl8168_mdio_write(tp, 0x06, 0xae1d); + rtl8168_mdio_write(tp, 0x06, 0xbee0); + rtl8168_mdio_write(tp, 0x06, 0xc4bf); + rtl8168_mdio_write(tp, 0x06, 0x85ec); + rtl8168_mdio_write(tp, 0x06, 0xd204); + rtl8168_mdio_write(tp, 0x06, 0xd819); + rtl8168_mdio_write(tp, 0x06, 0xd919); + rtl8168_mdio_write(tp, 0x06, 0x07dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0x1907); + rtl8168_mdio_write(tp, 0x06, 0xb2f4); + rtl8168_mdio_write(tp, 0x06, 0xd400); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x1c5b); + rtl8168_mdio_write(tp, 0x06, 0x023a); + rtl8168_mdio_write(tp, 0x06, 0x21fe); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfec6); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc05); + rtl8168_mdio_write(tp, 0x06, 0xf9e2); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0xeb5a); + 
rtl8168_mdio_write(tp, 0x06, 0x070c); + rtl8168_mdio_write(tp, 0x06, 0x031e); + rtl8168_mdio_write(tp, 0x06, 0x20e6); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe7e0); + rtl8168_mdio_write(tp, 0x06, 0xebe0); + rtl8168_mdio_write(tp, 0x06, 0xe0fc); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xfdfd); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0x8b80); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x22bf); + rtl8168_mdio_write(tp, 0x06, 0x47ba); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xf4e0); + rtl8168_mdio_write(tp, 0x06, 0x8b44); + rtl8168_mdio_write(tp, 0x06, 0x1f01); + rtl8168_mdio_write(tp, 0x06, 0x9e15); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x44ad); + rtl8168_mdio_write(tp, 0x06, 0x2907); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x04d1); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0x02d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x8342); + rtl8168_mdio_write(tp, 0x06, 0x023a); + rtl8168_mdio_write(tp, 0x06, 0x21ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x30e0); + rtl8168_mdio_write(tp, 0x06, 0xe036); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x37e1); + rtl8168_mdio_write(tp, 0x06, 0x8b3f); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e23); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x3fac); + rtl8168_mdio_write(tp, 0x06, 0x200b); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x0dac); + rtl8168_mdio_write(tp, 0x06, 0x250f); + rtl8168_mdio_write(tp, 0x06, 0xac27); + rtl8168_mdio_write(tp, 0x06, 0x11ae); + rtl8168_mdio_write(tp, 0x06, 0x1202); + rtl8168_mdio_write(tp, 0x06, 0x2cb5); + rtl8168_mdio_write(tp, 0x06, 0xae0d); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xe7ae); + rtl8168_mdio_write(tp, 0x06, 0x0802); + rtl8168_mdio_write(tp, 0x06, 0x2cd7); + rtl8168_mdio_write(tp, 0x06, 0xae03); + rtl8168_mdio_write(tp, 0x06, 0x022c); + rtl8168_mdio_write(tp, 0x06, 0xeafc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x6902); + rtl8168_mdio_write(tp, 0x06, 0x8304); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x14e1); + rtl8168_mdio_write(tp, 0x06, 0xe015); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x08d1); + rtl8168_mdio_write(tp, 0x06, 0x1ebf); + rtl8168_mdio_write(tp, 0x06, 0x2d47); + rtl8168_mdio_write(tp, 0x06, 0x023a); + rtl8168_mdio_write(tp, 0x06, 0x21ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x2fd0); + rtl8168_mdio_write(tp, 0x06, 0x0b02); + rtl8168_mdio_write(tp, 0x06, 0x3826); + rtl8168_mdio_write(tp, 0x06, 0x5882); + rtl8168_mdio_write(tp, 0x06, 0x7882); + rtl8168_mdio_write(tp, 0x06, 0x9f24); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x32e1); + rtl8168_mdio_write(tp, 0x06, 
0x8b33); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e1a); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x8b32); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x28e1); + rtl8168_mdio_write(tp, 0x06, 0xe029); + rtl8168_mdio_write(tp, 0x06, 0xf72c); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x28e5); + rtl8168_mdio_write(tp, 0x06, 0xe029); + rtl8168_mdio_write(tp, 0x06, 0xf62c); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x28e5); + rtl8168_mdio_write(tp, 0x06, 0xe029); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0x4077); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xbbe0); + rtl8168_mdio_write(tp, 0x06, 0x2a00); + rtl8168_mdio_write(tp, 0x05, 0xe142); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x05, 0xe140); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val |= BIT_1; + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~BIT_2; + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x09, 0xA20F); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0003); + rtl8168_mdio_write(tp, 0x01, 0x328A); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); +} + +static void +rtl8168_set_phy_mcu_8168g_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val; + + rtl8168_set_phy_mcu_patch_request(tp); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8146); + rtl8168_mdio_write(tp, 0x14, 0x2300); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0210); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0290); + rtl8168_mdio_write(tp, 0x13, 0xA012); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xA014); + rtl8168_mdio_write(tp, 0x14, 0x2c04); + rtl8168_mdio_write(tp, 0x14, 0x2c0c); + rtl8168_mdio_write(tp, 0x14, 0x2c6c); + rtl8168_mdio_write(tp, 0x14, 0x2d0d); + rtl8168_mdio_write(tp, 0x14, 0x31ce); + rtl8168_mdio_write(tp, 0x14, 0x506d); + rtl8168_mdio_write(tp, 0x14, 0xd708); + rtl8168_mdio_write(tp, 0x14, 0x3108); + rtl8168_mdio_write(tp, 0x14, 0x106d); + rtl8168_mdio_write(tp, 0x14, 0x1560); + rtl8168_mdio_write(tp, 0x14, 0x15a9); + rtl8168_mdio_write(tp, 0x14, 0x206e); + rtl8168_mdio_write(tp, 0x14, 0x175b); + rtl8168_mdio_write(tp, 0x14, 0x6062); + rtl8168_mdio_write(tp, 0x14, 0xd700); + rtl8168_mdio_write(tp, 0x14, 0x5fae); + rtl8168_mdio_write(tp, 0x14, 0xd708); + rtl8168_mdio_write(tp, 0x14, 0x3107); + rtl8168_mdio_write(tp, 0x14, 0x4c1e); + rtl8168_mdio_write(tp, 0x14, 0x4169); + rtl8168_mdio_write(tp, 0x14, 0x316a); + rtl8168_mdio_write(tp, 0x14, 0x0c19); + rtl8168_mdio_write(tp, 0x14, 0x31aa); + rtl8168_mdio_write(tp, 0x14, 0x0c19); + 
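/* On the 8168G family the upload window moves: page 0x0A43 is selected via reg 0x1f, reg 0x13 picks a patch-RAM address (the 0xB820 writes appear to open patch mode with 0x0290 and close it with 0x0210, while 0xA012/0xA014 seem to select code banks), and repeated reg 0x14 writes stream data with auto-increment -- hence the long runs of bare 0x14 writes below. A table-driven equivalent would be a loop such as: for (i = 0; i < n; i++) rtl8168_mdio_write(tp, 0x14, fw[i]); over a const u16 fw[] holding the same words (hypothetical sketch only; no such table exists in this driver). */ +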
rtl8168_mdio_write(tp, 0x14, 0x2c1b); + rtl8168_mdio_write(tp, 0x14, 0x5e62); + rtl8168_mdio_write(tp, 0x14, 0x26b5); + rtl8168_mdio_write(tp, 0x14, 0x31ab); + rtl8168_mdio_write(tp, 0x14, 0x5c1e); + rtl8168_mdio_write(tp, 0x14, 0x2c0c); + rtl8168_mdio_write(tp, 0x14, 0xc040); + rtl8168_mdio_write(tp, 0x14, 0x8808); + rtl8168_mdio_write(tp, 0x14, 0xc520); + rtl8168_mdio_write(tp, 0x14, 0xc421); + rtl8168_mdio_write(tp, 0x14, 0xd05a); + rtl8168_mdio_write(tp, 0x14, 0xd19a); + rtl8168_mdio_write(tp, 0x14, 0xd709); + rtl8168_mdio_write(tp, 0x14, 0x608f); + rtl8168_mdio_write(tp, 0x14, 0xd06b); + rtl8168_mdio_write(tp, 0x14, 0xd18a); + rtl8168_mdio_write(tp, 0x14, 0x2c2c); + rtl8168_mdio_write(tp, 0x14, 0xd0be); + rtl8168_mdio_write(tp, 0x14, 0xd188); + rtl8168_mdio_write(tp, 0x14, 0x2c2c); + rtl8168_mdio_write(tp, 0x14, 0xd708); + rtl8168_mdio_write(tp, 0x14, 0x4072); + rtl8168_mdio_write(tp, 0x14, 0xc104); + rtl8168_mdio_write(tp, 0x14, 0x2c3e); + rtl8168_mdio_write(tp, 0x14, 0x4076); + rtl8168_mdio_write(tp, 0x14, 0xc110); + rtl8168_mdio_write(tp, 0x14, 0x2c3e); + rtl8168_mdio_write(tp, 0x14, 0x4071); + rtl8168_mdio_write(tp, 0x14, 0xc102); + rtl8168_mdio_write(tp, 0x14, 0x2c3e); + rtl8168_mdio_write(tp, 0x14, 0x4070); + rtl8168_mdio_write(tp, 0x14, 0xc101); + rtl8168_mdio_write(tp, 0x14, 0x2c3e); + rtl8168_mdio_write(tp, 0x14, 0x175b); + rtl8168_mdio_write(tp, 0x14, 0xd709); + rtl8168_mdio_write(tp, 0x14, 0x3390); + rtl8168_mdio_write(tp, 0x14, 0x5c39); + rtl8168_mdio_write(tp, 0x14, 0x2c4e); + rtl8168_mdio_write(tp, 0x14, 0x175b); + rtl8168_mdio_write(tp, 0x14, 0xd708); + rtl8168_mdio_write(tp, 0x14, 0x6193); + rtl8168_mdio_write(tp, 0x14, 0xd709); + rtl8168_mdio_write(tp, 0x14, 0x5f9d); + rtl8168_mdio_write(tp, 0x14, 0x408b); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x6042); + rtl8168_mdio_write(tp, 0x14, 0xb401); + rtl8168_mdio_write(tp, 0x14, 0x175b); + rtl8168_mdio_write(tp, 0x14, 0xd708); + rtl8168_mdio_write(tp, 0x14, 0x6073); + rtl8168_mdio_write(tp, 0x14, 0x5fbc); + rtl8168_mdio_write(tp, 0x14, 0x2c4d); + rtl8168_mdio_write(tp, 0x14, 0x26ed); + rtl8168_mdio_write(tp, 0x14, 0xb280); + rtl8168_mdio_write(tp, 0x14, 0xa841); + rtl8168_mdio_write(tp, 0x14, 0x9420); + rtl8168_mdio_write(tp, 0x14, 0x8710); + rtl8168_mdio_write(tp, 0x14, 0xd709); + rtl8168_mdio_write(tp, 0x14, 0x42ec); + rtl8168_mdio_write(tp, 0x14, 0x606d); + rtl8168_mdio_write(tp, 0x14, 0xd207); + rtl8168_mdio_write(tp, 0x14, 0x2c57); + rtl8168_mdio_write(tp, 0x14, 0xd203); + rtl8168_mdio_write(tp, 0x14, 0x33ff); + rtl8168_mdio_write(tp, 0x14, 0x563b); + rtl8168_mdio_write(tp, 0x14, 0x3275); + rtl8168_mdio_write(tp, 0x14, 0x7c5e); + rtl8168_mdio_write(tp, 0x14, 0xb240); + rtl8168_mdio_write(tp, 0x14, 0xb402); + rtl8168_mdio_write(tp, 0x14, 0x263b); + rtl8168_mdio_write(tp, 0x14, 0x6096); + rtl8168_mdio_write(tp, 0x14, 0xb240); + rtl8168_mdio_write(tp, 0x14, 0xb406); + rtl8168_mdio_write(tp, 0x14, 0x263b); + rtl8168_mdio_write(tp, 0x14, 0x31d7); + rtl8168_mdio_write(tp, 0x14, 0x7c67); + rtl8168_mdio_write(tp, 0x14, 0xb240); + rtl8168_mdio_write(tp, 0x14, 0xb40e); + rtl8168_mdio_write(tp, 0x14, 0x263b); + rtl8168_mdio_write(tp, 0x14, 0xb410); + rtl8168_mdio_write(tp, 0x14, 0x8802); + rtl8168_mdio_write(tp, 0x14, 0xb240); + rtl8168_mdio_write(tp, 0x14, 0x940e); + rtl8168_mdio_write(tp, 0x14, 0x263b); + rtl8168_mdio_write(tp, 0x14, 0xba04); + rtl8168_mdio_write(tp, 0x14, 0x1cd6); + rtl8168_mdio_write(tp, 0x14, 0xa902); + rtl8168_mdio_write(tp, 0x14, 0xd711); + rtl8168_mdio_write(tp, 0x14, 
0x4045); + rtl8168_mdio_write(tp, 0x14, 0xa980); + rtl8168_mdio_write(tp, 0x14, 0x3003); + rtl8168_mdio_write(tp, 0x14, 0x59b1); + rtl8168_mdio_write(tp, 0x14, 0xa540); + rtl8168_mdio_write(tp, 0x14, 0xa601); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4043); + rtl8168_mdio_write(tp, 0x14, 0xa910); + rtl8168_mdio_write(tp, 0x14, 0xd711); + rtl8168_mdio_write(tp, 0x14, 0x60a0); + rtl8168_mdio_write(tp, 0x14, 0xca33); + rtl8168_mdio_write(tp, 0x14, 0xcb33); + rtl8168_mdio_write(tp, 0x14, 0xa941); + rtl8168_mdio_write(tp, 0x14, 0x2c82); + rtl8168_mdio_write(tp, 0x14, 0xcaff); + rtl8168_mdio_write(tp, 0x14, 0xcbff); + rtl8168_mdio_write(tp, 0x14, 0xa921); + rtl8168_mdio_write(tp, 0x14, 0xce02); + rtl8168_mdio_write(tp, 0x14, 0xe070); + rtl8168_mdio_write(tp, 0x14, 0x0f10); + rtl8168_mdio_write(tp, 0x14, 0xaf01); + rtl8168_mdio_write(tp, 0x14, 0x8f01); + rtl8168_mdio_write(tp, 0x14, 0x1766); + rtl8168_mdio_write(tp, 0x14, 0x8e02); + rtl8168_mdio_write(tp, 0x14, 0x1787); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x609c); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fa4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0x1ce9); + rtl8168_mdio_write(tp, 0x14, 0xce04); + rtl8168_mdio_write(tp, 0x14, 0xe070); + rtl8168_mdio_write(tp, 0x14, 0x0f20); + rtl8168_mdio_write(tp, 0x14, 0xaf01); + rtl8168_mdio_write(tp, 0x14, 0x8f01); + rtl8168_mdio_write(tp, 0x14, 0x1766); + rtl8168_mdio_write(tp, 0x14, 0x8e04); + rtl8168_mdio_write(tp, 0x14, 0x6044); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0xa520); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4043); + rtl8168_mdio_write(tp, 0x14, 0x2cc1); + rtl8168_mdio_write(tp, 0x14, 0xe00f); + rtl8168_mdio_write(tp, 0x14, 0x0501); + rtl8168_mdio_write(tp, 0x14, 0x1cef); + rtl8168_mdio_write(tp, 0x14, 0xb801); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x4060); + rtl8168_mdio_write(tp, 0x14, 0x7fc4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0x1cf5); + rtl8168_mdio_write(tp, 0x14, 0xe00f); + rtl8168_mdio_write(tp, 0x14, 0x0502); + rtl8168_mdio_write(tp, 0x14, 0x1cef); + rtl8168_mdio_write(tp, 0x14, 0xb802); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x4061); + rtl8168_mdio_write(tp, 0x14, 0x7fc4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0x1cf5); + rtl8168_mdio_write(tp, 0x14, 0xe00f); + rtl8168_mdio_write(tp, 0x14, 0x0504); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6099); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fa4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0xc17f); + rtl8168_mdio_write(tp, 0x14, 0xc200); + rtl8168_mdio_write(tp, 0x14, 0xc43f); + rtl8168_mdio_write(tp, 0x14, 0xcc03); + rtl8168_mdio_write(tp, 0x14, 0xa701); + rtl8168_mdio_write(tp, 0x14, 0xa510); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4018); + rtl8168_mdio_write(tp, 0x14, 0x9910); + rtl8168_mdio_write(tp, 0x14, 0x8510); + rtl8168_mdio_write(tp, 0x14, 0x2860); + rtl8168_mdio_write(tp, 0x14, 0xe00f); + rtl8168_mdio_write(tp, 0x14, 0x0504); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6099); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fa4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0xa608); + rtl8168_mdio_write(tp, 
0x14, 0xc17d); + rtl8168_mdio_write(tp, 0x14, 0xc200); + rtl8168_mdio_write(tp, 0x14, 0xc43f); + rtl8168_mdio_write(tp, 0x14, 0xcc03); + rtl8168_mdio_write(tp, 0x14, 0xa701); + rtl8168_mdio_write(tp, 0x14, 0xa510); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4018); + rtl8168_mdio_write(tp, 0x14, 0x9910); + rtl8168_mdio_write(tp, 0x14, 0x8510); + rtl8168_mdio_write(tp, 0x14, 0x2926); + rtl8168_mdio_write(tp, 0x14, 0x1792); + rtl8168_mdio_write(tp, 0x14, 0x27db); + rtl8168_mdio_write(tp, 0x14, 0xc000); + rtl8168_mdio_write(tp, 0x14, 0xc100); + rtl8168_mdio_write(tp, 0x14, 0xc200); + rtl8168_mdio_write(tp, 0x14, 0xc300); + rtl8168_mdio_write(tp, 0x14, 0xc400); + rtl8168_mdio_write(tp, 0x14, 0xc500); + rtl8168_mdio_write(tp, 0x14, 0xc600); + rtl8168_mdio_write(tp, 0x14, 0xc7c1); + rtl8168_mdio_write(tp, 0x14, 0xc800); + rtl8168_mdio_write(tp, 0x14, 0xcc00); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xca0f); + rtl8168_mdio_write(tp, 0x14, 0xcbff); + rtl8168_mdio_write(tp, 0x14, 0xa901); + rtl8168_mdio_write(tp, 0x14, 0x8902); + rtl8168_mdio_write(tp, 0x14, 0xc900); + rtl8168_mdio_write(tp, 0x14, 0xca00); + rtl8168_mdio_write(tp, 0x14, 0xcb00); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xb804); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x6044); + rtl8168_mdio_write(tp, 0x14, 0x9804); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6099); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fa4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xa510); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6098); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fa4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0x8510); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xd711); + rtl8168_mdio_write(tp, 0x14, 0x3003); + rtl8168_mdio_write(tp, 0x14, 0x1d01); + rtl8168_mdio_write(tp, 0x14, 0x2d0b); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x60be); + rtl8168_mdio_write(tp, 0x14, 0xe060); + rtl8168_mdio_write(tp, 0x14, 0x0920); + rtl8168_mdio_write(tp, 0x14, 0x1cd6); + rtl8168_mdio_write(tp, 0x14, 0x2c89); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x3063); + rtl8168_mdio_write(tp, 0x14, 0x1948); + rtl8168_mdio_write(tp, 0x14, 0x288a); + rtl8168_mdio_write(tp, 0x14, 0x1cd6); + rtl8168_mdio_write(tp, 0x14, 0x29bd); + rtl8168_mdio_write(tp, 0x14, 0xa802); + rtl8168_mdio_write(tp, 0x14, 0xa303); + rtl8168_mdio_write(tp, 0x14, 0x843f); + rtl8168_mdio_write(tp, 0x14, 0x81ff); + rtl8168_mdio_write(tp, 0x14, 0x8208); + rtl8168_mdio_write(tp, 0x14, 0xa201); + rtl8168_mdio_write(tp, 0x14, 0xc001); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x30a0); + rtl8168_mdio_write(tp, 0x14, 0x0d1c); + rtl8168_mdio_write(tp, 0x14, 0x30a0); + rtl8168_mdio_write(tp, 0x14, 0x3d13); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7f4c); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0xe003); + rtl8168_mdio_write(tp, 0x14, 0x0202); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6090); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fac); + 
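/* Unlike the byte-oriented 0x06 streams earlier in the file, these 0x14 payloads look like fixed-width 16-bit MCU instruction words (recurring forms such as 0xd7xx followed by an operand word resemble test-and-branch pairs). This is inferred from the pattern only; the values remain vendor-opaque and must not be edited. */ +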
rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0xa20c); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6091); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fac); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0x820e); + rtl8168_mdio_write(tp, 0x14, 0xa3e0); + rtl8168_mdio_write(tp, 0x14, 0xa520); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x609d); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fac); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0x8520); + rtl8168_mdio_write(tp, 0x14, 0x6703); + rtl8168_mdio_write(tp, 0x14, 0x2d34); + rtl8168_mdio_write(tp, 0x14, 0xa13e); + rtl8168_mdio_write(tp, 0x14, 0xc001); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 0x6046); + rtl8168_mdio_write(tp, 0x14, 0x2d0d); + rtl8168_mdio_write(tp, 0x14, 0xa43f); + rtl8168_mdio_write(tp, 0x14, 0xa101); + rtl8168_mdio_write(tp, 0x14, 0xc020); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x3121); + rtl8168_mdio_write(tp, 0x14, 0x0d45); + rtl8168_mdio_write(tp, 0x14, 0x30c0); + rtl8168_mdio_write(tp, 0x14, 0x3d0d); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7f4c); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0xa540); + rtl8168_mdio_write(tp, 0x14, 0xc001); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4001); + rtl8168_mdio_write(tp, 0x14, 0xe00f); + rtl8168_mdio_write(tp, 0x14, 0x0501); + rtl8168_mdio_write(tp, 0x14, 0x1dac); + rtl8168_mdio_write(tp, 0x14, 0xc1c4); + rtl8168_mdio_write(tp, 0x14, 0xa268); + rtl8168_mdio_write(tp, 0x14, 0xa303); + rtl8168_mdio_write(tp, 0x14, 0x8420); + rtl8168_mdio_write(tp, 0x14, 0xe00f); + rtl8168_mdio_write(tp, 0x14, 0x0502); + rtl8168_mdio_write(tp, 0x14, 0x1dac); + rtl8168_mdio_write(tp, 0x14, 0xc002); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 0x8208); + rtl8168_mdio_write(tp, 0x14, 0x8410); + rtl8168_mdio_write(tp, 0x14, 0xa121); + rtl8168_mdio_write(tp, 0x14, 0xc002); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 0x8120); + rtl8168_mdio_write(tp, 0x14, 0x8180); + rtl8168_mdio_write(tp, 0x14, 0x1d97); + rtl8168_mdio_write(tp, 0x14, 0xa180); + rtl8168_mdio_write(tp, 0x14, 0xa13a); + rtl8168_mdio_write(tp, 0x14, 0x8240); + rtl8168_mdio_write(tp, 0x14, 0xa430); + rtl8168_mdio_write(tp, 0x14, 0xc010); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x30e1); + rtl8168_mdio_write(tp, 0x14, 0x0abc); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7f8c); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0xa480); + rtl8168_mdio_write(tp, 0x14, 0xa230); + rtl8168_mdio_write(tp, 0x14, 0xa303); + rtl8168_mdio_write(tp, 0x14, 0xc001); + rtl8168_mdio_write(tp, 0x14, 0xd70c); + rtl8168_mdio_write(tp, 0x14, 0x4124); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6120); + rtl8168_mdio_write(tp, 0x14, 0xd711); + rtl8168_mdio_write(tp, 0x14, 0x3128); + rtl8168_mdio_write(tp, 0x14, 0x3d76); + rtl8168_mdio_write(tp, 0x14, 0x2d70); + rtl8168_mdio_write(tp, 0x14, 0xa801); + rtl8168_mdio_write(tp, 0x14, 0x2d6c); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 
0xe018); + rtl8168_mdio_write(tp, 0x14, 0x0208); + rtl8168_mdio_write(tp, 0x14, 0xa1f8); + rtl8168_mdio_write(tp, 0x14, 0x8480); + rtl8168_mdio_write(tp, 0x14, 0xc004); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 0x6046); + rtl8168_mdio_write(tp, 0x14, 0x2d0d); + rtl8168_mdio_write(tp, 0x14, 0xa43f); + rtl8168_mdio_write(tp, 0x14, 0xa105); + rtl8168_mdio_write(tp, 0x14, 0x8228); + rtl8168_mdio_write(tp, 0x14, 0xc004); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 0x81bc); + rtl8168_mdio_write(tp, 0x14, 0xa220); + rtl8168_mdio_write(tp, 0x14, 0x1d97); + rtl8168_mdio_write(tp, 0x14, 0x8220); + rtl8168_mdio_write(tp, 0x14, 0xa1bc); + rtl8168_mdio_write(tp, 0x14, 0xc040); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x30e1); + rtl8168_mdio_write(tp, 0x14, 0x0abc); + rtl8168_mdio_write(tp, 0x14, 0x30e1); + rtl8168_mdio_write(tp, 0x14, 0x3d0d); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7f4c); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0xa802); + rtl8168_mdio_write(tp, 0x14, 0xd70c); + rtl8168_mdio_write(tp, 0x14, 0x4244); + rtl8168_mdio_write(tp, 0x14, 0xa301); + rtl8168_mdio_write(tp, 0x14, 0xc004); + rtl8168_mdio_write(tp, 0x14, 0xd711); + rtl8168_mdio_write(tp, 0x14, 0x3128); + rtl8168_mdio_write(tp, 0x14, 0x3da5); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x5f80); + rtl8168_mdio_write(tp, 0x14, 0xd711); + rtl8168_mdio_write(tp, 0x14, 0x3109); + rtl8168_mdio_write(tp, 0x14, 0x3da7); + rtl8168_mdio_write(tp, 0x14, 0x2dab); + rtl8168_mdio_write(tp, 0x14, 0xa801); + rtl8168_mdio_write(tp, 0x14, 0x2d9a); + rtl8168_mdio_write(tp, 0x14, 0xa802); + rtl8168_mdio_write(tp, 0x14, 0xc004); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xa510); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x609a); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fac); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0x8510); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x13, 0xA01A); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xA006); + rtl8168_mdio_write(tp, 0x14, 0x0ad6); + rtl8168_mdio_write(tp, 0x13, 0xA004); + rtl8168_mdio_write(tp, 0x14, 0x07f5); + rtl8168_mdio_write(tp, 0x13, 0xA002); + rtl8168_mdio_write(tp, 0x14, 0x06a9); + rtl8168_mdio_write(tp, 0x13, 0xA000); + rtl8168_mdio_write(tp, 0x14, 0xf069); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0210); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x83a0); + rtl8168_mdio_write(tp, 0x14, 0xaf83); + rtl8168_mdio_write(tp, 0x14, 0xacaf); + rtl8168_mdio_write(tp, 0x14, 0x83b8); + rtl8168_mdio_write(tp, 0x14, 0xaf83); + rtl8168_mdio_write(tp, 0x14, 0xcdaf); + rtl8168_mdio_write(tp, 0x14, 0x83d3); + rtl8168_mdio_write(tp, 0x14, 0x0204); + rtl8168_mdio_write(tp, 0x14, 0x9a02); + rtl8168_mdio_write(tp, 0x14, 0x09a9); + rtl8168_mdio_write(tp, 0x14, 0x0284); + rtl8168_mdio_write(tp, 0x14, 0x61af); + rtl8168_mdio_write(tp, 0x14, 0x02fc); + rtl8168_mdio_write(tp, 0x14, 0xad20); + rtl8168_mdio_write(tp, 0x14, 0x0302); + rtl8168_mdio_write(tp, 0x14, 0x867c); + rtl8168_mdio_write(tp, 0x14, 0xad21); + rtl8168_mdio_write(tp, 0x14, 0x0302); + 
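/* The 0xA01A..0xA000 records written earlier in this function appear to register per-bank entry points for the code just uploaded, and the trailing 0xB820 = 0x0210 write seems to close the patch-mode bracket opened with 0x0290; the 0x83a0 block now being streamed looks like a further firmware image for the same PHY, uploaded through the identical 0x13/0x14 window. */ +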
rtl8168_mdio_write(tp, 0x14, 0x85c9); + rtl8168_mdio_write(tp, 0x14, 0xad22); + rtl8168_mdio_write(tp, 0x14, 0x0302); + rtl8168_mdio_write(tp, 0x14, 0x1bc0); + rtl8168_mdio_write(tp, 0x14, 0xaf17); + rtl8168_mdio_write(tp, 0x14, 0xe302); + rtl8168_mdio_write(tp, 0x14, 0x8703); + rtl8168_mdio_write(tp, 0x14, 0xaf18); + rtl8168_mdio_write(tp, 0x14, 0x6201); + rtl8168_mdio_write(tp, 0x14, 0x06e0); + rtl8168_mdio_write(tp, 0x14, 0x8148); + rtl8168_mdio_write(tp, 0x14, 0xaf3c); + rtl8168_mdio_write(tp, 0x14, 0x69f8); + rtl8168_mdio_write(tp, 0x14, 0xf9fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0x10f7); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0x131f); + rtl8168_mdio_write(tp, 0x14, 0xd104); + rtl8168_mdio_write(tp, 0x14, 0xbf87); + rtl8168_mdio_write(tp, 0x14, 0xf302); + rtl8168_mdio_write(tp, 0x14, 0x4259); + rtl8168_mdio_write(tp, 0x14, 0x0287); + rtl8168_mdio_write(tp, 0x14, 0x88bf); + rtl8168_mdio_write(tp, 0x14, 0x87cf); + rtl8168_mdio_write(tp, 0x14, 0xd7b8); + rtl8168_mdio_write(tp, 0x14, 0x22d0); + rtl8168_mdio_write(tp, 0x14, 0x0c02); + rtl8168_mdio_write(tp, 0x14, 0x4252); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xcda0); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xce8b); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xd1f5); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xd2a9); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xd30a); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xf010); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xf38f); + rtl8168_mdio_write(tp, 0x14, 0xee81); + rtl8168_mdio_write(tp, 0x14, 0x011e); + rtl8168_mdio_write(tp, 0x14, 0xee81); + rtl8168_mdio_write(tp, 0x14, 0x0b4a); + rtl8168_mdio_write(tp, 0x14, 0xee81); + rtl8168_mdio_write(tp, 0x14, 0x0c7c); + rtl8168_mdio_write(tp, 0x14, 0xee81); + rtl8168_mdio_write(tp, 0x14, 0x127f); + rtl8168_mdio_write(tp, 0x14, 0xd100); + rtl8168_mdio_write(tp, 0x14, 0x0210); + rtl8168_mdio_write(tp, 0x14, 0xb5ee); + rtl8168_mdio_write(tp, 0x14, 0x8088); + rtl8168_mdio_write(tp, 0x14, 0xa4ee); + rtl8168_mdio_write(tp, 0x14, 0x8089); + rtl8168_mdio_write(tp, 0x14, 0x44ee); + rtl8168_mdio_write(tp, 0x14, 0x809a); + rtl8168_mdio_write(tp, 0x14, 0xa4ee); + rtl8168_mdio_write(tp, 0x14, 0x809b); + rtl8168_mdio_write(tp, 0x14, 0x44ee); + rtl8168_mdio_write(tp, 0x14, 0x809c); + rtl8168_mdio_write(tp, 0x14, 0xa7ee); + rtl8168_mdio_write(tp, 0x14, 0x80a5); + rtl8168_mdio_write(tp, 0x14, 0xa7d2); + rtl8168_mdio_write(tp, 0x14, 0x0002); + rtl8168_mdio_write(tp, 0x14, 0x0e66); + rtl8168_mdio_write(tp, 0x14, 0x0285); + rtl8168_mdio_write(tp, 0x14, 0xc0ee); + rtl8168_mdio_write(tp, 0x14, 0x87fc); + rtl8168_mdio_write(tp, 0x14, 0x00e0); + rtl8168_mdio_write(tp, 0x14, 0x8245); + rtl8168_mdio_write(tp, 0x14, 0xf622); + rtl8168_mdio_write(tp, 0x14, 0xe482); + rtl8168_mdio_write(tp, 0x14, 0x45ef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfdfc); + rtl8168_mdio_write(tp, 0x14, 0x0402); + rtl8168_mdio_write(tp, 0x14, 0x847a); + rtl8168_mdio_write(tp, 0x14, 0x0284); + rtl8168_mdio_write(tp, 0x14, 0xb302); + rtl8168_mdio_write(tp, 0x14, 0x0cab); + rtl8168_mdio_write(tp, 0x14, 0x020c); + rtl8168_mdio_write(tp, 0x14, 0xc402); + rtl8168_mdio_write(tp, 0x14, 0x0cef); + rtl8168_mdio_write(tp, 0x14, 0x020d); + rtl8168_mdio_write(tp, 0x14, 
0x0802); + rtl8168_mdio_write(tp, 0x14, 0x0d33); + rtl8168_mdio_write(tp, 0x14, 0x020c); + rtl8168_mdio_write(tp, 0x14, 0x3d04); + rtl8168_mdio_write(tp, 0x14, 0xf8fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xe182); + rtl8168_mdio_write(tp, 0x14, 0x2fac); + rtl8168_mdio_write(tp, 0x14, 0x291a); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x24ac); + rtl8168_mdio_write(tp, 0x14, 0x2102); + rtl8168_mdio_write(tp, 0x14, 0xae22); + rtl8168_mdio_write(tp, 0x14, 0x0210); + rtl8168_mdio_write(tp, 0x14, 0x57f6); + rtl8168_mdio_write(tp, 0x14, 0x21e4); + rtl8168_mdio_write(tp, 0x14, 0x8224); + rtl8168_mdio_write(tp, 0x14, 0xd101); + rtl8168_mdio_write(tp, 0x14, 0xbf44); + rtl8168_mdio_write(tp, 0x14, 0xd202); + rtl8168_mdio_write(tp, 0x14, 0x4259); + rtl8168_mdio_write(tp, 0x14, 0xae10); + rtl8168_mdio_write(tp, 0x14, 0x0212); + rtl8168_mdio_write(tp, 0x14, 0x4cf6); + rtl8168_mdio_write(tp, 0x14, 0x29e5); + rtl8168_mdio_write(tp, 0x14, 0x822f); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x24f6); + rtl8168_mdio_write(tp, 0x14, 0x21e4); + rtl8168_mdio_write(tp, 0x14, 0x8224); + rtl8168_mdio_write(tp, 0x14, 0xef96); + rtl8168_mdio_write(tp, 0x14, 0xfefc); + rtl8168_mdio_write(tp, 0x14, 0x04f8); + rtl8168_mdio_write(tp, 0x14, 0xe182); + rtl8168_mdio_write(tp, 0x14, 0x2fac); + rtl8168_mdio_write(tp, 0x14, 0x2a18); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x24ac); + rtl8168_mdio_write(tp, 0x14, 0x2202); + rtl8168_mdio_write(tp, 0x14, 0xae26); + rtl8168_mdio_write(tp, 0x14, 0x0284); + rtl8168_mdio_write(tp, 0x14, 0xf802); + rtl8168_mdio_write(tp, 0x14, 0x8565); + rtl8168_mdio_write(tp, 0x14, 0xd101); + rtl8168_mdio_write(tp, 0x14, 0xbf44); + rtl8168_mdio_write(tp, 0x14, 0xd502); + rtl8168_mdio_write(tp, 0x14, 0x4259); + rtl8168_mdio_write(tp, 0x14, 0xae0e); + rtl8168_mdio_write(tp, 0x14, 0x0284); + rtl8168_mdio_write(tp, 0x14, 0xea02); + rtl8168_mdio_write(tp, 0x14, 0x85a9); + rtl8168_mdio_write(tp, 0x14, 0xe182); + rtl8168_mdio_write(tp, 0x14, 0x2ff6); + rtl8168_mdio_write(tp, 0x14, 0x2ae5); + rtl8168_mdio_write(tp, 0x14, 0x822f); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x24f6); + rtl8168_mdio_write(tp, 0x14, 0x22e4); + rtl8168_mdio_write(tp, 0x14, 0x8224); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xf9e2); + rtl8168_mdio_write(tp, 0x14, 0x8011); + rtl8168_mdio_write(tp, 0x14, 0xad31); + rtl8168_mdio_write(tp, 0x14, 0x05d2); + rtl8168_mdio_write(tp, 0x14, 0x0002); + rtl8168_mdio_write(tp, 0x14, 0x0e66); + rtl8168_mdio_write(tp, 0x14, 0xfd04); + rtl8168_mdio_write(tp, 0x14, 0xf8f9); + rtl8168_mdio_write(tp, 0x14, 0xfaef); + rtl8168_mdio_write(tp, 0x14, 0x69e0); + rtl8168_mdio_write(tp, 0x14, 0x8011); + rtl8168_mdio_write(tp, 0x14, 0xad21); + rtl8168_mdio_write(tp, 0x14, 0x5cbf); + rtl8168_mdio_write(tp, 0x14, 0x43be); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97ac); + rtl8168_mdio_write(tp, 0x14, 0x281b); + rtl8168_mdio_write(tp, 0x14, 0xbf43); + rtl8168_mdio_write(tp, 0x14, 0xc102); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0xac28); + rtl8168_mdio_write(tp, 0x14, 0x12bf); + rtl8168_mdio_write(tp, 0x14, 0x43c7); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97ac); + rtl8168_mdio_write(tp, 0x14, 0x2804); + rtl8168_mdio_write(tp, 0x14, 0xd300); + rtl8168_mdio_write(tp, 0x14, 0xae07); + rtl8168_mdio_write(tp, 0x14, 0xd306); + rtl8168_mdio_write(tp, 
0x14, 0xaf85); + rtl8168_mdio_write(tp, 0x14, 0x56d3); + rtl8168_mdio_write(tp, 0x14, 0x03e0); + rtl8168_mdio_write(tp, 0x14, 0x8011); + rtl8168_mdio_write(tp, 0x14, 0xad26); + rtl8168_mdio_write(tp, 0x14, 0x25bf); + rtl8168_mdio_write(tp, 0x14, 0x4559); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97e2); + rtl8168_mdio_write(tp, 0x14, 0x8073); + rtl8168_mdio_write(tp, 0x14, 0x0d21); + rtl8168_mdio_write(tp, 0x14, 0xf637); + rtl8168_mdio_write(tp, 0x14, 0x0d11); + rtl8168_mdio_write(tp, 0x14, 0xf62f); + rtl8168_mdio_write(tp, 0x14, 0x1b21); + rtl8168_mdio_write(tp, 0x14, 0xaa02); + rtl8168_mdio_write(tp, 0x14, 0xae10); + rtl8168_mdio_write(tp, 0x14, 0xe280); + rtl8168_mdio_write(tp, 0x14, 0x740d); + rtl8168_mdio_write(tp, 0x14, 0x21f6); + rtl8168_mdio_write(tp, 0x14, 0x371b); + rtl8168_mdio_write(tp, 0x14, 0x21aa); + rtl8168_mdio_write(tp, 0x14, 0x0313); + rtl8168_mdio_write(tp, 0x14, 0xae02); + rtl8168_mdio_write(tp, 0x14, 0x2b02); + rtl8168_mdio_write(tp, 0x14, 0x020e); + rtl8168_mdio_write(tp, 0x14, 0x5102); + rtl8168_mdio_write(tp, 0x14, 0x0e66); + rtl8168_mdio_write(tp, 0x14, 0x020f); + rtl8168_mdio_write(tp, 0x14, 0xa3ef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfdfc); + rtl8168_mdio_write(tp, 0x14, 0x04f8); + rtl8168_mdio_write(tp, 0x14, 0xf9fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xe080); + rtl8168_mdio_write(tp, 0x14, 0x12ad); + rtl8168_mdio_write(tp, 0x14, 0x2733); + rtl8168_mdio_write(tp, 0x14, 0xbf43); + rtl8168_mdio_write(tp, 0x14, 0xbe02); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0xac28); + rtl8168_mdio_write(tp, 0x14, 0x09bf); + rtl8168_mdio_write(tp, 0x14, 0x43c1); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97ad); + rtl8168_mdio_write(tp, 0x14, 0x2821); + rtl8168_mdio_write(tp, 0x14, 0xbf45); + rtl8168_mdio_write(tp, 0x14, 0x5902); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0xe387); + rtl8168_mdio_write(tp, 0x14, 0xffd2); + rtl8168_mdio_write(tp, 0x14, 0x001b); + rtl8168_mdio_write(tp, 0x14, 0x45ac); + rtl8168_mdio_write(tp, 0x14, 0x2711); + rtl8168_mdio_write(tp, 0x14, 0xe187); + rtl8168_mdio_write(tp, 0x14, 0xfebf); + rtl8168_mdio_write(tp, 0x14, 0x87e4); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x590d); + rtl8168_mdio_write(tp, 0x14, 0x11bf); + rtl8168_mdio_write(tp, 0x14, 0x87e7); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59ef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfdfc); + rtl8168_mdio_write(tp, 0x14, 0x04f8); + rtl8168_mdio_write(tp, 0x14, 0xfaef); + rtl8168_mdio_write(tp, 0x14, 0x69d1); + rtl8168_mdio_write(tp, 0x14, 0x00bf); + rtl8168_mdio_write(tp, 0x14, 0x87e4); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59bf); + rtl8168_mdio_write(tp, 0x14, 0x87e7); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59ef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xee87); + rtl8168_mdio_write(tp, 0x14, 0xff46); + rtl8168_mdio_write(tp, 0x14, 0xee87); + rtl8168_mdio_write(tp, 0x14, 0xfe01); + rtl8168_mdio_write(tp, 0x14, 0x04f8); + rtl8168_mdio_write(tp, 0x14, 0xfaef); + rtl8168_mdio_write(tp, 0x14, 0x69e0); + rtl8168_mdio_write(tp, 0x14, 0x8241); + rtl8168_mdio_write(tp, 0x14, 0xa000); + rtl8168_mdio_write(tp, 0x14, 0x0502); + rtl8168_mdio_write(tp, 0x14, 0x85eb); + 
rtl8168_mdio_write(tp, 0x14, 0xae0e); + rtl8168_mdio_write(tp, 0x14, 0xa001); + rtl8168_mdio_write(tp, 0x14, 0x0502); + rtl8168_mdio_write(tp, 0x14, 0x1a5a); + rtl8168_mdio_write(tp, 0x14, 0xae06); + rtl8168_mdio_write(tp, 0x14, 0xa002); + rtl8168_mdio_write(tp, 0x14, 0x0302); + rtl8168_mdio_write(tp, 0x14, 0x1ae6); + rtl8168_mdio_write(tp, 0x14, 0xef96); + rtl8168_mdio_write(tp, 0x14, 0xfefc); + rtl8168_mdio_write(tp, 0x14, 0x04f8); + rtl8168_mdio_write(tp, 0x14, 0xf9fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x29f6); + rtl8168_mdio_write(tp, 0x14, 0x21e4); + rtl8168_mdio_write(tp, 0x14, 0x8229); + rtl8168_mdio_write(tp, 0x14, 0xe080); + rtl8168_mdio_write(tp, 0x14, 0x10ac); + rtl8168_mdio_write(tp, 0x14, 0x2202); + rtl8168_mdio_write(tp, 0x14, 0xae76); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x27f7); + rtl8168_mdio_write(tp, 0x14, 0x21e4); + rtl8168_mdio_write(tp, 0x14, 0x8227); + rtl8168_mdio_write(tp, 0x14, 0xbf43); + rtl8168_mdio_write(tp, 0x14, 0x1302); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0xef21); + rtl8168_mdio_write(tp, 0x14, 0xbf43); + rtl8168_mdio_write(tp, 0x14, 0x1602); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0x0c11); + rtl8168_mdio_write(tp, 0x14, 0x1e21); + rtl8168_mdio_write(tp, 0x14, 0xbf43); + rtl8168_mdio_write(tp, 0x14, 0x1902); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0x0c12); + rtl8168_mdio_write(tp, 0x14, 0x1e21); + rtl8168_mdio_write(tp, 0x14, 0xe682); + rtl8168_mdio_write(tp, 0x14, 0x43a2); + rtl8168_mdio_write(tp, 0x14, 0x000a); + rtl8168_mdio_write(tp, 0x14, 0xe182); + rtl8168_mdio_write(tp, 0x14, 0x27f6); + rtl8168_mdio_write(tp, 0x14, 0x29e5); + rtl8168_mdio_write(tp, 0x14, 0x8227); + rtl8168_mdio_write(tp, 0x14, 0xae42); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x44f7); + rtl8168_mdio_write(tp, 0x14, 0x21e4); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x0246); + rtl8168_mdio_write(tp, 0x14, 0xaebf); + rtl8168_mdio_write(tp, 0x14, 0x4325); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97ef); + rtl8168_mdio_write(tp, 0x14, 0x21bf); + rtl8168_mdio_write(tp, 0x14, 0x431c); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x970c); + rtl8168_mdio_write(tp, 0x14, 0x121e); + rtl8168_mdio_write(tp, 0x14, 0x21bf); + rtl8168_mdio_write(tp, 0x14, 0x431f); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x970c); + rtl8168_mdio_write(tp, 0x14, 0x131e); + rtl8168_mdio_write(tp, 0x14, 0x21bf); + rtl8168_mdio_write(tp, 0x14, 0x4328); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x970c); + rtl8168_mdio_write(tp, 0x14, 0x141e); + rtl8168_mdio_write(tp, 0x14, 0x21bf); + rtl8168_mdio_write(tp, 0x14, 0x44b1); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x970c); + rtl8168_mdio_write(tp, 0x14, 0x161e); + rtl8168_mdio_write(tp, 0x14, 0x21e6); + rtl8168_mdio_write(tp, 0x14, 0x8242); + rtl8168_mdio_write(tp, 0x14, 0xee82); + rtl8168_mdio_write(tp, 0x14, 0x4101); + rtl8168_mdio_write(tp, 0x14, 0xef96); + rtl8168_mdio_write(tp, 0x14, 0xfefd); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xf8fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x46a0); + rtl8168_mdio_write(tp, 0x14, 0x0005); + rtl8168_mdio_write(tp, 0x14, 
0x0286); + rtl8168_mdio_write(tp, 0x14, 0x96ae); + rtl8168_mdio_write(tp, 0x14, 0x06a0); + rtl8168_mdio_write(tp, 0x14, 0x0103); + rtl8168_mdio_write(tp, 0x14, 0x0219); + rtl8168_mdio_write(tp, 0x14, 0x19ef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xf8fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x29f6); + rtl8168_mdio_write(tp, 0x14, 0x20e4); + rtl8168_mdio_write(tp, 0x14, 0x8229); + rtl8168_mdio_write(tp, 0x14, 0xe080); + rtl8168_mdio_write(tp, 0x14, 0x10ac); + rtl8168_mdio_write(tp, 0x14, 0x2102); + rtl8168_mdio_write(tp, 0x14, 0xae54); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x27f7); + rtl8168_mdio_write(tp, 0x14, 0x20e4); + rtl8168_mdio_write(tp, 0x14, 0x8227); + rtl8168_mdio_write(tp, 0x14, 0xbf42); + rtl8168_mdio_write(tp, 0x14, 0xe602); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0xac28); + rtl8168_mdio_write(tp, 0x14, 0x22bf); + rtl8168_mdio_write(tp, 0x14, 0x430d); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97e5); + rtl8168_mdio_write(tp, 0x14, 0x8247); + rtl8168_mdio_write(tp, 0x14, 0xac28); + rtl8168_mdio_write(tp, 0x14, 0x20d1); + rtl8168_mdio_write(tp, 0x14, 0x03bf); + rtl8168_mdio_write(tp, 0x14, 0x4307); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59ee); + rtl8168_mdio_write(tp, 0x14, 0x8246); + rtl8168_mdio_write(tp, 0x14, 0x00e1); + rtl8168_mdio_write(tp, 0x14, 0x8227); + rtl8168_mdio_write(tp, 0x14, 0xf628); + rtl8168_mdio_write(tp, 0x14, 0xe582); + rtl8168_mdio_write(tp, 0x14, 0x27ae); + rtl8168_mdio_write(tp, 0x14, 0x21d1); + rtl8168_mdio_write(tp, 0x14, 0x04bf); + rtl8168_mdio_write(tp, 0x14, 0x4307); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59ae); + rtl8168_mdio_write(tp, 0x14, 0x08d1); + rtl8168_mdio_write(tp, 0x14, 0x05bf); + rtl8168_mdio_write(tp, 0x14, 0x4307); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59e0); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0xf720); + rtl8168_mdio_write(tp, 0x14, 0xe482); + rtl8168_mdio_write(tp, 0x14, 0x4402); + rtl8168_mdio_write(tp, 0x14, 0x46ae); + rtl8168_mdio_write(tp, 0x14, 0xee82); + rtl8168_mdio_write(tp, 0x14, 0x4601); + rtl8168_mdio_write(tp, 0x14, 0xef96); + rtl8168_mdio_write(tp, 0x14, 0xfefc); + rtl8168_mdio_write(tp, 0x14, 0x04f8); + rtl8168_mdio_write(tp, 0x14, 0xfaef); + rtl8168_mdio_write(tp, 0x14, 0x69e0); + rtl8168_mdio_write(tp, 0x14, 0x8013); + rtl8168_mdio_write(tp, 0x14, 0xad24); + rtl8168_mdio_write(tp, 0x14, 0x1cbf); + rtl8168_mdio_write(tp, 0x14, 0x87f0); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97ad); + rtl8168_mdio_write(tp, 0x14, 0x2813); + rtl8168_mdio_write(tp, 0x14, 0xe087); + rtl8168_mdio_write(tp, 0x14, 0xfca0); + rtl8168_mdio_write(tp, 0x14, 0x0005); + rtl8168_mdio_write(tp, 0x14, 0x0287); + rtl8168_mdio_write(tp, 0x14, 0x36ae); + rtl8168_mdio_write(tp, 0x14, 0x10a0); + rtl8168_mdio_write(tp, 0x14, 0x0105); + rtl8168_mdio_write(tp, 0x14, 0x0287); + rtl8168_mdio_write(tp, 0x14, 0x48ae); + rtl8168_mdio_write(tp, 0x14, 0x08e0); + rtl8168_mdio_write(tp, 0x14, 0x8230); + rtl8168_mdio_write(tp, 0x14, 0xf626); + rtl8168_mdio_write(tp, 0x14, 0xe482); + rtl8168_mdio_write(tp, 0x14, 0x30ef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xf8e0); + rtl8168_mdio_write(tp, 
0x14, 0x8245); + rtl8168_mdio_write(tp, 0x14, 0xf722); + rtl8168_mdio_write(tp, 0x14, 0xe482); + rtl8168_mdio_write(tp, 0x14, 0x4502); + rtl8168_mdio_write(tp, 0x14, 0x46ae); + rtl8168_mdio_write(tp, 0x14, 0xee87); + rtl8168_mdio_write(tp, 0x14, 0xfc01); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xf8fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xfb02); + rtl8168_mdio_write(tp, 0x14, 0x46d3); + rtl8168_mdio_write(tp, 0x14, 0xad50); + rtl8168_mdio_write(tp, 0x14, 0x2fbf); + rtl8168_mdio_write(tp, 0x14, 0x87ed); + rtl8168_mdio_write(tp, 0x14, 0xd101); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59bf); + rtl8168_mdio_write(tp, 0x14, 0x87ed); + rtl8168_mdio_write(tp, 0x14, 0xd100); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59e0); + rtl8168_mdio_write(tp, 0x14, 0x8245); + rtl8168_mdio_write(tp, 0x14, 0xf622); + rtl8168_mdio_write(tp, 0x14, 0xe482); + rtl8168_mdio_write(tp, 0x14, 0x4502); + rtl8168_mdio_write(tp, 0x14, 0x46ae); + rtl8168_mdio_write(tp, 0x14, 0xd100); + rtl8168_mdio_write(tp, 0x14, 0xbf87); + rtl8168_mdio_write(tp, 0x14, 0xf002); + rtl8168_mdio_write(tp, 0x14, 0x4259); + rtl8168_mdio_write(tp, 0x14, 0xee87); + rtl8168_mdio_write(tp, 0x14, 0xfc00); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x30f6); + rtl8168_mdio_write(tp, 0x14, 0x26e4); + rtl8168_mdio_write(tp, 0x14, 0x8230); + rtl8168_mdio_write(tp, 0x14, 0xffef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xf8f9); + rtl8168_mdio_write(tp, 0x14, 0xface); + rtl8168_mdio_write(tp, 0x14, 0xfaef); + rtl8168_mdio_write(tp, 0x14, 0x69fb); + rtl8168_mdio_write(tp, 0x14, 0xbf87); + rtl8168_mdio_write(tp, 0x14, 0xb3d7); + rtl8168_mdio_write(tp, 0x14, 0x001c); + rtl8168_mdio_write(tp, 0x14, 0xd819); + rtl8168_mdio_write(tp, 0x14, 0xd919); + rtl8168_mdio_write(tp, 0x14, 0xda19); + rtl8168_mdio_write(tp, 0x14, 0xdb19); + rtl8168_mdio_write(tp, 0x14, 0x07ef); + rtl8168_mdio_write(tp, 0x14, 0x9502); + rtl8168_mdio_write(tp, 0x14, 0x4259); + rtl8168_mdio_write(tp, 0x14, 0x073f); + rtl8168_mdio_write(tp, 0x14, 0x0004); + rtl8168_mdio_write(tp, 0x14, 0x9fec); + rtl8168_mdio_write(tp, 0x14, 0xffef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xc6fe); + rtl8168_mdio_write(tp, 0x14, 0xfdfc); + rtl8168_mdio_write(tp, 0x14, 0x0400); + rtl8168_mdio_write(tp, 0x14, 0x0145); + rtl8168_mdio_write(tp, 0x14, 0x7d00); + rtl8168_mdio_write(tp, 0x14, 0x0345); + rtl8168_mdio_write(tp, 0x14, 0x5c00); + rtl8168_mdio_write(tp, 0x14, 0x0143); + rtl8168_mdio_write(tp, 0x14, 0x4f00); + rtl8168_mdio_write(tp, 0x14, 0x0387); + rtl8168_mdio_write(tp, 0x14, 0xdb00); + rtl8168_mdio_write(tp, 0x14, 0x0987); + rtl8168_mdio_write(tp, 0x14, 0xde00); + rtl8168_mdio_write(tp, 0x14, 0x0987); + rtl8168_mdio_write(tp, 0x14, 0xe100); + rtl8168_mdio_write(tp, 0x14, 0x0087); + rtl8168_mdio_write(tp, 0x14, 0xeaa4); + rtl8168_mdio_write(tp, 0x14, 0x00b8); + rtl8168_mdio_write(tp, 0x14, 0x20c4); + rtl8168_mdio_write(tp, 0x14, 0x1600); + rtl8168_mdio_write(tp, 0x14, 0x000f); + rtl8168_mdio_write(tp, 0x14, 0xf800); + rtl8168_mdio_write(tp, 0x14, 0x7098); + rtl8168_mdio_write(tp, 0x14, 0xa58a); + rtl8168_mdio_write(tp, 0x14, 0xb6a8); + rtl8168_mdio_write(tp, 0x14, 0x3e50); + rtl8168_mdio_write(tp, 0x14, 0xa83e); + rtl8168_mdio_write(tp, 0x14, 0x33bc); + rtl8168_mdio_write(tp, 0x14, 0xc622); + rtl8168_mdio_write(tp, 0x14, 0xbcc6); + 
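/* The patch words end just below; the 0x13/0x14 pairs that follow (0xb818..0xb832) appear to set the patch entry/check-point registers and a final enable flag before the MCU patch request is cleared. */ +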
rtl8168_mdio_write(tp, 0x14, 0xaaa4); + rtl8168_mdio_write(tp, 0x14, 0x42ff); + rtl8168_mdio_write(tp, 0x14, 0xc408); + rtl8168_mdio_write(tp, 0x14, 0x00c4); + rtl8168_mdio_write(tp, 0x14, 0x16a8); + rtl8168_mdio_write(tp, 0x14, 0xbcc0); + rtl8168_mdio_write(tp, 0x13, 0xb818); + rtl8168_mdio_write(tp, 0x14, 0x02f3); + rtl8168_mdio_write(tp, 0x13, 0xb81a); + rtl8168_mdio_write(tp, 0x14, 0x17d1); + rtl8168_mdio_write(tp, 0x13, 0xb81c); + rtl8168_mdio_write(tp, 0x14, 0x185a); + rtl8168_mdio_write(tp, 0x13, 0xb81e); + rtl8168_mdio_write(tp, 0x14, 0x3c66); + rtl8168_mdio_write(tp, 0x13, 0xb820); + rtl8168_mdio_write(tp, 0x14, 0x021f); + rtl8168_mdio_write(tp, 0x13, 0xc416); + rtl8168_mdio_write(tp, 0x14, 0x0500); + rtl8168_mdio_write(tp, 0x13, 0xb82e); + rtl8168_mdio_write(tp, 0x14, 0xfffc); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + gphy_val = rtl8168_mdio_read(tp, 0x10); + gphy_val &= ~(BIT_9); + rtl8168_mdio_write(tp, 0x10, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8146); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_clear_phy_mcu_patch_request(tp); +} + +static void +rtl8168_set_phy_mcu_8168gu_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val; + + rtl8168_set_phy_mcu_patch_request(tp); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8146); + rtl8168_mdio_write(tp, 0x14, 0x0300); + rtl8168_mdio_write(tp, 0x13, 0xB82E); + rtl8168_mdio_write(tp, 0x14, 0x0001); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0xb820); + rtl8168_mdio_write(tp, 0x14, 0x0290); + rtl8168_mdio_write(tp, 0x13, 0xa012); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xa014); + rtl8168_mdio_write(tp, 0x14, 0x2c04); + rtl8168_mdio_write(tp, 0x14, 0x2c07); + rtl8168_mdio_write(tp, 0x14, 0x2c07); + rtl8168_mdio_write(tp, 0x14, 0x2c07); + rtl8168_mdio_write(tp, 0x14, 0xa304); + rtl8168_mdio_write(tp, 0x14, 0xa301); + rtl8168_mdio_write(tp, 0x14, 0x207e); + rtl8168_mdio_write(tp, 0x13, 0xa01a); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xa006); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xa004); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xa002); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xa000); + rtl8168_mdio_write(tp, 0x14, 0x107c); + rtl8168_mdio_write(tp, 0x13, 0xb820); + rtl8168_mdio_write(tp, 0x14, 0x0210); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8146); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_clear_phy_mcu_patch_request(tp); +} + +static void +rtl8168_set_phy_mcu_8411b_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val; + + rtl8168_set_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp,0x1f, 0x0A43); + rtl8168_mdio_write(tp,0x13, 0x8146); + rtl8168_mdio_write(tp,0x14, 0x0100); + rtl8168_mdio_write(tp,0x13, 0xB82E); + rtl8168_mdio_write(tp,0x14, 0x0001); + + + rtl8168_mdio_write(tp,0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0xb820); + rtl8168_mdio_write(tp, 
0x14, 0x0290); + rtl8168_mdio_write(tp, 0x13, 0xa012); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xa014); + rtl8168_mdio_write(tp, 0x14, 0x2c04); + rtl8168_mdio_write(tp, 0x14, 0x2c07); + rtl8168_mdio_write(tp, 0x14, 0x2c07); + rtl8168_mdio_write(tp, 0x14, 0x2c07); + rtl8168_mdio_write(tp, 0x14, 0xa304); + rtl8168_mdio_write(tp, 0x14, 0xa301); + rtl8168_mdio_write(tp, 0x14, 0x207e); + rtl8168_mdio_write(tp, 0x13, 0xa01a); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xa006); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xa004); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xa002); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xa000); + rtl8168_mdio_write(tp, 0x14, 0x107c); + rtl8168_mdio_write(tp, 0x13, 0xb820); + rtl8168_mdio_write(tp, 0x14, 0x0210); + + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8146); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_clear_phy_mcu_patch_request(tp); +} + +static void +rtl8168_set_phy_mcu_8168ep_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val; + + rtl8168_set_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp,0x1f, 0x0A43); + rtl8168_mdio_write(tp,0x13, 0x8146); + rtl8168_mdio_write(tp,0x14, 0x8700); + rtl8168_mdio_write(tp,0x13, 0xB82E); + rtl8168_mdio_write(tp,0x14, 0x0001); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + + rtl8168_mdio_write(tp, 0x13, 0x83DD); + rtl8168_mdio_write(tp, 0x14, 0xAF83); + rtl8168_mdio_write(tp, 0x14, 0xE9AF); + rtl8168_mdio_write(tp, 0x14, 0x83EE); + rtl8168_mdio_write(tp, 0x14, 0xAF83); + rtl8168_mdio_write(tp, 0x14, 0xF1A1); + rtl8168_mdio_write(tp, 0x14, 0x83F4); + rtl8168_mdio_write(tp, 0x14, 0xD149); + rtl8168_mdio_write(tp, 0x14, 0xAF06); + rtl8168_mdio_write(tp, 0x14, 0x47AF); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0xAF00); + rtl8168_mdio_write(tp, 0x14, 0x00AF); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_mdio_write(tp, 0x13, 0xB818); + rtl8168_mdio_write(tp, 0x14, 0x0645); + + rtl8168_mdio_write(tp, 0x13, 0xB81A); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_mdio_write(tp, 0x13, 0xB81C); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_mdio_write(tp, 0x13, 0xB81E); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_mdio_write(tp, 0x13, 0xB832); + rtl8168_mdio_write(tp, 0x14, 0x0001); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8146); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_clear_phy_mcu_patch_request(tp); +} + +static void +rtl8168_set_phy_mcu_8168h_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val; + + rtl8168_set_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8028); + rtl8168_mdio_write(tp, 0x14, 0x6200); + rtl8168_mdio_write(tp, 0x13, 0xB82E); + rtl8168_mdio_write(tp, 0x14, 
0x0001); + + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0290); + rtl8168_mdio_write(tp, 0x13, 0xA012); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xA014); + rtl8168_mdio_write(tp, 0x14, 0x2c04); + rtl8168_mdio_write(tp, 0x14, 0x2c10); + rtl8168_mdio_write(tp, 0x14, 0x2c10); + rtl8168_mdio_write(tp, 0x14, 0x2c10); + rtl8168_mdio_write(tp, 0x14, 0xa210); + rtl8168_mdio_write(tp, 0x14, 0xa101); + rtl8168_mdio_write(tp, 0x14, 0xce10); + rtl8168_mdio_write(tp, 0x14, 0xe070); + rtl8168_mdio_write(tp, 0x14, 0x0f40); + rtl8168_mdio_write(tp, 0x14, 0xaf01); + rtl8168_mdio_write(tp, 0x14, 0x8f01); + rtl8168_mdio_write(tp, 0x14, 0x183e); + rtl8168_mdio_write(tp, 0x14, 0x8e10); + rtl8168_mdio_write(tp, 0x14, 0x8101); + rtl8168_mdio_write(tp, 0x14, 0x8210); + rtl8168_mdio_write(tp, 0x14, 0x28da); + rtl8168_mdio_write(tp, 0x13, 0xA01A); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xA006); + rtl8168_mdio_write(tp, 0x14, 0x0017); + rtl8168_mdio_write(tp, 0x13, 0xA004); + rtl8168_mdio_write(tp, 0x14, 0x0015); + rtl8168_mdio_write(tp, 0x13, 0xA002); + rtl8168_mdio_write(tp, 0x14, 0x0013); + rtl8168_mdio_write(tp, 0x13, 0xA000); + rtl8168_mdio_write(tp, 0x14, 0x18d1); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0210); + + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8028); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_clear_phy_mcu_patch_request(tp); +} + +static void +rtl8168_set_phy_mcu_8168h_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val; + + rtl8168_set_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8028); + rtl8168_mdio_write(tp, 0x14, 0x6201); + rtl8168_mdio_write(tp, 0x13, 0xB82E); + rtl8168_mdio_write(tp, 0x14, 0x0001); + + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0290); + rtl8168_mdio_write(tp, 0x13, 0xA012); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xA014); + rtl8168_mdio_write(tp, 0x14, 0x2c04); + rtl8168_mdio_write(tp, 0x14, 0x2c09); + rtl8168_mdio_write(tp, 0x14, 0x2c09); + rtl8168_mdio_write(tp, 0x14, 0x2c09); + rtl8168_mdio_write(tp, 0x14, 0xad01); + rtl8168_mdio_write(tp, 0x14, 0xad01); + rtl8168_mdio_write(tp, 0x14, 0xad01); + rtl8168_mdio_write(tp, 0x14, 0xad01); + rtl8168_mdio_write(tp, 0x14, 0x236c); + rtl8168_mdio_write(tp, 0x13, 0xA01A); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xA006); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xA004); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xA002); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xA000); + rtl8168_mdio_write(tp, 0x14, 0x136b); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0210); + + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8323); + rtl8168_mdio_write(tp, 0x14, 0xaf83); + rtl8168_mdio_write(tp, 0x14, 0x2faf); + rtl8168_mdio_write(tp, 0x14, 0x853d); + rtl8168_mdio_write(tp, 0x14, 0xaf85); + 
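/* What follows, down to the 0xb818 block, appears to be a further section of the 8168h (rev 2) patch image, loaded at PHY MCU address 0x8323 and again made up of opaque firmware words. */ +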
rtl8168_mdio_write(tp, 0x14, 0x3daf); + rtl8168_mdio_write(tp, 0x14, 0x853d); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x45ad); + rtl8168_mdio_write(tp, 0x14, 0x2052); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7ae3); + rtl8168_mdio_write(tp, 0x14, 0x85fe); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f6); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7a1b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fa); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7be3); + rtl8168_mdio_write(tp, 0x14, 0x85fe); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f7); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7b1b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fb); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7ce3); + rtl8168_mdio_write(tp, 0x14, 0x85fe); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f8); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7c1b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fc); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7de3); + rtl8168_mdio_write(tp, 0x14, 0x85fe); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f9); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7d1b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fd); + rtl8168_mdio_write(tp, 0x14, 0xae50); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7ee3); + rtl8168_mdio_write(tp, 0x14, 0x85ff); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f6); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7e1b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fa); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7fe3); + rtl8168_mdio_write(tp, 0x14, 0x85ff); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f7); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7f1b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fb); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x80e3); + rtl8168_mdio_write(tp, 0x14, 0x85ff); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f8); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x801b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fc); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x81e3); + rtl8168_mdio_write(tp, 0x14, 0x85ff); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f9); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x811b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fd); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf6ad); + rtl8168_mdio_write(tp, 0x14, 
0x2404); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xf610); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf7ad); + rtl8168_mdio_write(tp, 0x14, 0x2404); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xf710); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf8ad); + rtl8168_mdio_write(tp, 0x14, 0x2404); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xf810); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf9ad); + rtl8168_mdio_write(tp, 0x14, 0x2404); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xf910); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfaad); + rtl8168_mdio_write(tp, 0x14, 0x2704); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xfa00); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfbad); + rtl8168_mdio_write(tp, 0x14, 0x2704); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xfb00); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfcad); + rtl8168_mdio_write(tp, 0x14, 0x2704); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xfc00); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfdad); + rtl8168_mdio_write(tp, 0x14, 0x2704); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xfd00); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x44ad); + rtl8168_mdio_write(tp, 0x14, 0x203f); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf6e4); + rtl8168_mdio_write(tp, 0x14, 0x8288); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfae4); + rtl8168_mdio_write(tp, 0x14, 0x8289); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x440d); + rtl8168_mdio_write(tp, 0x14, 0x0458); + rtl8168_mdio_write(tp, 0x14, 0x01bf); + rtl8168_mdio_write(tp, 0x14, 0x8264); + rtl8168_mdio_write(tp, 0x14, 0x0215); + rtl8168_mdio_write(tp, 0x14, 0x38bf); + rtl8168_mdio_write(tp, 0x14, 0x824e); + rtl8168_mdio_write(tp, 0x14, 0x0213); + rtl8168_mdio_write(tp, 0x14, 0x06a0); + rtl8168_mdio_write(tp, 0x14, 0x010f); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x44f6); + rtl8168_mdio_write(tp, 0x14, 0x20e4); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x580f); + rtl8168_mdio_write(tp, 0x14, 0xe582); + rtl8168_mdio_write(tp, 0x14, 0x5aae); + rtl8168_mdio_write(tp, 0x14, 0x0ebf); + rtl8168_mdio_write(tp, 0x14, 0x825e); + rtl8168_mdio_write(tp, 0x14, 0xe382); + rtl8168_mdio_write(tp, 0x14, 0x44f7); + rtl8168_mdio_write(tp, 0x14, 0x3ce7); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x0212); + rtl8168_mdio_write(tp, 0x14, 0xf0ad); + rtl8168_mdio_write(tp, 0x14, 0x213f); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf7e4); + rtl8168_mdio_write(tp, 0x14, 0x8288); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfbe4); + rtl8168_mdio_write(tp, 0x14, 0x8289); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x440d); + rtl8168_mdio_write(tp, 0x14, 0x0558); + rtl8168_mdio_write(tp, 0x14, 0x01bf); + rtl8168_mdio_write(tp, 0x14, 0x826b); + rtl8168_mdio_write(tp, 0x14, 0x0215); + rtl8168_mdio_write(tp, 0x14, 0x38bf); + rtl8168_mdio_write(tp, 0x14, 0x824f); + rtl8168_mdio_write(tp, 0x14, 0x0213); + rtl8168_mdio_write(tp, 
0x14, 0x06a0); + rtl8168_mdio_write(tp, 0x14, 0x010f); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x44f6); + rtl8168_mdio_write(tp, 0x14, 0x21e4); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x580f); + rtl8168_mdio_write(tp, 0x14, 0xe582); + rtl8168_mdio_write(tp, 0x14, 0x5bae); + rtl8168_mdio_write(tp, 0x14, 0x0ebf); + rtl8168_mdio_write(tp, 0x14, 0x8265); + rtl8168_mdio_write(tp, 0x14, 0xe382); + rtl8168_mdio_write(tp, 0x14, 0x44f7); + rtl8168_mdio_write(tp, 0x14, 0x3de7); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x0212); + rtl8168_mdio_write(tp, 0x14, 0xf0ad); + rtl8168_mdio_write(tp, 0x14, 0x223f); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf8e4); + rtl8168_mdio_write(tp, 0x14, 0x8288); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfce4); + rtl8168_mdio_write(tp, 0x14, 0x8289); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x440d); + rtl8168_mdio_write(tp, 0x14, 0x0658); + rtl8168_mdio_write(tp, 0x14, 0x01bf); + rtl8168_mdio_write(tp, 0x14, 0x8272); + rtl8168_mdio_write(tp, 0x14, 0x0215); + rtl8168_mdio_write(tp, 0x14, 0x38bf); + rtl8168_mdio_write(tp, 0x14, 0x8250); + rtl8168_mdio_write(tp, 0x14, 0x0213); + rtl8168_mdio_write(tp, 0x14, 0x06a0); + rtl8168_mdio_write(tp, 0x14, 0x010f); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x44f6); + rtl8168_mdio_write(tp, 0x14, 0x22e4); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x580f); + rtl8168_mdio_write(tp, 0x14, 0xe582); + rtl8168_mdio_write(tp, 0x14, 0x5cae); + rtl8168_mdio_write(tp, 0x14, 0x0ebf); + rtl8168_mdio_write(tp, 0x14, 0x826c); + rtl8168_mdio_write(tp, 0x14, 0xe382); + rtl8168_mdio_write(tp, 0x14, 0x44f7); + rtl8168_mdio_write(tp, 0x14, 0x3ee7); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x0212); + rtl8168_mdio_write(tp, 0x14, 0xf0ad); + rtl8168_mdio_write(tp, 0x14, 0x233f); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf9e4); + rtl8168_mdio_write(tp, 0x14, 0x8288); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfde4); + rtl8168_mdio_write(tp, 0x14, 0x8289); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x440d); + rtl8168_mdio_write(tp, 0x14, 0x0758); + rtl8168_mdio_write(tp, 0x14, 0x01bf); + rtl8168_mdio_write(tp, 0x14, 0x8279); + rtl8168_mdio_write(tp, 0x14, 0x0215); + rtl8168_mdio_write(tp, 0x14, 0x38bf); + rtl8168_mdio_write(tp, 0x14, 0x8251); + rtl8168_mdio_write(tp, 0x14, 0x0213); + rtl8168_mdio_write(tp, 0x14, 0x06a0); + rtl8168_mdio_write(tp, 0x14, 0x010f); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x44f6); + rtl8168_mdio_write(tp, 0x14, 0x23e4); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x580f); + rtl8168_mdio_write(tp, 0x14, 0xe582); + rtl8168_mdio_write(tp, 0x14, 0x5dae); + rtl8168_mdio_write(tp, 0x14, 0x0ebf); + rtl8168_mdio_write(tp, 0x14, 0x8273); + rtl8168_mdio_write(tp, 0x14, 0xe382); + rtl8168_mdio_write(tp, 0x14, 0x44f7); + rtl8168_mdio_write(tp, 0x14, 0x3fe7); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x0212); + rtl8168_mdio_write(tp, 0x14, 0xf0ee); + rtl8168_mdio_write(tp, 0x14, 0x8288); + rtl8168_mdio_write(tp, 0x14, 0x10ee); + rtl8168_mdio_write(tp, 0x14, 0x8289); + rtl8168_mdio_write(tp, 0x14, 0x00af); + rtl8168_mdio_write(tp, 0x14, 0x14aa); + rtl8168_mdio_write(tp, 0x13, 0xb818); + 
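/* In each of these trailer pairs, the 0x13 write picks a PHY MCU control register (0xb818..0xb832) and the next 0x14 write supplies its value; they look like patch check-point addresses plus a final ready bit. */ +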
rtl8168_mdio_write(tp, 0x14, 0x13cf); + rtl8168_mdio_write(tp, 0x13, 0xb81a); + rtl8168_mdio_write(tp, 0x14, 0xfffd); + rtl8168_mdio_write(tp, 0x13, 0xb81c); + rtl8168_mdio_write(tp, 0x14, 0xfffd); + rtl8168_mdio_write(tp, 0x13, 0xb81e); + rtl8168_mdio_write(tp, 0x14, 0xfffd); + rtl8168_mdio_write(tp, 0x13, 0xb832); + rtl8168_mdio_write(tp, 0x14, 0x0001); + + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8028); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_clear_phy_mcu_patch_request(tp); + + if (tp->RequiredSecLanDonglePatch) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + gphy_val = rtl8168_mdio_read(tp, 0x11); + gphy_val &= ~BIT_6; + rtl8168_mdio_write(tp, 0x11, gphy_val); + } +} + +static void +rtl8168_init_hw_phy_mcu(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u8 require_disable_phy_disable_mode = FALSE; + + if (tp->NotWrRamCodeToMicroP == TRUE) return; + if (rtl8168_check_hw_phy_mcu_code_ver(dev)) return; + + if (FALSE == rtl8168_phy_ram_code_check(dev)) { + rtl8168_set_phy_ram_code_check_fail_flag(dev); + return; + } + + if (HW_SUPPORT_CHECK_PHY_DISABLE_MODE(tp) && rtl8168_is_in_phy_disable_mode(dev)) + require_disable_phy_disable_mode = TRUE; + + if (require_disable_phy_disable_mode) + rtl8168_disable_phy_disable_mode(dev); + + switch (tp->mcfg) { + case CFG_METHOD_14: + rtl8168_set_phy_mcu_8168e_1(dev); + break; + case CFG_METHOD_15: + rtl8168_set_phy_mcu_8168e_2(dev); + break; + case CFG_METHOD_16: + rtl8168_set_phy_mcu_8168evl_1(dev); + break; + case CFG_METHOD_17: + rtl8168_set_phy_mcu_8168evl_2(dev); + break; + case CFG_METHOD_18: + rtl8168_set_phy_mcu_8168f_1(dev); + break; + case CFG_METHOD_19: + rtl8168_set_phy_mcu_8168f_2(dev); + break; + case CFG_METHOD_20: + rtl8168_set_phy_mcu_8411_1(dev); + break; + case CFG_METHOD_21: + rtl8168_set_phy_mcu_8168g_1(dev); + break; + case CFG_METHOD_25: + rtl8168_set_phy_mcu_8168gu_2(dev); + break; + case CFG_METHOD_26: + rtl8168_set_phy_mcu_8411b_1(dev); + break; + case CFG_METHOD_28: + rtl8168_set_phy_mcu_8168ep_2(dev); + break; + case CFG_METHOD_29: + rtl8168_set_phy_mcu_8168h_1(dev); + break; + case CFG_METHOD_30: + rtl8168_set_phy_mcu_8168h_2(dev); + break; + } + + if (require_disable_phy_disable_mode) + rtl8168_enable_phy_disable_mode(dev); + + rtl8168_write_hw_phy_mcu_code_ver(dev); + + rtl8168_mdio_write(tp,0x1F, 0x0000); + + tp->HwHasWrRamCodeToMicroP = TRUE; +} +#endif + +static void +rtl8168_hw_phy_config(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + u16 gphy_val; + unsigned int i; + + tp->phy_reset_enable(dev); + + if (HW_DASH_SUPPORT_TYPE_3(tp) && tp->HwPkgDet == 0x06) return; + +#ifndef ENABLE_USE_FIRMWARE_FILE + if (!tp->rtl_fw) { + rtl8168_init_hw_phy_mcu(dev); + } +#endif + + if (tp->mcfg == CFG_METHOD_1) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0B, 0x94B0); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x12, 0x6096); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x0D, 0xF8A0); + } else if (tp->mcfg == CFG_METHOD_2) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0B, 0x94B0); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + 
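/* Throughout rtl8168_hw_phy_config, writes to register 0x1F select the PHY register page; every write that follows lands in the page chosen by the most recent 0x1F write. */ +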
rtl8168_mdio_write(tp, 0x12, 0x6096); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_3) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0B, 0x94B0); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x12, 0x6096); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_4) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x12, 0x2300); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x16, 0x000A); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x12, 0xC096); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x00, 0x88DE); + rtl8168_mdio_write(tp, 0x01, 0x82B1); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x08, 0x9E30); + rtl8168_mdio_write(tp, 0x09, 0x01F0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0A, 0x5500); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x03, 0x7002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0C, 0x00C8); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | (1 << 5)); + rtl8168_mdio_write(tp, 0x0D, rtl8168_mdio_read(tp, 0x0D) & ~(1 << 5)); + } else if (tp->mcfg == CFG_METHOD_5) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x12, 0x2300); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x16, 0x0F0A); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x00, 0x88DE); + rtl8168_mdio_write(tp, 0x01, 0x82B1); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0C, 0x7EB8); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x0761); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x03, 0x802F); + rtl8168_mdio_write(tp, 0x02, 0x4F02); + rtl8168_mdio_write(tp, 0x01, 0x0409); + rtl8168_mdio_write(tp, 0x00, 0xF099); + rtl8168_mdio_write(tp, 0x04, 0x9800); + rtl8168_mdio_write(tp, 0x04, 0x9000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x16, rtl8168_mdio_read(tp, 0x16) | (1 << 0)); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | (1 << 5)); + rtl8168_mdio_write(tp, 0x0D, rtl8168_mdio_read(tp, 0x0D) & ~(1 << 5)); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x1D, 0x3D98); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_6) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x12, 0x2300); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x16, 0x0F0A); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x00, 0x88DE); + rtl8168_mdio_write(tp, 0x01, 0x82B1); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 
0x0002); + rtl8168_mdio_write(tp, 0x0C, 0x7EB8); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x5461); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x5461); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x16, rtl8168_mdio_read(tp, 0x16) | (1 << 0)); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | (1 << 5)); + rtl8168_mdio_write(tp, 0x0D, rtl8168_mdio_read(tp, 0x0D) & ~(1 << 5)); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x1D, 0x3D98); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_7) { + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | (1 << 5)); + rtl8168_mdio_write(tp, 0x0D, rtl8168_mdio_read(tp, 0x0D) & ~(1 << 5)); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x1D, 0x3D98); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x14, 0xCAA3); + rtl8168_mdio_write(tp, 0x1C, 0x000A); + rtl8168_mdio_write(tp, 0x18, 0x65D0); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x17, 0xB580); + rtl8168_mdio_write(tp, 0x18, 0xFF54); + rtl8168_mdio_write(tp, 0x19, 0x3954); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0D, 0x310C); + rtl8168_mdio_write(tp, 0x0E, 0x310C); + rtl8168_mdio_write(tp, 0x0F, 0x311C); + rtl8168_mdio_write(tp, 0x06, 0x0761); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x18, 0xFF55); + rtl8168_mdio_write(tp, 0x19, 0x3955); + rtl8168_mdio_write(tp, 0x18, 0xFF54); + rtl8168_mdio_write(tp, 0x19, 0x3954); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_8) { + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | (1 << 5)); + rtl8168_mdio_write(tp, 0x0D, rtl8168_mdio_read(tp, 0x0D) & ~(1 << 5)); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x14, 0xCAA3); + rtl8168_mdio_write(tp, 0x1C, 0x000A); + rtl8168_mdio_write(tp, 0x18, 0x65D0); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x17, 0xB580); + rtl8168_mdio_write(tp, 0x18, 0xFF54); + rtl8168_mdio_write(tp, 0x19, 0x3954); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0D, 0x310C); + rtl8168_mdio_write(tp, 0x0E, 0x310C); + rtl8168_mdio_write(tp, 0x0F, 0x311C); + rtl8168_mdio_write(tp, 0x06, 0x0761); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x18, 0xFF55); + rtl8168_mdio_write(tp, 0x19, 0x3955); + rtl8168_mdio_write(tp, 0x18, 0xFF54); + rtl8168_mdio_write(tp, 0x19, 0x3954); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x16, rtl8168_mdio_read(tp, 0x16) | (1 << 0)); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_9) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x06, 0x4064); + rtl8168_mdio_write(tp, 0x07, 0x2863); + rtl8168_mdio_write(tp, 0x08, 0x059C); + rtl8168_mdio_write(tp, 0x09, 0x26B4); + rtl8168_mdio_write(tp, 0x0A, 0x6A19); + rtl8168_mdio_write(tp, 0x0B, 0xDCC8); + 
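/* These page-1 and page-3 writes program undocumented, vendor-supplied analog and DSP tuning constants for CFG_METHOD_9. */ +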
rtl8168_mdio_write(tp, 0x10, 0xF06D); + rtl8168_mdio_write(tp, 0x14, 0x7F68); + rtl8168_mdio_write(tp, 0x18, 0x7FD9); + rtl8168_mdio_write(tp, 0x1C, 0xF0FF); + rtl8168_mdio_write(tp, 0x1D, 0x3D9C); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x12, 0xF49F); + rtl8168_mdio_write(tp, 0x13, 0x070B); + rtl8168_mdio_write(tp, 0x1A, 0x05AD); + rtl8168_mdio_write(tp, 0x14, 0x94C0); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0B) & 0xFF00; + gphy_val |= 0x10; + rtl8168_mdio_write(tp, 0x0B, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x0C) & 0x00FF; + gphy_val |= 0xA200; + rtl8168_mdio_write(tp, 0x0C, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x5561); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8332); + rtl8168_mdio_write(tp, 0x06, 0x5561); + + if (rtl8168_efuse_read(tp, 0x01) == 0xb1) { + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x05, 0x669A); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8330); + rtl8168_mdio_write(tp, 0x06, 0x669A); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0D); + if ((gphy_val & 0x00FF) != 0x006C) { + gphy_val &= 0xFF00; + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0065); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0066); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0067); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0068); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0069); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x006A); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x006B); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x006C); + } + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x05, 0x6662); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8330); + rtl8168_mdio_write(tp, 0x06, 0x6662); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0D); + gphy_val |= BIT_9; + gphy_val |= BIT_8; + rtl8168_mdio_write(tp, 0x0D, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x0F); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x0F, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x02); + gphy_val &= ~BIT_10; + gphy_val &= ~BIT_9; + gphy_val |= BIT_8; + rtl8168_mdio_write(tp, 0x02, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x03); + gphy_val &= ~BIT_15; + gphy_val &= ~BIT_14; + gphy_val &= ~BIT_13; + rtl8168_mdio_write(tp, 0x03, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x001B); + if (rtl8168_mdio_read(tp, 0x06) == 0xBF00) { + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x59ee); + rtl8168_mdio_write(tp, 0x06, 0xf8ea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xf8eb); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xf87c); + rtl8168_mdio_write(tp, 0x06, 0xe1f8); + rtl8168_mdio_write(tp, 0x06, 0x7d59); + rtl8168_mdio_write(tp, 0x06, 0x0fef); + rtl8168_mdio_write(tp, 0x06, 0x0139); + rtl8168_mdio_write(tp, 0x06, 0x029e); + rtl8168_mdio_write(tp, 0x06, 0x06ef); + rtl8168_mdio_write(tp, 0x06, 0x1039); + rtl8168_mdio_write(tp, 0x06, 
0x089f); + rtl8168_mdio_write(tp, 0x06, 0x2aee); + rtl8168_mdio_write(tp, 0x06, 0xf8ea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xf8eb); + rtl8168_mdio_write(tp, 0x06, 0x01e0); + rtl8168_mdio_write(tp, 0x06, 0xf87c); + rtl8168_mdio_write(tp, 0x06, 0xe1f8); + rtl8168_mdio_write(tp, 0x06, 0x7d58); + rtl8168_mdio_write(tp, 0x06, 0x409e); + rtl8168_mdio_write(tp, 0x06, 0x0f39); + rtl8168_mdio_write(tp, 0x06, 0x46aa); + rtl8168_mdio_write(tp, 0x06, 0x0bbf); + rtl8168_mdio_write(tp, 0x06, 0x8290); + rtl8168_mdio_write(tp, 0x06, 0xd682); + rtl8168_mdio_write(tp, 0x06, 0x9802); + rtl8168_mdio_write(tp, 0x06, 0x014f); + rtl8168_mdio_write(tp, 0x06, 0xae09); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0x98d6); + rtl8168_mdio_write(tp, 0x06, 0x82a0); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x4fef); + rtl8168_mdio_write(tp, 0x06, 0x95fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x05f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xeef8); + rtl8168_mdio_write(tp, 0x06, 0xea00); + rtl8168_mdio_write(tp, 0x06, 0xeef8); + rtl8168_mdio_write(tp, 0x06, 0xeb00); + rtl8168_mdio_write(tp, 0x06, 0xe2f8); + rtl8168_mdio_write(tp, 0x06, 0x7ce3); + rtl8168_mdio_write(tp, 0x06, 0xf87d); + rtl8168_mdio_write(tp, 0x06, 0xa511); + rtl8168_mdio_write(tp, 0x06, 0x1112); + rtl8168_mdio_write(tp, 0x06, 0xd240); + rtl8168_mdio_write(tp, 0x06, 0xd644); + rtl8168_mdio_write(tp, 0x06, 0x4402); + rtl8168_mdio_write(tp, 0x06, 0x8217); + rtl8168_mdio_write(tp, 0x06, 0xd2a0); + rtl8168_mdio_write(tp, 0x06, 0xd6aa); + rtl8168_mdio_write(tp, 0x06, 0xaa02); + rtl8168_mdio_write(tp, 0x06, 0x8217); + rtl8168_mdio_write(tp, 0x06, 0xae0f); + rtl8168_mdio_write(tp, 0x06, 0xa544); + rtl8168_mdio_write(tp, 0x06, 0x4402); + rtl8168_mdio_write(tp, 0x06, 0xae4d); + rtl8168_mdio_write(tp, 0x06, 0xa5aa); + rtl8168_mdio_write(tp, 0x06, 0xaa02); + rtl8168_mdio_write(tp, 0x06, 0xae47); + rtl8168_mdio_write(tp, 0x06, 0xaf82); + rtl8168_mdio_write(tp, 0x06, 0x13ee); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0x0fee); + rtl8168_mdio_write(tp, 0x06, 0x834c); + rtl8168_mdio_write(tp, 0x06, 0x0fee); + rtl8168_mdio_write(tp, 0x06, 0x834f); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8351); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x834a); + rtl8168_mdio_write(tp, 0x06, 0xffee); + rtl8168_mdio_write(tp, 0x06, 0x834b); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0x8330); + rtl8168_mdio_write(tp, 0x06, 0xe183); + rtl8168_mdio_write(tp, 0x06, 0x3158); + rtl8168_mdio_write(tp, 0x06, 0xfee4); + rtl8168_mdio_write(tp, 0x06, 0xf88a); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x8be0); + rtl8168_mdio_write(tp, 0x06, 0x8332); + rtl8168_mdio_write(tp, 0x06, 0xe183); + rtl8168_mdio_write(tp, 0x06, 0x3359); + rtl8168_mdio_write(tp, 0x06, 0x0fe2); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0x0c24); + rtl8168_mdio_write(tp, 0x06, 0x5af0); + rtl8168_mdio_write(tp, 0x06, 0x1e12); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x8ce5); + rtl8168_mdio_write(tp, 0x06, 0xf88d); + rtl8168_mdio_write(tp, 0x06, 0xaf82); + rtl8168_mdio_write(tp, 0x06, 0x13e0); + rtl8168_mdio_write(tp, 0x06, 0x834f); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 
0x06, 0x834f); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x009f); + rtl8168_mdio_write(tp, 0x06, 0x0ae0); + rtl8168_mdio_write(tp, 0x06, 0x834f); + rtl8168_mdio_write(tp, 0x06, 0xa010); + rtl8168_mdio_write(tp, 0x06, 0xa5ee); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x01e0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7805); + rtl8168_mdio_write(tp, 0x06, 0x9e9a); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x049e); + rtl8168_mdio_write(tp, 0x06, 0x10e0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7803); + rtl8168_mdio_write(tp, 0x06, 0x9e0f); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x05ae); + rtl8168_mdio_write(tp, 0x06, 0x0caf); + rtl8168_mdio_write(tp, 0x06, 0x81f8); + rtl8168_mdio_write(tp, 0x06, 0xaf81); + rtl8168_mdio_write(tp, 0x06, 0xa3af); + rtl8168_mdio_write(tp, 0x06, 0x81dc); + rtl8168_mdio_write(tp, 0x06, 0xaf82); + rtl8168_mdio_write(tp, 0x06, 0x13ee); + rtl8168_mdio_write(tp, 0x06, 0x8348); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8349); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0x8351); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x8351); + rtl8168_mdio_write(tp, 0x06, 0x5801); + rtl8168_mdio_write(tp, 0x06, 0x9fea); + rtl8168_mdio_write(tp, 0x06, 0xd000); + rtl8168_mdio_write(tp, 0x06, 0xd180); + rtl8168_mdio_write(tp, 0x06, 0x1f66); + rtl8168_mdio_write(tp, 0x06, 0xe2f8); + rtl8168_mdio_write(tp, 0x06, 0xeae3); + rtl8168_mdio_write(tp, 0x06, 0xf8eb); + rtl8168_mdio_write(tp, 0x06, 0x5af8); + rtl8168_mdio_write(tp, 0x06, 0x1e20); + rtl8168_mdio_write(tp, 0x06, 0xe6f8); + rtl8168_mdio_write(tp, 0x06, 0xeae5); + rtl8168_mdio_write(tp, 0x06, 0xf8eb); + rtl8168_mdio_write(tp, 0x06, 0xd302); + rtl8168_mdio_write(tp, 0x06, 0xb3fe); + rtl8168_mdio_write(tp, 0x06, 0xe2f8); + rtl8168_mdio_write(tp, 0x06, 0x7cef); + rtl8168_mdio_write(tp, 0x06, 0x325b); + rtl8168_mdio_write(tp, 0x06, 0x80e3); + rtl8168_mdio_write(tp, 0x06, 0xf87d); + rtl8168_mdio_write(tp, 0x06, 0x9e03); + rtl8168_mdio_write(tp, 0x06, 0x7dff); + rtl8168_mdio_write(tp, 0x06, 0xff0d); + rtl8168_mdio_write(tp, 0x06, 0x581c); + rtl8168_mdio_write(tp, 0x06, 0x551a); + rtl8168_mdio_write(tp, 0x06, 0x6511); + rtl8168_mdio_write(tp, 0x06, 0xa190); + rtl8168_mdio_write(tp, 0x06, 0xd3e2); + rtl8168_mdio_write(tp, 0x06, 0x8348); + rtl8168_mdio_write(tp, 0x06, 0xe383); + rtl8168_mdio_write(tp, 0x06, 0x491b); + rtl8168_mdio_write(tp, 0x06, 0x56ab); + rtl8168_mdio_write(tp, 0x06, 0x08ef); + rtl8168_mdio_write(tp, 0x06, 0x56e6); + rtl8168_mdio_write(tp, 0x06, 0x8348); + rtl8168_mdio_write(tp, 0x06, 0xe783); + rtl8168_mdio_write(tp, 0x06, 0x4910); + rtl8168_mdio_write(tp, 0x06, 0xd180); + rtl8168_mdio_write(tp, 0x06, 0x1f66); + rtl8168_mdio_write(tp, 0x06, 0xa004); + rtl8168_mdio_write(tp, 0x06, 0xb9e2); + rtl8168_mdio_write(tp, 0x06, 0x8348); + rtl8168_mdio_write(tp, 0x06, 0xe383); + rtl8168_mdio_write(tp, 0x06, 0x49ef); + rtl8168_mdio_write(tp, 0x06, 0x65e2); + rtl8168_mdio_write(tp, 0x06, 0x834a); + rtl8168_mdio_write(tp, 0x06, 0xe383); + rtl8168_mdio_write(tp, 0x06, 0x4b1b); + rtl8168_mdio_write(tp, 0x06, 0x56aa); + rtl8168_mdio_write(tp, 0x06, 0x0eef); + rtl8168_mdio_write(tp, 0x06, 0x56e6); + 
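/* On this older chip the upload uses extended page 0x0005: register 0x05 holds the load address (0x8000 above) and repeated 0x06 writes stream the patch body, apparently mirroring the 0x13/0x14 scheme used by the newer parts earlier in the file. */ +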
rtl8168_mdio_write(tp, 0x06, 0x834a); + rtl8168_mdio_write(tp, 0x06, 0xe783); + rtl8168_mdio_write(tp, 0x06, 0x4be2); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0xe683); + rtl8168_mdio_write(tp, 0x06, 0x4ce0); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0xa000); + rtl8168_mdio_write(tp, 0x06, 0x0caf); + rtl8168_mdio_write(tp, 0x06, 0x81dc); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4d10); + rtl8168_mdio_write(tp, 0x06, 0xe483); + rtl8168_mdio_write(tp, 0x06, 0x4dae); + rtl8168_mdio_write(tp, 0x06, 0x0480); + rtl8168_mdio_write(tp, 0x06, 0xe483); + rtl8168_mdio_write(tp, 0x06, 0x4de0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7803); + rtl8168_mdio_write(tp, 0x06, 0x9e0b); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x049e); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x02e0); + rtl8168_mdio_write(tp, 0x06, 0x8332); + rtl8168_mdio_write(tp, 0x06, 0xe183); + rtl8168_mdio_write(tp, 0x06, 0x3359); + rtl8168_mdio_write(tp, 0x06, 0x0fe2); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0x0c24); + rtl8168_mdio_write(tp, 0x06, 0x5af0); + rtl8168_mdio_write(tp, 0x06, 0x1e12); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x8ce5); + rtl8168_mdio_write(tp, 0x06, 0xf88d); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x30e1); + rtl8168_mdio_write(tp, 0x06, 0x8331); + rtl8168_mdio_write(tp, 0x06, 0x6801); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x8ae5); + rtl8168_mdio_write(tp, 0x06, 0xf88b); + rtl8168_mdio_write(tp, 0x06, 0xae37); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4e03); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4ce1); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0x1b01); + rtl8168_mdio_write(tp, 0x06, 0x9e04); + rtl8168_mdio_write(tp, 0x06, 0xaaa1); + rtl8168_mdio_write(tp, 0x06, 0xaea8); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4e04); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4f00); + rtl8168_mdio_write(tp, 0x06, 0xaeab); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4f78); + rtl8168_mdio_write(tp, 0x06, 0x039f); + rtl8168_mdio_write(tp, 0x06, 0x14ee); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x05d2); + rtl8168_mdio_write(tp, 0x06, 0x40d6); + rtl8168_mdio_write(tp, 0x06, 0x5554); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x17d2); + rtl8168_mdio_write(tp, 0x06, 0xa0d6); + rtl8168_mdio_write(tp, 0x06, 0xba00); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x17fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x05f8); + rtl8168_mdio_write(tp, 0x06, 0xe0f8); + rtl8168_mdio_write(tp, 0x06, 0x60e1); + rtl8168_mdio_write(tp, 0x06, 0xf861); + rtl8168_mdio_write(tp, 0x06, 0x6802); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x60e5); + rtl8168_mdio_write(tp, 0x06, 0xf861); + rtl8168_mdio_write(tp, 0x06, 0xe0f8); + rtl8168_mdio_write(tp, 0x06, 0x48e1); + rtl8168_mdio_write(tp, 0x06, 0xf849); + rtl8168_mdio_write(tp, 0x06, 0x580f); + rtl8168_mdio_write(tp, 0x06, 0x1e02); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 
0x48e5); + rtl8168_mdio_write(tp, 0x06, 0xf849); + rtl8168_mdio_write(tp, 0x06, 0xd000); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x5bbf); + rtl8168_mdio_write(tp, 0x06, 0x8350); + rtl8168_mdio_write(tp, 0x06, 0xef46); + rtl8168_mdio_write(tp, 0x06, 0xdc19); + rtl8168_mdio_write(tp, 0x06, 0xddd0); + rtl8168_mdio_write(tp, 0x06, 0x0102); + rtl8168_mdio_write(tp, 0x06, 0x825b); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x77e0); + rtl8168_mdio_write(tp, 0x06, 0xf860); + rtl8168_mdio_write(tp, 0x06, 0xe1f8); + rtl8168_mdio_write(tp, 0x06, 0x6158); + rtl8168_mdio_write(tp, 0x06, 0xfde4); + rtl8168_mdio_write(tp, 0x06, 0xf860); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x61fc); + rtl8168_mdio_write(tp, 0x06, 0x04f9); + rtl8168_mdio_write(tp, 0x06, 0xfafb); + rtl8168_mdio_write(tp, 0x06, 0xc6bf); + rtl8168_mdio_write(tp, 0x06, 0xf840); + rtl8168_mdio_write(tp, 0x06, 0xbe83); + rtl8168_mdio_write(tp, 0x06, 0x50a0); + rtl8168_mdio_write(tp, 0x06, 0x0101); + rtl8168_mdio_write(tp, 0x06, 0x071b); + rtl8168_mdio_write(tp, 0x06, 0x89cf); + rtl8168_mdio_write(tp, 0x06, 0xd208); + rtl8168_mdio_write(tp, 0x06, 0xebdb); + rtl8168_mdio_write(tp, 0x06, 0x19b2); + rtl8168_mdio_write(tp, 0x06, 0xfbff); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe0f8); + rtl8168_mdio_write(tp, 0x06, 0x48e1); + rtl8168_mdio_write(tp, 0x06, 0xf849); + rtl8168_mdio_write(tp, 0x06, 0x6808); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x48e5); + rtl8168_mdio_write(tp, 0x06, 0xf849); + rtl8168_mdio_write(tp, 0x06, 0x58f7); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x48e5); + rtl8168_mdio_write(tp, 0x06, 0xf849); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0x4d20); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x4e22); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x4ddf); + rtl8168_mdio_write(tp, 0x06, 0xff01); + rtl8168_mdio_write(tp, 0x06, 0x4edd); + rtl8168_mdio_write(tp, 0x06, 0xff01); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xfbef); + rtl8168_mdio_write(tp, 0x06, 0x79bf); + rtl8168_mdio_write(tp, 0x06, 0xf822); + rtl8168_mdio_write(tp, 0x06, 0xd819); + rtl8168_mdio_write(tp, 0x06, 0xd958); + rtl8168_mdio_write(tp, 0x06, 0x849f); + rtl8168_mdio_write(tp, 0x06, 0x09bf); + rtl8168_mdio_write(tp, 0x06, 0x82be); + rtl8168_mdio_write(tp, 0x06, 0xd682); + rtl8168_mdio_write(tp, 0x06, 0xc602); + rtl8168_mdio_write(tp, 0x06, 0x014f); + rtl8168_mdio_write(tp, 0x06, 0xef97); + rtl8168_mdio_write(tp, 0x06, 0xfffe); + rtl8168_mdio_write(tp, 0x06, 0xfc05); + rtl8168_mdio_write(tp, 0x06, 0x17ff); + rtl8168_mdio_write(tp, 0x06, 0xfe01); + rtl8168_mdio_write(tp, 0x06, 0x1700); + rtl8168_mdio_write(tp, 0x06, 0x0102); + rtl8168_mdio_write(tp, 0x05, 0x83d8); + rtl8168_mdio_write(tp, 0x06, 0x8051); + rtl8168_mdio_write(tp, 0x05, 0x83d6); + rtl8168_mdio_write(tp, 0x06, 0x82a0); + rtl8168_mdio_write(tp, 0x05, 0x83d4); + rtl8168_mdio_write(tp, 0x06, 0x8000); + rtl8168_mdio_write(tp, 0x02, 0x2010); + rtl8168_mdio_write(tp, 0x03, 0xdc00); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x0b, 0x0600); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00fc); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + 
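/* With the CFG_METHOD_9 patch streamed, the code drops back to page 0; the 0x0D write below appears to restore a vendor-chosen default (compare the 0x0D, 0xF8A0 write in the CFG_METHOD_1 path). */ +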
rtl8168_mdio_write(tp, 0x0D, 0xF880); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_10) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x06, 0x4064); + rtl8168_mdio_write(tp, 0x07, 0x2863); + rtl8168_mdio_write(tp, 0x08, 0x059C); + rtl8168_mdio_write(tp, 0x09, 0x26B4); + rtl8168_mdio_write(tp, 0x0A, 0x6A19); + rtl8168_mdio_write(tp, 0x0B, 0xDCC8); + rtl8168_mdio_write(tp, 0x10, 0xF06D); + rtl8168_mdio_write(tp, 0x14, 0x7F68); + rtl8168_mdio_write(tp, 0x18, 0x7FD9); + rtl8168_mdio_write(tp, 0x1C, 0xF0FF); + rtl8168_mdio_write(tp, 0x1D, 0x3D9C); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x12, 0xF49F); + rtl8168_mdio_write(tp, 0x13, 0x070B); + rtl8168_mdio_write(tp, 0x1A, 0x05AD); + rtl8168_mdio_write(tp, 0x14, 0x94C0); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x5561); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8332); + rtl8168_mdio_write(tp, 0x06, 0x5561); + + if (rtl8168_efuse_read(tp, 0x01) == 0xb1) { + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x05, 0x669A); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8330); + rtl8168_mdio_write(tp, 0x06, 0x669A); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0D); + if ((gphy_val & 0x00FF) != 0x006C) { + gphy_val &= 0xFF00; + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0065); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0066); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0067); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0068); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0069); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x006A); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x006B); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x006C); + } + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x05, 0x2642); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8330); + rtl8168_mdio_write(tp, 0x06, 0x2642); + } + + if (rtl8168_efuse_read(tp, 0x30) == 0x98) { + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) & ~BIT_1); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x01, rtl8168_mdio_read(tp, 0x01) | BIT_9); + } else if (rtl8168_efuse_read(tp, 0x30) == 0x90) { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x01, rtl8168_mdio_read(tp, 0x01) & ~BIT_9); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x5101); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x02); + gphy_val &= ~BIT_10; + gphy_val &= ~BIT_9; + gphy_val |= BIT_8; + rtl8168_mdio_write(tp, 0x02, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x03); + gphy_val &= ~BIT_15; + gphy_val &= ~BIT_14; + gphy_val &= ~BIT_13; + rtl8168_mdio_write(tp, 0x03, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0F); + gphy_val |= BIT_4; + gphy_val |= BIT_2; + gphy_val |= BIT_1; + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x0F, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x001B); + if (rtl8168_mdio_read(tp, 0x06) == 0xB300) { + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + 
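/* CFG_METHOD_10 repeats the same extended-page upload sequence with a patch image specific to this revision; the efuse checks above select per-board variants of the tuning values. */ +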
rtl8168_mdio_write(tp, 0x06, 0xfaee); + rtl8168_mdio_write(tp, 0x06, 0xf8ea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xf8eb); + rtl8168_mdio_write(tp, 0x06, 0x00e2); + rtl8168_mdio_write(tp, 0x06, 0xf87c); + rtl8168_mdio_write(tp, 0x06, 0xe3f8); + rtl8168_mdio_write(tp, 0x06, 0x7da5); + rtl8168_mdio_write(tp, 0x06, 0x1111); + rtl8168_mdio_write(tp, 0x06, 0x12d2); + rtl8168_mdio_write(tp, 0x06, 0x40d6); + rtl8168_mdio_write(tp, 0x06, 0x4444); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xc6d2); + rtl8168_mdio_write(tp, 0x06, 0xa0d6); + rtl8168_mdio_write(tp, 0x06, 0xaaaa); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xc6ae); + rtl8168_mdio_write(tp, 0x06, 0x0fa5); + rtl8168_mdio_write(tp, 0x06, 0x4444); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x4da5); + rtl8168_mdio_write(tp, 0x06, 0xaaaa); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x47af); + rtl8168_mdio_write(tp, 0x06, 0x81c2); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4e00); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4d0f); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4c0f); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4f00); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x5100); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4aff); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4bff); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x30e1); + rtl8168_mdio_write(tp, 0x06, 0x8331); + rtl8168_mdio_write(tp, 0x06, 0x58fe); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x8ae5); + rtl8168_mdio_write(tp, 0x06, 0xf88b); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x32e1); + rtl8168_mdio_write(tp, 0x06, 0x8333); + rtl8168_mdio_write(tp, 0x06, 0x590f); + rtl8168_mdio_write(tp, 0x06, 0xe283); + rtl8168_mdio_write(tp, 0x06, 0x4d0c); + rtl8168_mdio_write(tp, 0x06, 0x245a); + rtl8168_mdio_write(tp, 0x06, 0xf01e); + rtl8168_mdio_write(tp, 0x06, 0x12e4); + rtl8168_mdio_write(tp, 0x06, 0xf88c); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x8daf); + rtl8168_mdio_write(tp, 0x06, 0x81c2); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4f10); + rtl8168_mdio_write(tp, 0x06, 0xe483); + rtl8168_mdio_write(tp, 0x06, 0x4fe0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7800); + rtl8168_mdio_write(tp, 0x06, 0x9f0a); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4fa0); + rtl8168_mdio_write(tp, 0x06, 0x10a5); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4e01); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x059e); + rtl8168_mdio_write(tp, 0x06, 0x9ae0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7804); + rtl8168_mdio_write(tp, 0x06, 0x9e10); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x0fe0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7801); + rtl8168_mdio_write(tp, 0x06, 0x9e05); + rtl8168_mdio_write(tp, 0x06, 0xae0c); + rtl8168_mdio_write(tp, 0x06, 0xaf81); + rtl8168_mdio_write(tp, 0x06, 
0xa7af); + rtl8168_mdio_write(tp, 0x06, 0x8152); + rtl8168_mdio_write(tp, 0x06, 0xaf81); + rtl8168_mdio_write(tp, 0x06, 0x8baf); + rtl8168_mdio_write(tp, 0x06, 0x81c2); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4800); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4900); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x5110); + rtl8168_mdio_write(tp, 0x06, 0xe483); + rtl8168_mdio_write(tp, 0x06, 0x5158); + rtl8168_mdio_write(tp, 0x06, 0x019f); + rtl8168_mdio_write(tp, 0x06, 0xead0); + rtl8168_mdio_write(tp, 0x06, 0x00d1); + rtl8168_mdio_write(tp, 0x06, 0x801f); + rtl8168_mdio_write(tp, 0x06, 0x66e2); + rtl8168_mdio_write(tp, 0x06, 0xf8ea); + rtl8168_mdio_write(tp, 0x06, 0xe3f8); + rtl8168_mdio_write(tp, 0x06, 0xeb5a); + rtl8168_mdio_write(tp, 0x06, 0xf81e); + rtl8168_mdio_write(tp, 0x06, 0x20e6); + rtl8168_mdio_write(tp, 0x06, 0xf8ea); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0xebd3); + rtl8168_mdio_write(tp, 0x06, 0x02b3); + rtl8168_mdio_write(tp, 0x06, 0xfee2); + rtl8168_mdio_write(tp, 0x06, 0xf87c); + rtl8168_mdio_write(tp, 0x06, 0xef32); + rtl8168_mdio_write(tp, 0x06, 0x5b80); + rtl8168_mdio_write(tp, 0x06, 0xe3f8); + rtl8168_mdio_write(tp, 0x06, 0x7d9e); + rtl8168_mdio_write(tp, 0x06, 0x037d); + rtl8168_mdio_write(tp, 0x06, 0xffff); + rtl8168_mdio_write(tp, 0x06, 0x0d58); + rtl8168_mdio_write(tp, 0x06, 0x1c55); + rtl8168_mdio_write(tp, 0x06, 0x1a65); + rtl8168_mdio_write(tp, 0x06, 0x11a1); + rtl8168_mdio_write(tp, 0x06, 0x90d3); + rtl8168_mdio_write(tp, 0x06, 0xe283); + rtl8168_mdio_write(tp, 0x06, 0x48e3); + rtl8168_mdio_write(tp, 0x06, 0x8349); + rtl8168_mdio_write(tp, 0x06, 0x1b56); + rtl8168_mdio_write(tp, 0x06, 0xab08); + rtl8168_mdio_write(tp, 0x06, 0xef56); + rtl8168_mdio_write(tp, 0x06, 0xe683); + rtl8168_mdio_write(tp, 0x06, 0x48e7); + rtl8168_mdio_write(tp, 0x06, 0x8349); + rtl8168_mdio_write(tp, 0x06, 0x10d1); + rtl8168_mdio_write(tp, 0x06, 0x801f); + rtl8168_mdio_write(tp, 0x06, 0x66a0); + rtl8168_mdio_write(tp, 0x06, 0x04b9); + rtl8168_mdio_write(tp, 0x06, 0xe283); + rtl8168_mdio_write(tp, 0x06, 0x48e3); + rtl8168_mdio_write(tp, 0x06, 0x8349); + rtl8168_mdio_write(tp, 0x06, 0xef65); + rtl8168_mdio_write(tp, 0x06, 0xe283); + rtl8168_mdio_write(tp, 0x06, 0x4ae3); + rtl8168_mdio_write(tp, 0x06, 0x834b); + rtl8168_mdio_write(tp, 0x06, 0x1b56); + rtl8168_mdio_write(tp, 0x06, 0xaa0e); + rtl8168_mdio_write(tp, 0x06, 0xef56); + rtl8168_mdio_write(tp, 0x06, 0xe683); + rtl8168_mdio_write(tp, 0x06, 0x4ae7); + rtl8168_mdio_write(tp, 0x06, 0x834b); + rtl8168_mdio_write(tp, 0x06, 0xe283); + rtl8168_mdio_write(tp, 0x06, 0x4de6); + rtl8168_mdio_write(tp, 0x06, 0x834c); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4da0); + rtl8168_mdio_write(tp, 0x06, 0x000c); + rtl8168_mdio_write(tp, 0x06, 0xaf81); + rtl8168_mdio_write(tp, 0x06, 0x8be0); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0xae04); + rtl8168_mdio_write(tp, 0x06, 0x80e4); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x0be0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7804); + rtl8168_mdio_write(tp, 0x06, 0x9e04); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4e02); + rtl8168_mdio_write(tp, 
0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x32e1); + rtl8168_mdio_write(tp, 0x06, 0x8333); + rtl8168_mdio_write(tp, 0x06, 0x590f); + rtl8168_mdio_write(tp, 0x06, 0xe283); + rtl8168_mdio_write(tp, 0x06, 0x4d0c); + rtl8168_mdio_write(tp, 0x06, 0x245a); + rtl8168_mdio_write(tp, 0x06, 0xf01e); + rtl8168_mdio_write(tp, 0x06, 0x12e4); + rtl8168_mdio_write(tp, 0x06, 0xf88c); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x8de0); + rtl8168_mdio_write(tp, 0x06, 0x8330); + rtl8168_mdio_write(tp, 0x06, 0xe183); + rtl8168_mdio_write(tp, 0x06, 0x3168); + rtl8168_mdio_write(tp, 0x06, 0x01e4); + rtl8168_mdio_write(tp, 0x06, 0xf88a); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x8bae); + rtl8168_mdio_write(tp, 0x06, 0x37ee); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x03e0); + rtl8168_mdio_write(tp, 0x06, 0x834c); + rtl8168_mdio_write(tp, 0x06, 0xe183); + rtl8168_mdio_write(tp, 0x06, 0x4d1b); + rtl8168_mdio_write(tp, 0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x04aa); + rtl8168_mdio_write(tp, 0x06, 0xa1ae); + rtl8168_mdio_write(tp, 0x06, 0xa8ee); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x834f); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0xabe0); + rtl8168_mdio_write(tp, 0x06, 0x834f); + rtl8168_mdio_write(tp, 0x06, 0x7803); + rtl8168_mdio_write(tp, 0x06, 0x9f14); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4e05); + rtl8168_mdio_write(tp, 0x06, 0xd240); + rtl8168_mdio_write(tp, 0x06, 0xd655); + rtl8168_mdio_write(tp, 0x06, 0x5402); + rtl8168_mdio_write(tp, 0x06, 0x81c6); + rtl8168_mdio_write(tp, 0x06, 0xd2a0); + rtl8168_mdio_write(tp, 0x06, 0xd6ba); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x81c6); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc05); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0xf860); + rtl8168_mdio_write(tp, 0x06, 0xe1f8); + rtl8168_mdio_write(tp, 0x06, 0x6168); + rtl8168_mdio_write(tp, 0x06, 0x02e4); + rtl8168_mdio_write(tp, 0x06, 0xf860); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x61e0); + rtl8168_mdio_write(tp, 0x06, 0xf848); + rtl8168_mdio_write(tp, 0x06, 0xe1f8); + rtl8168_mdio_write(tp, 0x06, 0x4958); + rtl8168_mdio_write(tp, 0x06, 0x0f1e); + rtl8168_mdio_write(tp, 0x06, 0x02e4); + rtl8168_mdio_write(tp, 0x06, 0xf848); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x49d0); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x820a); + rtl8168_mdio_write(tp, 0x06, 0xbf83); + rtl8168_mdio_write(tp, 0x06, 0x50ef); + rtl8168_mdio_write(tp, 0x06, 0x46dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0xd001); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x0a02); + rtl8168_mdio_write(tp, 0x06, 0x8226); + rtl8168_mdio_write(tp, 0x06, 0xe0f8); + rtl8168_mdio_write(tp, 0x06, 0x60e1); + rtl8168_mdio_write(tp, 0x06, 0xf861); + rtl8168_mdio_write(tp, 0x06, 0x58fd); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x60e5); + rtl8168_mdio_write(tp, 0x06, 0xf861); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xfbc6); + rtl8168_mdio_write(tp, 0x06, 0xbff8); + rtl8168_mdio_write(tp, 0x06, 0x40be); + rtl8168_mdio_write(tp, 0x06, 0x8350); + rtl8168_mdio_write(tp, 0x06, 0xa001); + 
rtl8168_mdio_write(tp, 0x06, 0x0107); + rtl8168_mdio_write(tp, 0x06, 0x1b89); + rtl8168_mdio_write(tp, 0x06, 0xcfd2); + rtl8168_mdio_write(tp, 0x06, 0x08eb); + rtl8168_mdio_write(tp, 0x06, 0xdb19); + rtl8168_mdio_write(tp, 0x06, 0xb2fb); + rtl8168_mdio_write(tp, 0x06, 0xfffe); + rtl8168_mdio_write(tp, 0x06, 0xfd04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0xf848); + rtl8168_mdio_write(tp, 0x06, 0xe1f8); + rtl8168_mdio_write(tp, 0x06, 0x4968); + rtl8168_mdio_write(tp, 0x06, 0x08e4); + rtl8168_mdio_write(tp, 0x06, 0xf848); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x4958); + rtl8168_mdio_write(tp, 0x06, 0xf7e4); + rtl8168_mdio_write(tp, 0x06, 0xf848); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x49fc); + rtl8168_mdio_write(tp, 0x06, 0x044d); + rtl8168_mdio_write(tp, 0x06, 0x2000); + rtl8168_mdio_write(tp, 0x06, 0x024e); + rtl8168_mdio_write(tp, 0x06, 0x2200); + rtl8168_mdio_write(tp, 0x06, 0x024d); + rtl8168_mdio_write(tp, 0x06, 0xdfff); + rtl8168_mdio_write(tp, 0x06, 0x014e); + rtl8168_mdio_write(tp, 0x06, 0xddff); + rtl8168_mdio_write(tp, 0x06, 0x01f8); + rtl8168_mdio_write(tp, 0x06, 0xfafb); + rtl8168_mdio_write(tp, 0x06, 0xef79); + rtl8168_mdio_write(tp, 0x06, 0xbff8); + rtl8168_mdio_write(tp, 0x06, 0x22d8); + rtl8168_mdio_write(tp, 0x06, 0x19d9); + rtl8168_mdio_write(tp, 0x06, 0x5884); + rtl8168_mdio_write(tp, 0x06, 0x9f09); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0x6dd6); + rtl8168_mdio_write(tp, 0x06, 0x8275); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x4fef); + rtl8168_mdio_write(tp, 0x06, 0x97ff); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x0517); + rtl8168_mdio_write(tp, 0x06, 0xfffe); + rtl8168_mdio_write(tp, 0x06, 0x0117); + rtl8168_mdio_write(tp, 0x06, 0x0001); + rtl8168_mdio_write(tp, 0x06, 0x0200); + rtl8168_mdio_write(tp, 0x05, 0x83d8); + rtl8168_mdio_write(tp, 0x06, 0x8000); + rtl8168_mdio_write(tp, 0x05, 0x83d6); + rtl8168_mdio_write(tp, 0x06, 0x824f); + rtl8168_mdio_write(tp, 0x02, 0x2010); + rtl8168_mdio_write(tp, 0x03, 0xdc00); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x0b, 0x0600); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00fc); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0xF880); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_11) { + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x10, 0x0008); + rtl8168_mdio_write(tp, 0x0D, 0x006C); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0B, 0xA4D8); + rtl8168_mdio_write(tp, 0x09, 0x281C); + rtl8168_mdio_write(tp, 0x07, 0x2883); + rtl8168_mdio_write(tp, 0x0A, 0x6B35); + rtl8168_mdio_write(tp, 0x1D, 0x3DA4); + rtl8168_mdio_write(tp, 0x1C, 0xEFFD); + rtl8168_mdio_write(tp, 0x14, 0x7F52); + rtl8168_mdio_write(tp, 0x18, 0x7FC6); + rtl8168_mdio_write(tp, 0x08, 0x0601); + rtl8168_mdio_write(tp, 0x06, 0x4063); + rtl8168_mdio_write(tp, 0x10, 0xF074); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x13, 0x0789); + rtl8168_mdio_write(tp, 0x12, 0xF4BD); + rtl8168_mdio_write(tp, 0x1A, 0x04FD); + rtl8168_mdio_write(tp, 0x14, 0x84B0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); + + 
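+ /* The BMCR write just above (reg 0x00 = 0x9200) soft-resets the PHY and restarts auto-negotiation (bits 15, 12 and 9 of the standard 802.3 control register); the page-5 and page-1 writes below then appear to reload PHY parameter tables. */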
rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x01, 0x0340); + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x04, 0x4000); + rtl8168_mdio_write(tp, 0x03, 0x1D21); + rtl8168_mdio_write(tp, 0x02, 0x0C32); + rtl8168_mdio_write(tp, 0x01, 0x0200); + rtl8168_mdio_write(tp, 0x00, 0x5554); + rtl8168_mdio_write(tp, 0x04, 0x4800); + rtl8168_mdio_write(tp, 0x04, 0x4000); + rtl8168_mdio_write(tp, 0x04, 0xF000); + rtl8168_mdio_write(tp, 0x03, 0xDF01); + rtl8168_mdio_write(tp, 0x02, 0xDF20); + rtl8168_mdio_write(tp, 0x01, 0x101A); + rtl8168_mdio_write(tp, 0x00, 0xA0FF); + rtl8168_mdio_write(tp, 0x04, 0xF800); + rtl8168_mdio_write(tp, 0x04, 0xF000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + gphy_val = rtl8168_mdio_read(tp, 0x0D); + gphy_val |= BIT_5; + rtl8168_mdio_write(tp, 0x0D, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0C); + gphy_val |= BIT_10; + rtl8168_mdio_write(tp, 0x0C, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_12) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x0D); + gphy_val |= BIT_5; + rtl8168_mdio_write(tp, 0x0D, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0C); + gphy_val |= BIT_10; + rtl8168_mdio_write(tp, 0x0C, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + rtl8168_mdio_write(tp, 0x15, 0x035D); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x01, 0x0300); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_13) { + rtl8168_mdio_write(tp, 0x1F, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x0D); + gphy_val |= BIT_5; + rtl8168_mdio_write(tp, 0x0D, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0C); + gphy_val |= BIT_10; + rtl8168_mdio_write(tp, 0x0C, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_14 || tp->mcfg == CFG_METHOD_15) { + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17) | BIT_1; + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~(BIT_2); + else + gphy_val |= (BIT_2); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b80); + rtl8168_mdio_write(tp, 0x06, 0xc896); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0B, 0x6C20); + rtl8168_mdio_write(tp, 0x07, 0x2872); + rtl8168_mdio_write(tp, 0x1C, 0xEFFF); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x14, 0x6420); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x08) & 0x00FF; + rtl8168_mdio_write(tp, 0x08, gphy_val | 0x8000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + rtl8168_mdio_write(tp, 0x18, gphy_val | 0x0010); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x14); + 
rtl8168_mdio_write(tp, 0x14, gphy_val | 0x8000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x00, 0x080B); + rtl8168_mdio_write(tp, 0x0B, 0x09D7); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x1006); + } + } + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x19, 0x7F46); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8AD2); + rtl8168_mdio_write(tp, 0x06, 0x6810); + rtl8168_mdio_write(tp, 0x05, 0x8AD4); + rtl8168_mdio_write(tp, 0x06, 0x8002); + rtl8168_mdio_write(tp, 0x05, 0x8ADE); + rtl8168_mdio_write(tp, 0x06, 0x8025); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002F); + rtl8168_mdio_write(tp, 0x15, 0x1919); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + rtl8168_mdio_write(tp, 0x18, gphy_val | 0x0040); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + rtl8168_mdio_write(tp, 0x06, gphy_val | 0x0001); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x00AC); + rtl8168_mdio_write(tp, 0x18, 0x0006); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_16) { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_2 | BIT_1; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x18, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x14); + gphy_val |= BIT_15; + rtl8168_mdio_write(tp, 0x14, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x1006); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0B, 0x6C14); + rtl8168_mdio_write(tp, 0x14, 0x7F3D); + rtl8168_mdio_write(tp, 0x1C, 0xFAFE); + rtl8168_mdio_write(tp, 0x08, 0x07C5); + rtl8168_mdio_write(tp, 0x10, 0xF090); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x14, 0x641A); + rtl8168_mdio_write(tp, 0x1A, 0x0606); + rtl8168_mdio_write(tp, 0x12, 0xF480); + rtl8168_mdio_write(tp, 0x13, 0x0747); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0004); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0078); + rtl8168_mdio_write(tp, 0x15, 0xA408); + rtl8168_mdio_write(tp, 0x17, 0x5100); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x0D, 0x0207); + rtl8168_mdio_write(tp, 0x02, 0x5FD0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0004); + rtl8168_mdio_write(tp, 0x1F, 
0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x00A1); + gphy_val = rtl8168_mdio_read(tp, 0x1A); + gphy_val &= ~BIT_2; + rtl8168_mdio_write(tp, 0x1A, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0004); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x16); + gphy_val |= BIT_5; + rtl8168_mdio_write(tp, 0x16, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0004); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x00AC); + rtl8168_mdio_write(tp, 0x18, 0x0006); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x09, 0xA20F); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B5B); + rtl8168_mdio_write(tp, 0x06, 0x9222); + rtl8168_mdio_write(tp, 0x05, 0x8B6D); + rtl8168_mdio_write(tp, 0x06, 0x8000); + rtl8168_mdio_write(tp, 0x05, 0x8B76); + rtl8168_mdio_write(tp, 0x06, 0x8000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (pdev->subsystem_vendor == 0x1043 && + pdev->subsystem_device == 0x13F7) { + + static const u16 evl_phy_value[] = { + 0x8B56, 0x8B5F, 0x8B68, 0x8B71, + 0x8B7A, 0x8A7B, 0x8A7E, 0x8A81, + 0x8A84, 0x8A87 + }; + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + for (i = 0; i < ARRAY_SIZE(evl_phy_value); i++) { + rtl8168_mdio_write(tp, 0x05, evl_phy_value[i]); + gphy_val = (0xAA << 8) | (rtl8168_mdio_read(tp, 0x06) & 0xFF); + rtl8168_mdio_write(tp, 0x06, gphy_val); + } + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0078); + rtl8168_mdio_write(tp, 0x17, 0x51AA); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B54); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8B5D); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8A7C); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A7F); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A82); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A88); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + gphy_val = rtl8168_mdio_read(tp, 0x06) | BIT_14 | BIT_15; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } else if (tp->mcfg == CFG_METHOD_17) { + if (pdev->subsystem_vendor == 0x144d && + pdev->subsystem_device == 0xc0a6) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0e, 0x6b7f); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= 
BIT_2 | BIT_1; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val &= ~BIT_4; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x18, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x14); + gphy_val |= BIT_15; + rtl8168_mdio_write(tp, 0x14, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0004); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x00AC); + rtl8168_mdio_write(tp, 0x18, 0x0006); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x09, 0xA20F); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + gphy_val = rtl8168_mdio_read(tp, 0x06) | BIT_14 | BIT_15; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B5B); + rtl8168_mdio_write(tp, 0x06, 0x9222); + rtl8168_mdio_write(tp, 0x05, 0x8B6D); + rtl8168_mdio_write(tp, 0x06, 0x8000); + rtl8168_mdio_write(tp, 0x05, 0x8B76); + rtl8168_mdio_write(tp, 0x06, 0x8000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (pdev->subsystem_vendor == 0x1043 && + pdev->subsystem_device == 0x13F7) { + + static const u16 evl_phy_value[] = { + 0x8B56, 0x8B5F, 0x8B68, 0x8B71, + 0x8B7A, 0x8A7B, 0x8A7E, 0x8A81, + 0x8A84, 0x8A87 + }; + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + for (i = 0; i < ARRAY_SIZE(evl_phy_value); i++) { + rtl8168_mdio_write(tp, 0x05, evl_phy_value[i]); + gphy_val = (0xAA << 8) | (rtl8168_mdio_read(tp, 0x06) & 0xFF); + rtl8168_mdio_write(tp, 0x06, gphy_val); + } + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0078); + rtl8168_mdio_write(tp, 0x17, 0x51AA); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B54); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8B5D); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8A7C); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A7F); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A82); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A88); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val |= BIT_12; + rtl8168_mdio_write(tp, 
0x15, gphy_val); + } + } + } else if (tp->mcfg == CFG_METHOD_18) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_2 | BIT_1; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x18, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x14); + gphy_val |= BIT_15; + rtl8168_mdio_write(tp, 0x14, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_14; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x09, 0xA20F); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B55); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8B5E); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8B67); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8B70); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0078); + rtl8168_mdio_write(tp, 0x17, 0x0000); + rtl8168_mdio_write(tp, 0x19, 0x00FB); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B79); + rtl8168_mdio_write(tp, 0x06, 0xAA00); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0003); + rtl8168_mdio_write(tp, 0x01, 0x328A); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B54); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8B5D); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8A7C); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A7F); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A82); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A88); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_15); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val |= BIT_12; + rtl8168_mdio_write(tp, 0x15, gphy_val); + } + } + } else if (tp->mcfg == CFG_METHOD_19) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + 
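+ /* As in the neighbouring CFG_METHOD branches, the 0x8B80 |= BIT_2 | BIT_1 write is only issued on chips whose PHY MCU patch RAM is writable, per the HW_HAS_WRITE_PHY_MCU_RAM_CODE() guard. */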
rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_2 | BIT_1; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x18, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x14); + gphy_val |= BIT_15; + rtl8168_mdio_write(tp, 0x14, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B54); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8B5D); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8A7C); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A7F); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A82); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A88); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_15); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val |= BIT_12; + rtl8168_mdio_write(tp, 0x15, gphy_val); + } + } + } else if (tp->mcfg == CFG_METHOD_20) { + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_2 | BIT_1; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x18, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x14); + gphy_val |= BIT_15; + rtl8168_mdio_write(tp, 0x14, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_14; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x09, 0xA20F); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B55); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8B5E); + rtl8168_mdio_write(tp, 0x06, 0x0000); + 
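+ /* Continuation of the zeroing run: parameters 0x8B55, 0x8B5E, 0x8B67 and 0x8B70 (a stride of 9) are each cleared to zero; 0x8B67 and 0x8B70 follow below. */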
rtl8168_mdio_write(tp, 0x05, 0x8B67); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8B70); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0078); + rtl8168_mdio_write(tp, 0x17, 0x0000); + rtl8168_mdio_write(tp, 0x19, 0x00FB); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B79); + rtl8168_mdio_write(tp, 0x06, 0xAA00); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B54); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8B5D); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8A7C); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A7F); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A82); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A88); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_15); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val |= BIT_12; + rtl8168_mdio_write(tp, 0x15, gphy_val); + } + } + } else if (tp->mcfg == CFG_METHOD_21) { + rtl8168_mdio_write(tp, 0x1F, 0x0A46); + gphy_val = rtl8168_mdio_read(tp, 0x10); + rtl8168_mdio_write(tp, 0x1F, 0x0BCC); + if (gphy_val & BIT_8) + rtl8168_clear_eth_phy_bit(tp, 0x12, BIT_15); + else + rtl8168_set_eth_phy_bit(tp, 0x12, BIT_15); + rtl8168_mdio_write(tp, 0x1F, 0x0A46); + gphy_val = rtl8168_mdio_read(tp, 0x13); + rtl8168_mdio_write(tp, 0x1F, 0x0C41); + if (gphy_val & BIT_8) + rtl8168_set_eth_phy_bit(tp, 0x15, BIT_1); + else + rtl8168_clear_eth_phy_bit(tp, 0x15, BIT_1); + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_2 | BIT_3); + + rtl8168_mdio_write(tp, 0x1F, 0x0BCC); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_7); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_6); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8084); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~(BIT_14 | BIT_13)); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_12); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_1); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_0); + + rtl8168_mdio_write(tp, 0x1F, 0x0A4B); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_2); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8012); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | BIT_15); + + rtl8168_mdio_write(tp, 0x1F, 0x0C42); + gphy_val = rtl8168_mdio_read(tp, 0x11); + gphy_val |= BIT_14; + gphy_val 
&= ~BIT_13; + rtl8168_mdio_write(tp, 0x11, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x809A); + rtl8168_mdio_write(tp, 0x14, 0x8022); + rtl8168_mdio_write(tp, 0x13, 0x80A0); + gphy_val = rtl8168_mdio_read(tp, 0x14) & 0x00FF; + gphy_val |= 0x1000; + rtl8168_mdio_write(tp, 0x14, gphy_val); + rtl8168_mdio_write(tp, 0x13, 0x8088); + rtl8168_mdio_write(tp, 0x14, 0x9222); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_2); + } + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_22) { + //do nothing + } else if (tp->mcfg == CFG_METHOD_23) { + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | (BIT_3 | BIT_2)); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0BCC); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_7); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_6); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8084); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~(BIT_14 | BIT_13)); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_12); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_1); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_0); + + rtl8168_mdio_write(tp, 0x1F, 0x0A4B); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_2); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8012); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | BIT_15); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0C42); + ClearAndSetEthPhyBit(tp, + 0x11, + BIT_13, + BIT_14 + ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_2); + } + } + } else if (tp->mcfg == CFG_METHOD_24) { + rtl8168_mdio_write(tp, 0x1F, 0x0BCC); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_7); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_6); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8084); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~(BIT_14 | BIT_13)); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_12); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_1); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_0); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8012); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | BIT_15); + + rtl8168_mdio_write(tp, 0x1F, 0x0C42); + gphy_val = rtl8168_mdio_read(tp, 0x11); + gphy_val |= BIT_14; + gphy_val &= ~BIT_13; + rtl8168_mdio_write(tp, 0x11, gphy_val); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_2); + } + } + } else if (tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26) { + rtl8168_mdio_write(tp, 0x1F, 0x0BCC); + rtl8168_mdio_write(tp, 0x14, 
rtl8168_mdio_read(tp, 0x14) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_7); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_6); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8084); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~(BIT_14 | BIT_13)); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_12); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_1); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_0); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8012); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | BIT_15); + + rtl8168_mdio_write(tp, 0x1F, 0x0BCE); + rtl8168_mdio_write(tp, 0x12, 0x8860); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x80F3); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x8B00); + rtl8168_mdio_write(tp, 0x13, 0x80F0); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x3A00); + rtl8168_mdio_write(tp, 0x13, 0x80EF); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x0500); + rtl8168_mdio_write(tp, 0x13, 0x80F6); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x6E00); + rtl8168_mdio_write(tp, 0x13, 0x80EC); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x6800); + rtl8168_mdio_write(tp, 0x13, 0x80ED); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x7C00); + rtl8168_mdio_write(tp, 0x13, 0x80F2); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xF400); + rtl8168_mdio_write(tp, 0x13, 0x80F4); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x8500); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8110); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xA800); + rtl8168_mdio_write(tp, 0x13, 0x810F); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x1D00); + rtl8168_mdio_write(tp, 0x13, 0x8111); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xF500); + rtl8168_mdio_write(tp, 0x13, 0x8113); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x6100); + rtl8168_mdio_write(tp, 0x13, 0x8115); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x9200); + rtl8168_mdio_write(tp, 0x13, 0x810E); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x0400); + rtl8168_mdio_write(tp, 0x13, 0x810C); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x7C00); + rtl8168_mdio_write(tp, 0x13, 0x810B); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x5A00); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x80D1); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xFF00); + rtl8168_mdio_write(tp, 0x13, 0x80CD); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x9E00); + rtl8168_mdio_write(tp, 0x13, 0x80D3); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x0E00); + rtl8168_mdio_write(tp, 0x13, 0x80D5); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xCA00); + rtl8168_mdio_write(tp, 0x13, 0x80D7); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x8400); + + if (aspm) { + if 
(HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_2); + } + } + } else if (tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28) { + rtl8168_mdio_write(tp, 0x1F, 0x0BCC); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_7); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_6); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8084); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~(BIT_14 | BIT_13)); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_12); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_1); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_0); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8012); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | BIT_15); + + rtl8168_mdio_write(tp, 0x1F, 0x0C42); + rtl8168_mdio_write(tp, 0x11, (rtl8168_mdio_read(tp, 0x11) & ~BIT_13) | BIT_14); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x80F3); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x8B00); + rtl8168_mdio_write(tp, 0x13, 0x80F0); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x3A00); + rtl8168_mdio_write(tp, 0x13, 0x80EF); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x0500); + rtl8168_mdio_write(tp, 0x13, 0x80F6); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x6E00); + rtl8168_mdio_write(tp, 0x13, 0x80EC); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x6800); + rtl8168_mdio_write(tp, 0x13, 0x80ED); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x7C00); + rtl8168_mdio_write(tp, 0x13, 0x80F2); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xF400); + rtl8168_mdio_write(tp, 0x13, 0x80F4); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x8500); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8110); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xA800); + rtl8168_mdio_write(tp, 0x13, 0x810F); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x1D00); + rtl8168_mdio_write(tp, 0x13, 0x8111); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xF500); + rtl8168_mdio_write(tp, 0x13, 0x8113); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x6100); + rtl8168_mdio_write(tp, 0x13, 0x8115); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x9200); + rtl8168_mdio_write(tp, 0x13, 0x810E); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x0400); + rtl8168_mdio_write(tp, 0x13, 0x810C); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x7C00); + rtl8168_mdio_write(tp, 0x13, 0x810B); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x5A00); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x80D1); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xFF00); + rtl8168_mdio_write(tp, 0x13, 0x80CD); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x9E00); + rtl8168_mdio_write(tp, 0x13, 0x80D3); + rtl8168_mdio_write(tp, 0x14, 
(rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x0E00); + rtl8168_mdio_write(tp, 0x13, 0x80D5); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xCA00); + rtl8168_mdio_write(tp, 0x13, 0x80D7); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x8400); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_2); + } + } + } else if (tp->mcfg == CFG_METHOD_29) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x809b); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xF800 , + 0x8000 + ); + rtl8168_mdio_write(tp, 0x13, 0x80A2); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x8000 + ); + rtl8168_mdio_write(tp, 0x13, 0x80A4); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x8500 + ); + rtl8168_mdio_write(tp, 0x13, 0x809C); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0xbd00 + ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x80AD); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xF800 , + 0x7000 + ); + rtl8168_mdio_write(tp, 0x13, 0x80B4); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x5000 + ); + rtl8168_mdio_write(tp, 0x13, 0x80AC); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x4000 + ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x808E); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x1200 + ); + rtl8168_mdio_write(tp, 0x13, 0x8090); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0xE500 + ); + rtl8168_mdio_write(tp, 0x13, 0x8092); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x9F00 + ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + u16 dout_tapbin; + + dout_tapbin = 0x0000; + rtl8168_mdio_write( tp, 0x1F, 0x0A46 ); + gphy_val = rtl8168_mdio_read( tp, 0x13 ); + gphy_val &= (BIT_1|BIT_0); + gphy_val <<= 2; + dout_tapbin |= gphy_val; + + gphy_val = rtl8168_mdio_read( tp, 0x12 ); + gphy_val &= (BIT_15|BIT_14); + gphy_val >>= 14; + dout_tapbin |= gphy_val; + + dout_tapbin = ~( dout_tapbin^BIT_3 ); + dout_tapbin <<= 12; + dout_tapbin &= 0xF000; + + rtl8168_mdio_write( tp, 0x1F, 0x0A43 ); + + rtl8168_mdio_write( tp, 0x13, 0x827A ); + ClearAndSetEthPhyBit( tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_12, + dout_tapbin + ); + + + rtl8168_mdio_write( tp, 0x13, 0x827B ); + ClearAndSetEthPhyBit( tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_12, + dout_tapbin + ); + + + rtl8168_mdio_write( tp, 0x13, 0x827C ); + ClearAndSetEthPhyBit( tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_12, + dout_tapbin + ); + + + rtl8168_mdio_write( tp, 0x13, 0x827D ); + ClearAndSetEthPhyBit( tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_12, + dout_tapbin + ); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_11); + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + rtl8168_set_eth_phy_bit(tp, 0x16, BIT_1); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_set_eth_phy_bit( tp, 0x11, BIT_11 ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + + rtl8168_mdio_write(tp, 0x1F, 0x0BCA); + ClearAndSetEthPhyBit( tp, + 0x17, + (BIT_13 | BIT_12) , + BIT_14 + ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x803F); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + rtl8168_mdio_write(tp, 0x13, 0x8047); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + 
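+ /* The same clear of reg 0x14 bits 13:12 repeats below for parameters 0x804F through 0x806F (stride 8), completing the run begun at 0x803F above. */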
rtl8168_mdio_write(tp, 0x13, 0x804F); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + rtl8168_mdio_write(tp, 0x13, 0x8057); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + rtl8168_mdio_write(tp, 0x13, 0x805F); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + rtl8168_mdio_write(tp, 0x13, 0x8067 ); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + rtl8168_mdio_write(tp, 0x13, 0x806F ); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_set_eth_phy_bit( tp, 0x10, BIT_2 ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + } + } else if (tp->mcfg == CFG_METHOD_30) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x808A); + ClearAndSetEthPhyBit( tp, + 0x14, + BIT_5 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0, + 0x0A ); + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_11); + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + rtl8168_set_eth_phy_bit(tp, 0x16, BIT_1); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_set_eth_phy_bit( tp, 0x11, BIT_11 ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (tp->RequireAdcBiasPatch) { + rtl8168_mdio_write(tp, 0x1F, 0x0BCF); + rtl8168_mdio_write(tp, 0x16, tp->AdcBiasPatchIoffset); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + { + u16 rlen; + + rtl8168_mdio_write(tp, 0x1F, 0x0BCD); + gphy_val = rtl8168_mdio_read( tp, 0x16 ); + gphy_val &= 0x000F; + + if ( gphy_val > 3 ) { + rlen = gphy_val - 3; + } else { + rlen = 0; + } + + gphy_val = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12); + + rtl8168_mdio_write(tp, 0x1F, 0x0BCD); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x85FE); + ClearAndSetEthPhyBit( + tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_12|BIT_11|BIT_10|BIT_8, + BIT_9); + rtl8168_mdio_write(tp, 0x13, 0x85FF); + ClearAndSetEthPhyBit( + tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_12, + BIT_11|BIT_10|BIT_9|BIT_8); + rtl8168_mdio_write(tp, 0x13, 0x814B); + ClearAndSetEthPhyBit( + tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_11|BIT_10|BIT_9|BIT_8, + BIT_12); + } + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_set_eth_phy_bit( tp, 0x10, BIT_2 ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + } + } else if (tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x808E); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x4800 + ); + rtl8168_mdio_write(tp, 0x13, 0x8090); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0xCC00 + ); + rtl8168_mdio_write(tp, 0x13, 0x8092); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0xB000 + ); + rtl8168_mdio_write(tp, 0x13, 0x8088); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x6000 + ); + rtl8168_mdio_write(tp, 0x13, 0x808B); + ClearAndSetEthPhyBit( tp, + 0x14, + 0x3F00 , + 0x0B00 + ); + rtl8168_mdio_write(tp, 0x13, 0x808D); + ClearAndSetEthPhyBit( tp, + 0x14, + 0x1F00 , + 0x0600 + ); + rtl8168_mdio_write(tp, 0x13, 0x808C); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0xB000 + ); + + rtl8168_mdio_write(tp, 0x13, 0x80A0); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x2800 + ); + 
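+ /* Each pair below selects a PHY parameter via reg 0x13 and then rewrites a masked field of reg 0x14: ClearAndSetEthPhyBit() clears the mask bits and ORs in the new value. The individual parameter meanings are undocumented by Realtek. */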
rtl8168_mdio_write(tp, 0x13, 0x80A2); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x5000 + ); + rtl8168_mdio_write(tp, 0x13, 0x809B); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xF800 , + 0xB000 + ); + rtl8168_mdio_write(tp, 0x13, 0x809A); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x4B00 + ); + rtl8168_mdio_write(tp, 0x13, 0x809D); + ClearAndSetEthPhyBit( tp, + 0x14, + 0x3F00 , + 0x0800 + ); + rtl8168_mdio_write(tp, 0x13, 0x80A1); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x7000 + ); + rtl8168_mdio_write(tp, 0x13, 0x809F); + ClearAndSetEthPhyBit( tp, + 0x14, + 0x1F00 , + 0x0300 + ); + rtl8168_mdio_write(tp, 0x13, 0x809E); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x8800 + ); + + rtl8168_mdio_write(tp, 0x13, 0x80B2); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x2200 + ); + rtl8168_mdio_write(tp, 0x13, 0x80AD); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xF800 , + 0x9800 + ); + rtl8168_mdio_write(tp, 0x13, 0x80AF); + ClearAndSetEthPhyBit( tp, + 0x14, + 0x3F00 , + 0x0800 + ); + rtl8168_mdio_write(tp, 0x13, 0x80B3); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x6F00 + ); + rtl8168_mdio_write(tp, 0x13, 0x80B1); + ClearAndSetEthPhyBit( tp, + 0x14, + 0x1F00 , + 0x0300 + ); + rtl8168_mdio_write(tp, 0x13, 0x80B0); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x9300 + ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_11); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_set_eth_phy_bit(tp, 0x11, BIT_11); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8016); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_10); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (aspm) { + if (!HW_SUPP_SERDES_PHY(tp) && + HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_set_eth_phy_bit( tp, 0x10, BIT_2 ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + } + } + +#ifdef ENABLE_FIBER_SUPPORT + if (HW_FIBER_MODE_ENABLED(tp)) + rtl8168_hw_fiber_phy_config(dev); +#endif //ENABLE_FIBER_SUPPORT + + //EthPhyPPSW + if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_24 || tp->mcfg == CFG_METHOD_25 || + tp->mcfg == CFG_METHOD_26) { + //disable EthPhyPPSW + rtl8168_mdio_write(tp, 0x1F, 0x0BCD); + rtl8168_mdio_write(tp, 0x14, 0x5065); + rtl8168_mdio_write(tp, 0x14, 0xD065); + rtl8168_mdio_write(tp, 0x1F, 0x0BC8); + rtl8168_mdio_write(tp, 0x11, 0x5655); + rtl8168_mdio_write(tp, 0x1F, 0x0BCD); + rtl8168_mdio_write(tp, 0x14, 0x1065); + rtl8168_mdio_write(tp, 0x14, 0x9065); + rtl8168_mdio_write(tp, 0x14, 0x1065); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + //enable EthPhyPPSW + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_set_eth_phy_bit( tp, 0x11, BIT_7 ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + /*ocp phy power saving*/ + if (tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + if (aspm) + rtl8168_enable_ocp_phy_power_saving(dev); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + if (tp->eee_enabled) 
+ rtl8168_enable_EEE(tp); + else + rtl8168_disable_EEE(tp); + } +} + +static inline void rtl8168_delete_esd_timer(struct net_device *dev, struct timer_list *timer) +{ + del_timer_sync(timer); +} + +static inline void rtl8168_request_esd_timer(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct timer_list *timer = &tp->esd_timer; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + setup_timer(timer, rtl8168_esd_timer, (unsigned long)dev); +#else + timer_setup(timer, rtl8168_esd_timer, 0); +#endif + mod_timer(timer, jiffies + RTL8168_ESD_TIMEOUT); +} + +static inline void rtl8168_delete_link_timer(struct net_device *dev, struct timer_list *timer) +{ + del_timer_sync(timer); +} + +static inline void rtl8168_request_link_timer(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct timer_list *timer = &tp->link_timer; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + setup_timer(timer, rtl8168_link_timer, (unsigned long)dev); +#else + timer_setup(timer, rtl8168_link_timer, 0); +#endif + mod_timer(timer, jiffies + RTL8168_LINK_TIMEOUT); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void +rtl8168_netpoll(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + disable_irq(pdev->irq); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) + rtl8168_interrupt(pdev->irq, dev, NULL); +#else + rtl8168_interrupt(pdev->irq, dev); +#endif + enable_irq(pdev->irq); +} +#endif + +static void +rtl8168_get_bios_setting(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->bios_setting = RTL_R32(tp, 0x8c); + break; + } +} + +static void +rtl8168_set_bios_setting(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W32(tp, 0x8C, tp->bios_setting); + break; + } +} + +static void +rtl8168_init_software_variable(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + rtl8168_get_bios_setting(dev); + + switch (tp->mcfg) { + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + tp->HwSuppDashVer = 1; + break; + case CFG_METHOD_23: + case CFG_METHOD_27: + case 
CFG_METHOD_28: + tp->HwSuppDashVer = 2; + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppDashVer = 3; + break; + default: + tp->HwSuppDashVer = 0; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwPkgDet = rtl8168_mac_ocp_read(tp, 0xDC00); + tp->HwPkgDet = (tp->HwPkgDet >> 3) & 0x0F; + break; + } + + if (HW_SUPP_SERDES_PHY(tp)) + eee_enable = 0; + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppNowIsOobVer = 1; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppPhyOcpVer = 1; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppUpsVer = 1; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwPcieSNOffset = 0x16C; + break; + case CFG_METHOD_DEFAULT: + tp->HwPcieSNOffset = 0; + break; + default: + tp->HwPcieSNOffset = 0x164; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppAspmClkIntrLock = 1; + break; + } + + if (!aspm || !tp->HwSuppAspmClkIntrLock) + dynamic_aspm = 0; + +#ifdef ENABLE_REALWOW_SUPPORT + rtl8168_get_realwow_hw_version(dev); +#endif //ENABLE_REALWOW_SUPPORT + + if (HW_DASH_SUPPORT_DASH(tp) && rtl8168_check_dash(tp)) + tp->DASH = 1; + else + tp->DASH = 0; + + if (tp->DASH) { + if (HW_DASH_SUPPORT_TYPE_3(tp)) { + u64 CmacMemPhysAddress; + void __iomem *cmac_ioaddr = NULL; + struct pci_dev *pdev_cmac; + + pdev_cmac = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0)); + + //map CMAC IO space + CmacMemPhysAddress = pci_resource_start(pdev_cmac, 2); + + /* ioremap MMIO region */ + cmac_ioaddr = ioremap(CmacMemPhysAddress, R8168_REGS_SIZE); + + if (cmac_ioaddr == NULL) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "cannot remap CMAC MMIO, aborting\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + tp->DASH = 0; + } else { + tp->mapped_cmac_ioaddr = cmac_ioaddr; + } + } + + eee_enable = 0; + } + +#ifdef ENABLE_DASH_SUPPORT +#ifdef ENABLE_DASH_PRINTER_SUPPORT + if (tp->DASH) { + if (HW_DASH_SUPPORT_TYPE_3(tp) && tp->HwPkgDet == 0x0F) + tp->dash_printer_enabled = 1; + else if (HW_DASH_SUPPORT_TYPE_2(tp)) + tp->dash_printer_enabled = 1; + } +#endif //ENABLE_DASH_PRINTER_SUPPORT +#endif //ENABLE_DASH_SUPPORT + + if (HW_DASH_SUPPORT_TYPE_2(tp)) + tp->cmac_ioaddr = tp->mmio_addr; + else if (HW_DASH_SUPPORT_TYPE_3(tp)) + tp->cmac_ioaddr = tp->mapped_cmac_ioaddr; + + switch (tp->mcfg) 
{ + case CFG_METHOD_1: + tp->intr_mask = RxDescUnavail | RxFIFOOver | TxDescUnavail | TxOK | RxOK | SWInt; + tp->timer_intr_mask = PCSTimeout | RxFIFOOver; + break; + case CFG_METHOD_2: + case CFG_METHOD_3: + case CFG_METHOD_4: + tp->intr_mask = RxDescUnavail | TxDescUnavail | TxOK | RxOK | SWInt; + tp->timer_intr_mask = PCSTimeout; + break; + default: + tp->intr_mask = RxDescUnavail | TxOK | RxOK | SWInt; + tp->timer_intr_mask = PCSTimeout; + break; + } + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) { + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + tp->timer_intr_mask |= ( ISRIMR_DASH_INTR_EN | ISRIMR_DASH_INTR_CMAC_RESET); + tp->intr_mask |= ( ISRIMR_DASH_INTR_EN | ISRIMR_DASH_INTR_CMAC_RESET); + } else { + tp->timer_intr_mask |= ( ISRIMR_DP_DASH_OK | ISRIMR_DP_HOST_OK | ISRIMR_DP_REQSYS_OK ); + tp->intr_mask |= ( ISRIMR_DP_DASH_OK | ISRIMR_DP_HOST_OK | ISRIMR_DP_REQSYS_OK ); + } + } +#endif + if (aspm) { + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->org_pci_offset_99 = rtl8168_csi_fun0_read_byte(tp, 0x99); + tp->org_pci_offset_99 &= ~(BIT_5|BIT_6); + break; + } + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + tp->org_pci_offset_180 = rtl8168_csi_fun0_read_byte(tp, 0x180); + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->org_pci_offset_180 = rtl8168_csi_fun0_read_byte(tp, 0x214); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_27: + case CFG_METHOD_28: + if (tp->org_pci_offset_99 & BIT_2) + tp->issue_offset_99_event = TRUE; + break; + } + } + + pci_read_config_byte(pdev, 0x80, &tp->org_pci_offset_80); + pci_read_config_byte(pdev, 0x81, &tp->org_pci_offset_81); + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + if ((tp->features & RTL_FEATURE_MSI) && (tp->org_pci_offset_80 & BIT_1)) + tp->use_timer_interrrupt = FALSE; + else + tp->use_timer_interrrupt = TRUE; + break; + default: + tp->use_timer_interrrupt = TRUE; + break; + } + + if (timer_count == 0 || tp->mcfg == CFG_METHOD_DEFAULT) + tp->use_timer_interrrupt = FALSE; + + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + tp->ShortPacketSwChecksum = TRUE; + break; + case CFG_METHOD_16: + case CFG_METHOD_17: + tp->ShortPacketSwChecksum = TRUE; + tp->UseSwPaddingShortPkt = TRUE; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_30: { + u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0; + u16 TmpUshort; + + rtl8168_mac_ocp_write( tp, 0xDD02, 0x807D); + TmpUshort = rtl8168_mac_ocp_read( tp, 0xDD02 ); + ioffset_p3 = ( (TmpUshort & BIT_7) >>7 ); + ioffset_p3 <<= 3; + TmpUshort = rtl8168_mac_ocp_read( tp, 0xDD00 ); + + ioffset_p3 |= ((TmpUshort & (BIT_15 | BIT_14 | BIT_13))>>13); + + ioffset_p2 = ((TmpUshort & (BIT_12|BIT_11|BIT_10|BIT_9))>>9); + ioffset_p1 = ((TmpUshort & (BIT_8|BIT_7|BIT_6|BIT_5))>>5); + + 
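+ /*
+  * The four ioffset nibbles are scattered across the two OCP words read
+  * above; as decoded here their bit layout is:
+  *
+  *   p3 = DD02[7]:DD00[15:13]    p2 = DD00[12:9]
+  *   p1 = DD00[8:5]              p0 = DD00[4]:DD00[2:0]
+  *
+  * All-ones (0xF in every nibble) means the fuse is unprogrammed and no
+  * ADC bias patch is applied; anything else is repacked below as
+  * (p3<<12)|(p2<<8)|(p1<<4)|p0 into AdcBiasPatchIoffset.
+  */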
ioffset_p0 = ( (TmpUshort & BIT_4) >>4 ); + ioffset_p0 <<= 3; + ioffset_p0 |= (TmpUshort & (BIT_2| BIT_1 | BIT_0)); + + if ((ioffset_p3 == 0x0F) && (ioffset_p2 == 0x0F) && (ioffset_p1 == 0x0F) && (ioffset_p0 == 0x0F)) { + tp->RequireAdcBiasPatch = FALSE; + } else { + tp->RequireAdcBiasPatch = TRUE; + tp->AdcBiasPatchIoffset = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0); + } + } + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: { + u16 rg_saw_cnt; + + rtl8168_mdio_write(tp, 0x1F, 0x0C42); + rg_saw_cnt = rtl8168_mdio_read(tp, 0x13); + rg_saw_cnt &= ~(BIT_15|BIT_14); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if ( rg_saw_cnt > 0) { + tp->SwrCnt1msIni = 16000000/rg_saw_cnt; + tp->SwrCnt1msIni &= 0x0FFF; + + tp->RequireAdjustUpsTxLinkPulseTiming = TRUE; + } + } + break; + } + +#ifdef ENABLE_FIBER_SUPPORT + rtl8168_check_hw_fiber_mode_support(dev); +#endif //ENABLE_FIBER_SUPPORT + + switch(tp->mcfg) { + case CFG_METHOD_32: + case CFG_METHOD_33: + if (tp->HwPkgDet == 0x06) { + u8 tmpUchar = rtl8168_eri_read(tp, 0xE6, 1, ERIAR_ExGMAC); + if (tmpUchar == 0x02) + tp->HwSuppSerDesPhyVer = 1; + else if (tmpUchar == 0x00) + tp->HwSuppSerDesPhyVer = 2; + } + break; + } + + if (pdev->subsystem_vendor == 0x144d) { + if (pdev->subsystem_device == 0xc098 || + pdev->subsystem_device == 0xc0b1 || + pdev->subsystem_device == 0xc0b8) + hwoptimize |= HW_PATCH_SAMSUNG_LAN_DONGLE; + } + + if (hwoptimize & HW_PATCH_SAMSUNG_LAN_DONGLE) { + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_30: + tp->RequiredSecLanDonglePatch = TRUE; + break; + } + } + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppMagicPktVer = WAKEUP_MAGIC_PACKET_V2; + break; + case CFG_METHOD_DEFAULT: + tp->HwSuppMagicPktVer = WAKEUP_MAGIC_PACKET_NOT_SUPPORT; + break; + default: + tp->HwSuppMagicPktVer = WAKEUP_MAGIC_PACKET_V1; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + tp->HwSuppEsdVer = 2; + break; + default: + tp->HwSuppEsdVer = 1; + break; + } + + if (tp->HwSuppEsdVer == 2) { + rtl8168_mdio_write(tp, 0x1F, 0x0A46); + tp->BackupPhyFuseDout_15_0 = rtl8168_mdio_read(tp, 0x10); + tp->BackupPhyFuseDout_47_32 = rtl8168_mdio_read(tp, 0x12); + tp->BackupPhyFuseDout_63_48 = rtl8168_mdio_read(tp, 0x13); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + tp->TestPhyOcpReg = TRUE; + } + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + tp->HwSuppCheckPhyDisableModeVer = 1; + break; + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + tp->HwSuppCheckPhyDisableModeVer = 2; + break; + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppCheckPhyDisableModeVer = 3; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + tp->sw_ram_code_ver = 
NIC_RAMCODE_VERSION_CFG_METHOD_14; + break; + case CFG_METHOD_16: + case CFG_METHOD_17: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_16; + break; + case CFG_METHOD_18: + case CFG_METHOD_19: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_18; + break; + case CFG_METHOD_20: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_20; + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_21; + break; + case CFG_METHOD_23: + case CFG_METHOD_27: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_23; + break; + case CFG_METHOD_24: + case CFG_METHOD_25: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_24; + break; + case CFG_METHOD_26: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_26; + break; + case CFG_METHOD_28: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_28; + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_29; + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_31; + break; + } + + if (tp->HwIcVerUnknown) { + tp->NotWrRamCodeToMicroP = TRUE; + tp->NotWrMcuPatchCode = TRUE; + } + + tp->NicCustLedValue = RTL_R16(tp, CustomLED); + + rtl8168_get_hw_wol(dev); + + rtl8168_link_option((u8*)&autoneg_mode, (u32*)&speed_mode, (u8*)&duplex_mode, (u32*)&advertising_mode); + + tp->autoneg = autoneg_mode; + tp->speed = speed_mode; + tp->duplex = duplex_mode; + tp->advertising = advertising_mode; + + tp->max_jumbo_frame_size = rtl_chip_info[tp->chipset].jumbo_frame_sz; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) + /* MTU range: 60 - hw-specific max */ + dev->min_mtu = ETH_MIN_MTU; + dev->max_mtu = tp->max_jumbo_frame_size; +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) + tp->eee_enabled = eee_enable; + tp->eee_adv_t = MDIO_EEE_1000T | MDIO_EEE_100TX; + +#ifdef ENABLE_FIBER_SUPPORT + if (HW_FIBER_MODE_ENABLED(tp)) + rtl8168_set_fiber_mode_software_variable(dev); +#endif //ENABLE_FIBER_SUPPORT +} + +static void +rtl8168_release_board(struct pci_dev *pdev, + struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + rtl8168_set_bios_setting(dev); + rtl8168_rar_set(tp, tp->org_mac_addr); + tp->wol_enabled = WOL_DISABLED; + + if (!tp->DASH) + rtl8168_phy_power_down(dev); + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) + FreeAllocatedDashShareMemory(dev); +#endif + + if (tp->mapped_cmac_ioaddr != NULL) + iounmap(tp->mapped_cmac_ioaddr); + + iounmap(ioaddr); + pci_release_regions(pdev); + pci_clear_mwi(pdev); + pci_disable_device(pdev); + free_netdev(dev); +} + +static int +rtl8168_get_mac_address(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int i; + u8 mac_addr[MAC_ADDR_LEN]; + + for (i = 0; i < MAC_ADDR_LEN; i++) + mac_addr[i] = RTL_R8(tp, MAC0 + i); + + if (tp->mcfg == CFG_METHOD_18 || + tp->mcfg == CFG_METHOD_19 || + tp->mcfg == CFG_METHOD_20 || + tp->mcfg == CFG_METHOD_21 || + tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_23 || + tp->mcfg == CFG_METHOD_24 || + tp->mcfg == CFG_METHOD_25 || + tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || + tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || + tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || + tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + *(u32*)&mac_addr[0] = rtl8168_eri_read(tp, 0xE0, 4, ERIAR_ExGMAC); + *(u16*)&mac_addr[4] = rtl8168_eri_read(tp, 0xE4, 2, 
ERIAR_ExGMAC); + } else { + if (tp->eeprom_type != EEPROM_TYPE_NONE) { + u16 *pUshort = (u16*)mac_addr; + /* Get MAC address from EEPROM */ + if (tp->mcfg == CFG_METHOD_16 || + tp->mcfg == CFG_METHOD_17 || + tp->mcfg == CFG_METHOD_18 || + tp->mcfg == CFG_METHOD_19 || + tp->mcfg == CFG_METHOD_20 || + tp->mcfg == CFG_METHOD_21 || + tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_23 || + tp->mcfg == CFG_METHOD_24 || + tp->mcfg == CFG_METHOD_25 || + tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || + tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || + tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || + tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + *pUshort++ = rtl8168_eeprom_read_sc(tp, 1); + *pUshort++ = rtl8168_eeprom_read_sc(tp, 2); + *pUshort = rtl8168_eeprom_read_sc(tp, 3); + } else { + *pUshort++ = rtl8168_eeprom_read_sc(tp, 7); + *pUshort++ = rtl8168_eeprom_read_sc(tp, 8); + *pUshort = rtl8168_eeprom_read_sc(tp, 9); + } + } + } + + if (!is_valid_ether_addr(mac_addr)) { + netif_err(tp, probe, dev, "Invalid ether addr %pM\n", + mac_addr); + eth_hw_addr_random(dev); + ether_addr_copy(mac_addr, dev->dev_addr); + netif_info(tp, probe, dev, "Random ether addr %pM\n", + mac_addr); + tp->random_mac = 1; + } + + rtl8168_rar_set(tp, mac_addr); + + for (i = 0; i < MAC_ADDR_LEN; i++) { + dev->dev_addr[i] = RTL_R8(tp, MAC0 + i); + tp->org_mac_addr[i] = dev->dev_addr[i]; /* keep the original MAC address */ + } +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); +#endif +// memcpy(dev->dev_addr, dev->dev_addr, dev->addr_len); + + return 0; +} + +/** + * rtl8168_set_mac_address - Change the Ethernet Address of the NIC + * @dev: network interface device structure + * @p: pointer to an address structure + * + * Return 0 on success, negative on failure + **/ +static int +rtl8168_set_mac_address(struct net_device *dev, + void *p) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct sockaddr *addr = p; + unsigned long flags; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + spin_lock_irqsave(&tp->lock, flags); + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + rtl8168_rar_set(tp, dev->dev_addr); + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} + +/****************************************************************************** + * rtl8168_rar_set - Puts an ethernet address into a receive address register. 
+ * + * tp - The private data structure for driver + * addr - Address to put into receive address register + *****************************************************************************/ +void +rtl8168_rar_set(struct rtl8168_private *tp, + uint8_t *addr) +{ + uint32_t rar_low = 0; + uint32_t rar_high = 0; + + rar_low = ((uint32_t) addr[0] | + ((uint32_t) addr[1] << 8) | + ((uint32_t) addr[2] << 16) | + ((uint32_t) addr[3] << 24)); + + rar_high = ((uint32_t) addr[4] | + ((uint32_t) addr[5] << 8)); + + rtl8168_enable_cfg9346_write(tp); + RTL_W32(tp, MAC0, rar_low); + RTL_W32(tp, MAC4, rar_high); + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + RTL_W32(tp, SecMAC0, rar_low); + RTL_W16(tp, SecMAC4, (uint16_t)rar_high); + break; + } + + if (tp->mcfg == CFG_METHOD_17) { + rtl8168_eri_write(tp, 0xf0, 4, rar_low << 16, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xf4, 4, rar_low >> 16 | rar_high << 16, ERIAR_ExGMAC); + } + + rtl8168_disable_cfg9346_write(tp); +} + +#ifdef ETHTOOL_OPS_COMPAT +static int ethtool_get_settings(struct net_device *dev, void *useraddr) +{ + struct ethtool_cmd cmd = { ETHTOOL_GSET }; + int err; + + if (!ethtool_ops->get_settings) + return -EOPNOTSUPP; + + err = ethtool_ops->get_settings(dev, &cmd); + if (err < 0) + return err; + + if (copy_to_user(useraddr, &cmd, sizeof(cmd))) + return -EFAULT; + return 0; +} + +static int ethtool_set_settings(struct net_device *dev, void *useraddr) +{ + struct ethtool_cmd cmd; + + if (!ethtool_ops->set_settings) + return -EOPNOTSUPP; + + if (copy_from_user(&cmd, useraddr, sizeof(cmd))) + return -EFAULT; + + return ethtool_ops->set_settings(dev, &cmd); +} + +static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr) +{ + struct ethtool_drvinfo info; + struct ethtool_ops *ops = ethtool_ops; + + if (!ops->get_drvinfo) + return -EOPNOTSUPP; + + memset(&info, 0, sizeof(info)); + info.cmd = ETHTOOL_GDRVINFO; + ops->get_drvinfo(dev, &info); + + if (ops->self_test_count) + info.testinfo_len = ops->self_test_count(dev); + if (ops->get_stats_count) + info.n_stats = ops->get_stats_count(dev); + if (ops->get_regs_len) + info.regdump_len = ops->get_regs_len(dev); + if (ops->get_eeprom_len) + info.eedump_len = ops->get_eeprom_len(dev); + + if (copy_to_user(useraddr, &info, sizeof(info))) + return -EFAULT; + return 0; +} + +static int ethtool_get_regs(struct net_device *dev, char *useraddr) +{ + struct ethtool_regs regs; + struct ethtool_ops *ops = ethtool_ops; + void *regbuf; + int reglen, ret; + + if (!ops->get_regs || !ops->get_regs_len) + return -EOPNOTSUPP; + + if (copy_from_user(&regs, useraddr, sizeof(regs))) + return -EFAULT; + + reglen = ops->get_regs_len(dev); + if (regs.len > reglen) + regs.len = reglen; + + regbuf = kmalloc(reglen, GFP_USER); + if (!regbuf) + return -ENOMEM; + + ops->get_regs(dev, &regs, regbuf); + + ret = -EFAULT; + if (copy_to_user(useraddr, &regs, sizeof(regs))) + goto out; + useraddr += offsetof(struct ethtool_regs, data); + if (copy_to_user(useraddr, regbuf, reglen)) + goto out; + ret = 0; + +out: + kfree(regbuf); + return ret; +} + +static int ethtool_get_wol(struct net_device *dev, char *useraddr) +{ + struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; + + if (!ethtool_ops->get_wol) + return -EOPNOTSUPP; + + ethtool_ops->get_wol(dev, &wol); + + if (copy_to_user(useraddr, &wol, sizeof(wol))) + return -EFAULT; + return 0; +} + +static int ethtool_set_wol(struct net_device *dev, char *useraddr) +{ + struct ethtool_wolinfo wol; + + if
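+ /*
+  * Every handler in this ETHTOOL_OPS_COMPAT block has the same shape; a
+  * minimal sketch of the "get" variant (ETHTOOL_GFOO and get_foo are
+  * placeholders, not real command names):
+  *
+  *   struct ethtool_value edata = { ETHTOOL_GFOO };   // cmd id in-band
+  *   if (!ethtool_ops->get_foo)
+  *       return -EOPNOTSUPP;                          // op not wired up
+  *   edata.data = ethtool_ops->get_foo(dev);          // query the driver
+  *   if (copy_to_user(useraddr, &edata, sizeof(edata)))
+  *       return -EFAULT;                              // user copy failed
+  *   return 0;
+  *
+  * The "set" variants mirror this with a copy_from_user() before the
+  * driver call instead of a copy_to_user() after it.
+  */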
(copy_from_user(&wol, useraddr, sizeof(wol))) + return -EFAULT; + + return ethtool_ops->set_wol(dev, &wol); +} + +static int ethtool_get_msglevel(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GMSGLVL }; + + if (!ethtool_ops->get_msglevel) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_msglevel(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_msglevel(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_msglevel) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + ethtool_ops->set_msglevel(dev, edata.data); + return 0; +} + +static int ethtool_nway_reset(struct net_device *dev) +{ + if (!ethtool_ops->nway_reset) + return -EOPNOTSUPP; + + return ethtool_ops->nway_reset(dev); +} + +static int ethtool_get_link(struct net_device *dev, void *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GLINK }; + + if (!ethtool_ops->get_link) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_link(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_get_eeprom(struct net_device *dev, void *useraddr) +{ + struct ethtool_eeprom eeprom; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->get_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + return -EINVAL; + + data = kmalloc(eeprom.len, GFP_USER); + if (!data) + return -ENOMEM; + + ret = -EFAULT; + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) + goto out; + + ret = ops->get_eeprom(dev, &eeprom, data); + if (ret) + goto out; + + ret = -EFAULT; + if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) + goto out; + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_set_eeprom(struct net_device *dev, void *useraddr) +{ + struct ethtool_eeprom eeprom; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->set_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + return -EINVAL; + + data = kmalloc(eeprom.len, GFP_USER); + if (!data) + return -ENOMEM; + + ret = -EFAULT; + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) + goto out; + + ret = ops->set_eeprom(dev, &eeprom, data); + if (ret) + goto out; + + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) + ret = -EFAULT; + +out: + kfree(data); + return ret; +} + +static int ethtool_get_coalesce(struct net_device *dev, void *useraddr) +{ + struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; + + if (!ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + + ethtool_ops->get_coalesce(dev, &coalesce); + + if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) + return -EFAULT; + return 0; +} + +static int ethtool_set_coalesce(struct net_device *dev, void *useraddr) +{ + struct 
ethtool_coalesce coalesce; + + if (!ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + + if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) + return -EFAULT; + + return ethtool_ops->set_coalesce(dev, &coalesce); +} + +static int ethtool_get_ringparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; + + if (!ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + ethtool_ops->get_ringparam(dev, &ringparam); + + if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_ringparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_ringparam ringparam; + + if (!ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) + return -EFAULT; + + return ethtool_ops->set_ringparam(dev, &ringparam); +} + +static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; + + if (!ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + ethtool_ops->get_pauseparam(dev, &pauseparam); + + if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_pauseparam pauseparam; + + if (!ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) + return -EFAULT; + + return ethtool_ops->set_pauseparam(dev, &pauseparam); +} + +static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GRXCSUM }; + + if (!ethtool_ops->get_rx_csum) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_rx_csum(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_rx_csum) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + ethtool_ops->set_rx_csum(dev, edata.data); + return 0; +} + +static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GTXCSUM }; + + if (!ethtool_ops->get_tx_csum) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_tx_csum(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_tx_csum) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_tx_csum(dev, edata.data); +} + +static int ethtool_get_sg(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GSG }; + + if (!ethtool_ops->get_sg) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_sg(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_sg(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_sg) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_sg(dev, edata.data); +} + +static int ethtool_get_tso(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GTSO }; + + if (!ethtool_ops->get_tso) + return 
-EOPNOTSUPP; + + edata.data = ethtool_ops->get_tso(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_tso(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_tso) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_tso(dev, edata.data); +} + +static int ethtool_self_test(struct net_device *dev, char *useraddr) +{ + struct ethtool_test test; + struct ethtool_ops *ops = ethtool_ops; + u64 *data; + int ret; + + if (!ops->self_test || !ops->self_test_count) + return -EOPNOTSUPP; + + if (copy_from_user(&test, useraddr, sizeof(test))) + return -EFAULT; + + test.len = ops->self_test_count(dev); + data = kmalloc(test.len * sizeof(u64), GFP_USER); + if (!data) + return -ENOMEM; + + ops->self_test(dev, &test, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &test, sizeof(test))) + goto out; + useraddr += sizeof(test); + if (copy_to_user(useraddr, data, test.len * sizeof(u64))) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_get_strings(struct net_device *dev, void *useraddr) +{ + struct ethtool_gstrings gstrings; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->get_strings) + return -EOPNOTSUPP; + + if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) + return -EFAULT; + + switch (gstrings.string_set) { + case ETH_SS_TEST: + if (!ops->self_test_count) + return -EOPNOTSUPP; + gstrings.len = ops->self_test_count(dev); + break; + case ETH_SS_STATS: + if (!ops->get_stats_count) + return -EOPNOTSUPP; + gstrings.len = ops->get_stats_count(dev); + break; + default: + return -EINVAL; + } + + data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); + if (!data) + return -ENOMEM; + + ops->get_strings(dev, gstrings.string_set, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) + goto out; + useraddr += sizeof(gstrings); + if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_phys_id(struct net_device *dev, void *useraddr) +{ + struct ethtool_value id; + + if (!ethtool_ops->phys_id) + return -EOPNOTSUPP; + + if (copy_from_user(&id, useraddr, sizeof(id))) + return -EFAULT; + + return ethtool_ops->phys_id(dev, id.data); +} + +static int ethtool_get_stats(struct net_device *dev, void *useraddr) +{ + struct ethtool_stats stats; + struct ethtool_ops *ops = ethtool_ops; + u64 *data; + int ret; + + if (!ops->get_ethtool_stats || !ops->get_stats_count) + return -EOPNOTSUPP; + + if (copy_from_user(&stats, useraddr, sizeof(stats))) + return -EFAULT; + + stats.n_stats = ops->get_stats_count(dev); + data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); + if (!data) + return -ENOMEM; + + ops->get_ethtool_stats(dev, &stats, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &stats, sizeof(stats))) + goto out; + useraddr += sizeof(stats); + if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_ioctl(struct ifreq *ifr) +{ + struct net_device *dev = __dev_get_by_name(ifr->ifr_name); + void *useraddr = (void *) ifr->ifr_data; + u32 ethcmd; + + /* + * XXX: This can be pushed down into the ethtool_* handlers that + * need it. Keep existing behaviour for the moment. 
+ */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!dev || !netif_device_present(dev)) + return -ENODEV; + + if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd))) + return -EFAULT; + + switch (ethcmd) { + case ETHTOOL_GSET: + return ethtool_get_settings(dev, useraddr); + case ETHTOOL_SSET: + return ethtool_set_settings(dev, useraddr); + case ETHTOOL_GDRVINFO: + return ethtool_get_drvinfo(dev, useraddr); + case ETHTOOL_GREGS: + return ethtool_get_regs(dev, useraddr); + case ETHTOOL_GWOL: + return ethtool_get_wol(dev, useraddr); + case ETHTOOL_SWOL: + return ethtool_set_wol(dev, useraddr); + case ETHTOOL_GMSGLVL: + return ethtool_get_msglevel(dev, useraddr); + case ETHTOOL_SMSGLVL: + return ethtool_set_msglevel(dev, useraddr); + case ETHTOOL_NWAY_RST: + return ethtool_nway_reset(dev); + case ETHTOOL_GLINK: + return ethtool_get_link(dev, useraddr); + case ETHTOOL_GEEPROM: + return ethtool_get_eeprom(dev, useraddr); + case ETHTOOL_SEEPROM: + return ethtool_set_eeprom(dev, useraddr); + case ETHTOOL_GCOALESCE: + return ethtool_get_coalesce(dev, useraddr); + case ETHTOOL_SCOALESCE: + return ethtool_set_coalesce(dev, useraddr); + case ETHTOOL_GRINGPARAM: + return ethtool_get_ringparam(dev, useraddr); + case ETHTOOL_SRINGPARAM: + return ethtool_set_ringparam(dev, useraddr); + case ETHTOOL_GPAUSEPARAM: + return ethtool_get_pauseparam(dev, useraddr); + case ETHTOOL_SPAUSEPARAM: + return ethtool_set_pauseparam(dev, useraddr); + case ETHTOOL_GRXCSUM: + return ethtool_get_rx_csum(dev, useraddr); + case ETHTOOL_SRXCSUM: + return ethtool_set_rx_csum(dev, useraddr); + case ETHTOOL_GTXCSUM: + return ethtool_get_tx_csum(dev, useraddr); + case ETHTOOL_STXCSUM: + return ethtool_set_tx_csum(dev, useraddr); + case ETHTOOL_GSG: + return ethtool_get_sg(dev, useraddr); + case ETHTOOL_SSG: + return ethtool_set_sg(dev, useraddr); + case ETHTOOL_GTSO: + return ethtool_get_tso(dev, useraddr); + case ETHTOOL_STSO: + return ethtool_set_tso(dev, useraddr); + case ETHTOOL_TEST: + return ethtool_self_test(dev, useraddr); + case ETHTOOL_GSTRINGS: + return ethtool_get_strings(dev, useraddr); + case ETHTOOL_PHYS_ID: + return ethtool_phys_id(dev, useraddr); + case ETHTOOL_GSTATS: + return ethtool_get_stats(dev, useraddr); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} +#endif //ETHTOOL_OPS_COMPAT + +static int +rtl8168_do_ioctl(struct net_device *dev, + struct ifreq *ifr, + int cmd) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + int ret; + unsigned long flags; + + ret = 0; + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 32; /* Internal PHY */ + break; + + case SIOCGMIIREG: + spin_lock_irqsave(&tp->lock, flags); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + data->val_out = rtl8168_mdio_read(tp, data->reg_num); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + case SIOCSMIIREG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + spin_lock_irqsave(&tp->lock, flags); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, data->reg_num, data->val_in); + spin_unlock_irqrestore(&tp->lock, flags); + break; + +#ifdef ETHTOOL_OPS_COMPAT + case SIOCETHTOOL: + ret = ethtool_ioctl(ifr); + break; +#endif + case SIOCDEVPRIVATE_RTLASF: + if (!netif_running(dev)) { + ret = -ENODEV; + break; + } + + ret = rtl8168_asf_ioctl(dev, ifr); + break; + +#ifdef ENABLE_DASH_SUPPORT + case SIOCDEVPRIVATE_RTLDASH: + if (!netif_running(dev)) { + ret = -ENODEV; + break; + } + if (!capable(CAP_NET_ADMIN)) { + ret = -EPERM; + break; + } + + ret = 
rtl8168_dash_ioctl(dev, ifr); + break; +#endif + +#ifdef ENABLE_REALWOW_SUPPORT + case SIOCDEVPRIVATE_RTLREALWOW: + if (!netif_running(dev)) { + ret = -ENODEV; + break; + } + + ret = rtl8168_realwow_ioctl(dev, ifr); + break; +#endif + + case SIOCRTLTOOL: + ret = rtl8168_tool_ioctl(tp, ifr); + break; + + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static void +rtl8168_phy_power_up(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (rtl8168_is_in_phy_disable_mode(dev)) + return; + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + case CFG_METHOD_4: + case CFG_METHOD_5: + case CFG_METHOD_6: + case CFG_METHOD_7: + case CFG_METHOD_8: + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + rtl8168_mdio_write(tp, 0x0E, 0x0000); + break; + } + rtl8168_mdio_write(tp, MII_BMCR, BMCR_ANENABLE); + + //wait mdc/mdio ready + switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + mdelay(10); + break; + } + + //wait ups resume (phy state 3) + if (HW_SUPPORT_UPS_MODE(tp)) + rtl8168_wait_phy_ups_resume(dev, HW_PHY_STATUS_LAN_ON); +} + +static void +rtl8168_phy_power_down(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + csi_tmp = rtl8168_eri_read(tp, 0x1AB, 1, ERIAR_ExGMAC); + csi_tmp &= ~( BIT_2 | BIT_3 | BIT_4 | BIT_5 | BIT_6 | BIT_7 ); + rtl8168_eri_write(tp, 0x1AB, 1, csi_tmp, ERIAR_ExGMAC); + break; + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + case CFG_METHOD_4: + case CFG_METHOD_5: + case CFG_METHOD_6: + case CFG_METHOD_7: + case CFG_METHOD_8: + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + rtl8168_mdio_write(tp, 0x0E, 0x0200); + rtl8168_mdio_write(tp, MII_BMCR, BMCR_PDOWN); + break; + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_29: + case CFG_METHOD_30: + rtl8168_mdio_write(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + rtl8168_mdio_write(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); + break; + case CFG_METHOD_23: + case CFG_METHOD_24: + rtl8168_mdio_write(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); + break; + default: + rtl8168_mdio_write(tp, MII_BMCR, BMCR_PDOWN); + break; + } +} + +static int __devinit +rtl8168_init_board(struct pci_dev *pdev, + struct net_device **dev_out, + void __iomem **ioaddr_out) +{ + void __iomem *ioaddr; + struct net_device *dev; + struct rtl8168_private *tp; + int rc = -ENOMEM, i, pm_cap; + + assert(ioaddr_out != NULL); + + /* dev zeroed in alloc_etherdev */ + dev = alloc_etherdev(sizeof (*tp)); + if (dev == NULL) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_drv(&debug)) + dev_err(&pdev->dev, "unable to alloc new ethernet\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + goto err_out; + } + + SET_MODULE_OWNER(dev); + SET_NETDEV_DEV(dev, &pdev->dev); + tp = netdev_priv(dev); + tp->dev = dev; + tp->msg_enable = netif_msg_init(debug.msg_enable, R8168_MSG_DEFAULT); + + if (!aspm || tp->mcfg == CFG_METHOD_9) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); +#endif + } + 
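+ /*
+  * From here rtl8168_init_board() follows the conventional PCI probe
+  * order: pci_enable_device() -> pci_set_mwi() (best effort) -> sanity
+  * checks on BAR 2 -> pci_request_regions() -> DMA mask (64-bit when
+  * use_dac is set, 32-bit otherwise) -> ioremap() of the MMIO BAR ->
+  * chip identification. Each step that can fail jumps to the err_out_*
+  * label that releases exactly what was acquired before it.
+  */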
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */ + rc = pci_enable_device(pdev); + if (rc < 0) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "enable failure\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + goto err_out_free_dev; + } + + if (pci_set_mwi(pdev) < 0) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_drv(&debug)) + dev_info(&pdev->dev, "Mem-Wr-Inval unavailable.\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + } + + /* save power state before pci_enable_device overwrites it */ + pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (pm_cap) { + u16 pwr_command; + + pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command); + } else { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) { + dev_err(&pdev->dev, "PowerManagement capability not found.\n"); + } +#else + printk("PowerManagement capability not found.\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + + } + + /* make sure PCI base addr 1 is MMIO */ + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + rc = -ENODEV; + goto err_out_mwi; + } + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, 2) < R8168_REGS_SIZE) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + rc = -ENODEV; + goto err_out_mwi; + } + + rc = pci_request_regions(pdev, MODULENAME); + if (rc < 0) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "could not request regions.\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + goto err_out_mwi; + } + + if ((sizeof(dma_addr_t) > 4) && + use_dac && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + dev->features |= NETIF_F_HIGHDMA; + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "DMA configuration failed.\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + goto err_out_free_res; + } + } + + /* ioremap MMIO region */ + ioaddr = ioremap(pci_resource_start(pdev, 2), R8168_REGS_SIZE); + if (ioaddr == NULL) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + rc = -EIO; + goto err_out_free_res; + } + + tp->mmio_addr = ioaddr; + + /* Identify chip attached to board */ + rtl8168_get_mac_version(tp); + + rtl8168_print_mac_version(tp); + + for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) { + if (tp->mcfg == rtl_chip_info[i].mcfg) + break; + } + + if (i < 0) { + /* Unknown chip: assume array element #0, original RTL-8168 */ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_printk(KERN_DEBUG, &pdev->dev, "unknown chip version, assuming %s\n", rtl_chip_info[0].name); +#else + printk("Realtek unknown chip version, assuming %s\n", rtl_chip_info[0].name); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) + i++; + } + + tp->chipset = i; + + *ioaddr_out = ioaddr; + *dev_out = dev; +out: + return rc; + 
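+ /*
+  * The error unwind below mirrors the acquisition order above in
+  * reverse, so any failure point can jump to the first label that undoes
+  * everything set up so far; *ioaddr_out and *dev_out are cleared on the
+  * way out so the caller never sees a half-initialized board.
+  */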
+err_out_free_res: + pci_release_regions(pdev); +err_out_mwi: + pci_clear_mwi(pdev); + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(dev); +err_out: + *ioaddr_out = NULL; + *dev_out = NULL; + goto out; +} + +static void +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) +rtl8168_esd_timer(unsigned long __opaque) +#else +rtl8168_esd_timer(struct timer_list *t) +#endif +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + struct net_device *dev = (struct net_device *)__opaque; + struct rtl8168_private *tp = netdev_priv(dev); + struct timer_list *timer = &tp->esd_timer; +#else + struct rtl8168_private *tp = from_timer(tp, t, esd_timer); + struct net_device *dev = tp->dev; + struct timer_list *timer = t; +#endif + struct pci_dev *pdev = tp->pci_dev; + unsigned long timeout = RTL8168_ESD_TIMEOUT; + unsigned long flags; + u8 cmd; + u16 io_base_l; + u16 mem_base_l; + u16 mem_base_h; + u8 ilr; + u16 resv_0x1c_h; + u16 resv_0x1c_l; + u16 resv_0x20_l; + u16 resv_0x20_h; + u16 resv_0x24_l; + u16 resv_0x24_h; + u16 resv_0x2c_h; + u16 resv_0x2c_l; + u32 pci_sn_l; + u32 pci_sn_h; + + spin_lock_irqsave(&tp->lock, flags); + + tp->esd_flag = 0; + + pci_read_config_byte(pdev, PCI_COMMAND, &cmd); + if (cmd != tp->pci_cfg_space.cmd) { + printk(KERN_ERR "%s: cmd = 0x%02x, should be 0x%02x \n.", dev->name, cmd, tp->pci_cfg_space.cmd); + pci_write_config_byte(pdev, PCI_COMMAND, tp->pci_cfg_space.cmd); + tp->esd_flag |= BIT_0; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_0, &io_base_l); + if (io_base_l != tp->pci_cfg_space.io_base_l) { + printk(KERN_ERR "%s: io_base_l = 0x%04x, should be 0x%04x \n.", dev->name, io_base_l, tp->pci_cfg_space.io_base_l); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_0, tp->pci_cfg_space.io_base_l); + tp->esd_flag |= BIT_1; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_2, &mem_base_l); + if (mem_base_l != tp->pci_cfg_space.mem_base_l) { + printk(KERN_ERR "%s: mem_base_l = 0x%04x, should be 0x%04x \n.", dev->name, mem_base_l, tp->pci_cfg_space.mem_base_l); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_2, tp->pci_cfg_space.mem_base_l); + tp->esd_flag |= BIT_2; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_2 + 2, &mem_base_h); + if (mem_base_h!= tp->pci_cfg_space.mem_base_h) { + printk(KERN_ERR "%s: mem_base_h = 0x%04x, should be 0x%04x \n.", dev->name, mem_base_h, tp->pci_cfg_space.mem_base_h); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_2 + 2, tp->pci_cfg_space.mem_base_h); + tp->esd_flag |= BIT_3; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_3, &resv_0x1c_l); + if (resv_0x1c_l != tp->pci_cfg_space.resv_0x1c_l) { + printk(KERN_ERR "%s: resv_0x1c_l = 0x%04x, should be 0x%04x \n.", dev->name, resv_0x1c_l, tp->pci_cfg_space.resv_0x1c_l); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_3, tp->pci_cfg_space.resv_0x1c_l); + tp->esd_flag |= BIT_4; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_3 + 2, &resv_0x1c_h); + if (resv_0x1c_h != tp->pci_cfg_space.resv_0x1c_h) { + printk(KERN_ERR "%s: resv_0x1c_h = 0x%04x, should be 0x%04x \n.", dev->name, resv_0x1c_h, tp->pci_cfg_space.resv_0x1c_h); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_3 + 2, tp->pci_cfg_space.resv_0x1c_h); + tp->esd_flag |= BIT_5; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_4, &resv_0x20_l); + if (resv_0x20_l != tp->pci_cfg_space.resv_0x20_l) { + printk(KERN_ERR "%s: resv_0x20_l = 0x%04x, should be 0x%04x \n.", dev->name, resv_0x20_l, tp->pci_cfg_space.resv_0x20_l); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_4, tp->pci_cfg_space.resv_0x20_l); + tp->esd_flag |= BIT_6; 
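+ /*
+  * Each block in this timer is one instance of the same scrub pattern:
+  * read back a PCI config-space word, compare it with the snapshot kept
+  * in tp->pci_cfg_space, rewrite the saved value if an ESD event
+  * corrupted it, and record which field glitched in esd_flag:
+  *
+  *   pci_read_config_word(pdev, OFF, &cur);
+  *   if (cur != tp->pci_cfg_space.saved_val) {
+  *       pci_write_config_word(pdev, OFF, tp->pci_cfg_space.saved_val);
+  *       tp->esd_flag |= BIT_n;       // remembered for the reset below
+  *   }
+  *
+  * (OFF, saved_val and BIT_n stand in for each block's concrete offset,
+  * snapshot field and flag bit.) A non-zero esd_flag triggers the full
+  * re-initialization at the end of the function.
+  */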
+ } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_4 + 2, &resv_0x20_h); + if (resv_0x20_h != tp->pci_cfg_space.resv_0x20_h) { + printk(KERN_ERR "%s: resv_0x20_h = 0x%04x, should be 0x%04x \n.", dev->name, resv_0x20_h, tp->pci_cfg_space.resv_0x20_h); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_4 + 2, tp->pci_cfg_space.resv_0x20_h); + tp->esd_flag |= BIT_7; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_5, &resv_0x24_l); + if (resv_0x24_l != tp->pci_cfg_space.resv_0x24_l) { + printk(KERN_ERR "%s: resv_0x24_l = 0x%04x, should be 0x%04x \n.", dev->name, resv_0x24_l, tp->pci_cfg_space.resv_0x24_l); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_5, tp->pci_cfg_space.resv_0x24_l); + tp->esd_flag |= BIT_8; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_5 + 2, &resv_0x24_h); + if (resv_0x24_h != tp->pci_cfg_space.resv_0x24_h) { + printk(KERN_ERR "%s: resv_0x24_h = 0x%04x, should be 0x%04x \n.", dev->name, resv_0x24_h, tp->pci_cfg_space.resv_0x24_h); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_5 + 2, tp->pci_cfg_space.resv_0x24_h); + tp->esd_flag |= BIT_9; + } + + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &ilr); + if (ilr != tp->pci_cfg_space.ilr) { + printk(KERN_ERR "%s: ilr = 0x%02x, should be 0x%02x \n.", dev->name, ilr, tp->pci_cfg_space.ilr); + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, tp->pci_cfg_space.ilr); + tp->esd_flag |= BIT_10; + } + + pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &resv_0x2c_l); + if (resv_0x2c_l != tp->pci_cfg_space.resv_0x2c_l) { + printk(KERN_ERR "%s: resv_0x2c_l = 0x%04x, should be 0x%04x \n.", dev->name, resv_0x2c_l, tp->pci_cfg_space.resv_0x2c_l); + pci_write_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, tp->pci_cfg_space.resv_0x2c_l); + tp->esd_flag |= BIT_11; + } + + pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID + 2, &resv_0x2c_h); + if (resv_0x2c_h != tp->pci_cfg_space.resv_0x2c_h) { + printk(KERN_ERR "%s: resv_0x2c_h = 0x%04x, should be 0x%04x \n.", dev->name, resv_0x2c_h, tp->pci_cfg_space.resv_0x2c_h); + pci_write_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID + 2, tp->pci_cfg_space.resv_0x2c_h); + tp->esd_flag |= BIT_12; + } + + if (tp->HwPcieSNOffset > 0) { + pci_sn_l = rtl8168_csi_read(tp, tp->HwPcieSNOffset); + if (pci_sn_l != tp->pci_cfg_space.pci_sn_l) { + printk(KERN_ERR "%s: pci_sn_l = 0x%08x, should be 0x%08x \n.", dev->name, pci_sn_l, tp->pci_cfg_space.pci_sn_l); + rtl8168_csi_write(tp, tp->HwPcieSNOffset, tp->pci_cfg_space.pci_sn_l); + tp->esd_flag |= BIT_13; + } + + pci_sn_h = rtl8168_csi_read(tp, tp->HwPcieSNOffset + 4); + if (pci_sn_h != tp->pci_cfg_space.pci_sn_h) { + printk(KERN_ERR "%s: pci_sn_h = 0x%08x, should be 0x%08x \n.", dev->name, pci_sn_h, tp->pci_cfg_space.pci_sn_h); + rtl8168_csi_write(tp, tp->HwPcieSNOffset + 4, tp->pci_cfg_space.pci_sn_h); + tp->esd_flag |= BIT_14; + } + } + + if (tp->TestPhyOcpReg && rtl8168_test_phy_ocp(tp)) + tp->esd_flag |= BIT_15; + + if (tp->esd_flag != 0) { + printk(KERN_ERR "%s: esd_flag = 0x%04x\n.\n", dev->name, tp->esd_flag); + netif_stop_queue(dev); + netif_carrier_off(dev); + rtl8168_hw_reset(dev); + rtl8168_tx_clear(tp); + rtl8168_rx_clear(tp); + rtl8168_init_ring(dev); + rtl8168_hw_init(dev); + rtl8168_powerup_pll(dev); + rtl8168_hw_ephy_config(dev); + rtl8168_hw_phy_config(dev); + rtl8168_hw_config(dev); + rtl8168_set_speed(dev, tp->autoneg, tp->speed, tp->duplex, tp->advertising); + tp->esd_flag = 0; + } + spin_unlock_irqrestore(&tp->lock, flags); + + mod_timer(timer, jiffies + timeout); +} + +static void +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) 
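+ /*
+  * Kernel 4.14 changed the timer callback ABI, which is what this pair
+  * of signatures handles: older kernels pass the opaque unsigned long
+  * handed to setup_timer(), while 4.14+ passes the struct timer_list
+  * itself and the owner is recovered with from_timer(), a container_of()
+  * wrapper:
+  *
+  *   tp = from_timer(tp, t, link_timer);
+  *   // expands to container_of(t, struct rtl8168_private, link_timer)
+  */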
+rtl8168_link_timer(unsigned long __opaque) +#else +rtl8168_link_timer(struct timer_list *t) +#endif +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + struct net_device *dev = (struct net_device *)__opaque; + struct rtl8168_private *tp = netdev_priv(dev); + struct timer_list *timer = &tp->link_timer; +#else + struct rtl8168_private *tp = from_timer(tp, t, link_timer); + struct net_device *dev = tp->dev; + struct timer_list *timer = t; +#endif + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_check_link_status(dev); + spin_unlock_irqrestore(&tp->lock, flags); + + mod_timer(timer, jiffies + RTL8168_LINK_TIMEOUT); +} + +/* Cfg9346_Unlock assumed. */ +static unsigned rtl8168_try_msi(struct pci_dev *pdev, struct rtl8168_private *tp) +{ + unsigned msi = 0; + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + case CFG_METHOD_4: + case CFG_METHOD_5: + case CFG_METHOD_6: + case CFG_METHOD_7: + case CFG_METHOD_8: + dev_info(&pdev->dev, "Default use INTx.\n"); + break; + default: + if (pci_enable_msi(pdev)) + dev_info(&pdev->dev, "no MSI. Back to INTx.\n"); + else + msi |= RTL_FEATURE_MSI; + break; + } +#endif + + return msi; +} + +static void rtl8168_disable_msi(struct pci_dev *pdev, struct rtl8168_private *tp) +{ + if (tp->features & RTL_FEATURE_MSI) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) + pci_disable_msi(pdev); +#endif + tp->features &= ~RTL_FEATURE_MSI; + } +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) +static const struct net_device_ops rtl8168_netdev_ops = { + .ndo_open = rtl8168_open, + .ndo_stop = rtl8168_close, + .ndo_get_stats = rtl8168_get_stats, + .ndo_start_xmit = rtl8168_start_xmit, + .ndo_tx_timeout = rtl8168_tx_timeout, + .ndo_change_mtu = rtl8168_change_mtu, + .ndo_set_mac_address = rtl8168_set_mac_address, + .ndo_do_ioctl = rtl8168_do_ioctl, +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) + .ndo_set_multicast_list = rtl8168_set_rx_mode, +#else + .ndo_set_rx_mode = rtl8168_set_rx_mode, +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) +#ifdef CONFIG_R8168_VLAN + .ndo_vlan_rx_register = rtl8168_vlan_rx_register, +#endif +#else + .ndo_fix_features = rtl8168_fix_features, + .ndo_set_features = rtl8168_set_features, +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8168_netpoll, +#endif +}; +#endif + +static int __devinit +rtl8168_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev = NULL; + struct rtl8168_private *tp; + void __iomem *ioaddr = NULL; + static int board_idx = -1; + + int rc; + + assert(pdev != NULL); + assert(ent != NULL); + + board_idx++; + + if (netif_msg_drv(&debug)) + printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", + MODULENAME, RTL8168_VERSION); + + rc = rtl8168_init_board(pdev, &dev, &ioaddr); + if (rc) + goto out; + + tp = netdev_priv(dev); + assert(ioaddr != NULL); + + tp->set_speed = rtl8168_set_speed_xmii; + tp->get_settings = rtl8168_gset_xmii; + tp->phy_reset_enable = rtl8168_xmii_reset_enable; + tp->phy_reset_pending = rtl8168_xmii_reset_pending; + tp->link_ok = rtl8168_xmii_link_ok; + + tp->features |= rtl8168_try_msi(pdev, tp); + + RTL_NET_DEVICE_OPS(rtl8168_netdev_ops); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) + SET_ETHTOOL_OPS(dev, &rtl8168_ethtool_ops); +#endif + + dev->watchdog_timeo = RTL8168_TX_TIMEOUT; + dev->irq = pdev->irq; + dev->base_addr = (unsigned long) ioaddr; + +#ifdef CONFIG_R8168_NAPI + RTL_NAPI_CONFIG(dev, tp, 
rtl8168_poll, R8168_NAPI_WEIGHT); +#endif + +#ifdef CONFIG_R8168_VLAN + if (tp->mcfg != CFG_METHOD_DEFAULT) { + dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + dev->vlan_rx_kill_vid = rtl8168_vlan_rx_kill_vid; +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + } +#endif + + /* There has been a number of reports that using SG/TSO results in + * tx timeouts. However for a lot of people SG/TSO works fine. + * Therefore disable both features by default, but allow users to + * enable them. Use at own risk! + */ + tp->cp_cmd |= RTL_R16(tp, CPlusCmd); + if (tp->mcfg != CFG_METHOD_DEFAULT) { + dev->features |= NETIF_F_IP_CSUM; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + tp->cp_cmd |= RxChkSum; +#else + dev->features |= NETIF_F_RXCSUM; + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_HIGHDMA; + if ((tp->mcfg != CFG_METHOD_16) && (tp->mcfg != CFG_METHOD_17)) { + //dev->features |= NETIF_F_TSO; + dev->hw_features |= NETIF_F_TSO; + dev->vlan_features |= NETIF_F_TSO; + } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) + if ((tp->mcfg == CFG_METHOD_1) || (tp->mcfg == CFG_METHOD_2) || (tp->mcfg == CFG_METHOD_3)) { + dev->hw_features &= ~NETIF_F_IPV6_CSUM; + netif_set_gso_max_size(dev, LSO_32K); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0) + dev->gso_max_segs = NIC_MAX_PHYS_BUF_COUNT_LSO_64K; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) + dev->gso_min_segs = NIC_MIN_PHYS_BUF_COUNT; +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0) + } else { + dev->hw_features |= NETIF_F_IPV6_CSUM; + dev->features |= NETIF_F_IPV6_CSUM; + if ((tp->mcfg != CFG_METHOD_16) && (tp->mcfg != CFG_METHOD_17)) { + dev->hw_features |= NETIF_F_TSO6; + //dev->features |= NETIF_F_TSO6; + } + netif_set_gso_max_size(dev, LSO_64K); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0) + dev->gso_max_segs = NIC_MAX_PHYS_BUF_COUNT_LSO2; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) + dev->gso_min_segs = NIC_MIN_PHYS_BUF_COUNT; +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0) + } +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + } + + tp->pci_dev = pdev; + + spin_lock_init(&tp->lock); + + rtl8168_init_software_variable(dev); + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) + AllocateDashShareMemory(dev); +#endif + + rtl8168_exit_oob(dev); + + rtl8168_hw_init(dev); + + rtl8168_hw_reset(dev); + + /* Get production from EEPROM */ + if (((tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_29 || + tp->mcfg == CFG_METHOD_30) && (rtl8168_mac_ocp_read(tp, 0xDC00) & BIT_3)) || + ((tp->mcfg == CFG_METHOD_26) && (rtl8168_mac_ocp_read(tp, 0xDC00) & BIT_4))) + tp->eeprom_type = EEPROM_TYPE_NONE; + else + rtl8168_eeprom_type(tp); + + if (tp->eeprom_type == EEPROM_TYPE_93C46 || tp->eeprom_type == EEPROM_TYPE_93C56) + rtl8168_set_eeprom_sel_low(tp); + + rtl8168_get_mac_address(dev); + + tp->fw_name = rtl_chip_fw_infos[tp->mcfg].fw_name; + +#if defined(ENABLE_DASH_PRINTER_SUPPORT) + 
init_completion(&tp->fw_host_ok); + init_completion(&tp->fw_ack); + init_completion(&tp->fw_req); +#endif + + tp->tally_vaddr = dma_alloc_coherent(&pdev->dev, sizeof(*tp->tally_vaddr), + &tp->tally_paddr, GFP_KERNEL); + if (!tp->tally_vaddr) { + rc = -ENOMEM; + goto err_out; + } + + rtl8168_tally_counter_clear(tp); + + pci_set_drvdata(pdev, dev); + + rc = register_netdev(dev); + if (rc) + goto err_out; + + printk(KERN_INFO "%s: This product is covered by one or more of the following patents: US6,570,884, US6,115,776, and US6,327,625.\n", MODULENAME); + + rtl8168_disable_rxdvgate(dev); + + device_set_wakeup_enable(&pdev->dev, tp->wol_enabled); + + netif_carrier_off(dev); + + printk("%s", GPL_CLAIM); + +out: + return rc; + +err_out: + if (tp->tally_vaddr != NULL) { + dma_free_coherent(&pdev->dev, sizeof(*tp->tally_vaddr), tp->tally_vaddr, + tp->tally_paddr); + + tp->tally_vaddr = NULL; + } +#ifdef CONFIG_R8168_NAPI + RTL_NAPI_DEL(tp); +#endif + rtl8168_disable_msi(pdev, tp); + rtl8168_release_board(pdev, dev); + + goto out; +} + +static void __devexit +rtl8168_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8168_private *tp = netdev_priv(dev); + + assert(dev != NULL); + assert(tp != NULL); + +#ifdef CONFIG_R8168_NAPI + RTL_NAPI_DEL(tp); +#endif + if (HW_DASH_SUPPORT_DASH(tp)) + rtl8168_driver_stop(tp); + + unregister_netdev(dev); + rtl8168_disable_msi(pdev, tp); +#ifdef ENABLE_R8168_PROCFS + rtl8168_proc_remove(dev); +#endif + if (tp->tally_vaddr != NULL) { + dma_free_coherent(&pdev->dev, sizeof(*tp->tally_vaddr), tp->tally_vaddr, tp->tally_paddr); + tp->tally_vaddr = NULL; + } + + rtl8168_release_board(pdev, dev); + +#ifdef ENABLE_USE_FIRMWARE_FILE + rtl8168_release_firmware(tp); +#endif + + pci_set_drvdata(pdev, NULL); +} + +static void +rtl8168_set_rxbufsize(struct rtl8168_private *tp, + struct net_device *dev) +{ + unsigned int mtu = dev->mtu; + + tp->rx_buf_sz = (mtu > ETH_DATA_LEN) ? mtu + ETH_HLEN + 8 + 1 : RX_BUF_SIZE; +} + +#ifdef ENABLE_USE_FIRMWARE_FILE +static void rtl8168_request_firmware(struct rtl8168_private *tp) +{ + struct rtl8168_fw *rtl_fw; + + /* firmware loaded already or no firmware available */ + if (tp->rtl_fw || !tp->fw_name) + return; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + return; + + rtl_fw->phy_write = rtl8168_mdio_write; + rtl_fw->phy_read = rtl8168_mdio_read; + rtl_fw->mac_mcu_write = mac_mcu_write; + rtl_fw->mac_mcu_read = mac_mcu_read; + rtl_fw->fw_name = tp->fw_name; + rtl_fw->dev = tp_to_dev(tp); + + if (rtl8168_fw_request_firmware(rtl_fw)) + kfree(rtl_fw); + else + tp->rtl_fw = rtl_fw; +} +#endif + +static int rtl8168_open(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + unsigned long flags; + int retval; + + retval = -ENOMEM; + +#ifdef ENABLE_R8168_PROCFS + rtl8168_proc_init(dev); +#endif + rtl8168_set_rxbufsize(tp, dev); + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * pci_alloc_consistent provides more. 
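+ * dma_alloc_coherent(), used below, carries the same guarantee: per the
+ * kernel DMA-API documentation, the returned CPU and DMA addresses are
+ * aligned to at least the smallest PAGE_SIZE order covering the requested
+ * size, which comfortably satisfies the 256-byte constraint.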
+ */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8168_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto err_free_all_allocated_mem; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8168_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_all_allocated_mem; + + retval = rtl8168_init_ring(dev); + if (retval < 0) + goto err_free_all_allocated_mem; + + retval = request_irq(dev->irq, rtl8168_interrupt, (tp->features & RTL_FEATURE_MSI) ? 0 : SA_SHIRQ, dev->name, dev); + if (retval<0) + goto err_free_all_allocated_mem; + + if (netif_msg_probe(tp)) { + printk(KERN_INFO "%s: 0x%lx, " + "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, " + "IRQ %d\n", + dev->name, + dev->base_addr, + dev->dev_addr[0], dev->dev_addr[1], + dev->dev_addr[2], dev->dev_addr[3], + dev->dev_addr[4], dev->dev_addr[5], dev->irq); + } + +#ifdef ENABLE_USE_FIRMWARE_FILE + rtl8168_request_firmware(tp); +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + INIT_WORK(&tp->task, rtl8168_reset_task, dev); +#else + INIT_DELAYED_WORK(&tp->task, rtl8168_reset_task); +#endif + + pci_set_master(pdev); + +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + RTL_NAPI_ENABLE(dev, &tp->napi); +#endif +#endif + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_exit_oob(dev); + + rtl8168_hw_init(dev); + + rtl8168_hw_reset(dev); + + rtl8168_powerup_pll(dev); + + rtl8168_hw_ephy_config(dev); + + rtl8168_hw_phy_config(dev); + + rtl8168_hw_config(dev); + + rtl8168_dsm(dev, DSM_IF_UP); + + rtl8168_set_speed(dev, tp->autoneg, tp->speed, tp->duplex, tp->advertising); + + spin_unlock_irqrestore(&tp->lock, flags); + + if (tp->esd_flag == 0) + rtl8168_request_esd_timer(dev); + + rtl8168_request_link_timer(dev); + +out: + + return retval; + +err_free_all_allocated_mem: + if (tp->RxDescArray != NULL) { + dma_free_coherent(&pdev->dev, R8168_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; + } + + if (tp->TxDescArray != NULL) { + dma_free_coherent(&pdev->dev, R8168_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + } + + goto out; +} + +static void +rtl8168_dsm(struct net_device *dev, int dev_state) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (dev_state) { + case DSM_MAC_INIT: + if ((tp->mcfg == CFG_METHOD_5) || (tp->mcfg == CFG_METHOD_6)) { + if (RTL_R8(tp, MACDBG) & 0x80) + RTL_W8(tp, GPIO, RTL_R8(tp, GPIO) | GPIO_en); + else + RTL_W8(tp, GPIO, RTL_R8(tp, GPIO) & ~GPIO_en); + } + + break; + case DSM_NIC_GOTO_D3: + case DSM_IF_DOWN: + if ((tp->mcfg == CFG_METHOD_5) || (tp->mcfg == CFG_METHOD_6)) { + if (RTL_R8(tp, MACDBG) & 0x80) + RTL_W8(tp, GPIO, RTL_R8(tp, GPIO) & ~GPIO_en); + } + break; + + case DSM_NIC_RESUME_D3: + case DSM_IF_UP: + if ((tp->mcfg == CFG_METHOD_5) || (tp->mcfg == CFG_METHOD_6)) { + if (RTL_R8(tp, MACDBG) & 0x80) + RTL_W8(tp, GPIO, RTL_R8(tp, GPIO) | GPIO_en); + } + + break; + } + +} + +static void +set_offset70F(struct rtl8168_private *tp, u8 setting) +{ + u32 csi_tmp; + u32 temp = (u32)setting; + temp = temp << 24; + /*set PCI configuration space offset 0x70F to setting*/ + /*When the register offset of PCI configuration space larger than 0xff, use CSI to access it.*/ + + csi_tmp = rtl8168_csi_read(tp, 0x70c) & 0x00ffffff; + rtl8168_csi_write(tp, 0x70c, csi_tmp | temp); +} + +static void +set_offset79(struct rtl8168_private *tp, u8 setting) +{ + //Set PCI configuration space offset 0x79 to setting + + struct pci_dev *pdev = tp->pci_dev; + u8 device_control; + + if 
(hwoptimize & HW_PATCH_SOC_LAN) return; + + pci_read_config_byte(pdev, 0x79, &device_control); + device_control &= ~0x70; + device_control |= setting; + pci_write_config_byte(pdev, 0x79, device_control); +} + +static void +set_offset711(struct rtl8168_private *tp, u8 setting) +{ + u32 csi_tmp; + u32 temp = (u32)setting; + temp &= 0x0f; + temp = temp << 12; + /*set PCI configuration space offset 0x711 to setting*/ + + csi_tmp = rtl8168_csi_read(tp, 0x710) & 0xffff0fff; + rtl8168_csi_write(tp, 0x710, csi_tmp | temp); +} + +static void +rtl8168_hw_set_rx_packet_filter(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 mc_filter[2]; /* Multicast hash filter */ + int rx_mode; + u32 tmp = 0; + + if (dev->flags & IFF_PROMISC) { + /* Unconditionally log net taps. */ + if (netif_msg_link(tp)) + printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", + dev->name); + + rx_mode = + AcceptBroadcast | AcceptMulticast | AcceptMyPhys | + AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) + || (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. */ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) + struct dev_mc_list *mclist; + unsigned int i; + + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; + i++, mclist = mclist->next) { + int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } +#else + struct netdev_hw_addr *ha; + + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } +#endif + } + + if (dev->features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + + tmp = mc_filter[0]; + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(tmp); + + tp->rtl8168_rx_config = rtl_chip_info[tp->chipset].RCR_Cfg; + tmp = tp->rtl8168_rx_config | rx_mode | (RTL_R32(tp, RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask); + + RTL_W32(tp, RxConfig, tmp); + RTL_W32(tp, MAR0 + 0, mc_filter[0]); + RTL_W32(tp, MAR0 + 4, mc_filter[1]); +} + +static void +rtl8168_set_rx_mode(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_hw_set_rx_packet_filter(dev); + + spin_unlock_irqrestore(&tp->lock, flags); +} + +static void +rtl8168_hw_config(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + u8 device_control; + u16 mac_ocp_data; + u32 csi_tmp; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + if (dev->mtu > ETH_DATA_LEN) { + dev->features &= ~(NETIF_F_IP_CSUM); + } else { + dev->features |= NETIF_F_IP_CSUM; + } +#endif + + RTL_W32(tp, RxConfig, (RX_DMA_BURST << RxCfgDMAShift)); + + rtl8168_hw_reset(dev); + + rtl8168_enable_cfg9346_write(tp); + if (tp->HwSuppAspmClkIntrLock) { + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) & ~BIT_7); + rtl8168_hw_aspm_clkreq_enable(tp, false); + } + + //clear io_rdy_l23 + switch (tp->mcfg) { + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case 
CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~BIT_1); + break; + } + + //keep magic packet only + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0xDE, 1, ERIAR_ExGMAC); + csi_tmp &= BIT_0; + rtl8168_eri_write(tp, 0xDE, 1, csi_tmp, ERIAR_ExGMAC); + break; + } + + RTL_W8(tp, MTPS, Reserved1_data); + + tp->cp_cmd |= INTT_1; + if (tp->use_timer_interrrupt) + tp->cp_cmd |= PktCntrDisable; + else + tp->cp_cmd &= ~PktCntrDisable; + + RTL_W16(tp, IntrMitigate, 0x5f51); + + rtl8168_tally_counter_addr_fill(tp); + + rtl8168_desc_addr_fill(tp); + + /* Set DMA burst size and Interframe Gap Time */ + if (tp->mcfg == CFG_METHOD_1) + RTL_W32(tp, TxConfig, (TX_DMA_BURST_512 << TxDMAShift) | + (InterFrameGap << TxInterFrameGapShift)); + else + RTL_W32(tp, TxConfig, (TX_DMA_BURST_unlimited << TxDMAShift) | + (InterFrameGap << TxInterFrameGapShift)); + + if (tp->mcfg == CFG_METHOD_4) { + set_offset70F(tp, 0x27); + + RTL_W8(tp, DBG_reg, (0x0E << 4) | Fix_Nak_1 | Fix_Nak_2); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + //disable clock request. + pci_write_config_byte(pdev, 0x81, 0x00); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + set_offset79(tp, 0x50); + } + + //rx checksum offload enable +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + tp->cp_cmd |= RxChkSum; +#else + dev->features |= NETIF_F_RXCSUM; +#endif + + tp->cp_cmd &= ~(EnableBist | Macdbgo_oe | Force_halfdup | + Force_rxflow_en | Force_txflow_en | Cxpl_dbg_sel | + ASF | PktCntrDisable | Macdbgo_sel); + } else if (tp->mcfg == CFG_METHOD_5) { + + set_offset70F(tp, 0x27); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + //disable clock request. + pci_write_config_byte(pdev, 0x81, 0x00); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + set_offset79(tp, 0x50); + } + + //rx checksum offload enable +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + tp->cp_cmd |= RxChkSum; +#else + dev->features |= NETIF_F_RXCSUM; +#endif + } else if (tp->mcfg == CFG_METHOD_6) { + set_offset70F(tp, 0x27); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + //disable clock request. 
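+ // (Same move as the CFG_METHOD_4/5 paths above: zeroing the chip-specific
+ // PCI config byte at offset 0x81 de-asserts the clock-request feature.)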
+ pci_write_config_byte(pdev, 0x81, 0x00); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + set_offset79(tp, 0x50); + } + + //rx checksum offload enable +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + tp->cp_cmd |= RxChkSum; +#else + dev->features |= NETIF_F_RXCSUM; +#endif + } else if (tp->mcfg == CFG_METHOD_7) { + set_offset70F(tp, 0x27); + + rtl8168_eri_write(tp, 0x1EC, 1, 0x07, ERIAR_ASF); + + //disable clock request. + pci_write_config_byte(pdev, 0x81, 0x00); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + + set_offset79(tp, 0x50); + } + } else if (tp->mcfg == CFG_METHOD_8) { + + set_offset70F(tp, 0x27); + + rtl8168_eri_write(tp, 0x1EC, 1, 0x07, ERIAR_ASF); + + //disable clock request. + pci_write_config_byte(pdev, 0x81, 0x00); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + RTL_W8(tp, 0xD1, 0x20); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + set_offset79(tp, 0x50); + } + } else if (tp->mcfg == CFG_METHOD_9) { + set_offset70F(tp, 0x27); + + /* disable clock request. */ + pci_write_config_byte(pdev, 0x81, 0x00); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~BIT_4); + RTL_W8(tp, DBG_reg, RTL_R8(tp, DBG_reg) | BIT_7 | BIT_1); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + set_offset79(tp, 0x50); + } + + RTL_W8(tp, TDFNR, 0x8); + + } else if (tp->mcfg == CFG_METHOD_10) { + set_offset70F(tp, 0x27); + + RTL_W8(tp, DBG_reg, RTL_R8(tp, DBG_reg) | BIT_7 | BIT_1); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + set_offset79(tp, 0x50); + } + + RTL_W8(tp, TDFNR, 0x8); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | 0x10); + + /* disable clock request. 
*/ + pci_write_config_byte(pdev, 0x81, 0x00); + } else if (tp->mcfg == CFG_METHOD_11 || tp->mcfg == CFG_METHOD_13) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + else + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + + pci_write_config_byte(pdev, 0x81, 0x00); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | 0x10); + + } else if (tp->mcfg == CFG_METHOD_12) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + else + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + + pci_write_config_byte(pdev, 0x81, 0x01); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | 0x10); + + } else if (tp->mcfg == CFG_METHOD_14 || tp->mcfg == CFG_METHOD_15) { + + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, MTPS, 0x24); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); + } + + RTL_W8(tp, 0xF3, RTL_R8(tp, 0xF3) | BIT_5); + RTL_W8(tp, 0xF3, RTL_R8(tp, 0xF3) & ~BIT_5); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_7 | BIT_6); + + RTL_W8(tp, 0xD1, RTL_R8(tp, 0xD1) | BIT_2 | BIT_3); + + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_6 | BIT_5 | BIT_4 | BIT_2 | BIT_1); + + RTL_W8(tp, TDFNR, 0x8); + + /* + if (aspm) + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_7); + */ + + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~BIT_3); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10); + } else if (tp->mcfg == CFG_METHOD_16 || tp->mcfg == CFG_METHOD_17) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + rtl8168_eri_write(tp, 0xC0, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xB8, 4, 0x00000000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xC8, 4, 0x00100002, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xE8, 4, 0x00100006, ERIAR_ExGMAC); + csi_tmp = rtl8168_eri_read(tp, 0x1D0, 4, ERIAR_ExGMAC); + csi_tmp |= BIT_1; + rtl8168_eri_write(tp, 0x1D0, 1, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0xDC, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | BIT_7); + RTL_W8(tp, 0xD3, RTL_R8(tp, 0xD3) & ~BIT_7); + RTL_W8(tp, 0x1B, RTL_R8(tp, 0x1B) & ~0x07); + + if (tp->mcfg == CFG_METHOD_16) { + RTL_W32(tp, 0xB0, 0xEE480010); + RTL_W8(tp, 0x1A, RTL_R8(tp, 0x1A) & ~(BIT_2|BIT_3)); + rtl8168_eri_write(tp, 0x1DC, 1, 0x64, ERIAR_ExGMAC); + } else { + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp |= BIT_4; + rtl8168_eri_write(tp, 0x1B0, 1, csi_tmp, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xCC, 4, 0x00000050, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xD0, 4, 0x07ff0060, ERIAR_ExGMAC); + } + + RTL_W8(tp, TDFNR, 0x8); + + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~PMSTS_En); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_6); + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, MTPS, 0x27); + + /* disable clock request. 
*/ + pci_write_config_byte(pdev, 0x81, 0x00); + + } else if (tp->mcfg == CFG_METHOD_18 || tp->mcfg == CFG_METHOD_19) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + rtl8168_eri_write(tp, 0xC8, 4, 0x00100002, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xE8, 4, 0x00100006, ERIAR_ExGMAC); + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | BIT_7); + RTL_W8(tp, 0xD3, RTL_R8(tp, 0xD3) & ~BIT_7); + csi_tmp = rtl8168_eri_read(tp, 0xDC, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + + /* + if (aspm) + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_7); + */ + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, MTPS, 0x27); + + RTL_W8(tp, TDFNR, 0x8); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_6); + + rtl8168_eri_write(tp, 0xC0, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xB8, 4, 0x00000000, ERIAR_ExGMAC); + RTL_W8(tp, 0x1B,RTL_R8(tp, 0x1B) & ~0x07); + + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_4; + rtl8168_eri_write(tp, 0x1B0, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp = rtl8168_eri_read(tp, 0x1d0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_4 | BIT_1; + rtl8168_eri_write(tp, 0x1d0, 1, csi_tmp, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xCC, 4, 0x00000050, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xd0, 4, 0x00000060, ERIAR_ExGMAC); + } else if (tp->mcfg == CFG_METHOD_20) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + rtl8168_eri_write(tp, 0xC8, 4, 0x00100002, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xE8, 4, 0x00100006, ERIAR_ExGMAC); + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | BIT_7); + RTL_W8(tp, 0xD3, RTL_R8(tp, 0xD3) & ~BIT_7); + csi_tmp = rtl8168_eri_read(tp, 0xDC, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + + /* + if (aspm) + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_7); + */ + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, MTPS, 0x27); + + RTL_W8(tp, TDFNR, 0x8); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_6); + rtl8168_eri_write(tp, 0xC0, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xB8, 4, 0x00000000, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_4; + rtl8168_eri_write(tp, 0x1B0, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp = rtl8168_eri_read(tp, 0x1d0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_4 | BIT_1; + rtl8168_eri_write(tp, 0x1d0, 1, csi_tmp, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xCC, 4, 0x00000050, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xd0, 4, 0x00000060, ERIAR_ExGMAC); + } else if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_24 || tp->mcfg == CFG_METHOD_25 || + tp->mcfg == CFG_METHOD_26 || tp->mcfg == CFG_METHOD_29 || + tp->mcfg == CFG_METHOD_30) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22) + set_offset711(tp, 0x04); + + rtl8168_eri_write(tp, 0xC8, 4, 0x00080002, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xCC, 1, 0x38, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xD0, 1, 0x48, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xE8, 4, 0x00100006, ERIAR_ExGMAC); + + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | BIT_7); + + csi_tmp = rtl8168_eri_read(tp, 0xDC, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + 
rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + + if (tp->mcfg == CFG_METHOD_26) { + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD3C0); + mac_ocp_data &= ~(BIT_11 | BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0); + mac_ocp_data |= 0x0FFF; + rtl8168_mac_ocp_write(tp, 0xD3C0, mac_ocp_data); + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD3C2); + mac_ocp_data &= ~(BIT_7 | BIT_6 | BIT_5 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0); + rtl8168_mac_ocp_write(tp, 0xD3C2, mac_ocp_data); + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD3C4); + mac_ocp_data |= BIT_0; + rtl8168_mac_ocp_write(tp, 0xD3C4, mac_ocp_data); + } else if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30) { + + if (tp->RequireAdjustUpsTxLinkPulseTiming) { + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD412); + mac_ocp_data &= ~(0x0FFF); + mac_ocp_data |= tp->SwrCnt1msIni; + rtl8168_mac_ocp_write(tp, 0xD412, mac_ocp_data); + } + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xE056); + mac_ocp_data &= ~(BIT_7 | BIT_6 | BIT_5 | BIT_4); + //mac_ocp_data |= (BIT_6 | BIT_5 | BIT_4); + rtl8168_mac_ocp_write(tp, 0xE056, mac_ocp_data); + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xE052); + mac_ocp_data &= ~(BIT_15 | BIT_14 | BIT_13 | BIT_3); + mac_ocp_data |= BIT_15; + //mac_ocp_data |= BIT_3; + rtl8168_mac_ocp_write(tp, 0xE052, mac_ocp_data); + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD420); + mac_ocp_data &= ~(BIT_11 | BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0); + mac_ocp_data |= 0x45F; + rtl8168_mac_ocp_write(tp, 0xD420, mac_ocp_data); + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xE0D6); + mac_ocp_data &= ~(BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0); + mac_ocp_data |= 0x17F; + rtl8168_mac_ocp_write(tp, 0xE0D6, mac_ocp_data); + } + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + RTL_W8(tp, 0x1B, RTL_R8(tp, 0x1B) & ~0x07); + + RTL_W8(tp, TDFNR, 0x4); + + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~PMSTS_En); + + /* + if (aspm) + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_7); + */ + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, MTPS, 0x27); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_6); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_7); + + rtl8168_eri_write(tp, 0xC0, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xB8, 4, 0x00000000, ERIAR_ExGMAC); + + if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30) { + rtl8168_mac_ocp_write(tp, 0xE054, 0x0000); + + rtl8168_eri_write(tp, 0x5F0, 2, 0x4000, ERIAR_ExGMAC); + } else { + rtl8168_eri_write(tp, 0x5F0, 2, 0x4F87, ERIAR_ExGMAC); + } + + if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30) { + csi_tmp = rtl8168_eri_read(tp, 0xDC, 4, ERIAR_ExGMAC); + csi_tmp |= (BIT_2 | BIT_3 | BIT_4); + rtl8168_eri_write(tp, 0xDC, 4, csi_tmp, ERIAR_ExGMAC); + } + + if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_24 || tp->mcfg == CFG_METHOD_25) { + rtl8168_mac_ocp_write(tp, 0xC140, 0xFFFF); + } else if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30) { + rtl8168_mac_ocp_write(tp, 0xC140, 0xFFFF); + rtl8168_mac_ocp_write(tp, 0xC142, 0xFFFF); + } + + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp &= ~BIT_12; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + + if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30) { + csi_tmp = rtl8168_eri_read(tp, 0x2FC, 1, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_2); + rtl8168_eri_write(tp, 0x2FC, 1, csi_tmp, ERIAR_ExGMAC); 
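+ // (CFG_METHOD_29/30 only clear BIT_2 of ERI reg 0x2FC here; the else
+ // branch below instead clears BIT_0..BIT_2 and re-sets BIT_0 for the
+ // remaining chip revisions.)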
+ } else { + csi_tmp = rtl8168_eri_read(tp, 0x2FC, 1, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_0 | BIT_1 | BIT_2); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0x2FC, 1, csi_tmp, ERIAR_ExGMAC); + } + + csi_tmp = rtl8168_eri_read(tp, 0x1D0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_1; + rtl8168_eri_write(tp, 0x1D0, 1, csi_tmp, ERIAR_ExGMAC); + } else if (tp->mcfg == CFG_METHOD_23 || tp->mcfg == CFG_METHOD_27 || + tp->mcfg == CFG_METHOD_28) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + rtl8168_eri_write(tp, 0xC8, 4, 0x00080002, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xCC, 1, 0x2F, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xD0, 1, 0x5F, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xE8, 4, 0x00100006, ERIAR_ExGMAC); + + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | BIT_7); + + csi_tmp = rtl8168_eri_read(tp, 0xDC, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_6); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_7); + + rtl8168_eri_write(tp, 0xC0, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xB8, 4, 0x00000000, ERIAR_ExGMAC); + RTL_W8(tp, 0x1B, RTL_R8(tp, 0x1B) & ~0x07); + + RTL_W8(tp, TDFNR, 0x4); + + /* + if (aspm) + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_7); + */ + + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp &= ~BIT_12; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x2FC, 1, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_0 | BIT_1 | BIT_2); + csi_tmp |= (BIT_0 | BIT_1); + rtl8168_eri_write(tp, 0x2FC, 1, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x1D0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_1; + rtl8168_eri_write(tp, 0x1D0, 1, csi_tmp, ERIAR_ExGMAC); + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, MTPS, 0x27); + + if (tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28) { + rtl8168_oob_mutex_lock(tp); + rtl8168_eri_write(tp, 0x5F0, 2, 0x4F87, ERIAR_ExGMAC); + rtl8168_oob_mutex_unlock(tp); + } + + rtl8168_mac_ocp_write(tp, 0xC140, 0xFFFF); + rtl8168_mac_ocp_write(tp, 0xC142, 0xFFFF); + + if (tp->mcfg == CFG_METHOD_28) { + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD3E2); + mac_ocp_data &= 0xF000; + mac_ocp_data |= 0xAFD; + rtl8168_mac_ocp_write(tp, 0xD3E2, mac_ocp_data); + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD3E4); + mac_ocp_data &= 0xFF00; + rtl8168_mac_ocp_write(tp, 0xD3E4, mac_ocp_data); + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xE860); + mac_ocp_data |= BIT_7; + rtl8168_mac_ocp_write(tp, 0xE860, mac_ocp_data); + } + } else if (tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + rtl8168_eri_write(tp, 0xC8, 4, 0x00080002, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xCC, 1, 0x2F, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xD0, 1, 0x5F, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xE8, 4, 0x00100006, ERIAR_ExGMAC); + + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | BIT_7); + + csi_tmp = rtl8168_eri_read(tp, 0xDC, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + + if (tp->RequireAdjustUpsTxLinkPulseTiming) { + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD412); + mac_ocp_data &= ~(0x0FFF); + mac_ocp_data |= tp->SwrCnt1msIni; + 
rtl8168_mac_ocp_write(tp, 0xD412, mac_ocp_data); + } + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xE056); + mac_ocp_data &= ~(BIT_7 | BIT_6 | BIT_5 | BIT_4); + rtl8168_mac_ocp_write(tp, 0xE056, mac_ocp_data); + if (FALSE == HW_SUPP_SERDES_PHY(tp)) + rtl8168_mac_ocp_write(tp, 0xEA80, 0x0003); + else + rtl8168_mac_ocp_write(tp, 0xEA80, 0x0000); + + rtl8168_oob_mutex_lock(tp); + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xE052); + mac_ocp_data &= ~(BIT_3 | BIT_0); + rtl8168_mac_ocp_write(tp, 0xE052, mac_ocp_data); + rtl8168_oob_mutex_unlock(tp); + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD420); + mac_ocp_data &= ~(BIT_11 | BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0); + mac_ocp_data |= 0x45F; + rtl8168_mac_ocp_write(tp, 0xD420, mac_ocp_data); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + RTL_W8(tp, 0x1B, RTL_R8(tp, 0x1B) & ~0x07); + + RTL_W8(tp, TDFNR, 0x4); + + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~PMSTS_En); + + /* + if (aspm) + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_7); + */ + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, MTPS, 0x27); + + if (FALSE == HW_SUPP_SERDES_PHY(tp)) { + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_6); + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_7); + } else { + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) & ~BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) & ~BIT_6); + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) & ~BIT_7); + } + + rtl8168_eri_write(tp, 0xC0, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xB8, 4, 0x00000000, ERIAR_ExGMAC); + + rtl8168_oob_mutex_lock(tp); + rtl8168_eri_write(tp, 0x5F0, 2, 0x4000, ERIAR_ExGMAC); + rtl8168_oob_mutex_unlock(tp); + + if (tp->mcfg == CFG_METHOD_32 || tp->mcfg == CFG_METHOD_33) { + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp |= BIT_4; + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + } + + rtl8168_mac_ocp_write(tp, 0xC140, 0xFFFF); + rtl8168_mac_ocp_write(tp, 0xC142, 0xFFFF); + + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp &= ~BIT_12; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x2FC, 1, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_0 | BIT_1); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0x2FC, 1, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x1D0, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_1; + rtl8168_eri_write(tp, 0x1D0, 1, csi_tmp, ERIAR_ExGMAC); + } else if (tp->mcfg == CFG_METHOD_1) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + if (dev->mtu > ETH_DATA_LEN) { + pci_read_config_byte(pdev, 0x69, &device_control); + device_control &= ~0x70; + device_control |= 0x28; + pci_write_config_byte(pdev, 0x69, device_control); + } else { + pci_read_config_byte(pdev, 0x69, &device_control); + device_control &= ~0x70; + device_control |= 0x58; + pci_write_config_byte(pdev, 0x69, device_control); + } + } else if (tp->mcfg == CFG_METHOD_2) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + if (dev->mtu > ETH_DATA_LEN) { + pci_read_config_byte(pdev, 0x69, &device_control); + device_control &= ~0x70; + device_control |= 0x28; + pci_write_config_byte(pdev, 0x69, device_control); + + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); + } else { + pci_read_config_byte(pdev, 0x69, &device_control); + device_control &= ~0x70; + device_control |= 0x58; + pci_write_config_byte(pdev, 0x69, device_control); + + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); + } + } else if (tp->mcfg == CFG_METHOD_3) { + RTL_W8(tp, Config3, 
RTL_R8(tp, Config3) & ~Beacon_en); + + if (dev->mtu > ETH_DATA_LEN) { + pci_read_config_byte(pdev, 0x69, &device_control); + device_control &= ~0x70; + device_control |= 0x28; + pci_write_config_byte(pdev, 0x69, device_control); + + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); + } else { + pci_read_config_byte(pdev, 0x69, &device_control); + device_control &= ~0x70; + device_control |= 0x58; + pci_write_config_byte(pdev, 0x69, device_control); + + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); + } + } + + if ((tp->mcfg == CFG_METHOD_1) || (tp->mcfg == CFG_METHOD_2) || (tp->mcfg == CFG_METHOD_3)) { + /* csum offload command for RTL8168B/8111B */ + tp->tx_tcp_csum_cmd = TxTCPCS; + tp->tx_udp_csum_cmd = TxUDPCS; + tp->tx_ip_csum_cmd = TxIPCS; + tp->tx_ipv6_csum_cmd = 0; + } else { + /* csum offload command for RTL8168C/8111C and RTL8168CP/8111CP */ + tp->tx_tcp_csum_cmd = TxTCPCS_C; + tp->tx_udp_csum_cmd = TxUDPCS_C; + tp->tx_ip_csum_cmd = TxIPCS_C; + tp->tx_ipv6_csum_cmd = TxIPV6F_C; + } + + + //other hw parameters + if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_23 || tp->mcfg == CFG_METHOD_24 || + tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28) + rtl8168_eri_write(tp, 0x2F8, 2, 0x1D8F, ERIAR_ExGMAC); + + if (tp->bios_setting & BIT_28) { + if (tp->mcfg == CFG_METHOD_18 || tp->mcfg == CFG_METHOD_19 || + tp->mcfg == CFG_METHOD_20) { + u32 gphy_val; + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + gphy_val = rtl8168_mdio_read(tp, 0x16); + gphy_val |= BIT_10; + rtl8168_mdio_write(tp, 0x16, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_7; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + } + + rtl8168_hw_clear_timer_int(dev); + + rtl8168_enable_exit_l1_mask(tp); + + switch (tp->mcfg) { + case CFG_METHOD_25: + rtl8168_mac_ocp_write(tp, 0xD3C0, 0x0B00); + rtl8168_mac_ocp_write(tp, 0xD3C2, 0x0000); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + rtl8168_mac_ocp_write(tp, 0xE098, 0x0AA2); + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mac_ocp_write(tp, 0xE098, 0xC302); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + if (aspm) { + rtl8168_init_pci_offset_99(tp); + } + break; + } + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + if (aspm) { + rtl8168_init_pci_offset_180(tp); + } + break; + } + + tp->cp_cmd &= ~(EnableBist | Macdbgo_oe | Force_halfdup | + Force_rxflow_en | Force_txflow_en | Cxpl_dbg_sel | + ASF | Macdbgo_sel); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + RTL_W16(tp, CPlusCmd, tp->cp_cmd); +#else + rtl8168_hw_set_features(dev, dev->features); +#endif + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case 
CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: { + int timeout; + for (timeout = 0; timeout < 10; timeout++) { + if ((rtl8168_eri_read(tp, 0x1AE, 2, ERIAR_ExGMAC) & BIT_13)==0) + break; + mdelay(1); + } + } + break; + } + + RTL_W16(tp, RxMaxSize, tp->rx_buf_sz); + + rtl8168_disable_rxdvgate(dev); + + if (tp->mcfg == CFG_METHOD_11 || tp->mcfg == CFG_METHOD_12) + rtl8168_mac_loopback_test(tp); + + if (!tp->pci_cfg_is_read) { + pci_read_config_byte(pdev, PCI_COMMAND, &tp->pci_cfg_space.cmd); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_0, &tp->pci_cfg_space.io_base_l); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_0 + 2, &tp->pci_cfg_space.io_base_h); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_2, &tp->pci_cfg_space.mem_base_l); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_2 + 2, &tp->pci_cfg_space.mem_base_h); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_3, &tp->pci_cfg_space.resv_0x1c_l); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_3 + 2, &tp->pci_cfg_space.resv_0x1c_h); + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tp->pci_cfg_space.ilr); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_4, &tp->pci_cfg_space.resv_0x20_l); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_4 + 2, &tp->pci_cfg_space.resv_0x20_h); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_5, &tp->pci_cfg_space.resv_0x24_l); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_5 + 2, &tp->pci_cfg_space.resv_0x24_h); + pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &tp->pci_cfg_space.resv_0x2c_l); + pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID + 2, &tp->pci_cfg_space.resv_0x2c_h); + if (tp->HwPcieSNOffset > 0) { + tp->pci_cfg_space.pci_sn_l = rtl8168_csi_read(tp, tp->HwPcieSNOffset); + tp->pci_cfg_space.pci_sn_h = rtl8168_csi_read(tp, tp->HwPcieSNOffset + 4); + } + + tp->pci_cfg_is_read = 1; + } + + rtl8168_dsm(dev, DSM_MAC_INIT); + + /* Set Rx packet filter */ + rtl8168_hw_set_rx_packet_filter(dev); + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH && !tp->dash_printer_enabled) + NICChkTypeEnableDashInterrupt(tp); +#endif + + if (tp->HwSuppAspmClkIntrLock) + rtl8168_hw_aspm_clkreq_enable(tp, true); + + rtl8168_disable_cfg9346_write(tp); + + udelay(10); +} + +static void +rtl8168_hw_start(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + + rtl8168_enable_hw_interrupt(tp); +} + +static int +rtl8168_change_mtu(struct net_device *dev, + int new_mtu) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int ret = 0; + unsigned long flags; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) + if (new_mtu < ETH_MIN_MTU) + return -EINVAL; + else if (new_mtu > tp->max_jumbo_frame_size) + new_mtu = tp->max_jumbo_frame_size; +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) + + spin_lock_irqsave(&tp->lock, flags); + dev->mtu = new_mtu; + spin_unlock_irqrestore(&tp->lock, flags); + + if (!netif_running(dev)) + goto out; + + rtl8168_down(dev); + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_set_rxbufsize(tp, dev); + + ret = rtl8168_init_ring(dev); + + if (ret < 0) { + spin_unlock_irqrestore(&tp->lock, flags); + goto err_out; + } + +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + RTL_NAPI_ENABLE(dev, &tp->napi); +#endif +#endif//CONFIG_R8168_NAPI + + netif_stop_queue(dev); + netif_carrier_off(dev); + rtl8168_hw_config(dev); + spin_unlock_irqrestore(&tp->lock, flags); + + 
rtl8168_set_speed(dev, tp->autoneg, tp->speed, tp->duplex, tp->advertising); + + mod_timer(&tp->esd_timer, jiffies + RTL8168_ESD_TIMEOUT); + mod_timer(&tp->link_timer, jiffies + RTL8168_LINK_TIMEOUT); +out: +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) + netdev_update_features(dev); +#endif + +err_out: + return ret; +} + +static inline void +rtl8168_make_unusable_by_asic(struct RxDesc *desc) +{ + desc->addr = 0x0badbadbadbadbadull; + desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); +} + +static void +rtl8168_free_rx_skb(struct rtl8168_private *tp, + struct sk_buff **sk_buff, + struct RxDesc *desc) +{ + struct pci_dev *pdev = tp->pci_dev; + + dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz, + DMA_FROM_DEVICE); + dev_kfree_skb(*sk_buff); + *sk_buff = NULL; + rtl8168_make_unusable_by_asic(desc); +} + +static inline void +rtl8168_mark_to_asic(struct RxDesc *desc, + u32 rx_buf_sz) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz); +} + +static inline void +rtl8168_map_to_asic(struct RxDesc *desc, + dma_addr_t mapping, + u32 rx_buf_sz) +{ + desc->addr = cpu_to_le64(mapping); + wmb(); + rtl8168_mark_to_asic(desc, rx_buf_sz); +} + +static int +rtl8168_alloc_rx_skb(struct rtl8168_private *tp, + struct sk_buff **sk_buff, + struct RxDesc *desc, + int rx_buf_sz, + u8 in_intr) +{ + struct sk_buff *skb; + dma_addr_t mapping; + int ret = 0; + + if (in_intr) + skb = RTL_ALLOC_SKB_INTR(tp, rx_buf_sz + RTK_RX_ALIGN); + else + skb = dev_alloc_skb(rx_buf_sz + RTK_RX_ALIGN); + + if (unlikely(!skb)) + goto err_out; + + skb_reserve(skb, RTK_RX_ALIGN); + + mapping = dma_map_single(tp_to_dev(tp), skb->data, rx_buf_sz, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(tp_to_dev(tp), mapping))) { + if (unlikely(net_ratelimit())) + netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n"); + goto err_out; + } + + *sk_buff = skb; + rtl8168_map_to_asic(desc, mapping, rx_buf_sz); +out: + return ret; + +err_out: + if (skb) + dev_kfree_skb(skb); + ret = -ENOMEM; + rtl8168_make_unusable_by_asic(desc); + goto out; +} + +static void +rtl8168_rx_clear(struct rtl8168_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + if (tp->Rx_skbuff[i]) + rtl8168_free_rx_skb(tp, tp->Rx_skbuff + i, + tp->RxDescArray + i); + } +} + +static u32 +rtl8168_rx_fill(struct rtl8168_private *tp, + struct net_device *dev, + u32 start, + u32 end, + u8 in_intr) +{ + u32 cur; + + for (cur = start; end - cur > 0; cur++) { + int ret, i = cur % NUM_RX_DESC; + + if (tp->Rx_skbuff[i]) + continue; + + ret = rtl8168_alloc_rx_skb(tp, tp->Rx_skbuff + i, + tp->RxDescArray + i, + tp->rx_buf_sz, + in_intr); + if (ret < 0) + break; + } + return cur - start; +} + +static inline void +rtl8168_mark_as_last_descriptor(struct RxDesc *desc) +{ + desc->opts1 |= cpu_to_le32(RingEnd); +} + +static void +rtl8168_desc_addr_fill(struct rtl8168_private *tp) +{ + if (!tp->TxPhyAddr || !tp->RxPhyAddr) + return; + + RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_BIT_MASK(32))); + RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32)); + RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_BIT_MASK(32))); + RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32)); +} + +static void +rtl8168_tx_desc_init(struct rtl8168_private *tp) +{ + int i = 0; + + memset(tp->TxDescArray, 0x0, NUM_TX_DESC * sizeof(struct TxDesc)); + + for (i = 0; i < NUM_TX_DESC; i++) { + if (i == (NUM_TX_DESC - 1)) + tp->TxDescArray[i].opts1 = cpu_to_le32(RingEnd); + } +} + +static void 
+rtl8168_rx_desc_offset0_init(struct rtl8168_private *tp, int own) +{ + int i = 0; + int ownbit = 0; + + if (tp->RxDescArray == NULL) return; + + if (own) + ownbit = DescOwn; + + for (i = 0; i < NUM_RX_DESC; i++) { + if (i == (NUM_RX_DESC - 1)) + tp->RxDescArray[i].opts1 = cpu_to_le32((ownbit | RingEnd) | (unsigned long)tp->rx_buf_sz); + else + tp->RxDescArray[i].opts1 = cpu_to_le32(ownbit | (unsigned long)tp->rx_buf_sz); + } +} + +static void +rtl8168_rx_desc_init(struct rtl8168_private *tp) +{ + memset(tp->RxDescArray, 0x0, NUM_RX_DESC * sizeof(struct RxDesc)); +} + +static int +rtl8168_init_ring(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + rtl8168_init_ring_indexes(tp); + + memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); + memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); + + rtl8168_tx_desc_init(tp); + rtl8168_rx_desc_init(tp); + + if (rtl8168_rx_fill(tp, dev, 0, NUM_RX_DESC, 0) != NUM_RX_DESC) + goto err_out; + + rtl8168_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); + + return 0; + +err_out: + rtl8168_rx_clear(tp); + return -ENOMEM; +} + +static void +rtl8168_unmap_tx_skb(struct pci_dev *pdev, + struct ring_info *tx_skb, + struct TxDesc *desc) +{ + unsigned int len = tx_skb->len; + + dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE); + + desc->opts1 = cpu_to_le32(RTK_MAGIC_DEBUG_VALUE); + desc->opts2 = 0x00; + desc->addr = 0x00; + tx_skb->len = 0; +} + +static void rtl8168_tx_clear_range(struct rtl8168_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) + struct net_device *dev = tp->dev; +#endif + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8168_unmap_tx_skb(tp->pci_dev, tx_skb, + tp->TxDescArray + entry); + if (skb) { + RTLDEV->stats.tx_dropped++; + dev_kfree_skb_any(skb); + tx_skb->skb = NULL; + } + } + } +} + +static void +rtl8168_tx_clear(struct rtl8168_private *tp) +{ + rtl8168_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + tp->cur_tx = tp->dirty_tx = 0; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) +static void rtl8168_schedule_work(struct net_device *dev, void (*task)(void *)) +{ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + struct rtl8168_private *tp = netdev_priv(dev); + + INIT_WORK(&tp->task, task, dev); + schedule_delayed_work(&tp->task, 4); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) +} + +#define rtl8168_cancel_schedule_work(a) + +#else +static void rtl8168_schedule_work(struct net_device *dev, work_func_t task) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + INIT_DELAYED_WORK(&tp->task, task); + schedule_delayed_work(&tp->task, 4); +} + +static void rtl8168_cancel_schedule_work(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct work_struct *work = &tp->task.work; + + if (!work->func) return; + + cancel_delayed_work_sync(&tp->task); +} +#endif + +static void +rtl8168_wait_for_quiescence(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + synchronize_irq(dev->irq); + + /* Wait for any pending NAPI task to complete */ +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + RTL_NAPI_DISABLE(dev, &tp->napi); +#endif +#endif//CONFIG_R8168_NAPI + + rtl8168_irq_mask_and_ack(tp); + +#ifdef CONFIG_R8168_NAPI +#if 
LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + RTL_NAPI_ENABLE(dev, &tp->napi); +#endif +#endif//CONFIG_R8168_NAPI +} + +#if 0 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) +static void rtl8168_reinit_task(void *_data) +#else +static void rtl8168_reinit_task(struct work_struct *work) +#endif +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + struct net_device *dev = _data; +#else + struct rtl8168_private *tp = + container_of(work, struct rtl8168_private, task.work); + struct net_device *dev = tp->dev; +#endif + int ret; + + if (netif_running(dev)) { + rtl8168_wait_for_quiescence(dev); + rtl8168_close(dev); + } + + ret = rtl8168_open(dev); + if (unlikely(ret < 0)) { + if (unlikely(net_ratelimit())) { + struct rtl8168_private *tp = netdev_priv(dev); + + if (netif_msg_drv(tp)) { + printk(PFX KERN_ERR + "%s: reinit failure (status = %d)." + " Rescheduling.\n", dev->name, ret); + } + } + rtl8168_schedule_work(dev, rtl8168_reinit_task); + } +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) +static void rtl8168_reset_task(void *_data) +{ + struct net_device *dev = _data; + struct rtl8168_private *tp = netdev_priv(dev); +#else +static void rtl8168_reset_task(struct work_struct *work) +{ + struct rtl8168_private *tp = + container_of(work, struct rtl8168_private, task.work); + struct net_device *dev = tp->dev; +#endif + u32 budget = ~(u32)0; + unsigned long flags; + + if (!netif_running(dev)) + return; + + rtl8168_wait_for_quiescence(dev); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) + rtl8168_rx_interrupt(dev, tp, &budget); +#else + rtl8168_rx_interrupt(dev, tp, budget); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_tx_clear(tp); + + if (tp->dirty_rx == tp->cur_rx) { + rtl8168_rx_clear(tp); + rtl8168_init_ring(dev); + rtl8168_set_speed(dev, tp->autoneg, tp->speed, tp->duplex, tp->advertising); + spin_unlock_irqrestore(&tp->lock, flags); + } else { + spin_unlock_irqrestore(&tp->lock, flags); + if (unlikely(net_ratelimit())) { + struct rtl8168_private *tp = netdev_priv(dev); + + if (netif_msg_intr(tp)) { + printk(PFX KERN_EMERG + "%s: Rx buffers shortage\n", dev->name); + } + } + rtl8168_schedule_work(dev, rtl8168_reset_task); + } +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) +static void +rtl8168_tx_timeout(struct net_device *dev, unsigned int txqueue) +#else +static void +rtl8168_tx_timeout(struct net_device *dev) +#endif +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + netif_stop_queue(dev); + netif_carrier_off(dev); + rtl8168_hw_reset(dev); + spin_unlock_irqrestore(&tp->lock, flags); + + /* Let's wait a bit while any (async) irq lands on */ + rtl8168_schedule_work(dev, rtl8168_reset_task); +} + +static u32 +rtl8168_get_txd_opts1(u32 opts1, u32 len, unsigned int entry) +{ + u32 status = opts1 | len; + + if (entry == NUM_TX_DESC - 1) + status |= RingEnd; + + return status; +} + +static int +rtl8168_xmit_frags(struct rtl8168_private *tp, + struct sk_buff *skb, + const u32 *opts) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag, entry; + struct TxDesc *txd = NULL; + const unsigned char nr_frags = info->nr_frags; + + entry = tp->cur_tx; + for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) { + skb_frag_t *frag = info->frags + cur_frag; + dma_addr_t mapping; + u32 status, len; + void *addr; + + entry = (entry + 1) % NUM_TX_DESC; + + txd = tp->TxDescArray + entry; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) + len = 
frag->size; + addr = ((void *) page_address(frag->page)) + frag->page_offset; +#else + len = skb_frag_size(frag); + addr = skb_frag_address(frag); +#endif + mapping = dma_map_single(tp_to_dev(tp), addr, len, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(tp_to_dev(tp), mapping))) { + if (unlikely(net_ratelimit())) + netif_err(tp, drv, tp->dev, + "Failed to map TX fragments DMA!\n"); + goto err_out; + } + + /* anti gcc 2.95.3 bugware (sic) */ + status = rtl8168_get_txd_opts1(opts[0], len, entry); + if (cur_frag == (nr_frags - 1)) { + tp->tx_skb[entry].skb = skb; + status |= LastFrag; + } + + txd->addr = cpu_to_le64(mapping); + + tp->tx_skb[entry].len = len; + + txd->opts2 = cpu_to_le32(opts[1]); + wmb(); + txd->opts1 = cpu_to_le32(status); + } + + return cur_frag; + +err_out: + rtl8168_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static inline +__be16 get_protocol(struct sk_buff *skb) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) + return vlan_get_protocol(skb); +#else + __be16 protocol; + + if (skb->protocol == htons(ETH_P_8021Q)) + protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; + else + protocol = skb->protocol; + + return protocol; +#endif +} + +static bool rtl8168_skb_pad(struct sk_buff *skb) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) + if (skb_padto(skb, ETH_ZLEN)) + return false; + skb_put(skb, ETH_ZLEN - skb->len); + return true; +#else + return !eth_skb_pad(skb); +#endif +} + +static inline bool +rtl8168_tx_csum(struct sk_buff *skb, + struct net_device *dev, + u32 *opts) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 csum_cmd = 0; + u8 sw_calc_csum = FALSE; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + const struct iphdr *ip = skb->nh.iph; + + if (dev->features & NETIF_F_IP_CSUM) { + if (ip->protocol == IPPROTO_TCP) + csum_cmd = tp->tx_ip_csum_cmd | tp->tx_tcp_csum_cmd; + else if (ip->protocol == IPPROTO_UDP) + csum_cmd = tp->tx_ip_csum_cmd | tp->tx_udp_csum_cmd; + else if (ip->protocol == IPPROTO_IP) + csum_cmd = tp->tx_ip_csum_cmd; + } +#else + u8 ip_protocol = IPPROTO_RAW; + + switch (get_protocol(skb)) { + case __constant_htons(ETH_P_IP): + if (dev->features & NETIF_F_IP_CSUM) { + ip_protocol = ip_hdr(skb)->protocol; + csum_cmd = tp->tx_ip_csum_cmd; + } + break; + case __constant_htons(ETH_P_IPV6): + if (dev->features & NETIF_F_IPV6_CSUM) { + u32 transport_offset = (u32)skb_transport_offset(skb); + if (transport_offset > 0 && transport_offset <= TCPHO_MAX) { + ip_protocol = ipv6_hdr(skb)->nexthdr; + csum_cmd = tp->tx_ipv6_csum_cmd; + csum_cmd |= transport_offset << TCPHO_SHIFT; + } + } + break; + default: + if (unlikely(net_ratelimit())) + dprintk("checksum_partial proto=%x!\n", skb->protocol); + break; + } + + if (ip_protocol == IPPROTO_TCP) + csum_cmd |= tp->tx_tcp_csum_cmd; + else if (ip_protocol == IPPROTO_UDP) + csum_cmd |= tp->tx_udp_csum_cmd; +#endif + if (csum_cmd == 0) { + sw_calc_csum = TRUE; +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + WARN_ON(1); /* we need a WARN() */ +#endif + } + } + + if (csum_cmd != 0) { + if (tp->ShortPacketSwChecksum && skb->len < ETH_ZLEN) { + sw_calc_csum = TRUE; + if (!rtl8168_skb_pad(skb)) + return false; + } else { + if ((tp->mcfg == CFG_METHOD_1) || (tp->mcfg == CFG_METHOD_2) || (tp->mcfg == CFG_METHOD_3)) + opts[0] |= csum_cmd; + else + opts[1] |= csum_cmd; + } + } + + if (tp->UseSwPaddingShortPkt && skb->len < ETH_ZLEN) + if (!rtl8168_skb_pad(skb)) + return false; + + if (sw_calc_csum) { +#if LINUX_VERSION_CODE < 
KERNEL_VERSION(2,6,10) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) + skb_checksum_help(&skb, 0); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) + skb_checksum_help(skb, 0); +#else + skb_checksum_help(skb); +#endif + } + + return true; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) +/* r8169_csum_workaround() + * The hw limits the value the transport offset. When the offset is out of the + * range, calculate the checksum by sw. + */ +static void r8168_csum_workaround(struct rtl8168_private *tp, + struct sk_buff *skb) +{ + if (skb_shinfo(skb)->gso_size) { + netdev_features_t features = tp->dev->features; + struct sk_buff *segs, *nskb; + + features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6); + segs = skb_gso_segment(skb, features); + if (IS_ERR(segs) || !segs) + goto drop; + + do { + nskb = segs; + segs = segs->next; + nskb->next = NULL; + rtl8168_start_xmit(nskb, tp->dev); + } while (segs); + + dev_consume_skb_any(skb); + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb_checksum_help(skb) < 0) + goto drop; + + rtl8168_start_xmit(skb, tp->dev); + } else { + struct net_device_stats *stats; + +drop: + stats = &tp->dev->stats; + stats->tx_dropped++; + dev_kfree_skb_any(skb); + } +} + +/* msdn_giant_send_check() + * According to the document of microsoft, the TCP Pseudo Header excludes the + * packet length for IPv6 TCP large packets. + */ +static int msdn_giant_send_check(struct sk_buff *skb) +{ + const struct ipv6hdr *ipv6h; + struct tcphdr *th; + int ret; + + ret = skb_cow_head(skb, 0); + if (ret) + return ret; + + ipv6h = ipv6_hdr(skb); + th = tcp_hdr(skb); + + th->check = 0; + th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0); + + return ret; +} +#endif + +static bool rtl8168_tx_slots_avail(struct rtl8168_private *tp, + unsigned int nr_frags) +{ + unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx; + + /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ + return slots_avail > nr_frags; +} + +static netdev_tx_t +rtl8168_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int entry; + struct TxDesc *txd; + dma_addr_t mapping; + u32 len; + u32 opts[2]; + netdev_tx_t ret = NETDEV_TX_OK; + unsigned long flags, large_send; + int frags; + + spin_lock_irqsave(&tp->lock, flags); + + if (unlikely(!rtl8168_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { + if (netif_msg_drv(tp)) { + printk(KERN_ERR + "%s: BUG! Tx Ring full when queue awake!\n", + dev->name); + } + goto err_stop; + } + + entry = tp->cur_tx % NUM_TX_DESC; + txd = tp->TxDescArray + entry; + + if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) { + if (netif_msg_drv(tp)) { + printk(KERN_ERR + "%s: BUG! 
Tx Desc is owned by hardware!\n", + dev->name); + } + goto err_stop; + } + + opts[0] = DescOwn; + opts[1] = rtl8168_tx_vlan_tag(tp, skb); + + large_send = 0; +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) + u32 mss = skb_shinfo(skb)->tso_size; +#else + u32 mss = skb_shinfo(skb)->gso_size; +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) + + /* TCP Segmentation Offload (or TCP Large Send) */ + if (mss) { + if ((tp->mcfg == CFG_METHOD_1) || + (tp->mcfg == CFG_METHOD_2) || + (tp->mcfg == CFG_METHOD_3)) { + opts[0] |= LargeSend | (min(mss, MSS_MAX) << 16); + large_send = 1; + } else { + u32 transport_offset = (u32)skb_transport_offset(skb); + switch (get_protocol(skb)) { + case __constant_htons(ETH_P_IP): + if (transport_offset <= GTTCPHO_MAX) { + opts[0] |= GiantSendv4; + opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[1] |= min(mss, MSS_MAX) << 18; + large_send = 1; + } + break; + case __constant_htons(ETH_P_IPV6): +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) + if (msdn_giant_send_check(skb)) { + spin_unlock_irqrestore(&tp->lock, flags); + r8168_csum_workaround(tp, skb); + goto out; + } +#endif + if (transport_offset <= GTTCPHO_MAX) { + opts[0] |= GiantSendv6; + opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[1] |= min(mss, MSS_MAX) << 18; + large_send = 1; + } + break; + default: + if (unlikely(net_ratelimit())) + dprintk("tso proto=%x!\n", skb->protocol); + break; + } + } + + if (large_send == 0) + goto err_dma_0; + } + } +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + + if (large_send == 0) { + if (unlikely(!rtl8168_tx_csum(skb, dev, opts))) + goto err_dma_0; + } + + frags = rtl8168_xmit_frags(tp, skb, opts); + if (unlikely(frags < 0)) + goto err_dma_0; + if (frags) { + len = skb_headlen(skb); + opts[0] |= FirstFrag; + } else { + len = skb->len; + + tp->tx_skb[entry].skb = skb; + + opts[0] |= FirstFrag | LastFrag; + } + + opts[0] = rtl8168_get_txd_opts1(opts[0], len, entry); + mapping = dma_map_single(tp_to_dev(tp), skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tp_to_dev(tp), mapping))) { + if (unlikely(net_ratelimit())) + netif_err(tp, drv, dev, "Failed to map TX DMA!\n"); + goto err_dma_1; + } + tp->tx_skb[entry].len = len; + txd->addr = cpu_to_le64(mapping); + txd->opts2 = cpu_to_le32(opts[1]); + wmb(); + txd->opts1 = cpu_to_le32(opts[0]); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) + dev->trans_start = jiffies; +#else + skb_tx_timestamp(skb); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) + + tp->cur_tx += frags + 1; + + wmb(); + + RTL_W8(tp, TxPoll, NPQ); /* set polling bit */ + + if (!rtl8168_tx_slots_avail(tp, MAX_SKB_FRAGS)) { + netif_stop_queue(dev); + smp_rmb(); + if (rtl8168_tx_slots_avail(tp, MAX_SKB_FRAGS)) + netif_wake_queue(dev); + } + + spin_unlock_irqrestore(&tp->lock, flags); +out: + return ret; +err_dma_1: + tp->tx_skb[entry].skb = NULL; + rtl8168_tx_clear_range(tp, tp->cur_tx + 1, frags); +err_dma_0: + RTLDEV->stats.tx_dropped++; + spin_unlock_irqrestore(&tp->lock, flags); + dev_kfree_skb_any(skb); + ret = NETDEV_TX_OK; + goto out; +err_stop: + netif_stop_queue(dev); + ret = NETDEV_TX_BUSY; + RTLDEV->stats.tx_dropped++; + + spin_unlock_irqrestore(&tp->lock, flags); + goto out; +} + +static void +rtl8168_tx_interrupt(struct net_device *dev, + struct rtl8168_private *tp) +{ + unsigned int dirty_tx, tx_left; + + assert(dev != NULL); + assert(tp != NULL); + + dirty_tx = tp->dirty_tx; + smp_rmb(); + tx_left = 
tp->cur_tx - dirty_tx; + tp->dynamic_aspm_packet_count += tx_left; + + while (tx_left > 0) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + u32 len = tx_skb->len; + u32 status; + + rmb(); + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + RTLDEV->stats.tx_bytes += len; + RTLDEV->stats.tx_packets++; + + rtl8168_unmap_tx_skb(tp->pci_dev, + tx_skb, + tp->TxDescArray + entry); + + if (tx_skb->skb!=NULL) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) + dev_consume_skb_any(tx_skb->skb); +#else + dev_kfree_skb_any(tx_skb->skb); +#endif + tx_skb->skb = NULL; + } + dirty_tx++; + tx_left--; + } + + tp->dynamic_aspm_packet_count -= tx_left; + + if (tp->dirty_tx != dirty_tx) { + tp->dirty_tx = dirty_tx; + smp_wmb(); + if (netif_queue_stopped(dev) && + (rtl8168_tx_slots_avail(tp, MAX_SKB_FRAGS))) { + netif_wake_queue(dev); + } + smp_rmb(); + if (tp->cur_tx != dirty_tx) + RTL_W8(tp, TxPoll, NPQ); + } +} + +static inline int +rtl8168_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void +rtl8168_rx_csum(struct rtl8168_private *tp, + struct sk_buff *skb, + struct RxDesc *desc) +{ + u32 opts1 = le32_to_cpu(desc->opts1); + u32 opts2 = le32_to_cpu(desc->opts2); + + if ((tp->mcfg == CFG_METHOD_1) || + (tp->mcfg == CFG_METHOD_2) || + (tp->mcfg == CFG_METHOD_3)) { + u32 status = opts1 & RxProtoMask; + + /* rx csum offload for RTL8168B/8111B */ + if (((status == RxProtoTCP) && !(opts1 & (RxTCPF | RxIPF))) || + ((status == RxProtoUDP) && !(opts1 & (RxUDPF | RxIPF)))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + } else { + /* rx csum offload for RTL8168C/8111C and RTL8168CP/8111CP */ + if (((opts2 & RxV4F) && !(opts1 & RxIPF)) || (opts2 & RxV6F)) { + if (((opts1 & RxTCPT) && !(opts1 & RxTCPF)) || + ((opts1 & RxUDPT) && !(opts1 & RxUDPF))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + } else + skb->ip_summed = CHECKSUM_NONE; + } +} + +static inline int +rtl8168_try_rx_copy(struct rtl8168_private *tp, + struct sk_buff **sk_buff, + int pkt_size, + struct RxDesc *desc, + int rx_buf_sz) +{ + int ret = -1; + + if (pkt_size < rx_copybreak) { + struct sk_buff *skb; + + skb = RTL_ALLOC_SKB_INTR(tp, pkt_size + RTK_RX_ALIGN); + if (skb) { + u8 *data; + + data = sk_buff[0]->data; + skb_reserve(skb, RTK_RX_ALIGN); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,37) + prefetch(data - RTK_RX_ALIGN); +#endif + eth_copy_and_sum(skb, data, pkt_size, 0); + *sk_buff = skb; + rtl8168_mark_to_asic(desc, rx_buf_sz); + ret = 0; + } + } + return ret; +} + +static inline void +rtl8168_rx_skb(struct rtl8168_private *tp, + struct sk_buff *skb) +{ +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) + netif_receive_skb(skb); +#else + napi_gro_receive(&tp->napi, skb); +#endif +#else + netif_rx(skb); +#endif +} + +static int +rtl8168_rx_interrupt(struct net_device *dev, + struct rtl8168_private *tp, + napi_budget budget) +{ + unsigned int cur_rx, rx_left; + unsigned int delta, count = 0; + unsigned int entry; + struct RxDesc *desc; + u32 status; + u32 rx_quota; + + assert(dev != NULL); + assert(tp != NULL); + + if (tp->RxDescArray == NULL) + goto rx_out; + + rx_quota = RTL_RX_QUOTA(budget); + cur_rx = tp->cur_rx; + entry = cur_rx % NUM_RX_DESC; + desc = tp->RxDescArray + entry; + rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; + rx_left = rtl8168_rx_quota(rx_left, (u32)rx_quota); + + 
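+ /*
+ * Rx ring handshake, summarized from the loop below: the NIC clears
+ * DescOwn in opts1 when it hands a descriptor back, so processing
+ * stops at the first descriptor the hardware still owns. Error frames
+ * only update the error counters (unless NETIF_F_RXALL forwards them),
+ * and frames shorter than rx_copybreak are copied into a fresh skb so
+ * the original DMA buffer can be re-armed in place via
+ * rtl8168_mark_to_asic().
+ */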
for (; rx_left > 0; rx_left--) { + rmb(); + status = le32_to_cpu(desc->opts1); + if (status & DescOwn) + break; + if (unlikely(status & RxRES)) { + if (netif_msg_rx_err(tp)) { + printk(KERN_INFO + "%s: Rx ERROR. status = %08x\n", + dev->name, status); + } + + RTLDEV->stats.rx_errors++; + + if (status & (RxRWT | RxRUNT)) + RTLDEV->stats.rx_length_errors++; + if (status & RxCRC) + RTLDEV->stats.rx_crc_errors++; + if (dev->features & NETIF_F_RXALL) + goto process_pkt; + + rtl8168_mark_to_asic(desc, tp->rx_buf_sz); + } else { + struct sk_buff *skb; + int pkt_size; + +process_pkt: + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size = (status & 0x00003fff) - 4; + else + pkt_size = status & 0x00003fff; + + /* + * The driver does not support incoming fragmented + * frames. They are seen as a symptom of over-mtu + * sized frames. + */ + if (unlikely(rtl8168_fragmented_frame(status))) { + RTLDEV->stats.rx_dropped++; + RTLDEV->stats.rx_length_errors++; + rtl8168_mark_to_asic(desc, tp->rx_buf_sz); + continue; + } + + skb = tp->Rx_skbuff[entry]; + + dma_sync_single_for_cpu(tp_to_dev(tp), + le64_to_cpu(desc->addr), tp->rx_buf_sz, + DMA_FROM_DEVICE); + + if (rtl8168_try_rx_copy(tp, &skb, pkt_size, + desc, tp->rx_buf_sz)) { + tp->Rx_skbuff[entry] = NULL; + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), + tp->rx_buf_sz, DMA_FROM_DEVICE); + } else { + dma_sync_single_for_device(tp_to_dev(tp), le64_to_cpu(desc->addr), + tp->rx_buf_sz, DMA_FROM_DEVICE); + } + + if (tp->cp_cmd & RxChkSum) + rtl8168_rx_csum(tp, skb, desc); + + skb->dev = dev; + skb_put(skb, pkt_size); + skb->protocol = eth_type_trans(skb, dev); + + if (skb->pkt_type == PACKET_MULTICAST) + RTLDEV->stats.multicast++; + + if (rtl8168_rx_vlan_skb(tp, desc, skb) < 0) + rtl8168_rx_skb(tp, skb); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0) + dev->last_rx = jiffies; +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0) + RTLDEV->stats.rx_bytes += pkt_size; + RTLDEV->stats.rx_packets++; + } + + cur_rx++; + entry = cur_rx % NUM_RX_DESC; + desc = tp->RxDescArray + entry; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,37) + prefetch(desc); +#endif + } + + count = cur_rx - tp->cur_rx; + tp->cur_rx = cur_rx; + + delta = rtl8168_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, 1); + if (!delta && count && netif_msg_intr(tp)) + printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name); + tp->dirty_rx += delta; + + tp->dynamic_aspm_packet_count += delta; + + /* + * FIXME: until there is periodic timer to try and refill the ring, + * a temporary shortage may definitely kill the Rx process. + * - disable the asic to try and avoid an overflow and kick it again + * after refill ? + * - how do others driver handle this condition (Uh oh...). + */ + if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp)) + printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name); + +rx_out: + return count; +} + +/* + *The interrupt handler does all of the Rx thread work and cleans up after + *the Tx thread. 
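+ *Interrupt status is acknowledged up front, the Rx FIFO overflow
+ *workaround restarts the chip on CFG_METHOD_1, and the ring work is
+ *deferred to NAPI when CONFIG_R8168_NAPI is enabled.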
+ */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) +static irqreturn_t rtl8168_interrupt(int irq, void *dev_instance, struct pt_regs *regs) +#else +static irqreturn_t rtl8168_interrupt(int irq, void *dev_instance) +#endif +{ + struct net_device *dev = (struct net_device *) dev_instance; + struct rtl8168_private *tp = netdev_priv(dev); + int status; + int handled = 0; + + do { + status = RTL_R16(tp, IntrStatus); + + if (!(tp->features & RTL_FEATURE_MSI)) { + /* hotplug/major error/no more work/shared irq */ + if ((status == 0xFFFF) || !status) + break; + + if (!(status & (tp->intr_mask | tp->timer_intr_mask))) + break; + } + + handled = 1; + + rtl8168_disable_hw_interrupt(tp); + + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + /* RX_OVERFLOW RE-START mechanism now HW handles it automatically*/ + RTL_W16(tp, IntrStatus, status&~RxFIFOOver); + break; + default: + RTL_W16(tp, IntrStatus, status); + break; + } + + //Work around for rx fifo overflow + if (unlikely(status & RxFIFOOver)) { + if (tp->mcfg == CFG_METHOD_1) { + netif_stop_queue(dev); + udelay(300); + rtl8168_hw_reset(dev); + rtl8168_tx_clear(tp); + rtl8168_rx_clear(tp); + rtl8168_init_ring(dev); + rtl8168_hw_config(dev); + rtl8168_hw_start(dev); + netif_wake_queue(dev); + } + } + +#ifdef ENABLE_DASH_SUPPORT + if ( tp->DASH ) { + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + u8 DashIntType2Status; + + if (status & ISRIMR_DASH_INTR_CMAC_RESET) + tp->CmacResetIntr = TRUE; + + DashIntType2Status = RTL_CMAC_R8(tp, CMAC_IBISR0); + if (DashIntType2Status & ISRIMR_DASH_TYPE2_ROK) { + tp->RcvFwDashOkEvt = TRUE; + } + if (DashIntType2Status & ISRIMR_DASH_TYPE2_TOK) { + tp->SendFwHostOkEvt = TRUE; + } + if (DashIntType2Status & ISRIMR_DASH_TYPE2_RX_DISABLE_IDLE) { + tp->DashFwDisableRx = TRUE; + } + + RTL_CMAC_W8(tp, CMAC_IBISR0, DashIntType2Status); + } else { + if (status & ISRIMR_DP_REQSYS_OK) { + tp->RcvFwReqSysOkEvt = TRUE; + } + if (status & ISRIMR_DP_DASH_OK) { + tp->RcvFwDashOkEvt = TRUE; + } + if (status & ISRIMR_DP_HOST_OK) { + tp->SendFwHostOkEvt = TRUE; + } + } + } +#endif + +#ifdef CONFIG_R8168_NAPI + if (status & tp->intr_mask || tp->keep_intr_cnt-- > 0) { + if (status & tp->intr_mask) + tp->keep_intr_cnt = RTK_KEEP_INTERRUPT_COUNT; + + if (likely(RTL_NETIF_RX_SCHEDULE_PREP(dev, &tp->napi))) + __RTL_NETIF_RX_SCHEDULE(dev, &tp->napi); + else if (netif_msg_intr(tp)) + printk(KERN_INFO "%s: interrupt %04x in poll\n", + dev->name, status); + } else { + tp->keep_intr_cnt = RTK_KEEP_INTERRUPT_COUNT; + rtl8168_switch_to_hw_interrupt(tp); + } +#else + if (status & tp->intr_mask || tp->keep_intr_cnt-- > 0) { + u32 budget = ~(u32)0; + + if (status & tp->intr_mask) + tp->keep_intr_cnt = RTK_KEEP_INTERRUPT_COUNT; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) + rtl8168_rx_interrupt(dev, tp, &budget); +#else + rtl8168_rx_interrupt(dev, tp, budget); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) + rtl8168_tx_interrupt(dev, tp); + +#ifdef ENABLE_DASH_SUPPORT + if ( tp->DASH ) { + struct net_device 
*dev = tp->dev; + + HandleDashInterrupt(dev); + } +#endif + + rtl8168_switch_to_timer_interrupt(tp); + } else { + tp->keep_intr_cnt = RTK_KEEP_INTERRUPT_COUNT; + rtl8168_switch_to_hw_interrupt(tp); + } +#endif + + } while (false); + + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_R8168_NAPI +static int rtl8168_poll(napi_ptr napi, napi_budget budget) +{ + struct rtl8168_private *tp = RTL_GET_PRIV(napi, struct rtl8168_private); + RTL_GET_NETDEV(tp) + unsigned int work_to_do = RTL_NAPI_QUOTA(budget, dev); + unsigned int work_done; + unsigned long flags; + + work_done = rtl8168_rx_interrupt(dev, tp, budget); + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_tx_interrupt(dev, tp); + spin_unlock_irqrestore(&tp->lock, flags); + + RTL_NAPI_QUOTA_UPDATE(dev, work_done, budget); + + if (work_done < work_to_do) { +#ifdef ENABLE_DASH_SUPPORT + if ( tp->DASH ) { + struct net_device *dev = tp->dev; + + spin_lock_irqsave(&tp->lock, flags); + HandleDashInterrupt(dev); + spin_unlock_irqrestore(&tp->lock, flags); + } +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) + if (RTL_NETIF_RX_COMPLETE(dev, napi, work_done) == FALSE) return RTL_NAPI_RETURN_VALUE; +#else + RTL_NETIF_RX_COMPLETE(dev, napi, work_done); +#endif + /* + * 20040426: the barrier is not strictly required but the + * behavior of the irq handler could be less predictable + * without it. Btw, the lack of flush for the posted pci + * write is safe - FR + */ + smp_wmb(); + + rtl8168_switch_to_timer_interrupt(tp); + } + + return RTL_NAPI_RETURN_VALUE; +} +#endif//CONFIG_R8168_NAPI + +static void rtl8168_sleep_rx_enable(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (tp->wol_enabled != WOL_ENABLED) return; + + if ((tp->mcfg == CFG_METHOD_1) || (tp->mcfg == CFG_METHOD_2)) { + RTL_W8(tp, ChipCmd, CmdReset); + rtl8168_rx_desc_offset0_init(tp, 0); + RTL_W8(tp, ChipCmd, CmdRxEnb); + } else if (tp->mcfg == CFG_METHOD_14 || tp->mcfg == CFG_METHOD_15) { + rtl8168_ephy_write(tp, 0x19, 0xFF64); + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + } +} + +static void rtl8168_down(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + rtl8168_delete_esd_timer(dev, &tp->esd_timer); + + rtl8168_delete_link_timer(dev, &tp->link_timer); + +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + RTL_NAPI_DISABLE(dev, &tp->napi); +#endif +#endif//CONFIG_R8168_NAPI + + netif_stop_queue(dev); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11) + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_rcu(); /* FIXME: should this be synchronize_irq()? 
*/ +#endif + + spin_lock_irqsave(&tp->lock, flags); + + netif_carrier_off(dev); + + rtl8168_dsm(dev, DSM_IF_DOWN); + + rtl8168_hw_reset(dev); + + spin_unlock_irqrestore(&tp->lock, flags); + + synchronize_irq(dev->irq); + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_tx_clear(tp); + + rtl8168_rx_clear(tp); + + spin_unlock_irqrestore(&tp->lock, flags); +} + +static int rtl8168_close(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + unsigned long flags; + + if (tp->TxDescArray!=NULL && tp->RxDescArray!=NULL) { + rtl8168_cancel_schedule_work(dev); + + rtl8168_down(dev); + + pci_clear_master(tp->pci_dev); + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_hw_d3_para(dev); + + rtl8168_powerdown_pll(dev); + + rtl8168_sleep_rx_enable(dev); + + spin_unlock_irqrestore(&tp->lock, flags); + + free_irq(dev->irq, dev); + + dma_free_coherent(&pdev->dev, R8168_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8168_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + } else { + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_hw_d3_para(dev); + + rtl8168_powerdown_pll(dev); + + spin_unlock_irqrestore(&tp->lock, flags); + } + + return 0; +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11) +static void rtl8168_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8168_private *tp = netdev_priv(dev); + + if (HW_DASH_SUPPORT_DASH(tp)) + rtl8168_driver_stop(tp); + + rtl8168_set_bios_setting(dev); + if (s5_keep_curr_mac == 0 && tp->random_mac == 0) + rtl8168_rar_set(tp, tp->org_mac_addr); + +#ifdef ENABLE_FIBER_SUPPORT + rtl8168_hw_fiber_nic_d3_para(dev); +#endif //ENABLE_FIBER_SUPPORT + + if (s5wol == 0) + tp->wol_enabled = WOL_DISABLED; + + rtl8168_close(dev); + rtl8168_disable_msi(pdev, tp); + + if (system_state == SYSTEM_POWER_OFF) { + pci_clear_master(tp->pci_dev); + rtl8168_sleep_rx_enable(dev); + pci_wake_from_d3(pdev, tp->wol_enabled); + pci_set_power_state(pdev, PCI_D3hot); + } +} +#endif + +/** + * rtl8168_get_stats - Get rtl8168 read/write statistics + * @dev: The Ethernet Device to get statistics for + * + * Get TX/RX statistics for rtl8168 + */ +static struct +net_device_stats *rtl8168_get_stats(struct net_device *dev) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + struct rtl8168_private *tp = netdev_priv(dev); +#endif + if (netif_running(dev)) { +// spin_lock_irqsave(&tp->lock, flags); +// spin_unlock_irqrestore(&tp->lock, flags); + } + + return &RTLDEV->stats; +} + +#ifdef CONFIG_PM + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) +static int +rtl8168_suspend(struct pci_dev *pdev, u32 state) +#else +static int +rtl8168_suspend(struct pci_dev *pdev, pm_message_t state) +#endif +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8168_private *tp = netdev_priv(dev); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) + u32 pci_pm_state = pci_choose_state(pdev, state); +#endif + unsigned long flags; + + if (!netif_running(dev)) + goto out; + + rtl8168_cancel_schedule_work(dev); + + rtl8168_delete_esd_timer(dev, &tp->esd_timer); + + rtl8168_delete_link_timer(dev, &tp->link_timer); + + netif_stop_queue(dev); + + netif_carrier_off(dev); + + netif_device_detach(dev); + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_dsm(dev, DSM_NIC_GOTO_D3); + + rtl8168_hw_reset(dev); + + pci_clear_master(pdev); + + rtl8168_hw_d3_para(dev); + +#ifdef ENABLE_FIBER_SUPPORT + 
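+ /* fiber-specific D3 parameters; only compiled in when the Makefile sets ENABLE_FIBER_SUPPORT=y */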
rtl8168_hw_fiber_nic_d3_para(dev); +#endif //ENABLE_FIBER_SUPPORT + + rtl8168_powerdown_pll(dev); + + rtl8168_sleep_rx_enable(dev); + + spin_unlock_irqrestore(&tp->lock, flags); + +out: + if (HW_DASH_SUPPORT_DASH(tp)) { + spin_lock_irqsave(&tp->lock, flags); + rtl8168_driver_stop(tp); + spin_unlock_irqrestore(&tp->lock, flags); + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) + pci_save_state(pdev, &pci_pm_state); +#else + pci_save_state(pdev); +#endif + pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled); +// pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int +rtl8168_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) + u32 pci_pm_state = PCI_D0; +#endif + + pci_set_power_state(pdev, PCI_D0); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) + pci_restore_state(pdev, &pci_pm_state); +#else + pci_restore_state(pdev); +#endif + pci_enable_wake(pdev, PCI_D0, 0); + + spin_lock_irqsave(&tp->lock, flags); + + /* restore last modified mac address */ + rtl8168_rar_set(tp, dev->dev_addr); + + spin_unlock_irqrestore(&tp->lock, flags); + + if (!netif_running(dev)) { + if (HW_DASH_SUPPORT_DASH(tp)) { + spin_lock_irqsave(&tp->lock, flags); + rtl8168_driver_start(tp); + spin_unlock_irqrestore(&tp->lock, flags); + } + goto out; + } + + pci_set_master(pdev); + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_exit_oob(dev); + + rtl8168_dsm(dev, DSM_NIC_RESUME_D3); + + rtl8168_hw_init(dev); + + rtl8168_powerup_pll(dev); + + rtl8168_hw_ephy_config(dev); + + rtl8168_hw_phy_config(dev); + + rtl8168_hw_config(dev); + + spin_unlock_irqrestore(&tp->lock, flags); + + rtl8168_schedule_work(dev, rtl8168_reset_task); + + netif_device_attach(dev); + + mod_timer(&tp->esd_timer, jiffies + RTL8168_ESD_TIMEOUT); + mod_timer(&tp->link_timer, jiffies + RTL8168_LINK_TIMEOUT); +out: + return 0; +} + +#endif /* CONFIG_PM */ + +static struct pci_driver rtl8168_pci_driver = { + .name = MODULENAME, + .id_table = rtl8168_pci_tbl, + .probe = rtl8168_init_one, + .remove = __devexit_p(rtl8168_remove_one), +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11) + .shutdown = rtl8168_shutdown, +#endif +#ifdef CONFIG_PM + .suspend = rtl8168_suspend, + .resume = rtl8168_resume, +#endif +}; + +static int __init +rtl8168_init_module(void) +{ +#ifdef ENABLE_R8168_PROCFS + rtl8168_proc_module_init(); +#endif +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + return pci_register_driver(&rtl8168_pci_driver); +#else + return pci_module_init(&rtl8168_pci_driver); +#endif +} + +static void __exit +rtl8168_cleanup_module(void) +{ + pci_unregister_driver(&rtl8168_pci_driver); +#ifdef ENABLE_R8168_PROCFS + if (rtl8168_proc) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) + remove_proc_subtree(MODULENAME, init_net.proc_net); +#else +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) + remove_proc_entry(MODULENAME, init_net.proc_net); +#else + remove_proc_entry(MODULENAME, proc_net); +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) + rtl8168_proc = NULL; + } +#endif +} + +module_init(rtl8168_init_module); +module_exit(rtl8168_cleanup_module); diff --git a/addons/r8168/src/3.10.108/r8168_realwow.h b/addons/r8168/src/3.10.108/r8168_realwow.h new file mode 100644 index 00000000..28e2579e --- /dev/null +++ b/addons/r8168/src/3.10.108/r8168_realwow.h @@ -0,0 +1,118 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +#ifndef _LINUX_R8168_REALWOW_H +#define _LINUX_R8168_REALWOW_H + +#define SIOCDEVPRIVATE_RTLREALWOW SIOCDEVPRIVATE+3 + +#define MAX_RealWoW_KCP_SIZE (100) +#define MAX_RealWoW_Payload (64) + +#define KA_TX_PACKET_SIZE (100) +#define KA_WAKEUP_PATTERN_SIZE (120) + +//HwSuppKeepAliveOffloadVer +#define HW_SUPPORT_KCP_OFFLOAD(_M) ((_M)->HwSuppKCPOffloadVer > 0) + +enum rtl_realwow_cmd { + + RTL_REALWOW_SET_KCP_DISABLE=0, + RTL_REALWOW_SET_KCP_INFO, + RTL_REALWOW_SET_KCP_CONTENT, + + RTL_REALWOW_SET_KCP_ACKPKTINFO, + RTL_REALWOW_SET_KCP_WPINFO, + RTL_REALWOW_SET_KCPDHCP_TIMEOUT, + + RTLT_REALWOW_COMMAND_INVALID +}; + +struct rtl_realwow_ioctl_struct { + __u32 cmd; + __u32 offset; + __u32 len; + union { + __u32 data; + void *data_buffer; + }; +}; + +typedef struct _MP_KCPInfo { + u8 DIPv4[4]; + u8 MacID[6]; + u16 UdpPort[2]; + u8 PKTLEN[2]; + + u16 ackLostCnt; + u8 KCP_WakePattern[MAX_RealWoW_Payload]; + u8 KCP_AckPacket[MAX_RealWoW_Payload]; + u32 KCP_interval; + u8 KCP_WakePattern_Len; + u8 KCP_AckPacket_Len; + u8 KCP_TxPacket[2][KA_TX_PACKET_SIZE]; +} MP_KCP_INFO, *PMP_KCP_INFO; + +typedef struct _KCPInfo { + u32 nId; // = id + u8 DIPv4[4]; + u8 MacID[6]; + u16 UdpPort; + u16 PKTLEN; +} KCPInfo, *PKCPInfo; + +typedef struct _KCPContent { + u32 id; // = id + u32 mSec; // = msec + u32 size; // =size + u8 bPacket[MAX_RealWoW_KCP_SIZE]; // put packet here +} KCPContent, *PKCPContent; + +typedef struct _RealWoWAckPktInfo { + u16 ackLostCnt; + u16 patterntSize; + u8 pattern[MAX_RealWoW_Payload]; +} RealWoWAckPktInfo,*PRealWoWAckPktInfo; + +typedef struct _RealWoWWPInfo { + u16 patterntSize; + u8 pattern[MAX_RealWoW_Payload]; +} RealWoWWPInfo,*PRealWoWWPInfo; + +int rtl8168_realwow_ioctl(struct net_device *dev, struct ifreq *ifr); +void rtl8168_realwow_hw_init(struct net_device *dev); +void rtl8168_get_realwow_hw_version(struct net_device *dev); +void rtl8168_set_realwow_d3_para(struct net_device *dev); + +#endif /* _LINUX_R8168_REALWOW_H */ diff --git a/addons/r8168/src/3.10.108/rtl_eeprom.c b/addons/r8168/src/3.10.108/rtl_eeprom.c new file mode 100644 index 00000000..51bd3a12 --- 
/dev/null +++ b/addons/r8168/src/3.10.108/rtl_eeprom.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include + +#include "r8168.h" +#include "rtl_eeprom.h" + +//------------------------------------------------------------------- +//rtl8168_eeprom_type(): +// tell the eeprom type +//return value: +// 0: the eeprom type is 93C46 +// 1: the eeprom type is 93C56 or 93C66 +//------------------------------------------------------------------- +void rtl8168_eeprom_type(struct rtl8168_private *tp) +{ + u16 magic = 0; + + if (tp->mcfg == CFG_METHOD_DEFAULT) + goto out_no_eeprom; + + if(RTL_R8(tp, 0xD2)&0x04) { + //not support + //tp->eeprom_type = EEPROM_TWSI; + //tp->eeprom_len = 256; + goto out_no_eeprom; + } else if(RTL_R32(tp, RxConfig) & RxCfg_9356SEL) { + tp->eeprom_type = EEPROM_TYPE_93C56; + tp->eeprom_len = 256; + } else { + tp->eeprom_type = EEPROM_TYPE_93C46; + tp->eeprom_len = 128; + } + + magic = rtl8168_eeprom_read_sc(tp, 0); + +out_no_eeprom: + if ((magic != 0x8129) && (magic != 0x8128)) { + tp->eeprom_type = EEPROM_TYPE_NONE; + tp->eeprom_len = 0; + } +} + +void rtl8168_eeprom_cleanup(struct rtl8168_private *tp) +{ + u8 x; + + x = RTL_R8(tp, Cfg9346); + x &= ~(Cfg9346_EEDI | Cfg9346_EECS); + + RTL_W8(tp, Cfg9346, x); + + rtl8168_raise_clock(tp, &x); + rtl8168_lower_clock(tp, &x); +} + +int rtl8168_eeprom_cmd_done(struct rtl8168_private *tp) +{ + u8 x; + int i; + + rtl8168_stand_by(tp); + + for (i = 0; i < 50000; i++) { + x = RTL_R8(tp, Cfg9346); + + if (x & Cfg9346_EEDO) { + udelay(RTL_CLOCK_RATE * 2 * 3); + return 0; + } + udelay(1); + } + + return -1; +} + +//------------------------------------------------------------------- +//rtl8168_eeprom_read_sc(): +// read one word from eeprom +//------------------------------------------------------------------- +u16 rtl8168_eeprom_read_sc(struct rtl8168_private *tp, u16 reg) +{ + int addr_sz = 6; + u8 x; + u16 data; + + if(tp->eeprom_type == EEPROM_TYPE_NONE) { + return -1; + } + + if (tp->eeprom_type==EEPROM_TYPE_93C46) + addr_sz = 6; + else if (tp->eeprom_type==EEPROM_TYPE_93C56) + addr_sz = 
8; + + x = Cfg9346_EEM1 | Cfg9346_EECS; + RTL_W8(tp, Cfg9346, x); + + rtl8168_shift_out_bits(tp, RTL_EEPROM_READ_OPCODE, 3); + rtl8168_shift_out_bits(tp, reg, addr_sz); + + data = rtl8168_shift_in_bits(tp); + + rtl8168_eeprom_cleanup(tp); + + RTL_W8(tp, Cfg9346, 0); + + return data; +} + +//------------------------------------------------------------------- +//rtl8168_eeprom_write_sc(): +// write one word to a specific address in the eeprom +//------------------------------------------------------------------- +void rtl8168_eeprom_write_sc(struct rtl8168_private *tp, u16 reg, u16 data) +{ + u8 x; + int addr_sz = 6; + int w_dummy_addr = 4; + + if(tp->eeprom_type == EEPROM_TYPE_NONE) { + return ; + } + + if (tp->eeprom_type==EEPROM_TYPE_93C46) { + addr_sz = 6; + w_dummy_addr = 4; + } else if (tp->eeprom_type==EEPROM_TYPE_93C56) { + addr_sz = 8; + w_dummy_addr = 6; + } + + x = Cfg9346_EEM1 | Cfg9346_EECS; + RTL_W8(tp, Cfg9346, x); + + rtl8168_shift_out_bits(tp, RTL_EEPROM_EWEN_OPCODE, 5); + rtl8168_shift_out_bits(tp, reg, w_dummy_addr); + rtl8168_stand_by(tp); + + rtl8168_shift_out_bits(tp, RTL_EEPROM_ERASE_OPCODE, 3); + rtl8168_shift_out_bits(tp, reg, addr_sz); + if (rtl8168_eeprom_cmd_done(tp) < 0) { + return; + } + rtl8168_stand_by(tp); + + rtl8168_shift_out_bits(tp, RTL_EEPROM_WRITE_OPCODE, 3); + rtl8168_shift_out_bits(tp, reg, addr_sz); + rtl8168_shift_out_bits(tp, data, 16); + if (rtl8168_eeprom_cmd_done(tp) < 0) { + return; + } + rtl8168_stand_by(tp); + + rtl8168_shift_out_bits(tp, RTL_EEPROM_EWDS_OPCODE, 5); + rtl8168_shift_out_bits(tp, reg, w_dummy_addr); + + rtl8168_eeprom_cleanup(tp); + RTL_W8(tp, Cfg9346, 0); +} + +void rtl8168_raise_clock(struct rtl8168_private *tp, u8 *x) +{ + *x = *x | Cfg9346_EESK; + RTL_W8(tp, Cfg9346, *x); + udelay(RTL_CLOCK_RATE); +} + +void rtl8168_lower_clock(struct rtl8168_private *tp, u8 *x) +{ + + *x = *x & ~Cfg9346_EESK; + RTL_W8(tp, Cfg9346, *x); + udelay(RTL_CLOCK_RATE); +} + +void rtl8168_shift_out_bits(struct rtl8168_private *tp, int data, int count) +{ + u8 x; + int mask; + + mask = 0x01 << (count - 1); + x = RTL_R8(tp, Cfg9346); + x &= ~(Cfg9346_EEDI | Cfg9346_EEDO); + + do { + if (data & mask) + x |= Cfg9346_EEDI; + else + x &= ~Cfg9346_EEDI; + + RTL_W8(tp, Cfg9346, x); + udelay(RTL_CLOCK_RATE); + rtl8168_raise_clock(tp, &x); + rtl8168_lower_clock(tp, &x); + mask = mask >> 1; + } while(mask); + + x &= ~Cfg9346_EEDI; + RTL_W8(tp, Cfg9346, x); +} + +u16 rtl8168_shift_in_bits(struct rtl8168_private *tp) +{ + u8 x; + u16 d, i; + + x = RTL_R8(tp, Cfg9346); + x &= ~(Cfg9346_EEDI | Cfg9346_EEDO); + + d = 0; + + for (i = 0; i < 16; i++) { + d = d << 1; + rtl8168_raise_clock(tp, &x); + + x = RTL_R8(tp, Cfg9346); + x &= ~Cfg9346_EEDI; + + if (x & Cfg9346_EEDO) + d |= 1; + + rtl8168_lower_clock(tp, &x); + } + + return d; +} + +void rtl8168_stand_by(struct rtl8168_private *tp) +{ + u8 x; + + x = RTL_R8(tp, Cfg9346); + x &= ~(Cfg9346_EECS | Cfg9346_EESK); + RTL_W8(tp, Cfg9346, x); + udelay(RTL_CLOCK_RATE); + + x |= Cfg9346_EECS; + RTL_W8(tp, Cfg9346, x); +} + +void rtl8168_set_eeprom_sel_low(struct rtl8168_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_EEM1); + RTL_W8(tp, Cfg9346, Cfg9346_EEM1 | Cfg9346_EESK); + + udelay(20); + + RTL_W8(tp, Cfg9346, Cfg9346_EEM1); +} diff --git a/addons/r8168/src/3.10.108/rtl_eeprom.h b/addons/r8168/src/3.10.108/rtl_eeprom.h new file mode 100644 index 00000000..861defd0 --- /dev/null +++ b/addons/r8168/src/3.10.108/rtl_eeprom.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* 
+################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +//EEPROM opcodes +#define RTL_EEPROM_READ_OPCODE 06 +#define RTL_EEPROM_WRITE_OPCODE 05 +#define RTL_EEPROM_ERASE_OPCODE 07 +#define RTL_EEPROM_EWEN_OPCODE 19 +#define RTL_EEPROM_EWDS_OPCODE 16 + +#define RTL_CLOCK_RATE 3 + +void rtl8168_eeprom_type(struct rtl8168_private *tp); +void rtl8168_eeprom_cleanup(struct rtl8168_private *tp); +u16 rtl8168_eeprom_read_sc(struct rtl8168_private *tp, u16 reg); +void rtl8168_eeprom_write_sc(struct rtl8168_private *tp, u16 reg, u16 data); +void rtl8168_shift_out_bits(struct rtl8168_private *tp, int data, int count); +u16 rtl8168_shift_in_bits(struct rtl8168_private *tp); +void rtl8168_raise_clock(struct rtl8168_private *tp, u8 *x); +void rtl8168_lower_clock(struct rtl8168_private *tp, u8 *x); +void rtl8168_stand_by(struct rtl8168_private *tp); +void rtl8168_set_eeprom_sel_low(struct rtl8168_private *tp); + + + diff --git a/addons/r8168/src/3.10.108/rtltool.c b/addons/r8168/src/3.10.108/rtltool.c new file mode 100644 index 00000000..2a1be083 --- /dev/null +++ b/addons/r8168/src/3.10.108/rtltool.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 
2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "r8168.h" +#include "rtl_eeprom.h" +#include "rtltool.h" + +int rtl8168_tool_ioctl(struct rtl8168_private *tp, struct ifreq *ifr) +{ + struct rtltool_cmd my_cmd; + unsigned long flags; + int ret; + + if (copy_from_user(&my_cmd, ifr->ifr_data, sizeof(my_cmd))) + return -EFAULT; + + ret = 0; + switch (my_cmd.cmd) { + case RTLTOOL_READ_MAC: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (my_cmd.len==1) + my_cmd.data = readb(tp->mmio_addr+my_cmd.offset); + else if (my_cmd.len==2) + my_cmd.data = readw(tp->mmio_addr+(my_cmd.offset&~1)); + else if (my_cmd.len==4) + my_cmd.data = readl(tp->mmio_addr+(my_cmd.offset&~3)); + else { + ret = -EOPNOTSUPP; + break; + } + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + break; + + case RTLTOOL_WRITE_MAC: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (my_cmd.len==1) + writeb(my_cmd.data, tp->mmio_addr+my_cmd.offset); + else if (my_cmd.len==2) + writew(my_cmd.data, tp->mmio_addr+(my_cmd.offset&~1)); + else if (my_cmd.len==4) + writel(my_cmd.data, tp->mmio_addr+(my_cmd.offset&~3)); + else { + ret = -EOPNOTSUPP; + break; + } + + break; + + case RTLTOOL_READ_PHY: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + my_cmd.data = rtl8168_mdio_prot_read(tp, my_cmd.offset); + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + + break; + + case RTLTOOL_WRITE_PHY: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_mdio_prot_write(tp, my_cmd.offset, my_cmd.data); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + case RTLTOOL_READ_EPHY: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + my_cmd.data = rtl8168_ephy_read(tp, my_cmd.offset); + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + + break; + + case RTLTOOL_WRITE_EPHY: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_ephy_write(tp, my_cmd.offset, my_cmd.data); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + case RTLTOOL_READ_ERI: + my_cmd.data = 0; + if (my_cmd.len==1 || my_cmd.len==2 || my_cmd.len==4) { + spin_lock_irqsave(&tp->lock, flags); + my_cmd.data = rtl8168_eri_read(tp, my_cmd.offset, my_cmd.len, ERIAR_ExGMAC); + spin_unlock_irqrestore(&tp->lock, flags); + } else { + ret = -EOPNOTSUPP; + break; + } + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + + break; + + case RTLTOOL_WRITE_ERI: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (my_cmd.len==1 || my_cmd.len==2 || my_cmd.len==4) { + spin_lock_irqsave(&tp->lock, flags); + rtl8168_eri_write(tp, my_cmd.offset, my_cmd.len, my_cmd.data, ERIAR_ExGMAC); + spin_unlock_irqrestore(&tp->lock, flags); + } else { 
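+ /* ERI accesses must be 1, 2 or 4 bytes wide */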
+ ret = -EOPNOTSUPP; + break; + } + break; + + case RTLTOOL_READ_PCI: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + my_cmd.data = 0; + if (my_cmd.len==1) + pci_read_config_byte(tp->pci_dev, my_cmd.offset, + (u8 *)&my_cmd.data); + else if (my_cmd.len==2) + pci_read_config_word(tp->pci_dev, my_cmd.offset, + (u16 *)&my_cmd.data); + else if (my_cmd.len==4) + pci_read_config_dword(tp->pci_dev, my_cmd.offset, + &my_cmd.data); + else { + ret = -EOPNOTSUPP; + break; + } + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + break; + + case RTLTOOL_WRITE_PCI: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (my_cmd.len==1) + pci_write_config_byte(tp->pci_dev, my_cmd.offset, + my_cmd.data); + else if (my_cmd.len==2) + pci_write_config_word(tp->pci_dev, my_cmd.offset, + my_cmd.data); + else if (my_cmd.len==4) + pci_write_config_dword(tp->pci_dev, my_cmd.offset, + my_cmd.data); + else { + ret = -EOPNOTSUPP; + break; + } + + break; + + case RTLTOOL_READ_EEPROM: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + my_cmd.data = rtl8168_eeprom_read_sc(tp, my_cmd.offset); + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + + break; + + case RTLTOOL_WRITE_EEPROM: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_eeprom_write_sc(tp, my_cmd.offset, my_cmd.data); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + case RTL_READ_OOB_MAC: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_oob_mutex_lock(tp); + my_cmd.data = rtl8168_ocp_read(tp, my_cmd.offset, 4); + rtl8168_oob_mutex_unlock(tp); + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + break; + + case RTL_WRITE_OOB_MAC: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (my_cmd.len == 0 || my_cmd.len > 4) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_oob_mutex_lock(tp); + rtl8168_ocp_write(tp, my_cmd.offset, my_cmd.len, my_cmd.data); + rtl8168_oob_mutex_unlock(tp); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + case RTL_ENABLE_PCI_DIAG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + tp->rtk_enable_diag = 1; + spin_unlock_irqrestore(&tp->lock, flags); + + dprintk("enable rtk diag\n"); + break; + + case RTL_DISABLE_PCI_DIAG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + tp->rtk_enable_diag = 0; + spin_unlock_irqrestore(&tp->lock, flags); + + dprintk("disable rtk diag\n"); + break; + + case RTL_READ_MAC_OCP: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (my_cmd.offset % 2) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + my_cmd.data = rtl8168_mac_ocp_read(tp, my_cmd.offset); + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + break; + + case RTL_WRITE_MAC_OCP: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if ((my_cmd.offset % 2) || (my_cmd.len != 2)) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_mac_ocp_write(tp, my_cmd.offset, (u16)my_cmd.data); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + case RTL_DIRECT_READ_PHY_OCP: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + 
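+ /* direct PHY OCP access: the offset is used as a raw OCP address, without the OCP_STD_PHY_BASE translation applied by the standard MDIO helpers */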
my_cmd.data = rtl8168_mdio_prot_direct_read_phy_ocp(tp, my_cmd.offset); + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + + break; + + case RTL_DIRECT_WRITE_PHY_OCP: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_mdio_prot_direct_write_phy_ocp(tp, my_cmd.offset, my_cmd.data); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} diff --git a/addons/r8168/src/3.10.108/rtltool.h b/addons/r8168/src/3.10.108/rtltool.h new file mode 100644 index 00000000..d8731dff --- /dev/null +++ b/addons/r8168/src/3.10.108/rtltool.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. 
+ ***********************************************************************************/ + +#ifndef _LINUX_RTLTOOL_H +#define _LINUX_RTLTOOL_H + +#define SIOCRTLTOOL SIOCDEVPRIVATE+1 + +enum rtl_cmd { + RTLTOOL_READ_MAC=0, + RTLTOOL_WRITE_MAC, + RTLTOOL_READ_PHY, + RTLTOOL_WRITE_PHY, + RTLTOOL_READ_EPHY, + RTLTOOL_WRITE_EPHY, + RTLTOOL_READ_ERI, + RTLTOOL_WRITE_ERI, + RTLTOOL_READ_PCI, + RTLTOOL_WRITE_PCI, + RTLTOOL_READ_EEPROM, + RTLTOOL_WRITE_EEPROM, + + RTL_READ_OOB_MAC, + RTL_WRITE_OOB_MAC, + + RTL_ENABLE_PCI_DIAG, + RTL_DISABLE_PCI_DIAG, + + RTL_READ_MAC_OCP, + RTL_WRITE_MAC_OCP, + + RTL_DIRECT_READ_PHY_OCP, + RTL_DIRECT_WRITE_PHY_OCP, + + RTLTOOL_INVALID +}; + +struct rtltool_cmd { + __u32 cmd; + __u32 offset; + __u32 len; + __u32 data; +}; + +enum mode_access { + MODE_NONE=0, + MODE_READ, + MODE_WRITE +}; + +#ifdef __KERNEL__ +int rtl8168_tool_ioctl(struct rtl8168_private *tp, struct ifreq *ifr); +#endif + +#endif /* _LINUX_RTLTOOL_H */ diff --git a/addons/r8168/src/4.4.180/Makefile b/addons/r8168/src/4.4.180/Makefile new file mode 100644 index 00000000..74c19689 --- /dev/null +++ b/addons/r8168/src/4.4.180/Makefile @@ -0,0 +1,62 @@ +CONFIG_SOC_LAN = n +ENABLE_FIBER_SUPPORT = n +ENABLE_REALWOW_SUPPORT = n +ENABLE_DASH_SUPPORT = n +ENABLE_DASH_PRINTER_SUPPORT = n +CONFIG_DOWN_SPEED_100 = n +CONFIG_ASPM = y +ENABLE_S5WOL = y +ENABLE_S5_KEEP_CURR_MAC = n +ENABLE_EEE = y +ENABLE_S0_MAGIC_PACKET = n +CONFIG_DYNAMIC_ASPM = y +ENABLE_USE_FIRMWARE_FILE = n + +obj-m := r8168.o +r8168-objs := r8168_n.o r8168_asf.o rtl_eeprom.o rtltool.o +ifeq ($(CONFIG_SOC_LAN), y) + EXTRA_CFLAGS += -DCONFIG_SOC_LAN +endif +ifeq ($(ENABLE_FIBER_SUPPORT), y) + r8168-objs += r8168_fiber.o + EXTRA_CFLAGS += -DENABLE_FIBER_SUPPORT +endif +ifeq ($(ENABLE_REALWOW_SUPPORT), y) + r8168-objs += r8168_realwow.o + EXTRA_CFLAGS += -DENABLE_REALWOW_SUPPORT +endif +ifeq ($(ENABLE_DASH_SUPPORT), y) + r8168-objs += r8168_dash.o + EXTRA_CFLAGS += -DENABLE_DASH_SUPPORT +endif +ifeq ($(ENABLE_DASH_PRINTER_SUPPORT), y) + r8168-objs += r8168_dash.o + EXTRA_CFLAGS += -DENABLE_DASH_SUPPORT -DENABLE_DASH_PRINTER_SUPPORT +endif +EXTRA_CFLAGS += -DCONFIG_R8168_NAPI +EXTRA_CFLAGS += -DCONFIG_R8168_VLAN +ifeq ($(CONFIG_DOWN_SPEED_100), y) + EXTRA_CFLAGS += -DCONFIG_DOWN_SPEED_100 +endif +ifeq ($(CONFIG_ASPM), y) + EXTRA_CFLAGS += -DCONFIG_ASPM +endif +ifeq ($(ENABLE_S5WOL), y) + EXTRA_CFLAGS += -DENABLE_S5WOL +endif +ifeq ($(ENABLE_S5_KEEP_CURR_MAC), y) + EXTRA_CFLAGS += -DENABLE_S5_KEEP_CURR_MAC +endif +ifeq ($(ENABLE_EEE), y) + EXTRA_CFLAGS += -DENABLE_EEE +endif +ifeq ($(ENABLE_S0_MAGIC_PACKET), y) + EXTRA_CFLAGS += -DENABLE_S0_MAGIC_PACKET +endif +ifeq ($(CONFIG_DYNAMIC_ASPM), y) + EXTRA_CFLAGS += -DCONFIG_DYNAMIC_ASPM +endif +ifeq ($(ENABLE_USE_FIRMWARE_FILE), y) + r8168-objs += r8168_firmware.o + EXTRA_CFLAGS += -DENABLE_USE_FIRMWARE_FILE +endif diff --git a/addons/r8168/src/4.4.180/r8168.h b/addons/r8168/src/4.4.180/r8168.h new file mode 100644 index 00000000..4bd1047a --- /dev/null +++ b/addons/r8168/src/4.4.180/r8168.h @@ -0,0 +1,1842 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. 
+# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +#include +#include "r8168_dash.h" +#include "r8168_realwow.h" +#include "r8168_fiber.h" + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) +typedef int netdev_tx_t; +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) +#define skb_transport_offset(skb) (skb->h.raw - skb->data) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) +#define device_set_wakeup_enable(dev, val) do {} while (0) +#endif + +#if 0 +static inline void ether_addr_copy(u8 *dst, const u8 *src) +{ + u16 *a = (u16 *)dst; + const u16 *b = (const u16 *)src; + + a[0] = b[0]; + a[1] = b[1]; + a[2] = b[2]; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) +#define IS_ERR_OR_NULL(ptr) (!ptr) +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) +#define reinit_completion(x) ((x)->done = 0) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) +#define pm_runtime_mark_last_busy(x) +#define pm_runtime_put_autosuspend(x) pm_runtime_put(x) +#define pm_runtime_put_sync_autosuspend(x) pm_runtime_put_sync(x) + +static inline bool pm_runtime_suspended(struct device *dev) +{ + return dev->power.runtime_status == RPM_SUSPENDED + && !dev->power.disable_depth; +} + +static inline bool pm_runtime_active(struct device *dev) +{ + return dev->power.runtime_status == RPM_ACTIVE + || dev->power.disable_depth; +} +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) +#define queue_delayed_work(long_wq, work, delay) schedule_delayed_work(work, delay) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) +#define netif_printk(priv, type, level, netdev, fmt, args...) \ + do { \ + if (netif_msg_##type(priv)) \ + printk(level "%s: " fmt,(netdev)->name , ##args); \ + } while (0) + +#define netif_emerg(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_EMERG, netdev, fmt, ##args) +#define netif_alert(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_ALERT, netdev, fmt, ##args) +#define netif_crit(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_CRIT, netdev, fmt, ##args) +#define netif_err(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_ERR, netdev, fmt, ##args) +#define netif_warn(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_WARNING, netdev, fmt, ##args) +#define netif_notice(priv, type, netdev, fmt, args...) 
\ + netif_printk(priv, type, KERN_NOTICE, netdev, fmt, ##args) +#define netif_info(priv, type, netdev, fmt, args...) \ + netif_printk(priv, type, KERN_INFO, (netdev), fmt, ##args) +#endif +#endif +#endif +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) +#define setup_timer(_timer, _function, _data) \ +do { \ + (_timer)->function = _function; \ + (_timer)->data = _data; \ + init_timer(_timer); \ +} while (0) +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) +#if defined(skb_vlan_tag_present) && !defined(vlan_tx_tag_present) +#define vlan_tx_tag_present skb_vlan_tag_present +#endif +#if defined(skb_vlan_tag_get) && !defined(vlan_tx_tag_get) +#define vlan_tx_tag_get skb_vlan_tag_get +#endif +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) + +#define RTL_ALLOC_SKB_INTR(tp, length) dev_alloc_skb(length) +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) +#undef RTL_ALLOC_SKB_INTR +#define RTL_ALLOC_SKB_INTR(tp, length) napi_alloc_skb(&tp->napi, length) +#endif +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) +#define netdev_features_t u32 +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,5,0) +#define NETIF_F_ALL_CSUM NETIF_F_CSUM_MASK +#else +#ifndef NETIF_F_ALL_CSUM +#define NETIF_F_ALL_CSUM NETIF_F_CSUM_MASK +#endif +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,37) +#define ENABLE_R8168_PROCFS +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) +#define NETIF_F_HW_VLAN_RX NETIF_F_HW_VLAN_CTAG_RX +#define NETIF_F_HW_VLAN_TX NETIF_F_HW_VLAN_CTAG_TX +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0) +#define __devinit +#define __devexit +#define __devexit_p(func) func +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) +#define CHECKSUM_PARTIAL CHECKSUM_HW +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) +#define irqreturn_t void +#define IRQ_HANDLED 1 +#define IRQ_NONE 0 +#define IRQ_RETVAL(x) +#endif + +#ifndef NETIF_F_RXALL +#define NETIF_F_RXALL 0 +#endif + +#ifndef NETIF_F_RXFCS +#define NETIF_F_RXFCS 0 +#endif + +#ifndef HAVE_FREE_NETDEV +#define free_netdev(x) kfree(x) +#endif + +#ifndef SET_NETDEV_DEV +#define SET_NETDEV_DEV(net, pdev) +#endif + +#ifndef SET_MODULE_OWNER +#define SET_MODULE_OWNER(dev) +#endif + +#ifndef SA_SHIRQ +#define SA_SHIRQ IRQF_SHARED +#endif + +#ifndef NETIF_F_GSO +#define gso_size tso_size +#define gso_segs tso_segs +#endif + +#ifndef PCI_VENDOR_ID_DLINK +#define PCI_VENDOR_ID_DLINK 0x1186 +#endif + +#ifndef dma_mapping_error +#define dma_mapping_error(a,b) 0 +#endif + +#ifndef netif_err +#define netif_err(a,b,c,d) +#endif + +#ifndef AUTONEG_DISABLE +#define AUTONEG_DISABLE 0x00 +#endif + +#ifndef AUTONEG_ENABLE +#define AUTONEG_ENABLE 0x01 +#endif + +#ifndef BMCR_SPEED1000 +#define BMCR_SPEED1000 0x0040 +#endif + +#ifndef BMCR_SPEED100 +#define BMCR_SPEED100 0x2000 +#endif + +#ifndef BMCR_SPEED10 +#define BMCR_SPEED10 0x0000 +#endif + +#ifndef SPEED_UNKNOWN +#define SPEED_UNKNOWN -1 +#endif + +#ifndef DUPLEX_UNKNOWN +#define DUPLEX_UNKNOWN 0xff +#endif + +#ifndef SUPPORTED_Pause +#define SUPPORTED_Pause (1 << 13) +#endif + +#ifndef SUPPORTED_Asym_Pause +#define SUPPORTED_Asym_Pause (1 << 14) +#endif + +#ifndef MDIO_EEE_100TX +#define MDIO_EEE_100TX 0x0002 +#endif + +#ifndef MDIO_EEE_1000T +#define MDIO_EEE_1000T 0x0004 +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) +#ifdef CONFIG_NET_POLL_CONTROLLER +#define RTL_NET_POLL_CONTROLLER 
dev->poll_controller=rtl8168_netpoll +#else +#define RTL_NET_POLL_CONTROLLER +#endif + +#ifdef CONFIG_R8168_VLAN +#define RTL_SET_VLAN dev->vlan_rx_register=rtl8168_vlan_rx_register +#else +#define RTL_SET_VLAN +#endif + +#define RTL_NET_DEVICE_OPS(ops) dev->open=rtl8168_open; \ + dev->hard_start_xmit=rtl8168_start_xmit; \ + dev->get_stats=rtl8168_get_stats; \ + dev->stop=rtl8168_close; \ + dev->tx_timeout=rtl8168_tx_timeout; \ + dev->set_multicast_list=rtl8168_set_rx_mode; \ + dev->change_mtu=rtl8168_change_mtu; \ + dev->set_mac_address=rtl8168_set_mac_address; \ + dev->do_ioctl=rtl8168_do_ioctl; \ + RTL_NET_POLL_CONTROLLER; \ + RTL_SET_VLAN; +#else +#define RTL_NET_DEVICE_OPS(ops) dev->netdev_ops=&ops +#endif + +#ifndef FALSE +#define FALSE 0 +#endif + +#ifndef TRUE +#define TRUE 1 +#endif + +#ifndef false +#define false 0 +#endif + +#ifndef true +#define true 1 +#endif + +//Hardware will continue interrupt 10 times after interrupt finished. +#define RTK_KEEP_INTERRUPT_COUNT (10) + +//Due to the hardware design of RTL8111B, the low 32 bit address of receive +//buffer must be 8-byte alignment. +#ifndef NET_IP_ALIGN +#define NET_IP_ALIGN 2 +#endif +#define RTK_RX_ALIGN 8 + +#ifdef CONFIG_R8168_NAPI +#define NAPI_SUFFIX "-NAPI" +#else +#define NAPI_SUFFIX "" +#endif +#ifdef ENABLE_FIBER_SUPPORT +#define FIBER_SUFFIX "-FIBER" +#else +#define FIBER_SUFFIX "" +#endif +#ifdef ENABLE_REALWOW_SUPPORT +#define REALWOW_SUFFIX "-REALWOW" +#else +#define REALWOW_SUFFIX "" +#endif +#if defined(ENABLE_DASH_PRINTER_SUPPORT) +#define DASH_SUFFIX "-PRINTER" +#elif defined(ENABLE_DASH_SUPPORT) +#define DASH_SUFFIX "-DASH" +#else +#define DASH_SUFFIX "" +#endif + +#define RTL8168_VERSION "8.049.02" NAPI_SUFFIX FIBER_SUFFIX REALWOW_SUFFIX DASH_SUFFIX +#define MODULENAME "r8168" +#define PFX MODULENAME ": " + +#define GPL_CLAIM "\ +r8168 Copyright (C) 2021 Realtek NIC software team \n \ +This program comes with ABSOLUTELY NO WARRANTY; for details, please see . \n \ +This is free software, and you are welcome to redistribute it under certain conditions; see . \n" + +#ifdef RTL8168_DEBUG +#define assert(expr) \ + if(!(expr)) { \ + printk( "Assertion failed! %s,%s,%s,line=%d\n", \ + #expr,__FILE__,__FUNCTION__,__LINE__); \ + } +#define dprintk(fmt, args...) do { printk(PFX fmt, ## args); } while (0) +#else +#define assert(expr) do {} while (0) +#define dprintk(fmt, args...) do {} while (0) +#endif /* RTL8168_DEBUG */ + +#define R8168_MSG_DEFAULT \ + (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) + +#ifdef CONFIG_R8168_NAPI +#define rtl8168_rx_hwaccel_skb vlan_hwaccel_receive_skb +#define rtl8168_rx_quota(count, quota) min(count, quota) +#else +#define rtl8168_rx_hwaccel_skb vlan_hwaccel_rx +#define rtl8168_rx_quota(count, quota) count +#endif + +/* MAC address length */ +#ifndef MAC_ADDR_LEN +#define MAC_ADDR_LEN 6 +#endif + +#ifndef MAC_PROTOCOL_LEN +#define MAC_PROTOCOL_LEN 2 +#endif + +#ifndef ETH_FCS_LEN +#define ETH_FCS_LEN 4 +#endif + +#ifndef NETIF_F_TSO6 +#define NETIF_F_TSO6 0 +#endif + +#define Reserved2_data 7 +#define RX_DMA_BURST 7 /* Maximum PCI burst, '6' is 1024 */ +#define TX_DMA_BURST_unlimited 7 +#define TX_DMA_BURST_1024 6 +#define TX_DMA_BURST_512 5 +#define TX_DMA_BURST_256 4 +#define TX_DMA_BURST_128 3 +#define TX_DMA_BURST_64 2 +#define TX_DMA_BURST_32 1 +#define TX_DMA_BURST_16 0 +#define Reserved1_data 0x3F +#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... 
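   (Aside, not from the sources: the Jumbo_Frame_Nk constants just below
   budget a full on-wire frame into an N-KiB buffer, i.e.
       N * 1024 - ETH_HLEN(14) - VLAN_HLEN(4) - ETH_FCS_LEN(4)
   so, for example, Jumbo_Frame_9k = 9216 - 22 = 9194 bytes of payload,
   which the per-chip probe code can then adopt as max_jumbo_frame_size.)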
*/ +#define Jumbo_Frame_1k ETH_DATA_LEN +#define Jumbo_Frame_2k (2*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_3k (3*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_4k (4*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_5k (5*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_6k (6*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_7k (7*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_8k (8*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define Jumbo_Frame_9k (9*1024 - ETH_HLEN - VLAN_HLEN - ETH_FCS_LEN) +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ +#define RxEarly_off_V1 (0x07 << 11) +#define RxEarly_off_V2 (1 << 11) +#define Rx_Single_fetch_V2 (1 << 14) + +#define R8168_REGS_SIZE (256) +#define R8168_MAC_REGS_SIZE (256) +#define R8168_PHY_REGS_SIZE (16*2) +#define R8168_EPHY_REGS_SIZE (31*2) +#define R8168_ERI_REGS_SIZE (0x100) +#define R8168_REGS_DUMP_SIZE (0x400) +#define R8168_PCI_REGS_SIZE (0x100) +#define R8168_NAPI_WEIGHT 64 + +#define RTL8168_TX_TIMEOUT (6 * HZ) +#define RTL8168_LINK_TIMEOUT (1 * HZ) +#define RTL8168_ESD_TIMEOUT (2 * HZ) + +#define NUM_TX_DESC 1024 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 1024 /* Number of Rx descriptor registers */ + +#define RX_BUF_SIZE 0x05F3 /* 0x05F3 = 1522bye + 1 */ +#define R8168_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8168_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define OCP_STD_PHY_BASE 0xa400 + +#define NODE_ADDRESS_SIZE 6 + +#define SHORT_PACKET_PADDING_BUF_SIZE 256 + +#define RTK_MAGIC_DEBUG_VALUE 0x0badbeef + +/* write/read MMIO register */ +#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg)) +#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg)) +#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg)) +#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg)) +#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg)) +#define RTL_R32(tp, reg) ((unsigned long) readl(tp->mmio_addr + (reg))) + +#ifndef DMA_64BIT_MASK +#define DMA_64BIT_MASK 0xffffffffffffffffULL +#endif + +#ifndef DMA_32BIT_MASK +#define DMA_32BIT_MASK 0x00000000ffffffffULL +#endif + +#ifndef NETDEV_TX_OK +#define NETDEV_TX_OK 0 /* driver took care of packet */ +#endif + +#ifndef NETDEV_TX_BUSY +#define NETDEV_TX_BUSY 1 /* driver tx path was busy*/ +#endif + +#ifndef NETDEV_TX_LOCKED +#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */ +#endif + +#ifndef ADVERTISED_Pause +#define ADVERTISED_Pause (1 << 13) +#endif + +#ifndef ADVERTISED_Asym_Pause +#define ADVERTISED_Asym_Pause (1 << 14) +#endif + +#ifndef ADVERTISE_PAUSE_CAP +#define ADVERTISE_PAUSE_CAP 0x400 +#endif + +#ifndef ADVERTISE_PAUSE_ASYM +#define ADVERTISE_PAUSE_ASYM 0x800 +#endif + +#ifndef MII_CTRL1000 +#define MII_CTRL1000 0x09 +#endif + +#ifndef ADVERTISE_1000FULL +#define ADVERTISE_1000FULL 0x200 +#endif + +#ifndef ADVERTISE_1000HALF +#define ADVERTISE_1000HALF 0x100 +#endif + +#ifndef ETH_MIN_MTU +#define ETH_MIN_MTU 68 +#endif + +/*****************************************************************************/ + +//#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) +#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ + (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ + ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) +/* copied from linux kernel 2.6.20 include/linux/netdev.h */ +#define NETDEV_ALIGN 32 +#define NETDEV_ALIGN_CONST (NETDEV_ALIGN - 1) + +static inline void 
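/* A usage sketch (illustrative only): the RTL_Wn/RTL_Rn wrappers above are
 * plain write/read accesses relative to tp->mmio_addr; the register and
 * bit names come from the enums later in this header, e.g. the classic
 * unlock/modify/lock sequence around the Config registers: */
#if 0
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) | PMSTS_En);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
#endif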
*netdev_priv(struct net_device *dev) +{ + return (char *)dev + ((sizeof(struct net_device) + + NETDEV_ALIGN_CONST) + & ~NETDEV_ALIGN_CONST); +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) + +/*****************************************************************************/ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) +#define RTLDEV tp +#else +#define RTLDEV dev +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) +/*****************************************************************************/ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) +typedef struct net_device *napi_ptr; +typedef int *napi_budget; + +#define napi dev +#define RTL_NAPI_CONFIG(ndev, priv, function, weig) ndev->poll=function; \ + ndev->weight=weig; +#define RTL_NAPI_QUOTA(budget, ndev) min(*budget, ndev->quota) +#define RTL_GET_PRIV(stuct_ptr, priv_struct) netdev_priv(stuct_ptr) +#define RTL_GET_NETDEV(priv_ptr) +#define RTL_RX_QUOTA(budget) *budget +#define RTL_NAPI_QUOTA_UPDATE(ndev, work_done, budget) *budget -= work_done; \ + ndev->quota -= work_done; +#define RTL_NETIF_RX_COMPLETE(dev, napi, work_done) netif_rx_complete(dev) +#define RTL_NETIF_RX_SCHEDULE_PREP(dev, napi) netif_rx_schedule_prep(dev) +#define __RTL_NETIF_RX_SCHEDULE(dev, napi) __netif_rx_schedule(dev) +#define RTL_NAPI_RETURN_VALUE work_done >= work_to_do +#define RTL_NAPI_ENABLE(dev, napi) netif_poll_enable(dev) +#define RTL_NAPI_DISABLE(dev, napi) netif_poll_disable(dev) +#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) +#else +typedef struct napi_struct *napi_ptr; +typedef int napi_budget; + +#define RTL_NAPI_CONFIG(ndev, priv, function, weight) netif_napi_add(ndev, &priv->napi, function, weight) +#define RTL_NAPI_QUOTA(budget, ndev) min(budget, budget) +#define RTL_GET_PRIV(stuct_ptr, priv_struct) container_of(stuct_ptr, priv_struct, stuct_ptr) +#define RTL_GET_NETDEV(priv_ptr) struct net_device *dev = priv_ptr->dev; +#define RTL_RX_QUOTA(budget) budget +#define RTL_NAPI_QUOTA_UPDATE(ndev, work_done, budget) +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) +#define RTL_NETIF_RX_COMPLETE(dev, napi, work_done) netif_rx_complete(dev, napi) +#define RTL_NETIF_RX_SCHEDULE_PREP(dev, napi) netif_rx_schedule_prep(dev, napi) +#define __RTL_NETIF_RX_SCHEDULE(dev, napi) __netif_rx_schedule(dev, napi) +#endif +#if LINUX_VERSION_CODE == KERNEL_VERSION(2,6,29) +#define RTL_NETIF_RX_COMPLETE(dev, napi, work_done) netif_rx_complete(napi) +#define RTL_NETIF_RX_SCHEDULE_PREP(dev, napi) netif_rx_schedule_prep(napi) +#define __RTL_NETIF_RX_SCHEDULE(dev, napi) __netif_rx_schedule(napi) +#endif +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,29) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) +#define RTL_NETIF_RX_COMPLETE(dev, napi, work_done) napi_complete_done(napi, work_done) +#else +#define RTL_NETIF_RX_COMPLETE(dev, napi, work_done) napi_complete(napi) +#endif +#define RTL_NETIF_RX_SCHEDULE_PREP(dev, napi) napi_schedule_prep(napi) +#define __RTL_NETIF_RX_SCHEDULE(dev, napi) __napi_schedule(napi) +#endif +#define RTL_NAPI_RETURN_VALUE work_done +#define RTL_NAPI_ENABLE(dev, napi) napi_enable(napi) +#define RTL_NAPI_DISABLE(dev, napi) napi_disable(napi) +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) +#define RTL_NAPI_DEL(priv) +#else +#define RTL_NAPI_DEL(priv) netif_napi_del(&priv->napi) +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) + +/*****************************************************************************/ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) +#ifdef 
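/* Sketch of a poll routine written against the compat layer above (RX
 * handling elided); the point is that one definition builds both before
 * and after the 2.6.24 napi_struct API: */
#if 0
static int example_poll(napi_ptr napi, napi_budget budget)
{
        struct rtl8168_private *tp = RTL_GET_PRIV(napi, struct rtl8168_private);
        RTL_GET_NETDEV(tp)
        unsigned int work_to_do = RTL_RX_QUOTA(budget);
        unsigned int work_done = 0;

        /* ... receive up to work_to_do packets, bumping work_done ... */

        RTL_NAPI_QUOTA_UPDATE(dev, work_done, budget);
        if (work_done < work_to_do)
                RTL_NETIF_RX_COMPLETE(dev, napi, work_done);

        return RTL_NAPI_RETURN_VALUE;
}
#endif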
__CHECKER__ +#define __iomem __attribute__((noderef, address_space(2))) +extern void __chk_io_ptr(void __iomem *); +#define __bitwise __attribute__((bitwise)) +#else +#define __iomem +#define __chk_io_ptr(x) (void)0 +#define __bitwise +#endif +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) + +/*****************************************************************************/ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) +#ifdef __CHECKER__ +#define __force __attribute__((force)) +#else +#define __force +#endif +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) + +#ifndef module_param +#define module_param(v,t,p) MODULE_PARM(v, "i"); +#endif + +#ifndef PCI_DEVICE +#define PCI_DEVICE(vend,dev) \ + .vendor = (vend), .device = (dev), \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID +#endif + +/*****************************************************************************/ +/* 2.5.28 => 2.4.23 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) + +static inline void _kc_synchronize_irq(void) +{ + synchronize_irq(); +} +#undef synchronize_irq +#define synchronize_irq(X) _kc_synchronize_irq() + +#include +#define work_struct tq_struct +#undef INIT_WORK +#define INIT_WORK(a,b,c) INIT_TQUEUE(a,(void (*)(void *))b,c) +#undef container_of +#define container_of list_entry +#define schedule_work schedule_task +#define flush_scheduled_work flush_scheduled_tasks +#endif /* 2.5.28 => 2.4.17 */ + +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) +#define MODULE_VERSION(_version) MODULE_INFO(version, _version) +#endif /* 2.6.4 => 2.6.0 */ +/*****************************************************************************/ +/* 2.6.0 => 2.5.28 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) +#define MODULE_INFO(version, _version) +#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT +#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 +#endif + +#define pci_set_consistent_dma_mask(dev,mask) 1 + +#undef dev_put +#define dev_put(dev) __dev_put(dev) + +#ifndef skb_fill_page_desc +#define skb_fill_page_desc _kc_skb_fill_page_desc +extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); +#endif + +#ifndef pci_dma_mapping_error +#define pci_dma_mapping_error _kc_pci_dma_mapping_error +static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) +{ + return dma_addr == 0; +} +#endif + +#undef ALIGN +#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) + +#endif /* 2.6.0 => 2.5.28 */ + +/*****************************************************************************/ +/* 2.4.22 => 2.4.17 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) +#define pci_name(x) ((x)->slot_name) +#endif /* 2.4.22 => 2.4.17 */ + +/*****************************************************************************/ +/* 2.6.5 => 2.6.0 */ +#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) +#define pci_dma_sync_single_for_cpu pci_dma_sync_single +#define pci_dma_sync_single_for_device pci_dma_sync_single_for_cpu +#endif /* 2.6.5 => 2.6.0 */ + +/*****************************************************************************/ + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) +/* + * initialize a work-struct's func and data pointers: + */ +#define PREPARE_WORK(_work, _func, _data) \ + do { \ + (_work)->func = _func; \ + (_work)->data = _data; \ + } while (0) + +#endif +/*****************************************************************************/ +/* 2.6.4 => 2.6.0 */ +#if ((LINUX_VERSION_CODE < 
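/* Sketch (an illustrative table, not the driver's actual one; 0x8168 and
 * the D-Link 0x4300 are IDs this driver is known to bind): the
 * PCI_DEVICE() fallback above lets the table be written identically on
 * kernels that predate the macro. */
#if 0
static struct pci_device_id example_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168) },
        { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300) },
        { 0, },
};
#endif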
KERNEL_VERSION(2,4,25) && \ + LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22)) || \ + (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ + LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4))) +#define ETHTOOL_OPS_COMPAT +#endif /* 2.6.4 => 2.6.0 */ + +/*****************************************************************************/ +/* Installations with ethtool version without eeprom, adapter id, or statistics + * support */ + +#ifndef ETH_GSTRING_LEN +#define ETH_GSTRING_LEN 32 +#endif + +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x1d +#undef ethtool_drvinfo +#define ethtool_drvinfo k_ethtool_drvinfo +struct k_ethtool_drvinfo { + u32 cmd; + char driver[32]; + char version[32]; + char fw_version[32]; + char bus_info[32]; + char reserved1[32]; + char reserved2[16]; + u32 n_stats; + u32 testinfo_len; + u32 eedump_len; + u32 regdump_len; +}; + +struct ethtool_stats { + u32 cmd; + u32 n_stats; + u64 data[0]; +}; +#endif /* ETHTOOL_GSTATS */ + +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x1c +#endif /* ETHTOOL_PHYS_ID */ + +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x1b +enum ethtool_stringset { + ETH_SS_TEST = 0, + ETH_SS_STATS, +}; +struct ethtool_gstrings { + u32 cmd; /* ETHTOOL_GSTRINGS */ + u32 string_set; /* string set id e.c. ETH_SS_TEST, etc*/ + u32 len; /* number of strings in the string set */ + u8 data[0]; +}; +#endif /* ETHTOOL_GSTRINGS */ + +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x1a +enum ethtool_test_flags { + ETH_TEST_FL_OFFLINE = (1 << 0), + ETH_TEST_FL_FAILED = (1 << 1), +}; +struct ethtool_test { + u32 cmd; + u32 flags; + u32 reserved; + u32 len; + u64 data[0]; +}; +#endif /* ETHTOOL_TEST */ + +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0xb +#undef ETHTOOL_GREGS +struct ethtool_eeprom { + u32 cmd; + u32 magic; + u32 offset; + u32 len; + u8 data[0]; +}; + +struct ethtool_value { + u32 cmd; + u32 data; +}; +#endif /* ETHTOOL_GEEPROM */ + +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0xa +#endif /* ETHTOOL_GLINK */ + +#ifndef ETHTOOL_GREGS +#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ +#define ethtool_regs _kc_ethtool_regs +/* for passing big chunks of data */ +struct _kc_ethtool_regs { + u32 cmd; + u32 version; /* driver-specific, indicates different chips/revs */ + u32 len; /* bytes */ + u8 data[0]; +}; +#endif /* ETHTOOL_GREGS */ + +#ifndef ETHTOOL_GMSGLVL +#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ +#endif +#ifndef ETHTOOL_SMSGLVL +#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. */ +#endif +#ifndef ETHTOOL_NWAY_RST +#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ +#endif +#ifndef ETHTOOL_GLINK +#define ETHTOOL_GLINK 0x0000000a /* Get link status */ +#endif +#ifndef ETHTOOL_GEEPROM +#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ +#endif +#ifndef ETHTOOL_SEEPROM +#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ +#endif +#ifndef ETHTOOL_GCOALESCE +#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ +/* for configuring coalescing parameters of chip */ +#define ethtool_coalesce _kc_ethtool_coalesce +struct _kc_ethtool_coalesce { + u32 cmd; /* ETHTOOL_{G,S}COALESCE */ + + /* How many usecs to delay an RX interrupt after + * a packet arrives. If 0, only rx_max_coalesced_frames + * is used. + */ + u32 rx_coalesce_usecs; + + /* How many packets to delay an RX interrupt after + * a packet arrives. If 0, only rx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause RX interrupts to never be + * generated. 
+ */ + u32 rx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 rx_coalesce_usecs_irq; + u32 rx_max_coalesced_frames_irq; + + /* How many usecs to delay a TX interrupt after + * a packet is sent. If 0, only tx_max_coalesced_frames + * is used. + */ + u32 tx_coalesce_usecs; + + /* How many packets to delay a TX interrupt after + * a packet is sent. If 0, only tx_coalesce_usecs is + * used. It is illegal to set both usecs and max frames + * to zero as this would cause TX interrupts to never be + * generated. + */ + u32 tx_max_coalesced_frames; + + /* Same as above two parameters, except that these values + * apply while an IRQ is being serviced by the host. Not + * all cards support this feature and the values are ignored + * in that case. + */ + u32 tx_coalesce_usecs_irq; + u32 tx_max_coalesced_frames_irq; + + /* How many usecs to delay in-memory statistics + * block updates. Some drivers do not have an in-memory + * statistic block, and in such cases this value is ignored. + * This value must not be zero. + */ + u32 stats_block_coalesce_usecs; + + /* Adaptive RX/TX coalescing is an algorithm implemented by + * some drivers to improve latency under low packet rates and + * improve throughput under high packet rates. Some drivers + * only implement one of RX or TX adaptive coalescing. Anything + * not implemented by the driver causes these values to be + * silently ignored. + */ + u32 use_adaptive_rx_coalesce; + u32 use_adaptive_tx_coalesce; + + /* When the packet rate (measured in packets per second) + * is below pkt_rate_low, the {rx,tx}_*_low parameters are + * used. + */ + u32 pkt_rate_low; + u32 rx_coalesce_usecs_low; + u32 rx_max_coalesced_frames_low; + u32 tx_coalesce_usecs_low; + u32 tx_max_coalesced_frames_low; + + /* When the packet rate is below pkt_rate_high but above + * pkt_rate_low (both measured in packets per second) the + * normal {rx,tx}_* coalescing parameters are used. + */ + + /* When the packet rate is (measured in packets per second) + * is above pkt_rate_high, the {rx,tx}_*_high parameters are + * used. + */ + u32 pkt_rate_high; + u32 rx_coalesce_usecs_high; + u32 rx_max_coalesced_frames_high; + u32 tx_coalesce_usecs_high; + u32 tx_max_coalesced_frames_high; + + /* How often to do adaptive coalescing packet rate sampling, + * measured in seconds. Must not be zero. + */ + u32 rate_sample_interval; +}; +#endif /* ETHTOOL_GCOALESCE */ + +#ifndef ETHTOOL_SCOALESCE +#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ +#endif +#ifndef ETHTOOL_GRINGPARAM +#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ +/* for configuring RX/TX ring parameters */ +#define ethtool_ringparam _kc_ethtool_ringparam +struct _kc_ethtool_ringparam { + u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ + + /* Read only attributes. These indicate the maximum number + * of pending RX/TX ring entries the driver will allow the + * user to set. + */ + u32 rx_max_pending; + u32 rx_mini_max_pending; + u32 rx_jumbo_max_pending; + u32 tx_max_pending; + + /* Values changeable by the user. The valid values are + * in the range 1 to the "*_max_pending" counterpart above. + */ + u32 rx_pending; + u32 rx_mini_pending; + u32 rx_jumbo_pending; + u32 tx_pending; +}; +#endif /* ETHTOOL_GRINGPARAM */ + +#ifndef ETHTOOL_SRINGPARAM +#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. 
*/ +#endif +#ifndef ETHTOOL_GPAUSEPARAM +#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ +/* for configuring link flow control parameters */ +#define ethtool_pauseparam _kc_ethtool_pauseparam +struct _kc_ethtool_pauseparam { + u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ + + /* If the link is being auto-negotiated (via ethtool_cmd.autoneg + * being true) the user may set 'autonet' here non-zero to have the + * pause parameters be auto-negotiated too. In such a case, the + * {rx,tx}_pause values below determine what capabilities are + * advertised. + * + * If 'autoneg' is zero or the link is not being auto-negotiated, + * then {rx,tx}_pause force the driver to use/not-use pause + * flow control. + */ + u32 autoneg; + u32 rx_pause; + u32 tx_pause; +}; +#endif /* ETHTOOL_GPAUSEPARAM */ + +#ifndef ETHTOOL_SPAUSEPARAM +#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ +#endif +#ifndef ETHTOOL_GRXCSUM +#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_SRXCSUM +#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GTXCSUM +#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STXCSUM +#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_GSG +#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable +* (ethtool_value) */ +#endif +#ifndef ETHTOOL_SSG +#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable +* (ethtool_value). */ +#endif +#ifndef ETHTOOL_TEST +#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ +#endif +#ifndef ETHTOOL_GSTRINGS +#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ +#endif +#ifndef ETHTOOL_PHYS_ID +#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ +#endif +#ifndef ETHTOOL_GSTATS +#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ +#endif +#ifndef ETHTOOL_GTSO +#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ +#endif +#ifndef ETHTOOL_STSO +#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ +#endif + +#ifndef ETHTOOL_BUSINFO_LEN +#define ETHTOOL_BUSINFO_LEN 32 +#endif + +/*****************************************************************************/ + +enum RTL8168_DSM_STATE { + DSM_MAC_INIT = 1, + DSM_NIC_GOTO_D3 = 2, + DSM_IF_DOWN = 3, + DSM_NIC_RESUME_D3 = 4, + DSM_IF_UP = 5, +}; + +enum RTL8168_registers { + MAC0 = 0x00, /* Ethernet hardware address. */ + MAC4 = 0x04, + MAR0 = 0x08, /* Multicast filter. 
*/ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + CustomLED = 0x18, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3C, + IntrStatus = 0x3E, + TxConfig = 0x40, + RxConfig = 0x44, + TCTR = 0x48, + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + TDFNR = 0x57, + TimeInt0 = 0x58, + TimeInt1 = 0x5C, + PHYAR = 0x60, + CSIDR = 0x64, + CSIAR = 0x68, + PHYstatus = 0x6C, + MACDBG = 0x6D, + GPIO = 0x6E, + PMCH = 0x6F, + ERIDR = 0x70, + ERIAR = 0x74, + EPHY_RXER_NUM = 0x7C, + EPHYAR = 0x80, + TimeInt2 = 0x8C, + OCPDR = 0xB0, + MACOCP = 0xB0, + OCPAR = 0xB4, + SecMAC0 = 0xB4, + SecMAC4 = 0xB8, + PHYOCP = 0xB8, + DBG_reg = 0xD1, + TwiCmdReg = 0xD2, + MCUCmd_reg = 0xD3, + RxMaxSize = 0xDA, + EFUSEAR = 0xDC, + CPlusCmd = 0xE0, + IntrMitigate = 0xE2, + RxDescAddrLow = 0xE4, + RxDescAddrHigh = 0xE8, + MTPS = 0xEC, + FuncEvent = 0xF0, + PPSW = 0xF2, + FuncEventMask = 0xF4, + TimeInt3 = 0xF4, + FuncPresetState = 0xF8, + CMAC_IBCR0 = 0xF8, + CMAC_IBCR2 = 0xF9, + CMAC_IBIMR0 = 0xFA, + CMAC_IBISR0 = 0xFB, + FuncForceEvent = 0xFC, +}; + +enum RTL8168_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxDescUnavail = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xC0, + Cfg9346_EEDO = (1 << 0), + Cfg9346_EEDI = (1 << 1), + Cfg9346_EESK = (1 << 2), + Cfg9346_EECS = (1 << 3), + Cfg9346_EEM0 = (1 << 6), + Cfg9346_EEM1 = (1 << 7), + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, + + /* Transmit Priority Polling*/ + HPQ = 0x80, + NPQ = 0x40, + FSWInt = 0x01, + + /* RxConfigBits */ + Reserved2_shift = 13, + RxCfgDMAShift = 8, + RxCfg_128_int_en = (1 << 15), + RxCfg_fet_multi_en = (1 << 14), + RxCfg_half_refetch = (1 << 13), + RxCfg_9356SEL = (1 << 6), + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + TxMACLoopBack = (1 << 17), /* MAC loopback */ + + /* Config1 register */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + PMSTS_En = (1 << 5), + + /* Config3 register */ + Isolate_en = (1 << 12), /* Isolate enable */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* This bit is reserved in RTL8168B.*/ + /* Wake up when the cable connection is re-established */ + ECRCEN = (1 << 3), /* This bit is reserved in RTL8168B*/ + Jumbo_En0 = (1 << 2), /* This bit is reserved in RTL8168B*/ + RDY_TO_L23 = (1 << 1), /* This bit is reserved in RTL8168B*/ + Beacon_en = (1 << 0), /* This bit is reserved in RTL8168B*/ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* This bit is reserved in RTL8168B*/ + + /* Config5 register */ + BWF 
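/*
 * Aside (illustrative, not lifted from the sources): the Accept* bits
 * above are OR-ed into the low bits of RxConfig; a typical
 * non-promiscuous filter is
 *     AcceptBroadcast | AcceptMulticast | AcceptMyPhys
 * with AcceptAllPhys added only in promiscuous mode, and AcceptErr and
 * AcceptRunt normally left clear.
 */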
= (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* CPlusCmd */ + EnableBist = (1 << 15), + Macdbgo_oe = (1 << 14), + Normal_mode = (1 << 13), + Force_halfdup = (1 << 12), + Force_rxflow_en = (1 << 11), + Force_txflow_en = (1 << 10), + Cxpl_dbg_sel = (1 << 9),//This bit is reserved in RTL8168B + ASF = (1 << 8),//This bit is reserved in RTL8168C + PktCntrDisable = (1 << 7), + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + Macdbgo_sel = 0x001C, + INTT_0 = 0x0000, + INTT_1 = 0x0001, + INTT_2 = 0x0002, + INTT_3 = 0x0003, + + /* rtl8168_PHYstatus */ + PowerSaveStatus = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* DBG_reg */ + Fix_Nak_1 = (1 << 4), + Fix_Nak_2 = (1 << 3), + DBGPIN_E2 = (1 << 0), + + /* ResetCounterCommand */ + CounterReset = 0x1, + /* DumpCounterCommand */ + CounterDump = 0x8, + + /* PHY access */ + PHYAR_Flag = 0x80000000, + PHYAR_Write = 0x80000000, + PHYAR_Read = 0x00000000, + PHYAR_Reg_Mask = 0x1f, + PHYAR_Reg_shift = 16, + PHYAR_Data_Mask = 0xffff, + + /* EPHY access */ + EPHYAR_Flag = 0x80000000, + EPHYAR_Write = 0x80000000, + EPHYAR_Read = 0x00000000, + EPHYAR_Reg_Mask = 0x3f, + EPHYAR_Reg_shift = 16, + EPHYAR_Data_Mask = 0xffff, + + /* CSI access */ + CSIAR_Flag = 0x80000000, + CSIAR_Write = 0x80000000, + CSIAR_Read = 0x00000000, + CSIAR_ByteEn = 0x0f, + CSIAR_ByteEn_shift = 12, + CSIAR_Addr_Mask = 0x0fff, + + /* ERI access */ + ERIAR_Flag = 0x80000000, + ERIAR_Write = 0x80000000, + ERIAR_Read = 0x00000000, + ERIAR_Addr_Align = 4, /* ERI access register address must be 4 byte alignment */ + ERIAR_ExGMAC = 0, + ERIAR_MSIX = 1, + ERIAR_ASF = 2, + ERIAR_OOB = 2, + ERIAR_Type_shift = 16, + ERIAR_ByteEn = 0x0f, + ERIAR_ByteEn_shift = 12, + + /* OCP GPHY access */ + OCPDR_Write = 0x80000000, + OCPDR_Read = 0x00000000, + OCPDR_Reg_Mask = 0xFF, + OCPDR_Data_Mask = 0xFFFF, + OCPDR_GPHY_Reg_shift = 16, + OCPAR_Flag = 0x80000000, + OCPAR_GPHY_Write = 0x8000F060, + OCPAR_GPHY_Read = 0x0000F060, + OCPR_Write = 0x80000000, + OCPR_Read = 0x00000000, + OCPR_Addr_Reg_shift = 16, + OCPR_Flag = 0x80000000, + OCP_STD_PHY_BASE_PAGE = 0x0A40, + + /* MCU Command */ + Now_is_oob = (1 << 7), + Txfifo_empty = (1 << 5), + Rxfifo_empty = (1 << 4), + + /* E-FUSE access */ + EFUSE_WRITE = 0x80000000, + EFUSE_WRITE_OK = 0x00000000, + EFUSE_READ = 0x00000000, + EFUSE_READ_OK = 0x80000000, + EFUSE_WRITE_V3 = 0x40000000, + EFUSE_WRITE_OK_V3 = 0x00000000, + EFUSE_READ_V3 = 0x80000000, + EFUSE_READ_OK_V3 = 0x00000000, + EFUSE_Reg_Mask = 0x03FF, + EFUSE_Reg_Shift = 8, + EFUSE_Check_Cnt = 300, + EFUSE_READ_FAIL = 0xFF, + EFUSE_Data_Mask = 0x000000FF, + + /* GPIO */ + GPIO_en = (1 << 0), + +}; + +enum _DescStatusBit { + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ + + /* Tx private */ + /*------ offset 0 of tx descriptor ------*/ + LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */ + GiantSendv4 = (1 << 26), /* TCP Giant Send Offload V4 (GSOv4) */ + GiantSendv6 = (1 << 25), /* TCP Giant Send Offload V6 (GSOv6) */ + LargeSend_DP = (1 << 16), /* TCP Large Send Offload (TSO) 
*/ + MSSShift = 16, /* MSS value position */ + MSSMask = 0x7FFU, /* MSS value 11 bits */ + TxIPCS = (1 << 18), /* Calculate IP checksum */ + TxUDPCS = (1 << 17), /* Calculate UDP/IP checksum */ + TxTCPCS = (1 << 16), /* Calculate TCP/IP checksum */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ + + /*@@@@@@ offset 4 of tx descriptor => bits for RTL8168C/CP only begin @@@@@@*/ + TxUDPCS_C = (1 << 31), /* Calculate UDP/IP checksum */ + TxTCPCS_C = (1 << 30), /* Calculate TCP/IP checksum */ + TxIPCS_C = (1 << 29), /* Calculate IP checksum */ + TxIPV6F_C = (1 << 28), /* Indicate it is an IPv6 packet */ + /*@@@@@@ offset 4 of tx descriptor => bits for RTL8168C/CP only end @@@@@@*/ + + + /* Rx private */ + /*------ offset 0 of rx descriptor ------*/ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 2/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + RxIPF = (1 << 16), /* IP checksum failed */ + RxUDPF = (1 << 15), /* UDP/IP checksum failed */ + RxTCPF = (1 << 14), /* TCP/IP checksum failed */ + RxVlanTag = (1 << 16), /* VLAN tag available */ + + /*@@@@@@ offset 0 of rx descriptor => bits for RTL8168C/CP only begin @@@@@@*/ + RxUDPT = (1 << 18), + RxTCPT = (1 << 17), + /*@@@@@@ offset 0 of rx descriptor => bits for RTL8168C/CP only end @@@@@@*/ + + /*@@@@@@ offset 4 of rx descriptor => bits for RTL8168C/CP only begin @@@@@@*/ + RxV6F = (1 << 31), + RxV4F = (1 << 30), + /*@@@@@@ offset 4 of rx descriptor => bits for RTL8168C/CP only end @@@@@@*/ +}; + +enum features { +// RTL_FEATURE_WOL = (1 << 0), + RTL_FEATURE_MSI = (1 << 1), +}; + +enum wol_capability { + WOL_DISABLED = 0, + WOL_ENABLED = 1 +}; + +enum bits { + BIT_0 = (1 << 0), + BIT_1 = (1 << 1), + BIT_2 = (1 << 2), + BIT_3 = (1 << 3), + BIT_4 = (1 << 4), + BIT_5 = (1 << 5), + BIT_6 = (1 << 6), + BIT_7 = (1 << 7), + BIT_8 = (1 << 8), + BIT_9 = (1 << 9), + BIT_10 = (1 << 10), + BIT_11 = (1 << 11), + BIT_12 = (1 << 12), + BIT_13 = (1 << 13), + BIT_14 = (1 << 14), + BIT_15 = (1 << 15), + BIT_16 = (1 << 16), + BIT_17 = (1 << 17), + BIT_18 = (1 << 18), + BIT_19 = (1 << 19), + BIT_20 = (1 << 20), + BIT_21 = (1 << 21), + BIT_22 = (1 << 22), + BIT_23 = (1 << 23), + BIT_24 = (1 << 24), + BIT_25 = (1 << 25), + BIT_26 = (1 << 26), + BIT_27 = (1 << 27), + BIT_28 = (1 << 28), + BIT_29 = (1 << 29), + BIT_30 = (1 << 30), + BIT_31 = (1 << 31) +}; + +enum effuse { + EFUSE_NOT_SUPPORT = 0, + EFUSE_SUPPORT_V1, + EFUSE_SUPPORT_V2, + EFUSE_SUPPORT_V3, +}; +#define RsvdMask 0x3fffc000 + +struct TxDesc { + u32 opts1; + u32 opts2; + u64 addr; +}; + +struct RxDesc { + u32 opts1; + u32 opts2; + u64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; + u8 __pad[sizeof(void *) - sizeof(u32)]; +}; + +struct pci_resource { + u8 cmd; + u8 cls; + u16 io_base_h; + u16 io_base_l; + u16 mem_base_h; + u16 mem_base_l; + u8 ilr; + u16 resv_0x1c_h; + u16 resv_0x1c_l; + u16 resv_0x20_h; + u16 resv_0x20_l; + u16 resv_0x24_h; + u16 resv_0x24_l; + u16 resv_0x2c_h; + u16 resv_0x2c_l; + u32 pci_sn_l; + u32 pci_sn_h; +}; + +struct rtl8168_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; /* Index of PCI device */ + struct net_device *dev; +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24) + struct napi_struct napi; +#endif +#endif + struct net_device_stats stats; /* statistics of net device */ + spinlock_t lock; /* spin lock flag */ + u32 msg_enable; + u32 tx_tcp_csum_cmd; + u32 
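/*
 * Aside (a sketch of the usual ownership pattern, not a quote from the
 * xmit path): a Tx slot is handed to the NIC by composing the bits above
 * into opts1, with DescOwn written last so the hardware never sees a
 * half-built entry:
 *     txd->addr  = cpu_to_le64(mapping);
 *     opts1      = len | FirstFrag | LastFrag;    (plus csum/TSO bits)
 *     if (entry == NUM_TX_DESC - 1)
 *             opts1 |= RingEnd;
 *     txd->opts1 = cpu_to_le32(opts1 | DescOwn);
 */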
tx_udp_csum_cmd; + u32 tx_ip_csum_cmd; + u32 tx_ipv6_csum_cmd; + int max_jumbo_frame_size; + int chipset; + u32 mcfg; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */ + u32 dirty_rx; + u32 dirty_tx; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + struct sk_buff *Rx_skbuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + unsigned rx_buf_sz; + struct timer_list esd_timer; + struct timer_list link_timer; + struct pci_resource pci_cfg_space; + unsigned int esd_flag; + unsigned int pci_cfg_is_read; + unsigned int rtl8168_rx_config; + u16 cp_cmd; + u16 intr_mask; + u16 timer_intr_mask; + int phy_auto_nego_reg; + int phy_1000_ctrl_reg; + u8 org_mac_addr[NODE_ADDRESS_SIZE]; + struct rtl8168_counters *tally_vaddr; + dma_addr_t tally_paddr; + +#ifdef CONFIG_R8168_VLAN + struct vlan_group *vlgrp; +#endif + u8 wol_enabled; + u32 wol_opts; + u8 efuse_ver; + u8 eeprom_type; + u8 autoneg; + u8 duplex; + u32 speed; + u32 advertising; + u16 eeprom_len; + u16 cur_page; + u32 bios_setting; + + int (*set_speed)(struct net_device *, u8 autoneg, u32 speed, u8 duplex, u32 adv); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + void (*get_settings)(struct net_device *, struct ethtool_cmd *); +#else + void (*get_settings)(struct net_device *, struct ethtool_link_ksettings *); +#endif + void (*phy_reset_enable)(struct net_device *); + unsigned int (*phy_reset_pending)(struct net_device *); + unsigned int (*link_ok)(struct net_device *); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + struct work_struct task; +#else + struct delayed_work task; +#endif + unsigned features; + + u8 org_pci_offset_99; + u8 org_pci_offset_180; + u8 issue_offset_99_event; + + u8 org_pci_offset_80; + u8 org_pci_offset_81; + u8 use_timer_interrrupt; + + u32 keep_intr_cnt; + + u8 HwIcVerUnknown; + u8 NotWrRamCodeToMicroP; + u8 NotWrMcuPatchCode; + u8 HwHasWrRamCodeToMicroP; + + u16 sw_ram_code_ver; + u16 hw_ram_code_ver; + + u8 rtk_enable_diag; + + u8 ShortPacketSwChecksum; + + u8 UseSwPaddingShortPkt; + + u8 RequireAdcBiasPatch; + u16 AdcBiasPatchIoffset; + + u8 RequireAdjustUpsTxLinkPulseTiming; + u16 SwrCnt1msIni; + + u8 HwSuppNowIsOobVer; + + u8 RequiredSecLanDonglePatch; + + u32 HwFiberModeVer; + u32 HwFiberStat; + u8 HwSwitchMdiToFiber; + + u8 HwSuppSerDesPhyVer; + + u8 HwSuppPhyOcpVer; + + u8 HwSuppAspmClkIntrLock; + + u16 NicCustLedValue; + + u8 HwSuppUpsVer; + + u8 HwSuppMagicPktVer; + + u8 HwSuppCheckPhyDisableModeVer; + + u8 random_mac; + + u16 phy_reg_aner; + u16 phy_reg_anlpar; + u16 phy_reg_gbsr; + + u32 HwPcieSNOffset; + + u8 HwSuppEsdVer; + u8 TestPhyOcpReg; + u16 BackupPhyFuseDout_15_0; + u16 BackupPhyFuseDout_47_32; + u16 BackupPhyFuseDout_63_48; + + const char *fw_name; + struct rtl8168_fw *rtl_fw; + u32 ocp_base; + + //Dash+++++++++++++++++ + u8 HwSuppDashVer; + u8 DASH; + u8 dash_printer_enabled; + u8 HwPkgDet; + void __iomem *mapped_cmac_ioaddr; /* mapped cmac memory map physical address */ + void __iomem *cmac_ioaddr; /* cmac memory map physical address */ + +#ifdef ENABLE_DASH_SUPPORT + u16 AfterRecvFromFwBufLen; + u8 AfterRecvFromFwBuf[RECV_FROM_FW_BUF_SIZE]; + u16 AfterSendToFwBufLen; + u8 AfterSendToFwBuf[SEND_TO_FW_BUF_SIZE]; + u16 SendToFwBufferLen; + u32 SizeOfSendToFwBuffer ; + u32 SizeOfSendToFwBufferMemAlloc ; + u32 
NumOfSendToFwBuffer ; + + u8 OobReq; + u8 OobAck; + u32 OobReqComplete; + u32 OobAckComplete; + + u8 RcvFwReqSysOkEvt; + u8 RcvFwDashOkEvt; + u8 SendFwHostOkEvt; + + u8 DashFwDisableRx; + + void *SendToFwBuffer ; + dma_addr_t SendToFwBufferPhy ; + u8 SendingToFw; + PTX_DASH_SEND_FW_DESC TxDashSendFwDesc; + dma_addr_t TxDashSendFwDescPhy; + u32 SizeOfTxDashSendFwDescMemAlloc; + u32 SizeOfTxDashSendFwDesc ; + u32 NumTxDashSendFwDesc ; + u32 CurrNumTxDashSendFwDesc ; + u32 LastSendNumTxDashSendFwDesc ; + + u32 NumRecvFromFwBuffer ; + u32 SizeOfRecvFromFwBuffer ; + u32 SizeOfRecvFromFwBufferMemAlloc ; + void *RecvFromFwBuffer ; + dma_addr_t RecvFromFwBufferPhy ; + + PRX_DASH_FROM_FW_DESC RxDashRecvFwDesc; + dma_addr_t RxDashRecvFwDescPhy; + u32 SizeOfRxDashRecvFwDescMemAlloc; + u32 SizeOfRxDashRecvFwDesc ; + u32 NumRxDashRecvFwDesc ; + u32 CurrNumRxDashRecvFwDesc ; + u8 DashReqRegValue; + u16 HostReqValue; + + u32 CmacResetIsrCounter; + u8 CmacResetIntr ; + u8 CmacResetting ; + u8 CmacOobIssueCmacReset ; + u32 CmacResetbyFwCnt; + +#if defined(ENABLE_DASH_PRINTER_SUPPORT) + struct completion fw_ack; + struct completion fw_req; + struct completion fw_host_ok; +#endif + //Dash----------------- +#endif //ENABLE_DASH_SUPPORT + + //Realwow++++++++++++++ + u8 HwSuppKCPOffloadVer; + + u8 EnableDhcpTimeoutWake; + u8 EnableTeredoOffload; + u8 EnableKCPOffload; +#ifdef ENABLE_REALWOW_SUPPORT + u32 DhcpTimeout; + MP_KCP_INFO MpKCPInfo; + //Realwow-------------- +#endif //ENABLE_REALWOW_SUPPORT + + u32 eee_adv_t; + u8 eee_enabled; + + u32 dynamic_aspm_packet_count; + +#ifdef ENABLE_R8168_PROCFS + //Procfs support + struct proc_dir_entry *proc_dir; +#endif +}; + +enum eetype { + EEPROM_TYPE_NONE=0, + EEPROM_TYPE_93C46, + EEPROM_TYPE_93C56, + EEPROM_TWSI +}; + +enum mcfg { + CFG_METHOD_1=0, + CFG_METHOD_2, + CFG_METHOD_3, + CFG_METHOD_4, + CFG_METHOD_5, + CFG_METHOD_6, + CFG_METHOD_7, + CFG_METHOD_8, + CFG_METHOD_9 , + CFG_METHOD_10, + CFG_METHOD_11, + CFG_METHOD_12, + CFG_METHOD_13, + CFG_METHOD_14, + CFG_METHOD_15, + CFG_METHOD_16, + CFG_METHOD_17, + CFG_METHOD_18, + CFG_METHOD_19, + CFG_METHOD_20, + CFG_METHOD_21, + CFG_METHOD_22, + CFG_METHOD_23, + CFG_METHOD_24, + CFG_METHOD_25, + CFG_METHOD_26, + CFG_METHOD_27, + CFG_METHOD_28, + CFG_METHOD_29, + CFG_METHOD_30, + CFG_METHOD_31, + CFG_METHOD_32, + CFG_METHOD_33, + CFG_METHOD_MAX, + CFG_METHOD_DEFAULT = 0xFF +}; + +#define LSO_32K 32000 +#define LSO_64K 64000 + +#define NIC_MIN_PHYS_BUF_COUNT (2) +#define NIC_MAX_PHYS_BUF_COUNT_LSO_64K (24) +#define NIC_MAX_PHYS_BUF_COUNT_LSO2 (16*4) + +#define GTTCPHO_SHIFT 18 +#define GTTCPHO_MAX 0x7fU +#define GTPKTSIZE_MAX 0x3ffffU +#define TCPHO_SHIFT 18 +#define TCPHO_MAX 0x3ffU +#define LSOPKTSIZE_MAX 0xffffU +#define MSS_MAX 0x07ffu /* MSS value */ + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 +#define OOB_CMD_SET_IPMAC 0x41 + +#define WAKEUP_MAGIC_PACKET_NOT_SUPPORT (0) +#define WAKEUP_MAGIC_PACKET_V1 (1) +#define WAKEUP_MAGIC_PACKET_V2 (2) + +//Ram Code Version +#define NIC_RAMCODE_VERSION_CFG_METHOD_14 (0x0057) +#define NIC_RAMCODE_VERSION_CFG_METHOD_16 (0x0055) +#define NIC_RAMCODE_VERSION_CFG_METHOD_18 (0x0052) +#define NIC_RAMCODE_VERSION_CFG_METHOD_20 (0x0044) +#define NIC_RAMCODE_VERSION_CFG_METHOD_21 (0x0042) +#define NIC_RAMCODE_VERSION_CFG_METHOD_24 (0x0001) +#define NIC_RAMCODE_VERSION_CFG_METHOD_23 (0x0015) +#define NIC_RAMCODE_VERSION_CFG_METHOD_26 (0x0012) +#define NIC_RAMCODE_VERSION_CFG_METHOD_28 (0x0019) +#define 
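/*
 * Aside (illustrative): the OOB_CMD_* opcodes above are posted to the
 * DASH/OOB firmware via rtl8168_oob_notify(), declared below, e.g.
 *     rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
 *     rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
 * so the management firmware can track whether the host driver currently
 * owns the NIC.
 */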
NIC_RAMCODE_VERSION_CFG_METHOD_29 (0x0055) +#define NIC_RAMCODE_VERSION_CFG_METHOD_31 (0x0003) + +//hwoptimize +#define HW_PATCH_SOC_LAN (BIT_0) +#define HW_PATCH_SAMSUNG_LAN_DONGLE (BIT_2) + +#define HW_PHY_STATUS_INI 1 +#define HW_PHY_STATUS_EXT_INI 2 +#define HW_PHY_STATUS_LAN_ON 3 + +void rtl8168_mdio_write(struct rtl8168_private *tp, u16 RegAddr, u16 value); +void rtl8168_mdio_prot_write(struct rtl8168_private *tp, u32 RegAddr, u32 value); +void rtl8168_mdio_prot_direct_write_phy_ocp(struct rtl8168_private *tp, u32 RegAddr, u32 value); +u32 rtl8168_mdio_read(struct rtl8168_private *tp, u16 RegAddr); +u32 rtl8168_mdio_prot_read(struct rtl8168_private *tp, u32 RegAddr); +u32 rtl8168_mdio_prot_direct_read_phy_ocp(struct rtl8168_private *tp, u32 RegAddr); +void rtl8168_ephy_write(struct rtl8168_private *tp, int RegAddr, int value); +void rtl8168_mac_ocp_write(struct rtl8168_private *tp, u16 reg_addr, u16 value); +u16 rtl8168_mac_ocp_read(struct rtl8168_private *tp, u16 reg_addr); +void rtl8168_clear_eth_phy_bit(struct rtl8168_private *tp, u8 addr, u16 mask); +void rtl8168_set_eth_phy_bit(struct rtl8168_private *tp, u8 addr, u16 mask); +void rtl8168_ocp_write(struct rtl8168_private *tp, u16 addr, u8 len, u32 data); +void rtl8168_oob_notify(struct rtl8168_private *tp, u8 cmd); +void rtl8168_init_ring_indexes(struct rtl8168_private *tp); +int rtl8168_eri_write(struct rtl8168_private *tp, int addr, int len, u32 value, int type); +void rtl8168_oob_mutex_lock(struct rtl8168_private *tp); +u32 rtl8168_ocp_read(struct rtl8168_private *tp, u16 addr, u8 len); +u32 rtl8168_ocp_read_with_oob_base_address(struct rtl8168_private *tp, u16 addr, u8 len, u32 base_address); +u32 rtl8168_ocp_write_with_oob_base_address(struct rtl8168_private *tp, u16 addr, u8 len, u32 value, u32 base_address); +u32 rtl8168_eri_read(struct rtl8168_private *tp, int addr, int len, int type); +u32 rtl8168_eri_read_with_oob_base_address(struct rtl8168_private *tp, int addr, int len, int type, u32 base_address); +int rtl8168_eri_write_with_oob_base_address(struct rtl8168_private *tp, int addr, int len, u32 value, int type, u32 base_address); +u16 rtl8168_ephy_read(struct rtl8168_private *tp, int RegAddr); +void rtl8168_wait_txrx_fifo_empty(struct net_device *dev); +void rtl8168_wait_ll_share_fifo_ready(struct net_device *dev); +void rtl8168_enable_now_is_oob(struct rtl8168_private *tp); +void rtl8168_disable_now_is_oob(struct rtl8168_private *tp); +void rtl8168_oob_mutex_unlock(struct rtl8168_private *tp); +void rtl8168_dash2_disable_tx(struct rtl8168_private *tp); +void rtl8168_dash2_enable_tx(struct rtl8168_private *tp); +void rtl8168_dash2_disable_rx(struct rtl8168_private *tp); +void rtl8168_dash2_enable_rx(struct rtl8168_private *tp); +void rtl8168_hw_disable_mac_mcu_bps(struct net_device *dev); + +#define HW_SUPPORT_CHECK_PHY_DISABLE_MODE(_M) ((_M)->HwSuppCheckPhyDisableModeVer > 0 ) +#define HW_SUPP_SERDES_PHY(_M) ((_M)->HwSuppSerDesPhyVer > 0) +#define HW_HAS_WRITE_PHY_MCU_RAM_CODE(_M) (((_M)->HwHasWrRamCodeToMicroP == TRUE) ? 
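/*
 * Aside (a sketch using standard MII register numbers, not a quote from
 * the driver): rtl8168_mdio_write() and rtl8168_mdio_read() above hide
 * the indirect PHYAR/OCP access protocol, so PHY work reads like plain
 * MII, e.g. restarting autonegotiation:
 *     rtl8168_mdio_write(tp, 0x1F, 0x0000);    select PHY page 0
 *     rtl8168_mdio_write(tp, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART);
 * Callers are expected to serialize such sequences (the driver uses
 * tp->lock), since the accessors poll a shared indirect register.
 */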
1 : 0)
+#define HW_SUPPORT_UPS_MODE(_M) ((_M)->HwSuppUpsVer > 0)
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
+#define netdev_mc_count(dev) ((dev)->mc_count)
+#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0)
+#define netdev_for_each_mc_addr(mclist, dev) \
+ for (mclist = dev->mc_list; mclist; mclist = mclist->next)
+#endif
diff --git a/addons/r8168/src/4.4.180/r8168_asf.c b/addons/r8168/src/4.4.180/r8168_asf.c
new file mode 100644
index 00000000..77524f04
--- /dev/null
+++ b/addons/r8168/src/4.4.180/r8168_asf.c
@@ -0,0 +1,422 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+################################################################################
+#
+# r8168 is the Linux device driver released for Realtek Gigabit Ethernet
+# controllers with PCI-Express interface.
+#
+# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# Author:
+# Realtek NIC software team <nicfae@realtek.com>
+# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan
+#
+################################################################################
+*/
+
+/************************************************************************************
+ * This product is covered by one or more of the following patents:
+ * US6,570,884, US6,115,776, and US6,327,625.
+ ***********************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "r8168.h" +#include "r8168_asf.h" +#include "rtl_eeprom.h" + +int rtl8168_asf_ioctl(struct net_device *dev, + struct ifreq *ifr) +{ + struct rtl8168_private *tp = netdev_priv(dev); + void *user_data = ifr->ifr_data; + struct asf_ioctl_struct asf_usrdata; + unsigned long flags; + + if (tp->mcfg != CFG_METHOD_7 && tp->mcfg != CFG_METHOD_8) + return -EOPNOTSUPP; + + if (copy_from_user(&asf_usrdata, user_data, sizeof(struct asf_ioctl_struct))) + return -EFAULT; + + spin_lock_irqsave(&tp->lock, flags); + + switch (asf_usrdata.offset) { + case HBPeriod: + rtl8168_asf_hbperiod(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case WD8Timer: + break; + case WD16Rst: + rtl8168_asf_wd16rst(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case WD8Rst: + rtl8168_asf_time_period(tp, asf_usrdata.arg, WD8Rst, asf_usrdata.u.data); + break; + case LSnsrPollCycle: + rtl8168_asf_time_period(tp, asf_usrdata.arg, LSnsrPollCycle, asf_usrdata.u.data); + break; + case ASFSnsrPollPrd: + rtl8168_asf_time_period(tp, asf_usrdata.arg, ASFSnsrPollPrd, asf_usrdata.u.data); + break; + case AlertReSendItvl: + rtl8168_asf_time_period(tp, asf_usrdata.arg, AlertReSendItvl, asf_usrdata.u.data); + break; + case SMBAddr: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, SMBAddr, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case ASFConfigR0: + rtl8168_asf_config_regs(tp, asf_usrdata.arg, ASFConfigR0, asf_usrdata.u.data); + break; + case ASFConfigR1: + rtl8168_asf_config_regs(tp, asf_usrdata.arg, ASFConfigR1, asf_usrdata.u.data); + break; + case ConsoleMA: + rtl8168_asf_console_mac(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case ConsoleIP: + rtl8168_asf_ip_address(tp, asf_usrdata.arg, ConsoleIP, asf_usrdata.u.data); + break; + case IPAddr: + rtl8168_asf_ip_address(tp, asf_usrdata.arg, IPAddr, asf_usrdata.u.data); + break; + case UUID: + rtl8168_asf_rw_uuid(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case IANA: + rtl8168_asf_rw_iana(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case SysID: + rtl8168_asf_rw_systemid(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case Community: + rtl8168_asf_community_string(tp, asf_usrdata.arg, asf_usrdata.u.string); + break; + case StringLength: + rtl8168_asf_community_string_len(tp, asf_usrdata.arg, asf_usrdata.u.data); + break; + case FmCapMsk: + rtl8168_asf_capability_masks(tp, asf_usrdata.arg, FmCapMsk, asf_usrdata.u.data); + break; + case SpCMDMsk: + rtl8168_asf_capability_masks(tp, asf_usrdata.arg, SpCMDMsk, asf_usrdata.u.data); + break; + case SysCapMsk: + rtl8168_asf_capability_masks(tp, asf_usrdata.arg, SysCapMsk, asf_usrdata.u.data); + break; + case RmtRstAddr: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtRstAddr, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtRstCmd: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtRstCmd, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtRstData: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtRstData, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPwrOffAddr: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPwrOffAddr, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPwrOffCmd: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPwrOffCmd, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPwrOffData: + 
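/*
 * Sketch of the matching user-space side (hypothetical fd and ifname):
 * the whole asf_ioctl_struct is copied in, dispatched on .offset, and
 * copied back out, so reading the heartbeat period looks like
 *     struct asf_ioctl_struct a = { .offset = HBPeriod, .arg = ASF_GET };
 *     struct ifreq ifr;
 *     strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *     ifr.ifr_data = (void *)&a;
 *     ioctl(fd, SIOCDEVPRIVATE_RTLASF, &ifr);
 * after which a.u.data[ASFHBPERIOD] holds the period on success.
 */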
rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPwrOffData, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPwrOnAddr: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPwrOnAddr, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPwrOnCmd: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPwrOnCmd, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPwrOnData: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPwrOnData, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPCRAddr: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPCRAddr, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPCRCmd: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPCRCmd, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case RmtPCRData: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, RmtPCRData, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case ASFSnsr0Addr: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, ASFSnsr0Addr, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case LSnsrAddr0: + rtl8168_asf_rw_hexadecimal(tp, asf_usrdata.arg, LSnsrAddr0, RW_ONE_BYTE, asf_usrdata.u.data); + break; + case KO: + /* Get/Set Key Operation */ + rtl8168_asf_key_access(tp, asf_usrdata.arg, KO, asf_usrdata.u.data); + break; + case KA: + /* Get/Set Key Administrator */ + rtl8168_asf_key_access(tp, asf_usrdata.arg, KA, asf_usrdata.u.data); + break; + case KG: + /* Get/Set Key Generation */ + rtl8168_asf_key_access(tp, asf_usrdata.arg, KG, asf_usrdata.u.data); + break; + case KR: + /* Get/Set Key Random */ + rtl8168_asf_key_access(tp, asf_usrdata.arg, KR, asf_usrdata.u.data); + break; + default: + spin_unlock_irqrestore(&tp->lock, flags); + return -EOPNOTSUPP; + } + + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(user_data, &asf_usrdata, sizeof(struct asf_ioctl_struct))) + return -EFAULT; + + return 0; +} + +void rtl8168_asf_hbperiod(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + if (arg == ASF_GET) + data[ASFHBPERIOD] = rtl8168_eri_read(tp, HBPeriod, RW_TWO_BYTES, ERIAR_ASF); + else if (arg == ASF_SET) { + rtl8168_eri_write(tp, HBPeriod, RW_TWO_BYTES, data[ASFHBPERIOD], ERIAR_ASF); + rtl8168_eri_write(tp, 0x1EC, RW_ONE_BYTE, 0x07, ERIAR_ASF); + } +} + +void rtl8168_asf_wd16rst(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + data[ASFWD16RST] = rtl8168_eri_read(tp, WD16Rst, RW_TWO_BYTES, ERIAR_ASF); +} + +void rtl8168_asf_console_mac(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + int i; + + if (arg == ASF_GET) { + for (i = 0; i < 6; i++) + data[i] = rtl8168_eri_read(tp, ConsoleMA + i, RW_ONE_BYTE, ERIAR_ASF); + } else if (arg == ASF_SET) { + for (i = 0; i < 6; i++) + rtl8168_eri_write(tp, ConsoleMA + i, RW_ONE_BYTE, data[i], ERIAR_ASF); + + /* write the new console MAC address to EEPROM */ + rtl8168_eeprom_write_sc(tp, 70, (data[1] << 8) | data[0]); + rtl8168_eeprom_write_sc(tp, 71, (data[3] << 8) | data[2]); + rtl8168_eeprom_write_sc(tp, 72, (data[5] << 8) | data[4]); + } +} + +void rtl8168_asf_ip_address(struct rtl8168_private *tp, int arg, int offset, unsigned int *data) +{ + int i; + int eeprom_off = 0; + + if (arg == ASF_GET) { + for (i = 0; i < 4; i++) + data[i] = rtl8168_eri_read(tp, offset + i, RW_ONE_BYTE, ERIAR_ASF); + } else if (arg == ASF_SET) { + for (i = 0; i < 4; i++) + rtl8168_eri_write(tp, offset + i, RW_ONE_BYTE, data[i], ERIAR_ASF); + + if (offset == ConsoleIP) + eeprom_off = 73; + else if (offset == IPAddr) + eeprom_off = 75; + + /* write the new IP address to EEPROM */ + rtl8168_eeprom_write_sc(tp, eeprom_off, (data[1] << 8) 
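/*
 * Note on the packing here: rtl8168_eeprom_write_sc() stores 16-bit
 * words, so consecutive bytes are packed low-byte-first as
 * (data[i + 1] << 8) | data[i]; e.g. the address 192.168.0.10 is stored
 * as the words 0xA8C0 and 0x0A00.
 */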
| data[0]); + rtl8168_eeprom_write_sc(tp, eeprom_off + 1, (data[3] << 8) | data[2]); + + } +} + +void rtl8168_asf_config_regs(struct rtl8168_private *tp, int arg, int offset, unsigned int *data) +{ + unsigned int value; + + if (arg == ASF_GET) { + data[ASFCAPABILITY] = (rtl8168_eri_read(tp, offset, RW_ONE_BYTE, ERIAR_ASF) & data[ASFCONFIG]) ? FUNCTION_ENABLE : FUNCTION_DISABLE; + } else if (arg == ASF_SET) { + value = rtl8168_eri_read(tp, offset, RW_ONE_BYTE, ERIAR_ASF); + + if (data[ASFCAPABILITY] == FUNCTION_ENABLE) + value |= data[ASFCONFIG]; + else if (data[ASFCAPABILITY] == FUNCTION_DISABLE) + value &= ~data[ASFCONFIG]; + + rtl8168_eri_write(tp, offset, RW_ONE_BYTE, value, ERIAR_ASF); + } +} + +void rtl8168_asf_capability_masks(struct rtl8168_private *tp, int arg, int offset, unsigned int *data) +{ + unsigned int len, bit_mask; + + bit_mask = DISABLE_MASK; + + if (offset == FmCapMsk) { + /* System firmware capabilities */ + len = RW_FOUR_BYTES; + if (data[ASFCAPMASK] == FUNCTION_ENABLE) + bit_mask = FMW_CAP_MASK; + } else if (offset == SpCMDMsk) { + /* Special commands */ + len = RW_TWO_BYTES; + if (data[ASFCAPMASK] == FUNCTION_ENABLE) + bit_mask = SPC_CMD_MASK; + } else { + /* System capability (offset == SysCapMsk)*/ + len = RW_ONE_BYTE; + if (data[ASFCAPMASK] == FUNCTION_ENABLE) + bit_mask = SYS_CAP_MASK; + } + + if (arg == ASF_GET) + data[ASFCAPMASK] = rtl8168_eri_read(tp, offset, len, ERIAR_ASF) ? FUNCTION_ENABLE : FUNCTION_DISABLE; + else /* arg == ASF_SET */ + rtl8168_eri_write(tp, offset, len, bit_mask, ERIAR_ASF); +} + +void rtl8168_asf_community_string(struct rtl8168_private *tp, int arg, char *string) +{ + int i; + + if (arg == ASF_GET) { + for (i = 0; i < COMMU_STR_MAX_LEN; i++) + string[i] = rtl8168_eri_read(tp, Community + i, RW_ONE_BYTE, ERIAR_ASF); + } else { /* arg == ASF_SET */ + for (i = 0; i < COMMU_STR_MAX_LEN; i++) + rtl8168_eri_write(tp, Community + i, RW_ONE_BYTE, string[i], ERIAR_ASF); + } +} + +void rtl8168_asf_community_string_len(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + if (arg == ASF_GET) + data[ASFCOMMULEN] = rtl8168_eri_read(tp, StringLength, RW_ONE_BYTE, ERIAR_ASF); + else /* arg == ASF_SET */ + rtl8168_eri_write(tp, StringLength, RW_ONE_BYTE, data[ASFCOMMULEN], ERIAR_ASF); +} + +void rtl8168_asf_time_period(struct rtl8168_private *tp, int arg, int offset, unsigned int *data) +{ + int pos = 0; + + if (offset == WD8Rst) + pos = ASFWD8RESET; + else if (offset == LSnsrPollCycle) + pos = ASFLSNRPOLLCYC; + else if (offset == ASFSnsrPollPrd) + pos = ASFSNRPOLLCYC; + else if (offset == AlertReSendItvl) + pos = ASFALERTRESND; + + if (arg == ASF_GET) + data[pos] = rtl8168_eri_read(tp, offset, RW_ONE_BYTE, ERIAR_ASF); + else /* arg == ASF_SET */ + rtl8168_eri_write(tp, offset, RW_ONE_BYTE, data[pos], ERIAR_ASF); + +} + +void rtl8168_asf_key_access(struct rtl8168_private *tp, int arg, int offset, unsigned int *data) +{ + int i, j; + int key_off = 0; + + if (arg == ASF_GET) { + for (i = 0; i < KEY_LEN; i++) + data[i] = rtl8168_eri_read(tp, offset + KEY_LEN - (i + 1), RW_ONE_BYTE, ERIAR_ASF); + } else { + if (offset == KO) + key_off = 162; + else if (offset == KA) + key_off = 172; + else if (offset == KG) + key_off = 182; + else if (offset == KR) + key_off = 192; + + /* arg == ASF_SET */ + for (i = 0; i < KEY_LEN; i++) + rtl8168_eri_write(tp, offset + KEY_LEN - (i + 1), RW_ONE_BYTE, data[i], ERIAR_ASF); + + /* write the new key to EEPROM */ + for (i = 0, j = 19; i < 10; i++, j = j - 2) + rtl8168_eeprom_write_sc(tp, key_off + i, (data[j - 1] << 
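/*
 * Note (describing the loops here, not new behaviour): the ASF window
 * keeps each 20-byte key most-significant-byte first -- the
 * offset + KEY_LEN - (i + 1) indexing walks it backwards -- while the
 * EEPROM copy below repacks the same bytes as ten 16-bit words, pairing
 * data[j - 1] and data[j] from j = 19 down to j = 1.
 */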
8) | data[j]); + } +} + +void rtl8168_asf_rw_hexadecimal(struct rtl8168_private *tp, int arg, int offset, int len, unsigned int *data) +{ + if (arg == ASF_GET) + data[ASFRWHEXNUM] = rtl8168_eri_read(tp, offset, len, ERIAR_ASF); + else /* arg == ASF_SET */ + rtl8168_eri_write(tp, offset, len, data[ASFRWHEXNUM], ERIAR_ASF); +} + +void rtl8168_asf_rw_systemid(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + int i; + + if (arg == ASF_GET) + for (i = 0; i < SYSID_LEN ; i++) + data[i] = rtl8168_eri_read(tp, SysID + i, RW_ONE_BYTE, ERIAR_ASF); + else /* arg == ASF_SET */ + for (i = 0; i < SYSID_LEN ; i++) + rtl8168_eri_write(tp, SysID + i, RW_ONE_BYTE, data[i], ERIAR_ASF); +} + +void rtl8168_asf_rw_iana(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + int i; + + if (arg == ASF_GET) + for (i = 0; i < RW_FOUR_BYTES; i++) + data[i] = rtl8168_eri_read(tp, IANA + i, RW_ONE_BYTE, ERIAR_ASF); + else /* arg == ASF_SET */ + for (i = 0; i < RW_FOUR_BYTES; i++) + rtl8168_eri_write(tp, IANA + i, RW_ONE_BYTE, data[i], ERIAR_ASF); +} + +void rtl8168_asf_rw_uuid(struct rtl8168_private *tp, int arg, unsigned int *data) +{ + int i, j; + + if (arg == ASF_GET) + for (i = UUID_LEN - 1, j = 0; i >= 0 ; i--, j++) + data[j] = rtl8168_eri_read(tp, UUID + i, RW_ONE_BYTE, ERIAR_ASF); + else /* arg == ASF_SET */ + for (i = UUID_LEN - 1, j = 0; i >= 0 ; i--, j++) + rtl8168_eri_write(tp, UUID + i, RW_ONE_BYTE, data[j], ERIAR_ASF); +} diff --git a/addons/r8168/src/4.4.180/r8168_asf.h b/addons/r8168/src/4.4.180/r8168_asf.h new file mode 100644 index 00000000..c4b412a6 --- /dev/null +++ b/addons/r8168/src/4.4.180/r8168_asf.h @@ -0,0 +1,295 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. 
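+ *
+ * Userspace reaches the registers below through rtl8168_asf_ioctl() on the
+ * device's private ioctl. A minimal sketch of the call pattern (illustrative
+ * only; struct asf_ioctl_struct is defined near the end of this header, and
+ * sockfd is an ordinary AF_INET socket):
+ *
+ *     struct asf_ioctl_struct asf = { .arg = ASF_GET, .offset = HBPeriod };
+ *     struct ifreq ifr = { .ifr_name = "eth0" };
+ *     ifr.ifr_data = (char *)&asf;
+ *     ioctl(sockfd, SIOCDEVPRIVATE_RTLASF, &ifr);
+ *
+ * On success asf.u.data[ASFHBPERIOD] holds the current heartbeat period.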
+ ***********************************************************************************/ + +#define SIOCDEVPRIVATE_RTLASF SIOCDEVPRIVATE + +#define FUNCTION_ENABLE 1 +#define FUNCTION_DISABLE 0 + +#define ASFCONFIG 0 +#define ASFCAPABILITY 1 +#define ASFCOMMULEN 0 +#define ASFHBPERIOD 0 +#define ASFWD16RST 0 +#define ASFCAPMASK 0 +#define ASFALERTRESND 0 +#define ASFLSNRPOLLCYC 0 +#define ASFSNRPOLLCYC 0 +#define ASFWD8RESET 0 +#define ASFRWHEXNUM 0 + +#define FMW_CAP_MASK 0x0000F867 +#define SPC_CMD_MASK 0x1F00 +#define SYS_CAP_MASK 0xFF +#define DISABLE_MASK 0x00 + +#define MAX_DATA_LEN 200 +#define MAX_STR_LEN 200 + +#define COMMU_STR_MAX_LEN 23 + +#define KEY_LEN 20 +#define UUID_LEN 16 +#define SYSID_LEN 2 + +#define RW_ONE_BYTE 1 +#define RW_TWO_BYTES 2 +#define RW_FOUR_BYTES 4 + +enum asf_registers { + HBPeriod = 0x0000, + WD8Rst = 0x0002, + WD8Timer = 0x0003, + WD16Rst = 0x0004, + LSnsrPollCycle = 0x0006, + ASFSnsrPollPrd = 0x0007, + AlertReSendCnt = 0x0008, + AlertReSendItvl = 0x0009, + SMBAddr = 0x000A, + SMBCap = 0x000B, + ASFConfigR0 = 0x000C, + ASFConfigR1 = 0x000D, + WD16Timer = 0x000E, + ConsoleMA = 0x0010, + ConsoleIP = 0x0016, + IPAddr = 0x001A, + + UUID = 0x0020, + IANA = 0x0030, + SysID = 0x0034, + Community = 0x0036, + StringLength = 0x004D, + LC = 0x004E, + EntityInst = 0x004F, + FmCapMsk = 0x0050, + SpCMDMsk = 0x0054, + SysCapMsk = 0x0056, + WDSysSt = 0x0057, + RxMsgType = 0x0058, + RxSpCMD = 0x0059, + RxSpCMDPa = 0x005A, + RxBtOpMsk = 0x005C, + RmtRstAddr = 0x005E, + RmtRstCmd = 0x005F, + RmtRstData = 0x0060, + RmtPwrOffAddr = 0x0061, + RmtPwrOffCmd = 0x0062, + RmtPwrOffData = 0x0063, + RmtPwrOnAddr = 0x0064, + RmtPwrOnCmd = 0x0065, + RmtPwrOnData = 0x0066, + RmtPCRAddr = 0x0067, + RmtPCRCmd = 0x0068, + RmtPCRData = 0x0069, + RMCP_IANA = 0x006A, + RMCP_OEM = 0x006E, + ASFSnsr0Addr = 0x0070, + + ASFSnsrEvSt = 0x0073, + ASFSnsrEvAlert = 0x0081, + + LSnsrNo = 0x00AD, + AssrtEvntMsk = 0x00AE, + DeAssrtEvntMsk = 0x00AF, + + LSnsrAddr0 = 0x00B0, + LAlertCMD0 = 0x00B1, + LAlertDataMsk0 = 0x00B2, + LAlertCmp0 = 0x00B3, + LAlertESnsrT0 = 0x00B4, + LAlertET0 = 0x00B5, + LAlertEOffset0 = 0x00B6, + LAlertES0 = 0x00B7, + LAlertSN0 = 0x00B8, + LAlertEntity0 = 0x00B9, + LAlertEI0 = 0x00BA, + LSnsrState0 = 0x00BB, + + LSnsrAddr1 = 0x00BD, + LAlertCMD1 = 0x00BE, + LAlertDataMsk1 = 0x00BF, + LAlertCmp1 = 0x00C0, + LAlertESnsrT1 = 0x00C1, + LAlertET1 = 0x00C2, + LAlertEOffset1 = 0x00C3, + LAlertES1 = 0x00C4, + LAlertSN1 = 0x00C5, + LAlertEntity1 = 0x00C6, + LAlertEI1 = 0x00C7, + LSnsrState1 = 0x00C8, + + LSnsrAddr2 = 0x00CA, + LAlertCMD2 = 0x00CB, + LAlertDataMsk2 = 0x00CC, + LAlertCmp2 = 0x00CD, + LAlertESnsrT2 = 0x00CE, + LAlertET2 = 0x00CF, + LAlertEOffset2 = 0x00D0, + LAlertES2 = 0x00D1, + LAlertSN2 = 0x00D2, + LAlertEntity2 = 0x00D3, + LAlertEI2 = 0x00D4, + LSnsrState2 = 0x00D5, + + LSnsrAddr3 = 0x00D7, + LAlertCMD3 = 0x00D8, + LAlertDataMsk3 = 0x00D9, + LAlertCmp3 = 0x00DA, + LAlertESnsrT3 = 0x00DB, + LAlertET3 = 0x00DC, + LAlertEOffset3 = 0x00DD, + LAlertES3 = 0x00DE, + LAlertSN3 = 0x00DF, + LAlertEntity3 = 0x00E0, + LAlertEI3 = 0x00E1, + LSnsrState3 = 0x00E2, + + LSnsrAddr4 = 0x00E4, + LAlertCMD4 = 0x00E5, + LAlertDataMsk4 = 0x00E6, + LAlertCmp4 = 0x00E7, + LAlertESnsrT4 = 0x00E8, + LAlertET4 = 0x00E9, + LAlertEOffset4 = 0x00EA, + LAlertES4 = 0x00EB, + LAlertSN4 = 0x00EC, + LAlertEntity4 = 0x00ED, + LAlertEI4 = 0x00EE, + LSnsrState4 = 0x00EF, + + LSnsrAddr5 = 0x00F1, + LAlertCMD5 = 0x00F2, + LAlertDataMsk5 = 0x00F3, + LAlertCmp5 = 0x00F4, + LAlertESnsrT5 = 0x00F5, + LAlertET5 = 
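+        /* Each legacy-sensor register block (LSnsrAddrN through LSnsrStateN,
+         * plus one reserved byte) is 0x0D bytes wide, so
+         * LSnsrAddrN = 0x00B0 + N * 0x0D for N = 0..7. */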
0x00F6, + LAlertEOffset5 = 0x00F7, + LAlertES5 = 0x00F8, + LAlertSN5 = 0x00F9, + LAlertEntity5 = 0x00FA, + LAlertEI5 = 0x00FB, + LSnsrState5 = 0x00FC, + + LSnsrAddr6 = 0x00FE, + LAlertCMD6 = 0x00FF, + LAlertDataMsk6 = 0x0100, + LAlertCmp6 = 0x0101, + LAlertESnsrT6 = 0x0102, + LAlertET6 = 0x0103, + LAlertEOffset6 = 0x0104, + LAlertES6 = 0x0105, + LAlertSN6 = 0x0106, + LAlertEntity6 = 0x0107, + LAlertEI6 = 0x0108, + LSnsrState6 = 0x0109, + + LSnsrAddr7 = 0x010B, + LAlertCMD7 = 0x010C, + LAlertDataMsk7 = 0x010D, + LAlertCmp7 = 0x010E, + LAlertESnsrT7 = 0x010F, + LAlertET7 = 0x0110, + LAlertEOffset7 = 0x0111, + LAlertES7 = 0x0112, + LAlertSN7 = 0x0113, + LAlertEntity7 = 0x0114, + LAlertEI7 = 0x0115, + LSnsrState7 = 0x0116, + LAssert = 0x0117, + LDAssert = 0x0118, + IPServiceType = 0x0119, + IPIdfr = 0x011A, + FlagFOffset = 0x011C, + TTL = 0x011E, + HbtEI = 0x011F, + MgtConSID1 = 0x0120, + MgtConSID2 = 0x0124, + MgdCltSID = 0x0128, + StCd = 0x012C, + MgtConUR = 0x012D, + MgtConUNL = 0x012E, + + AuthPd = 0x0130, + IntyPd = 0x0138, + MgtConRN = 0x0140, + MgdCtlRN = 0x0150, + MgtConUN = 0x0160, + Rakp2IntCk = 0x0170, + KO = 0x017C, + KA = 0x0190, + KG = 0x01A4, + KR = 0x01B8, + CP = 0x01CC, + CQ = 0x01D0, + KC = 0x01D4, + ConsoleSid = 0x01E8, + + SIK1 = 0x01FC, + SIK2 = 0x0210, + Udpsrc_port = 0x0224, + Udpdes_port = 0x0226, + Asf_debug_mux = 0x0228 +}; + +enum asf_cmdln_opt { + ASF_GET, + ASF_SET, + ASF_HELP +}; + +struct asf_ioctl_struct { + unsigned int arg; + unsigned int offset; + union { + unsigned int data[MAX_DATA_LEN]; + char string[MAX_STR_LEN]; + } u; +}; + +int rtl8168_asf_ioctl(struct net_device *dev, struct ifreq *ifr); +void rtl8168_asf_hbperiod(struct rtl8168_private *tp, int arg, unsigned int *data); +void rtl8168_asf_wd16rst(struct rtl8168_private *tp, int arg, unsigned int *data); +void rtl8168_asf_console_mac(struct rtl8168_private *, int arg, unsigned int *data); +void rtl8168_asf_ip_address(struct rtl8168_private *, int arg, int offset, unsigned int *data); +void rtl8168_asf_config_regs(struct rtl8168_private *tp, int arg, int offset, unsigned int *data); +void rtl8168_asf_capability_masks(struct rtl8168_private *tp, int arg, int offset, unsigned int *data); +void rtl8168_asf_community_string(struct rtl8168_private *tp, int arg, char *string); +void rtl8168_asf_community_string_len(struct rtl8168_private *tp, int arg, unsigned int *data); +void rtl8168_asf_alert_resend_interval(struct rtl8168_private *tp, int arg, unsigned int *data); +void rtl8168_asf_time_period(struct rtl8168_private *tp, int arg, int offset, unsigned int *data); +void rtl8168_asf_key_access(struct rtl8168_private *, int arg, int offset, unsigned int *data); +void rtl8168_asf_rw_hexadecimal(struct rtl8168_private *tp, int arg, int offset, int len, unsigned int *data); +void rtl8168_asf_rw_iana(struct rtl8168_private *tp, int arg, unsigned int *data); +void rtl8168_asf_rw_uuid(struct rtl8168_private *tp, int arg, unsigned int *data); +void rtl8168_asf_rw_systemid(struct rtl8168_private *tp, int arg, unsigned int *data); diff --git a/addons/r8168/src/4.4.180/r8168_dash.h b/addons/r8168/src/4.4.180/r8168_dash.h new file mode 100644 index 00000000..b4d358f1 --- /dev/null +++ b/addons/r8168/src/4.4.180/r8168_dash.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. 
+# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +#ifndef _LINUX_R8168_DASH_H +#define _LINUX_R8168_DASH_H + +#define SIOCDEVPRIVATE_RTLDASH SIOCDEVPRIVATE+2 + +enum rtl_dash_cmd { + RTL_DASH_ARP_NS_OFFLOAD = 0, + RTL_DASH_SET_OOB_IPMAC, + RTL_DASH_NOTIFY_OOB, + + RTL_DASH_SEND_BUFFER_DATA_TO_DASH_FW, + RTL_DASH_CHECK_SEND_BUFFER_TO_DASH_FW_COMPLETE, + RTL_DASH_GET_RCV_FROM_FW_BUFFER_DATA, + RTL_DASH_OOB_REQ, + RTL_DASH_OOB_ACK, + RTL_DASH_DETACH_OOB_REQ, + RTL_DASH_DETACH_OOB_ACK, + + RTL_FW_SET_IPV4 = 0x10, + RTL_FW_GET_IPV4, + RTL_FW_SET_IPV6, + RTL_FW_GET_IPV6, + RTL_FW_SET_EXT_SNMP, + RTL_FW_GET_EXT_SNMP, + RTL_FW_SET_WAKEUP_PATTERN, + RTL_FW_GET_WAKEUP_PATTERN, + RTL_FW_DEL_WAKEUP_PATTERN, + + RTLT_DASH_COMMAND_INVALID, +}; + +struct rtl_dash_ip_mac { + struct sockaddr ifru_addr; + struct sockaddr ifru_netmask; + struct sockaddr ifru_hwaddr; +}; + +struct rtl_dash_ioctl_struct { + __u32 cmd; + __u32 offset; + __u32 len; + union { + __u32 data; + void *data_buffer; + }; +}; + +struct settings_ipv4 { + __u32 IPv4addr; + __u32 IPv4mask; + __u32 IPv4Gateway; +}; + +struct settings_ipv6 { + __u32 reserved; + __u32 prefixLen; + __u16 IPv6addr[8]; + __u16 IPv6Gateway[8]; +}; + +struct settings_ext_snmp { + __u16 index; + __u16 oid_get_len; + __u8 oid_for_get[24]; + __u8 reserved0[26]; + __u16 value_len; + __u8 value[256]; + __u8 supported; + __u8 reserved1[27]; +}; + +struct wakeup_pattern { + __u8 index; + __u8 valid; + __u8 start; + __u8 length; + __u8 name[36]; + __u8 mask[16]; + __u8 pattern[128]; + __u32 reserved[2]; +}; + +typedef struct _RX_DASH_FROM_FW_DESC { + __le16 length; + __le16 status; + __le32 resv; + __le64 BufferAddress; +} +RX_DASH_FROM_FW_DESC, *PRX_DASH_FROM_FW_DESC; + +typedef struct _TX_DASH_SEND_FW_DESC { + __le16 length; + __le16 status; + __le32 resv; + __le64 BufferAddress; +} +TX_DASH_SEND_FW_DESC, *PTX_DASH_SEND_FW_DESC; + +typedef struct _OSOOBHdr { + __le32 len; + u8 type; + u8 flag; + u8 hostReqV; + u8 res; +} +OSOOBHdr, *POSOOBHdr; + +typedef struct _RX_DASH_BUFFER_TYPE_2 { + OSOOBHdr oobhdr; + u8 RxDataBuffer[0]; +} +RX_DASH_BUFFER_TYPE_2, *PRX_DASH_BUFFER_TYPE_2; + +#define ALIGN_8 (0x7) +#define ALIGN_16 (0xf) +#define ALIGN_32 (0x1f) +#define ALIGN_64 (0x3f) +#define ALIGN_256 (0xff) +#define ALIGN_4096 (0xfff) + +#define OCP_REG_CONFIG0 (0x10) +#define OCP_REG_CONFIG0_REV_F (0xB8) +#define OCP_REG_DASH_POLL (0x30) +#define 
OCP_REG_HOST_REQ (0x34) +#define OCP_REG_DASH_REQ (0x35) +#define OCP_REG_CR (0x36) +#define OCP_REG_DMEMSTA (0x38) +#define OCP_REG_GPHYAR (0x60) + + +#define OCP_REG_CONFIG0_DASHEN BIT_15 +#define OCP_REG_CONFIG0_OOBRESET BIT_14 +#define OCP_REG_CONFIG0_APRDY BIT_13 +#define OCP_REG_CONFIG0_FIRMWARERDY BIT_12 +#define OCP_REG_CONFIG0_DRIVERRDY BIT_11 +#define OCP_REG_CONFIG0_OOB_WDT BIT_9 +#define OCP_REG_CONFIG0_DRV_WAIT_OOB BIT_8 +#define OCP_REG_CONFIG0_TLSEN BIT_7 + +#define HW_DASH_SUPPORT_DASH(_M) ((_M)->HwSuppDashVer > 0) +#define HW_DASH_SUPPORT_TYPE_1(_M) ((_M)->HwSuppDashVer == 1) +#define HW_DASH_SUPPORT_TYPE_2(_M) ((_M)->HwSuppDashVer == 2) +#define HW_DASH_SUPPORT_TYPE_3(_M) ((_M)->HwSuppDashVer == 3) + +#define RECV_FROM_FW_BUF_SIZE (2048) +#define SEND_TO_FW_BUF_SIZE (2048) + +#define RX_DASH_FROM_FW_OWN BIT_15 +#define TX_DASH_SEND_FW_OWN BIT_15 + +#define TXS_CC3_0 (BIT_0|BIT_1|BIT_2|BIT_3) +#define TXS_EXC BIT_4 +#define TXS_LNKF BIT_5 +#define TXS_OWC BIT_6 +#define TXS_TES BIT_7 +#define TXS_UNF BIT_9 +#define TXS_LGSEN BIT_11 +#define TXS_LS BIT_12 +#define TXS_FS BIT_13 +#define TXS_EOR BIT_14 +#define TXS_OWN BIT_15 + +#define TPPool_HRDY 0x20 + +#define HostReqReg (0xC0) +#define SystemMasterDescStartAddrLow (0xF0) +#define SystemMasterDescStartAddrHigh (0xF4) +#define SystemSlaveDescStartAddrLow (0xF8) +#define SystemSlaveDescStartAddrHigh (0xFC) + +//DASH Request Type +#define WSMANREG 0x01 +#define OSPUSHDATA 0x02 + +#define RXS_OWN BIT_15 +#define RXS_EOR BIT_14 +#define RXS_FS BIT_13 +#define RXS_LS BIT_12 + +#define ISRIMR_DP_DASH_OK BIT_15 +#define ISRIMR_DP_HOST_OK BIT_13 +#define ISRIMR_DP_REQSYS_OK BIT_11 + +#define ISRIMR_DASH_INTR_EN BIT_12 +#define ISRIMR_DASH_INTR_CMAC_RESET BIT_15 + +#define ISRIMR_DASH_TYPE2_ROK BIT_0 +#define ISRIMR_DASH_TYPE2_RDU BIT_1 +#define ISRIMR_DASH_TYPE2_TOK BIT_2 +#define ISRIMR_DASH_TYPE2_TDU BIT_3 +#define ISRIMR_DASH_TYPE2_TX_FIFO_FULL BIT_4 +#define ISRIMR_DASH_TYPE2_TX_DISABLE_IDLE BIT_5 +#define ISRIMR_DASH_TYPE2_RX_DISABLE_IDLE BIT_6 + +#define CMAC_OOB_STOP 0x25 +#define CMAC_OOB_INIT 0x26 +#define CMAC_OOB_RESET 0x2a + +#define NO_BASE_ADDRESS 0x00000000 +#define RTL8168FP_OOBMAC_BASE 0xBAF70000 +#define RTL8168FP_CMAC_IOBASE 0xBAF20000 +#define RTL8168FP_KVM_BASE 0xBAF80400 +#define CMAC_SYNC_REG 0x20 +#define CMAC_RXDESC_OFFSET 0x90 //RX: 0x90 - 0x98 +#define CMAC_TXDESC_OFFSET 0x98 //TX: 0x98 - 0x9F + +/* cmac write/read MMIO register */ +#define RTL_CMAC_W8(tp, reg, val8) writeb ((val8), tp->cmac_ioaddr + (reg)) +#define RTL_CMAC_W16(tp, reg, val16) writew ((val16), tp->cmac_ioaddr + (reg)) +#define RTL_CMAC_W32(tp, reg, val32) writel ((val32), tp->cmac_ioaddr + (reg)) +#define RTL_CMAC_R8(tp, reg) readb (tp->cmac_ioaddr + (reg)) +#define RTL_CMAC_R16(tp, reg) readw (tp->cmac_ioaddr + (reg)) +#define RTL_CMAC_R32(tp, reg) ((unsigned long) readl (tp->cmac_ioaddr + (reg))) + +int rtl8168_dash_ioctl(struct net_device *dev, struct ifreq *ifr); +void HandleDashInterrupt(struct net_device *dev); +int AllocateDashShareMemory(struct net_device *dev); +void FreeAllocatedDashShareMemory(struct net_device *dev); +void DashHwInit(struct net_device *dev); + + +#endif /* _LINUX_R8168_DASH_H */ diff --git a/addons/r8168/src/4.4.180/r8168_fiber.h b/addons/r8168/src/4.4.180/r8168_fiber.h new file mode 100644 index 00000000..2e303fe2 --- /dev/null +++ b/addons/r8168/src/4.4.180/r8168_fiber.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* 
+################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +#ifndef _LINUX_R8168_FIBER_H +#define _LINUX_R8168_FIBER_H + +enum { + FIBER_MODE_NIC_ONLY = 0, + FIBER_MODE_RTL8168H_RTL8211FS, + FIBER_MODE_RTL8168H_MDI_SWITCH_RTL8211FS, + FIBER_MODE_MAX +}; + +enum { + FIBER_STAT_NOT_CHECKED = 0, + FIBER_STAT_CONNECT, + FIBER_STAT_DISCONNECT, + FIBER_STAT_MAX +}; + +#define HW_FIBER_MODE_ENABLED(_M) ((_M)->HwFiberModeVer > 0) + + + +void rtl8168_hw_init_fiber_nic(struct net_device *dev); +void rtl8168_hw_fiber_nic_d3_para(struct net_device *dev); +void rtl8168_hw_fiber_phy_config(struct net_device *dev); +void rtl8168_hw_switch_mdi_to_fiber(struct net_device *dev); +void rtl8168_hw_switch_mdi_to_nic(struct net_device *dev); +unsigned int rtl8168_hw_fiber_link_ok(struct net_device *dev); +void rtl8168_check_fiber_link_status(struct net_device *dev); +void rtl8168_check_hw_fiber_mode_support(struct net_device *dev); +void rtl8168_set_fiber_mode_software_variable(struct net_device *dev); + + +#endif /* _LINUX_R8168_FIBER_H */ diff --git a/addons/r8168/src/4.4.180/r8168_firmware.c b/addons/r8168/src/4.4.180/r8168_firmware.c new file mode 100644 index 00000000..3fe95db9 --- /dev/null +++ b/addons/r8168/src/4.4.180/r8168_firmware.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . 
+#
+# Author:
+# Realtek NIC software team
+# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan
+#
+################################################################################
+*/
+
+/************************************************************************************
+ * This product is covered by one or more of the following patents:
+ * US6,570,884, US6,115,776, and US6,327,625.
+ ***********************************************************************************/
+
+/* Header names inferred from usage in this file: mdelay(),
+ * request_firmware() and LINUX_VERSION_CODE. */
+#include <linux/delay.h>
+#include <linux/firmware.h>
+#include <linux/version.h>
+
+#include "r8168_firmware.h"
+
+enum rtl_fw_opcode {
+        PHY_READ = 0x0,
+        PHY_DATA_OR = 0x1,
+        PHY_DATA_AND = 0x2,
+        PHY_BJMPN = 0x3,
+        PHY_MDIO_CHG = 0x4,
+        PHY_CLEAR_READCOUNT = 0x7,
+        PHY_WRITE = 0x8,
+        PHY_READCOUNT_EQ_SKIP = 0x9,
+        PHY_COMP_EQ_SKIPN = 0xa,
+        PHY_COMP_NEQ_SKIPN = 0xb,
+        PHY_WRITE_PREVIOUS = 0xc,
+        PHY_SKIPN = 0xd,
+        PHY_DELAY_MS = 0xe,
+};
+
+struct fw_info {
+        u32 magic;
+        char version[RTL8168_VER_SIZE];
+        __le32 fw_start;
+        __le32 fw_len;
+        u8 chksum;
+} __packed;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,16,0)
+#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
+#endif
+#define FW_OPCODE_SIZE sizeof_field(struct rtl8168_fw_phy_action, code[0])
+
+static bool rtl8168_fw_format_ok(struct rtl8168_fw *rtl_fw)
+{
+        const struct firmware *fw = rtl_fw->fw;
+        struct fw_info *fw_info = (struct fw_info *)fw->data;
+        struct rtl8168_fw_phy_action *pa = &rtl_fw->phy_action;
+
+        if (fw->size < FW_OPCODE_SIZE)
+                return false;
+
+        if (!fw_info->magic) {
+                size_t i, size, start;
+                u8 checksum = 0;
+
+                if (fw->size < sizeof(*fw_info))
+                        return false;
+
+                for (i = 0; i < fw->size; i++)
+                        checksum += fw->data[i];
+                if (checksum != 0)
+                        return false;
+
+                start = le32_to_cpu(fw_info->fw_start);
+                if (start > fw->size)
+                        return false;
+
+                size = le32_to_cpu(fw_info->fw_len);
+                if (size > (fw->size - start) / FW_OPCODE_SIZE)
+                        return false;
+
+                strscpy(rtl_fw->version, fw_info->version, RTL8168_VER_SIZE);
+
+                pa->code = (__le32 *)(fw->data + start);
+                pa->size = size;
+        } else {
+                if (fw->size % FW_OPCODE_SIZE)
+                        return false;
+
+                strscpy(rtl_fw->version, rtl_fw->fw_name, RTL8168_VER_SIZE);
+
+                pa->code = (__le32 *)fw->data;
+                pa->size = fw->size / FW_OPCODE_SIZE;
+        }
+
+        return true;
+}
+
+static bool rtl8168_fw_data_ok(struct rtl8168_fw *rtl_fw)
+{
+        struct rtl8168_fw_phy_action *pa = &rtl_fw->phy_action;
+        size_t index;
+
+        for (index = 0; index < pa->size; index++) {
+                u32 action = le32_to_cpu(pa->code[index]);
+                u32 val = action & 0x0000ffff;
+                u32 regno = (action & 0x0fff0000) >> 16;
+
+                switch (action >> 28) {
+                case PHY_READ:
+                case PHY_DATA_OR:
+                case PHY_DATA_AND:
+                case PHY_CLEAR_READCOUNT:
+                case PHY_WRITE:
+                case PHY_WRITE_PREVIOUS:
+                case PHY_DELAY_MS:
+                        break;
+
+                case PHY_MDIO_CHG:
+                        if (val > 1)
+                                goto out;
+                        break;
+
+                case PHY_BJMPN:
+                        if (regno > index)
+                                goto out;
+                        break;
+                case PHY_READCOUNT_EQ_SKIP:
+                        if (index + 2 >= pa->size)
+                                goto out;
+                        break;
+                case PHY_COMP_EQ_SKIPN:
+                case PHY_COMP_NEQ_SKIPN:
+                case PHY_SKIPN:
+                        if (index + 1 + regno >= pa->size)
+                                goto out;
+                        break;
+
+                default:
+                        dev_err(rtl_fw->dev, "Invalid action 0x%08x\n", action);
+                        return false;
+                }
+        }
+
+        return true;
+out:
+        dev_err(rtl_fw->dev, "Out of range of firmware\n");
+        return false;
+}
+
+void rtl8168_fw_write_firmware(struct rtl8168_private *tp, struct rtl8168_fw *rtl_fw)
+{
+        struct rtl8168_fw_phy_action *pa = &rtl_fw->phy_action;
+        rtl8168_fw_write_t fw_write = rtl_fw->phy_write;
+        rtl8168_fw_read_t fw_read = rtl_fw->phy_read;
+        int predata = 0,
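+        /* Each 32-bit action word is decoded as: opcode in bits 31:28,
+         * register number or jump distance in bits 27:16, immediate data in
+         * bits 15:0. Example: 0x80081234 is PHY_WRITE (0x8) of value 0x1234
+         * to PHY register 0x008. */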
count = 0; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + enum rtl_fw_opcode opcode = action >> 28; + + if (!action) + break; + + switch (opcode) { + case PHY_READ: + predata = fw_read(tp, regno); + count++; + break; + case PHY_DATA_OR: + predata |= data; + break; + case PHY_DATA_AND: + predata &= data; + break; + case PHY_BJMPN: + index -= (regno + 1); + break; + case PHY_MDIO_CHG: + if (data) { + fw_write = rtl_fw->mac_mcu_write; + fw_read = rtl_fw->mac_mcu_read; + } else { + fw_write = rtl_fw->phy_write; + fw_read = rtl_fw->phy_read; + } + + break; + case PHY_CLEAR_READCOUNT: + count = 0; + break; + case PHY_WRITE: + fw_write(tp, regno, data); + break; + case PHY_READCOUNT_EQ_SKIP: + if (count == data) + index++; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + break; + case PHY_WRITE_PREVIOUS: + fw_write(tp, regno, predata); + break; + case PHY_SKIPN: + index += regno; + break; + case PHY_DELAY_MS: + mdelay(data); + break; + } + } +} + +void rtl8168_fw_release_firmware(struct rtl8168_fw *rtl_fw) +{ + release_firmware(rtl_fw->fw); +} + +int rtl8168_fw_request_firmware(struct rtl8168_fw *rtl_fw) +{ + int rc; + + rc = request_firmware(&rtl_fw->fw, rtl_fw->fw_name, rtl_fw->dev); + if (rc < 0) + goto out; + + if (!rtl8168_fw_format_ok(rtl_fw) || !rtl8168_fw_data_ok(rtl_fw)) { + release_firmware(rtl_fw->fw); + rc = -EINVAL; + goto out; + } + + return 0; +out: + dev_err(rtl_fw->dev, "Unable to load firmware %s (%d)\n", + rtl_fw->fw_name, rc); + return rc; +} diff --git a/addons/r8168/src/4.4.180/r8168_firmware.h b/addons/r8168/src/4.4.180/r8168_firmware.h new file mode 100644 index 00000000..563280ff --- /dev/null +++ b/addons/r8168/src/4.4.180/r8168_firmware.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek 2.5Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. 
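+ *
+ * Typical call sequence for the helpers declared below (a sketch; the PHY
+ * accessor callbacks and the firmware name are whatever the caller supplies,
+ * shown here with hypothetical mdio_read_fn/mdio_write_fn helpers):
+ *
+ *     struct rtl8168_fw rtl_fw = {
+ *             .fw_name   = "rtl_nic/rtl8168h-2.fw",
+ *             .dev       = tp_to_dev(tp),
+ *             .phy_read  = mdio_read_fn,
+ *             .phy_write = mdio_write_fn,
+ *     };
+ *     if (rtl8168_fw_request_firmware(&rtl_fw) == 0) {
+ *             rtl8168_fw_write_firmware(tp, &rtl_fw);
+ *             rtl8168_fw_release_firmware(&rtl_fw);
+ *     }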
+ ***********************************************************************************/ + +#ifndef _LINUX_RTL8168_FIRMWARE_H +#define _LINUX_RTL8168_FIRMWARE_H + +#include +#include + +struct rtl8168_private; +typedef void (*rtl8168_fw_write_t)(struct rtl8168_private *tp, u16 reg, u16 val); +typedef u32 (*rtl8168_fw_read_t)(struct rtl8168_private *tp, u16 reg); + +#define RTL8168_VER_SIZE 32 + +struct rtl8168_fw { + rtl8168_fw_write_t phy_write; + rtl8168_fw_read_t phy_read; + rtl8168_fw_write_t mac_mcu_write; + rtl8168_fw_read_t mac_mcu_read; + const struct firmware *fw; + const char *fw_name; + struct device *dev; + + char version[RTL8168_VER_SIZE]; + + struct rtl8168_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; +}; + +int rtl8168_fw_request_firmware(struct rtl8168_fw *rtl_fw); +void rtl8168_fw_release_firmware(struct rtl8168_fw *rtl_fw); +void rtl8168_fw_write_firmware(struct rtl8168_private *tp, struct rtl8168_fw *rtl_fw); + +#endif /* _LINUX_RTL8168_FIRMWARE_H */ diff --git a/addons/r8168/src/4.4.180/r8168_n.c b/addons/r8168/src/4.4.180/r8168_n.c new file mode 100644 index 00000000..a44352f3 --- /dev/null +++ b/addons/r8168/src/4.4.180/r8168_n.c @@ -0,0 +1,28743 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +/* + * This driver is modified from r8169.c in Linux kernel 2.6.18 + */ + +/* In Linux 5.4 asm_inline was introduced, but it's not supported by clang. + * Redefine it to just asm to enable successful compilation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) +#include +#include +#endif +#include +#include +#include +#include + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) +#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0) +#include +#endif +#endif +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,37) +#include +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) +#define dev_printk(A,B,fmt,args...) 
printk(A fmt,##args) +#else +#include +#include +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,31) +#include +#endif + +#include +#include + +#include "r8168.h" +#include "r8168_asf.h" +#include "rtl_eeprom.h" +#include "rtltool.h" +#include "r8168_firmware.h" + +#ifdef ENABLE_R8168_PROCFS +#include +#include +#endif + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168E_4 "rtl_nic/rtl8168e-4.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168EP_1 "rtl_nic/rtl8168ep-1.fw" +#define FIRMWARE_8168EP_2 "rtl_nic/rtl8168ep-2.fw" +#define FIRMWARE_8168EP_3 "rtl_nic/rtl8168ep-3.fw" +#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8168FP_3 "rtl_nic/rtl8168fp-3.fw" +#define FIRMWARE_8168FP_4 "rtl_nic/rtl8168fp-4.fw" + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +static const int multicast_filter_limit = 32; + +static const struct { + const char *name; + const char *fw_name; +} rtl_chip_fw_infos[] = { + /* PCI-E devices. */ + [CFG_METHOD_1] = {"RTL8168B/8111", }, + [CFG_METHOD_2] = {"RTL8168B/8111", }, + [CFG_METHOD_3] = {"RTL8168B/8111", }, + [CFG_METHOD_4] = {"RTL8168C/8111C", }, + [CFG_METHOD_5] = {"RTL8168C/8111C", }, + [CFG_METHOD_6] = {"RTL8168C/8111C", }, + [CFG_METHOD_7] = {"RTL8168CP/8111CP", }, + [CFG_METHOD_8] = {"RTL8168CP/8111CP", }, + [CFG_METHOD_9] = {"RTL8168D/8111D", FIRMWARE_8168D_1}, + [CFG_METHOD_10] = {"RTL8168D/8111D", FIRMWARE_8168D_2}, + [CFG_METHOD_11] = {"RTL8168DP/8111DP", }, + [CFG_METHOD_12] = {"RTL8168DP/8111DP", }, + [CFG_METHOD_13] = {"RTL8168DP/8111DP", }, + [CFG_METHOD_14] = {"RTL8168E/8111E", FIRMWARE_8168E_1}, + [CFG_METHOD_15] = {"RTL8168E/8111E", FIRMWARE_8168E_2}, + [CFG_METHOD_16] = {"RTL8168E-VL/8111E-VL", FIRMWARE_8168E_3}, + [CFG_METHOD_17] = {"RTL8168E-VL/8111E-VL", FIRMWARE_8168E_4}, + [CFG_METHOD_18] = {"RTL8168F/8111F", FIRMWARE_8168F_1}, + [CFG_METHOD_19] = {"RTL8168F/8111F", FIRMWARE_8168F_2}, + [CFG_METHOD_20] = {"RTL8411", FIRMWARE_8411_1}, + [CFG_METHOD_21] = {"RTL8168G/8111G", FIRMWARE_8168G_2}, + [CFG_METHOD_22] = {"RTL8168G/8111G", }, + [CFG_METHOD_23] = {"RTL8168EP/8111EP", FIRMWARE_8168EP_1}, + [CFG_METHOD_24] = {"RTL8168GU/8111GU", }, + [CFG_METHOD_25] = {"RTL8168GU/8111GU", FIRMWARE_8168G_3}, + [CFG_METHOD_26] = {"8411B", FIRMWARE_8411_2}, + [CFG_METHOD_27] = {"RTL8168EP/8111EP", FIRMWARE_8168EP_2}, + [CFG_METHOD_28] = {"RTL8168EP/8111EP", FIRMWARE_8168EP_3}, + [CFG_METHOD_29] = {"RTL8168H/8111H", FIRMWARE_8168H_1}, + [CFG_METHOD_30] = {"RTL8168H/8111H", FIRMWARE_8168H_2}, + [CFG_METHOD_31] = {"RTL8168FP/8111FP", }, + [CFG_METHOD_32] = {"RTL8168FP/8111FP", FIRMWARE_8168FP_3}, + [CFG_METHOD_33] = {"RTL8168FP/8111FP", FIRMWARE_8168FP_4}, + [CFG_METHOD_DEFAULT] = {"Unknown", }, +}; + +#define _R(NAME,MAC,RCR,MASK, JumFrameSz) \ + { .name = NAME, .mcfg = MAC, .RCR_Cfg = RCR, .RxConfigMask = MASK, .jumbo_frame_sz = JumFrameSz } + +static const struct { + const char *name; + u8 mcfg; + 
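+        /* RCR_Cfg holds the default RxConfig register bits for this chip;
+         * rows are indexed by the CFG_METHOD_* value kept in tp->chipset,
+         * so rtl_chip_info[tp->chipset] picks the entry built by _R(). */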
u32 RCR_Cfg; + u32 RxConfigMask; /* Clears the bits supported by this chip */ + u32 jumbo_frame_sz; +} rtl_chip_info[] = { + _R("RTL8168B/8111B", + CFG_METHOD_1, + (Reserved2_data << Reserved2_shift) | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_4k), + + _R("RTL8168B/8111B", + CFG_METHOD_2, + (Reserved2_data << Reserved2_shift) | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_4k), + + _R("RTL8168B/8111B", + CFG_METHOD_3, + (Reserved2_data << Reserved2_shift) | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_4k), + + _R("RTL8168C/8111C", + CFG_METHOD_4, + RxCfg_128_int_en | RxCfg_fet_multi_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_6k), + + _R("RTL8168C/8111C", + CFG_METHOD_5, + RxCfg_128_int_en | RxCfg_fet_multi_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_6k), + + _R("RTL8168C/8111C", + CFG_METHOD_6, + RxCfg_128_int_en | RxCfg_fet_multi_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_6k), + + _R("RTL8168CP/8111CP", + CFG_METHOD_7, + RxCfg_128_int_en | RxCfg_fet_multi_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_6k), + + _R("RTL8168CP/8111CP", + CFG_METHOD_8, + RxCfg_128_int_en | RxCfg_fet_multi_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_6k), + + _R("RTL8168D/8111D", + CFG_METHOD_9, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168D/8111D", + CFG_METHOD_10, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168DP/8111DP", + CFG_METHOD_11, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168DP/8111DP", + CFG_METHOD_12, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168DP/8111DP", + CFG_METHOD_13, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168E/8111E", + CFG_METHOD_14, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168E/8111E", + CFG_METHOD_15, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168E-VL/8111E-VL", + CFG_METHOD_16, + RxCfg_128_int_en | RxEarly_off_V1 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e0080, + Jumbo_Frame_9k), + + _R("RTL8168E-VL/8111E-VL", + CFG_METHOD_17, + RxCfg_128_int_en | RxEarly_off_V1 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168F/8111F", + CFG_METHOD_18, + RxCfg_128_int_en | RxEarly_off_V1 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168F/8111F", + CFG_METHOD_19, + RxCfg_128_int_en | RxEarly_off_V1 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8411", + CFG_METHOD_20, + RxCfg_128_int_en | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e1880, + Jumbo_Frame_9k), + + _R("RTL8168G/8111G", + CFG_METHOD_21, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168G/8111G", + CFG_METHOD_22, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168EP/8111EP", + CFG_METHOD_23, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168GU/8111GU", + CFG_METHOD_24, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | 
(RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168GU/8111GU", + CFG_METHOD_25, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("8411B", + CFG_METHOD_26, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168EP/8111EP", + CFG_METHOD_27, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168EP/8111EP", + CFG_METHOD_28, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168H/8111H", + CFG_METHOD_29, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168H/8111H", + CFG_METHOD_30, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168FP/8111FP", + CFG_METHOD_31, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168FP/8111FP", + CFG_METHOD_32, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("RTL8168FP/8111FP", + CFG_METHOD_33, + RxCfg_128_int_en | RxEarly_off_V2 | Rx_Single_fetch_V2 | (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_9k), + + _R("Unknown", + CFG_METHOD_DEFAULT, + (RX_DMA_BURST << RxCfgDMAShift), + 0xff7e5880, + Jumbo_Frame_1k) +}; +#undef _R + +#ifndef PCI_VENDOR_ID_DLINK +#define PCI_VENDOR_ID_DLINK 0x1186 +#endif + +static struct pci_device_id rtl8168_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x2502), }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x2600), }, + { PCI_VENDOR_ID_DLINK, 0x4300, 0x1186, 0x4b10,}, + {0,}, +}; + +MODULE_DEVICE_TABLE(pci, rtl8168_pci_tbl); + +static int rx_copybreak = 0; +static int use_dac = 1; +static int timer_count = 0x2600; +static int dynamic_aspm_packet_threshold = 10; + +static struct { + u32 msg_enable; +} debug = { -1 }; + +static unsigned int speed_mode = SPEED_1000; +static unsigned int duplex_mode = DUPLEX_FULL; +static unsigned int autoneg_mode = AUTONEG_ENABLE; +static unsigned int advertising_mode = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full; +#ifdef CONFIG_ASPM +static int aspm = 1; +#else +static int aspm = 0; +#endif +#ifdef CONFIG_DYNAMIC_ASPM +static int dynamic_aspm = 1; +#else +static int dynamic_aspm = 0; +#endif +#ifdef ENABLE_S5WOL +static int s5wol = 1; +#else +static int s5wol = 0; +#endif +#ifdef ENABLE_S5_KEEP_CURR_MAC +static int s5_keep_curr_mac = 1; +#else +static int s5_keep_curr_mac = 0; +#endif +#ifdef ENABLE_EEE +static int eee_enable = 1; +#else +static int eee_enable = 0; +#endif +#ifdef CONFIG_SOC_LAN +static ulong hwoptimize = HW_PATCH_SOC_LAN; +#else +static ulong hwoptimize = 0; +#endif +#ifdef ENABLE_S0_MAGIC_PACKET +static int s0_magic_packet = 1; +#else +static int s0_magic_packet = 0; +#endif + +MODULE_AUTHOR("Realtek and the Linux r8168 crew "); +MODULE_DESCRIPTION("RealTek RTL-8168 Gigabit Ethernet driver"); + +module_param(speed_mode, uint, 0); 
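+/* All module parameters in this block are read once at load time, e.g.
+ * (illustrative values):
+ *
+ *     modprobe r8168 aspm=0 eee_enable=0 s5wol=1
+ */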
+MODULE_PARM_DESC(speed_mode, "force phy operation. Deprecated by ethtool (8)."); + +module_param(duplex_mode, uint, 0); +MODULE_PARM_DESC(duplex_mode, "force phy operation. Deprecated by ethtool (8)."); + +module_param(autoneg_mode, uint, 0); +MODULE_PARM_DESC(autoneg_mode, "force phy operation. Deprecated by ethtool (8)."); + +module_param(advertising_mode, uint, 0); +MODULE_PARM_DESC(advertising_mode, "force phy operation. Deprecated by ethtool (8)."); + +module_param(aspm, int, 0); +MODULE_PARM_DESC(aspm, "Enable ASPM."); + +module_param(dynamic_aspm, int, 0); +MODULE_PARM_DESC(aspm, "Enable Software Dynamic ASPM."); + +module_param(s5wol, int, 0); +MODULE_PARM_DESC(s5wol, "Enable Shutdown Wake On Lan."); + +module_param(s5_keep_curr_mac, int, 0); +MODULE_PARM_DESC(s5_keep_curr_mac, "Enable Shutdown Keep Current MAC Address."); + +module_param(rx_copybreak, int, 0); +MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); + +module_param(use_dac, int, 0); +MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot."); + +module_param(timer_count, int, 0); +MODULE_PARM_DESC(timer_count, "Timer Interrupt Interval."); + +module_param(eee_enable, int, 0); +MODULE_PARM_DESC(eee_enable, "Enable Energy Efficient Ethernet."); + +module_param(hwoptimize, ulong, 0); +MODULE_PARM_DESC(hwoptimize, "Enable HW optimization function."); + +module_param(s0_magic_packet, int, 0); +MODULE_PARM_DESC(s0_magic_packet, "Enable S0 Magic Packet."); + +module_param(dynamic_aspm_packet_threshold, int, 0); +MODULE_PARM_DESC(dynamic_aspm_packet_threshold, "Dynamic ASPM packet threshold."); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) +module_param_named(debug, debug.msg_enable, int, 0); +MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); +#endif//LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + +MODULE_LICENSE("GPL"); +#ifdef ENABLE_USE_FIRMWARE_FILE +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8168E_4); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168EP_1); +MODULE_FIRMWARE(FIRMWARE_8168EP_2); +MODULE_FIRMWARE(FIRMWARE_8168EP_3); +MODULE_FIRMWARE(FIRMWARE_8168H_1); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8168FP_3); +MODULE_FIRMWARE(FIRMWARE_8168FP_4); +#endif + +MODULE_VERSION(RTL8168_VERSION); + +static void rtl8168_sleep_rx_enable(struct net_device *dev); +static void rtl8168_dsm(struct net_device *dev, int dev_state); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) +static void rtl8168_esd_timer(unsigned long __opaque); +#else +static void rtl8168_esd_timer(struct timer_list *t); +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) +static void rtl8168_link_timer(unsigned long __opaque); +#else +static void rtl8168_link_timer(struct timer_list *t); +#endif +static void rtl8168_tx_clear(struct rtl8168_private *tp); +static void rtl8168_rx_clear(struct rtl8168_private *tp); + +static int rtl8168_open(struct net_device *dev); +static netdev_tx_t rtl8168_start_xmit(struct sk_buff *skb, struct net_device *dev); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) +static irqreturn_t rtl8168_interrupt(int irq, void *dev_instance, struct pt_regs *regs); +#else +static irqreturn_t 
rtl8168_interrupt(int irq, void *dev_instance); +#endif +static void rtl8168_rx_desc_offset0_init(struct rtl8168_private *, int); +static int rtl8168_init_ring(struct net_device *dev); +static void rtl8168_hw_config(struct net_device *dev); +static void rtl8168_hw_start(struct net_device *dev); +static int rtl8168_close(struct net_device *dev); +static void rtl8168_set_rx_mode(struct net_device *dev); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) +static void rtl8168_tx_timeout(struct net_device *dev, unsigned int txqueue); +#else +static void rtl8168_tx_timeout(struct net_device *dev); +#endif +static struct net_device_stats *rtl8168_get_stats(struct net_device *dev); +static int rtl8168_rx_interrupt(struct net_device *, struct rtl8168_private *, napi_budget); +static int rtl8168_change_mtu(struct net_device *dev, int new_mtu); +static void rtl8168_down(struct net_device *dev); + +static int rtl8168_set_mac_address(struct net_device *dev, void *p); +void rtl8168_rar_set(struct rtl8168_private *tp, uint8_t *addr); +static void rtl8168_desc_addr_fill(struct rtl8168_private *); +static void rtl8168_tx_desc_init(struct rtl8168_private *tp); +static void rtl8168_rx_desc_init(struct rtl8168_private *tp); + +static u16 rtl8168_get_hw_phy_mcu_code_ver(struct rtl8168_private *tp); + +static void rtl8168_hw_reset(struct net_device *dev); + +static void rtl8168_phy_power_up(struct net_device *dev); +static void rtl8168_phy_power_down(struct net_device *dev); +static int rtl8168_set_speed(struct net_device *dev, u8 autoneg, u32 speed, u8 duplex, u32 adv); + +static int rtl8168_set_phy_mcu_patch_request(struct rtl8168_private *tp); +static int rtl8168_clear_phy_mcu_patch_request(struct rtl8168_private *tp); + +#ifdef CONFIG_R8168_NAPI +static int rtl8168_poll(napi_ptr napi, napi_budget budget); +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) +static void rtl8168_reset_task(void *_data); +#else +static void rtl8168_reset_task(struct work_struct *work); +#endif + +static inline struct device *tp_to_dev(struct rtl8168_private *tp) +{ + return &tp->pci_dev->dev; +} + +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) && \ + LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,00))) +void ethtool_convert_legacy_u32_to_link_mode(unsigned long *dst, + u32 legacy_u32) +{ + bitmap_zero(dst, __ETHTOOL_LINK_MODE_MASK_NBITS); + dst[0] = legacy_u32; +} + +bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, + const unsigned long *src) +{ + bool retval = true; + + /* TODO: following test will soon always be true */ + if (__ETHTOOL_LINK_MODE_MASK_NBITS > 32) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(ext); + + bitmap_zero(ext, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_fill(ext, 32); + bitmap_complement(ext, ext, __ETHTOOL_LINK_MODE_MASK_NBITS); + if (bitmap_intersects(ext, src, + __ETHTOOL_LINK_MODE_MASK_NBITS)) { + /* src mask goes beyond bit 31 */ + retval = false; + } + } + *legacy_u32 = src[0]; + return retval; +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) + +#ifndef LPA_1000FULL +#define LPA_1000FULL 0x0800 +#endif + +#ifndef LPA_1000HALF +#define LPA_1000HALF 0x0400 +#endif + +static inline u32 mii_adv_to_ethtool_adv_t(u32 adv) +{ + u32 result = 0; + + if (adv & ADVERTISE_10HALF) + result |= ADVERTISED_10baseT_Half; + if (adv & ADVERTISE_10FULL) + result |= ADVERTISED_10baseT_Full; + if (adv & ADVERTISE_100HALF) + result |= ADVERTISED_100baseT_Half; + if (adv & ADVERTISE_100FULL) + result |= ADVERTISED_100baseT_Full; + if (adv & ADVERTISE_PAUSE_CAP) + result |= ADVERTISED_Pause; + if (adv & 
ADVERTISE_PAUSE_ASYM) + result |= ADVERTISED_Asym_Pause; + + return result; +} + +static inline u32 mii_lpa_to_ethtool_lpa_t(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_LPACK) + result |= ADVERTISED_Autoneg; + + return result | mii_adv_to_ethtool_adv_t(lpa); +} + +static inline u32 mii_stat1000_to_ethtool_lpa_t(u32 lpa) +{ + u32 result = 0; + + if (lpa & LPA_1000HALF) + result |= ADVERTISED_1000baseT_Half; + if (lpa & LPA_1000FULL) + result |= ADVERTISED_1000baseT_Full; + + return result; +} + +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) +static inline void eth_hw_addr_random(struct net_device *dev) +{ + random_ether_addr(dev->dev_addr); +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) +#undef ethtool_ops +#define ethtool_ops _kc_ethtool_ops + +struct _kc_ethtool_ops { + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + int (*set_settings)(struct net_device *, struct ethtool_cmd *); + void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); + int (*get_regs_len)(struct net_device *); + void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); + void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); + int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); + u32 (*get_msglevel)(struct net_device *); + void (*set_msglevel)(struct net_device *, u32); + int (*nway_reset)(struct net_device *); + u32 (*get_link)(struct net_device *); + int (*get_eeprom_len)(struct net_device *); + int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); + int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); + int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); + void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); + int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); + void (*get_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + int (*set_pauseparam)(struct net_device *, + struct ethtool_pauseparam*); + u32 (*get_rx_csum)(struct net_device *); + int (*set_rx_csum)(struct net_device *, u32); + u32 (*get_tx_csum)(struct net_device *); + int (*set_tx_csum)(struct net_device *, u32); + u32 (*get_sg)(struct net_device *); + int (*set_sg)(struct net_device *, u32); + u32 (*get_tso)(struct net_device *); + int (*set_tso)(struct net_device *, u32); + int (*self_test_count)(struct net_device *); + void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); + void (*get_strings)(struct net_device *, u32 stringset, u8 *); + int (*phys_id)(struct net_device *, u32); + int (*get_stats_count)(struct net_device *); + void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, + u64 *); +} *ethtool_ops = NULL; + +#undef SET_ETHTOOL_OPS +#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops)) + +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0) +#ifndef SET_ETHTOOL_OPS +#define SET_ETHTOOL_OPS(netdev,ops) \ + ( (netdev)->ethtool_ops = (ops) ) +#endif //SET_ETHTOOL_OPS +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0) + +//#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) +#ifndef netif_msg_init +#define netif_msg_init _kc_netif_msg_init +/* copied from linux kernel 2.6.20 include/linux/netdevice.h */ +static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) +{ + /* use default */ + if (debug_value < 0 || debug_value >= (sizeof(u32) 
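+        /* debug_value N enables the N lowest NETIF_MSG_* categories:
+         * e.g. debug_value = 5 yields (1 << 5) - 1 = 0x1f. Out-of-range
+         * values (negative or >= 32) fall back to default_msg_enable_bits. */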
* 8)) + return default_msg_enable_bits; + if (debug_value == 0) /* no output */ + return 0; + /* set low N bits */ + return (1 << debug_value) - 1; +} + +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) +static inline void eth_copy_and_sum (struct sk_buff *dest, + const unsigned char *src, + int len, int base) +{ + memcpy (dest->data, src, len); +} +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) +/* copied from linux kernel 2.6.20 /include/linux/time.h */ +/* Parameters used to convert the timespec values: */ +#define MSEC_PER_SEC 1000L + +/* copied from linux kernel 2.6.20 /include/linux/jiffies.h */ +/* + * Change timeval to jiffies, trying to avoid the + * most obvious overflows.. + * + * And some not so obvious. + * + * Note that we don't want to return MAX_LONG, because + * for various timeout reasons we often end up having + * to wait "jiffies+1" in order to guarantee that we wait + * at _least_ "jiffies" - so "jiffies+1" had better still + * be positive. + */ +#define MAX_JIFFY_OFFSET ((~0UL >> 1)-1) + +/* + * Convert jiffies to milliseconds and back. + * + * Avoid unnecessary multiplications/divisions in the + * two most common HZ cases: + */ +static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) +{ +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); +#else + return (j * MSEC_PER_SEC) / HZ; +#endif +} + +static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) +{ + if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) + return MAX_JIFFY_OFFSET; +#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); +#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) + return m * (HZ / MSEC_PER_SEC); +#else + return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; +#endif +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) + + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) + +/* copied from linux kernel 2.6.12.6 /include/linux/pm.h */ +typedef int __bitwise pci_power_t; + +/* copied from linux kernel 2.6.12.6 /include/linux/pci.h */ +typedef u32 __bitwise pm_message_t; + +#define PCI_D0 ((pci_power_t __force) 0) +#define PCI_D1 ((pci_power_t __force) 1) +#define PCI_D2 ((pci_power_t __force) 2) +#define PCI_D3hot ((pci_power_t __force) 3) +#define PCI_D3cold ((pci_power_t __force) 4) +#define PCI_POWER_ERROR ((pci_power_t __force) -1) + +/* copied from linux kernel 2.6.12.6 /drivers/pci/pci.c */ +/** + * pci_choose_state - Choose the power state of a PCI device + * @dev: PCI device to be suspended + * @state: target sleep state for the whole system. This is the value + * that is passed to suspend() function. + * + * Returns PCI power state suitable for given device and given system + * message. 
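+ * This compatibility copy only ever returns PCI_D0 or PCI_D3hot; any other
+ * state value is logged and treated as PCI_D0.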
+ */ + +pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state) +{ + if (!pci_find_capability(dev, PCI_CAP_ID_PM)) + return PCI_D0; + + switch (state) { + case 0: + return PCI_D0; + case 3: + return PCI_D3hot; + default: + printk("They asked me for state %d\n", state); +// BUG(); + } + return PCI_D0; +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) +/** + * msleep_interruptible - sleep waiting for waitqueue interruptions + * @msecs: Time in milliseconds to sleep for + */ +#define msleep_interruptible _kc_msleep_interruptible +unsigned long _kc_msleep_interruptible(unsigned int msecs) +{ + unsigned long timeout = _kc_msecs_to_jiffies(msecs); + + while (timeout && !signal_pending(current)) { + set_current_state(TASK_INTERRUPTIBLE); + timeout = schedule_timeout(timeout); + } + return _kc_jiffies_to_msecs(timeout); +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) +/* copied from linux kernel 2.6.20 include/linux/sched.h */ +#ifndef __sched +#define __sched __attribute__((__section__(".sched.text"))) +#endif + +/* copied from linux kernel 2.6.20 kernel/timer.c */ +signed long __sched schedule_timeout_uninterruptible(signed long timeout) +{ + __set_current_state(TASK_UNINTERRUPTIBLE); + return schedule_timeout(timeout); +} + +/* copied from linux kernel 2.6.20 include/linux/mii.h */ +#undef if_mii +#define if_mii _kc_if_mii +static inline struct mii_ioctl_data *if_mii(struct ifreq *rq) +{ + return (struct mii_ioctl_data *) &rq->ifr_ifru; +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) + +struct rtl8168_counters { + u64 tx_packets; + u64 rx_packets; + u64 tx_errors; + u32 rx_errors; + u16 rx_missed; + u16 align_errors; + u32 tx_one_collision; + u32 tx_multi_collision; + u64 rx_unicast; + u64 rx_broadcast; + u32 rx_multicast; + u16 tx_aborted; + u16 tx_underrun; +}; + +#ifdef ENABLE_R8168_PROCFS +/**************************************************************************** +* -----------------------------PROCFS STUFF------------------------- +***************************************************************************** +*/ + +static struct proc_dir_entry *rtl8168_proc; +static int proc_init_num = 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) +static int proc_get_driver_variable(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + seq_puts(m, "\nDump Driver Variable\n"); + + spin_lock_irqsave(&tp->lock, flags); + seq_puts(m, "Variable\tValue\n----------\t-----\n"); + seq_printf(m, "MODULENAME\t%s\n", MODULENAME); + seq_printf(m, "driver version\t%s\n", RTL8168_VERSION); + seq_printf(m, "chipset\t%d\n", tp->chipset); + seq_printf(m, "chipset_name\t%s\n", rtl_chip_info[tp->chipset].name); + seq_printf(m, "mtu\t%d\n", dev->mtu); + seq_printf(m, "NUM_RX_DESC\t0x%x\n", NUM_RX_DESC); + seq_printf(m, "cur_rx\t0x%x\n", tp->cur_rx); + seq_printf(m, "dirty_rx\t0x%x\n", tp->dirty_rx); + seq_printf(m, "NUM_TX_DESC\t0x%x\n", NUM_TX_DESC); + seq_printf(m, "cur_tx\t0x%x\n", tp->cur_tx); + seq_printf(m, "dirty_tx\t0x%x\n", tp->dirty_tx); + seq_printf(m, "rx_buf_sz\t0x%x\n", tp->rx_buf_sz); + seq_printf(m, "esd_flag\t0x%x\n", tp->esd_flag); + seq_printf(m, "pci_cfg_is_read\t0x%x\n", tp->pci_cfg_is_read); + seq_printf(m, "rtl8168_rx_config\t0x%x\n", tp->rtl8168_rx_config); + seq_printf(m, "cp_cmd\t0x%x\n", tp->cp_cmd); + seq_printf(m, "intr_mask\t0x%x\n", tp->intr_mask); + 
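+        /* Everything dumped here is readable at runtime through procfs.
+         * Assuming the usual r8168 layout (per-interface entries under a
+         * "r8168" directory in /proc/net), for example:
+         *
+         *     cat /proc/net/r8168/eth0/driver_var
+         */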
seq_printf(m, "timer_intr_mask\t0x%x\n", tp->timer_intr_mask); + seq_printf(m, "wol_enabled\t0x%x\n", tp->wol_enabled); + seq_printf(m, "wol_opts\t0x%x\n", tp->wol_opts); + seq_printf(m, "efuse_ver\t0x%x\n", tp->efuse_ver); + seq_printf(m, "eeprom_type\t0x%x\n", tp->eeprom_type); + seq_printf(m, "autoneg\t0x%x\n", tp->autoneg); + seq_printf(m, "duplex\t0x%x\n", tp->duplex); + seq_printf(m, "speed\t%d\n", tp->speed); + seq_printf(m, "advertising\t0x%x\n", tp->advertising); + seq_printf(m, "eeprom_len\t0x%x\n", tp->eeprom_len); + seq_printf(m, "cur_page\t0x%x\n", tp->cur_page); + seq_printf(m, "bios_setting\t0x%x\n", tp->bios_setting); + seq_printf(m, "features\t0x%x\n", tp->features); + seq_printf(m, "org_pci_offset_99\t0x%x\n", tp->org_pci_offset_99); + seq_printf(m, "org_pci_offset_180\t0x%x\n", tp->org_pci_offset_180); + seq_printf(m, "issue_offset_99_event\t0x%x\n", tp->issue_offset_99_event); + seq_printf(m, "org_pci_offset_80\t0x%x\n", tp->org_pci_offset_80); + seq_printf(m, "org_pci_offset_81\t0x%x\n", tp->org_pci_offset_81); + seq_printf(m, "use_timer_interrrupt\t0x%x\n", tp->use_timer_interrrupt); + seq_printf(m, "HwIcVerUnknown\t0x%x\n", tp->HwIcVerUnknown); + seq_printf(m, "NotWrRamCodeToMicroP\t0x%x\n", tp->NotWrRamCodeToMicroP); + seq_printf(m, "NotWrMcuPatchCode\t0x%x\n", tp->NotWrMcuPatchCode); + seq_printf(m, "HwHasWrRamCodeToMicroP\t0x%x\n", tp->HwHasWrRamCodeToMicroP); + seq_printf(m, "sw_ram_code_ver\t0x%x\n", tp->sw_ram_code_ver); + seq_printf(m, "hw_ram_code_ver\t0x%x\n", tp->hw_ram_code_ver); + seq_printf(m, "rtk_enable_diag\t0x%x\n", tp->rtk_enable_diag); + seq_printf(m, "ShortPacketSwChecksum\t0x%x\n", tp->ShortPacketSwChecksum); + seq_printf(m, "UseSwPaddingShortPkt\t0x%x\n", tp->UseSwPaddingShortPkt); + seq_printf(m, "RequireAdcBiasPatch\t0x%x\n", tp->RequireAdcBiasPatch); + seq_printf(m, "AdcBiasPatchIoffset\t0x%x\n", tp->AdcBiasPatchIoffset); + seq_printf(m, "RequireAdjustUpsTxLinkPulseTiming\t0x%x\n", tp->RequireAdjustUpsTxLinkPulseTiming); + seq_printf(m, "SwrCnt1msIni\t0x%x\n", tp->SwrCnt1msIni); + seq_printf(m, "HwSuppNowIsOobVer\t0x%x\n", tp->HwSuppNowIsOobVer); + seq_printf(m, "HwFiberModeVer\t0x%x\n", tp->HwFiberModeVer); + seq_printf(m, "HwFiberStat\t0x%x\n", tp->HwFiberStat); + seq_printf(m, "HwSwitchMdiToFiber\t0x%x\n", tp->HwSwitchMdiToFiber); + seq_printf(m, "HwSuppSerDesPhyVer\t0x%x\n", tp->HwSuppSerDesPhyVer); + seq_printf(m, "NicCustLedValue\t0x%x\n", tp->NicCustLedValue); + seq_printf(m, "RequiredSecLanDonglePatch\t0x%x\n", tp->RequiredSecLanDonglePatch); + seq_printf(m, "HwSuppDashVer\t0x%x\n", tp->HwSuppDashVer); + seq_printf(m, "DASH\t0x%x\n", tp->DASH); + seq_printf(m, "dash_printer_enabled\t0x%x\n", tp->dash_printer_enabled); + seq_printf(m, "HwSuppKCPOffloadVer\t0x%x\n", tp->HwSuppKCPOffloadVer); + seq_printf(m, "speed_mode\t0x%x\n", speed_mode); + seq_printf(m, "duplex_mode\t0x%x\n", duplex_mode); + seq_printf(m, "autoneg_mode\t0x%x\n", autoneg_mode); + seq_printf(m, "advertising_mode\t0x%x\n", advertising_mode); + seq_printf(m, "aspm\t0x%x\n", aspm); + seq_printf(m, "s5wol\t0x%x\n", s5wol); + seq_printf(m, "s5_keep_curr_mac\t0x%x\n", s5_keep_curr_mac); + seq_printf(m, "eee_enable\t0x%x\n", tp->eee_enabled); + seq_printf(m, "hwoptimize\t0x%lx\n", hwoptimize); + seq_printf(m, "proc_init_num\t0x%x\n", proc_init_num); + seq_printf(m, "s0_magic_packet\t0x%x\n", s0_magic_packet); + seq_printf(m, "HwSuppMagicPktVer\t0x%x\n", tp->HwSuppMagicPktVer); + seq_printf(m, "HwSuppUpsVer\t0x%x\n", tp->HwSuppUpsVer); + seq_printf(m, "HwSuppEsdVer\t0x%x\n", 
tp->HwSuppEsdVer); + seq_printf(m, "HwSuppCheckPhyDisableModeVer\t0x%x\n", tp->HwSuppCheckPhyDisableModeVer); + seq_printf(m, "HwPkgDet\t0x%x\n", tp->HwPkgDet); + seq_printf(m, "random_mac\t0x%x\n", tp->random_mac); + seq_printf(m, "org_mac_addr\t%pM\n", tp->org_mac_addr); +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) + seq_printf(m, "perm_addr\t%pM\n", dev->perm_addr); +#endif + seq_printf(m, "dev_addr\t%pM\n", dev->dev_addr); + spin_unlock_irqrestore(&tp->lock, flags); + + seq_putc(m, '\n'); + return 0; +} + +static int proc_get_tally_counter(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + struct rtl8168_private *tp = netdev_priv(dev); + struct rtl8168_counters *counters; + dma_addr_t paddr; + u32 cmd; + u32 WaitCnt; + unsigned long flags; + + seq_puts(m, "\nDump Tally Counter\n"); + + //ASSERT_RTNL(); + + counters = tp->tally_vaddr; + paddr = tp->tally_paddr; + if (!counters) { + seq_puts(m, "\nDump Tally Counter Fail\n"); + return 0; + } + + spin_lock_irqsave(&tp->lock, flags); + RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | CounterDump); + + WaitCnt = 0; + while (RTL_R32(tp, CounterAddrLow) & CounterDump) { + udelay(10); + + WaitCnt++; + if (WaitCnt > 20) + break; + } + spin_unlock_irqrestore(&tp->lock, flags); + + seq_puts(m, "Statistics\tValue\n----------\t-----\n"); + seq_printf(m, "tx_packets\t%lld\n", le64_to_cpu(counters->tx_packets)); + seq_printf(m, "rx_packets\t%lld\n", le64_to_cpu(counters->rx_packets)); + seq_printf(m, "tx_errors\t%lld\n", le64_to_cpu(counters->tx_errors)); + seq_printf(m, "rx_errors\t%d\n", le32_to_cpu(counters->rx_errors)); + seq_printf(m, "rx_missed\t%d\n", le16_to_cpu(counters->rx_missed)); + seq_printf(m, "align_errors\t%d\n", le16_to_cpu(counters->align_errors)); + seq_printf(m, "tx_one_collision\t%d\n", le32_to_cpu(counters->tx_one_collision)); + seq_printf(m, "tx_multi_collision\t%d\n", le32_to_cpu(counters->tx_multi_collision)); + seq_printf(m, "rx_unicast\t%lld\n", le64_to_cpu(counters->rx_unicast)); + seq_printf(m, "rx_broadcast\t%lld\n", le64_to_cpu(counters->rx_broadcast)); + seq_printf(m, "rx_multicast\t%d\n", le32_to_cpu(counters->rx_multicast)); + seq_printf(m, "tx_aborted\t%d\n", le16_to_cpu(counters->tx_aborted)); + seq_printf(m, "tx_underrun\t%d\n", le16_to_cpu(counters->tx_underrun)); + + seq_putc(m, '\n'); + return 0; +} + +static int proc_get_registers(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + int i, n, max = R8168_MAC_REGS_SIZE; + u8 byte_rd; + struct rtl8168_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + + seq_puts(m, "\nDump MAC Registers\n"); + seq_puts(m, "Offset\tValue\n------\t-----\n"); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + seq_printf(m, "\n0x%02x:\t", n); + + for (i = 0; i < 16 && n < max; i++, n++) { + byte_rd = readb(ioaddr + n); + seq_printf(m, "%02x ", byte_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + seq_putc(m, '\n'); + return 0; +} + +static int proc_get_pcie_phy(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + int i, n, max = R8168_EPHY_REGS_SIZE/2; + u16 word_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + seq_puts(m, "\nDump PCIE PHY\n"); + seq_puts(m, "\nOffset\tValue\n------\t-----\n "); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + seq_printf(m, "\n0x%02x:\t", n); + + for 
(i = 0; i < 8 && n < max; i++, n++) { + word_rd = rtl8168_ephy_read(tp, n); + seq_printf(m, "%04x ", word_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + seq_putc(m, '\n'); + return 0; +} + +static int proc_get_eth_phy(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + int i, n, max = R8168_PHY_REGS_SIZE/2; + u16 word_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + seq_puts(m, "\nDump Ethernet PHY\n"); + seq_puts(m, "\nOffset\tValue\n------\t-----\n "); + + spin_lock_irqsave(&tp->lock, flags); + seq_puts(m, "\n####################page 0##################\n "); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + for (n = 0; n < max;) { + seq_printf(m, "\n0x%02x:\t", n); + + for (i = 0; i < 8 && n < max; i++, n++) { + word_rd = rtl8168_mdio_read(tp, n); + seq_printf(m, "%04x ", word_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + seq_putc(m, '\n'); + return 0; +} + +static int proc_get_extended_registers(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + int i, n, max = R8168_ERI_REGS_SIZE; + u32 dword_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + /* RTL8168B does not support Extend GMAC */ + seq_puts(m, "\nNot Support Dump Extended Registers\n"); + return 0; + } + + seq_puts(m, "\nDump Extended Registers\n"); + seq_puts(m, "\nOffset\tValue\n------\t-----\n "); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + seq_printf(m, "\n0x%02x:\t", n); + + for (i = 0; i < 4 && n < max; i++, n+=4) { + dword_rd = rtl8168_eri_read(tp, n, 4, ERIAR_ExGMAC); + seq_printf(m, "%08x ", dword_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + seq_putc(m, '\n'); + return 0; +} + +static int proc_get_pci_registers(struct seq_file *m, void *v) +{ + struct net_device *dev = m->private; + int i, n, max = R8168_PCI_REGS_SIZE; + u32 dword_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + seq_puts(m, "\nDump PCI Registers\n"); + seq_puts(m, "\nOffset\tValue\n------\t-----\n "); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + seq_printf(m, "\n0x%03x:\t", n); + + for (i = 0; i < 4 && n < max; i++, n+=4) { + pci_read_config_dword(tp->pci_dev, n, &dword_rd); + seq_printf(m, "%08x ", dword_rd); + } + } + + n = 0x110; + pci_read_config_dword(tp->pci_dev, n, &dword_rd); + seq_printf(m, "\n0x%03x:\t%08x ", n, dword_rd); + n = 0x70c; + pci_read_config_dword(tp->pci_dev, n, &dword_rd); + seq_printf(m, "\n0x%03x:\t%08x ", n, dword_rd); + + spin_unlock_irqrestore(&tp->lock, flags); + + seq_putc(m, '\n'); + return 0; +} +#else + +static int proc_get_driver_variable(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + int len = 0; + + len += snprintf(page + len, count - len, + "\nDump Driver Variable\n"); + + spin_lock_irqsave(&tp->lock, flags); + len += snprintf(page + len, count - len, + "Variable\tValue\n----------\t-----\n"); + + len += snprintf(page + len, count - len, + "MODULENAME\t%s\n" + "driver version\t%s\n" + "chipset\t%d\n" + "chipset_name\t%s\n" + "mtu\t%d\n" + "NUM_RX_DESC\t0x%x\n" + "cur_rx\t0x%x\n" + "dirty_rx\t0x%x\n" + "NUM_TX_DESC\t0x%x\n" + "cur_tx\t0x%x\n" + "dirty_tx\t0x%x\n" + "rx_buf_sz\t0x%x\n" + "esd_flag\t0x%x\n" + "pci_cfg_is_read\t0x%x\n" + "rtl8168_rx_config\t0x%x\n" + "cp_cmd\t0x%x\n" + 
"intr_mask\t0x%x\n" + "timer_intr_mask\t0x%x\n" + "wol_enabled\t0x%x\n" + "wol_opts\t0x%x\n" + "efuse_ver\t0x%x\n" + "eeprom_type\t0x%x\n" + "autoneg\t0x%x\n" + "duplex\t0x%x\n" + "speed\t%d\n" + "advertising\t0x%x\n" + "eeprom_len\t0x%x\n" + "cur_page\t0x%x\n" + "bios_setting\t0x%x\n" + "features\t0x%x\n" + "org_pci_offset_99\t0x%x\n" + "org_pci_offset_180\t0x%x\n" + "issue_offset_99_event\t0x%x\n" + "org_pci_offset_80\t0x%x\n" + "org_pci_offset_81\t0x%x\n" + "use_timer_interrrupt\t0x%x\n" + "HwIcVerUnknown\t0x%x\n" + "NotWrRamCodeToMicroP\t0x%x\n" + "NotWrMcuPatchCode\t0x%x\n" + "HwHasWrRamCodeToMicroP\t0x%x\n" + "sw_ram_code_ver\t0x%x\n" + "hw_ram_code_ver\t0x%x\n" + "rtk_enable_diag\t0x%x\n" + "ShortPacketSwChecksum\t0x%x\n" + "UseSwPaddingShortPkt\t0x%x\n" + "RequireAdcBiasPatch\t0x%x\n" + "AdcBiasPatchIoffset\t0x%x\n" + "RequireAdjustUpsTxLinkPulseTiming\t0x%x\n" + "SwrCnt1msIni\t0x%x\n" + "HwSuppNowIsOobVer\t0x%x\n" + "HwFiberModeVer\t0x%x\n" + "HwFiberStat\t0x%x\n" + "HwSwitchMdiToFiber\t0x%x\n" + "HwSuppSerDesPhyVer\t0x%x\n" + "NicCustLedValue\t0x%x\n" + "RequiredSecLanDonglePatch\t0x%x\n" + "HwSuppDashVer\t0x%x\n" + "DASH\t0x%x\n" + "dash_printer_enabled\t0x%x\n" + "HwSuppKCPOffloadVer\t0x%x\n" + "speed_mode\t0x%x\n" + "duplex_mode\t0x%x\n" + "autoneg_mode\t0x%x\n" + "advertising_mode\t0x%x\n" + "aspm\t0x%x\n" + "s5wol\t0x%x\n" + "s5_keep_curr_mac\t0x%x\n" + "eee_enable\t0x%x\n" + "hwoptimize\t0x%lx\n" + "proc_init_num\t0x%x\n" + "s0_magic_packet\t0x%x\n" + "HwSuppMagicPktVer\t0x%x\n" + "HwSuppUpsVer\t0x%x\n" + "HwSuppEsdVer\t0x%x\n" + "HwSuppCheckPhyDisableModeVer\t0x%x\n" + "HwPkgDet\t0x%x\n" + "random_mac\t0x%x\n" + "org_mac_addr\t%pM\n" +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) + "perm_addr\t%pM\n" +#endif + "dev_addr\t%pM\n", + MODULENAME, + RTL8168_VERSION, + tp->chipset, + rtl_chip_info[tp->chipset].name, + dev->mtu, + NUM_RX_DESC, + tp->cur_rx, + tp->dirty_rx, + NUM_TX_DESC, + tp->cur_tx, + tp->dirty_tx, + tp->rx_buf_sz, + tp->esd_flag, + tp->pci_cfg_is_read, + tp->rtl8168_rx_config, + tp->cp_cmd, + tp->intr_mask, + tp->timer_intr_mask, + tp->wol_enabled, + tp->wol_opts, + tp->efuse_ver, + tp->eeprom_type, + tp->autoneg, + tp->duplex, + tp->speed, + tp->advertising, + tp->eeprom_len, + tp->cur_page, + tp->bios_setting, + tp->features, + tp->org_pci_offset_99, + tp->org_pci_offset_180, + tp->issue_offset_99_event, + tp->org_pci_offset_80, + tp->org_pci_offset_81, + tp->use_timer_interrrupt, + tp->HwIcVerUnknown, + tp->NotWrRamCodeToMicroP, + tp->NotWrMcuPatchCode, + tp->HwHasWrRamCodeToMicroP, + tp->sw_ram_code_ver, + tp->hw_ram_code_ver, + tp->rtk_enable_diag, + tp->ShortPacketSwChecksum, + tp->UseSwPaddingShortPkt, + tp->RequireAdcBiasPatch, + tp->AdcBiasPatchIoffset, + tp->RequireAdjustUpsTxLinkPulseTiming, + tp->SwrCnt1msIni, + tp->HwSuppNowIsOobVer, + tp->HwFiberModeVer, + tp->HwFiberStat, + tp->HwSwitchMdiToFiber, + tp->HwSuppSerDesPhyVer, + tp->NicCustLedValue, + tp->RequiredSecLanDonglePatch, + tp->HwSuppDashVer, + tp->DASH, + tp->dash_printer_enabled, + tp->HwSuppKCPOffloadVer, + speed_mode, + duplex_mode, + autoneg_mode, + advertising_mode, + aspm, + s5wol, + s5_keep_curr_mac, + tp->eee_enabled, + hwoptimize, + proc_init_num, + s0_magic_packet, + tp->HwSuppMagicPktVer, + tp->HwSuppUpsVer, + tp->HwSuppEsdVer, + tp->HwSuppCheckPhyDisableModeVer, + tp->HwPkgDet, + tp->random_mac, + tp->org_mac_addr, +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) + dev->perm_addr, +#endif + dev->dev_addr + ); + spin_unlock_irqrestore(&tp->lock, flags); + + len += 
snprintf(page + len, count - len, "\n"); + + *eof = 1; + return len; +} + +static int proc_get_tally_counter(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + struct rtl8168_private *tp = netdev_priv(dev); + struct rtl8168_counters *counters; + dma_addr_t paddr; + u32 cmd; + u32 WaitCnt; + unsigned long flags; + int len = 0; + + len += snprintf(page + len, count - len, + "\nDump Tally Counter\n"); + + //ASSERT_RTNL(); + + counters = tp->tally_vaddr; + paddr = tp->tally_paddr; + if (!counters) { + len += snprintf(page + len, count - len, + "\nDump Tally Counter Fail\n"); + goto out; + } + + spin_lock_irqsave(&tp->lock, flags); + RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | CounterDump); + + WaitCnt = 0; + while (RTL_R32(tp, CounterAddrLow) & CounterDump) { + udelay(10); + + WaitCnt++; + if (WaitCnt > 20) + break; + } + spin_unlock_irqrestore(&tp->lock, flags); + + len += snprintf(page + len, count - len, + "Statistics\tValue\n----------\t-----\n"); + + len += snprintf(page + len, count - len, + "tx_packets\t%lld\n" + "rx_packets\t%lld\n" + "tx_errors\t%lld\n" + "rx_errors\t%d\n" + "rx_missed\t%d\n" + "align_errors\t%d\n" + "tx_one_collision\t%d\n" + "tx_multi_collision\t%d\n" + "rx_unicast\t%lld\n" + "rx_broadcast\t%lld\n" + "rx_multicast\t%d\n" + "tx_aborted\t%d\n" + "tx_underrun\t%d\n", + le64_to_cpu(counters->tx_packets), + le64_to_cpu(counters->rx_packets), + le64_to_cpu(counters->tx_errors), + le32_to_cpu(counters->rx_errors), + le16_to_cpu(counters->rx_missed), + le16_to_cpu(counters->align_errors), + le32_to_cpu(counters->tx_one_collision), + le32_to_cpu(counters->tx_multi_collision), + le64_to_cpu(counters->rx_unicast), + le64_to_cpu(counters->rx_broadcast), + le32_to_cpu(counters->rx_multicast), + le16_to_cpu(counters->tx_aborted), + le16_to_cpu(counters->tx_underrun) + ); + + len += snprintf(page + len, count - len, "\n"); +out: + *eof = 1; + return len; +} + +static int proc_get_registers(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + int i, n, max = R8168_MAC_REGS_SIZE; + u8 byte_rd; + struct rtl8168_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned long flags; + int len = 0; + + len += snprintf(page + len, count - len, + "\nDump MAC Registers\n" + "Offset\tValue\n------\t-----\n"); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + len += snprintf(page + len, count - len, + "\n0x%02x:\t", + n); + + for (i = 0; i < 16 && n < max; i++, n++) { + byte_rd = readb(ioaddr + n); + len += snprintf(page + len, count - len, + "%02x ", + byte_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + len += snprintf(page + len, count - len, "\n"); + + *eof = 1; + return len; +} + +static int proc_get_pcie_phy(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + int i, n, max = R8168_EPHY_REGS_SIZE/2; + u16 word_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + int len = 0; + + len += snprintf(page + len, count - len, + "\nDump PCIE PHY\n" + "Offset\tValue\n------\t-----\n"); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + len += snprintf(page + len, count - len, + "\n0x%02x:\t", + n); + + for (i = 0; i < 8 && n < max; i++, n++) { + word_rd = rtl8168_ephy_read(tp, n); + len += 
snprintf(page + len, count - len, + "%04x ", + word_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + len += snprintf(page + len, count - len, "\n"); + + *eof = 1; + return len; +} + +static int proc_get_eth_phy(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + int i, n, max = R8168_PHY_REGS_SIZE/2; + u16 word_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + int len = 0; + + len += snprintf(page + len, count - len, + "\nDump Ethernet PHY\n" + "Offset\tValue\n------\t-----\n"); + + spin_lock_irqsave(&tp->lock, flags); + len += snprintf(page + len, count - len, + "\n####################page 0##################\n"); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + for (n = 0; n < max;) { + len += snprintf(page + len, count - len, + "\n0x%02x:\t", + n); + + for (i = 0; i < 8 && n < max; i++, n++) { + word_rd = rtl8168_mdio_read(tp, n); + len += snprintf(page + len, count - len, + "%04x ", + word_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + len += snprintf(page + len, count - len, "\n"); + + *eof = 1; + return len; +} + +static int proc_get_extended_registers(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + int i, n, max = R8168_ERI_REGS_SIZE; + u32 dword_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + int len = 0; + + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + /* RTL8168B does not support Extend GMAC */ + len += snprintf(page + len, count - len, + "\nNot Support Dump Extended Registers\n"); + + goto out; + } + + len += snprintf(page + len, count - len, + "\nDump Extended Registers\n" + "Offset\tValue\n------\t-----\n"); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + len += snprintf(page + len, count - len, + "\n0x%02x:\t", + n); + + for (i = 0; i < 4 && n < max; i++, n+=4) { + dword_rd = rtl8168_eri_read(tp, n, 4, ERIAR_ExGMAC); + len += snprintf(page + len, count - len, + "%08x ", + dword_rd); + } + } + spin_unlock_irqrestore(&tp->lock, flags); + + len += snprintf(page + len, count - len, "\n"); +out: + *eof = 1; + return len; +} + +static int proc_get_pci_registers(char *page, char **start, + off_t offset, int count, + int *eof, void *data) +{ + struct net_device *dev = data; + int i, n, max = R8168_PCI_REGS_SIZE; + u32 dword_rd; + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + int len = 0; + + len += snprintf(page + len, count - len, + "\nDump PCI Registers\n" + "Offset\tValue\n------\t-----\n"); + + spin_lock_irqsave(&tp->lock, flags); + for (n = 0; n < max;) { + len += snprintf(page + len, count - len, + "\n0x%03x:\t", + n); + + for (i = 0; i < 4 && n < max; i++, n+=4) { + pci_read_config_dword(tp->pci_dev, n, &dword_rd); + len += snprintf(page + len, count - len, + "%08x ", + dword_rd); + } + } + + n = 0x110; + pci_read_config_dword(tp->pci_dev, n, &dword_rd); + len += snprintf(page + len, count - len, + "\n0x%03x:\t%08x ", + n, + dword_rd); + n = 0x70c; + pci_read_config_dword(tp->pci_dev, n, &dword_rd); + len += snprintf(page + len, count - len, + "\n0x%03x:\t%08x ", + n, + dword_rd); + spin_unlock_irqrestore(&tp->lock, flags); + + len += snprintf(page + len, count - len, "\n"); + + *eof = 1; + return len; +} +#endif +static void rtl8168_proc_module_init(void) +{ + //create /proc/net/r8168 +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) + rtl8168_proc = proc_mkdir(MODULENAME, 
init_net.proc_net); +#else + rtl8168_proc = proc_mkdir(MODULENAME, proc_net); +#endif + if (!rtl8168_proc) + dprintk("cannot create %s proc entry \n", MODULENAME); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) +/* + * seq_file wrappers for procfile show routines. + */ +static int rtl8168_proc_open(struct inode *inode, struct file *file) +{ + struct net_device *dev = proc_get_parent_data(inode); + int (*show)(struct seq_file *, void *) = PDE_DATA(inode); + + return single_open(file, show, dev); +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) +static const struct proc_ops rtl8168_proc_fops = { + .proc_open = rtl8168_proc_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; +#else +static const struct file_operations rtl8168_proc_fops = { + .open = rtl8168_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + +#endif + +/* + * Table of proc files we need to create. + */ +struct rtl8168_proc_file { + char name[12]; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) + int (*show)(struct seq_file *, void *); +#else + int (*show)(char *, char **, off_t, int, int *, void *); +#endif +}; + +static const struct rtl8168_proc_file rtl8168_proc_files[] = { + { "driver_var", &proc_get_driver_variable }, + { "tally", &proc_get_tally_counter }, + { "registers", &proc_get_registers }, + { "pcie_phy", &proc_get_pcie_phy }, + { "eth_phy", &proc_get_eth_phy }, + { "ext_regs", &proc_get_extended_registers }, + { "pci_regs", &proc_get_pci_registers }, + { "" } +}; + +static void rtl8168_proc_init(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + const struct rtl8168_proc_file *f; + struct proc_dir_entry *dir; + + if (rtl8168_proc && !tp->proc_dir) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) + dir = proc_mkdir_data(dev->name, 0, rtl8168_proc, dev); + if (!dir) { + printk("Unable to initialize /proc/net/%s/%s\n", + MODULENAME, dev->name); + return; + } + + tp->proc_dir = dir; + proc_init_num++; + + for (f = rtl8168_proc_files; f->name[0]; f++) { + if (!proc_create_data(f->name, S_IFREG | S_IRUGO, dir, + &rtl8168_proc_fops, f->show)) { + printk("Unable to initialize " + "/proc/net/%s/%s/%s\n", + MODULENAME, dev->name, f->name); + return; + } + } +#else + dir = proc_mkdir(dev->name, rtl8168_proc); + if (!dir) { + printk("Unable to initialize /proc/net/%s/%s\n", + MODULENAME, dev->name); + return; + } + + tp->proc_dir = dir; + proc_init_num++; + + for (f = rtl8168_proc_files; f->name[0]; f++) { + if (!create_proc_read_entry(f->name, S_IFREG | S_IRUGO, + dir, f->show, dev)) { + printk("Unable to initialize " + "/proc/net/%s/%s/%s\n", + MODULENAME, dev->name, f->name); + return; + } + } +#endif + } +} + +static void rtl8168_proc_remove(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (tp->proc_dir) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) + remove_proc_subtree(dev->name, rtl8168_proc); + proc_init_num--; + +#else + const struct rtl8168_proc_file *f; + struct rtl8168_private *tp = netdev_priv(dev); + + for (f = rtl8168_proc_files; f->name[0]; f++) + remove_proc_entry(f->name, tp->proc_dir); + + remove_proc_entry(dev->name, rtl8168_proc); + proc_init_num--; +#endif + tp->proc_dir = NULL; + } +} + +#endif //ENABLE_R8168_PROCFS + +static inline u16 map_phy_ocp_addr(u16 PageNum, u8 RegNum) +{ + u16 OcpPageNum = 0; + u8 OcpRegNum = 0; + u16 OcpPhyAddress = 0; + + if ( PageNum == 0 ) { + OcpPageNum = OCP_STD_PHY_BASE_PAGE + ( RegNum / 8 ); + 
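+ /* Page-0 registers are folded into the OCP standard-page window:
+ * eight registers per OCP page, placed at register offsets 0x10
+ * and up (e.g. RegNum 21 -> base page + 2, register 0x15). */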
OcpRegNum = 0x10 + ( RegNum % 8 ); + } else { + OcpPageNum = PageNum; + OcpRegNum = RegNum; + } + + OcpPageNum <<= 4; + + if ( OcpRegNum < 16 ) { + OcpPhyAddress = 0; + } else { + OcpRegNum -= 16; + OcpRegNum <<= 1; + + OcpPhyAddress = OcpPageNum + OcpRegNum; + } + + + return OcpPhyAddress; +} + +static void mdio_real_direct_write_phy_ocp(struct rtl8168_private *tp, + u32 RegAddr, + u32 value) +{ + u32 data32; + int i; + + if (tp->HwSuppPhyOcpVer == 0) goto out; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) + WARN_ON_ONCE(RegAddr % 2); +#endif + data32 = RegAddr/2; + data32 <<= OCPR_Addr_Reg_shift; + data32 |= OCPR_Write | value; + + RTL_W32(tp, PHYOCP, data32); + for (i = 0; i < 100; i++) { + udelay(1); + + if (!(RTL_R32(tp, PHYOCP) & OCPR_Flag)) + break; + } +out: + return; +} + +static void mdio_direct_write_phy_ocp(struct rtl8168_private *tp, + u16 RegAddr, + u16 value) +{ + if (tp->rtk_enable_diag) return; + + mdio_real_direct_write_phy_ocp(tp, RegAddr, value); +} + +static void rtl8168_mdio_write_phy_ocp(struct rtl8168_private *tp, + u16 PageNum, + u32 RegAddr, + u32 value) +{ + u16 ocp_addr; + + if (tp->rtk_enable_diag) return; + + ocp_addr = map_phy_ocp_addr(PageNum, RegAddr); + + mdio_direct_write_phy_ocp(tp, ocp_addr, value); +} + +static void rtl8168_mdio_real_write_phy_ocp(struct rtl8168_private *tp, + u16 PageNum, + u32 RegAddr, + u32 value) +{ + u16 ocp_addr; + + ocp_addr = map_phy_ocp_addr(PageNum, RegAddr); + + mdio_real_direct_write_phy_ocp(tp, ocp_addr, value); +} + +static void mdio_real_write(struct rtl8168_private *tp, + u32 RegAddr, + u32 value) +{ + int i; + + if (RegAddr == 0x1F) { + tp->cur_page = value; + } + + if (tp->mcfg == CFG_METHOD_11) { + RTL_W32(tp, OCPDR, OCPDR_Write | + (RegAddr & OCPDR_Reg_Mask) << OCPDR_GPHY_Reg_shift | + (value & OCPDR_Data_Mask)); + RTL_W32(tp, OCPAR, OCPAR_GPHY_Write); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + for (i = 0; i < 100; i++) { + mdelay(1); + if (!(RTL_R32(tp, OCPAR) & OCPAR_Flag)) + break; + } + } else { + if (tp->HwSuppPhyOcpVer > 0) { + if (RegAddr == 0x1F) { + return; + } + rtl8168_mdio_real_write_phy_ocp(tp, tp->cur_page, RegAddr, value); + } else { + if (tp->mcfg == CFG_METHOD_12 || tp->mcfg == CFG_METHOD_13) + RTL_W32(tp, 0xD0, RTL_R32(tp, 0xD0) & ~0x00020000); + + RTL_W32(tp, PHYAR, PHYAR_Write | + (RegAddr & PHYAR_Reg_Mask) << PHYAR_Reg_shift | + (value & PHYAR_Data_Mask)); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed writing to the specified MII register */ + if (!(RTL_R32(tp, PHYAR) & PHYAR_Flag)) { + udelay(20); + break; + } + } + + if (tp->mcfg == CFG_METHOD_12 || tp->mcfg == CFG_METHOD_13) + RTL_W32(tp, 0xD0, RTL_R32(tp, 0xD0) | 0x00020000); + } + } +} + +void rtl8168_mdio_write(struct rtl8168_private *tp, + u16 RegAddr, + u16 value) +{ + if (tp->rtk_enable_diag) return; + + mdio_real_write(tp, RegAddr, value); +} + +void rtl8168_mdio_prot_write(struct rtl8168_private *tp, + u32 RegAddr, + u32 value) +{ + mdio_real_write(tp, RegAddr, value); +} + +void rtl8168_mdio_prot_direct_write_phy_ocp(struct rtl8168_private *tp, + u32 RegAddr, + u32 value) +{ + mdio_real_direct_write_phy_ocp(tp, RegAddr, value); +} + +static u32 mdio_real_direct_read_phy_ocp(struct rtl8168_private *tp, + u32 RegAddr) +{ + u32 data32; + int i, value = 0; + + if (tp->HwSuppPhyOcpVer == 0) goto out; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) + WARN_ON_ONCE(RegAddr % 2); +#endif + data32 = RegAddr/2; + data32 <<= OCPR_Addr_Reg_shift; + + RTL_W32(tp, PHYOCP, data32); + for (i = 0; i < 100; 
i++) { + udelay(1); + + if (RTL_R32(tp, PHYOCP) & OCPR_Flag) + break; + } + value = RTL_R32(tp, PHYOCP) & OCPDR_Data_Mask; + +out: + return value; +} + +static u32 mdio_direct_read_phy_ocp(struct rtl8168_private *tp, + u16 RegAddr) +{ + if (tp->rtk_enable_diag) return 0xffffffff; + + return mdio_real_direct_read_phy_ocp(tp, RegAddr); +} + +static u32 rtl8168_mdio_read_phy_ocp(struct rtl8168_private *tp, + u16 PageNum, + u32 RegAddr) +{ + u16 ocp_addr; + + if (tp->rtk_enable_diag) return 0xffffffff; + + ocp_addr = map_phy_ocp_addr(PageNum, RegAddr); + + return mdio_direct_read_phy_ocp(tp, ocp_addr); +} + +static u32 rtl8168_mdio_real_read_phy_ocp(struct rtl8168_private *tp, + u16 PageNum, + u32 RegAddr) +{ + u16 ocp_addr; + + ocp_addr = map_phy_ocp_addr(PageNum, RegAddr); + + return mdio_real_direct_read_phy_ocp(tp, ocp_addr); +} + +u32 mdio_real_read(struct rtl8168_private *tp, + u32 RegAddr) +{ + int i, value = 0; + + if (tp->mcfg==CFG_METHOD_11) { + RTL_W32(tp, OCPDR, OCPDR_Read | + (RegAddr & OCPDR_Reg_Mask) << OCPDR_GPHY_Reg_shift); + RTL_W32(tp, OCPAR, OCPAR_GPHY_Write); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + for (i = 0; i < 100; i++) { + mdelay(1); + if (!(RTL_R32(tp, OCPAR) & OCPAR_Flag)) + break; + } + + mdelay(1); + RTL_W32(tp, OCPAR, OCPAR_GPHY_Read); + RTL_W32(tp, EPHY_RXER_NUM, 0); + + for (i = 0; i < 100; i++) { + mdelay(1); + if (RTL_R32(tp, OCPAR) & OCPAR_Flag) + break; + } + + value = RTL_R32(tp, OCPDR) & OCPDR_Data_Mask; + } else { + if (tp->HwSuppPhyOcpVer > 0) { + value = rtl8168_mdio_real_read_phy_ocp(tp, tp->cur_page, RegAddr); + } else { + if (tp->mcfg == CFG_METHOD_12 || tp->mcfg == CFG_METHOD_13) + RTL_W32(tp, 0xD0, RTL_R32(tp, 0xD0) & ~0x00020000); + + RTL_W32(tp, PHYAR, + PHYAR_Read | (RegAddr & PHYAR_Reg_Mask) << PHYAR_Reg_shift); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed retrieving data from the specified MII register */ + if (RTL_R32(tp, PHYAR) & PHYAR_Flag) { + value = RTL_R32(tp, PHYAR) & PHYAR_Data_Mask; + udelay(20); + break; + } + } + + if (tp->mcfg == CFG_METHOD_12 || tp->mcfg == CFG_METHOD_13) + RTL_W32(tp, 0xD0, RTL_R32(tp, 0xD0) | 0x00020000); + } + } + + return value; +} + +u32 rtl8168_mdio_read(struct rtl8168_private *tp, + u16 RegAddr) +{ + if (tp->rtk_enable_diag) return 0xffffffff; + + return mdio_real_read(tp, RegAddr); +} + +u32 rtl8168_mdio_prot_read(struct rtl8168_private *tp, + u32 RegAddr) +{ + return mdio_real_read(tp, RegAddr); +} + +u32 rtl8168_mdio_prot_direct_read_phy_ocp(struct rtl8168_private *tp, + u32 RegAddr) +{ + return mdio_real_direct_read_phy_ocp(tp, RegAddr); +} + +static void ClearAndSetEthPhyBit(struct rtl8168_private *tp, u8 addr, u16 clearmask, u16 setmask) +{ + u16 PhyRegValue; + + + PhyRegValue = rtl8168_mdio_read(tp, addr); + PhyRegValue &= ~clearmask; + PhyRegValue |= setmask; + rtl8168_mdio_write(tp, addr, PhyRegValue); +} + +void rtl8168_clear_eth_phy_bit(struct rtl8168_private *tp, u8 addr, u16 mask) +{ + ClearAndSetEthPhyBit(tp, + addr, + mask, + 0 + ); +} + +void rtl8168_set_eth_phy_bit(struct rtl8168_private *tp, u8 addr, u16 mask) +{ + ClearAndSetEthPhyBit(tp, + addr, + 0, + mask + ); +} + +void rtl8168_mac_ocp_write(struct rtl8168_private *tp, u16 reg_addr, u16 value) +{ + u32 data32; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) + WARN_ON_ONCE(reg_addr % 2); +#endif + + data32 = reg_addr/2; + data32 <<= OCPR_Addr_Reg_shift; + data32 += value; + data32 |= OCPR_Write; + + RTL_W32(tp, MACOCP, data32); +} + +u16 rtl8168_mac_ocp_read(struct rtl8168_private *tp, u16 
reg_addr) +{ + u32 data32; + u16 data16 = 0; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) + WARN_ON_ONCE(reg_addr % 2); +#endif + + data32 = reg_addr/2; + data32 <<= OCPR_Addr_Reg_shift; + + RTL_W32(tp, MACOCP, data32); + data16 = (u16)RTL_R32(tp, MACOCP); + + return data16; +} + +#ifdef ENABLE_USE_FIRMWARE_FILE +static void mac_mcu_write(struct rtl8168_private *tp, u16 reg, u16 value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + rtl8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static u32 mac_mcu_read(struct rtl8168_private *tp, u16 reg) +{ + return rtl8168_mac_ocp_read(tp, tp->ocp_base + reg); +} +#endif + +static void +rtl8168_clear_and_set_mcu_ocp_bit( + struct rtl8168_private *tp, + u16 addr, + u16 clearmask, + u16 setmask +) +{ + u16 RegValue; + + RegValue = rtl8168_mac_ocp_read(tp, addr); + RegValue &= ~clearmask; + RegValue |= setmask; + rtl8168_mac_ocp_write(tp, addr, RegValue); +} + +/* +static void +rtl8168_clear_mcu_ocp_bit( + struct rtl8168_private *tp, + u16 addr, + u16 mask +) +{ + rtl8168_clear_and_set_mcu_ocp_bit(tp, + addr, + mask, + 0 + ); +} +*/ + +static void +rtl8168_set_mcu_ocp_bit( + struct rtl8168_private *tp, + u16 addr, + u16 mask +) +{ + rtl8168_clear_and_set_mcu_ocp_bit(tp, + addr, + 0, + mask + ); +} + +static u32 real_ocp_read(struct rtl8168_private *tp, u16 addr, u8 len) +{ + int i, val_shift, shift = 0; + u32 value1 = 0, value2 = 0, mask; + + if (len > 4 || len <= 0) + return -1; + + while (len > 0) { + val_shift = addr % 4; + addr = addr & ~0x3; + + RTL_W32(tp, OCPAR, (0x0F<<12) | (addr&0xFFF)); + + for (i = 0; i < 20; i++) { + udelay(100); + if (RTL_R32(tp, OCPAR) & OCPAR_Flag) + break; + } + + if (len == 1) mask = (0xFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 2) mask = (0xFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 3) mask = (0xFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else mask = (0xFFFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + + value1 = RTL_R32(tp, OCPDR) & mask; + value2 |= (value1 >> val_shift * 8) << shift * 8; + + if (len <= 4 - val_shift) { + len = 0; + } else { + len -= (4 - val_shift); + shift = 4 - val_shift; + addr += 4; + } + } + + udelay(20); + + return value2; +} + +u32 rtl8168_ocp_read_with_oob_base_address(struct rtl8168_private *tp, u16 addr, u8 len, const u32 base_address) +{ + return rtl8168_eri_read_with_oob_base_address(tp, addr, len, ERIAR_OOB, base_address); +} + +u32 rtl8168_ocp_read(struct rtl8168_private *tp, u16 addr, u8 len) +{ + u32 value = 0; + + if (HW_DASH_SUPPORT_TYPE_2(tp)) + value = rtl8168_ocp_read_with_oob_base_address(tp, addr, len, NO_BASE_ADDRESS); + else if (HW_DASH_SUPPORT_TYPE_3(tp)) + value = rtl8168_ocp_read_with_oob_base_address(tp, addr, len, RTL8168FP_OOBMAC_BASE); + else + value = real_ocp_read(tp, addr, len); + + return value; +} + +static int real_ocp_write(struct rtl8168_private *tp, u16 addr, u8 len, u32 value) +{ + int i, val_shift, shift = 0; + u32 value1 = 0, mask; + + if (len > 4 || len <= 0) + return -1; + + while (len > 0) { + val_shift = addr % 4; + addr = addr & ~0x3; + + if (len == 1) mask = (0xFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 2) mask = (0xFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 3) mask = (0xFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else mask = (0xFFFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + + value1 = rtl8168_ocp_read(tp, addr, 4) & ~mask; + value1 |= ((value << val_shift * 8) >> shift * 8); + + RTL_W32(tp, OCPDR, value1); + RTL_W32(tp, OCPAR, OCPAR_Flag | (0x0F<<12) | (addr&0xFFF)); + 
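+ /* Completion is signalled by the hardware clearing OCPAR_Flag;
+ * the loop below polls for it (up to 10 x 100 us) before moving
+ * on to the next dword-aligned address. */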
+ for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed ERI write */ + if (!(RTL_R32(tp, OCPAR) & OCPAR_Flag)) + break; + } + + if (len <= 4 - val_shift) { + len = 0; + } else { + len -= (4 - val_shift); + shift = 4 - val_shift; + addr += 4; + } + } + + udelay(20); + + return 0; +} + +u32 rtl8168_ocp_write_with_oob_base_address(struct rtl8168_private *tp, u16 addr, u8 len, u32 value, const u32 base_address) +{ + return rtl8168_eri_write_with_oob_base_address(tp, addr, len, value, ERIAR_OOB, base_address); +} + +void rtl8168_ocp_write(struct rtl8168_private *tp, u16 addr, u8 len, u32 value) +{ + if (HW_DASH_SUPPORT_TYPE_2(tp)) + rtl8168_ocp_write_with_oob_base_address(tp, addr, len, value, NO_BASE_ADDRESS); + else if (HW_DASH_SUPPORT_TYPE_3(tp)) + rtl8168_ocp_write_with_oob_base_address(tp, addr, len, value, RTL8168FP_OOBMAC_BASE); + else + real_ocp_write(tp, addr, len, value); +} + +void rtl8168_oob_mutex_lock(struct rtl8168_private *tp) +{ + u8 reg_16, reg_a0; + u32 wait_cnt_0, wait_Cnt_1; + u16 ocp_reg_mutex_ib; + u16 ocp_reg_mutex_oob; + u16 ocp_reg_mutex_prio; + + if (!tp->DASH) return; + + switch (tp->mcfg) { + case CFG_METHOD_11: + case CFG_METHOD_12: + ocp_reg_mutex_oob = 0x16; + ocp_reg_mutex_ib = 0x17; + ocp_reg_mutex_prio = 0x9C; + break; + case CFG_METHOD_13: + ocp_reg_mutex_oob = 0x06; + ocp_reg_mutex_ib = 0x07; + ocp_reg_mutex_prio = 0x9C; + break; + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + default: + ocp_reg_mutex_oob = 0x110; + ocp_reg_mutex_ib = 0x114; + ocp_reg_mutex_prio = 0x11C; + break; + } + + rtl8168_ocp_write(tp, ocp_reg_mutex_ib, 1, BIT_0); + reg_16 = rtl8168_ocp_read(tp, ocp_reg_mutex_oob, 1); + wait_cnt_0 = 0; + while(reg_16) { + reg_a0 = rtl8168_ocp_read(tp, ocp_reg_mutex_prio, 1); + if (reg_a0) { + rtl8168_ocp_write(tp, ocp_reg_mutex_ib, 1, 0x00); + reg_a0 = rtl8168_ocp_read(tp, ocp_reg_mutex_prio, 1); + wait_Cnt_1 = 0; + while(reg_a0) { + reg_a0 = rtl8168_ocp_read(tp, ocp_reg_mutex_prio, 1); + + wait_Cnt_1++; + + if (wait_Cnt_1 > 2000) + break; + }; + rtl8168_ocp_write(tp, ocp_reg_mutex_ib, 1, BIT_0); + + } + reg_16 = rtl8168_ocp_read(tp, ocp_reg_mutex_oob, 1); + + wait_cnt_0++; + + if (wait_cnt_0 > 2000) + break; + }; +} + +void rtl8168_oob_mutex_unlock(struct rtl8168_private *tp) +{ + u16 ocp_reg_mutex_ib; + u16 ocp_reg_mutex_oob; + u16 ocp_reg_mutex_prio; + + if (!tp->DASH) return; + + switch (tp->mcfg) { + case CFG_METHOD_11: + case CFG_METHOD_12: + ocp_reg_mutex_oob = 0x16; + ocp_reg_mutex_ib = 0x17; + ocp_reg_mutex_prio = 0x9C; + break; + case CFG_METHOD_13: + ocp_reg_mutex_oob = 0x06; + ocp_reg_mutex_ib = 0x07; + ocp_reg_mutex_prio = 0x9C; + break; + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + default: + ocp_reg_mutex_oob = 0x110; + ocp_reg_mutex_ib = 0x114; + ocp_reg_mutex_prio = 0x11C; + break; + } + + rtl8168_ocp_write(tp, ocp_reg_mutex_prio, 1, BIT_0); + rtl8168_ocp_write(tp, ocp_reg_mutex_ib, 1, 0x00); +} + +void rtl8168_oob_notify(struct rtl8168_private *tp, u8 cmd) +{ + rtl8168_eri_write(tp, 0xE8, 1, cmd, ERIAR_ExGMAC); + + rtl8168_ocp_write(tp, 0x30, 1, 0x01); +} + +static int rtl8168_check_dash(struct rtl8168_private *tp) +{ + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + if (rtl8168_ocp_read(tp, 0x128, 1) & BIT_0) + return 1; + else + return 0; + } else { + u32 reg; + + if (tp->mcfg == CFG_METHOD_13) + reg = 0xb8; + 
else + reg = 0x10; + + if (rtl8168_ocp_read(tp, reg, 2) & 0x00008000) + return 1; + else + return 0; + } +} + +void rtl8168_dash2_disable_tx(struct rtl8168_private *tp) +{ + if (!tp->DASH) return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + u16 WaitCnt; + u8 TmpUchar; + + //Disable oob Tx + RTL_CMAC_W8(tp, CMAC_IBCR2, RTL_CMAC_R8(tp, CMAC_IBCR2) & ~( BIT_0 )); + WaitCnt = 0; + + //wait oob tx disable + do { + TmpUchar = RTL_CMAC_R8(tp, CMAC_IBISR0); + + if ( TmpUchar & ISRIMR_DASH_TYPE2_TX_DISABLE_IDLE ) { + break; + } + + udelay( 50 ); + WaitCnt++; + } while(WaitCnt < 2000); + + //Clear ISRIMR_DASH_TYPE2_TX_DISABLE_IDLE + RTL_CMAC_W8(tp, CMAC_IBISR0, RTL_CMAC_R8(tp, CMAC_IBISR0) | ISRIMR_DASH_TYPE2_TX_DISABLE_IDLE); + } +} + +void rtl8168_dash2_enable_tx(struct rtl8168_private *tp) +{ + if (!tp->DASH) return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + RTL_CMAC_W8(tp, CMAC_IBCR2, RTL_CMAC_R8(tp, CMAC_IBCR2) | BIT_0); + } +} + +void rtl8168_dash2_disable_rx(struct rtl8168_private *tp) +{ + if (!tp->DASH) return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + RTL_CMAC_W8(tp, CMAC_IBCR0, RTL_CMAC_R8(tp, CMAC_IBCR0) & ~( BIT_0 )); + } +} + +void rtl8168_dash2_enable_rx(struct rtl8168_private *tp) +{ + if (!tp->DASH) return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + RTL_CMAC_W8(tp, CMAC_IBCR0, RTL_CMAC_R8(tp, CMAC_IBCR0) | BIT_0); + } +} + +static void rtl8168_dash2_disable_txrx(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + rtl8168_dash2_disable_tx( tp ); + rtl8168_dash2_disable_rx( tp ); + } +} + +void rtl8168_ephy_write(struct rtl8168_private *tp, int RegAddr, int value) +{ + int i; + + RTL_W32(tp, EPHYAR, + EPHYAR_Write | + (RegAddr & EPHYAR_Reg_Mask) << EPHYAR_Reg_shift | + (value & EPHYAR_Data_Mask)); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed EPHY write */ + if (!(RTL_R32(tp, EPHYAR) & EPHYAR_Flag)) + break; + } + + udelay(20); +} + +u16 rtl8168_ephy_read(struct rtl8168_private *tp, int RegAddr) +{ + int i; + u16 value = 0xffff; + + RTL_W32(tp, EPHYAR, + EPHYAR_Read | (RegAddr & EPHYAR_Reg_Mask) << EPHYAR_Reg_shift); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed EPHY read */ + if (RTL_R32(tp, EPHYAR) & EPHYAR_Flag) { + value = (u16) (RTL_R32(tp, EPHYAR) & EPHYAR_Data_Mask); + break; + } + } + + udelay(20); + + return value; +} + +static void ClearAndSetPCIePhyBit(struct rtl8168_private *tp, u8 addr, u16 clearmask, u16 setmask) +{ + u16 EphyValue; + + EphyValue = rtl8168_ephy_read(tp, addr); + EphyValue &= ~clearmask; + EphyValue |= setmask; + rtl8168_ephy_write(tp, addr, EphyValue); +} + +static void ClearPCIePhyBit(struct rtl8168_private *tp, u8 addr, u16 mask) +{ + ClearAndSetPCIePhyBit( tp, + addr, + mask, + 0 + ); +} + +static void SetPCIePhyBit( struct rtl8168_private *tp, u8 addr, u16 mask) +{ + ClearAndSetPCIePhyBit( tp, + addr, + 0, + mask + ); +} + +static u32 +rtl8168_csi_other_fun_read(struct rtl8168_private *tp, + u8 multi_fun_sel_bit, + u32 addr) +{ + u32 cmd; + int i; + u32 value = 0; + + cmd = CSIAR_Read | CSIAR_ByteEn << CSIAR_ByteEn_shift | (addr & CSIAR_Addr_Mask); + + if (tp->mcfg != CFG_METHOD_20 && tp->mcfg != CFG_METHOD_23 && + tp->mcfg != CFG_METHOD_26 && tp->mcfg != CFG_METHOD_27 && + tp->mcfg != CFG_METHOD_28 && tp->mcfg != CFG_METHOD_31 && + tp->mcfg != CFG_METHOD_32 && 
tp->mcfg != CFG_METHOD_33) { + multi_fun_sel_bit = 0; + } + + if ( multi_fun_sel_bit > 7 ) { + return 0xffffffff; + } + + cmd |= multi_fun_sel_bit << 16; + + RTL_W32(tp, CSIAR, cmd); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed CSI read */ + if (RTL_R32(tp, CSIAR) & CSIAR_Flag) { + value = (u32)RTL_R32(tp, CSIDR); + break; + } + } + + udelay(20); + + return value; +} + +static void +rtl8168_csi_other_fun_write(struct rtl8168_private *tp, + u8 multi_fun_sel_bit, + u32 addr, + u32 value) +{ + u32 cmd; + int i; + + RTL_W32(tp, CSIDR, value); + cmd = CSIAR_Write | CSIAR_ByteEn << CSIAR_ByteEn_shift | (addr & CSIAR_Addr_Mask); + if (tp->mcfg != CFG_METHOD_20 && tp->mcfg != CFG_METHOD_23 && + tp->mcfg != CFG_METHOD_26 && tp->mcfg != CFG_METHOD_27 && + tp->mcfg != CFG_METHOD_28 && tp->mcfg != CFG_METHOD_31 && + tp->mcfg != CFG_METHOD_32 && tp->mcfg != CFG_METHOD_33) { + multi_fun_sel_bit = 0; + } + + if ( multi_fun_sel_bit > 7 ) { + return; + } + + cmd |= multi_fun_sel_bit << 16; + + RTL_W32(tp, CSIAR, cmd); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed CSI write */ + if (!(RTL_R32(tp, CSIAR) & CSIAR_Flag)) + break; + } + + udelay(20); +} + +static u32 +rtl8168_csi_read(struct rtl8168_private *tp, + u32 addr) +{ + u8 multi_fun_sel_bit; + + if (tp->mcfg == CFG_METHOD_20) + multi_fun_sel_bit = 2; + else if (tp->mcfg == CFG_METHOD_26 || tp->mcfg == CFG_METHOD_31 || + tp->mcfg == CFG_METHOD_32 || tp->mcfg == CFG_METHOD_33) + multi_fun_sel_bit = 1; + else + multi_fun_sel_bit = 0; + + return rtl8168_csi_other_fun_read(tp, multi_fun_sel_bit, addr); +} + +static void +rtl8168_csi_write(struct rtl8168_private *tp, + u32 addr, + u32 value) +{ + u8 multi_fun_sel_bit; + + if (tp->mcfg == CFG_METHOD_20) + multi_fun_sel_bit = 2; + else if (tp->mcfg == CFG_METHOD_26 || tp->mcfg == CFG_METHOD_31 || + tp->mcfg == CFG_METHOD_32 || tp->mcfg == CFG_METHOD_33) + multi_fun_sel_bit = 1; + else + multi_fun_sel_bit = 0; + + rtl8168_csi_other_fun_write(tp, multi_fun_sel_bit, addr, value); +} + +static u8 +rtl8168_csi_fun0_read_byte(struct rtl8168_private *tp, + u32 addr) +{ + u8 RetVal = 0; + + if (tp->mcfg == CFG_METHOD_20 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + u32 TmpUlong; + u16 RegAlignAddr; + u8 ShiftByte; + + RegAlignAddr = addr & ~(0x3); + ShiftByte = addr & (0x3); + TmpUlong = rtl8168_csi_other_fun_read(tp, 0, addr); + TmpUlong >>= (8*ShiftByte); + RetVal = (u8)TmpUlong; + } else { + struct pci_dev *pdev = tp->pci_dev; + + pci_read_config_byte(pdev, addr, &RetVal); + } + + udelay(20); + + return RetVal; +} + +static void +rtl8168_csi_fun0_write_byte(struct rtl8168_private *tp, + u32 addr, + u8 value) +{ + if (tp->mcfg == CFG_METHOD_20 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + u32 TmpUlong; + u16 RegAlignAddr; + u8 ShiftByte; + + RegAlignAddr = addr & ~(0x3); + ShiftByte = addr & (0x3); + TmpUlong = rtl8168_csi_other_fun_read(tp, 0, RegAlignAddr); + TmpUlong &= ~(0xFF << (8*ShiftByte)); + TmpUlong |= (value << (8*ShiftByte)); + rtl8168_csi_other_fun_write( tp, 0, RegAlignAddr, TmpUlong ); + } else { + struct pci_dev *pdev = tp->pci_dev; + + pci_write_config_byte(pdev, addr, value); + } + + udelay(20); +} + +static void +rtl8168_clear_and_set_other_fun_pci_bit(struct rtl8168_private *tp, + u8 multi_fun_sel_bit, + u32 addr, + u32 clearmask, + u32 setmask) +{ + u32 
TmpUlong; + + TmpUlong = rtl8168_csi_other_fun_read(tp, multi_fun_sel_bit, addr); + TmpUlong &= ~clearmask; + TmpUlong |= setmask; + rtl8168_csi_other_fun_write(tp, multi_fun_sel_bit, addr, TmpUlong); +} + +static void +rtl8168_other_fun_dev_pci_setting(struct rtl8168_private *tp, + u32 addr, + u32 clearmask, + u32 setmask, + u8 multi_fun_sel_bit) +{ + u32 TmpUlong; + u8 i; + u8 FunBit; + + for (i = 0; i < 8; i++) { + FunBit = (1 << i); + if (FunBit & multi_fun_sel_bit) { + u8 set_other_fun = TRUE; + + switch(tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + //0: UMAC, 1: TCR1, 2: TCR2, 3: KCS, 4: EHCI(Control by EHCI Driver) + if (i < 5) { + TmpUlong = rtl8168_csi_other_fun_read(tp, i, 0x00); + if (TmpUlong == 0xFFFFFFFF) + set_other_fun = TRUE; + else + set_other_fun = FALSE; + } + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + //0: BMC, 1: NIC, 2: TCR, 3: VGA/PCIE_TO_USB, 4: EHCI, 5: WIFI, 6: WIFI, 7: KCS + if (i == 5 || i == 6) { + if (tp->DASH) { + TmpUlong = rtl8168_ocp_read(tp, 0x184, 4); + if (TmpUlong & BIT_26) + set_other_fun = FALSE; + else + set_other_fun = TRUE; + } + } else { //function 0/1/2/3/4/7 + TmpUlong = rtl8168_csi_other_fun_read(tp, i, 0x00); + if (TmpUlong == 0xFFFFFFFF) + set_other_fun = TRUE; + else + set_other_fun = FALSE; + } + break; + default: + return; + } + + if (set_other_fun) + rtl8168_clear_and_set_other_fun_pci_bit(tp, i, addr, clearmask, setmask); + } + } +} + +static void +rtl8168_set_dash_other_fun_dev_state_change(struct rtl8168_private *tp, + u8 dev_state, + u8 multi_fun_sel_bit) +{ + u32 clearmask; + u32 setmask; + + if (dev_state == 0) { + // + // goto D0 + // + clearmask = (BIT_0 | BIT_1); + setmask = 0; + + rtl8168_other_fun_dev_pci_setting(tp, 0x44, clearmask, setmask, multi_fun_sel_bit); + } else { + // + // goto D3 + // + clearmask = 0; + setmask = (BIT_0 | BIT_1); + + rtl8168_other_fun_dev_pci_setting(tp, 0x44, clearmask, setmask, multi_fun_sel_bit); + } +} + +static void +rtl8168_set_dash_other_fun_dev_aspm_clkreq(struct rtl8168_private *tp, + u8 aspm_val, + u8 clkreq_en, + u8 multi_fun_sel_bit) +{ + u32 clearmask; + u32 setmask; + + aspm_val &= (BIT_0 | BIT_1); + clearmask = (BIT_0 | BIT_1 | BIT_8); + setmask = aspm_val; + if (clkreq_en) + setmask |= BIT_8; + + rtl8168_other_fun_dev_pci_setting(tp, 0x80, clearmask, setmask, multi_fun_sel_bit); +} + +/* +static void +rtl8168_set_dash_other_fun_dev_pci_cmd_register(struct rtl8168_private *tp, + u8 pci_cmd_reg, + u8 multi_fun_sel_bit) +{ + u32 clearmask; + u32 setmask; + + pci_cmd_reg &= (BIT_0 | BIT_1 | BIT_2); + + clearmask = (BIT_0 | BIT_1 | BIT_2); + setmask = pci_cmd_reg; + + rtl8168_other_fun_dev_pci_setting(tp, 0x04, clearmask, setmask, multi_fun_sel_bit); +} +*/ + +u32 rtl8168_eri_read_with_oob_base_address(struct rtl8168_private *tp, int addr, int len, int type, const u32 base_address) +{ + int i, val_shift, shift = 0; + u32 value1 = 0, value2 = 0, mask; + u32 eri_cmd; + const u32 transformed_base_address = ((base_address & 0x00FFF000) << 6) | (base_address & 0x000FFF); + + if (len > 4 || len <= 0) + return -1; + + while (len > 0) { + val_shift = addr % ERIAR_Addr_Align; + addr = addr & ~0x3; + + eri_cmd = ERIAR_Read | + transformed_base_address | + type << ERIAR_Type_shift | + ERIAR_ByteEn << ERIAR_ByteEn_shift | + (addr & 0x0FFF); + if (addr & 0xF000) { + u32 tmp; + + tmp = addr & 0xF000; + tmp >>= 12; + eri_cmd |= (tmp << 20) & 0x00F00000; + } + + RTL_W32(tp, ERIAR, eri_cmd); + + for (i = 0; i < 10; i++) { + udelay(100); + 
+ /* Check if the RTL8168 has completed ERI read */ + if (RTL_R32(tp, ERIAR) & ERIAR_Flag) + break; + } + + if (len == 1) mask = (0xFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 2) mask = (0xFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 3) mask = (0xFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else mask = (0xFFFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + + value1 = RTL_R32(tp, ERIDR) & mask; + value2 |= (value1 >> val_shift * 8) << shift * 8; + + if (len <= 4 - val_shift) { + len = 0; + } else { + len -= (4 - val_shift); + shift = 4 - val_shift; + addr += 4; + } + } + + udelay(20); + + return value2; +} + +u32 rtl8168_eri_read(struct rtl8168_private *tp, int addr, int len, int type) +{ + return rtl8168_eri_read_with_oob_base_address(tp, addr, len, type, 0); +} + +int rtl8168_eri_write_with_oob_base_address(struct rtl8168_private *tp, int addr, int len, u32 value, int type, const u32 base_address) +{ + int i, val_shift, shift = 0; + u32 value1 = 0, mask; + u32 eri_cmd; + const u32 transformed_base_address = ((base_address & 0x00FFF000) << 6) | (base_address & 0x000FFF); + + if (len > 4 || len <= 0) + return -1; + + while (len > 0) { + val_shift = addr % ERIAR_Addr_Align; + addr = addr & ~0x3; + + if (len == 1) mask = (0xFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 2) mask = (0xFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else if (len == 3) mask = (0xFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + else mask = (0xFFFFFFFF << (val_shift * 8)) & 0xFFFFFFFF; + + value1 = rtl8168_eri_read_with_oob_base_address(tp, addr, 4, type, base_address) & ~mask; + value1 |= ((value << val_shift * 8) >> shift * 8); + + RTL_W32(tp, ERIDR, value1); + + eri_cmd = ERIAR_Write | + transformed_base_address | + type << ERIAR_Type_shift | + ERIAR_ByteEn << ERIAR_ByteEn_shift | + (addr & 0x0FFF); + if (addr & 0xF000) { + u32 tmp; + + tmp = addr & 0xF000; + tmp >>= 12; + eri_cmd |= (tmp << 20) & 0x00F00000; + } + + RTL_W32(tp, ERIAR, eri_cmd); + + for (i = 0; i < 10; i++) { + udelay(100); + + /* Check if the RTL8168 has completed ERI write */ + if (!(RTL_R32(tp, ERIAR) & ERIAR_Flag)) + break; + } + + if (len <= 4 - val_shift) { + len = 0; + } else { + len -= (4 - val_shift); + shift = 4 - val_shift; + addr += 4; + } + } + + udelay(20); + + return 0; +} + +int rtl8168_eri_write(struct rtl8168_private *tp, int addr, int len, u32 value, int type) +{ + return rtl8168_eri_write_with_oob_base_address(tp, addr, len, value, type, NO_BASE_ADDRESS); +} + +static void +rtl8168_enable_rxdvgate(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_3); + mdelay(2); + break; + } +} + +static void +rtl8168_disable_rxdvgate(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) & ~BIT_3); + mdelay(2); + break; + } +} + +static u8 +rtl8168_is_gpio_low(struct net_device *dev) +{ + struct 
rtl8168_private *tp = netdev_priv(dev); + u8 gpio_low = FALSE; + + switch (tp->HwSuppCheckPhyDisableModeVer) { + case 1: + case 2: + if (!(rtl8168_mac_ocp_read(tp, 0xDC04) & BIT_9)) + gpio_low = TRUE; + break; + case 3: + if (!(rtl8168_mac_ocp_read(tp, 0xDC04) & BIT_13)) + gpio_low = TRUE; + break; + } + + if (gpio_low) + dprintk("gpio is low.\n"); + + return gpio_low; +} + +static u8 +rtl8168_is_phy_disable_mode_enabled(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u8 phy_disable_mode_enabled = FALSE; + + switch (tp->HwSuppCheckPhyDisableModeVer) { + case 1: + if (rtl8168_mac_ocp_read(tp, 0xDC20) & BIT_1) + phy_disable_mode_enabled = TRUE; + break; + case 2: + case 3: + if (RTL_R8(tp, 0xF2) & BIT_5) + phy_disable_mode_enabled = TRUE; + break; + } + + if (phy_disable_mode_enabled) + dprintk("phy disable mode enabled.\n"); + + return phy_disable_mode_enabled; +} + +static u8 +rtl8168_is_in_phy_disable_mode(struct net_device *dev) +{ + u8 in_phy_disable_mode = FALSE; + + if (rtl8168_is_phy_disable_mode_enabled(dev) && rtl8168_is_gpio_low(dev)) + in_phy_disable_mode = TRUE; + + if (in_phy_disable_mode) + dprintk("Hardware is in phy disable mode.\n"); + + return in_phy_disable_mode; +} + +void +rtl8168_wait_txrx_fifo_empty(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int i; + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + for (i = 0; i < 10; i++) { + udelay(100); + if (RTL_R32(tp, TxConfig) & BIT_11) + break; + } + + for (i = 0; i < 10; i++) { + udelay(100); + if ((RTL_R8(tp, MCUCmd_reg) & (Txfifo_empty | Rxfifo_empty)) == (Txfifo_empty | Rxfifo_empty)) + break; + } + + mdelay(1); + break; + } +} + +static void rtl8168_driver_start(struct rtl8168_private *tp) +{ + //change other device state to D0. 
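+ //The masks passed below pick the PCI functions to update:
+ //0x1E covers functions 1-4 and 0xFC covers functions 2-7
+ //(see rtl8168_other_fun_dev_pci_setting).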
+ switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + rtl8168_set_dash_other_fun_dev_aspm_clkreq(tp, 3, 1, 0x1E); + rtl8168_set_dash_other_fun_dev_state_change(tp, 3, 0x1E); + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_set_dash_other_fun_dev_aspm_clkreq(tp, 3, 1, 0xFC); + rtl8168_set_dash_other_fun_dev_state_change(tp, 3, 0xFC); + break; + } + + if (!tp->DASH) + return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + int timeout; + u32 tmp_value; + + rtl8168_ocp_write(tp, 0x180, 1, OOB_CMD_DRIVER_START); + tmp_value = rtl8168_ocp_read(tp, 0x30, 1); + tmp_value |= BIT_0; + rtl8168_ocp_write(tp, 0x30, 1, tmp_value); + + for (timeout = 0; timeout < 10; timeout++) { + mdelay(10); + if (rtl8168_ocp_read(tp, 0x124, 1) & BIT_0) + break; + } + } else { + int timeout; + u32 reg; + + if (tp->mcfg == CFG_METHOD_13) { + RTL_W8(tp, TwiCmdReg, RTL_R8(tp, TwiCmdReg) | ( BIT_7 )); + } + + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); + + if (tp->mcfg == CFG_METHOD_13) + reg = 0xB8; + else + reg = 0x10; + + for (timeout = 0; timeout < 10; timeout++) { + mdelay(10); + if (rtl8168_ocp_read(tp, reg, 2) & BIT_11) + break; + } + } +} + +static void rtl8168_driver_stop(struct rtl8168_private *tp) +{ + if (!tp->DASH) + goto update_device_state; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + struct net_device *dev = tp->dev; + int timeout; + u32 tmp_value; + + rtl8168_dash2_disable_txrx(dev); + + rtl8168_ocp_write(tp, 0x180, 1, OOB_CMD_DRIVER_STOP); + tmp_value = rtl8168_ocp_read(tp, 0x30, 1); + tmp_value |= BIT_0; + rtl8168_ocp_write(tp, 0x30, 1, tmp_value); + + for (timeout = 0; timeout < 10; timeout++) { + mdelay(10); + if (!(rtl8168_ocp_read(tp, 0x124, 1) & BIT_0)) + break; + } + } else { + int timeout; + u32 reg; + + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); + + if (tp->mcfg == CFG_METHOD_13) + reg = 0xB8; + else + reg = 0x10; + + for (timeout = 0; timeout < 10; timeout++) { + mdelay(10); + if ((rtl8168_ocp_read(tp, reg, 2) & BIT_11) == 0) + break; + } + + if (tp->mcfg == CFG_METHOD_13) { + RTL_W8(tp, TwiCmdReg, RTL_R8(tp, TwiCmdReg) & ~( BIT_7 )); + } + } + +update_device_state: + //change other device state to D3. 
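+ //0x0E selects functions 1-3; 0xFD selects every function except
+ //function 1, the NIC itself on CFG_METHOD_31..33.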
+ switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + rtl8168_set_dash_other_fun_dev_state_change(tp, 3, 0x0E); + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_set_dash_other_fun_dev_state_change(tp, 3, 0xFD); + break; + } +} + +#ifdef ENABLE_DASH_SUPPORT + +inline void +rtl8168_enable_dash2_interrupt(struct rtl8168_private *tp) +{ + if (!tp->DASH) return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + RTL_CMAC_W8(tp, CMAC_IBIMR0, ( ISRIMR_DASH_TYPE2_ROK | ISRIMR_DASH_TYPE2_TOK | ISRIMR_DASH_TYPE2_TDU | ISRIMR_DASH_TYPE2_RDU | ISRIMR_DASH_TYPE2_RX_DISABLE_IDLE )); + } +} + +static inline void +rtl8168_disable_dash2_interrupt(struct rtl8168_private *tp) +{ + if (!tp->DASH) return; + + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + RTL_CMAC_W8(tp, CMAC_IBIMR0, 0); + } +} +#endif + +static inline void +rtl8168_enable_hw_interrupt(struct rtl8168_private *tp) +{ + RTL_W16(tp, IntrMask, tp->intr_mask); + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) + rtl8168_enable_dash2_interrupt(tp); +#endif +} + +static inline void +rtl8168_disable_hw_interrupt(struct rtl8168_private *tp) +{ + RTL_W16(tp, IntrMask, 0x0000); + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) + rtl8168_disable_dash2_interrupt(tp); +#endif +} + + +static inline void +rtl8168_switch_to_hw_interrupt(struct rtl8168_private *tp) +{ + RTL_W32(tp, TimeInt0, 0x0000); + + rtl8168_enable_hw_interrupt(tp); +} + +static inline void +rtl8168_switch_to_timer_interrupt(struct rtl8168_private *tp) +{ + if (tp->use_timer_interrrupt) { + RTL_W32(tp, TimeInt0, timer_count); + RTL_W32(tp, TCTR, timer_count); + RTL_W16(tp, IntrMask, tp->timer_intr_mask); + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) + rtl8168_enable_dash2_interrupt(tp); +#endif + } else { + rtl8168_switch_to_hw_interrupt(tp); + } +} + +static void +rtl8168_irq_mask_and_ack(struct rtl8168_private *tp) +{ + rtl8168_disable_hw_interrupt(tp); +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) { + if (tp->dash_printer_enabled) { + RTL_W16(tp, IntrStatus, RTL_R16(tp, IntrStatus) & + ~(ISRIMR_DASH_INTR_EN | ISRIMR_DASH_INTR_CMAC_RESET)); + } else { + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + RTL_CMAC_W8(tp, CMAC_IBISR0, RTL_CMAC_R8(tp, CMAC_IBISR0)); + } + } + } else { + RTL_W16(tp, IntrStatus, RTL_R16(tp, IntrStatus)); + } +#else + RTL_W16(tp, IntrStatus, RTL_R16(tp, IntrStatus)); +#endif +} + +static void +rtl8168_nic_reset(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int i; + + RTL_W32(tp, RxConfig, (RX_DMA_BURST << RxCfgDMAShift)); + + rtl8168_enable_rxdvgate(dev); + + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + mdelay(10); + break; + case CFG_METHOD_4: + case CFG_METHOD_5: + case CFG_METHOD_6: + case CFG_METHOD_7: + case CFG_METHOD_8: + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_14: + case CFG_METHOD_15: + RTL_W8(tp, ChipCmd, StopReq | CmdRxEnb | CmdTxEnb); + udelay(100); + break; + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + for (i = 0; i < 2000; i++) { + if (!(RTL_R8(tp, TxPoll) & NPQ)) break; + udelay(100); + } + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + mdelay(2); + break; + default: + mdelay(10); 
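+ /* Unrecognized revisions fall back to a conservative 10 ms settle delay before the FIFO drain and soft reset below. */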
+ break; + } + + rtl8168_wait_txrx_fifo_empty(dev); + + /* Soft reset the chip. */ + RTL_W8(tp, ChipCmd, CmdReset); + + /* Check that the chip has finished the reset. */ + for (i = 100; i > 0; i--) { + udelay(100); + if ((RTL_R8(tp, ChipCmd) & CmdReset) == 0) + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_11: + rtl8168_oob_mutex_lock(tp); + rtl8168_ocp_write(tp, 0x10, 2, rtl8168_ocp_read(tp, 0x010, 2)&~0x00004000); + rtl8168_oob_mutex_unlock(tp); + + rtl8168_oob_notify(tp, OOB_CMD_RESET); + + for (i = 0; i < 10; i++) { + mdelay(10); + if (rtl8168_ocp_read(tp, 0x010, 2)&0x00004000) + break; + } + + for (i = 0; i < 5; i++) { + if ( rtl8168_ocp_read(tp, 0x034, 1) == 0) + break; + } + break; + } +} + +static void +rtl8168_hw_clear_timer_int(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + RTL_W32(tp, TimeInt0, 0x0000); + + switch (tp->mcfg) { + case CFG_METHOD_4: + case CFG_METHOD_5: + case CFG_METHOD_6: + case CFG_METHOD_7: + case CFG_METHOD_8: + RTL_W32(tp, TimeInt1, 0x0000); + break; + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W32(tp, TimeInt1, 0x0000); + RTL_W32(tp, TimeInt2, 0x0000); + RTL_W32(tp, TimeInt3, 0x0000); + break; + } +} + +static void +rtl8168_hw_reset(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + /* Disable interrupts */ + rtl8168_irq_mask_and_ack(tp); + + rtl8168_hw_clear_timer_int(dev); + + rtl8168_nic_reset(dev); +} + +static void rtl8168_mac_loopback_test(struct rtl8168_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + struct net_device *dev = tp->dev; + struct sk_buff *skb, *rx_skb; + dma_addr_t mapping; + struct TxDesc *txd; + struct RxDesc *rxd; + void *tmpAddr; + u32 len, rx_len, rx_cmd = 0; + u16 type; + u8 pattern; + int i; + + if (tp->DASH) + return; + + pattern = 0x5A; + len = 60; + type = htons(ETH_P_IP); + txd = tp->TxDescArray; + rxd = tp->RxDescArray; + rx_skb = tp->Rx_skbuff[0]; + RTL_W32(tp, TxConfig, (RTL_R32(tp, TxConfig) & ~0x00060000) | 0x00020000); + + do { + skb = dev_alloc_skb(len + RTK_RX_ALIGN); + if (unlikely(!skb)) + dev_printk(KERN_NOTICE, tp_to_dev(tp), "-ENOMEM;\n"); + } while (unlikely(skb == NULL)); + skb_reserve(skb, RTK_RX_ALIGN); + + memcpy(skb_put(skb, dev->addr_len), dev->dev_addr, dev->addr_len); + memcpy(skb_put(skb, dev->addr_len), dev->dev_addr, dev->addr_len); + memcpy(skb_put(skb, sizeof(type)), &type, sizeof(type)); + tmpAddr = skb_put(skb, len - 14); + + mapping = dma_map_single(tp_to_dev(tp), skb->data, len, DMA_TO_DEVICE); + dma_sync_single_for_cpu(tp_to_dev(tp), le64_to_cpu(mapping), + len, DMA_TO_DEVICE); + txd->addr = cpu_to_le64(mapping); + txd->opts2 = 0; + while (1) { + memset(tmpAddr, pattern++, len - 14); + pci_dma_sync_single_for_device(tp->pci_dev, + le64_to_cpu(mapping), + len, DMA_TO_DEVICE); + txd->opts1 = cpu_to_le32(DescOwn | FirstFrag | LastFrag | len); + + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptMyPhys); + + smp_wmb(); + RTL_W8(tp, TxPoll, NPQ); /* set polling bit */ + + for (i = 0; i < 50; i++) { + udelay(200); + rx_cmd 
= le32_to_cpu(rxd->opts1); + if ((rx_cmd & DescOwn) == 0) + break; + } + + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~(AcceptErr | AcceptRunt | AcceptBroadcast | AcceptMulticast | AcceptMyPhys | AcceptAllPhys)); + + rx_len = rx_cmd & 0x3FFF; + rx_len -= 4; + rxd->opts1 = cpu_to_le32(DescOwn | tp->rx_buf_sz); + + dma_sync_single_for_cpu(tp_to_dev(tp), le64_to_cpu(mapping), len, DMA_TO_DEVICE); + + if (rx_len == len) { + dma_sync_single_for_cpu(tp_to_dev(tp), le64_to_cpu(rxd->addr), tp->rx_buf_sz, DMA_FROM_DEVICE); + i = memcmp(skb->data, rx_skb->data, rx_len); + pci_dma_sync_single_for_device(tp->pci_dev, le64_to_cpu(rxd->addr), tp->rx_buf_sz, DMA_FROM_DEVICE); + if (i == 0) { +// dev_printk(KERN_INFO, tp_to_dev(tp), "loopback test finished\n",rx_len,len); + break; + } + } + + rtl8168_hw_reset(dev); + rtl8168_disable_rxdvgate(dev); + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + } + tp->dirty_tx++; + tp->dirty_rx++; + tp->cur_tx++; + tp->cur_rx++; + dma_unmap_single(&pdev->dev, le64_to_cpu(mapping), + len, DMA_TO_DEVICE); + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) & ~0x00060000); + dev_kfree_skb_any(skb); + RTL_W16(tp, IntrStatus, 0xFFBF); +} + +static unsigned int +rtl8168_xmii_reset_pending(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int retval; + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + retval = rtl8168_mdio_read(tp, MII_BMCR) & BMCR_RESET; + + return retval; +} + +static unsigned int +rtl8168_xmii_link_ok(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int retval; + + retval = (RTL_R8(tp, PHYstatus) & LinkStatus) ? 1 : 0; + + return retval; +} + +static int +rtl8168_wait_phy_reset_complete(struct rtl8168_private *tp) +{ + int i, val; + + for (i = 0; i < 2500; i++) { + val = rtl8168_mdio_read(tp, MII_BMCR) & BMCR_RESET; + if (!val) + return 0; + + mdelay(1); + } + + return -1; +} + +static void +rtl8168_xmii_reset_enable(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (rtl8168_is_in_phy_disable_mode(dev)) + return; + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, MII_ADVERTISE, rtl8168_mdio_read(tp, MII_ADVERTISE) & + ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL)); + rtl8168_mdio_write(tp, MII_CTRL1000, rtl8168_mdio_read(tp, MII_CTRL1000) & + ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL)); + rtl8168_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE); + + if (rtl8168_wait_phy_reset_complete(tp) == 0) return; + + if (netif_msg_link(tp)) + printk(KERN_ERR "%s: PHY reset failed.\n", dev->name); +} + +static void +rtl8168dp_10mbps_gphy_para(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u8 status = RTL_R8(tp, PHYstatus); + + if ((status & LinkStatus) && (status & _10bps)) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x10, 0x04EE); + } else { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x10, 0x01EE); + } +} + +void rtl8168_init_ring_indexes(struct rtl8168_private *tp) +{ + tp->dirty_tx = 0; + tp->dirty_rx = 0; + tp->cur_tx = 0; + tp->cur_rx = 0; +} + +static void +rtl8168_issue_offset_99_event(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_27: + case CFG_METHOD_28: + if (tp->mcfg == CFG_METHOD_24 || tp->mcfg == CFG_METHOD_25 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28) { + 
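+ /* CFG_METHOD_24/25/27/28 zero the ERI 0x3FC window, while the other revisions in this group program 0x083C083C; in either case, setting BIT_0 of ERI 0x3F8 below appears to be what actually latches the offset-99 event. */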
rtl8168_eri_write(tp, 0x3FC, 4, 0x00000000, ERIAR_ExGMAC); + } else { + rtl8168_eri_write(tp, 0x3FC, 4, 0x083C083C, ERIAR_ExGMAC); + } + csi_tmp = rtl8168_eri_read(tp, 0x3F8, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0x3F8, 1, csi_tmp, ERIAR_ExGMAC); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x1EA, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0x1EA, 1, csi_tmp, ERIAR_ExGMAC); + break; + } +} + +static void +rtl8168_enable_cfg9346_write(struct rtl8168_private *tp) +{ + RTL_W8(tp, Cfg9346, RTL_R8(tp, Cfg9346) | Cfg9346_Unlock); +} + +static void +rtl8168_disable_cfg9346_write(struct rtl8168_private *tp) +{ + RTL_W8(tp, Cfg9346, RTL_R8(tp, Cfg9346) & ~Cfg9346_Unlock); +} + +static void +rtl8168_enable_exit_l1_mask(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp |= (BIT_8 | BIT_9 | BIT_10 | BIT_11 | BIT_12); + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + break; + case CFG_METHOD_20: + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp |= (BIT_10 | BIT_11); + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + break; + case CFG_METHOD_21 ... CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp |= (BIT_7 | BIT_8 | BIT_9 | BIT_10 | BIT_11 | BIT_12); + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + break; + } +} + +static void +rtl8168_disable_exit_l1_mask(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_8 | BIT_9 | BIT_10 | BIT_11 | BIT_12); + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + break; + case CFG_METHOD_20: + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_10 | BIT_11); + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + break; + case CFG_METHOD_21 ... 
CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_7 | BIT_8 | BIT_9 | BIT_10 | BIT_11 | BIT_12); + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + break; + } +} + +static void +rtl8168_hw_aspm_clkreq_enable(struct rtl8168_private *tp, bool enable) +{ + if (!tp->HwSuppAspmClkIntrLock) return; + + if (enable && aspm) { + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn); + } else { + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn); + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en); + } + + udelay(10); +} + +#ifdef ENABLE_DASH_SUPPORT +static void +NICChkTypeEnableDashInterrupt(struct rtl8168_private *tp) +{ + if (tp->DASH) { + // + // even disconnected, enable 3 dash interrupt mask bits for in-band/out-band communication + // + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + rtl8168_enable_dash2_interrupt(tp); + RTL_W16(tp, IntrMask, (ISRIMR_DASH_INTR_EN | ISRIMR_DASH_INTR_CMAC_RESET)); + } else { + RTL_W16(tp, IntrMask, (ISRIMR_DP_DASH_OK | ISRIMR_DP_HOST_OK | ISRIMR_DP_REQSYS_OK)); + } + } +} +#endif + +static void +rtl8168_check_link_status(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int link_status_on; + +#ifdef ENABLE_FIBER_SUPPORT + rtl8168_check_fiber_link_status(dev); +#endif //ENABLE_FIBER_SUPPORT + + link_status_on = tp->link_ok(dev); + + if (tp->mcfg == CFG_METHOD_11) + rtl8168dp_10mbps_gphy_para(dev); + + if (netif_carrier_ok(dev) != link_status_on) { + if (link_status_on) { + rtl8168_hw_config(dev); + + if (tp->mcfg == CFG_METHOD_18 || tp->mcfg == CFG_METHOD_19 || tp->mcfg == CFG_METHOD_20) { + if (RTL_R8(tp, PHYstatus) & _1000bpsF) { + rtl8168_eri_write(tp, 0x1bc, 4, 0x00000011, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x0000001f, ERIAR_ExGMAC); + } else if (RTL_R8(tp, PHYstatus) & _100bps) { + rtl8168_eri_write(tp, 0x1bc, 4, 0x0000001f, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x0000001f, ERIAR_ExGMAC); + } else { + rtl8168_eri_write(tp, 0x1bc, 4, 0x0000001f, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x0000002d, ERIAR_ExGMAC); + } + } else if ((tp->mcfg == CFG_METHOD_16 || tp->mcfg == CFG_METHOD_17) && netif_running(dev)) { + if (tp->mcfg == CFG_METHOD_16 && (RTL_R8(tp, PHYstatus) & _10bps)) { + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptAllPhys); + } else if (tp->mcfg == CFG_METHOD_17) { + if (RTL_R8(tp, PHYstatus) & _1000bpsF) { + rtl8168_eri_write(tp, 0x1bc, 4, 0x00000011, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x00000005, ERIAR_ExGMAC); + } else if (RTL_R8(tp, PHYstatus) & _100bps) { + rtl8168_eri_write(tp, 0x1bc, 4, 0x0000001f, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x00000005, ERIAR_ExGMAC); + } else { + rtl8168_eri_write(tp, 0x1bc, 4, 0x0000001f, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x0000003f, ERIAR_ExGMAC); + } + } + } else if ((tp->mcfg == CFG_METHOD_14 || tp->mcfg == CFG_METHOD_15) && tp->eee_enabled == 1) { + /*Full -Duplex mode*/ + if (RTL_R8(tp, PHYstatus)&FullDup) { + rtl8168_mdio_write(tp, 0x1F, 0x0006); + rtl8168_mdio_write(tp, 0x00, 0x5a30); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + if (RTL_R8(tp, PHYstatus) & (_10bps | _100bps)) + RTL_W32(tp, TxConfig, (RTL_R32(tp, TxConfig) & ~BIT_19) | BIT_25); + + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0006); + rtl8168_mdio_write(tp, 0x00, 0x5a00); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + if (RTL_R8(tp, PHYstatus) & (_10bps | _100bps)) + RTL_W32(tp, TxConfig, (RTL_R32(tp, TxConfig) & 
~BIT_19) | (InterFrameGap << TxInterFrameGapShift)); + } + } else if ((tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_23 || tp->mcfg == CFG_METHOD_24 || + tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) && netif_running(dev)) { + if (RTL_R8(tp, PHYstatus)&FullDup) + RTL_W32(tp, TxConfig, (RTL_R32(tp, TxConfig) | (BIT_24 | BIT_25)) & ~BIT_19); + else + RTL_W32(tp, TxConfig, (RTL_R32(tp, TxConfig) | BIT_25) & ~(BIT_19 | BIT_24)); + } + + if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + /*half mode*/ + if (!(RTL_R8(tp, PHYstatus)&FullDup)) { + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, MII_ADVERTISE, rtl8168_mdio_read(tp, MII_ADVERTISE)&~(ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM)); + } + } + + if ((tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) && (RTL_R8(tp, PHYstatus) & _10bps)) { + u32 csi_tmp; + + csi_tmp = rtl8168_eri_read(tp, 0x1D0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_1; + rtl8168_eri_write(tp, 0x1D0, 1, csi_tmp, ERIAR_ExGMAC); + } + + rtl8168_hw_start(dev); + + netif_carrier_on(dev); + + netif_wake_queue(dev); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + tp->phy_reg_aner = rtl8168_mdio_read(tp, MII_EXPANSION); + tp->phy_reg_anlpar = rtl8168_mdio_read(tp, MII_LPA); + tp->phy_reg_gbsr = rtl8168_mdio_read(tp, MII_STAT1000); + + if (netif_msg_ifup(tp)) + printk(KERN_INFO PFX "%s: link up\n", dev->name); + } else { + if (netif_msg_ifdown(tp)) + printk(KERN_INFO PFX "%s: link down\n", dev->name); + + tp->phy_reg_aner = 0; + tp->phy_reg_anlpar = 0; + tp->phy_reg_gbsr = 0; + + netif_stop_queue(dev); + + netif_carrier_off(dev); + + rtl8168_hw_reset(dev); + + rtl8168_tx_clear(tp); + + rtl8168_rx_clear(tp); + + rtl8168_init_ring(dev); + + if (dynamic_aspm) { + rtl8168_enable_cfg9346_write(tp); + rtl8168_hw_aspm_clkreq_enable(tp, true); + rtl8168_disable_cfg9346_write(tp); + } + + rtl8168_set_speed(dev, tp->autoneg, tp->speed, tp->duplex, tp->advertising); + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_27: + case CFG_METHOD_28: + if (tp->org_pci_offset_99 & BIT_2) + tp->issue_offset_99_event = TRUE; + break; + } + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) { + NICChkTypeEnableDashInterrupt(tp); + } +#endif + } + } + + if (!link_status_on) { + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_27: + case CFG_METHOD_28: + if (tp->issue_offset_99_event) { + if (!(RTL_R8(tp, PHYstatus) & PowerSaveStatus)) { + tp->issue_offset_99_event = FALSE; + rtl8168_issue_offset_99_event(tp); + } + } + break; + } + } else { + if (dynamic_aspm) { + bool enable_hw_aspm_clkreq = true; + if (tp->dynamic_aspm_packet_count > dynamic_aspm_packet_threshold) + enable_hw_aspm_clkreq = false; + + rtl8168_enable_cfg9346_write(tp); + rtl8168_hw_aspm_clkreq_enable(tp, enable_hw_aspm_clkreq); + rtl8168_disable_cfg9346_write(tp); + } + tp->dynamic_aspm_packet_count = 0; + } +} + +static void +rtl8168_link_option(u8 *aut, + u32 *spd, + u8 *dup, + u32 *adv) +{ + 
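+ /* Sanitize the user-supplied link module parameters in place: out-of-range values fall back to autoneg enabled, 1000 Mb/s, full duplex, and, if the advertisement mask ends up empty, advertising all 10/100/1000 half/full modes. */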
if ((*spd != SPEED_1000) && (*spd != SPEED_100) && (*spd != SPEED_10)) + *spd = SPEED_1000; + + if ((*dup != DUPLEX_FULL) && (*dup != DUPLEX_HALF)) + *dup = DUPLEX_FULL; + + if ((*aut != AUTONEG_ENABLE) && (*aut != AUTONEG_DISABLE)) + *aut = AUTONEG_ENABLE; + + *adv &= (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + if (*adv == 0) + *adv = (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); +} + +static void +rtl8168_enable_ocp_phy_power_saving(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 val; + + if (tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + val = rtl8168_mdio_read_phy_ocp(tp, 0x0C41, 0x13); + if (val != 0x0050) { + rtl8168_set_phy_mcu_patch_request(tp); + rtl8168_mdio_write_phy_ocp(tp, 0x0C41, 0x13, 0x0000); + rtl8168_mdio_write_phy_ocp(tp, 0x0C41, 0x13, 0x0050); + rtl8168_clear_phy_mcu_patch_request(tp); + } + } +} + +static void +rtl8168_disable_ocp_phy_power_saving(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 val; + + if (tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + val = rtl8168_mdio_read_phy_ocp(tp, 0x0C41, 0x13); + if (val != 0x0500) { + rtl8168_set_phy_mcu_patch_request(tp); + rtl8168_mdio_write_phy_ocp(tp, 0x0C41, 0x13, 0x0000); + rtl8168_mdio_write_phy_ocp(tp, 0x0C41, 0x13, 0x0500); + rtl8168_clear_phy_mcu_patch_request(tp); + } + } +} + +void +rtl8168_wait_ll_share_fifo_ready(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int i; + + for (i = 0; i < 10; i++) { + udelay(100); + if (RTL_R16(tp, 0xD2) & BIT_9) + break; + } +} + +static void +rtl8168_disable_pci_offset_99(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x3F2, 2, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_0 | BIT_1); + rtl8168_eri_write(tp, 0x3F2, 2, csi_tmp, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_csi_fun0_write_byte(tp, 0x99, 0x00); + break; + } +} + +static void +rtl8168_enable_pci_offset_99(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_csi_fun0_write_byte(tp, 0x99, tp->org_pci_offset_99); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + 
case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x3F2, 2, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_0 | BIT_1); + if (tp->org_pci_offset_99 & (BIT_5 | BIT_6)) + csi_tmp |= BIT_1; + if (tp->org_pci_offset_99 & BIT_2) + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0x3F2, 2, csi_tmp, ERIAR_ExGMAC); + break; + } +} + +static void +rtl8168_init_pci_offset_99(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_26: + if (tp->org_pci_offset_99 & BIT_2) { + csi_tmp = rtl8168_eri_read(tp, 0x5C2, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_1; + rtl8168_eri_write(tp, 0x5C2, 1, csi_tmp, ERIAR_ExGMAC); + } + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x3F2, 2, ERIAR_ExGMAC); + csi_tmp &= ~( BIT_8 | BIT_9 | BIT_10 | BIT_11 | BIT_12 | BIT_13 | BIT_14 | BIT_15 ); + csi_tmp |= ( BIT_9 | BIT_10 | BIT_13 | BIT_14 | BIT_15 ); + rtl8168_eri_write(tp, 0x3F2, 2, csi_tmp, ERIAR_ExGMAC); + csi_tmp = rtl8168_eri_read(tp, 0x3F5, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_6 | BIT_7; + rtl8168_eri_write(tp, 0x3F5, 1, csi_tmp, ERIAR_ExGMAC); + rtl8168_mac_ocp_write(tp, 0xE02C, 0x1880); + rtl8168_mac_ocp_write(tp, 0xE02E, 0x4880); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_26: + rtl8168_eri_write(tp, 0x5C0, 1, 0xFA, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_26: + if (tp->org_pci_offset_99 & BIT_2) { + csi_tmp = rtl8168_eri_read(tp, 0x5C8, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0x5C8, 1, csi_tmp, ERIAR_ExGMAC); + } + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + if (tp->org_pci_offset_99 & BIT_2) + rtl8168_mac_ocp_write(tp, 0xE0A2, rtl8168_mac_ocp_read(tp, 0xE0A2) | BIT_0); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_23: + rtl8168_eri_write(tp, 0x2E8, 2, 0x883C, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2EA, 2, 0x8C12, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2EC, 2, 0x9003, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2E2, 2, 0x883C, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2E4, 2, 0x8C12, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2E6, 2, 0x9003, ERIAR_ExGMAC); + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_eri_write(tp, 0x2E8, 2, 0x9003, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2EA, 2, 0x9003, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2EC, 2, 0x9003, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2E2, 2, 0x883C, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2E4, 2, 0x8C12, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x2E6, 2, 0x9003, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + csi_tmp = rtl8168_eri_read(tp, 0x3FA, 2, ERIAR_ExGMAC); + csi_tmp |= BIT_14; + rtl8168_eri_write(tp, 0x3FA, 2, 
csi_tmp, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + if (tp->org_pci_offset_99 & BIT_2) + RTL_W8(tp, 0xB6, RTL_R8(tp, 0xB6) | BIT_0); + break; + } + + rtl8168_enable_pci_offset_99(tp); +} + +static void +rtl8168_disable_pci_offset_180(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x1E2, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_2; + rtl8168_eri_write(tp, 0x1E2, 1, csi_tmp, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_26: + rtl8168_eri_write(tp, 0x1E9, 1, 0x0A, ERIAR_ExGMAC); + break; + } +} + +static void +rtl8168_enable_pci_offset_180(struct rtl8168_private *tp) +{ + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_25: + case CFG_METHOD_28: + csi_tmp = rtl8168_eri_read(tp, 0x1E8, 4, ERIAR_ExGMAC); + csi_tmp &= ~(0x0000FF00); + csi_tmp |= (0x00006400); + rtl8168_eri_write(tp, 0x1E8, 4, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x1E4, 4, ERIAR_ExGMAC); + csi_tmp &= ~(0x0000FF00); + rtl8168_eri_write(tp, 0x1E4, 4, csi_tmp, ERIAR_ExGMAC); + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x1E8, 4, ERIAR_ExGMAC); + csi_tmp &= ~(0x0000FFF0); + csi_tmp |= (0x00000640); + rtl8168_eri_write(tp, 0x1E8, 4, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x1E4, 4, ERIAR_ExGMAC); + csi_tmp &= ~(0x0000FF00); + rtl8168_eri_write(tp, 0x1E4, 4, csi_tmp, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + csi_tmp = rtl8168_eri_read(tp, 0x1E2, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_2; + rtl8168_eri_write(tp, 0x1E2, 1, csi_tmp, ERIAR_ExGMAC); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_26: + rtl8168_eri_write(tp, 0x1E9, 1, 0x64, ERIAR_ExGMAC); + break; + } + + rtl8168_mac_ocp_write(tp, 0xE094, 0x0000); +} + +static void +rtl8168_init_pci_offset_180(struct rtl8168_private *tp) +{ + if (tp->org_pci_offset_180 & (BIT_0|BIT_1)) + rtl8168_enable_pci_offset_180(tp); + else + rtl8168_disable_pci_offset_180(tp); +} + +static void +rtl8168_set_pci_99_180_exit_driver_para(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_issue_offset_99_event(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_disable_pci_offset_99(tp); + break; + } + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + 
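+ /* revisions with configurable PCI offset 0x180 power saving also get it disabled on driver exit */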
rtl8168_disable_pci_offset_180(tp); + break; + } +} + +static void +rtl8168_hw_d3_para(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + RTL_W16(tp, RxMaxSize, RX_BUF_SIZE); + + if (tp->HwSuppAspmClkIntrLock) { + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) & ~BIT_7); + rtl8168_enable_cfg9346_write(tp); + rtl8168_hw_aspm_clkreq_enable(tp, false); + rtl8168_disable_cfg9346_write(tp); + } + + rtl8168_disable_exit_l1_mask(tp); + +#ifdef ENABLE_REALWOW_SUPPORT + rtl8168_set_realwow_d3_para(dev); +#endif + + if (tp->mcfg == CFG_METHOD_18 || tp->mcfg == CFG_METHOD_19 || tp->mcfg == CFG_METHOD_20) { + rtl8168_eri_write(tp, 0x1bc, 4, 0x0000001f, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x0000002d, ERIAR_ExGMAC); + } else if (tp->mcfg == CFG_METHOD_16) { + rtl8168_eri_write(tp, 0x1bc, 4, 0x0000001f, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0x1dc, 4, 0x0000003f, ERIAR_ExGMAC); + } + + if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_23 || tp->mcfg == CFG_METHOD_24 || + tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + rtl8168_eri_write(tp, 0x2F8, 2, 0x0064, ERIAR_ExGMAC); + } + + if (tp->bios_setting & BIT_28) { + if (tp->mcfg == CFG_METHOD_18 || tp->mcfg == CFG_METHOD_19 || + tp->mcfg == CFG_METHOD_20) { + u32 gphy_val; + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x04, 0x0061); + rtl8168_mdio_write(tp, 0x09, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val &= ~BIT_7; + rtl8168_mdio_write(tp, 0x06, gphy_val); + mdelay(1); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + gphy_val = rtl8168_mdio_read(tp, 0x16); + gphy_val &= ~BIT_10; + rtl8168_mdio_write(tp, 0x16, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + } + + rtl8168_set_pci_99_180_exit_driver_para(dev); + + /*disable ocp phy power saving*/ + if (tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) + if (!tp->dash_printer_enabled) + rtl8168_disable_ocp_phy_power_saving(dev); + + rtl8168_disable_rxdvgate(dev); +} + +static void +rtl8168_enable_magic_packet(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 csi_tmp; + + switch (tp->HwSuppMagicPktVer) { + case WAKEUP_MAGIC_PACKET_V1: + rtl8168_enable_cfg9346_write(tp); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | MagicPacket); + rtl8168_disable_cfg9346_write(tp); + break; + case WAKEUP_MAGIC_PACKET_V2: + csi_tmp = rtl8168_eri_read(tp, 0xDE, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0xDE, 1, csi_tmp, ERIAR_ExGMAC); + break; + } +} +static void +rtl8168_disable_magic_packet(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 csi_tmp; + + switch (tp->HwSuppMagicPktVer) { + case WAKEUP_MAGIC_PACKET_V1: + rtl8168_enable_cfg9346_write(tp); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~MagicPacket); + rtl8168_disable_cfg9346_write(tp); + break; + case WAKEUP_MAGIC_PACKET_V2: + csi_tmp = rtl8168_eri_read(tp, 0xDE, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDE, 1, csi_tmp, 
ERIAR_ExGMAC); + break; + } +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static void +rtl8168_get_hw_wol(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u8 options; + u32 csi_tmp; + unsigned long flags; + + + spin_lock_irqsave(&tp->lock, flags); + + tp->wol_opts = 0; + options = RTL_R8(tp, Config1); + if (!(options & PMEnable)) + goto out_unlock; + + options = RTL_R8(tp, Config3); + if (options & LinkUp) + tp->wol_opts |= WAKE_PHY; + + switch (tp->HwSuppMagicPktVer) { + case WAKEUP_MAGIC_PACKET_V2: + csi_tmp = rtl8168_eri_read(tp, 0xDE, 1, ERIAR_ExGMAC); + if (csi_tmp & BIT_0) + tp->wol_opts |= WAKE_MAGIC; + break; + default: + if (options & MagicPacket) + tp->wol_opts |= WAKE_MAGIC; + break; + } + + options = RTL_R8(tp, Config5); + if (options & UWF) + tp->wol_opts |= WAKE_UCAST; + if (options & BWF) + tp->wol_opts |= WAKE_BCAST; + if (options & MWF) + tp->wol_opts |= WAKE_MCAST; + +out_unlock: + tp->wol_enabled = (tp->wol_opts || tp->dash_printer_enabled) ? WOL_ENABLED : WOL_DISABLED; + + spin_unlock_irqrestore(&tp->lock, flags); +} + +static void +rtl8168_set_hw_wol(struct net_device *dev, u32 wolopts) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int i,tmp; + static struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket }, + }; + + switch (tp->HwSuppMagicPktVer) { + case WAKEUP_MAGIC_PACKET_V2: + tmp = ARRAY_SIZE(cfg) - 1; + + if (wolopts & WAKE_MAGIC) + rtl8168_enable_magic_packet(dev); + else + rtl8168_disable_magic_packet(dev); + break; + default: + tmp = ARRAY_SIZE(cfg); + break; + } + + rtl8168_enable_cfg9346_write(tp); + + for (i = 0; i < tmp; i++) { + u8 options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(tp, cfg[i].reg, options); + } + + if (tp->dash_printer_enabled) + RTL_W8(tp, Config5, RTL_R8(tp, Config5) | LanWake); + + rtl8168_disable_cfg9346_write(tp); +} + +static void +rtl8168_phy_restart_nway(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (rtl8168_is_in_phy_disable_mode(dev)) return; + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART); +} + +static void +rtl8168_phy_setup_force_mode(struct net_device *dev, u32 speed, u8 duplex) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 bmcr_true_force = 0; + + if (rtl8168_is_in_phy_disable_mode(dev)) return; + + if ((speed == SPEED_10) && (duplex == DUPLEX_HALF)) { + bmcr_true_force = BMCR_SPEED10; + } else if ((speed == SPEED_10) && (duplex == DUPLEX_FULL)) { + bmcr_true_force = BMCR_SPEED10 | BMCR_FULLDPLX; + } else if ((speed == SPEED_100) && (duplex == DUPLEX_HALF)) { + bmcr_true_force = BMCR_SPEED100; + } else if ((speed == SPEED_100) && (duplex == DUPLEX_FULL)) { + bmcr_true_force = BMCR_SPEED100 | BMCR_FULLDPLX; + } else { + netif_err(tp, drv, dev, "Failed to set phy force mode!\n"); + return; + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, MII_BMCR, bmcr_true_force); +} + +static void +rtl8168_set_pci_pme(struct rtl8168_private *tp, int set) +{ + struct pci_dev *pdev = tp->pci_dev; + u16 pmc; + + if (!pdev->pm_cap) + return; + + pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &pmc); + pmc |= PCI_PM_CTRL_PME_STATUS; + if (set) + pmc |= 
PCI_PM_CTRL_PME_ENABLE; + else + pmc &= ~PCI_PM_CTRL_PME_ENABLE; + pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, pmc); +} + +static void +rtl8168_set_wol_link_speed(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int auto_nego; + int giga_ctrl; + u32 adv; + u16 anlpar; + u16 gbsr; + u16 aner; + + if (tp->autoneg != AUTONEG_ENABLE) + goto exit; + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + auto_nego = rtl8168_mdio_read(tp, MII_ADVERTISE); + auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL + | ADVERTISE_100HALF | ADVERTISE_100FULL); + + giga_ctrl = rtl8168_mdio_read(tp, MII_CTRL1000); + giga_ctrl &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); + + aner = anlpar = gbsr = 0; + if (tp->link_ok(dev)) { + aner = rtl8168_mdio_read(tp, MII_EXPANSION); + anlpar = rtl8168_mdio_read(tp, MII_LPA); + gbsr = rtl8168_mdio_read(tp, MII_STAT1000); + } else { + if (netif_running(dev)) { + aner = tp->phy_reg_aner; + anlpar = tp->phy_reg_anlpar; + gbsr = tp->phy_reg_gbsr; + } + } + + if ((aner | anlpar | gbsr) == 0) { + int auto_nego_tmp = 0; + adv = tp->advertising; + if ((adv & ADVERTISED_10baseT_Half) && (anlpar & LPA_10HALF)) + auto_nego_tmp |= ADVERTISE_10HALF; + if ((adv & ADVERTISED_10baseT_Full) && (anlpar & LPA_10FULL)) + auto_nego_tmp |= ADVERTISE_10FULL; + if ((adv & ADVERTISED_100baseT_Half) && (anlpar & LPA_100HALF)) + auto_nego_tmp |= ADVERTISE_100HALF; + if ((adv & ADVERTISED_100baseT_Full) && (anlpar & LPA_100FULL)) + auto_nego_tmp |= ADVERTISE_100FULL; + + if (auto_nego_tmp == 0) goto exit; + + auto_nego |= auto_nego_tmp; + goto skip_check_lpa; + } + if (!(aner & EXPANSION_NWAY)) goto exit; + + adv = tp->advertising; + if ((adv & ADVERTISED_10baseT_Half) && (anlpar & LPA_10HALF)) + auto_nego |= ADVERTISE_10HALF; + else if ((adv & ADVERTISED_10baseT_Full) && (anlpar & LPA_10FULL)) + auto_nego |= ADVERTISE_10FULL; + else if ((adv & ADVERTISED_100baseT_Half) && (anlpar & LPA_100HALF)) + auto_nego |= ADVERTISE_100HALF; + else if ((adv & ADVERTISED_100baseT_Full) && (anlpar & LPA_100FULL)) + auto_nego |= ADVERTISE_100FULL; + else if (adv & ADVERTISED_1000baseT_Half && (gbsr & LPA_1000HALF)) + giga_ctrl |= ADVERTISE_1000HALF; + else if (adv & ADVERTISED_1000baseT_Full && (gbsr & LPA_1000FULL)) + giga_ctrl |= ADVERTISE_1000FULL; + else + goto exit; + +skip_check_lpa: + if (tp->DASH) + auto_nego |= (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10HALF | ADVERTISE_10FULL); + + if (((tp->mcfg == CFG_METHOD_7) || (tp->mcfg == CFG_METHOD_8)) && (RTL_R16(tp, CPlusCmd) & ASF)) + auto_nego |= (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10HALF | ADVERTISE_10FULL); + +#ifdef CONFIG_DOWN_SPEED_100 + auto_nego |= (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10HALF | ADVERTISE_10FULL); +#endif + + rtl8168_mdio_write(tp, MII_ADVERTISE, auto_nego); + rtl8168_mdio_write(tp, MII_CTRL1000, giga_ctrl); + + rtl8168_phy_restart_nway(dev); + +exit: + return; +} + +static void +rtl8168_powerdown_pll(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + +#ifdef ENABLE_FIBER_SUPPORT + if (HW_FIBER_MODE_ENABLED(tp)) + return; +#endif //ENABLE_FIBER_SUPPORT + + if (tp->wol_enabled == WOL_ENABLED || tp->DASH || tp->EnableKCPOffload) { + rtl8168_set_hw_wol(dev, tp->wol_opts); + + if (tp->mcfg == CFG_METHOD_16 || tp->mcfg == CFG_METHOD_17 || + tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_24 || tp->mcfg == CFG_METHOD_25 || + tp->mcfg == CFG_METHOD_26 || tp->mcfg == CFG_METHOD_23 || + tp->mcfg == CFG_METHOD_27 || 
tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + rtl8168_enable_cfg9346_write(tp); + RTL_W8(tp, Config2, RTL_R8(tp, Config2) | PMSTS_En); + rtl8168_disable_cfg9346_write(tp); + } + + /* Enable the PME and clear the status */ + rtl8168_set_pci_pme(tp, 1); + + if (HW_SUPP_SERDES_PHY(tp)) + return; + + rtl8168_set_wol_link_speed(dev); + + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + + return; + } + + if (tp->DASH) + return; + + if (((tp->mcfg == CFG_METHOD_7) || (tp->mcfg == CFG_METHOD_8)) && (RTL_R16(tp, CPlusCmd) & ASF)) + return; + + rtl8168_phy_power_down(dev); + + if (!tp->HwIcVerUnknown) { + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + //case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~BIT_7); + break; + } + } + + switch (tp->mcfg) { + case CFG_METHOD_14 ... CFG_METHOD_15: + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) & ~BIT_6); + break; + case CFG_METHOD_16 ... CFG_METHOD_33: + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) & ~BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) & ~BIT_6); + break; + } +} + +static void rtl8168_powerup_pll(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | BIT_7 | BIT_6); + break; + } + + rtl8168_phy_power_up(dev); +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +static void +rtl8168_get_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u8 options; + unsigned long flags; + + wol->wolopts = 0; + + if (tp->mcfg == CFG_METHOD_DEFAULT) { + wol->supported = 0; + return; + } else { + wol->supported = WAKE_ANY; + } + + spin_lock_irqsave(&tp->lock, flags); + + options = RTL_R8(tp, Config1); + if (!(options & PMEnable)) + goto out_unlock; + + wol->wolopts = tp->wol_opts; + +out_unlock: + spin_unlock_irqrestore(&tp->lock, flags); +} + +static int +rtl8168_set_wol(struct net_device *dev, + struct ethtool_wolinfo *wol) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + if (tp->mcfg == CFG_METHOD_DEFAULT) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + + tp->wol_opts = wol->wolopts; + + tp->wol_enabled = (tp->wol_opts || tp->dash_printer_enabled) ? 
WOL_ENABLED : WOL_DISABLED; + + spin_unlock_irqrestore(&tp->lock, flags); + + device_set_wakeup_enable(tp_to_dev(tp), tp->wol_enabled); + + return 0; +} + +static void +rtl8168_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct rtl8168_fw *rtl_fw = tp->rtl_fw; + + strcpy(info->driver, MODULENAME); + strcpy(info->version, RTL8168_VERSION); + strcpy(info->bus_info, pci_name(tp->pci_dev)); + info->regdump_len = R8168_REGS_DUMP_SIZE; + info->eedump_len = tp->eeprom_len; + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (rtl_fw) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int +rtl8168_get_regs_len(struct net_device *dev) +{ + return R8168_REGS_DUMP_SIZE; +} +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) + +static int +rtl8168_set_speed_xmii(struct net_device *dev, + u8 autoneg, + u32 speed, + u8 duplex, + u32 adv) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int auto_nego = 0; + int giga_ctrl = 0; + int rc = -EINVAL; + + if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + //Disable Giga Lite + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + rtl8168_clear_eth_phy_bit(tp, 0x14, BIT_9); + if (tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) + rtl8168_clear_eth_phy_bit(tp, 0x14, BIT_7); + rtl8168_mdio_write(tp, 0x1F, 0x0A40); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + if ((speed != SPEED_1000) && + (speed != SPEED_100) && + (speed != SPEED_10)) { + speed = SPEED_1000; + duplex = DUPLEX_FULL; + } + + giga_ctrl = rtl8168_mdio_read(tp, MII_CTRL1000); + giga_ctrl &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); + + if (autoneg == AUTONEG_ENABLE) { + /*n-way force*/ + auto_nego = rtl8168_mdio_read(tp, MII_ADVERTISE); + auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL | + ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + + if (adv & ADVERTISED_10baseT_Half) + auto_nego |= ADVERTISE_10HALF; + if (adv & ADVERTISED_10baseT_Full) + auto_nego |= ADVERTISE_10FULL; + if (adv & ADVERTISED_100baseT_Half) + auto_nego |= ADVERTISE_100HALF; + if (adv & ADVERTISED_100baseT_Full) + auto_nego |= ADVERTISE_100FULL; + if (adv & ADVERTISED_1000baseT_Half) + giga_ctrl |= ADVERTISE_1000HALF; + if (adv & ADVERTISED_1000baseT_Full) + giga_ctrl |= ADVERTISE_1000FULL; + + //flow control + if (dev->mtu <= ETH_DATA_LEN) + auto_nego |= ADVERTISE_PAUSE_CAP|ADVERTISE_PAUSE_ASYM; + + tp->phy_auto_nego_reg = auto_nego; + tp->phy_1000_ctrl_reg = giga_ctrl; + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, MII_ADVERTISE, auto_nego); + rtl8168_mdio_write(tp, MII_CTRL1000, giga_ctrl); + rtl8168_phy_restart_nway(dev); + mdelay(20); + } else { + /*true force*/ + if (speed == SPEED_10 || speed == SPEED_100) + rtl8168_phy_setup_force_mode(dev, speed, duplex); + else + goto out; + } + + tp->autoneg = autoneg; + tp->speed = speed; + tp->duplex = duplex; + tp->advertising = adv; + + if (tp->mcfg == CFG_METHOD_11) + rtl8168dp_10mbps_gphy_para(dev); + + rc = 0; +out: + return rc; +} + +static int +rtl8168_set_speed(struct net_device *dev, + u8 autoneg, + u32 speed, + u8 duplex, + u32 adv) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int ret; + + ret = tp->set_speed(dev, autoneg, speed, duplex, adv); + + return ret; +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +static int 
+rtl8168_set_settings(struct net_device *dev, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + struct ethtool_cmd *cmd +#else + const struct ethtool_link_ksettings *cmd +#endif + ) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int ret; + unsigned long flags; + u8 autoneg; + u32 speed; + u8 duplex; + u32 supported, advertising; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + autoneg = cmd->autoneg; + speed = cmd->speed; + duplex = cmd->duplex; + supported = cmd->supported; + advertising = cmd->advertising; +#else + const struct ethtool_link_settings *base = &cmd->base; + autoneg = base->autoneg; + speed = base->speed; + duplex = base->duplex; + ethtool_convert_link_mode_to_legacy_u32(&supported, + cmd->link_modes.supported); + ethtool_convert_link_mode_to_legacy_u32(&advertising, + cmd->link_modes.advertising); +#endif + if (advertising & ~supported) + return -EINVAL; + + spin_lock_irqsave(&tp->lock, flags); + ret = rtl8168_set_speed(dev, autoneg, speed, duplex, advertising); + spin_unlock_irqrestore(&tp->lock, flags); + + return ret; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) +static u32 +rtl8168_get_tx_csum(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 ret; + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + ret = ((dev->features & NETIF_F_IP_CSUM) != 0); +#else + ret = ((dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) != 0); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + spin_unlock_irqrestore(&tp->lock, flags); + + return ret; +} + +static u32 +rtl8168_get_rx_csum(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 ret; + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + ret = tp->cp_cmd & RxChkSum; + spin_unlock_irqrestore(&tp->lock, flags); + + return ret; +} + +static int +rtl8168_set_tx_csum(struct net_device *dev, + u32 data) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + if (tp->mcfg == CFG_METHOD_DEFAULT) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + if (data) + dev->features |= NETIF_F_IP_CSUM; + else + dev->features &= ~NETIF_F_IP_CSUM; +#else + if (data) + if ((tp->mcfg == CFG_METHOD_1) || (tp->mcfg == CFG_METHOD_2) || (tp->mcfg == CFG_METHOD_3)) + dev->features |= NETIF_F_IP_CSUM; + else + dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); + else + dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} + +static int +rtl8168_set_rx_csum(struct net_device *dev, + u32 data) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + if (tp->mcfg == CFG_METHOD_DEFAULT) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + + if (data) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) + +#ifdef CONFIG_R8168_VLAN + +static inline u32 +rtl8168_tx_vlan_tag(struct rtl8168_private *tp, + struct sk_buff *skb) +{ + u32 tag; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + tag = (tp->vlgrp && vlan_tx_tag_present(skb)) ? 
+ TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; +#elif LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) + tag = (vlan_tx_tag_present(skb)) ? + TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; +#else + tag = (skb_vlan_tag_present(skb)) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +#endif + + return tag; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + +static void +rtl8168_vlan_rx_register(struct net_device *dev, + struct vlan_group *grp) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + tp->vlgrp = grp; + if (tp->vlgrp) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + RTL_R16(tp, CPlusCmd); + spin_unlock_irqrestore(&tp->lock, flags); +} + +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) +static void +rtl8168_vlan_rx_kill_vid(struct net_device *dev, + unsigned short vid) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) + if (tp->vlgrp) + tp->vlgrp->vlan_devices[vid] = NULL; +#else + vlan_group_set_device(tp->vlgrp, vid, NULL); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) + spin_unlock_irqrestore(&tp->lock, flags); +} +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + +static int +rtl8168_rx_vlan_skb(struct rtl8168_private *tp, + struct RxDesc *desc, + struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + int ret = -1; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + if (tp->vlgrp && (opts2 & RxVlanTag)) { + rtl8168_rx_hwaccel_skb(skb, tp->vlgrp, + swab16(opts2 & 0xffff)); + ret = 0; + } +#elif LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); +#else + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +#endif + + desc->opts2 = 0; + return ret; +} + +#else /* !CONFIG_R8168_VLAN */ + +static inline u32 +rtl8168_tx_vlan_tag(struct rtl8168_private *tp, + struct sk_buff *skb) +{ + return 0; +} + +static int +rtl8168_rx_vlan_skb(struct rtl8168_private *tp, + struct RxDesc *desc, + struct sk_buff *skb) +{ + return -1; +} + +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) + +static netdev_features_t rtl8168_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + if (dev->mtu > MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + if (dev->mtu > ETH_DATA_LEN) { + features &= ~NETIF_F_ALL_TSO; + features &= ~NETIF_F_ALL_CSUM; + } + spin_unlock_irqrestore(&tp->lock, flags); + + return features; +} + +static int rtl8168_hw_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 rx_config; + + rx_config = RTL_R32(tp, RxConfig); + if (features & NETIF_F_RXALL) + rx_config |= (AcceptErr | AcceptRunt); + else + rx_config &= ~(AcceptErr | AcceptRunt); + + RTL_W32(tp, RxConfig, rx_config); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if (dev->features & NETIF_F_HW_VLAN_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + + RTL_W16(tp, CPlusCmd, tp->cp_cmd); + RTL_R16(tp, CPlusCmd); + + return 0; +} + +static int rtl8168_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; 
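+ /* Only the RXALL, RXCSUM and hardware VLAN-RX offloads are runtime-toggleable; everything else is masked off, and the hardware is reprogrammed under tp->lock only when the effective feature mask actually changes. */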
+ + features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX; + + spin_lock_irqsave(&tp->lock, flags); + if (features ^ dev->features) + rtl8168_hw_set_features(dev, features); + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} + +#endif + +static void rtl8168_gset_xmii(struct net_device *dev, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + struct ethtool_cmd *cmd +#else + struct ethtool_link_ksettings *cmd +#endif + ) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u8 status; + u8 autoneg, duplex; + u32 speed = 0; + u16 bmcr, bmsr, anlpar, ctrl1000 = 0, stat1000 = 0; + u32 supported, advertising, lp_advertising; + unsigned long flags; + + supported = SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP | + SUPPORTED_Pause | + SUPPORTED_Asym_Pause; + + advertising = ADVERTISED_TP; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + bmcr = rtl8168_mdio_read(tp, MII_BMCR); + bmsr = rtl8168_mdio_read(tp, MII_BMSR); + anlpar = rtl8168_mdio_read(tp, MII_LPA); + ctrl1000 = rtl8168_mdio_read(tp, MII_CTRL1000); + stat1000 = rtl8168_mdio_read(tp, MII_STAT1000); + spin_unlock_irqrestore(&tp->lock, flags); + + if (bmcr & BMCR_ANENABLE) { + advertising |= ADVERTISED_Autoneg; + autoneg = AUTONEG_ENABLE; + + if (bmsr & BMSR_ANEGCOMPLETE) { + lp_advertising = mii_lpa_to_ethtool_lpa_t(anlpar); + lp_advertising |= + mii_stat1000_to_ethtool_lpa_t(stat1000); + } else { + lp_advertising = 0; + } + + if (tp->phy_auto_nego_reg & ADVERTISE_10HALF) + advertising |= ADVERTISED_10baseT_Half; + if (tp->phy_auto_nego_reg & ADVERTISE_10FULL) + advertising |= ADVERTISED_10baseT_Full; + if (tp->phy_auto_nego_reg & ADVERTISE_100HALF) + advertising |= ADVERTISED_100baseT_Half; + if (tp->phy_auto_nego_reg & ADVERTISE_100FULL) + advertising |= ADVERTISED_100baseT_Full; + if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL) + advertising |= ADVERTISED_1000baseT_Full; + } else { + autoneg = AUTONEG_DISABLE; + lp_advertising = 0; + } + + status = RTL_R8(tp, PHYstatus); + + if (status & LinkStatus) { + /*link on*/ + if (status & _1000bpsF) + speed = SPEED_1000; + else if (status & _100bps) + speed = SPEED_100; + else if (status & _10bps) + speed = SPEED_10; + + if (status & TxFlowCtrl) + advertising |= ADVERTISED_Asym_Pause; + + if (status & RxFlowCtrl) + advertising |= ADVERTISED_Pause; + + duplex = ((status & _1000bpsF) || (status & FullDup)) ? 
+ DUPLEX_FULL : DUPLEX_HALF; + } else { + /*link down*/ + speed = SPEED_UNKNOWN; + duplex = DUPLEX_UNKNOWN; + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + cmd->supported = supported; + cmd->advertising = advertising; +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30) + cmd->lp_advertising = lp_advertising; +#endif + cmd->autoneg = autoneg; + cmd->speed = speed; + cmd->duplex = duplex; + cmd->port = PORT_TP; +#else + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, + supported); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, + advertising); + ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, + lp_advertising); + cmd->base.autoneg = autoneg; + cmd->base.speed = speed; + cmd->base.duplex = duplex; + cmd->base.port = PORT_TP; +#endif +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +static int +rtl8168_get_settings(struct net_device *dev, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + struct ethtool_cmd *cmd +#else + struct ethtool_link_ksettings *cmd +#endif + ) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + tp->get_settings(dev, cmd); + + return 0; +} + +static void rtl8168_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8168_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned int i; + u8 *data = p; + unsigned long flags; + + if (regs->len < R8168_REGS_DUMP_SIZE) + return /* -EINVAL */; + + memset(p, 0, regs->len); + + spin_lock_irqsave(&tp->lock, flags); + for (i = 0; i < R8168_MAC_REGS_SIZE; i++) + *data++ = readb(ioaddr + i); + data = (u8*)p + 256; + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + for (i = 0; i < R8168_PHY_REGS_SIZE/2; i++) { + *(u16*)data = rtl8168_mdio_read(tp, i); + data += 2; + } + data = (u8*)p + 256 * 2; + + for (i = 0; i < R8168_EPHY_REGS_SIZE/2; i++) { + *(u16*)data = rtl8168_ephy_read(tp, i); + data += 2; + } + data = (u8*)p + 256 * 3; + + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + /* RTL8168B does not support Extend GMAC */ + break; + default: + for (i = 0; i < R8168_ERI_REGS_SIZE; i+=4) { + *(u32*)data = rtl8168_eri_read(tp, i , 4, ERIAR_ExGMAC); + data += 4; + } + break; + } + spin_unlock_irqrestore(&tp->lock, flags); +} + +static u32 +rtl8168_get_msglevel(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + return tp->msg_enable; +} + +static void +rtl8168_set_msglevel(struct net_device *dev, + u32 value) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + tp->msg_enable = value; +} + +static const char rtl8168_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; +#endif //#LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +static int rtl8168_get_stats_count(struct net_device *dev) +{ + return ARRAY_SIZE(rtl8168_gstrings); +} +#endif //#LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +#else +static int rtl8168_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8168_gstrings); + default: + return -EOPNOTSUPP; + } +} +#endif + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +static void +rtl8168_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, + u64 *data) +{ + 
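/* hardware tally counters: the NIC DMA-dumps them into the buffer at
+         * tally_paddr once CounterDump is set; the bit is polled with a
+         * bounded wait before the counters are copied out */
+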
struct rtl8168_private *tp = netdev_priv(dev); + struct rtl8168_counters *counters; + dma_addr_t paddr; + u32 cmd; + u32 WaitCnt; + unsigned long flags; + + ASSERT_RTNL(); + + counters = tp->tally_vaddr; + paddr = tp->tally_paddr; + if (!counters) + return; + + spin_lock_irqsave(&tp->lock, flags); + RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(tp, CounterAddrLow, cmd); + RTL_W32(tp, CounterAddrLow, cmd | CounterDump); + + WaitCnt = 0; + while (RTL_R32(tp, CounterAddrLow) & CounterDump) { + udelay(10); + + WaitCnt++; + if (WaitCnt > 20) + break; + } + spin_unlock_irqrestore(&tp->lock, flags); + + data[0] = le64_to_cpu(counters->tx_packets); + data[1] = le64_to_cpu(counters->rx_packets); + data[2] = le64_to_cpu(counters->tx_errors); + data[3] = le32_to_cpu(counters->rx_errors); + data[4] = le16_to_cpu(counters->rx_missed); + data[5] = le16_to_cpu(counters->align_errors); + data[6] = le32_to_cpu(counters->tx_one_collision); + data[7] = le32_to_cpu(counters->tx_multi_collision); + data[8] = le64_to_cpu(counters->rx_unicast); + data[9] = le64_to_cpu(counters->rx_broadcast); + data[10] = le32_to_cpu(counters->rx_multicast); + data[11] = le16_to_cpu(counters->tx_aborted); + data[12] = le16_to_cpu(counters->tx_underrun); +} + +static void +rtl8168_get_strings(struct net_device *dev, + u32 stringset, + u8 *data) +{ + switch (stringset) { + case ETH_SS_STATS: + memcpy(data, *rtl8168_gstrings, sizeof(rtl8168_gstrings)); + break; + } +} +#endif //#LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) + +static int rtl_get_eeprom_len(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + return tp->eeprom_len; +} + +static int rtl_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *buf) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int i,j,ret; + int start_w, end_w; + int VPD_addr, VPD_data; + u32 *eeprom_buff; + u16 tmp; + + if (tp->eeprom_type == EEPROM_TYPE_NONE) { + dev_printk(KERN_DEBUG, tp_to_dev(tp), "Detect none EEPROM\n"); + return -EOPNOTSUPP; + } else if (eeprom->len == 0 || (eeprom->offset+eeprom->len) > tp->eeprom_len) { + dev_printk(KERN_DEBUG, tp_to_dev(tp), "Invalid parameter\n"); + return -EINVAL; + } + + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + VPD_addr = 0xCE; + VPD_data = 0xD0; + break; + + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + return -EOPNOTSUPP; + default: + VPD_addr = 0xD2; + VPD_data = 0xD4; + break; + } + + start_w = eeprom->offset >> 2; + end_w = (eeprom->offset + eeprom->len - 1) >> 2; + + eeprom_buff = kmalloc(sizeof(u32)*(end_w - start_w + 1), GFP_KERNEL); + if (!eeprom_buff) + return -ENOMEM; + + rtl8168_enable_cfg9346_write(tp); + ret = -EFAULT; + for (i=start_w; i<=end_w; i++) { + pci_write_config_word(tp->pci_dev, VPD_addr, (u16)i*4); + ret = -EFAULT; + for (j = 0; j < 10; j++) { + udelay(400); + pci_read_config_word(tp->pci_dev, VPD_addr, &tmp); + if (tmp&0x8000) { + ret = 0; + break; + } + } + + if (ret) + break; + + pci_read_config_dword(tp->pci_dev, VPD_data, &eeprom_buff[i-start_w]); + } + rtl8168_disable_cfg9346_write(tp); + + if (!ret) + memcpy(buf, (u8 *)eeprom_buff + (eeprom->offset & 3), eeprom->len); + + kfree(eeprom_buff); + + return ret; +} + +#undef ethtool_op_get_link +#define ethtool_op_get_link _kc_ethtool_op_get_link +static u32 _kc_ethtool_op_get_link(struct net_device *dev) +{ + return netif_carrier_ok(dev) ? 
1 : 0; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) +#undef ethtool_op_get_sg +#define ethtool_op_get_sg _kc_ethtool_op_get_sg +static u32 _kc_ethtool_op_get_sg(struct net_device *dev) +{ +#ifdef NETIF_F_SG + return (dev->features & NETIF_F_SG) != 0; +#else + return 0; +#endif +} + +#undef ethtool_op_set_sg +#define ethtool_op_set_sg _kc_ethtool_op_set_sg +static int _kc_ethtool_op_set_sg(struct net_device *dev, u32 data) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (tp->mcfg == CFG_METHOD_DEFAULT) + return -EOPNOTSUPP; + +#ifdef NETIF_F_SG + if (data) + dev->features |= NETIF_F_SG; + else + dev->features &= ~NETIF_F_SG; +#endif + + return 0; +} +#endif + +static int rtl8168_enable_EEE(struct rtl8168_private *tp) +{ + int ret; + u16 data; + u32 csi_tmp; + + ret = 0; + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0020); + data = rtl8168_mdio_read(tp, 0x15) | 0x0100; + rtl8168_mdio_write(tp, 0x15, data); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + data = rtl8168_mdio_read(tp, 0x06) | 0x2000; + rtl8168_mdio_write(tp, 0x06, data); + rtl8168_mdio_write(tp, 0x1F, 0x0006); + rtl8168_mdio_write(tp, 0x00, 0x5A30); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0007); + rtl8168_mdio_write(tp, 0x0E, 0x003C); + rtl8168_mdio_write(tp, 0x0D, 0x4007); + rtl8168_mdio_write(tp, 0x0E, 0x0006); + rtl8168_mdio_write(tp, 0x0D, 0x0000); + if ((RTL_R8(tp, Config4)&0x40) && (RTL_R8(tp, 0x6D) & BIT_7)) { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8AC8); + rtl8168_mdio_write(tp, 0x06, RTL_R16(tp, tp->NicCustLedValue)); + rtl8168_mdio_write(tp, 0x05, 0x8B82); + data = rtl8168_mdio_read(tp, 0x06) | 0x0010; + rtl8168_mdio_write(tp, 0x05, 0x8B82); + rtl8168_mdio_write(tp, 0x06, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + break; + + case CFG_METHOD_16: + case CFG_METHOD_17: + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC) | 0x0003; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + rtl8168_mdio_write(tp,0x1F , 0x0004); + rtl8168_mdio_write(tp,0x1F , 0x0007); + rtl8168_mdio_write(tp,0x1E , 0x0020); + data = rtl8168_mdio_read(tp, 0x15)|0x0100; + rtl8168_mdio_write(tp,0x15 , data); + rtl8168_mdio_write(tp,0x1F , 0x0002); + rtl8168_mdio_write(tp,0x1F , 0x0005); + rtl8168_mdio_write(tp,0x05 , 0x8B85); + data = rtl8168_mdio_read(tp, 0x06)|0x2000; + rtl8168_mdio_write(tp,0x06 , data); + rtl8168_mdio_write(tp,0x1F , 0x0000); + rtl8168_mdio_write(tp,0x0D , 0x0007); + rtl8168_mdio_write(tp,0x0E , 0x003C); + rtl8168_mdio_write(tp,0x0D , 0x4007); + rtl8168_mdio_write(tp,0x0E , 0x0006); + rtl8168_mdio_write(tp,0x0D , 0x0000); + break; + + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp |= BIT_1 | BIT_0; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0020); + data = rtl8168_mdio_read(tp, 0x15); + data |= BIT_8; + rtl8168_mdio_write(tp, 0x15, data); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + data = rtl8168_mdio_read(tp, 0x06); + data |= BIT_13; + rtl8168_mdio_write(tp, 0x06, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0007); + rtl8168_mdio_write(tp, 0x0E, 0x003C); + rtl8168_mdio_write(tp, 0x0D, 0x4007); + rtl8168_mdio_write(tp, 0x0E, 0x0006); + rtl8168_mdio_write(tp, 
0x0D, 0x0000); + break; + + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp |= BIT_1 | BIT_0; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x11); + rtl8168_mdio_write(tp, 0x11, data | BIT_4); + rtl8168_mdio_write(tp, 0x1F, 0x0A5D); + rtl8168_mdio_write(tp, 0x10, tp->eee_adv_t); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + + default: +// dev_printk(KERN_DEBUG, tp_to_dev(tp), "Not Support EEE\n"); + ret = -EOPNOTSUPP; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mdio_write(tp, 0x1F, 0x0A4A); + rtl8168_set_eth_phy_bit(tp, 0x11, BIT_9); + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_7); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + } + + /*Advanced EEE*/ + switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_oob_mutex_lock(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_set_phy_mcu_patch_request(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_25: + rtl8168_eri_write(tp, 0x1EA, 1, 0xFA, ERIAR_ExGMAC); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x10); + if (data & BIT_10) { + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + data = rtl8168_mdio_read(tp, 0x16); + data &= ~(BIT_1); + rtl8168_mdio_write(tp, 0x16, data); + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + data = rtl8168_mdio_read(tp, 0x16); + data |= BIT_1; + rtl8168_mdio_write(tp, 0x16, data); + } + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_26: + data = rtl8168_mac_ocp_read(tp, 0xE052); + data |= BIT_0; + rtl8168_mac_ocp_write(tp, 0xE052, data); + data = rtl8168_mac_ocp_read(tp, 0xE056); + data &= 0xFF0F; + data |= (BIT_4 | BIT_5 | BIT_6); + rtl8168_mac_ocp_write(tp, 0xE056, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x10); + if (data & BIT_10) { + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + data = rtl8168_mdio_read(tp, 0x16); + data &= ~(BIT_1); + rtl8168_mdio_write(tp, 0x16, data); + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + data = rtl8168_mdio_read(tp, 0x16); + data |= BIT_1; + rtl8168_mdio_write(tp, 0x16, data); + } + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_27: + case CFG_METHOD_28: + data = rtl8168_mac_ocp_read(tp, 0xE052); + data &= ~BIT_0; + rtl8168_mac_ocp_write(tp, 0xE052, data); + data = rtl8168_mac_ocp_read(tp, 0xE056); + data &= 0xFF0F; + data |= (BIT_4 | BIT_5 | BIT_6); + rtl8168_mac_ocp_write(tp, 0xE056, data); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + data = rtl8168_mac_ocp_read(tp, 0xE052); + data &= ~(BIT_0); + rtl8168_mac_ocp_write(tp, 0xE052, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x10) | BIT_15; + rtl8168_mdio_write(tp, 0x10, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + data = rtl8168_mdio_read(tp, 0x11) | BIT_13 | 
BIT_14; + data &= ~(BIT_12); + rtl8168_mdio_write(tp, 0x11, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + /* + data = rtl8168_mac_ocp_read(tp, 0xE052); + data |= BIT_0; + rtl8168_mac_ocp_write(tp, 0xE052, data); + */ + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x10) | BIT_15; + rtl8168_mdio_write(tp, 0x10, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + data = rtl8168_mdio_read(tp, 0x11) | BIT_13 | BIT_14; + data &= ~(BIT_12); + rtl8168_mdio_write(tp, 0x11, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_clear_phy_mcu_patch_request(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_oob_mutex_unlock(tp); + break; + } + + return ret; +} + +static int rtl8168_disable_EEE(struct rtl8168_private *tp) +{ + int ret; + u16 data; + u32 csi_tmp; + + ret = 0; + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + data = rtl8168_mdio_read(tp, 0x06) & ~0x2000; + rtl8168_mdio_write(tp, 0x06, data); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0020); + data = rtl8168_mdio_read(tp, 0x15) & ~0x0100; + rtl8168_mdio_write(tp, 0x15, data); + rtl8168_mdio_write(tp, 0x1F, 0x0006); + rtl8168_mdio_write(tp, 0x00, 0x5A00); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0007); + rtl8168_mdio_write(tp, 0x0E, 0x003C); + rtl8168_mdio_write(tp, 0x0D, 0x4007); + rtl8168_mdio_write(tp, 0x0E, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + if (RTL_R8(tp, Config4) & 0x40) { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B82); + data = rtl8168_mdio_read(tp, 0x06) & ~0x0010; + rtl8168_mdio_write(tp, 0x05, 0x8B82); + rtl8168_mdio_write(tp, 0x06, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + break; + + case CFG_METHOD_16: + case CFG_METHOD_17: + csi_tmp = rtl8168_eri_read(tp, 0x1B0,4, ERIAR_ExGMAC)& ~0x0003; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + data = rtl8168_mdio_read(tp, 0x06) & ~0x2000; + rtl8168_mdio_write(tp, 0x06, data); + rtl8168_mdio_write(tp, 0x1F, 0x0004); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0020); + data = rtl8168_mdio_read(tp, 0x15) & ~0x0100; + rtl8168_mdio_write(tp,0x15 , data); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0007); + rtl8168_mdio_write(tp, 0x0E, 0x003C); + rtl8168_mdio_write(tp, 0x0D, 0x4007); + rtl8168_mdio_write(tp, 0x0E, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_1 | BIT_0); + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + data = rtl8168_mdio_read(tp, 0x06); + data &= ~BIT_13; + rtl8168_mdio_write(tp, 0x06, data); + 
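/* mirror of the enable path: BIT_8 of reg 0x15 (extension page
+         * 0x0020) is cleared next to drop the EEE advertisement */
+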
rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0020); + data = rtl8168_mdio_read(tp, 0x15); + data &= ~BIT_8; + rtl8168_mdio_write(tp, 0x15, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0007); + rtl8168_mdio_write(tp, 0x0E, 0x003C); + rtl8168_mdio_write(tp, 0x0D, 0x4007); + rtl8168_mdio_write(tp, 0x0E, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_1 | BIT_0); + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x11); + rtl8168_mdio_write(tp, 0x11, data & ~BIT_4); + rtl8168_mdio_write(tp, 0x1F, 0x0A5D); + rtl8168_mdio_write(tp, 0x10, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + + default: +// dev_printk(KERN_DEBUG, tp_to_dev(tp), "Not Support EEE\n"); + ret = -EOPNOTSUPP; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + rtl8168_clear_eth_phy_bit(tp, 0x14, BIT_7); + rtl8168_mdio_write(tp, 0x1F, 0x0A4A); + rtl8168_clear_eth_phy_bit(tp, 0x11, BIT_9); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + } + + /*Advanced EEE*/ + switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_oob_mutex_lock(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_set_phy_mcu_patch_request(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_25: + rtl8168_eri_write(tp, 0x1EA, 1, 0x00, ERIAR_ExGMAC); + + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + data = rtl8168_mdio_read(tp, 0x16); + data &= ~(BIT_1); + rtl8168_mdio_write(tp, 0x16, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_26: + data = rtl8168_mac_ocp_read(tp, 0xE052); + data &= ~(BIT_0); + rtl8168_mac_ocp_write(tp, 0xE052, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + data = rtl8168_mdio_read(tp, 0x16); + data &= ~(BIT_1); + rtl8168_mdio_write(tp, 0x16, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_27: + case CFG_METHOD_28: + data = rtl8168_mac_ocp_read(tp, 0xE052); + data &= ~(BIT_0); + rtl8168_mac_ocp_write(tp, 0xE052, data); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + data = rtl8168_mac_ocp_read(tp, 0xE052); + data &= ~(BIT_0); + rtl8168_mac_ocp_write(tp, 0xE052, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + data = rtl8168_mdio_read(tp, 0x10) & ~(BIT_15); + rtl8168_mdio_write(tp, 0x10, data); + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + data = rtl8168_mdio_read(tp, 0x11) & ~(BIT_12 | BIT_13 | BIT_14); + rtl8168_mdio_write(tp, 0x11, data); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + 
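/* release the PHY MCU patch request taken before the advanced-EEE
+         * register updates above */
+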
rtl8168_clear_phy_mcu_patch_request(tp); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_oob_mutex_unlock(tp); + break; + } + + return ret; +} + +static int rtl_nway_reset(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + int ret, bmcr; + + spin_lock_irqsave(&tp->lock, flags); + + if (unlikely(tp->rtk_enable_diag)) { + spin_unlock_irqrestore(&tp->lock, flags); + return -EBUSY; + } + + /* if autoneg is off, it's an error */ + rtl8168_mdio_write(tp, 0x1F, 0x0000); + bmcr = rtl8168_mdio_read(tp, MII_BMCR); + + if (bmcr & BMCR_ANENABLE) { + bmcr |= BMCR_ANRESTART; + rtl8168_mdio_write(tp, MII_BMCR, bmcr); + ret = 0; + } else { + ret = -EINVAL; + } + + spin_unlock_irqrestore(&tp->lock, flags); + + return ret; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) +static int +rtl_ethtool_get_eee(struct net_device *net, struct ethtool_eee *eee) +{ + struct rtl8168_private *tp = netdev_priv(net); + u32 lp, adv, supported = 0; + unsigned long flags; + u16 val; + + switch (tp->mcfg) { + case CFG_METHOD_21 ... CFG_METHOD_33: + break; + default: + return -EOPNOTSUPP; + } + + spin_lock_irqsave(&tp->lock, flags); + + if (unlikely(tp->rtk_enable_diag)) { + spin_unlock_irqrestore(&tp->lock, flags); + return -EBUSY; + } + + rtl8168_mdio_write(tp, 0x1F, 0x0A5C); + val = rtl8168_mdio_read(tp, 0x12); + supported = mmd_eee_cap_to_ethtool_sup_t(val); + + rtl8168_mdio_write(tp, 0x1F, 0x0A5D); + val = rtl8168_mdio_read(tp, 0x10); + adv = mmd_eee_adv_to_ethtool_adv_t(val); + + val = rtl8168_mdio_read(tp, 0x11); + lp = mmd_eee_adv_to_ethtool_adv_t(val); + + val = rtl8168_eri_read(tp, 0x1B0, 2, ERIAR_ExGMAC); + val &= BIT_1 | BIT_0; + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + spin_unlock_irqrestore(&tp->lock, flags); + + eee->eee_enabled = !!val; + eee->eee_active = !!(supported & adv & lp); + eee->supported = supported; + eee->advertised = adv; + eee->lp_advertised = lp; + + return 0; +} + +static int +rtl_ethtool_set_eee(struct net_device *net, struct ethtool_eee *eee) +{ + struct rtl8168_private *tp = netdev_priv(net); + unsigned long flags; + + switch (tp->mcfg) { + case CFG_METHOD_21 ... 
CFG_METHOD_33: + break; + default: + return -EOPNOTSUPP; + } + + if (HW_SUPP_SERDES_PHY(tp) || + !HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp) || + tp->DASH) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + + if (unlikely(tp->rtk_enable_diag)) { + spin_unlock_irqrestore(&tp->lock, flags); + return -EBUSY; + } + + tp->eee_enabled = eee->eee_enabled; + tp->eee_adv_t = ethtool_adv_to_mmd_eee_adv_t(eee->advertised); + + if (tp->eee_enabled) + rtl8168_enable_EEE(tp); + else + rtl8168_disable_EEE(tp); + + spin_unlock_irqrestore(&tp->lock, flags); + + rtl_nway_reset(net); + + return 0; +} +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) +static const struct ethtool_ops rtl8168_ethtool_ops = { + .get_drvinfo = rtl8168_get_drvinfo, + .get_regs_len = rtl8168_get_regs_len, + .get_link = ethtool_op_get_link, +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0) + .get_settings = rtl8168_get_settings, + .set_settings = rtl8168_set_settings, +#else + .get_link_ksettings = rtl8168_get_settings, + .set_link_ksettings = rtl8168_set_settings, +#endif + .get_msglevel = rtl8168_get_msglevel, + .set_msglevel = rtl8168_set_msglevel, +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) + .get_rx_csum = rtl8168_get_rx_csum, + .set_rx_csum = rtl8168_set_rx_csum, + .get_tx_csum = rtl8168_get_tx_csum, + .set_tx_csum = rtl8168_set_tx_csum, + .get_sg = ethtool_op_get_sg, + .set_sg = ethtool_op_set_sg, +#ifdef NETIF_F_TSO + .get_tso = ethtool_op_get_tso, + .set_tso = ethtool_op_set_tso, +#endif +#endif + .get_regs = rtl8168_get_regs, + .get_wol = rtl8168_get_wol, + .set_wol = rtl8168_set_wol, + .get_strings = rtl8168_get_strings, +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) + .get_stats_count = rtl8168_get_stats_count, +#else + .get_sset_count = rtl8168_get_sset_count, +#endif + .get_ethtool_stats = rtl8168_get_ethtool_stats, +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) +#ifdef ETHTOOL_GPERMADDR + .get_perm_addr = ethtool_op_get_perm_addr, +#endif +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) + .get_eeprom = rtl_get_eeprom, + .get_eeprom_len = rtl_get_eeprom_len, +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) + .get_ts_info = ethtool_op_get_ts_info, +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) + .get_eee = rtl_ethtool_get_eee, + .set_eee = rtl_ethtool_set_eee, +#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0) */ + .nway_reset = rtl_nway_reset, +}; +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) + +#if 0 + +static int rtl8168_enable_green_feature(struct rtl8168_private *tp) +{ + u16 gphy_val; + unsigned long flags; + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + rtl8168_mdio_write(tp, 0x1F, 0x0003); + gphy_val = rtl8168_mdio_read(tp, 0x10) | 0x0400; + rtl8168_mdio_write(tp, 0x10, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x19) | 0x0001; + rtl8168_mdio_write(tp, 0x19, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + gphy_val = rtl8168_mdio_read(tp, 0x01) & ~0x0100; + rtl8168_mdio_write(tp, 0x01, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); + mdelay(20); + break; + + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + rtl8168_mdio_write(tp, 0x1f, 0x0003); + gphy_val = rtl8168_mdio_read(tp, 0x10); + gphy_val |= BIT_10; + rtl8168_mdio_write(tp, 0x10, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x19); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x19, gphy_val); + 
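/* page 0x0005: set BIT_8 of PHY reg 0x01 to finish enabling the
+         * green feature */
+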
rtl8168_mdio_write(tp, 0x1F, 0x0005); + gphy_val = rtl8168_mdio_read(tp, 0x01); + gphy_val |= BIT_8; + rtl8168_mdio_write(tp, 0x01, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); + break; + case CFG_METHOD_21: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_set_eth_phy_bit( tp, 0x14, BIT_14 ); + rtl8168_mdio_write(tp, 0x1F, 0x0A40); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8045); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0x804d); + rtl8168_mdio_write(tp, 0x14, 0x1222); + rtl8168_mdio_write(tp, 0x13, 0x805d); + rtl8168_mdio_write(tp, 0x14, 0x0022); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_set_eth_phy_bit( tp, 0x14, BIT_15 ); + rtl8168_mdio_write(tp, 0x1F, 0x0A40); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); + break; + default: + dev_printk(KERN_DEBUG, tp_to_dev(tp), "Not Support Green Feature\n"); + break; + } + + return 0; +} + +static int rtl8168_disable_green_feature(struct rtl8168_private *tp) +{ + u16 gphy_val; + unsigned long flags; + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + rtl8168_mdio_write(tp, 0x1F, 0x0005); + gphy_val = rtl8168_mdio_read(tp, 0x01) | 0x0100; + rtl8168_mdio_write(tp, 0x01, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + gphy_val = rtl8168_mdio_read(tp, 0x10) & ~0x0400; + rtl8168_mdio_write(tp, 0x10, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x19) & ~0x0001; + rtl8168_mdio_write(tp, 0x19, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x06) & ~0x7000; + gphy_val |= 0x3000; + rtl8168_mdio_write(tp, 0x06, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x0D) & 0x0700; + gphy_val |= 0x0500; + rtl8168_mdio_write(tp, 0x0D, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + rtl8168_mdio_write(tp, 0x1f, 0x0003); + gphy_val = rtl8168_mdio_read(tp, 0x19); + gphy_val &= ~BIT_0; + rtl8168_mdio_write(tp, 0x19, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x10); + gphy_val &= ~BIT_10; + rtl8168_mdio_write(tp, 0x10, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + break; + case CFG_METHOD_21: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_clear_eth_phy_bit( tp, 0x14, BIT_14 ); + rtl8168_mdio_write(tp, 0x1F, 0x0A40); + rtl8168_mdio_write(tp, 0x00, 0x9200); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8045); + rtl8168_mdio_write(tp, 0x14, 0x2444); + rtl8168_mdio_write(tp, 0x13, 0x804d); + rtl8168_mdio_write(tp, 0x14, 0x2444); + rtl8168_mdio_write(tp, 0x13, 0x805d); + rtl8168_mdio_write(tp, 0x14, 0x2444); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_set_eth_phy_bit( tp, 0x14, BIT_15 ); + rtl8168_mdio_write(tp, 0x1F, 0x0A40); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + 
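/* BMCR <- 0x9200: PHY soft reset with auto-negotiation enabled and
+         * restarted, so the change takes effect */
+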
rtl8168_mdio_write(tp, 0x00, 0x9200); + break; + default: + dev_printk(KERN_DEBUG, tp_to_dev(tp), "Not Support Green Feature\n"); + break; + } + + return 0; +} + +#endif + +static void rtl8168_get_mac_version(struct rtl8168_private *tp) +{ + u32 reg,val32; + u32 ICVerID; + + val32 = RTL_R32(tp, TxConfig); + reg = val32 & 0x7c800000; + ICVerID = val32 & 0x00700000; + + switch (reg) { + case 0x30000000: + tp->mcfg = CFG_METHOD_1; + tp->efuse_ver = EFUSE_NOT_SUPPORT; + break; + case 0x38000000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_2; + } else if (ICVerID == 0x00500000) { + tp->mcfg = CFG_METHOD_3; + } else { + tp->mcfg = CFG_METHOD_3; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_NOT_SUPPORT; + break; + case 0x3C000000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_4; + } else if (ICVerID == 0x00200000) { + tp->mcfg = CFG_METHOD_5; + } else if (ICVerID == 0x00400000) { + tp->mcfg = CFG_METHOD_6; + } else { + tp->mcfg = CFG_METHOD_6; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_NOT_SUPPORT; + break; + case 0x3C800000: + if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_7; + } else if (ICVerID == 0x00300000) { + tp->mcfg = CFG_METHOD_8; + } else { + tp->mcfg = CFG_METHOD_8; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_NOT_SUPPORT; + break; + case 0x28000000: + if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_9; + } else if (ICVerID == 0x00300000) { + tp->mcfg = CFG_METHOD_10; + } else { + tp->mcfg = CFG_METHOD_10; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V1; + break; + case 0x28800000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_11; + } else if (ICVerID == 0x00200000) { + tp->mcfg = CFG_METHOD_12; + RTL_W32(tp, 0xD0, RTL_R32(tp, 0xD0) | 0x00020000); + } else if (ICVerID == 0x00300000) { + tp->mcfg = CFG_METHOD_13; + } else { + tp->mcfg = CFG_METHOD_13; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V1; + break; + case 0x2C000000: + if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_14; + } else if (ICVerID == 0x00200000) { + tp->mcfg = CFG_METHOD_15; + } else { + tp->mcfg = CFG_METHOD_15; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V2; + break; + case 0x2C800000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_16; + } else if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_17; + } else { + tp->mcfg = CFG_METHOD_17; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + case 0x48000000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_18; + } else if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_19; + } else { + tp->mcfg = CFG_METHOD_19; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + case 0x48800000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_20; + } else { + tp->mcfg = CFG_METHOD_20; + tp->HwIcVerUnknown = TRUE; + } + + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + case 0x4C000000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_21; + } else if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_22; + } else { + tp->mcfg = CFG_METHOD_22; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + case 0x50000000: + if (ICVerID == 0x00000000) { + tp->mcfg = CFG_METHOD_23; + } else if (ICVerID == 0x00100000) { + tp->mcfg = CFG_METHOD_27; + } else if (ICVerID == 0x00200000) { + tp->mcfg = CFG_METHOD_28; + } else { + tp->mcfg = CFG_METHOD_28; + tp->HwIcVerUnknown = TRUE; + } + tp->efuse_ver = EFUSE_SUPPORT_V3; + break; + case 0x50800000: + if 
(ICVerID == 0x00000000) {
+                        tp->mcfg = CFG_METHOD_24;
+                } else if (ICVerID == 0x00100000) {
+                        tp->mcfg = CFG_METHOD_25;
+                } else {
+                        tp->mcfg = CFG_METHOD_25;
+                        tp->HwIcVerUnknown = TRUE;
+                }
+                tp->efuse_ver = EFUSE_SUPPORT_V3;
+                break;
+        case 0x5C800000:
+                if (ICVerID == 0x00000000) {
+                        tp->mcfg = CFG_METHOD_26;
+                } else {
+                        tp->mcfg = CFG_METHOD_26;
+                        tp->HwIcVerUnknown = TRUE;
+                }
+
+                tp->efuse_ver = EFUSE_SUPPORT_V3;
+                break;
+        case 0x54000000:
+                if (ICVerID == 0x00000000) {
+                        tp->mcfg = CFG_METHOD_29;
+                } else if (ICVerID == 0x00100000) {
+                        tp->mcfg = CFG_METHOD_30;
+                } else {
+                        tp->mcfg = CFG_METHOD_30;
+                        tp->HwIcVerUnknown = TRUE;
+                }
+
+                tp->efuse_ver = EFUSE_SUPPORT_V3;
+                break;
+        case 0x54800000:
+                if (ICVerID == 0x00100000) {
+                        tp->mcfg = CFG_METHOD_31;
+                } else if (ICVerID == 0x00200000) {
+                        tp->mcfg = CFG_METHOD_32;
+                } else if (ICVerID == 0x00300000) {
+                        tp->mcfg = CFG_METHOD_33;
+                } else {
+                        tp->mcfg = CFG_METHOD_33;
+                        tp->HwIcVerUnknown = TRUE;
+                }
+
+                tp->efuse_ver = EFUSE_SUPPORT_V3;
+                break;
+        default:
+                printk("unknown chip version (%x)\n",reg);
+                tp->mcfg = CFG_METHOD_DEFAULT;
+                tp->HwIcVerUnknown = TRUE;
+                tp->efuse_ver = EFUSE_NOT_SUPPORT;
+                break;
+        }
+}
+
+static void
+rtl8168_print_mac_version(struct rtl8168_private *tp)
+{
+        int i;
+        for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
+                if (tp->mcfg == rtl_chip_info[i].mcfg) {
+                        dprintk("Realtek PCIe GbE Family Controller mcfg = %04d\n",
+                                rtl_chip_info[i].mcfg);
+                        return;
+                }
+        }
+
+        dprintk("mac_version == Unknown\n");
+}
+
+static u8 rtl8168_calc_efuse_dummy_bit(u16 reg)
+{
+        int s,a,b;
+        u8 dummyBitPos = 0;
+
+
+        s=reg% 32;
+        a=s % 16;
+        b=s/16;
+
+        if (s/16) {
+                dummyBitPos = (u8)(16-a);
+        } else {
+                dummyBitPos = (u8)a;
+        }
+
+        return dummyBitPos;
+}
+
+static u32 rtl8168_decode_efuse_cmd(struct rtl8168_private *tp, u32 DwCmd)
+{
+        u16 reg = (u16)((DwCmd & 0x00FE0000) >> 17);
+        u32 DummyPos = rtl8168_calc_efuse_dummy_bit(reg);
+        u32 DeCodeDwCmd = DwCmd;
+        u32 Dw17BitData;
+
+
+        if (tp->efuse_ver < 3) {
+                DeCodeDwCmd = (DwCmd>>(DummyPos+1))<<DummyPos;
+                if (DummyPos > 0) {
+                        DeCodeDwCmd |= ((DwCmd<<(32-DummyPos))>>(32-DummyPos));
+                }
+        } else {
+                reg = (u16)((DwCmd & 0x007F0000) >> 16);
+                DummyPos = rtl8168_calc_efuse_dummy_bit(reg);
+                Dw17BitData = ((DwCmd & BIT_23) >> 23);
+                Dw17BitData <<= 16;
+                Dw17BitData |= (DwCmd & 0x0000FFFF);
+                DeCodeDwCmd = (Dw17BitData>>(DummyPos+1))<<DummyPos;
+                if (DummyPos > 0) {
+                        DeCodeDwCmd |= ((Dw17BitData<<(32-DummyPos))>>(32-DummyPos));
+                }
+        }
+
+        return DeCodeDwCmd;
+}
+
+static u8 rtl8168_efuse_read(struct rtl8168_private *tp, u16 reg)
+{
+        u8 efuse_data = 0;
+        u32 temp;
+        int cnt;
+
+        if (tp->efuse_ver == EFUSE_NOT_SUPPORT)
+                return EFUSE_READ_FAIL;
+
+        if (tp->efuse_ver == EFUSE_SUPPORT_V1) {
+                temp = EFUSE_READ | ((reg & EFUSE_Reg_Mask) << EFUSE_Reg_Shift);
+                RTL_W32(tp, EFUSEAR, temp);
+
+                cnt = 0;
+                do {
+                        udelay(100);
+                        temp = RTL_R32(tp, EFUSEAR);
+                        cnt++;
+                } while (!(temp & EFUSE_READ_OK) && (cnt < EFUSE_Check_Cnt));
+
+                if (cnt == EFUSE_Check_Cnt)
+                        efuse_data = EFUSE_READ_FAIL;
+                else
+                        efuse_data = (u8)(RTL_R32(tp, EFUSEAR) & EFUSE_Data_Mask);
+        } else if (tp->efuse_ver == EFUSE_SUPPORT_V2) {
+                temp = (reg/2) & 0x03ff;
+                temp <<= 17;
+                temp |= EFUSE_READ;
+                RTL_W32(tp, EFUSEAR, temp);
+
+                cnt = 0;
+                do {
+                        udelay(100);
+                        temp = RTL_R32(tp, EFUSEAR);
+                        cnt++;
+                } while (!(temp & EFUSE_READ_OK) && (cnt < EFUSE_Check_Cnt));
+
+                if (cnt == EFUSE_Check_Cnt) {
+                        efuse_data = EFUSE_READ_FAIL;
+                } else {
+                        temp = RTL_R32(tp, EFUSEAR);
+                        temp = rtl8168_decode_efuse_cmd(tp, temp);
+
+                        if (reg%2) {
+                                temp >>= 8;
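+                                /* odd byte address: the requested byte
+                                 * sits in bits 15:8 of the decoded dword */
+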
efuse_data = (u8)temp; + } else { + efuse_data = (u8)temp; + } + } + } else if (tp->efuse_ver == EFUSE_SUPPORT_V3) { + temp = (reg/2) & 0x03ff; + temp <<= 16; + temp |= EFUSE_READ_V3; + RTL_W32(tp, EFUSEAR, temp); + + cnt = 0; + do { + udelay(100); + temp = RTL_R32(tp, EFUSEAR); + cnt++; + } while ((temp & BIT_31) && (cnt < EFUSE_Check_Cnt)); + + if (cnt == EFUSE_Check_Cnt) { + efuse_data = EFUSE_READ_FAIL; + } else { + temp = RTL_R32(tp, EFUSEAR); + temp = rtl8168_decode_efuse_cmd(tp, temp); + + if (reg%2) { + temp >>= 8; + efuse_data = (u8)temp; + } else { + efuse_data = (u8)temp; + } + } + } + + udelay(20); + + return efuse_data; +} + +static void +rtl8168_tally_counter_addr_fill(struct rtl8168_private *tp) +{ + if (!tp->tally_paddr) + return; + + RTL_W32(tp, CounterAddrHigh, (u64)tp->tally_paddr >> 32); + RTL_W32(tp, CounterAddrLow, (u64)tp->tally_paddr & (DMA_BIT_MASK(32))); +} + +static void +rtl8168_tally_counter_clear(struct rtl8168_private *tp) +{ + if (tp->mcfg == CFG_METHOD_1 || tp->mcfg == CFG_METHOD_2 || + tp->mcfg == CFG_METHOD_3 ) + return; + + if (!tp->tally_paddr) + return; + + RTL_W32(tp, CounterAddrHigh, (u64)tp->tally_paddr >> 32); + RTL_W32(tp, CounterAddrLow, ((u64)tp->tally_paddr & (DMA_BIT_MASK(32))) | CounterReset); +} + +static +u16 +rtl8168_get_phy_state(struct rtl8168_private *tp) +{ + u16 PhyState = 0xFF; + + if (HW_SUPPORT_UPS_MODE(tp) == FALSE) goto exit; + + switch (tp->HwSuppUpsVer) { + case 1: + PhyState = rtl8168_mdio_read_phy_ocp(tp, 0x0A42, 0x10); + PhyState &= 0x7; //bit[2:0] + break; + } + +exit: + return PhyState; +} + +static +bool +rtl8168_wait_phy_state_ready(struct rtl8168_private *tp, + u16 PhyState, + u32 MicroSecondTimeout + ) +{ + u16 TmpPhyState; + u32 WaitCount; + u32 i = 0; + bool PhyStateReady = TRUE; + + if (HW_SUPPORT_UPS_MODE(tp) == FALSE) goto exit; + + WaitCount = MicroSecondTimeout / 1000; + if (WaitCount == 0) WaitCount = 100; + + do { + TmpPhyState = rtl8168_get_phy_state(tp); + mdelay(1); + i++; + } while ((i < WaitCount) && (TmpPhyState != PhyState)); + + PhyStateReady = (i == WaitCount && TmpPhyState != PhyState) ? 
FALSE : TRUE; + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) + WARN_ON_ONCE(i == WaitCount); +#endif + +exit: + return PhyStateReady; +} + +static +bool +rtl8168_test_phy_ocp(struct rtl8168_private *tp) +{ + bool RestorePhyOcpReg = FALSE; + + if (tp->TestPhyOcpReg == FALSE) goto exit; + + if (tp->HwSuppEsdVer == 2) { + u16 PhyRegValue; + u8 ResetPhyType = 0; + + if (HW_PHY_STATUS_INI == rtl8168_get_phy_state(tp)) { + ResetPhyType = 1; + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0C40); + PhyRegValue = rtl8168_mdio_read(tp, 0x12); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + if ((PhyRegValue & 0x03) != 0x00) { + ResetPhyType = 2; + } + } + + if (ResetPhyType > 0) { + u32 WaitCnt; + struct net_device *dev = tp->dev; + + printk(KERN_ERR "%s: test_phy_ocp ResetPhyType = 0x%02x\n.\n", dev->name, ResetPhyType); + + rtl8168_mdio_write(tp, 0x1F, 0x0C41); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + mdelay(24); //24ms + + rtl8168_mdio_write(tp, 0x1F, 0x0C40); + PhyRegValue = rtl8168_mdio_read(tp, 0x12); + if ((PhyRegValue & 0x03) != 0x00) { + WaitCnt = 0; + while ((PhyRegValue & 0x03) != 0x00 && WaitCnt < 5) { + rtl8168_mdio_write(tp, 0x1F, 0x0C40); + rtl8168_set_eth_phy_bit(tp, 0x11, (BIT_15 | BIT_14)); + rtl8168_clear_eth_phy_bit(tp, 0x11, (BIT_15 | BIT_14)); + mdelay(100); + rtl8168_mdio_write(tp, 0x1F, 0x0C40); + PhyRegValue = rtl8168_mdio_read(tp, 0x12); + WaitCnt++; + } + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A46); + rtl8168_mdio_write(tp, 0x10, tp->BackupPhyFuseDout_15_0); + rtl8168_mdio_write(tp, 0x12, tp->BackupPhyFuseDout_47_32); + rtl8168_mdio_write(tp, 0x13, tp->BackupPhyFuseDout_63_48); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_wait_phy_state_ready(tp, HW_PHY_STATUS_INI, 5000000); + rtl8168_mdio_write(tp, 0x1F, 0x0A46); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_wait_phy_state_ready(tp, HW_PHY_STATUS_LAN_ON, 500000); + + tp->HwHasWrRamCodeToMicroP = FALSE; + + RestorePhyOcpReg = TRUE; + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + +exit: + return RestorePhyOcpReg; +} + +static int +rtl8168_is_ups_resume(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + return (rtl8168_mac_ocp_read(tp, 0xD408) & BIT_0); +} + +static void +rtl8168_clear_ups_resume_bit(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + rtl8168_mac_ocp_write(tp, 0xD408, rtl8168_mac_ocp_read(tp, 0xD408) & ~(BIT_0)); +} + +static void +rtl8168_wait_phy_ups_resume(struct net_device *dev, u16 PhyState) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 TmpPhyState; + int i = 0; + + do { + TmpPhyState = rtl8168_mdio_read_phy_ocp(tp, 0x0A42, 0x10); + TmpPhyState &= 0x7; + mdelay(1); + i++; + } while ((i < 100) && (TmpPhyState != PhyState)); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) + WARN_ON_ONCE(i == 100); +#endif +} + +void +rtl8168_enable_now_is_oob(struct rtl8168_private *tp) +{ + if ( tp->HwSuppNowIsOobVer == 1 ) { + RTL_W8(tp, MCUCmd_reg, RTL_R8(tp, MCUCmd_reg) | Now_is_oob); + } +} + +void +rtl8168_disable_now_is_oob(struct rtl8168_private *tp) +{ + if ( tp->HwSuppNowIsOobVer == 1 ) { + RTL_W8(tp, MCUCmd_reg, RTL_R8(tp, MCUCmd_reg) & ~Now_is_oob); + } +} + +static void +rtl8168_switch_to_sgmii_mode( + struct rtl8168_private *tp +) +{ + if (FALSE == HW_SUPP_SERDES_PHY(tp)) return; + + switch (tp->HwSuppSerDesPhyVer) { + case 1: + rtl8168_mac_ocp_write(tp, 0xEB00, 0x2); + rtl8168_set_mcu_ocp_bit(tp, 
0xEB16, BIT_1); + break; + } +} + +static void +rtl8168_exit_oob(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 data16; + + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~(AcceptErr | AcceptRunt | AcceptBroadcast | AcceptMulticast | AcceptMyPhys | AcceptAllPhys)); + + if (HW_SUPP_SERDES_PHY(tp)) { + if (tp->HwSuppSerDesPhyVer == 1) { + rtl8168_switch_to_sgmii_mode(tp); + } + } + + if (HW_DASH_SUPPORT_DASH(tp)) { + rtl8168_driver_start(tp); + rtl8168_dash2_disable_txrx(dev); +#ifdef ENABLE_DASH_SUPPORT + DashHwInit(dev); +#endif + } + + //Disable realwow function + switch (tp->mcfg) { + case CFG_METHOD_18: + case CFG_METHOD_19: + RTL_W32(tp, MACOCP, 0xE5A90000); + RTL_W32(tp, MACOCP, 0xF2100010); + break; + case CFG_METHOD_20: + RTL_W32(tp, MACOCP, 0xE5A90000); + RTL_W32(tp, MACOCP, 0xE4640000); + RTL_W32(tp, MACOCP, 0xF2100010); + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + RTL_W32(tp, MACOCP, 0x605E0000); + RTL_W32(tp, MACOCP, (0xE05E << 16) | (RTL_R32(tp, MACOCP) & 0xFFFE)); + RTL_W32(tp, MACOCP, 0xE9720000); + RTL_W32(tp, MACOCP, 0xF2140010); + break; + case CFG_METHOD_26: + RTL_W32(tp, MACOCP, 0xE05E00FF); + RTL_W32(tp, MACOCP, 0xE9720000); + rtl8168_mac_ocp_write(tp, 0xE428, 0x0010); + break; + } + +#ifdef ENABLE_REALWOW_SUPPORT + rtl8168_realwow_hw_init(dev); +#else + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + rtl8168_eri_write(tp, 0x174, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_mac_ocp_write(tp, 0xE428, 0x0010); + break; + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_eri_write(tp, 0x174, 2, 0x00FF, ERIAR_ExGMAC); + rtl8168_mac_ocp_write(tp, 0xE428, 0x0010); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: { + u32 csi_tmp; + csi_tmp = rtl8168_eri_read(tp, 0x174, 2, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_8); + csi_tmp |= (BIT_15); + rtl8168_eri_write(tp, 0x174, 2, csi_tmp, ERIAR_ExGMAC); + rtl8168_mac_ocp_write(tp, 0xE428, 0x0010); + } + break; + } +#endif //ENABLE_REALWOW_SUPPORT + + rtl8168_nic_reset(dev); + + switch (tp->mcfg) { + case CFG_METHOD_20: + rtl8168_wait_ll_share_fifo_ready(dev); + + data16 = rtl8168_mac_ocp_read(tp, 0xD4DE) | BIT_15; + rtl8168_mac_ocp_write(tp, 0xD4DE, data16); + + rtl8168_wait_ll_share_fifo_ready(dev); + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_disable_now_is_oob(tp); + + data16 = rtl8168_mac_ocp_read(tp, 0xE8DE) & ~BIT_14; + rtl8168_mac_ocp_write(tp, 0xE8DE, data16); + rtl8168_wait_ll_share_fifo_ready(dev); + + data16 = rtl8168_mac_ocp_read(tp, 0xE8DE) | BIT_15; + rtl8168_mac_ocp_write(tp, 0xE8DE, data16); + + rtl8168_wait_ll_share_fifo_ready(dev); + break; + } + + //wait ups resume (phy state 2) + if (HW_SUPPORT_UPS_MODE(tp)) + if (rtl8168_is_ups_resume(dev)) { + rtl8168_wait_phy_ups_resume(dev, HW_PHY_STATUS_EXT_INI); + rtl8168_clear_ups_resume_bit(dev); + } + +#ifdef ENABLE_FIBER_SUPPORT + if (HW_FIBER_MODE_ENABLED(tp)) + rtl8168_hw_init_fiber_nic(dev); +#endif //ENABLE_FIBER_SUPPORT +} + +void +rtl8168_hw_disable_mac_mcu_bps(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (tp->HwSuppAspmClkIntrLock) { + rtl8168_enable_cfg9346_write(tp); + rtl8168_hw_aspm_clkreq_enable(tp, false); + 
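/* ClkReq is now off; close the Config9346 write window before the
+         * MAC MCU break point registers are cleared below */
+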
rtl8168_disable_cfg9346_write(tp); + } + + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mac_ocp_write(tp, 0xFC38, 0x0000); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mac_ocp_write(tp, 0xFC28, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC30, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC32, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC34, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC36, 0x0000); + mdelay(3); + rtl8168_mac_ocp_write(tp, 0xFC26, 0x0000); + break; + } +} + +#ifndef ENABLE_USE_FIRMWARE_FILE +static void +rtl8168_set_mac_mcu_8168g_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + rtl8168_mac_ocp_write(tp, 0xE43C, 0x0000); + rtl8168_mac_ocp_write(tp, 0xE43E, 0x0000); + + rtl8168_mac_ocp_write(tp, 0xE434, 0x0004); + rtl8168_mac_ocp_write(tp, 0xE43C, 0x0004); + + rtl8168_hw_disable_mac_mcu_bps(dev); + + rtl8168_mac_ocp_write( tp, 0xF800, 0xE008 ); + rtl8168_mac_ocp_write( tp, 0xF802, 0xE01B ); + rtl8168_mac_ocp_write( tp, 0xF804, 0xE022 ); + rtl8168_mac_ocp_write( tp, 0xF806, 0xE094 ); + rtl8168_mac_ocp_write( tp, 0xF808, 0xE097 ); + rtl8168_mac_ocp_write( tp, 0xF80A, 0xE09A ); + rtl8168_mac_ocp_write( tp, 0xF80C, 0xE0B3 ); + rtl8168_mac_ocp_write( tp, 0xF80E, 0xE0BA ); + rtl8168_mac_ocp_write( tp, 0xF810, 0x49D2 ); + rtl8168_mac_ocp_write( tp, 0xF812, 0xF10D ); + rtl8168_mac_ocp_write( tp, 0xF814, 0x766C ); + rtl8168_mac_ocp_write( tp, 0xF816, 0x49E2 ); + rtl8168_mac_ocp_write( tp, 0xF818, 0xF00A ); + rtl8168_mac_ocp_write( tp, 0xF81A, 0x1EC0 ); + rtl8168_mac_ocp_write( tp, 0xF81C, 0x8EE1 ); + rtl8168_mac_ocp_write( tp, 0xF81E, 0xC60A ); + rtl8168_mac_ocp_write( tp, 0xF820, 0x77C0 ); + rtl8168_mac_ocp_write( tp, 0xF822, 0x4870 ); + rtl8168_mac_ocp_write( tp, 0xF824, 0x9FC0 ); + rtl8168_mac_ocp_write( tp, 0xF826, 0x1EA0 ); + rtl8168_mac_ocp_write( tp, 0xF828, 0xC707 ); + rtl8168_mac_ocp_write( tp, 0xF82A, 0x8EE1 ); + rtl8168_mac_ocp_write( tp, 0xF82C, 0x9D6C ); + rtl8168_mac_ocp_write( tp, 0xF82E, 0xC603 ); + rtl8168_mac_ocp_write( tp, 0xF830, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF832, 0xB416 ); + rtl8168_mac_ocp_write( tp, 0xF834, 0x0076 ); + rtl8168_mac_ocp_write( tp, 0xF836, 0xE86C ); + rtl8168_mac_ocp_write( tp, 0xF838, 0xC406 ); + rtl8168_mac_ocp_write( tp, 0xF83A, 0x7580 ); + rtl8168_mac_ocp_write( tp, 0xF83C, 0x4852 ); + rtl8168_mac_ocp_write( tp, 0xF83E, 0x8D80 ); + rtl8168_mac_ocp_write( tp, 0xF840, 0xC403 ); + rtl8168_mac_ocp_write( tp, 0xF842, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF844, 0xD3E0 ); + rtl8168_mac_ocp_write( tp, 0xF846, 0x02C8 ); + rtl8168_mac_ocp_write( tp, 0xF848, 0x8918 ); + rtl8168_mac_ocp_write( tp, 0xF84A, 0xE815 ); + rtl8168_mac_ocp_write( tp, 0xF84C, 0x1100 ); + rtl8168_mac_ocp_write( tp, 0xF84E, 0xF011 ); + rtl8168_mac_ocp_write( tp, 0xF850, 0xE812 ); + rtl8168_mac_ocp_write( tp, 0xF852, 0x4990 ); + rtl8168_mac_ocp_write( tp, 0xF854, 0xF002 ); + rtl8168_mac_ocp_write( tp, 0xF856, 0xE817 ); + rtl8168_mac_ocp_write( tp, 0xF858, 0xE80E ); + rtl8168_mac_ocp_write( tp, 0xF85A, 0x4992 ); + rtl8168_mac_ocp_write( tp, 0xF85C, 
0xF002 ); + rtl8168_mac_ocp_write( tp, 0xF85E, 0xE80E ); + rtl8168_mac_ocp_write( tp, 0xF860, 0xE80A ); + rtl8168_mac_ocp_write( tp, 0xF862, 0x4993 ); + rtl8168_mac_ocp_write( tp, 0xF864, 0xF002 ); + rtl8168_mac_ocp_write( tp, 0xF866, 0xE818 ); + rtl8168_mac_ocp_write( tp, 0xF868, 0xE806 ); + rtl8168_mac_ocp_write( tp, 0xF86A, 0x4991 ); + rtl8168_mac_ocp_write( tp, 0xF86C, 0xF002 ); + rtl8168_mac_ocp_write( tp, 0xF86E, 0xE838 ); + rtl8168_mac_ocp_write( tp, 0xF870, 0xC25E ); + rtl8168_mac_ocp_write( tp, 0xF872, 0xBA00 ); + rtl8168_mac_ocp_write( tp, 0xF874, 0xC056 ); + rtl8168_mac_ocp_write( tp, 0xF876, 0x7100 ); + rtl8168_mac_ocp_write( tp, 0xF878, 0xFF80 ); + rtl8168_mac_ocp_write( tp, 0xF87A, 0x7100 ); + rtl8168_mac_ocp_write( tp, 0xF87C, 0x4892 ); + rtl8168_mac_ocp_write( tp, 0xF87E, 0x4813 ); + rtl8168_mac_ocp_write( tp, 0xF880, 0x8900 ); + rtl8168_mac_ocp_write( tp, 0xF882, 0xE00A ); + rtl8168_mac_ocp_write( tp, 0xF884, 0x7100 ); + rtl8168_mac_ocp_write( tp, 0xF886, 0x4890 ); + rtl8168_mac_ocp_write( tp, 0xF888, 0x4813 ); + rtl8168_mac_ocp_write( tp, 0xF88A, 0x8900 ); + rtl8168_mac_ocp_write( tp, 0xF88C, 0xC74B ); + rtl8168_mac_ocp_write( tp, 0xF88E, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF890, 0x48C2 ); + rtl8168_mac_ocp_write( tp, 0xF892, 0x4841 ); + rtl8168_mac_ocp_write( tp, 0xF894, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF896, 0xC746 ); + rtl8168_mac_ocp_write( tp, 0xF898, 0x74FC ); + rtl8168_mac_ocp_write( tp, 0xF89A, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF89C, 0xF120 ); + rtl8168_mac_ocp_write( tp, 0xF89E, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF8A0, 0xF11E ); + rtl8168_mac_ocp_write( tp, 0xF8A2, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF8A4, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF8A6, 0xF01B ); + rtl8168_mac_ocp_write( tp, 0xF8A8, 0x49C6 ); + rtl8168_mac_ocp_write( tp, 0xF8AA, 0xF119 ); + rtl8168_mac_ocp_write( tp, 0xF8AC, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF8AE, 0x49C4 ); + rtl8168_mac_ocp_write( tp, 0xF8B0, 0xF013 ); + rtl8168_mac_ocp_write( tp, 0xF8B2, 0xC536 ); + rtl8168_mac_ocp_write( tp, 0xF8B4, 0x74B0 ); + rtl8168_mac_ocp_write( tp, 0xF8B6, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF8B8, 0xF1FD ); + rtl8168_mac_ocp_write( tp, 0xF8BA, 0xC537 ); + rtl8168_mac_ocp_write( tp, 0xF8BC, 0xC434 ); + rtl8168_mac_ocp_write( tp, 0xF8BE, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF8C0, 0xC435 ); + rtl8168_mac_ocp_write( tp, 0xF8C2, 0x1C13 ); + rtl8168_mac_ocp_write( tp, 0xF8C4, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xF8C6, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xF8C8, 0xC52B ); + rtl8168_mac_ocp_write( tp, 0xF8CA, 0x74B0 ); + rtl8168_mac_ocp_write( tp, 0xF8CC, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF8CE, 0xF1FD ); + rtl8168_mac_ocp_write( tp, 0xF8D0, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF8D2, 0x48C4 ); + rtl8168_mac_ocp_write( tp, 0xF8D4, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF8D6, 0x7100 ); + rtl8168_mac_ocp_write( tp, 0xF8D8, 0x4893 ); + rtl8168_mac_ocp_write( tp, 0xF8DA, 0x8900 ); + rtl8168_mac_ocp_write( tp, 0xF8DC, 0xFF80 ); + rtl8168_mac_ocp_write( tp, 0xF8DE, 0xC520 ); + rtl8168_mac_ocp_write( tp, 0xF8E0, 0x74B0 ); + rtl8168_mac_ocp_write( tp, 0xF8E2, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF8E4, 0xF11C ); + rtl8168_mac_ocp_write( tp, 0xF8E6, 0xC71E ); + rtl8168_mac_ocp_write( tp, 0xF8E8, 0x74FC ); + rtl8168_mac_ocp_write( tp, 0xF8EA, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF8EC, 0xF118 ); + rtl8168_mac_ocp_write( tp, 0xF8EE, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF8F0, 0xF116 ); + rtl8168_mac_ocp_write( tp, 0xF8F2, 0x74F8 ); + 
rtl8168_mac_ocp_write( tp, 0xF8F4, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF8F6, 0xF013 ); + rtl8168_mac_ocp_write( tp, 0xF8F8, 0x48C3 ); + rtl8168_mac_ocp_write( tp, 0xF8FA, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF8FC, 0xC516 ); + rtl8168_mac_ocp_write( tp, 0xF8FE, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xF900, 0x49CE ); + rtl8168_mac_ocp_write( tp, 0xF902, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xF904, 0xC411 ); + rtl8168_mac_ocp_write( tp, 0xF906, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF908, 0xC411 ); + rtl8168_mac_ocp_write( tp, 0xF90A, 0x1C13 ); + rtl8168_mac_ocp_write( tp, 0xF90C, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xF90E, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xF910, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xF912, 0x49CF ); + rtl8168_mac_ocp_write( tp, 0xF914, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xF916, 0x7100 ); + rtl8168_mac_ocp_write( tp, 0xF918, 0x4891 ); + rtl8168_mac_ocp_write( tp, 0xF91A, 0x8900 ); + rtl8168_mac_ocp_write( tp, 0xF91C, 0xFF80 ); + rtl8168_mac_ocp_write( tp, 0xF91E, 0xE400 ); + rtl8168_mac_ocp_write( tp, 0xF920, 0xD3E0 ); + rtl8168_mac_ocp_write( tp, 0xF922, 0xE000 ); + rtl8168_mac_ocp_write( tp, 0xF924, 0x0481 ); + rtl8168_mac_ocp_write( tp, 0xF926, 0x0C81 ); + rtl8168_mac_ocp_write( tp, 0xF928, 0xDE20 ); + rtl8168_mac_ocp_write( tp, 0xF92A, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xF92C, 0x0992 ); + rtl8168_mac_ocp_write( tp, 0xF92E, 0x1B76 ); + rtl8168_mac_ocp_write( tp, 0xF930, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF932, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF934, 0x059C ); + rtl8168_mac_ocp_write( tp, 0xF936, 0x1B76 ); + rtl8168_mac_ocp_write( tp, 0xF938, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF93A, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF93C, 0x065A ); + rtl8168_mac_ocp_write( tp, 0xF93E, 0xB400 ); + rtl8168_mac_ocp_write( tp, 0xF940, 0x18DE ); + rtl8168_mac_ocp_write( tp, 0xF942, 0x2008 ); + rtl8168_mac_ocp_write( tp, 0xF944, 0x4001 ); + rtl8168_mac_ocp_write( tp, 0xF946, 0xF10F ); + rtl8168_mac_ocp_write( tp, 0xF948, 0x7342 ); + rtl8168_mac_ocp_write( tp, 0xF94A, 0x1880 ); + rtl8168_mac_ocp_write( tp, 0xF94C, 0x2008 ); + rtl8168_mac_ocp_write( tp, 0xF94E, 0x0009 ); + rtl8168_mac_ocp_write( tp, 0xF950, 0x4018 ); + rtl8168_mac_ocp_write( tp, 0xF952, 0xF109 ); + rtl8168_mac_ocp_write( tp, 0xF954, 0x7340 ); + rtl8168_mac_ocp_write( tp, 0xF956, 0x25BC ); + rtl8168_mac_ocp_write( tp, 0xF958, 0x130F ); + rtl8168_mac_ocp_write( tp, 0xF95A, 0xF105 ); + rtl8168_mac_ocp_write( tp, 0xF95C, 0xC00A ); + rtl8168_mac_ocp_write( tp, 0xF95E, 0x7300 ); + rtl8168_mac_ocp_write( tp, 0xF960, 0x4831 ); + rtl8168_mac_ocp_write( tp, 0xF962, 0x9B00 ); + rtl8168_mac_ocp_write( tp, 0xF964, 0xB000 ); + rtl8168_mac_ocp_write( tp, 0xF966, 0x7340 ); + rtl8168_mac_ocp_write( tp, 0xF968, 0x8320 ); + rtl8168_mac_ocp_write( tp, 0xF96A, 0xC302 ); + rtl8168_mac_ocp_write( tp, 0xF96C, 0xBB00 ); + rtl8168_mac_ocp_write( tp, 0xF96E, 0x0C12 ); + rtl8168_mac_ocp_write( tp, 0xF970, 0xE860 ); + rtl8168_mac_ocp_write( tp, 0xF972, 0xC406 ); + rtl8168_mac_ocp_write( tp, 0xF974, 0x7580 ); + rtl8168_mac_ocp_write( tp, 0xF976, 0x4851 ); + rtl8168_mac_ocp_write( tp, 0xF978, 0x8D80 ); + rtl8168_mac_ocp_write( tp, 0xF97A, 0xC403 ); + rtl8168_mac_ocp_write( tp, 0xF97C, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF97E, 0xD3E0 ); + rtl8168_mac_ocp_write( tp, 0xF980, 0x02C8 ); + rtl8168_mac_ocp_write( tp, 0xF982, 0xC406 ); + rtl8168_mac_ocp_write( tp, 0xF984, 0x7580 ); + rtl8168_mac_ocp_write( tp, 0xF986, 0x4850 ); + rtl8168_mac_ocp_write( tp, 0xF988, 0x8D80 ); + rtl8168_mac_ocp_write( tp, 
0xF98A, 0xC403 ); + rtl8168_mac_ocp_write( tp, 0xF98C, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF98E, 0xD3E0 ); + rtl8168_mac_ocp_write( tp, 0xF990, 0x0298 ); + + rtl8168_mac_ocp_write( tp, 0xDE30, 0x0080 ); + + rtl8168_mac_ocp_write( tp, 0xFC26, 0x8000 ); + + rtl8168_mac_ocp_write( tp, 0xFC28, 0x0075 ); + rtl8168_mac_ocp_write( tp, 0xFC2A, 0x02B1 ); + rtl8168_mac_ocp_write( tp, 0xFC2C, 0x0991 ); + rtl8168_mac_ocp_write( tp, 0xFC2E, 0x059B ); + rtl8168_mac_ocp_write( tp, 0xFC30, 0x0659 ); + rtl8168_mac_ocp_write( tp, 0xFC32, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFC34, 0x02C7 ); + rtl8168_mac_ocp_write( tp, 0xFC36, 0x0279 ); +} + +static void +rtl8168_set_mac_mcu_8168gu_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + rtl8168_hw_disable_mac_mcu_bps(dev); + + rtl8168_mac_ocp_write( tp, 0xF800, 0xE008 ); + rtl8168_mac_ocp_write( tp, 0xF802, 0xE011 ); + rtl8168_mac_ocp_write( tp, 0xF804, 0xE015 ); + rtl8168_mac_ocp_write( tp, 0xF806, 0xE018 ); + rtl8168_mac_ocp_write( tp, 0xF808, 0xE01B ); + rtl8168_mac_ocp_write( tp, 0xF80A, 0xE027 ); + rtl8168_mac_ocp_write( tp, 0xF80C, 0xE043 ); + rtl8168_mac_ocp_write( tp, 0xF80E, 0xE065 ); + rtl8168_mac_ocp_write( tp, 0xF810, 0x49E2 ); + rtl8168_mac_ocp_write( tp, 0xF812, 0xF005 ); + rtl8168_mac_ocp_write( tp, 0xF814, 0x49EA ); + rtl8168_mac_ocp_write( tp, 0xF816, 0xF003 ); + rtl8168_mac_ocp_write( tp, 0xF818, 0xC404 ); + rtl8168_mac_ocp_write( tp, 0xF81A, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF81C, 0xC403 ); + rtl8168_mac_ocp_write( tp, 0xF81E, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF820, 0x0496 ); + rtl8168_mac_ocp_write( tp, 0xF822, 0x051A ); + rtl8168_mac_ocp_write( tp, 0xF824, 0x1D01 ); + rtl8168_mac_ocp_write( tp, 0xF826, 0x8DE8 ); + rtl8168_mac_ocp_write( tp, 0xF828, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF82A, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF82C, 0x0206 ); + rtl8168_mac_ocp_write( tp, 0xF82E, 0x1B76 ); + rtl8168_mac_ocp_write( tp, 0xF830, 0xC202 ); + rtl8168_mac_ocp_write( tp, 0xF832, 0xBA00 ); + rtl8168_mac_ocp_write( tp, 0xF834, 0x058A ); + rtl8168_mac_ocp_write( tp, 0xF836, 0x1B76 ); + rtl8168_mac_ocp_write( tp, 0xF838, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF83A, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF83C, 0x0648 ); + rtl8168_mac_ocp_write( tp, 0xF83E, 0x74E6 ); + rtl8168_mac_ocp_write( tp, 0xF840, 0x1B78 ); + rtl8168_mac_ocp_write( tp, 0xF842, 0x46DC ); + rtl8168_mac_ocp_write( tp, 0xF844, 0x1300 ); + rtl8168_mac_ocp_write( tp, 0xF846, 0xF005 ); + rtl8168_mac_ocp_write( tp, 0xF848, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF84A, 0x48C3 ); + rtl8168_mac_ocp_write( tp, 0xF84C, 0x48C4 ); + rtl8168_mac_ocp_write( tp, 0xF84E, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF850, 0x64E7 ); + rtl8168_mac_ocp_write( tp, 0xF852, 0xC302 ); + rtl8168_mac_ocp_write( tp, 0xF854, 0xBB00 ); + rtl8168_mac_ocp_write( tp, 0xF856, 0x068E ); + rtl8168_mac_ocp_write( tp, 0xF858, 0x74E4 ); + rtl8168_mac_ocp_write( tp, 0xF85A, 0x49C5 ); + rtl8168_mac_ocp_write( tp, 0xF85C, 0xF106 ); + rtl8168_mac_ocp_write( tp, 0xF85E, 0x49C6 ); + rtl8168_mac_ocp_write( tp, 0xF860, 0xF107 ); + rtl8168_mac_ocp_write( tp, 0xF862, 0x48C8 ); + rtl8168_mac_ocp_write( tp, 0xF864, 0x48C9 ); + rtl8168_mac_ocp_write( tp, 0xF866, 0xE011 ); + rtl8168_mac_ocp_write( tp, 0xF868, 0x48C9 ); + rtl8168_mac_ocp_write( tp, 0xF86A, 0x4848 ); + rtl8168_mac_ocp_write( tp, 0xF86C, 0xE00E ); + rtl8168_mac_ocp_write( tp, 0xF86E, 0x4848 ); + rtl8168_mac_ocp_write( tp, 0xF870, 0x49C7 ); + rtl8168_mac_ocp_write( tp, 0xF872, 0xF00A ); + rtl8168_mac_ocp_write( 
tp, 0xF874, 0x48C9 ); + rtl8168_mac_ocp_write( tp, 0xF876, 0xC60D ); + rtl8168_mac_ocp_write( tp, 0xF878, 0x1D1F ); + rtl8168_mac_ocp_write( tp, 0xF87A, 0x8DC2 ); + rtl8168_mac_ocp_write( tp, 0xF87C, 0x1D00 ); + rtl8168_mac_ocp_write( tp, 0xF87E, 0x8DC3 ); + rtl8168_mac_ocp_write( tp, 0xF880, 0x1D11 ); + rtl8168_mac_ocp_write( tp, 0xF882, 0x8DC0 ); + rtl8168_mac_ocp_write( tp, 0xF884, 0xE002 ); + rtl8168_mac_ocp_write( tp, 0xF886, 0x4849 ); + rtl8168_mac_ocp_write( tp, 0xF888, 0x94E5 ); + rtl8168_mac_ocp_write( tp, 0xF88A, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF88C, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF88E, 0x0238 ); + rtl8168_mac_ocp_write( tp, 0xF890, 0xE434 ); + rtl8168_mac_ocp_write( tp, 0xF892, 0x49D9 ); + rtl8168_mac_ocp_write( tp, 0xF894, 0xF01B ); + rtl8168_mac_ocp_write( tp, 0xF896, 0xC31E ); + rtl8168_mac_ocp_write( tp, 0xF898, 0x7464 ); + rtl8168_mac_ocp_write( tp, 0xF89A, 0x49C4 ); + rtl8168_mac_ocp_write( tp, 0xF89C, 0xF114 ); + rtl8168_mac_ocp_write( tp, 0xF89E, 0xC31B ); + rtl8168_mac_ocp_write( tp, 0xF8A0, 0x6460 ); + rtl8168_mac_ocp_write( tp, 0xF8A2, 0x14FA ); + rtl8168_mac_ocp_write( tp, 0xF8A4, 0xFA02 ); + rtl8168_mac_ocp_write( tp, 0xF8A6, 0xE00F ); + rtl8168_mac_ocp_write( tp, 0xF8A8, 0xC317 ); + rtl8168_mac_ocp_write( tp, 0xF8AA, 0x7460 ); + rtl8168_mac_ocp_write( tp, 0xF8AC, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF8AE, 0xF10B ); + rtl8168_mac_ocp_write( tp, 0xF8B0, 0xC311 ); + rtl8168_mac_ocp_write( tp, 0xF8B2, 0x7462 ); + rtl8168_mac_ocp_write( tp, 0xF8B4, 0x48C1 ); + rtl8168_mac_ocp_write( tp, 0xF8B6, 0x9C62 ); + rtl8168_mac_ocp_write( tp, 0xF8B8, 0x4841 ); + rtl8168_mac_ocp_write( tp, 0xF8BA, 0x9C62 ); + rtl8168_mac_ocp_write( tp, 0xF8BC, 0xC30A ); + rtl8168_mac_ocp_write( tp, 0xF8BE, 0x1C04 ); + rtl8168_mac_ocp_write( tp, 0xF8C0, 0x8C60 ); + rtl8168_mac_ocp_write( tp, 0xF8C2, 0xE004 ); + rtl8168_mac_ocp_write( tp, 0xF8C4, 0x1C15 ); + rtl8168_mac_ocp_write( tp, 0xF8C6, 0xC305 ); + rtl8168_mac_ocp_write( tp, 0xF8C8, 0x8C60 ); + rtl8168_mac_ocp_write( tp, 0xF8CA, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF8CC, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF8CE, 0x0374 ); + rtl8168_mac_ocp_write( tp, 0xF8D0, 0xE434 ); + rtl8168_mac_ocp_write( tp, 0xF8D2, 0xE030 ); + rtl8168_mac_ocp_write( tp, 0xF8D4, 0xE61C ); + rtl8168_mac_ocp_write( tp, 0xF8D6, 0xE906 ); + rtl8168_mac_ocp_write( tp, 0xF8D8, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF8DA, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF8DC, 0x0000 ); + + rtl8168_mac_ocp_write( tp, 0xFC26, 0x8000 ); + + rtl8168_mac_ocp_write( tp, 0xFC28, 0x0493 ); + rtl8168_mac_ocp_write( tp, 0xFC2A, 0x0205 ); + rtl8168_mac_ocp_write( tp, 0xFC2C, 0x0589 ); + rtl8168_mac_ocp_write( tp, 0xFC2E, 0x0647 ); + rtl8168_mac_ocp_write( tp, 0xFC30, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFC32, 0x0215 ); + rtl8168_mac_ocp_write( tp, 0xFC34, 0x0285 ); +} + +static void +rtl8168_set_mac_mcu_8168gu_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + rtl8168_hw_disable_mac_mcu_bps(dev); + + rtl8168_mac_ocp_write( tp, 0xF800, 0xE008 ); + rtl8168_mac_ocp_write( tp, 0xF802, 0xE00A ); + rtl8168_mac_ocp_write( tp, 0xF804, 0xE00D ); + rtl8168_mac_ocp_write( tp, 0xF806, 0xE02F ); + rtl8168_mac_ocp_write( tp, 0xF808, 0xE031 ); + rtl8168_mac_ocp_write( tp, 0xF80A, 0xE038 ); + rtl8168_mac_ocp_write( tp, 0xF80C, 0xE03A ); + rtl8168_mac_ocp_write( tp, 0xF80E, 0xE051 ); + rtl8168_mac_ocp_write( tp, 0xF810, 0xC202 ); + rtl8168_mac_ocp_write( tp, 0xF812, 0xBA00 ); + rtl8168_mac_ocp_write( tp, 0xF814, 0x0DFC ); + rtl8168_mac_ocp_write( 
tp, 0xF816, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xF818, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF81A, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF81C, 0x0A30 ); + rtl8168_mac_ocp_write( tp, 0xF81E, 0x49D9 ); + rtl8168_mac_ocp_write( tp, 0xF820, 0xF019 ); + rtl8168_mac_ocp_write( tp, 0xF822, 0xC520 ); + rtl8168_mac_ocp_write( tp, 0xF824, 0x64A5 ); + rtl8168_mac_ocp_write( tp, 0xF826, 0x1400 ); + rtl8168_mac_ocp_write( tp, 0xF828, 0xF007 ); + rtl8168_mac_ocp_write( tp, 0xF82A, 0x0C01 ); + rtl8168_mac_ocp_write( tp, 0xF82C, 0x8CA5 ); + rtl8168_mac_ocp_write( tp, 0xF82E, 0x1C15 ); + rtl8168_mac_ocp_write( tp, 0xF830, 0xC515 ); + rtl8168_mac_ocp_write( tp, 0xF832, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF834, 0xE00F ); + rtl8168_mac_ocp_write( tp, 0xF836, 0xC513 ); + rtl8168_mac_ocp_write( tp, 0xF838, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF83A, 0x48C8 ); + rtl8168_mac_ocp_write( tp, 0xF83C, 0x48CA ); + rtl8168_mac_ocp_write( tp, 0xF83E, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF840, 0xC510 ); + rtl8168_mac_ocp_write( tp, 0xF842, 0x1B00 ); + rtl8168_mac_ocp_write( tp, 0xF844, 0x9BA0 ); + rtl8168_mac_ocp_write( tp, 0xF846, 0x1B1C ); + rtl8168_mac_ocp_write( tp, 0xF848, 0x483F ); + rtl8168_mac_ocp_write( tp, 0xF84A, 0x9BA2 ); + rtl8168_mac_ocp_write( tp, 0xF84C, 0x1B04 ); + rtl8168_mac_ocp_write( tp, 0xF84E, 0xC506 ); + rtl8168_mac_ocp_write( tp, 0xF850, 0x9BA0 ); + rtl8168_mac_ocp_write( tp, 0xF852, 0xC603 ); + rtl8168_mac_ocp_write( tp, 0xF854, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF856, 0x0298 ); + rtl8168_mac_ocp_write( tp, 0xF858, 0x03DE ); + rtl8168_mac_ocp_write( tp, 0xF85A, 0xE434 ); + rtl8168_mac_ocp_write( tp, 0xF85C, 0xE096 ); + rtl8168_mac_ocp_write( tp, 0xF85E, 0xE860 ); + rtl8168_mac_ocp_write( tp, 0xF860, 0xDE20 ); + rtl8168_mac_ocp_write( tp, 0xF862, 0xD3C0 ); + rtl8168_mac_ocp_write( tp, 0xF864, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF866, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF868, 0x0A64 ); + rtl8168_mac_ocp_write( tp, 0xF86A, 0xC707 ); + rtl8168_mac_ocp_write( tp, 0xF86C, 0x1D00 ); + rtl8168_mac_ocp_write( tp, 0xF86E, 0x8DE2 ); + rtl8168_mac_ocp_write( tp, 0xF870, 0x48C1 ); + rtl8168_mac_ocp_write( tp, 0xF872, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF874, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF876, 0x00AA ); + rtl8168_mac_ocp_write( tp, 0xF878, 0xE0C0 ); + rtl8168_mac_ocp_write( tp, 0xF87A, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF87C, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF87E, 0x0132 ); + rtl8168_mac_ocp_write( tp, 0xF880, 0xC50C ); + rtl8168_mac_ocp_write( tp, 0xF882, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xF884, 0x49CE ); + rtl8168_mac_ocp_write( tp, 0xF886, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xF888, 0x1C00 ); + rtl8168_mac_ocp_write( tp, 0xF88A, 0x9EA0 ); + rtl8168_mac_ocp_write( tp, 0xF88C, 0x1C1C ); + rtl8168_mac_ocp_write( tp, 0xF88E, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xF890, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xF892, 0xC402 ); + rtl8168_mac_ocp_write( tp, 0xF894, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF896, 0x0AFA ); + rtl8168_mac_ocp_write( tp, 0xF898, 0xDE20 ); + rtl8168_mac_ocp_write( tp, 0xF89A, 0xE000 ); + rtl8168_mac_ocp_write( tp, 0xF89C, 0xE092 ); + rtl8168_mac_ocp_write( tp, 0xF89E, 0xE430 ); + rtl8168_mac_ocp_write( tp, 0xF8A0, 0xDE20 ); + rtl8168_mac_ocp_write( tp, 0xF8A2, 0xE0C0 ); + rtl8168_mac_ocp_write( tp, 0xF8A4, 0xE860 ); + rtl8168_mac_ocp_write( tp, 0xF8A6, 0xE84C ); + rtl8168_mac_ocp_write( tp, 0xF8A8, 0xB400 ); + rtl8168_mac_ocp_write( tp, 0xF8AA, 0xB430 ); + rtl8168_mac_ocp_write( tp, 0xF8AC, 0xE410 ); + 
rtl8168_mac_ocp_write( tp, 0xF8AE, 0xC0AE ); + rtl8168_mac_ocp_write( tp, 0xF8B0, 0xB407 ); + rtl8168_mac_ocp_write( tp, 0xF8B2, 0xB406 ); + rtl8168_mac_ocp_write( tp, 0xF8B4, 0xB405 ); + rtl8168_mac_ocp_write( tp, 0xF8B6, 0xB404 ); + rtl8168_mac_ocp_write( tp, 0xF8B8, 0xB403 ); + rtl8168_mac_ocp_write( tp, 0xF8BA, 0xB402 ); + rtl8168_mac_ocp_write( tp, 0xF8BC, 0xB401 ); + rtl8168_mac_ocp_write( tp, 0xF8BE, 0xC7EE ); + rtl8168_mac_ocp_write( tp, 0xF8C0, 0x76F4 ); + rtl8168_mac_ocp_write( tp, 0xF8C2, 0xC2ED ); + rtl8168_mac_ocp_write( tp, 0xF8C4, 0xC3ED ); + rtl8168_mac_ocp_write( tp, 0xF8C6, 0xC1EF ); + rtl8168_mac_ocp_write( tp, 0xF8C8, 0xC5F3 ); + rtl8168_mac_ocp_write( tp, 0xF8CA, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF8CC, 0x49CD ); + rtl8168_mac_ocp_write( tp, 0xF8CE, 0xF001 ); + rtl8168_mac_ocp_write( tp, 0xF8D0, 0xC5EE ); + rtl8168_mac_ocp_write( tp, 0xF8D2, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF8D4, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF8D6, 0xF105 ); + rtl8168_mac_ocp_write( tp, 0xF8D8, 0xC5E4 ); + rtl8168_mac_ocp_write( tp, 0xF8DA, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xF8DC, 0x49CE ); + rtl8168_mac_ocp_write( tp, 0xF8DE, 0xF00B ); + rtl8168_mac_ocp_write( tp, 0xF8E0, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xF8E2, 0x484B ); + rtl8168_mac_ocp_write( tp, 0xF8E4, 0x9C44 ); + rtl8168_mac_ocp_write( tp, 0xF8E6, 0x1C10 ); + rtl8168_mac_ocp_write( tp, 0xF8E8, 0x9C62 ); + rtl8168_mac_ocp_write( tp, 0xF8EA, 0x1C11 ); + rtl8168_mac_ocp_write( tp, 0xF8EC, 0x8C60 ); + rtl8168_mac_ocp_write( tp, 0xF8EE, 0x1C00 ); + rtl8168_mac_ocp_write( tp, 0xF8F0, 0x9CF6 ); + rtl8168_mac_ocp_write( tp, 0xF8F2, 0xE0EC ); + rtl8168_mac_ocp_write( tp, 0xF8F4, 0x49E7 ); + rtl8168_mac_ocp_write( tp, 0xF8F6, 0xF016 ); + rtl8168_mac_ocp_write( tp, 0xF8F8, 0x1D80 ); + rtl8168_mac_ocp_write( tp, 0xF8FA, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xF8FC, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF8FE, 0x4843 ); + rtl8168_mac_ocp_write( tp, 0xF900, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF902, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF904, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF906, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xF908, 0x48C8 ); + rtl8168_mac_ocp_write( tp, 0xF90A, 0x48C9 ); + rtl8168_mac_ocp_write( tp, 0xF90C, 0x48CA ); + rtl8168_mac_ocp_write( tp, 0xF90E, 0x9C44 ); + rtl8168_mac_ocp_write( tp, 0xF910, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF912, 0x4844 ); + rtl8168_mac_ocp_write( tp, 0xF914, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF916, 0x1E01 ); + rtl8168_mac_ocp_write( tp, 0xF918, 0xE8DB ); + rtl8168_mac_ocp_write( tp, 0xF91A, 0x7420 ); + rtl8168_mac_ocp_write( tp, 0xF91C, 0x48C1 ); + rtl8168_mac_ocp_write( tp, 0xF91E, 0x9C20 ); + rtl8168_mac_ocp_write( tp, 0xF920, 0xE0D5 ); + rtl8168_mac_ocp_write( tp, 0xF922, 0x49E6 ); + rtl8168_mac_ocp_write( tp, 0xF924, 0xF02A ); + rtl8168_mac_ocp_write( tp, 0xF926, 0x1D40 ); + rtl8168_mac_ocp_write( tp, 0xF928, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xF92A, 0x74FC ); + rtl8168_mac_ocp_write( tp, 0xF92C, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF92E, 0xF124 ); + rtl8168_mac_ocp_write( tp, 0xF930, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF932, 0xF122 ); + rtl8168_mac_ocp_write( tp, 0xF934, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF936, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF938, 0xF01F ); + rtl8168_mac_ocp_write( tp, 0xF93A, 0xE8D3 ); + rtl8168_mac_ocp_write( tp, 0xF93C, 0x48C4 ); + rtl8168_mac_ocp_write( tp, 0xF93E, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF940, 0x1E00 ); + rtl8168_mac_ocp_write( tp, 0xF942, 0xE8C6 ); + rtl8168_mac_ocp_write( tp, 
0xF944, 0xC5B1 ); + rtl8168_mac_ocp_write( tp, 0xF946, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF948, 0x49C3 ); + rtl8168_mac_ocp_write( tp, 0xF94A, 0xF016 ); + rtl8168_mac_ocp_write( tp, 0xF94C, 0xC5AF ); + rtl8168_mac_ocp_write( tp, 0xF94E, 0x74A4 ); + rtl8168_mac_ocp_write( tp, 0xF950, 0x49C2 ); + rtl8168_mac_ocp_write( tp, 0xF952, 0xF005 ); + rtl8168_mac_ocp_write( tp, 0xF954, 0xC5AA ); + rtl8168_mac_ocp_write( tp, 0xF956, 0x74B2 ); + rtl8168_mac_ocp_write( tp, 0xF958, 0x49C9 ); + rtl8168_mac_ocp_write( tp, 0xF95A, 0xF10E ); + rtl8168_mac_ocp_write( tp, 0xF95C, 0xC5A6 ); + rtl8168_mac_ocp_write( tp, 0xF95E, 0x74A8 ); + rtl8168_mac_ocp_write( tp, 0xF960, 0x4845 ); + rtl8168_mac_ocp_write( tp, 0xF962, 0x4846 ); + rtl8168_mac_ocp_write( tp, 0xF964, 0x4847 ); + rtl8168_mac_ocp_write( tp, 0xF966, 0x4848 ); + rtl8168_mac_ocp_write( tp, 0xF968, 0x9CA8 ); + rtl8168_mac_ocp_write( tp, 0xF96A, 0x74B2 ); + rtl8168_mac_ocp_write( tp, 0xF96C, 0x4849 ); + rtl8168_mac_ocp_write( tp, 0xF96E, 0x9CB2 ); + rtl8168_mac_ocp_write( tp, 0xF970, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF972, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xF974, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF976, 0xE0AA ); + rtl8168_mac_ocp_write( tp, 0xF978, 0x49E4 ); + rtl8168_mac_ocp_write( tp, 0xF97A, 0xF018 ); + rtl8168_mac_ocp_write( tp, 0xF97C, 0x1D10 ); + rtl8168_mac_ocp_write( tp, 0xF97E, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xF980, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF982, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF984, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF986, 0x4843 ); + rtl8168_mac_ocp_write( tp, 0xF988, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF98A, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF98C, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF98E, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF990, 0x4844 ); + rtl8168_mac_ocp_write( tp, 0xF992, 0x4842 ); + rtl8168_mac_ocp_write( tp, 0xF994, 0x4841 ); + rtl8168_mac_ocp_write( tp, 0xF996, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF998, 0x1E01 ); + rtl8168_mac_ocp_write( tp, 0xF99A, 0xE89A ); + rtl8168_mac_ocp_write( tp, 0xF99C, 0x7420 ); + rtl8168_mac_ocp_write( tp, 0xF99E, 0x4841 ); + rtl8168_mac_ocp_write( tp, 0xF9A0, 0x9C20 ); + rtl8168_mac_ocp_write( tp, 0xF9A2, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xF9A4, 0x4848 ); + rtl8168_mac_ocp_write( tp, 0xF9A6, 0x9C44 ); + rtl8168_mac_ocp_write( tp, 0xF9A8, 0xE091 ); + rtl8168_mac_ocp_write( tp, 0xF9AA, 0x49E5 ); + rtl8168_mac_ocp_write( tp, 0xF9AC, 0xF03E ); + rtl8168_mac_ocp_write( tp, 0xF9AE, 0x1D20 ); + rtl8168_mac_ocp_write( tp, 0xF9B0, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xF9B2, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF9B4, 0x48C2 ); + rtl8168_mac_ocp_write( tp, 0xF9B6, 0x4841 ); + rtl8168_mac_ocp_write( tp, 0xF9B8, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF9BA, 0x1E01 ); + rtl8168_mac_ocp_write( tp, 0xF9BC, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xF9BE, 0x49CA ); + rtl8168_mac_ocp_write( tp, 0xF9C0, 0xF103 ); + rtl8168_mac_ocp_write( tp, 0xF9C2, 0x49C2 ); + rtl8168_mac_ocp_write( tp, 0xF9C4, 0xF00C ); + rtl8168_mac_ocp_write( tp, 0xF9C6, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF9C8, 0xF004 ); + rtl8168_mac_ocp_write( tp, 0xF9CA, 0x6447 ); + rtl8168_mac_ocp_write( tp, 0xF9CC, 0x2244 ); + rtl8168_mac_ocp_write( tp, 0xF9CE, 0xE002 ); + rtl8168_mac_ocp_write( tp, 0xF9D0, 0x1C01 ); + rtl8168_mac_ocp_write( tp, 0xF9D2, 0x9C62 ); + rtl8168_mac_ocp_write( tp, 0xF9D4, 0x1C11 ); + rtl8168_mac_ocp_write( tp, 0xF9D6, 0x8C60 ); + rtl8168_mac_ocp_write( tp, 0xF9D8, 0x1C00 ); + rtl8168_mac_ocp_write( tp, 0xF9DA, 0x9CF6 ); + 
rtl8168_mac_ocp_write( tp, 0xF9DC, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xF9DE, 0x49C8 ); + rtl8168_mac_ocp_write( tp, 0xF9E0, 0xF01D ); + rtl8168_mac_ocp_write( tp, 0xF9E2, 0x74FC ); + rtl8168_mac_ocp_write( tp, 0xF9E4, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF9E6, 0xF11A ); + rtl8168_mac_ocp_write( tp, 0xF9E8, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xF9EA, 0xF118 ); + rtl8168_mac_ocp_write( tp, 0xF9EC, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF9EE, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF9F0, 0xF015 ); + rtl8168_mac_ocp_write( tp, 0xF9F2, 0x49C6 ); + rtl8168_mac_ocp_write( tp, 0xF9F4, 0xF113 ); + rtl8168_mac_ocp_write( tp, 0xF9F6, 0xE875 ); + rtl8168_mac_ocp_write( tp, 0xF9F8, 0x48C4 ); + rtl8168_mac_ocp_write( tp, 0xF9FA, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF9FC, 0x7420 ); + rtl8168_mac_ocp_write( tp, 0xF9FE, 0x48C1 ); + rtl8168_mac_ocp_write( tp, 0xFA00, 0x9C20 ); + rtl8168_mac_ocp_write( tp, 0xFA02, 0xC50A ); + rtl8168_mac_ocp_write( tp, 0xFA04, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFA06, 0x8CA5 ); + rtl8168_mac_ocp_write( tp, 0xFA08, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xFA0A, 0xC505 ); + rtl8168_mac_ocp_write( tp, 0xFA0C, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xFA0E, 0x1C11 ); + rtl8168_mac_ocp_write( tp, 0xFA10, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xFA12, 0xE00A ); + rtl8168_mac_ocp_write( tp, 0xFA14, 0xE434 ); + rtl8168_mac_ocp_write( tp, 0xFA16, 0xD3C0 ); + rtl8168_mac_ocp_write( tp, 0xFA18, 0xDC00 ); + rtl8168_mac_ocp_write( tp, 0xFA1A, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xFA1C, 0x49CA ); + rtl8168_mac_ocp_write( tp, 0xFA1E, 0xF004 ); + rtl8168_mac_ocp_write( tp, 0xFA20, 0x48CA ); + rtl8168_mac_ocp_write( tp, 0xFA22, 0x9C44 ); + rtl8168_mac_ocp_write( tp, 0xFA24, 0xE855 ); + rtl8168_mac_ocp_write( tp, 0xFA26, 0xE052 ); + rtl8168_mac_ocp_write( tp, 0xFA28, 0x49E8 ); + rtl8168_mac_ocp_write( tp, 0xFA2A, 0xF024 ); + rtl8168_mac_ocp_write( tp, 0xFA2C, 0x1D01 ); + rtl8168_mac_ocp_write( tp, 0xFA2E, 0x8DF5 ); + rtl8168_mac_ocp_write( tp, 0xFA30, 0x7440 ); + rtl8168_mac_ocp_write( tp, 0xFA32, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xFA34, 0xF11E ); + rtl8168_mac_ocp_write( tp, 0xFA36, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xFA38, 0x49C8 ); + rtl8168_mac_ocp_write( tp, 0xFA3A, 0xF01B ); + rtl8168_mac_ocp_write( tp, 0xFA3C, 0x49CA ); + rtl8168_mac_ocp_write( tp, 0xFA3E, 0xF119 ); + rtl8168_mac_ocp_write( tp, 0xFA40, 0xC5EC ); + rtl8168_mac_ocp_write( tp, 0xFA42, 0x76A4 ); + rtl8168_mac_ocp_write( tp, 0xFA44, 0x49E3 ); + rtl8168_mac_ocp_write( tp, 0xFA46, 0xF015 ); + rtl8168_mac_ocp_write( tp, 0xFA48, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xFA4A, 0xF103 ); + rtl8168_mac_ocp_write( tp, 0xFA4C, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xFA4E, 0xF011 ); + rtl8168_mac_ocp_write( tp, 0xFA50, 0x4849 ); + rtl8168_mac_ocp_write( tp, 0xFA52, 0x9C44 ); + rtl8168_mac_ocp_write( tp, 0xFA54, 0x1C00 ); + rtl8168_mac_ocp_write( tp, 0xFA56, 0x9CF6 ); + rtl8168_mac_ocp_write( tp, 0xFA58, 0x7444 ); + rtl8168_mac_ocp_write( tp, 0xFA5A, 0x49C1 ); + rtl8168_mac_ocp_write( tp, 0xFA5C, 0xF004 ); + rtl8168_mac_ocp_write( tp, 0xFA5E, 0x6446 ); + rtl8168_mac_ocp_write( tp, 0xFA60, 0x1E07 ); + rtl8168_mac_ocp_write( tp, 0xFA62, 0xE003 ); + rtl8168_mac_ocp_write( tp, 0xFA64, 0x1C01 ); + rtl8168_mac_ocp_write( tp, 0xFA66, 0x1E03 ); + rtl8168_mac_ocp_write( tp, 0xFA68, 0x9C62 ); + rtl8168_mac_ocp_write( tp, 0xFA6A, 0x1C11 ); + rtl8168_mac_ocp_write( tp, 0xFA6C, 0x8C60 ); + rtl8168_mac_ocp_write( tp, 0xFA6E, 0xE830 ); + rtl8168_mac_ocp_write( tp, 0xFA70, 0xE02D ); + rtl8168_mac_ocp_write( tp, 
0xFA72, 0x49E9 ); + rtl8168_mac_ocp_write( tp, 0xFA74, 0xF004 ); + rtl8168_mac_ocp_write( tp, 0xFA76, 0x1D02 ); + rtl8168_mac_ocp_write( tp, 0xFA78, 0x8DF5 ); + rtl8168_mac_ocp_write( tp, 0xFA7A, 0xE79C ); + rtl8168_mac_ocp_write( tp, 0xFA7C, 0x49E3 ); + rtl8168_mac_ocp_write( tp, 0xFA7E, 0xF006 ); + rtl8168_mac_ocp_write( tp, 0xFA80, 0x1D08 ); + rtl8168_mac_ocp_write( tp, 0xFA82, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xFA84, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xFA86, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xFA88, 0xE73A ); + rtl8168_mac_ocp_write( tp, 0xFA8A, 0x49E1 ); + rtl8168_mac_ocp_write( tp, 0xFA8C, 0xF007 ); + rtl8168_mac_ocp_write( tp, 0xFA8E, 0x1D02 ); + rtl8168_mac_ocp_write( tp, 0xFA90, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xFA92, 0x1E01 ); + rtl8168_mac_ocp_write( tp, 0xFA94, 0xE7A7 ); + rtl8168_mac_ocp_write( tp, 0xFA96, 0xDE20 ); + rtl8168_mac_ocp_write( tp, 0xFA98, 0xE410 ); + rtl8168_mac_ocp_write( tp, 0xFA9A, 0x49E0 ); + rtl8168_mac_ocp_write( tp, 0xFA9C, 0xF017 ); + rtl8168_mac_ocp_write( tp, 0xFA9E, 0x1D01 ); + rtl8168_mac_ocp_write( tp, 0xFAA0, 0x8DF4 ); + rtl8168_mac_ocp_write( tp, 0xFAA2, 0xC5FA ); + rtl8168_mac_ocp_write( tp, 0xFAA4, 0x1C00 ); + rtl8168_mac_ocp_write( tp, 0xFAA6, 0x8CA0 ); + rtl8168_mac_ocp_write( tp, 0xFAA8, 0x1C1B ); + rtl8168_mac_ocp_write( tp, 0xFAAA, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xFAAC, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFAAE, 0x49CF ); + rtl8168_mac_ocp_write( tp, 0xFAB0, 0xF0FE ); + rtl8168_mac_ocp_write( tp, 0xFAB2, 0xC5F3 ); + rtl8168_mac_ocp_write( tp, 0xFAB4, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xFAB6, 0x4849 ); + rtl8168_mac_ocp_write( tp, 0xFAB8, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xFABA, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xFABC, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xFABE, 0xF006 ); + rtl8168_mac_ocp_write( tp, 0xFAC0, 0x48C3 ); + rtl8168_mac_ocp_write( tp, 0xFAC2, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xFAC4, 0xE820 ); + rtl8168_mac_ocp_write( tp, 0xFAC6, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xFAC8, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xFACA, 0xC432 ); + rtl8168_mac_ocp_write( tp, 0xFACC, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xFACE, 0xC5E4 ); + rtl8168_mac_ocp_write( tp, 0xFAD0, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFAD2, 0x49CE ); + rtl8168_mac_ocp_write( tp, 0xFAD4, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xFAD6, 0x9EA0 ); + rtl8168_mac_ocp_write( tp, 0xFAD8, 0x1C1C ); + rtl8168_mac_ocp_write( tp, 0xFADA, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xFADC, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xFADE, 0xFF80 ); + rtl8168_mac_ocp_write( tp, 0xFAE0, 0xB404 ); + rtl8168_mac_ocp_write( tp, 0xFAE2, 0xB405 ); + rtl8168_mac_ocp_write( tp, 0xFAE4, 0xC5D9 ); + rtl8168_mac_ocp_write( tp, 0xFAE6, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFAE8, 0x49CE ); + rtl8168_mac_ocp_write( tp, 0xFAEA, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xFAEC, 0xC41F ); + rtl8168_mac_ocp_write( tp, 0xFAEE, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xFAF0, 0xC41C ); + rtl8168_mac_ocp_write( tp, 0xFAF2, 0x1C13 ); + rtl8168_mac_ocp_write( tp, 0xFAF4, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xFAF6, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xFAF8, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFAFA, 0x49CF ); + rtl8168_mac_ocp_write( tp, 0xFAFC, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xFAFE, 0xB005 ); + rtl8168_mac_ocp_write( tp, 0xFB00, 0xB004 ); + rtl8168_mac_ocp_write( tp, 0xFB02, 0xFF80 ); + rtl8168_mac_ocp_write( tp, 0xFB04, 0xB404 ); + rtl8168_mac_ocp_write( tp, 0xFB06, 0xB405 ); + rtl8168_mac_ocp_write( tp, 0xFB08, 0xC5C7 ); + 
rtl8168_mac_ocp_write( tp, 0xFB0A, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFB0C, 0x49CE ); + rtl8168_mac_ocp_write( tp, 0xFB0E, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xFB10, 0xC40E ); + rtl8168_mac_ocp_write( tp, 0xFB12, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xFB14, 0xC40A ); + rtl8168_mac_ocp_write( tp, 0xFB16, 0x1C13 ); + rtl8168_mac_ocp_write( tp, 0xFB18, 0x484F ); + rtl8168_mac_ocp_write( tp, 0xFB1A, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xFB1C, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xFB1E, 0x49CF ); + rtl8168_mac_ocp_write( tp, 0xFB20, 0xF1FE ); + rtl8168_mac_ocp_write( tp, 0xFB22, 0xB005 ); + rtl8168_mac_ocp_write( tp, 0xFB24, 0xB004 ); + rtl8168_mac_ocp_write( tp, 0xFB26, 0xFF80 ); + rtl8168_mac_ocp_write( tp, 0xFB28, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFB2A, 0x0481 ); + rtl8168_mac_ocp_write( tp, 0xFB2C, 0x0C81 ); + rtl8168_mac_ocp_write( tp, 0xFB2E, 0x0AE0 ); + + + rtl8168_mac_ocp_write( tp, 0xFC26, 0x8000 ); + + rtl8168_mac_ocp_write( tp, 0xFC28, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFC2A, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFC2C, 0x0297 ); + rtl8168_mac_ocp_write( tp, 0xFC2E, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFC30, 0x00A9 ); + rtl8168_mac_ocp_write( tp, 0xFC32, 0x012D ); + rtl8168_mac_ocp_write( tp, 0xFC34, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xFC36, 0x08DF ); +} + +static void +rtl8168_set_mac_mcu_8411b_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + rtl8168_hw_disable_mac_mcu_bps(dev); + + rtl8168_mac_ocp_write( tp, 0xF800, 0xE008 ); + rtl8168_mac_ocp_write( tp, 0xF802, 0xE00A ); + rtl8168_mac_ocp_write( tp, 0xF804, 0xE00C ); + rtl8168_mac_ocp_write( tp, 0xF806, 0xE00E ); + rtl8168_mac_ocp_write( tp, 0xF808, 0xE027 ); + rtl8168_mac_ocp_write( tp, 0xF80A, 0xE04F ); + rtl8168_mac_ocp_write( tp, 0xF80C, 0xE05E ); + rtl8168_mac_ocp_write( tp, 0xF80E, 0xE065 ); + rtl8168_mac_ocp_write( tp, 0xF810, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF812, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF814, 0x0000 ); + rtl8168_mac_ocp_write( tp, 0xF816, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF818, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF81A, 0x074C ); + rtl8168_mac_ocp_write( tp, 0xF81C, 0xC302 ); + rtl8168_mac_ocp_write( tp, 0xF81E, 0xBB00 ); + rtl8168_mac_ocp_write( tp, 0xF820, 0x080A ); + rtl8168_mac_ocp_write( tp, 0xF822, 0x6420 ); + rtl8168_mac_ocp_write( tp, 0xF824, 0x48C2 ); + rtl8168_mac_ocp_write( tp, 0xF826, 0x8C20 ); + rtl8168_mac_ocp_write( tp, 0xF828, 0xC516 ); + rtl8168_mac_ocp_write( tp, 0xF82A, 0x64A4 ); + rtl8168_mac_ocp_write( tp, 0xF82C, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF82E, 0xF009 ); + rtl8168_mac_ocp_write( tp, 0xF830, 0x74A2 ); + rtl8168_mac_ocp_write( tp, 0xF832, 0x8CA5 ); + rtl8168_mac_ocp_write( tp, 0xF834, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF836, 0xC50E ); + rtl8168_mac_ocp_write( tp, 0xF838, 0x9CA2 ); + rtl8168_mac_ocp_write( tp, 0xF83A, 0x1C11 ); + rtl8168_mac_ocp_write( tp, 0xF83C, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF83E, 0xE006 ); + rtl8168_mac_ocp_write( tp, 0xF840, 0x74F8 ); + rtl8168_mac_ocp_write( tp, 0xF842, 0x48C4 ); + rtl8168_mac_ocp_write( tp, 0xF844, 0x8CF8 ); + rtl8168_mac_ocp_write( tp, 0xF846, 0xC404 ); + rtl8168_mac_ocp_write( tp, 0xF848, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF84A, 0xC403 ); + rtl8168_mac_ocp_write( tp, 0xF84C, 0xBC00 ); + rtl8168_mac_ocp_write( tp, 0xF84E, 0x0BF2 ); + rtl8168_mac_ocp_write( tp, 0xF850, 0x0C0A ); + rtl8168_mac_ocp_write( tp, 0xF852, 0xE434 ); + rtl8168_mac_ocp_write( tp, 0xF854, 0xD3C0 ); + rtl8168_mac_ocp_write( tp, 0xF856, 0x49D9 ); 
+ rtl8168_mac_ocp_write( tp, 0xF858, 0xF01F ); + rtl8168_mac_ocp_write( tp, 0xF85A, 0xC526 ); + rtl8168_mac_ocp_write( tp, 0xF85C, 0x64A5 ); + rtl8168_mac_ocp_write( tp, 0xF85E, 0x1400 ); + rtl8168_mac_ocp_write( tp, 0xF860, 0xF007 ); + rtl8168_mac_ocp_write( tp, 0xF862, 0x0C01 ); + rtl8168_mac_ocp_write( tp, 0xF864, 0x8CA5 ); + rtl8168_mac_ocp_write( tp, 0xF866, 0x1C15 ); + rtl8168_mac_ocp_write( tp, 0xF868, 0xC51B ); + rtl8168_mac_ocp_write( tp, 0xF86A, 0x9CA0 ); + rtl8168_mac_ocp_write( tp, 0xF86C, 0xE013 ); + rtl8168_mac_ocp_write( tp, 0xF86E, 0xC519 ); + rtl8168_mac_ocp_write( tp, 0xF870, 0x74A0 ); + rtl8168_mac_ocp_write( tp, 0xF872, 0x48C4 ); + rtl8168_mac_ocp_write( tp, 0xF874, 0x8CA0 ); + rtl8168_mac_ocp_write( tp, 0xF876, 0xC516 ); + rtl8168_mac_ocp_write( tp, 0xF878, 0x74A4 ); + rtl8168_mac_ocp_write( tp, 0xF87A, 0x48C8 ); + rtl8168_mac_ocp_write( tp, 0xF87C, 0x48CA ); + rtl8168_mac_ocp_write( tp, 0xF87E, 0x9CA4 ); + rtl8168_mac_ocp_write( tp, 0xF880, 0xC512 ); + rtl8168_mac_ocp_write( tp, 0xF882, 0x1B00 ); + rtl8168_mac_ocp_write( tp, 0xF884, 0x9BA0 ); + rtl8168_mac_ocp_write( tp, 0xF886, 0x1B1C ); + rtl8168_mac_ocp_write( tp, 0xF888, 0x483F ); + rtl8168_mac_ocp_write( tp, 0xF88A, 0x9BA2 ); + rtl8168_mac_ocp_write( tp, 0xF88C, 0x1B04 ); + rtl8168_mac_ocp_write( tp, 0xF88E, 0xC508 ); + rtl8168_mac_ocp_write( tp, 0xF890, 0x9BA0 ); + rtl8168_mac_ocp_write( tp, 0xF892, 0xC505 ); + rtl8168_mac_ocp_write( tp, 0xF894, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF896, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF898, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF89A, 0x0300 ); + rtl8168_mac_ocp_write( tp, 0xF89C, 0x051E ); + rtl8168_mac_ocp_write( tp, 0xF89E, 0xE434 ); + rtl8168_mac_ocp_write( tp, 0xF8A0, 0xE018 ); + rtl8168_mac_ocp_write( tp, 0xF8A2, 0xE092 ); + rtl8168_mac_ocp_write( tp, 0xF8A4, 0xDE20 ); + rtl8168_mac_ocp_write( tp, 0xF8A6, 0xD3C0 ); + rtl8168_mac_ocp_write( tp, 0xF8A8, 0xC50F ); + rtl8168_mac_ocp_write( tp, 0xF8AA, 0x76A4 ); + rtl8168_mac_ocp_write( tp, 0xF8AC, 0x49E3 ); + rtl8168_mac_ocp_write( tp, 0xF8AE, 0xF007 ); + rtl8168_mac_ocp_write( tp, 0xF8B0, 0x49C0 ); + rtl8168_mac_ocp_write( tp, 0xF8B2, 0xF103 ); + rtl8168_mac_ocp_write( tp, 0xF8B4, 0xC607 ); + rtl8168_mac_ocp_write( tp, 0xF8B6, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF8B8, 0xC606 ); + rtl8168_mac_ocp_write( tp, 0xF8BA, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF8BC, 0xC602 ); + rtl8168_mac_ocp_write( tp, 0xF8BE, 0xBE00 ); + rtl8168_mac_ocp_write( tp, 0xF8C0, 0x0C4C ); + rtl8168_mac_ocp_write( tp, 0xF8C2, 0x0C28 ); + rtl8168_mac_ocp_write( tp, 0xF8C4, 0x0C2C ); + rtl8168_mac_ocp_write( tp, 0xF8C6, 0xDC00 ); + rtl8168_mac_ocp_write( tp, 0xF8C8, 0xC707 ); + rtl8168_mac_ocp_write( tp, 0xF8CA, 0x1D00 ); + rtl8168_mac_ocp_write( tp, 0xF8CC, 0x8DE2 ); + rtl8168_mac_ocp_write( tp, 0xF8CE, 0x48C1 ); + rtl8168_mac_ocp_write( tp, 0xF8D0, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF8D2, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF8D4, 0x00AA ); + rtl8168_mac_ocp_write( tp, 0xF8D6, 0xE0C0 ); + rtl8168_mac_ocp_write( tp, 0xF8D8, 0xC502 ); + rtl8168_mac_ocp_write( tp, 0xF8DA, 0xBD00 ); + rtl8168_mac_ocp_write( tp, 0xF8DC, 0x0132 ); + + rtl8168_mac_ocp_write( tp, 0xFC26, 0x8000 ); + + rtl8168_mac_ocp_write( tp, 0xFC2A, 0x0743 ); + rtl8168_mac_ocp_write( tp, 0xFC2C, 0x0801 ); + rtl8168_mac_ocp_write( tp, 0xFC2E, 0x0BE9 ); + rtl8168_mac_ocp_write( tp, 0xFC30, 0x02FD ); + rtl8168_mac_ocp_write( tp, 0xFC32, 0x0C25 ); + rtl8168_mac_ocp_write( tp, 0xFC34, 0x00A9 ); + rtl8168_mac_ocp_write( tp, 0xFC36, 0x012D ); +} + +static void 
+rtl8168_set_mac_mcu_8168ep_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 i; + static const u16 mcu_patch_code_8168ep_1[] = { + 0xE008, 0xE0D3, 0xE0D6, 0xE0D9, 0xE0DB, 0xE0DD, 0xE0DF, 0xE0E1, 0xC251, + 0x7340, 0x49B1, 0xF010, 0x1D02, 0x8D40, 0xC202, 0xBA00, 0x2C3A, 0xC0F0, + 0xE8DE, 0x2000, 0x8000, 0xC0B6, 0x268C, 0x752C, 0x49D4, 0xF112, 0xE025, + 0xC2F6, 0x7146, 0xC2F5, 0x7340, 0x49BE, 0xF103, 0xC7F2, 0xE002, 0xC7F1, + 0x304F, 0x6226, 0x49A1, 0xF1F0, 0x7222, 0x49A0, 0xF1ED, 0x2525, 0x1F28, + 0x3097, 0x3091, 0x9A36, 0x752C, 0x21DC, 0x25BC, 0xC6E2, 0x77C0, 0x1304, + 0xF014, 0x1303, 0xF014, 0x1302, 0xF014, 0x1301, 0xF014, 0x49D4, 0xF103, + 0xC3D7, 0xBB00, 0xC618, 0x67C6, 0x752E, 0x22D7, 0x26DD, 0x1505, 0xF013, + 0xC60A, 0xBE00, 0xC309, 0xBB00, 0xC308, 0xBB00, 0xC307, 0xBB00, 0xC306, + 0xBB00, 0x25C8, 0x25A6, 0x25AC, 0x25B2, 0x25B8, 0xCD08, 0x0000, 0xC0BC, + 0xC2FF, 0x7340, 0x49B0, 0xF04E, 0x1F46, 0x308F, 0xC3F7, 0x1C04, 0xE84D, + 0x1401, 0xF147, 0x7226, 0x49A7, 0xF044, 0x7222, 0x2525, 0x1F30, 0x3097, + 0x3091, 0x7340, 0xC4EA, 0x401C, 0xF006, 0xC6E8, 0x75C0, 0x49D7, 0xF105, + 0xE036, 0x1D08, 0x8DC1, 0x0208, 0x6640, 0x2764, 0x1606, 0xF12F, 0x6346, + 0x133B, 0xF12C, 0x9B34, 0x1B18, 0x3093, 0xC32A, 0x1C10, 0xE82A, 0x1401, + 0xF124, 0x1A36, 0x308A, 0x7322, 0x25B5, 0x0B0E, 0x1C00, 0xE82C, 0xC71F, + 0x4027, 0xF11A, 0xE838, 0x1F42, 0x308F, 0x1B08, 0xE824, 0x7236, 0x7746, + 0x1700, 0xF00D, 0xC313, 0x401F, 0xF103, 0x1F00, 0x9F46, 0x7744, 0x449F, + 0x445F, 0xE817, 0xC70A, 0x4027, 0xF105, 0xC302, 0xBB00, 0x2E08, 0x2DC2, + 0xC7FF, 0xBF00, 0xCDB8, 0xFFFF, 0x0C02, 0xA554, 0xA5DC, 0x402F, 0xF105, + 0x1400, 0xF1FA, 0x1C01, 0xE002, 0x1C00, 0xFF80, 0x49B0, 0xF004, 0x0B01, + 0xA1D3, 0xE003, 0x0B02, 0xA5D3, 0x3127, 0x3720, 0x0B02, 0xA5D3, 0x3127, + 0x3720, 0x1300, 0xF1FB, 0xFF80, 0x7322, 0x25B5, 0x1E28, 0x30DE, 0x30D9, + 0x7264, 0x1E11, 0x2368, 0x3116, 0xFF80, 0x1B7E, 0xC602, 0xBE00, 0x06A6, + 0x1B7E, 0xC602, 0xBE00, 0x0764, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, + 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, + 0x0000 + }; + + rtl8168_hw_disable_mac_mcu_bps(dev); + + for (i = 0; i < ARRAY_SIZE(mcu_patch_code_8168ep_1); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168ep_1[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x2549); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x06A5); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x0763); +} + +static void +rtl8168_set_mac_mcu_8168ep_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 i; + static const u16 mcu_patch_code_8168ep_2[] = { + 0xE008, 0xE017, 0xE052, 0xE056, 0xE058, 0xE05A, 0xE05C, 0xE05E, 0xC50F, + 0x76A4, 0x49E3, 0xF007, 0x49C0, 0xF103, 0xC607, 0xBE00, 0xC606, 0xBE00, + 0xC602, 0xBE00, 0x0BDA, 0x0BB6, 0x0BBA, 0xDC00, 0xB400, 0xB401, 0xB402, + 0xB403, 0xB404, 0xC02E, 0x7206, 0x49AE, 0xF1FE, 0xC12B, 0x9904, 0xC12A, + 0x9906, 0x7206, 0x49AE, 0xF1FE, 0x7200, 0x49A0, 0xF117, 0xC123, 0xC223, + 0xC323, 0xE808, 0xC322, 0xE806, 0xC321, 0xE804, 0xC320, 0xE802, 0xE00C, + 0x740E, 0x49CE, 0xF1FE, 0x9908, 0x990A, 0x9A0C, 0x9B0E, 0x740E, 0x49CE, + 0xF1FE, 0xFF80, 0xB004, 0xB003, 0xB002, 0xB001, 0xB000, 0xC604, 0xC002, + 0xB800, 0x1FC8, 0xE000, 0xE8E0, 0xF128, 0x0002, 0xFFFF, 0xF000, 0x8001, + 0x8002, 0x8003, 0x8004, 0x48C1, 0x48C2, 0xC502, 0xBD00, 0x0490, 0xC602, + 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, + 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, + }; + + rtl8168_hw_disable_mac_mcu_bps(dev); + 
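+        /* A hedged reading of the sequence below: the loop copies the
+         * vendor-supplied patch words into MAC MCU patch RAM, one 16-bit
+         * word per even OCP address starting at 0xF800; the 0xFC26 write
+         * that follows appears to arm the patch, and the 0xFC28..0xFC2C
+         * values look like entry/breakpoint addresses. These OCP registers
+         * are not publicly documented by Realtek, so this is inferred from
+         * the identical pattern used by the other rtl8168_set_mac_mcu_*
+         * routines in this file.
+         */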
+ for (i = 0; i < ARRAY_SIZE(mcu_patch_code_8168ep_2); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168ep_2[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x0BB3); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x1FC7); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x0485); +} + +static void +rtl8168_set_mac_mcu_8168h_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 i; + static const u16 mcu_patch_code_8168h_1[] = { + 0xE008, 0xE00F, 0xE011, 0xE047, 0xE049, 0xE073, 0xE075, 0xE079, 0xC707, + 0x1D00, 0x8DE2, 0x48C1, 0xC502, 0xBD00, 0x00E4, 0xE0C0, 0xC502, 0xBD00, + 0x0216, 0xC634, 0x75C0, 0x49D3, 0xF027, 0xC631, 0x75C0, 0x49D3, 0xF123, + 0xC627, 0x75C0, 0xB405, 0xC525, 0x9DC0, 0xC621, 0x75C8, 0x49D5, 0xF00A, + 0x49D6, 0xF008, 0x49D7, 0xF006, 0x49D8, 0xF004, 0x75D2, 0x49D9, 0xF111, + 0xC517, 0x9DC8, 0xC516, 0x9DD2, 0xC618, 0x75C0, 0x49D4, 0xF003, 0x49D0, + 0xF104, 0xC60A, 0xC50E, 0x9DC0, 0xB005, 0xC607, 0x9DC0, 0xB007, 0xC602, + 0xBE00, 0x1A06, 0xB400, 0xE86C, 0xA000, 0x01E1, 0x0200, 0x9200, 0xE84C, + 0xE004, 0xE908, 0xC502, 0xBD00, 0x0B58, 0xB407, 0xB404, 0x2195, 0x25BD, + 0x9BE0, 0x1C1C, 0x484F, 0x9CE2, 0x72E2, 0x49AE, 0xF1FE, 0x0B00, 0xF116, + 0xC71C, 0xC419, 0x9CE0, 0x1C13, 0x484F, 0x9CE2, 0x74E2, 0x49CE, 0xF1FE, + 0xC412, 0x9CE0, 0x1C13, 0x484F, 0x9CE2, 0x74E2, 0x49CE, 0xF1FE, 0xC70C, + 0x74F8, 0x48C3, 0x8CF8, 0xB004, 0xB007, 0xC502, 0xBD00, 0x0F24, 0x0481, + 0x0C81, 0xDE24, 0xE000, 0xC602, 0xBE00, 0x0CA4, 0x48C1, 0x48C2, 0xC502, + 0xBD00, 0x0578, 0xC602, 0xBE00, 0x0000 + }; + + rtl8168_hw_disable_mac_mcu_bps(dev); + + for (i = 0; i < ARRAY_SIZE(mcu_patch_code_8168h_1); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168h_1[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x00E2); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x0210); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x1A04); + rtl8168_mac_ocp_write(tp, 0xFC2E, 0x0B26); + rtl8168_mac_ocp_write(tp, 0xFC30, 0x0F02); + rtl8168_mac_ocp_write(tp, 0xFC32, 0x0CA0); + rtl8168_mac_ocp_write(tp, 0xFC34, 0x056C); + + rtl8168_mac_ocp_write(tp, 0xFC38, 0x007F); +} + +static void +rtl8168_set_mac_mcu_8168fp_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 i; + u16 breakPointEnabled = 0; + + rtl8168_hw_disable_mac_mcu_bps(dev); + + if(tp->HwPkgDet == 0x00 || tp->HwPkgDet == 0x0F) { + static const u16 mcu_patch_code_8168fp_1_1[] = { + 0xE00A, 0xE0C1, 0xE104, 0xE108, 0xE10D, 0xE112, 0xE11C, 0xE121, 0xE000, + 0xE0C8, 0xB400, 0xC1FE, 0x49E2, 0xF04C, 0x49EA, 0xF04A, 0x74E6, 0xC246, + 0x7542, 0x73EC, 0x1800, 0x49C0, 0xF10D, 0x49C1, 0xF10B, 0x49C2, 0xF109, + 0x49B0, 0xF107, 0x49B1, 0xF105, 0x7220, 0x49A2, 0xF102, 0xE002, 0x4800, + 0x49D0, 0xF10A, 0x49D1, 0xF108, 0x49D2, 0xF106, 0x49D3, 0xF104, 0x49DF, + 0xF102, 0xE00C, 0x4801, 0x72E4, 0x49AD, 0xF108, 0xC225, 0x6741, 0x48F0, + 0x8F41, 0x4870, 0x8F41, 0xC7CF, 0x49B5, 0xF01F, 0x49B2, 0xF00B, 0x4980, + 0xF003, 0x484E, 0x94E7, 0x4981, 0xF004, 0x485E, 0xC212, 0x9543, 0xE071, + 0x49B6, 0xF003, 0x49B3, 0xF10F, 0x4980, 0xF003, 0x484E, 0x94E7, 0x4981, + 0xF004, 0x485E, 0xC204, 0x9543, 0xE005, 0xE000, 0xE0FC, 0xE0FA, 0xE065, + 0x49B7, 0xF007, 0x4980, 0xF005, 0x1A38, 0x46D4, 0x1200, 0xF109, 0x4981, + 0xF055, 0x49C3, 0xF105, 0x1A30, 0x46D5, 0x1200, 0xF04F, 0x7220, 0x49A2, + 0xF130, 0x49C1, 0xF12E, 0x49B0, 0xF12C, 0xC2E6, 0x7240, 0x49A8, 0xF003, + 0x49D0, 0xF126, 0x49A9, 0xF003, 0x49D1, 0xF122, 0x49AA, 0xF003, 0x49D2, + 0xF11E, 0x49AB, 0xF003, 0x49DF, 
0xF11A, 0x49AC, 0xF003, 0x49D3, 0xF116, + 0x4980, 0xF003, 0x49C7, 0xF105, 0x4981, 0xF02C, 0x49D7, 0xF02A, 0x49C0, + 0xF00C, 0xC721, 0x62F4, 0x49A0, 0xF008, 0x49A4, 0xF106, 0x4824, 0x8AF4, + 0xC71A, 0x1A40, 0x9AE0, 0x49B6, 0xF017, 0x200E, 0xC7B8, 0x72E0, 0x4710, + 0x92E1, 0xC70E, 0x77E0, 0x49F0, 0xF112, 0xC70B, 0x77E0, 0x27FE, 0x1AFA, + 0x4317, 0xC705, 0x9AE2, 0x1A11, 0x8AE0, 0xE008, 0xE41C, 0xC0AE, 0xD23A, + 0xC7A2, 0x74E6, 0x484F, 0x94E7, 0xC79E, 0x8CE6, 0x8BEC, 0xC29C, 0x8D42, + 0x7220, 0xB000, 0xC502, 0xBD00, 0x0932, 0xB400, 0xC240, 0xC340, 0x7060, + 0x498F, 0xF014, 0x488F, 0x9061, 0x744C, 0x49C3, 0xF004, 0x7562, 0x485E, + 0x9563, 0x7446, 0x49C3, 0xF106, 0x7562, 0x1C30, 0x46E5, 0x1200, 0xF004, + 0x7446, 0x484F, 0x9447, 0xC32A, 0x7466, 0x49C0, 0xF00F, 0x48C0, 0x9C66, + 0x7446, 0x4840, 0x4841, 0x4842, 0x9C46, 0x744C, 0x4840, 0x9C4C, 0x744A, + 0x484A, 0x9C4A, 0xE013, 0x498E, 0xF011, 0x488E, 0x9061, 0x744C, 0x49C3, + 0xF004, 0x7446, 0x484E, 0x9447, 0x7446, 0x1D38, 0x46EC, 0x1500, 0xF004, + 0x7446, 0x484F, 0x9447, 0xB000, 0xC502, 0xBD00, 0x074C, 0xE000, 0xE0FC, + 0xE0C0, 0x4830, 0x4837, 0xC502, 0xBD00, 0x0978, 0x63E2, 0x4830, 0x4837, + 0xC502, 0xBD00, 0x09FE, 0x73E2, 0x4830, 0x8BE2, 0xC302, 0xBB00, 0x0A12, + 0x73E2, 0x48B0, 0x48B3, 0x48B4, 0x48B5, 0x48B6, 0x48B7, 0x8BE2, 0xC302, + 0xBB00, 0x0A5A, 0x73E2, 0x4830, 0x8BE2, 0xC302, 0xBB00, 0x0A6C, 0x73E2, + 0x4830, 0x4837, 0xC502, 0xBD00, 0x0A86 + }; + + for (i = 0; i < ARRAY_SIZE(mcu_patch_code_8168fp_1_1); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168fp_1_1[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x0890); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x0712); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x0974); + rtl8168_mac_ocp_write(tp, 0xFC2E, 0x09FC); + rtl8168_mac_ocp_write(tp, 0xFC30, 0x0A0E); + rtl8168_mac_ocp_write(tp, 0xFC32, 0x0A56); + rtl8168_mac_ocp_write(tp, 0xFC34, 0x0A68); + rtl8168_mac_ocp_write(tp, 0xFC36, 0x0A84); + + } else if (tp->HwPkgDet == 0x6) { + static const u16 mcu_patch_code_8168fp_1_2[] = { + 0xE008, 0xE00A, 0xE031, 0xE033, 0xE035, 0xE144, 0xE166, 0xE168, 0xC502, + 0xBD00, 0x0000, 0xC725, 0x75E0, 0x48D0, 0x9DE0, 0xC722, 0x75E0, 0x1C78, + 0x416C, 0x1530, 0xF111, 0xC71D, 0x75F6, 0x49D1, 0xF00D, 0x75E0, 0x1C1F, + 0x416C, 0x1502, 0xF108, 0x75FA, 0x49D3, 0xF005, 0x75EC, 0x9DE4, 0x4853, + 0x9DFA, 0xC70B, 0x75E0, 0x4852, 0x4850, 0x9DE0, 0xC602, 0xBE00, 0x04B8, + 0xE420, 0xE000, 0xE0FC, 0xE43C, 0xDC00, 0xEB00, 0xC202, 0xBA00, 0x0000, + 0xC002, 0xB800, 0x0000, 0xB401, 0xB402, 0xB403, 0xB404, 0xB405, 0xB406, + 0xC44D, 0xC54D, 0x1867, 0xE8A2, 0x2318, 0x276E, 0x1601, 0xF106, 0x1A07, + 0xE861, 0xE86B, 0xE873, 0xE037, 0x231E, 0x276E, 0x1602, 0xF10B, 0x1A07, + 0xE858, 0xE862, 0xC247, 0xC344, 0xE8E3, 0xC73B, 0x66E0, 0xE8B5, 0xE029, + 0x231A, 0x276C, 0xC733, 0x9EE0, 0x1866, 0xE885, 0x251C, 0x120F, 0xF011, + 0x1209, 0xF011, 0x2014, 0x240E, 0x1000, 0xF007, 0x120C, 0xF00D, 0x1203, + 0xF00D, 0x1200, 0xF00D, 0x120C, 0xF00D, 0x1203, 0xF00D, 0x1A03, 0xE00C, + 0x1A07, 0xE00A, 0x1A00, 0xE008, 0x1A01, 0xE006, 0x1A02, 0xE004, 0x1A04, + 0xE002, 0x1A05, 0xE829, 0xE833, 0xB006, 0xB005, 0xB004, 0xB003, 0xB002, + 0xB001, 0x60C4, 0xC702, 0xBF00, 0x2786, 0xDD00, 0xD030, 0xE0C4, 0xE0F8, + 0xDC42, 0xD3F0, 0x0000, 0x0004, 0x0007, 0x0014, 0x0090, 0x1000, 0x0F00, + 0x1004, 0x1008, 0x3000, 0x3004, 0x3008, 0x4000, 0x7777, 0x8000, 0x8001, + 0x8008, 0x8003, 0x8004, 0xC000, 0xC004, 0xF004, 0xFFFF, 0xB406, 0xB407, + 0xC6E5, 0x77C0, 0x27F3, 0x23F3, 0x47FA, 0x9FC0, 0xB007, 0xB006, 0xFF80, + 0xB405, 
0xB407, 0xC7D8, 0x75E0, 0x48D0, 0x9DE0, 0xB007, 0xB005, 0xFF80, + 0xB401, 0xC0EA, 0xC2DC, 0xC3D8, 0xE865, 0xC0D3, 0xC1E0, 0xC2E3, 0xE861, + 0xE817, 0xC0CD, 0xC2CF, 0xE85D, 0xC0C9, 0xC1D6, 0xC2DB, 0xE859, 0xE80F, + 0xC1C7, 0xC2CE, 0xE855, 0xC0C0, 0xC1D1, 0xC2D3, 0xE851, 0xE807, 0xC0BE, + 0xC2C2, 0xE84D, 0xE803, 0xB001, 0xFF80, 0xB402, 0xC2C6, 0xE859, 0x499F, + 0xF1FE, 0xB002, 0xFF80, 0xB402, 0xB403, 0xB407, 0xE821, 0x8882, 0x1980, + 0x8983, 0xE81D, 0x7180, 0x218B, 0x25BB, 0x1310, 0xF014, 0x1310, 0xFB03, + 0x1F20, 0x38FB, 0x3288, 0x434B, 0x2491, 0x430B, 0x1F0F, 0x38FB, 0x4313, + 0x2121, 0x4353, 0x2521, 0x418A, 0x6282, 0x2527, 0x212F, 0x418A, 0xB007, + 0xB003, 0xB002, 0xFF80, 0x6183, 0x2496, 0x1100, 0xF1FD, 0xFF80, 0x4800, + 0x4801, 0xC213, 0xC313, 0xE815, 0x4860, 0x8EE0, 0xC210, 0xC310, 0xE822, + 0x481E, 0xC20C, 0xC30C, 0xE80C, 0xC206, 0x7358, 0x483A, 0x9B58, 0xFF80, + 0xE8E0, 0xE000, 0x1008, 0x0F00, 0x800C, 0x0F00, 0xB407, 0xB406, 0xB403, + 0xC7F7, 0x98E0, 0x99E2, 0x9AE4, 0x21B2, 0x4831, 0x483F, 0x9BE6, 0x66E7, + 0x49E6, 0xF1FE, 0xB003, 0xB006, 0xB007, 0xFF80, 0xB407, 0xB406, 0xB403, + 0xC7E5, 0x9AE4, 0x21B2, 0x4831, 0x9BE6, 0x66E7, 0x49E6, 0xF1FE, 0x70E0, + 0x71E2, 0xB003, 0xB006, 0xB007, 0xFF80, 0x4882, 0xB406, 0xB405, 0xC71E, + 0x76E0, 0x1D78, 0x4175, 0x1630, 0xF10C, 0xC715, 0x76E0, 0x4861, 0x9EE0, + 0xC713, 0x1EFF, 0x9EE2, 0x75E0, 0x4850, 0x9DE0, 0xE005, 0xC70B, 0x76E0, + 0x4865, 0x9EE0, 0xB005, 0xB006, 0xC708, 0xC102, 0xB900, 0x279E, 0xEB16, + 0xEB00, 0xE43C, 0xDC00, 0xD3EC, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, + 0x0000 + }; + + for (i = 0; i < ARRAY_SIZE(mcu_patch_code_8168fp_1_2); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168fp_1_2[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x04b4); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC2E, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC30, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC32, 0x279C); + rtl8168_mac_ocp_write(tp, 0xFC34, 0x0000); + rtl8168_mac_ocp_write(tp, 0xFC36, 0x0000); + } + + if (tp->HwPkgDet == 0x00) + breakPointEnabled = 0x00FC; + else if (tp->HwPkgDet == 0x0F) + breakPointEnabled = 0x00FF; + else if (tp->HwPkgDet == 0x06) + breakPointEnabled = 0x0022; + + rtl8168_mac_ocp_write(tp, 0xFC38, breakPointEnabled); +} + +static void +rtl8168_set_mac_mcu_8168fp_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 i; + static const u16 mcu_patch_code_8168fp_2[] = { + 0xE008, 0xE00A, 0xE00F, 0xE014, 0xE05F, 0xE063, 0xE065, 0xE067, 0xC602, + 0xBE00, 0x2AB2, 0x1BC0, 0x46EB, 0x1BFE, 0xC102, 0xB900, 0x0B1A, 0x1BC0, + 0x46EB, 0x1B7E, 0xC102, 0xB900, 0x0BEA, 0xB400, 0xB401, 0xB402, 0xB403, + 0xB404, 0xB405, 0xC03A, 0x7206, 0x49AE, 0xF1FE, 0xC137, 0x9904, 0xC136, + 0x9906, 0x7206, 0x49AE, 0xF1FE, 0x7200, 0x49A0, 0xF10B, 0xC52F, 0xC12E, + 0xC232, 0xC332, 0xE812, 0xC331, 0xE810, 0xC330, 0xE80E, 0xE018, 0xC126, + 0xC229, 0xC525, 0xC328, 0xE808, 0xC523, 0xC326, 0xE805, 0xC521, 0xC324, + 0xE802, 0xE00C, 0x740E, 0x49CE, 0xF1FE, 0x9908, 0x9D0A, 0x9A0C, 0x9B0E, + 0x740E, 0x49CE, 0xF1FE, 0xFF80, 0xB005, 0xB004, 0xB003, 0xB002, 0xB001, + 0xB000, 0xC604, 0xC002, 0xB800, 0x2A5E, 0xE000, 0xE8E0, 0xF128, 0x3DC2, + 0xFFFF, 0x10EC, 0x816A, 0x816D, 0x816C, 0xF000, 0x8002, 0x8004, 0x8007, + 0x48C1, 0x48C2, 0xC502, 0xBD00, 0x07BC, 0xC602, 0xBE00, 0x0000, 0xC602, + 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000 + }; + + rtl8168_hw_disable_mac_mcu_bps(dev); + + for (i = 0; i < 
ARRAY_SIZE(mcu_patch_code_8168fp_2); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168fp_2[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x2AAC); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x0B14); + rtl8168_mac_ocp_write(tp, 0xFC2C, 0x0BE4); + rtl8168_mac_ocp_write(tp, 0xFC2E, 0x2A5C); + rtl8168_mac_ocp_write(tp, 0xFC30, 0x07B0); + + if (tp->HwSuppSerDesPhyVer == 1) + rtl8168_mac_ocp_write(tp, 0xFC38, 0x001F); + else + rtl8168_mac_ocp_write(tp, 0xFC38, 0x001E); + +} + +static void +rtl8168_set_mac_mcu_8168fp_3(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 i; + static const u16 mcu_patch_code_8168fp_3[] = { + 0xE008, 0xE053, 0xE057, 0xE059, 0xE05B, 0xE05D, 0xE05F, 0xE061, 0xB400, + 0xB401, 0xB402, 0xB403, 0xB404, 0xB405, 0xC03A, 0x7206, 0x49AE, 0xF1FE, + 0xC137, 0x9904, 0xC136, 0x9906, 0x7206, 0x49AE, 0xF1FE, 0x7200, 0x49A0, + 0xF10B, 0xC52F, 0xC12E, 0xC232, 0xC332, 0xE812, 0xC331, 0xE810, 0xC330, + 0xE80E, 0xE018, 0xC126, 0xC229, 0xC525, 0xC328, 0xE808, 0xC523, 0xC326, + 0xE805, 0xC521, 0xC324, 0xE802, 0xE00C, 0x740E, 0x49CE, 0xF1FE, 0x9908, + 0x9D0A, 0x9A0C, 0x9B0E, 0x740E, 0x49CE, 0xF1FE, 0xFF80, 0xB005, 0xB004, + 0xB003, 0xB002, 0xB001, 0xB000, 0xC604, 0xC002, 0xB800, 0x2B16, 0xE000, + 0xE8E0, 0xF128, 0x3DC2, 0xFFFF, 0x10EC, 0x816A, 0x816D, 0x816C, 0xF000, + 0x8002, 0x8004, 0x8007, 0x48C1, 0x48C2, 0xC502, 0xBD00, 0x07BC, 0xC602, + 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, + 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000, 0xC602, 0xBE00, 0x0000 + }; + + rtl8168_hw_disable_mac_mcu_bps(dev); + + for (i = 0; i < ARRAY_SIZE(mcu_patch_code_8168fp_3); i++) { + rtl8168_mac_ocp_write(tp, 0xF800 + i * 2, mcu_patch_code_8168fp_3[i]); + } + + rtl8168_mac_ocp_write(tp, 0xFC26, 0x8000); + + rtl8168_mac_ocp_write(tp, 0xFC28, 0x2B14); + rtl8168_mac_ocp_write(tp, 0xFC2A, 0x07B0); + + rtl8168_mac_ocp_write(tp, 0xFC38, 0x0003); +} + +static void +rtl8168_hw_mac_mcu_config(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (tp->NotWrMcuPatchCode == TRUE) return; + + switch (tp->mcfg) { + case CFG_METHOD_21: + rtl8168_set_mac_mcu_8168g_1(dev); + break; + case CFG_METHOD_24: + rtl8168_set_mac_mcu_8168gu_1(dev); + break; + case CFG_METHOD_25: + rtl8168_set_mac_mcu_8168gu_2(dev); + break; + case CFG_METHOD_26: + rtl8168_set_mac_mcu_8411b_1(dev); + break; + case CFG_METHOD_27: + rtl8168_set_mac_mcu_8168ep_1(dev); + break; + case CFG_METHOD_28: + rtl8168_set_mac_mcu_8168ep_2(dev); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + rtl8168_set_mac_mcu_8168h_1(dev); + break; + case CFG_METHOD_31: + rtl8168_set_mac_mcu_8168fp_1(dev); + break; + case CFG_METHOD_32: + rtl8168_set_mac_mcu_8168fp_2(dev); + break; + case CFG_METHOD_33: + rtl8168_set_mac_mcu_8168fp_3(dev); + break; + } +} +#endif + +#ifdef ENABLE_USE_FIRMWARE_FILE +static void rtl8168_release_firmware(struct rtl8168_private *tp) +{ + if (tp->rtl_fw) { + rtl8168_fw_release_firmware(tp->rtl_fw); + kfree(tp->rtl_fw); + tp->rtl_fw = NULL; + } +} + +void rtl8168_apply_firmware(struct rtl8168_private *tp) +{ + /* TODO: release firmware if rtl_fw_write_firmware signals failure. */ + if (tp->rtl_fw) { + rtl8168_fw_write_firmware(tp, tp->rtl_fw); + /* At least one firmware doesn't reset tp->ocp_base. 
*/ + tp->ocp_base = OCP_STD_PHY_BASE; + + /* PHY soft reset may still be in progress */ + //phy_read_poll_timeout(tp->phydev, MII_BMCR, val, + // !(val & BMCR_RESET), + // 50000, 600000, true); + rtl8168_wait_phy_reset_complete(tp); + + tp->hw_ram_code_ver = rtl8168_get_hw_phy_mcu_code_ver(tp); + tp->sw_ram_code_ver = tp->hw_ram_code_ver; + tp->HwHasWrRamCodeToMicroP = TRUE; + } +} +#endif + +static void +rtl8168_hw_init(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 csi_tmp; + + if (tp->HwSuppAspmClkIntrLock) { + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) & ~BIT_7); + rtl8168_enable_cfg9346_write(tp); + rtl8168_hw_aspm_clkreq_enable(tp, false); + rtl8168_disable_cfg9346_write(tp); + } + + //Disable UPS + if (HW_SUPPORT_UPS_MODE(tp)) + rtl8168_mac_ocp_write(tp, 0xD400, rtl8168_mac_ocp_read( tp, 0xD400) & ~(BIT_0)); + + //Disable DMA Aggregation + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mac_ocp_write(tp, 0xE63E, rtl8168_mac_ocp_read( tp, 0xE63E) & ~(BIT_3 | BIT_2 | BIT_1)); + rtl8168_mac_ocp_write(tp, 0xE63E, rtl8168_mac_ocp_read( tp, 0xE63E) | (BIT_0)); + rtl8168_mac_ocp_write(tp, 0xE63E, rtl8168_mac_ocp_read( tp, 0xE63E) & ~(BIT_0)); + rtl8168_mac_ocp_write(tp, 0xC094, 0x0); + rtl8168_mac_ocp_write(tp, 0xC09E, 0x0); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + RTL_W8(tp, DBG_reg, RTL_R8(tp, DBG_reg) | BIT_1 | BIT_7); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + RTL_W8(tp, 0xF2, (RTL_R8(tp, 0xF2) & ~(BIT_2 | BIT_1 | BIT_0))); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + if (aspm) { + RTL_W8(tp, 0x6E, RTL_R8(tp, 0x6E) | BIT_6); + rtl8168_eri_write(tp, 0x1AE, 2, 0x0403, ERIAR_ExGMAC); + } + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_29: + case CFG_METHOD_30: + if (aspm) { + if ((rtl8168_mac_ocp_read(tp, 0xDC00) & BIT_3) || (RTL_R8(tp, Config0) & 0x07)) { + RTL_W8(tp, 0x6E, RTL_R8(tp, 0x6E) | BIT_6); + rtl8168_eri_write(tp, 0x1AE, 2, 0x0403, ERIAR_ExGMAC); + } + } + break; + } + + if (tp->mcfg == CFG_METHOD_10 || tp->mcfg == CFG_METHOD_14 || tp->mcfg == CFG_METHOD_15) + RTL_W8(tp, 0xF3, RTL_R8(tp, 0xF3) | BIT_2); + +#ifndef ENABLE_USE_FIRMWARE_FILE + if (!tp->rtl_fw) + rtl8168_hw_mac_mcu_config(dev); +#endif + + /*disable ocp phy power saving*/ + if (tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) + if (!tp->dash_printer_enabled) + rtl8168_disable_ocp_phy_power_saving(dev); + + //Set PCIE uncorrectable error status mask pcie 0x108 + csi_tmp = rtl8168_csi_read(tp, 0x108); + csi_tmp |= BIT_20; + rtl8168_csi_write(tp, 0x108, csi_tmp); + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + csi_tmp = rtl8168_eri_read(tp, 0x1AB, 1, ERIAR_ExGMAC); + csi_tmp |= ( BIT_2 | BIT_3 | BIT_4 | BIT_5 | BIT_6 | BIT_7 ); + rtl8168_eri_write(tp, 0x1AB, 1, csi_tmp, ERIAR_ExGMAC); + break; + } + + rtl8168_set_pci_pme(tp, 0); + + if (s0_magic_packet == 1) + rtl8168_enable_magic_packet(dev); + +#ifdef ENABLE_USE_FIRMWARE_FILE + if 
(tp->rtl_fw && + !(HW_DASH_SUPPORT_TYPE_3(tp) && + tp->HwPkgDet == 0x06)) + rtl8168_apply_firmware(tp); +#endif +} + +static void +rtl8168_hw_ephy_config(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 ephy_data; + + switch (tp->mcfg) { + case CFG_METHOD_4: + /*Set EPHY registers begin*/ + /*Set EPHY register offset 0x02 bit 11 to 0 and bit 12 to 1*/ + ephy_data = rtl8168_ephy_read(tp, 0x02); + ephy_data &= ~BIT_11; + ephy_data |= BIT_12; + rtl8168_ephy_write(tp, 0x02, ephy_data); + + /*Set EPHY register offset 0x03 bit 1 to 1*/ + ephy_data = rtl8168_ephy_read(tp, 0x03); + ephy_data |= (1 << 1); + rtl8168_ephy_write(tp, 0x03, ephy_data); + + /*Set EPHY register offset 0x06 bit 7 to 0*/ + ephy_data = rtl8168_ephy_read(tp, 0x06); + ephy_data &= ~(1 << 7); + rtl8168_ephy_write(tp, 0x06, ephy_data); + /*Set EPHY registers end*/ + + break; + case CFG_METHOD_5: + /* set EPHY registers */ + SetPCIePhyBit(tp, 0x01, BIT_0); + + ClearAndSetPCIePhyBit(tp, + 0x03, + BIT_10, + BIT_5 + ); + + break; + case CFG_METHOD_9: + /* set EPHY registers */ + rtl8168_ephy_write(tp, 0x01, 0x7C7F); + rtl8168_ephy_write(tp, 0x02, 0x011F); + if (tp->eeprom_type != EEPROM_TYPE_NONE) { + ClearAndSetPCIePhyBit(tp, + 0x03, + 0xFFB0, + 0x05B0 + ); + } else { + ClearAndSetPCIePhyBit(tp, + 0x03, + 0xFFF0, + 0x05F0 + ); + } + rtl8168_ephy_write(tp, 0x06, 0xB271); + rtl8168_ephy_write(tp, 0x07, 0xCE00); + + break; + case CFG_METHOD_10: + /* set EPHY registers */ + rtl8168_ephy_write(tp, 0x01, 0x6C7F); + rtl8168_ephy_write(tp, 0x02, 0x011F); + ClearAndSetPCIePhyBit(tp, + 0x03, + 0xFFF0, + 0x01B0 + ); + rtl8168_ephy_write(tp, 0x1A, 0x0546); + rtl8168_ephy_write(tp, 0x1C, 0x80C4); + rtl8168_ephy_write(tp, 0x1D, 0x78E5); + rtl8168_ephy_write(tp, 0x0A, 0x8100); + + break; + case CFG_METHOD_12: + case CFG_METHOD_13: + ephy_data = rtl8168_ephy_read(tp, 0x0B); + rtl8168_ephy_write(tp, 0x0B, ephy_data|0x48); + ephy_data = rtl8168_ephy_read(tp, 0x19); + ephy_data &= ~0x20; + rtl8168_ephy_write(tp, 0x19, ephy_data|0x50); + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= ~0x100; + rtl8168_ephy_write(tp, 0x0C, ephy_data|0x20); + ephy_data = rtl8168_ephy_read(tp, 0x10); + ephy_data &= ~0x04; + rtl8168_ephy_write(tp, 0x10, ephy_data); + + break; + case CFG_METHOD_14: + case CFG_METHOD_15: + /* set EPHY registers */ + ephy_data = rtl8168_ephy_read(tp, 0x00) & ~0x0200; + ephy_data |= 0x0100; + rtl8168_ephy_write(tp, 0x00, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data |= 0x0004; + rtl8168_ephy_write(tp, 0x00, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x06) & ~0x0002; + ephy_data |= 0x0001; + rtl8168_ephy_write(tp, 0x06, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x06); + ephy_data |= 0x0030; + rtl8168_ephy_write(tp, 0x06, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x07); + ephy_data |= 0x2000; + rtl8168_ephy_write(tp, 0x07, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data |= 0x0020; + rtl8168_ephy_write(tp, 0x00, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x03) & ~0x5800; + ephy_data |= 0x2000; + rtl8168_ephy_write(tp, 0x03, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x03); + ephy_data |= 0x0001; + rtl8168_ephy_write(tp, 0x03, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x01) & ~0x0800; + ephy_data |= 0x1000; + rtl8168_ephy_write(tp, 0x01, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x07); + ephy_data |= 0x4000; + rtl8168_ephy_write(tp, 0x07, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x1E); + ephy_data |= 
0x2000; + rtl8168_ephy_write(tp, 0x1E, ephy_data); + + rtl8168_ephy_write(tp, 0x19, 0xFE6C); + + ephy_data = rtl8168_ephy_read(tp, 0x0A); + ephy_data |= 0x0040; + rtl8168_ephy_write(tp, 0x0A, ephy_data); + + break; + case CFG_METHOD_16: + case CFG_METHOD_17: + if (tp->mcfg == CFG_METHOD_16) { + rtl8168_ephy_write(tp, 0x06, 0xF020); + rtl8168_ephy_write(tp, 0x07, 0x01FF); + rtl8168_ephy_write(tp, 0x00, 0x5027); + rtl8168_ephy_write(tp, 0x01, 0x0003); + rtl8168_ephy_write(tp, 0x02, 0x2D16); + rtl8168_ephy_write(tp, 0x03, 0x6D49); + rtl8168_ephy_write(tp, 0x08, 0x0006); + rtl8168_ephy_write(tp, 0x0A, 0x00C8); + } + + ephy_data = rtl8168_ephy_read(tp, 0x09); + ephy_data |= BIT_7; + rtl8168_ephy_write(tp, 0x09, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x19); + ephy_data |= (BIT_2 | BIT_5 | BIT_9); + rtl8168_ephy_write(tp, 0x19, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data |= BIT_3; + rtl8168_ephy_write(tp, 0x00, ephy_data); + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= ~(BIT_13 | BIT_12 | BIT_11 | BIT_10 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4); + ephy_data |= BIT_9; + rtl8168_ephy_write(tp, 0x0C, ephy_data); + + break; + case CFG_METHOD_18: + case CFG_METHOD_19: + if (tp->mcfg == CFG_METHOD_18) { + ephy_data = rtl8168_ephy_read(tp, 0x06); + ephy_data |= BIT_5; + ephy_data &= ~(BIT_7 | BIT_6); + rtl8168_ephy_write(tp, 0x06, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x08); + ephy_data |= BIT_1; + ephy_data &= ~BIT_0; + rtl8168_ephy_write(tp, 0x08, ephy_data); + } + + ephy_data = rtl8168_ephy_read(tp, 0x09); + ephy_data |= BIT_7; + rtl8168_ephy_write(tp, 0x09, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x19); + ephy_data |= (BIT_2 | BIT_5 | BIT_9); + rtl8168_ephy_write(tp, 0x19, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data |= BIT_3; + rtl8168_ephy_write(tp, 0x00, ephy_data); + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= ~(BIT_13 | BIT_12 | BIT_11 | BIT_10 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4); + ephy_data |= BIT_9; + rtl8168_ephy_write(tp, 0x0C, ephy_data); + + break; + case CFG_METHOD_20: + ephy_data = rtl8168_ephy_read(tp, 0x06); + ephy_data |= BIT_5; + ephy_data &= ~(BIT_7 | BIT_6); + rtl8168_ephy_write(tp, 0x06, ephy_data); + + rtl8168_ephy_write(tp, 0x0f, 0x5200); + + ephy_data = rtl8168_ephy_read(tp, 0x19); + ephy_data |= (BIT_2 | BIT_5 | BIT_9); + rtl8168_ephy_write(tp, 0x19, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data |= BIT_3; + rtl8168_ephy_write(tp, 0x00, ephy_data); + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= ~(BIT_13 | BIT_12 | BIT_11 | BIT_10 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4); + ephy_data |= BIT_9; + rtl8168_ephy_write(tp, 0x0C, ephy_data); + + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data &= ~(BIT_3); + rtl8168_ephy_write(tp, 0x00, ephy_data); + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= ~(BIT_13 | BIT_12 | BIT_11 | BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4); + ephy_data |= (BIT_5 | BIT_11); + rtl8168_ephy_write(tp, 0x0C, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x1E); + ephy_data |= (BIT_0); + rtl8168_ephy_write(tp, 0x1E, ephy_data); + + ephy_data = rtl8168_ephy_read(tp, 0x19); + ephy_data &= ~(BIT_15); + rtl8168_ephy_write(tp, 0x19, ephy_data); + + break; + case CFG_METHOD_25: + ephy_data = rtl8168_ephy_read(tp, 0x00); + ephy_data &= ~BIT_3; + rtl8168_ephy_write(tp, 0x00, ephy_data); + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= 
~(BIT_13 | BIT_12 | BIT_11 | BIT_10| BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4); + ephy_data |= (BIT_5 | BIT_11); + rtl8168_ephy_write(tp, 0x0C, ephy_data); + + rtl8168_ephy_write(tp, 0x19, 0x7C00); + rtl8168_ephy_write(tp, 0x1E, 0x20EB); + rtl8168_ephy_write(tp, 0x0D, 0x1666); + rtl8168_ephy_write(tp, 0x00, 0x10A3); + rtl8168_ephy_write(tp, 0x06, 0xF050); + + SetPCIePhyBit(tp, 0x04, BIT_4); + ClearPCIePhyBit(tp, 0x1D, BIT_14); + + break; + case CFG_METHOD_26: + ClearPCIePhyBit(tp, 0x00, BIT_3); + ClearAndSetPCIePhyBit( tp, + 0x0C, + (BIT_13 | BIT_12 | BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_4), + (BIT_5 | BIT_11) + ); + SetPCIePhyBit(tp, 0x1E, BIT_0); + ClearPCIePhyBit(tp, 0x19, BIT_15); + + ClearPCIePhyBit(tp, 0x19, (BIT_5 | BIT_0)); + + SetPCIePhyBit(tp, 0x1E, BIT_13); + ClearPCIePhyBit(tp, 0x0D, BIT_8); + SetPCIePhyBit(tp, 0x0D, BIT_9); + SetPCIePhyBit(tp, 0x00, BIT_7); + + SetPCIePhyBit(tp, 0x06, BIT_4); + + SetPCIePhyBit(tp, 0x04, BIT_4); + SetPCIePhyBit(tp, 0x1D, BIT_14); + + break; + case CFG_METHOD_23: + rtl8168_ephy_write(tp, 0x00, 0x10AB); + rtl8168_ephy_write(tp, 0x06, 0xf030); + rtl8168_ephy_write(tp, 0x08, 0x2006); + rtl8168_ephy_write(tp, 0x0D, 0x1666); + + ephy_data = rtl8168_ephy_read(tp, 0x0C); + ephy_data &= ~(BIT_13 | BIT_12 | BIT_11 | BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4); + rtl8168_ephy_write(tp, 0x0C, ephy_data); + + break; + case CFG_METHOD_27: + rtl8168_ephy_write(tp, 0x00, 0x10A3); + rtl8168_ephy_write(tp, 0x19, 0xFC00); + rtl8168_ephy_write(tp, 0x1E, 0x20EA); + + break; + case CFG_METHOD_28: + rtl8168_ephy_write(tp, 0x00, 0x10AB); + rtl8168_ephy_write(tp, 0x19, 0xFC00); + rtl8168_ephy_write(tp, 0x1E, 0x20EB); + rtl8168_ephy_write(tp, 0x0D, 0x1666); + ClearPCIePhyBit(tp, 0x0B, BIT_0); + SetPCIePhyBit(tp, 0x1D, BIT_14); + ClearAndSetPCIePhyBit(tp, + 0x0C, + BIT_13 | BIT_12 | BIT_11 | BIT_10 | BIT_8 | BIT_7 | BIT_6 | BIT_5, + BIT_9 | BIT_4 + ); + + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + ClearPCIePhyBit(tp, 0x1E, BIT_11); + + SetPCIePhyBit(tp, 0x1E, BIT_0); + SetPCIePhyBit(tp, 0x1D, BIT_11); + + rtl8168_ephy_write(tp, 0x05, 0x2089); + rtl8168_ephy_write(tp, 0x06, 0x5881); + + rtl8168_ephy_write(tp, 0x04, 0x854A); + rtl8168_ephy_write(tp, 0x01, 0x068B); + + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + ClearAndSetPCIePhyBit(tp, + 0x19, + BIT_6, + (BIT_12| BIT_8) + ); + ClearAndSetPCIePhyBit(tp, + 0x59, + BIT_6, + (BIT_12| BIT_8) + ); + + ClearPCIePhyBit(tp, 0x0C, BIT_4); + ClearPCIePhyBit(tp, 0x4C, BIT_4); + ClearPCIePhyBit(tp, 0x0B, BIT_0); + + break; + } +} + +static int +rtl8168_set_phy_mcu_patch_request(struct rtl8168_private *tp) +{ + u16 PhyRegValue; + u32 WaitCnt; + int retval = TRUE; + + switch (tp->mcfg) { + case CFG_METHOD_21 ... CFG_METHOD_33: + rtl8168_mdio_write(tp,0x1f, 0x0B82); + rtl8168_set_eth_phy_bit(tp, 0x10, BIT_4); + + rtl8168_mdio_write(tp,0x1f, 0x0B80); + WaitCnt = 0; + do { + PhyRegValue = rtl8168_mdio_read(tp, 0x10); + udelay(100); + WaitCnt++; + } while (!(PhyRegValue & BIT_6) && (WaitCnt < 1000)); + + if (!(PhyRegValue & BIT_6) && (WaitCnt == 1000)) retval = FALSE; + + rtl8168_mdio_write(tp,0x1f, 0x0000); + break; + } + + return retval; +} + +static int +rtl8168_clear_phy_mcu_patch_request(struct rtl8168_private *tp) +{ + u16 PhyRegValue; + u32 WaitCnt; + int retval = TRUE; + + switch (tp->mcfg) { + case CFG_METHOD_21 ... 
CFG_METHOD_33: + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + rtl8168_clear_eth_phy_bit(tp, 0x10, BIT_4); + + rtl8168_mdio_write(tp,0x1f, 0x0B80); + WaitCnt = 0; + do { + PhyRegValue = rtl8168_mdio_read(tp, 0x10); + udelay(100); + WaitCnt++; + } while ((PhyRegValue & BIT_6) && (WaitCnt < 1000)); + + if ((PhyRegValue & BIT_6) && (WaitCnt == 1000)) retval = FALSE; + + rtl8168_mdio_write(tp,0x1f, 0x0000); + break; + } + + return retval; +} + +static u16 +rtl8168_get_hw_phy_mcu_code_ver(struct rtl8168_private *tp) +{ + u16 hw_ram_code_ver = ~0; + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B60); + hw_ram_code_ver = rtl8168_mdio_read(tp, 0x06); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B30); + hw_ram_code_ver = rtl8168_mdio_read(tp, 0x06); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x801E); + hw_ram_code_ver = rtl8168_mdio_read(tp, 0x14); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + break; + default: + tp->hw_ram_code_ver = ~0; + break; + } + + return hw_ram_code_ver; +} + +#ifndef ENABLE_USE_FIRMWARE_FILE +static void +rtl8168_enable_phy_disable_mode(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->HwSuppCheckPhyDisableModeVer) { + case 1: + rtl8168_mac_ocp_write(tp, 0xDC20, rtl8168_mac_ocp_read(tp, 0xDC20) | BIT_1); + break; + case 2: + case 3: + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_5); + break; + } + + dprintk("enable phy disable mode.\n"); +} + +static void +rtl8168_disable_phy_disable_mode(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->HwSuppCheckPhyDisableModeVer) { + case 1: + rtl8168_mac_ocp_write(tp, 0xDC20, rtl8168_mac_ocp_read(tp, 0xDC20) & ~BIT_1); + break; + case 2: + case 3: + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) & ~BIT_5); + break; + } + + mdelay(1); + + dprintk("disable phy disable mode.\n"); +} + +static int +rtl8168_check_hw_phy_mcu_code_ver(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int ram_code_ver_match = 0; + + tp->hw_ram_code_ver = rtl8168_get_hw_phy_mcu_code_ver(tp); + + if ( tp->hw_ram_code_ver == tp->sw_ram_code_ver) { + ram_code_ver_match = 1; + tp->HwHasWrRamCodeToMicroP = TRUE; + } + + return ram_code_ver_match; +} + +static void +rtl8168_write_hw_phy_mcu_code_ver(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B60); + rtl8168_mdio_write(tp, 0x06, tp->sw_ram_code_ver); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + tp->hw_ram_code_ver = tp->sw_ram_code_ver; + break; + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B30); + rtl8168_mdio_write(tp, 0x06, tp->sw_ram_code_ver); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + 
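+        /* Keep the driver's cached copy in sync with the version word just
+         * written to the PHY, so rtl8168_check_hw_phy_mcu_code_ver() will
+         * report a match on later passes. */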
tp->hw_ram_code_ver = tp->sw_ram_code_ver; + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x801E); + rtl8168_mdio_write(tp, 0x14, tp->sw_ram_code_ver); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + tp->hw_ram_code_ver = tp->sw_ram_code_ver; + break; + } +} +static int +rtl8168_phy_ram_code_check(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 PhyRegValue; + int retval = TRUE; + + switch(tp->mcfg) { + case CFG_METHOD_21: + rtl8168_mdio_write(tp, 0x1f, 0x0A40); + PhyRegValue = rtl8168_mdio_read(tp, 0x10); + PhyRegValue &= ~(BIT_11); + rtl8168_mdio_write(tp, 0x10, PhyRegValue); + + + rtl8168_mdio_write(tp, 0x1f, 0x0A00); + PhyRegValue = rtl8168_mdio_read(tp, 0x10); + PhyRegValue &= ~(BIT_12 | BIT_13 | BIT_14 | BIT_15); + rtl8168_mdio_write(tp, 0x10, PhyRegValue); + + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8010); + PhyRegValue = rtl8168_mdio_read(tp, 0x14); + PhyRegValue &= ~(BIT_11); + rtl8168_mdio_write(tp, 0x14, PhyRegValue); + + retval = rtl8168_set_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp, 0x1f, 0x0A40); + rtl8168_mdio_write(tp, 0x10, 0x0140); + + rtl8168_mdio_write(tp, 0x1f, 0x0A4A); + PhyRegValue = rtl8168_mdio_read(tp, 0x13); + PhyRegValue &= ~(BIT_6); + PhyRegValue |= (BIT_7); + rtl8168_mdio_write(tp, 0x13, PhyRegValue); + + rtl8168_mdio_write(tp, 0x1f, 0x0A44); + PhyRegValue = rtl8168_mdio_read(tp, 0x14); + PhyRegValue |= (BIT_2); + rtl8168_mdio_write(tp, 0x14, PhyRegValue); + + rtl8168_mdio_write(tp, 0x1f, 0x0A50); + PhyRegValue = rtl8168_mdio_read(tp, 0x11); + PhyRegValue |= (BIT_11|BIT_12); + rtl8168_mdio_write(tp, 0x11, PhyRegValue); + + retval = rtl8168_clear_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp, 0x1f, 0x0A40); + rtl8168_mdio_write(tp, 0x10, 0x1040); + + rtl8168_mdio_write(tp, 0x1f, 0x0A4A); + PhyRegValue = rtl8168_mdio_read(tp, 0x13); + PhyRegValue &= ~(BIT_6|BIT_7); + rtl8168_mdio_write(tp, 0x13, PhyRegValue); + + rtl8168_mdio_write(tp, 0x1f, 0x0A44); + PhyRegValue = rtl8168_mdio_read(tp, 0x14); + PhyRegValue &= ~(BIT_2); + rtl8168_mdio_write(tp, 0x14, PhyRegValue); + + rtl8168_mdio_write(tp, 0x1f, 0x0A50); + PhyRegValue = rtl8168_mdio_read(tp, 0x11); + PhyRegValue &= ~(BIT_11|BIT_12); + rtl8168_mdio_write(tp, 0x11, PhyRegValue); + + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8010); + PhyRegValue = rtl8168_mdio_read(tp, 0x14); + PhyRegValue |= (BIT_11); + rtl8168_mdio_write(tp, 0x14, PhyRegValue); + + retval = rtl8168_set_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp, 0x1f, 0x0A20); + PhyRegValue = rtl8168_mdio_read(tp, 0x13); + if (PhyRegValue & BIT_11) { + if (PhyRegValue & BIT_10) { + retval = FALSE; + } + } + + retval = rtl8168_clear_phy_mcu_patch_request(tp); + + mdelay(2); + break; + default: + break; + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + return retval; +} + +static void +rtl8168_set_phy_ram_code_check_fail_flag(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u16 TmpUshort; + + switch(tp->mcfg) { + case CFG_METHOD_21: + TmpUshort = rtl8168_mac_ocp_read(tp, 0xD3C0); + TmpUshort |= BIT_0; + rtl8168_mac_ocp_write(tp, 0xD3C0, TmpUshort); + break; + } +} + +static void 
+rtl8168_set_phy_mcu_8168e_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val,i; + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x1800); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x17, 0x0117); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + rtl8168_mdio_write(tp, 0x1B, 0x5000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x4104); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1E); + gphy_val &= 0x03FF; + if (gphy_val == 0x000C) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x07); + if ((gphy_val & BIT_5) == 0) + break; + } + gphy_val = rtl8168_mdio_read(tp, 0x07); + if (gphy_val & BIT_5) { + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x00a1); + rtl8168_mdio_write(tp, 0x17, 0x1000); + rtl8168_mdio_write(tp, 0x17, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2000); + rtl8168_mdio_write(tp, 0x1e, 0x002f); + rtl8168_mdio_write(tp, 0x18, 0x9bfb); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x07, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x08); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x08, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + rtl8168_mdio_write(tp, 0x15, 0x000e); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15, 0x0010); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x0018); + rtl8168_mdio_write(tp, 0x19, 0x4801); + rtl8168_mdio_write(tp, 0x15, 0x0019); + rtl8168_mdio_write(tp, 0x19, 0x6801); + rtl8168_mdio_write(tp, 0x15, 0x001a); + rtl8168_mdio_write(tp, 0x19, 0x66a1); + rtl8168_mdio_write(tp, 0x15, 0x001f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0020); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0021); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0022); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0023); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0024); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0025); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0026); + rtl8168_mdio_write(tp, 0x19, 0x40ea); + rtl8168_mdio_write(tp, 0x15, 0x0027); + rtl8168_mdio_write(tp, 0x19, 0x4503); + rtl8168_mdio_write(tp, 0x15, 0x0028); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0029); + rtl8168_mdio_write(tp, 0x19, 0xa631); + rtl8168_mdio_write(tp, 0x15, 0x002a); + rtl8168_mdio_write(tp, 0x19, 0x9717); + rtl8168_mdio_write(tp, 0x15, 0x002b); + rtl8168_mdio_write(tp, 0x19, 0x302c); + rtl8168_mdio_write(tp, 0x15, 0x002c); + rtl8168_mdio_write(tp, 0x19, 0x4802); + rtl8168_mdio_write(tp, 0x15, 0x002d); + rtl8168_mdio_write(tp, 0x19, 0x58da); + rtl8168_mdio_write(tp, 0x15, 0x002e); + 
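+        /* The unrolled 0x15/0x19 write pairs in this function appear to load
+         * the PHY MCU patch RAM: each 0x15 write selects an address and the
+         * following 0x19 write supplies its data word.  This is inferred
+         * from the access pattern; the values themselves are undocumented
+         * vendor magic. */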
rtl8168_mdio_write(tp, 0x19, 0x400d); + rtl8168_mdio_write(tp, 0x15, 0x002f); + rtl8168_mdio_write(tp, 0x19, 0x4488); + rtl8168_mdio_write(tp, 0x15, 0x0030); + rtl8168_mdio_write(tp, 0x19, 0x9e00); + rtl8168_mdio_write(tp, 0x15, 0x0031); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0032); + rtl8168_mdio_write(tp, 0x19, 0x6481); + rtl8168_mdio_write(tp, 0x15, 0x0033); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0034); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0035); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0036); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0037); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0038); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0039); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x003a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x003b); + rtl8168_mdio_write(tp, 0x19, 0x63e8); + rtl8168_mdio_write(tp, 0x15, 0x003c); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x003d); + rtl8168_mdio_write(tp, 0x19, 0x59d4); + rtl8168_mdio_write(tp, 0x15, 0x003e); + rtl8168_mdio_write(tp, 0x19, 0x63f8); + rtl8168_mdio_write(tp, 0x15, 0x0040); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0041); + rtl8168_mdio_write(tp, 0x19, 0x30de); + rtl8168_mdio_write(tp, 0x15, 0x0044); + rtl8168_mdio_write(tp, 0x19, 0x480f); + rtl8168_mdio_write(tp, 0x15, 0x0045); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x0046); + rtl8168_mdio_write(tp, 0x19, 0x6680); + rtl8168_mdio_write(tp, 0x15, 0x0047); + rtl8168_mdio_write(tp, 0x19, 0x7c10); + rtl8168_mdio_write(tp, 0x15, 0x0048); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0049); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004f); + rtl8168_mdio_write(tp, 0x19, 0x40ea); + rtl8168_mdio_write(tp, 0x15, 0x0050); + rtl8168_mdio_write(tp, 0x19, 0x4503); + rtl8168_mdio_write(tp, 0x15, 0x0051); + rtl8168_mdio_write(tp, 0x19, 0x58ca); + rtl8168_mdio_write(tp, 0x15, 0x0052); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0053); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x0054); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x0055); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0056); + rtl8168_mdio_write(tp, 0x19, 0x3000); + rtl8168_mdio_write(tp, 0x15, 0x006E); + rtl8168_mdio_write(tp, 0x19, 0x9afa); + rtl8168_mdio_write(tp, 0x15, 0x00a1); + rtl8168_mdio_write(tp, 0x19, 0x3044); + rtl8168_mdio_write(tp, 0x15, 0x00ab); + rtl8168_mdio_write(tp, 0x19, 0x5820); + rtl8168_mdio_write(tp, 0x15, 0x00ac); + rtl8168_mdio_write(tp, 0x19, 0x5e04); + rtl8168_mdio_write(tp, 0x15, 0x00ad); + rtl8168_mdio_write(tp, 0x19, 0xb60c); + rtl8168_mdio_write(tp, 0x15, 0x00af); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15, 0x00b2); + rtl8168_mdio_write(tp, 0x19, 
0x30b9); + rtl8168_mdio_write(tp, 0x15, 0x00b9); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x00ba); + rtl8168_mdio_write(tp, 0x19, 0x480b); + rtl8168_mdio_write(tp, 0x15, 0x00bb); + rtl8168_mdio_write(tp, 0x19, 0x5e00); + rtl8168_mdio_write(tp, 0x15, 0x00bc); + rtl8168_mdio_write(tp, 0x19, 0x405f); + rtl8168_mdio_write(tp, 0x15, 0x00bd); + rtl8168_mdio_write(tp, 0x19, 0x4448); + rtl8168_mdio_write(tp, 0x15, 0x00be); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x00bf); + rtl8168_mdio_write(tp, 0x19, 0x4468); + rtl8168_mdio_write(tp, 0x15, 0x00c0); + rtl8168_mdio_write(tp, 0x19, 0x9c02); + rtl8168_mdio_write(tp, 0x15, 0x00c1); + rtl8168_mdio_write(tp, 0x19, 0x58a0); + rtl8168_mdio_write(tp, 0x15, 0x00c2); + rtl8168_mdio_write(tp, 0x19, 0xb605); + rtl8168_mdio_write(tp, 0x15, 0x00c3); + rtl8168_mdio_write(tp, 0x19, 0xc0d3); + rtl8168_mdio_write(tp, 0x15, 0x00c4); + rtl8168_mdio_write(tp, 0x19, 0x00e6); + rtl8168_mdio_write(tp, 0x15, 0x00c5); + rtl8168_mdio_write(tp, 0x19, 0xdaec); + rtl8168_mdio_write(tp, 0x15, 0x00c6); + rtl8168_mdio_write(tp, 0x19, 0x00fa); + rtl8168_mdio_write(tp, 0x15, 0x00c7); + rtl8168_mdio_write(tp, 0x19, 0x9df9); + rtl8168_mdio_write(tp, 0x15, 0x00c8); + rtl8168_mdio_write(tp, 0x19, 0x307a); + rtl8168_mdio_write(tp, 0x15, 0x0112); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0113); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0114); + rtl8168_mdio_write(tp, 0x19, 0x63f0); + rtl8168_mdio_write(tp, 0x15, 0x0115); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0116); + rtl8168_mdio_write(tp, 0x19, 0x4418); + rtl8168_mdio_write(tp, 0x15, 0x0117); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x15, 0x0118); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x0119); + rtl8168_mdio_write(tp, 0x19, 0x64e1); + rtl8168_mdio_write(tp, 0x15, 0x011a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0150); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0151); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x0152); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0153); + rtl8168_mdio_write(tp, 0x19, 0x4540); + rtl8168_mdio_write(tp, 0x15, 0x0154); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0155); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x15, 0x0156); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0157); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0158); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0159); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x015a); + rtl8168_mdio_write(tp, 0x19, 0x30fe); + rtl8168_mdio_write(tp, 0x15, 0x021e); + rtl8168_mdio_write(tp, 0x19, 0x5410); + rtl8168_mdio_write(tp, 0x15, 0x0225); + rtl8168_mdio_write(tp, 0x19, 0x5400); + rtl8168_mdio_write(tp, 0x15, 0x023D); + rtl8168_mdio_write(tp, 0x19, 0x4050); + rtl8168_mdio_write(tp, 0x15, 0x0295); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x02bd); + rtl8168_mdio_write(tp, 0x19, 0xa523); + rtl8168_mdio_write(tp, 0x15, 0x02be); + rtl8168_mdio_write(tp, 0x19, 0x32ca); + rtl8168_mdio_write(tp, 0x15, 0x02ca); + rtl8168_mdio_write(tp, 0x19, 0x48b3); + rtl8168_mdio_write(tp, 0x15, 0x02cb); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 
0x15, 0x02cc); + rtl8168_mdio_write(tp, 0x19, 0x4823); + rtl8168_mdio_write(tp, 0x15, 0x02cd); + rtl8168_mdio_write(tp, 0x19, 0x4510); + rtl8168_mdio_write(tp, 0x15, 0x02ce); + rtl8168_mdio_write(tp, 0x19, 0xb63a); + rtl8168_mdio_write(tp, 0x15, 0x02cf); + rtl8168_mdio_write(tp, 0x19, 0x7dc8); + rtl8168_mdio_write(tp, 0x15, 0x02d6); + rtl8168_mdio_write(tp, 0x19, 0x9bf8); + rtl8168_mdio_write(tp, 0x15, 0x02d8); + rtl8168_mdio_write(tp, 0x19, 0x85f6); + rtl8168_mdio_write(tp, 0x15, 0x02d9); + rtl8168_mdio_write(tp, 0x19, 0x32e0); + rtl8168_mdio_write(tp, 0x15, 0x02e0); + rtl8168_mdio_write(tp, 0x19, 0x4834); + rtl8168_mdio_write(tp, 0x15, 0x02e1); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x02e2); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x02e3); + rtl8168_mdio_write(tp, 0x19, 0x4824); + rtl8168_mdio_write(tp, 0x15, 0x02e4); + rtl8168_mdio_write(tp, 0x19, 0x4520); + rtl8168_mdio_write(tp, 0x15, 0x02e5); + rtl8168_mdio_write(tp, 0x19, 0x4008); + rtl8168_mdio_write(tp, 0x15, 0x02e6); + rtl8168_mdio_write(tp, 0x19, 0x4560); + rtl8168_mdio_write(tp, 0x15, 0x02e7); + rtl8168_mdio_write(tp, 0x19, 0x9d04); + rtl8168_mdio_write(tp, 0x15, 0x02e8); + rtl8168_mdio_write(tp, 0x19, 0x48c4); + rtl8168_mdio_write(tp, 0x15, 0x02e9); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02ea); + rtl8168_mdio_write(tp, 0x19, 0x4844); + rtl8168_mdio_write(tp, 0x15, 0x02eb); + rtl8168_mdio_write(tp, 0x19, 0x7dc8); + rtl8168_mdio_write(tp, 0x15, 0x02f0); + rtl8168_mdio_write(tp, 0x19, 0x9cf7); + rtl8168_mdio_write(tp, 0x15, 0x02f1); + rtl8168_mdio_write(tp, 0x19, 0xdf94); + rtl8168_mdio_write(tp, 0x15, 0x02f2); + rtl8168_mdio_write(tp, 0x19, 0x0002); + rtl8168_mdio_write(tp, 0x15, 0x02f3); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x02f4); + rtl8168_mdio_write(tp, 0x19, 0xb614); + rtl8168_mdio_write(tp, 0x15, 0x02f5); + rtl8168_mdio_write(tp, 0x19, 0xc42b); + rtl8168_mdio_write(tp, 0x15, 0x02f6); + rtl8168_mdio_write(tp, 0x19, 0x00d4); + rtl8168_mdio_write(tp, 0x15, 0x02f7); + rtl8168_mdio_write(tp, 0x19, 0xc455); + rtl8168_mdio_write(tp, 0x15, 0x02f8); + rtl8168_mdio_write(tp, 0x19, 0x0093); + rtl8168_mdio_write(tp, 0x15, 0x02f9); + rtl8168_mdio_write(tp, 0x19, 0x92ee); + rtl8168_mdio_write(tp, 0x15, 0x02fa); + rtl8168_mdio_write(tp, 0x19, 0xefed); + rtl8168_mdio_write(tp, 0x15, 0x02fb); + rtl8168_mdio_write(tp, 0x19, 0x3312); + rtl8168_mdio_write(tp, 0x15, 0x0312); + rtl8168_mdio_write(tp, 0x19, 0x49b5); + rtl8168_mdio_write(tp, 0x15, 0x0313); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x0314); + rtl8168_mdio_write(tp, 0x19, 0x4d00); + rtl8168_mdio_write(tp, 0x15, 0x0315); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x031e); + rtl8168_mdio_write(tp, 0x19, 0x404f); + rtl8168_mdio_write(tp, 0x15, 0x031f); + rtl8168_mdio_write(tp, 0x19, 0x44c8); + rtl8168_mdio_write(tp, 0x15, 0x0320); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0321); + rtl8168_mdio_write(tp, 0x19, 0x00e7); + rtl8168_mdio_write(tp, 0x15, 0x0322); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0323); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x0324); + rtl8168_mdio_write(tp, 0x19, 0x4d48); + rtl8168_mdio_write(tp, 0x15, 0x0325); + rtl8168_mdio_write(tp, 0x19, 0x3327); + rtl8168_mdio_write(tp, 0x15, 0x0326); + rtl8168_mdio_write(tp, 0x19, 0x4d40); + rtl8168_mdio_write(tp, 0x15, 0x0327); + 
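+        /*
+         * A minimal sketch (not vendor code): these unrolled pairs could
+         * equivalently be driven from a table.  e1_patch is a hypothetical
+         * name; the two entries shown are real pairs from this sequence.
+         *
+         *     static const struct { u16 addr, data; } e1_patch[] = {
+         *             { 0x0018, 0x4801 }, { 0x0019, 0x6801 },
+         *     };
+         *     unsigned int k;
+         *
+         *     for (k = 0; k < ARRAY_SIZE(e1_patch); k++) {
+         *             rtl8168_mdio_write(tp, 0x15, e1_patch[k].addr);
+         *             rtl8168_mdio_write(tp, 0x19, e1_patch[k].data);
+         *     }
+         */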
rtl8168_mdio_write(tp, 0x19, 0xc8d7); + rtl8168_mdio_write(tp, 0x15, 0x0328); + rtl8168_mdio_write(tp, 0x19, 0x0003); + rtl8168_mdio_write(tp, 0x15, 0x0329); + rtl8168_mdio_write(tp, 0x19, 0x7c20); + rtl8168_mdio_write(tp, 0x15, 0x032a); + rtl8168_mdio_write(tp, 0x19, 0x4c20); + rtl8168_mdio_write(tp, 0x15, 0x032b); + rtl8168_mdio_write(tp, 0x19, 0xc8ed); + rtl8168_mdio_write(tp, 0x15, 0x032c); + rtl8168_mdio_write(tp, 0x19, 0x00f4); + rtl8168_mdio_write(tp, 0x15, 0x032d); + rtl8168_mdio_write(tp, 0x19, 0x82b3); + rtl8168_mdio_write(tp, 0x15, 0x032e); + rtl8168_mdio_write(tp, 0x19, 0xd11d); + rtl8168_mdio_write(tp, 0x15, 0x032f); + rtl8168_mdio_write(tp, 0x19, 0x00b1); + rtl8168_mdio_write(tp, 0x15, 0x0330); + rtl8168_mdio_write(tp, 0x19, 0xde18); + rtl8168_mdio_write(tp, 0x15, 0x0331); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x0332); + rtl8168_mdio_write(tp, 0x19, 0x91ee); + rtl8168_mdio_write(tp, 0x15, 0x0333); + rtl8168_mdio_write(tp, 0x19, 0x3339); + rtl8168_mdio_write(tp, 0x15, 0x033a); + rtl8168_mdio_write(tp, 0x19, 0x4064); + rtl8168_mdio_write(tp, 0x15, 0x0340); + rtl8168_mdio_write(tp, 0x19, 0x9e06); + rtl8168_mdio_write(tp, 0x15, 0x0341); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0342); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x0343); + rtl8168_mdio_write(tp, 0x19, 0x4d48); + rtl8168_mdio_write(tp, 0x15, 0x0344); + rtl8168_mdio_write(tp, 0x19, 0x3346); + rtl8168_mdio_write(tp, 0x15, 0x0345); + rtl8168_mdio_write(tp, 0x19, 0x4d40); + rtl8168_mdio_write(tp, 0x15, 0x0346); + rtl8168_mdio_write(tp, 0x19, 0xd11d); + rtl8168_mdio_write(tp, 0x15, 0x0347); + rtl8168_mdio_write(tp, 0x19, 0x0099); + rtl8168_mdio_write(tp, 0x15, 0x0348); + rtl8168_mdio_write(tp, 0x19, 0xbb17); + rtl8168_mdio_write(tp, 0x15, 0x0349); + rtl8168_mdio_write(tp, 0x19, 0x8102); + rtl8168_mdio_write(tp, 0x15, 0x034a); + rtl8168_mdio_write(tp, 0x19, 0x334d); + rtl8168_mdio_write(tp, 0x15, 0x034b); + rtl8168_mdio_write(tp, 0x19, 0xa22c); + rtl8168_mdio_write(tp, 0x15, 0x034c); + rtl8168_mdio_write(tp, 0x19, 0x3397); + rtl8168_mdio_write(tp, 0x15, 0x034d); + rtl8168_mdio_write(tp, 0x19, 0x91f2); + rtl8168_mdio_write(tp, 0x15, 0x034e); + rtl8168_mdio_write(tp, 0x19, 0xc218); + rtl8168_mdio_write(tp, 0x15, 0x034f); + rtl8168_mdio_write(tp, 0x19, 0x00f0); + rtl8168_mdio_write(tp, 0x15, 0x0350); + rtl8168_mdio_write(tp, 0x19, 0x3397); + rtl8168_mdio_write(tp, 0x15, 0x0351); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0364); + rtl8168_mdio_write(tp, 0x19, 0xbc05); + rtl8168_mdio_write(tp, 0x15, 0x0367); + rtl8168_mdio_write(tp, 0x19, 0xa1fc); + rtl8168_mdio_write(tp, 0x15, 0x0368); + rtl8168_mdio_write(tp, 0x19, 0x3377); + rtl8168_mdio_write(tp, 0x15, 0x0369); + rtl8168_mdio_write(tp, 0x19, 0x328b); + rtl8168_mdio_write(tp, 0x15, 0x036a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0377); + rtl8168_mdio_write(tp, 0x19, 0x4b97); + rtl8168_mdio_write(tp, 0x15, 0x0378); + rtl8168_mdio_write(tp, 0x19, 0x6818); + rtl8168_mdio_write(tp, 0x15, 0x0379); + rtl8168_mdio_write(tp, 0x19, 0x4b07); + rtl8168_mdio_write(tp, 0x15, 0x037a); + rtl8168_mdio_write(tp, 0x19, 0x40ac); + rtl8168_mdio_write(tp, 0x15, 0x037b); + rtl8168_mdio_write(tp, 0x19, 0x4445); + rtl8168_mdio_write(tp, 0x15, 0x037c); + rtl8168_mdio_write(tp, 0x19, 0x404e); + rtl8168_mdio_write(tp, 0x15, 0x037d); + rtl8168_mdio_write(tp, 0x19, 0x4461); + rtl8168_mdio_write(tp, 0x15, 0x037e); + rtl8168_mdio_write(tp, 0x19, 
0x9c09); + rtl8168_mdio_write(tp, 0x15, 0x037f); + rtl8168_mdio_write(tp, 0x19, 0x63da); + rtl8168_mdio_write(tp, 0x15, 0x0380); + rtl8168_mdio_write(tp, 0x19, 0x5440); + rtl8168_mdio_write(tp, 0x15, 0x0381); + rtl8168_mdio_write(tp, 0x19, 0x4b98); + rtl8168_mdio_write(tp, 0x15, 0x0382); + rtl8168_mdio_write(tp, 0x19, 0x7c60); + rtl8168_mdio_write(tp, 0x15, 0x0383); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x0384); + rtl8168_mdio_write(tp, 0x19, 0x4b08); + rtl8168_mdio_write(tp, 0x15, 0x0385); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x0386); + rtl8168_mdio_write(tp, 0x19, 0x338d); + rtl8168_mdio_write(tp, 0x15, 0x0387); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0388); + rtl8168_mdio_write(tp, 0x19, 0x0080); + rtl8168_mdio_write(tp, 0x15, 0x0389); + rtl8168_mdio_write(tp, 0x19, 0x820c); + rtl8168_mdio_write(tp, 0x15, 0x038a); + rtl8168_mdio_write(tp, 0x19, 0xa10b); + rtl8168_mdio_write(tp, 0x15, 0x038b); + rtl8168_mdio_write(tp, 0x19, 0x9df3); + rtl8168_mdio_write(tp, 0x15, 0x038c); + rtl8168_mdio_write(tp, 0x19, 0x3395); + rtl8168_mdio_write(tp, 0x15, 0x038d); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x038e); + rtl8168_mdio_write(tp, 0x19, 0x00f9); + rtl8168_mdio_write(tp, 0x15, 0x038f); + rtl8168_mdio_write(tp, 0x19, 0xc017); + rtl8168_mdio_write(tp, 0x15, 0x0390); + rtl8168_mdio_write(tp, 0x19, 0x0005); + rtl8168_mdio_write(tp, 0x15, 0x0391); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x0392); + rtl8168_mdio_write(tp, 0x19, 0xa103); + rtl8168_mdio_write(tp, 0x15, 0x0393); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x0394); + rtl8168_mdio_write(tp, 0x19, 0x9df9); + rtl8168_mdio_write(tp, 0x15, 0x0395); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x0396); + rtl8168_mdio_write(tp, 0x19, 0x3397); + rtl8168_mdio_write(tp, 0x15, 0x0399); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x03a4); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x03a5); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x03a6); + rtl8168_mdio_write(tp, 0x19, 0x4d08); + rtl8168_mdio_write(tp, 0x15, 0x03a7); + rtl8168_mdio_write(tp, 0x19, 0x33a9); + rtl8168_mdio_write(tp, 0x15, 0x03a8); + rtl8168_mdio_write(tp, 0x19, 0x4d00); + rtl8168_mdio_write(tp, 0x15, 0x03a9); + rtl8168_mdio_write(tp, 0x19, 0x9bfa); + rtl8168_mdio_write(tp, 0x15, 0x03aa); + rtl8168_mdio_write(tp, 0x19, 0x33b6); + rtl8168_mdio_write(tp, 0x15, 0x03bb); + rtl8168_mdio_write(tp, 0x19, 0x4056); + rtl8168_mdio_write(tp, 0x15, 0x03bc); + rtl8168_mdio_write(tp, 0x19, 0x44e9); + rtl8168_mdio_write(tp, 0x15, 0x03bd); + rtl8168_mdio_write(tp, 0x19, 0x405e); + rtl8168_mdio_write(tp, 0x15, 0x03be); + rtl8168_mdio_write(tp, 0x19, 0x44f8); + rtl8168_mdio_write(tp, 0x15, 0x03bf); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x03c0); + rtl8168_mdio_write(tp, 0x19, 0x0037); + rtl8168_mdio_write(tp, 0x15, 0x03c1); + rtl8168_mdio_write(tp, 0x19, 0xbd37); + rtl8168_mdio_write(tp, 0x15, 0x03c2); + rtl8168_mdio_write(tp, 0x19, 0x9cfd); + rtl8168_mdio_write(tp, 0x15, 0x03c3); + rtl8168_mdio_write(tp, 0x19, 0xc639); + rtl8168_mdio_write(tp, 0x15, 0x03c4); + rtl8168_mdio_write(tp, 0x19, 0x0011); + rtl8168_mdio_write(tp, 0x15, 0x03c5); + rtl8168_mdio_write(tp, 0x19, 0x9b03); + rtl8168_mdio_write(tp, 0x15, 0x03c6); + rtl8168_mdio_write(tp, 0x19, 0x7c01); + rtl8168_mdio_write(tp, 
0x15, 0x03c7); + rtl8168_mdio_write(tp, 0x19, 0x4c01); + rtl8168_mdio_write(tp, 0x15, 0x03c8); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x03c9); + rtl8168_mdio_write(tp, 0x19, 0x7c20); + rtl8168_mdio_write(tp, 0x15, 0x03ca); + rtl8168_mdio_write(tp, 0x19, 0x4c20); + rtl8168_mdio_write(tp, 0x15, 0x03cb); + rtl8168_mdio_write(tp, 0x19, 0x9af4); + rtl8168_mdio_write(tp, 0x15, 0x03cc); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03cd); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03ce); + rtl8168_mdio_write(tp, 0x19, 0x4470); + rtl8168_mdio_write(tp, 0x15, 0x03cf); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03d0); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03d1); + rtl8168_mdio_write(tp, 0x19, 0x33bf); + rtl8168_mdio_write(tp, 0x15, 0x03d6); + rtl8168_mdio_write(tp, 0x19, 0x4047); + rtl8168_mdio_write(tp, 0x15, 0x03d7); + rtl8168_mdio_write(tp, 0x19, 0x4469); + rtl8168_mdio_write(tp, 0x15, 0x03d8); + rtl8168_mdio_write(tp, 0x19, 0x492b); + rtl8168_mdio_write(tp, 0x15, 0x03d9); + rtl8168_mdio_write(tp, 0x19, 0x4479); + rtl8168_mdio_write(tp, 0x15, 0x03da); + rtl8168_mdio_write(tp, 0x19, 0x7c09); + rtl8168_mdio_write(tp, 0x15, 0x03db); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x03dc); + rtl8168_mdio_write(tp, 0x19, 0x4d48); + rtl8168_mdio_write(tp, 0x15, 0x03dd); + rtl8168_mdio_write(tp, 0x19, 0x33df); + rtl8168_mdio_write(tp, 0x15, 0x03de); + rtl8168_mdio_write(tp, 0x19, 0x4d40); + rtl8168_mdio_write(tp, 0x15, 0x03df); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x03e0); + rtl8168_mdio_write(tp, 0x19, 0x0017); + rtl8168_mdio_write(tp, 0x15, 0x03e1); + rtl8168_mdio_write(tp, 0x19, 0xbd17); + rtl8168_mdio_write(tp, 0x15, 0x03e2); + rtl8168_mdio_write(tp, 0x19, 0x9b03); + rtl8168_mdio_write(tp, 0x15, 0x03e3); + rtl8168_mdio_write(tp, 0x19, 0x7c20); + rtl8168_mdio_write(tp, 0x15, 0x03e4); + rtl8168_mdio_write(tp, 0x19, 0x4c20); + rtl8168_mdio_write(tp, 0x15, 0x03e5); + rtl8168_mdio_write(tp, 0x19, 0x88f5); + rtl8168_mdio_write(tp, 0x15, 0x03e6); + rtl8168_mdio_write(tp, 0x19, 0xc428); + rtl8168_mdio_write(tp, 0x15, 0x03e7); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x03e8); + rtl8168_mdio_write(tp, 0x19, 0x9af2); + rtl8168_mdio_write(tp, 0x15, 0x03e9); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03ea); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03eb); + rtl8168_mdio_write(tp, 0x19, 0x4470); + rtl8168_mdio_write(tp, 0x15, 0x03ec); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03ed); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03ee); + rtl8168_mdio_write(tp, 0x19, 0x33da); + rtl8168_mdio_write(tp, 0x15, 0x03ef); + rtl8168_mdio_write(tp, 0x19, 0x3312); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2179); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0040); + rtl8168_mdio_write(tp, 0x18, 0x0645); + rtl8168_mdio_write(tp, 0x19, 0xe200); + rtl8168_mdio_write(tp, 0x18, 0x0655); + rtl8168_mdio_write(tp, 0x19, 0x9000); + rtl8168_mdio_write(tp, 0x18, 0x0d05); + rtl8168_mdio_write(tp, 0x19, 0xbe00); + rtl8168_mdio_write(tp, 0x18, 0x0d15); + rtl8168_mdio_write(tp, 0x19, 0xd300); + rtl8168_mdio_write(tp, 0x18, 0x0d25); + 
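+        /* Same address/data convention as above, but on page 0x0007 with
+         * 0x1E = 0x0040 selected: each 0x18 write picks a location and the
+         * following 0x19 write stores its value (inferred, undocumented). */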
rtl8168_mdio_write(tp, 0x19, 0xfe00); + rtl8168_mdio_write(tp, 0x18, 0x0d35); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x0d45); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x0d55); + rtl8168_mdio_write(tp, 0x19, 0x1000); + rtl8168_mdio_write(tp, 0x18, 0x0d65); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x0d75); + rtl8168_mdio_write(tp, 0x19, 0x8200); + rtl8168_mdio_write(tp, 0x18, 0x0d85); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x0d95); + rtl8168_mdio_write(tp, 0x19, 0x7000); + rtl8168_mdio_write(tp, 0x18, 0x0da5); + rtl8168_mdio_write(tp, 0x19, 0x0f00); + rtl8168_mdio_write(tp, 0x18, 0x0db5); + rtl8168_mdio_write(tp, 0x19, 0x0100); + rtl8168_mdio_write(tp, 0x18, 0x0dc5); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x18, 0x0dd5); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x0de5); + rtl8168_mdio_write(tp, 0x19, 0xe000); + rtl8168_mdio_write(tp, 0x18, 0x0df5); + rtl8168_mdio_write(tp, 0x19, 0xef00); + rtl8168_mdio_write(tp, 0x18, 0x16d5); + rtl8168_mdio_write(tp, 0x19, 0xe200); + rtl8168_mdio_write(tp, 0x18, 0x16e5); + rtl8168_mdio_write(tp, 0x19, 0xab00); + rtl8168_mdio_write(tp, 0x18, 0x2904); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x2914); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x2924); + rtl8168_mdio_write(tp, 0x19, 0x0100); + rtl8168_mdio_write(tp, 0x18, 0x2934); + rtl8168_mdio_write(tp, 0x19, 0x2000); + rtl8168_mdio_write(tp, 0x18, 0x2944); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2954); + rtl8168_mdio_write(tp, 0x19, 0x4600); + rtl8168_mdio_write(tp, 0x18, 0x2964); + rtl8168_mdio_write(tp, 0x19, 0xfc00); + rtl8168_mdio_write(tp, 0x18, 0x2974); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2984); + rtl8168_mdio_write(tp, 0x19, 0x5000); + rtl8168_mdio_write(tp, 0x18, 0x2994); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x18, 0x29a4); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x29b4); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x29c4); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x29d4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x29e4); + rtl8168_mdio_write(tp, 0x19, 0x2000); + rtl8168_mdio_write(tp, 0x18, 0x29f4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2a04); + rtl8168_mdio_write(tp, 0x19, 0xe600); + rtl8168_mdio_write(tp, 0x18, 0x2a14); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x2a24); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2a34); + rtl8168_mdio_write(tp, 0x19, 0x5000); + rtl8168_mdio_write(tp, 0x18, 0x2a44); + rtl8168_mdio_write(tp, 0x19, 0x8500); + rtl8168_mdio_write(tp, 0x18, 0x2a54); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x2a64); + rtl8168_mdio_write(tp, 0x19, 0xac00); + rtl8168_mdio_write(tp, 0x18, 0x2a74); + rtl8168_mdio_write(tp, 0x19, 0x0800); + rtl8168_mdio_write(tp, 0x18, 0x2a84); + rtl8168_mdio_write(tp, 0x19, 0xfc00); + rtl8168_mdio_write(tp, 0x18, 0x2a94); + rtl8168_mdio_write(tp, 0x19, 0xe000); + rtl8168_mdio_write(tp, 0x18, 0x2aa4); + rtl8168_mdio_write(tp, 0x19, 0x7400); + rtl8168_mdio_write(tp, 0x18, 0x2ab4); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x2ac4); + rtl8168_mdio_write(tp, 0x19, 
0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x2ad4); + rtl8168_mdio_write(tp, 0x19, 0x0100); + rtl8168_mdio_write(tp, 0x18, 0x2ae4); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x2af4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2b04); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x18, 0x2b14); + rtl8168_mdio_write(tp, 0x19, 0xfc00); + rtl8168_mdio_write(tp, 0x18, 0x2b24); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2b34); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x2b44); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x18, 0x2b54); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x2b64); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x2b74); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x2b84); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2b94); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x2ba4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2bb4); + rtl8168_mdio_write(tp, 0x19, 0xfc00); + rtl8168_mdio_write(tp, 0x18, 0x2bc4); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x2bd4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2be4); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x2bf4); + rtl8168_mdio_write(tp, 0x19, 0x8900); + rtl8168_mdio_write(tp, 0x18, 0x2c04); + rtl8168_mdio_write(tp, 0x19, 0x8300); + rtl8168_mdio_write(tp, 0x18, 0x2c14); + rtl8168_mdio_write(tp, 0x19, 0xe000); + rtl8168_mdio_write(tp, 0x18, 0x2c24); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x18, 0x2c34); + rtl8168_mdio_write(tp, 0x19, 0xac00); + rtl8168_mdio_write(tp, 0x18, 0x2c44); + rtl8168_mdio_write(tp, 0x19, 0x0800); + rtl8168_mdio_write(tp, 0x18, 0x2c54); + rtl8168_mdio_write(tp, 0x19, 0xfa00); + rtl8168_mdio_write(tp, 0x18, 0x2c64); + rtl8168_mdio_write(tp, 0x19, 0xe100); + rtl8168_mdio_write(tp, 0x18, 0x2c74); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x18, 0x0001); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2100); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8b88); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0xd480); + rtl8168_mdio_write(tp, 0x06, 0xc1e4); + rtl8168_mdio_write(tp, 0x06, 0x8b9a); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x9bee); + rtl8168_mdio_write(tp, 0x06, 0x8b83); + rtl8168_mdio_write(tp, 0x06, 0x41bf); + rtl8168_mdio_write(tp, 0x06, 0x8b88); + rtl8168_mdio_write(tp, 0x06, 0xec00); + rtl8168_mdio_write(tp, 0x06, 0x19a9); + rtl8168_mdio_write(tp, 0x06, 0x8b90); + rtl8168_mdio_write(tp, 0x06, 0xf9ee); + rtl8168_mdio_write(tp, 0x06, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x41f7); + rtl8168_mdio_write(tp, 0x06, 0x2ff6); + rtl8168_mdio_write(tp, 0x06, 0x28e4); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 
0x06, 0xe5e1); + rtl8168_mdio_write(tp, 0x06, 0x41f7); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x020c); + rtl8168_mdio_write(tp, 0x06, 0x0202); + rtl8168_mdio_write(tp, 0x06, 0x1d02); + rtl8168_mdio_write(tp, 0x06, 0x0230); + rtl8168_mdio_write(tp, 0x06, 0x0202); + rtl8168_mdio_write(tp, 0x06, 0x4002); + rtl8168_mdio_write(tp, 0x06, 0x028b); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x6c02); + rtl8168_mdio_write(tp, 0x06, 0x8085); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaec3); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x10ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x1310); + rtl8168_mdio_write(tp, 0x06, 0x021f); + rtl8168_mdio_write(tp, 0x06, 0x9d02); + rtl8168_mdio_write(tp, 0x06, 0x1f0c); + rtl8168_mdio_write(tp, 0x06, 0x0227); + rtl8168_mdio_write(tp, 0x06, 0x49fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x200b); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x830e); + rtl8168_mdio_write(tp, 0x06, 0x021b); + rtl8168_mdio_write(tp, 0x06, 0x67ad); + rtl8168_mdio_write(tp, 0x06, 0x2211); + rtl8168_mdio_write(tp, 0x06, 0xf622); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x2ba5); + rtl8168_mdio_write(tp, 0x06, 0x022a); + rtl8168_mdio_write(tp, 0x06, 0x2402); + rtl8168_mdio_write(tp, 0x06, 0x80c6); + rtl8168_mdio_write(tp, 0x06, 0x022a); + rtl8168_mdio_write(tp, 0x06, 0xf0ad); + rtl8168_mdio_write(tp, 0x06, 0x2511); + rtl8168_mdio_write(tp, 0x06, 0xf625); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x8226); + rtl8168_mdio_write(tp, 0x06, 0x0204); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x19cc); + rtl8168_mdio_write(tp, 0x06, 0x022b); + rtl8168_mdio_write(tp, 0x06, 0x5bfc); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x0105); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b83); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x44e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x23ad); + rtl8168_mdio_write(tp, 0x06, 0x223b); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xbea0); + rtl8168_mdio_write(tp, 0x06, 0x0005); + rtl8168_mdio_write(tp, 0x06, 0x0228); + 
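+        /* The long run of 0x06 writes streams opaque PHY-MCU firmware words
+         * through the load window opened above (page 0x0005, reg 0x05 =
+         * 0x8000); the payload is undocumented vendor data. */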
rtl8168_mdio_write(tp, 0x06, 0xdeae); + rtl8168_mdio_write(tp, 0x06, 0x42a0); + rtl8168_mdio_write(tp, 0x06, 0x0105); + rtl8168_mdio_write(tp, 0x06, 0x0228); + rtl8168_mdio_write(tp, 0x06, 0xf1ae); + rtl8168_mdio_write(tp, 0x06, 0x3aa0); + rtl8168_mdio_write(tp, 0x06, 0x0205); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x25ae); + rtl8168_mdio_write(tp, 0x06, 0x32a0); + rtl8168_mdio_write(tp, 0x06, 0x0305); + rtl8168_mdio_write(tp, 0x06, 0x0229); + rtl8168_mdio_write(tp, 0x06, 0x9aae); + rtl8168_mdio_write(tp, 0x06, 0x2aa0); + rtl8168_mdio_write(tp, 0x06, 0x0405); + rtl8168_mdio_write(tp, 0x06, 0x0229); + rtl8168_mdio_write(tp, 0x06, 0xaeae); + rtl8168_mdio_write(tp, 0x06, 0x22a0); + rtl8168_mdio_write(tp, 0x06, 0x0505); + rtl8168_mdio_write(tp, 0x06, 0x0229); + rtl8168_mdio_write(tp, 0x06, 0xd7ae); + rtl8168_mdio_write(tp, 0x06, 0x1aa0); + rtl8168_mdio_write(tp, 0x06, 0x0605); + rtl8168_mdio_write(tp, 0x06, 0x0229); + rtl8168_mdio_write(tp, 0x06, 0xfeae); + rtl8168_mdio_write(tp, 0x06, 0x12ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac0); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac1); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x00fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0x022a); + rtl8168_mdio_write(tp, 0x06, 0x67e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x230d); + rtl8168_mdio_write(tp, 0x06, 0x0658); + rtl8168_mdio_write(tp, 0x06, 0x03a0); + rtl8168_mdio_write(tp, 0x06, 0x0202); + rtl8168_mdio_write(tp, 0x06, 0xae2d); + rtl8168_mdio_write(tp, 0x06, 0xa001); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x2da0); + rtl8168_mdio_write(tp, 0x06, 0x004d); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe201); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x44e0); + rtl8168_mdio_write(tp, 0x06, 0x8ac2); + rtl8168_mdio_write(tp, 0x06, 0xe48a); + rtl8168_mdio_write(tp, 0x06, 0xc4e0); + rtl8168_mdio_write(tp, 0x06, 0x8ac3); + rtl8168_mdio_write(tp, 0x06, 0xe48a); + rtl8168_mdio_write(tp, 0x06, 0xc5ee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x03e0); + rtl8168_mdio_write(tp, 0x06, 0x8b83); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x3aee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x05ae); + rtl8168_mdio_write(tp, 0x06, 0x34e0); + rtl8168_mdio_write(tp, 0x06, 0x8ace); + rtl8168_mdio_write(tp, 0x06, 0xae03); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xcfe1); + rtl8168_mdio_write(tp, 0x06, 0x8ac2); + rtl8168_mdio_write(tp, 0x06, 0x4905); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xc4e1); + rtl8168_mdio_write(tp, 0x06, 0x8ac3); + rtl8168_mdio_write(tp, 0x06, 0x4905); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xc5ee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x2ab6); + rtl8168_mdio_write(tp, 0x06, 0xac20); + rtl8168_mdio_write(tp, 0x06, 0x1202); + rtl8168_mdio_write(tp, 0x06, 0x819b); + rtl8168_mdio_write(tp, 0x06, 0xac20); + rtl8168_mdio_write(tp, 0x06, 0x0cee); + rtl8168_mdio_write(tp, 0x06, 
0x8ac1); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x02fc); + rtl8168_mdio_write(tp, 0x06, 0x04d0); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x590f); + rtl8168_mdio_write(tp, 0x06, 0x3902); + rtl8168_mdio_write(tp, 0x06, 0xaa04); + rtl8168_mdio_write(tp, 0x06, 0xd001); + rtl8168_mdio_write(tp, 0x06, 0xae02); + rtl8168_mdio_write(tp, 0x06, 0xd000); + rtl8168_mdio_write(tp, 0x06, 0x04f9); + rtl8168_mdio_write(tp, 0x06, 0xfae2); + rtl8168_mdio_write(tp, 0x06, 0xe2d2); + rtl8168_mdio_write(tp, 0x06, 0xe3e2); + rtl8168_mdio_write(tp, 0x06, 0xd3f9); + rtl8168_mdio_write(tp, 0x06, 0x5af7); + rtl8168_mdio_write(tp, 0x06, 0xe6e2); + rtl8168_mdio_write(tp, 0x06, 0xd2e7); + rtl8168_mdio_write(tp, 0x06, 0xe2d3); + rtl8168_mdio_write(tp, 0x06, 0xe2e0); + rtl8168_mdio_write(tp, 0x06, 0x2ce3); + rtl8168_mdio_write(tp, 0x06, 0xe02d); + rtl8168_mdio_write(tp, 0x06, 0xf95b); + rtl8168_mdio_write(tp, 0x06, 0xe01e); + rtl8168_mdio_write(tp, 0x06, 0x30e6); + rtl8168_mdio_write(tp, 0x06, 0xe02c); + rtl8168_mdio_write(tp, 0x06, 0xe7e0); + rtl8168_mdio_write(tp, 0x06, 0x2de2); + rtl8168_mdio_write(tp, 0x06, 0xe2cc); + rtl8168_mdio_write(tp, 0x06, 0xe3e2); + rtl8168_mdio_write(tp, 0x06, 0xcdf9); + rtl8168_mdio_write(tp, 0x06, 0x5a0f); + rtl8168_mdio_write(tp, 0x06, 0x6a50); + rtl8168_mdio_write(tp, 0x06, 0xe6e2); + rtl8168_mdio_write(tp, 0x06, 0xcce7); + rtl8168_mdio_write(tp, 0x06, 0xe2cd); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x3ce1); + rtl8168_mdio_write(tp, 0x06, 0xe03d); + rtl8168_mdio_write(tp, 0x06, 0xef64); + rtl8168_mdio_write(tp, 0x06, 0xfde0); + rtl8168_mdio_write(tp, 0x06, 0xe2cc); + rtl8168_mdio_write(tp, 0x06, 0xe1e2); + rtl8168_mdio_write(tp, 0x06, 0xcd58); + rtl8168_mdio_write(tp, 0x06, 0x0f5a); + rtl8168_mdio_write(tp, 0x06, 0xf01e); + rtl8168_mdio_write(tp, 0x06, 0x02e4); + rtl8168_mdio_write(tp, 0x06, 0xe2cc); + rtl8168_mdio_write(tp, 0x06, 0xe5e2); + rtl8168_mdio_write(tp, 0x06, 0xcdfd); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x2ce1); + rtl8168_mdio_write(tp, 0x06, 0xe02d); + rtl8168_mdio_write(tp, 0x06, 0x59e0); + rtl8168_mdio_write(tp, 0x06, 0x5b1f); + rtl8168_mdio_write(tp, 0x06, 0x1e13); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x2ce5); + rtl8168_mdio_write(tp, 0x06, 0xe02d); + rtl8168_mdio_write(tp, 0x06, 0xfde0); + rtl8168_mdio_write(tp, 0x06, 0xe2d2); + rtl8168_mdio_write(tp, 0x06, 0xe1e2); + rtl8168_mdio_write(tp, 0x06, 0xd358); + rtl8168_mdio_write(tp, 0x06, 0xf75a); + rtl8168_mdio_write(tp, 0x06, 0x081e); + rtl8168_mdio_write(tp, 0x06, 0x02e4); + rtl8168_mdio_write(tp, 0x06, 0xe2d2); + rtl8168_mdio_write(tp, 0x06, 0xe5e2); + rtl8168_mdio_write(tp, 0x06, 0xd3ef); + rtl8168_mdio_write(tp, 0x06, 0x46fe); + rtl8168_mdio_write(tp, 0x06, 0xfd04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xc4e1); + rtl8168_mdio_write(tp, 0x06, 0x8b6e); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e58); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x6ead); + rtl8168_mdio_write(tp, 0x06, 0x2222); + rtl8168_mdio_write(tp, 
0x06, 0xac27); + rtl8168_mdio_write(tp, 0x06, 0x55ac); + rtl8168_mdio_write(tp, 0x06, 0x2602); + rtl8168_mdio_write(tp, 0x06, 0xae1a); + rtl8168_mdio_write(tp, 0x06, 0xd106); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xba02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd107); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xbd02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd107); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xc002); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xae30); + rtl8168_mdio_write(tp, 0x06, 0xd103); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xc302); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xc602); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xca02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd10f); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xba02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xbd02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xc002); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xc302); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd011); + rtl8168_mdio_write(tp, 0x06, 0x022b); + rtl8168_mdio_write(tp, 0x06, 0xfb59); + rtl8168_mdio_write(tp, 0x06, 0x03ef); + rtl8168_mdio_write(tp, 0x06, 0x01d1); + rtl8168_mdio_write(tp, 0x06, 0x00a0); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0xc602); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xd111); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x020c); + rtl8168_mdio_write(tp, 0x06, 0x11ad); + rtl8168_mdio_write(tp, 0x06, 0x2102); + rtl8168_mdio_write(tp, 0x06, 0x0c12); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xca02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xaec8); + rtl8168_mdio_write(tp, 0x06, 0x70e4); + rtl8168_mdio_write(tp, 0x06, 0x2602); + rtl8168_mdio_write(tp, 0x06, 0x82d1); + rtl8168_mdio_write(tp, 0x06, 0x05f8); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0xe2fe); + rtl8168_mdio_write(tp, 0x06, 0xe1e2); + rtl8168_mdio_write(tp, 0x06, 0xffad); + rtl8168_mdio_write(tp, 0x06, 0x2d1a); + rtl8168_mdio_write(tp, 0x06, 0xe0e1); + rtl8168_mdio_write(tp, 0x06, 0x4ee1); + rtl8168_mdio_write(tp, 0x06, 0xe14f); + rtl8168_mdio_write(tp, 0x06, 0xac2d); + rtl8168_mdio_write(tp, 0x06, 0x22f6); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x033b); + rtl8168_mdio_write(tp, 0x06, 0xf703); + rtl8168_mdio_write(tp, 0x06, 0xf706); + rtl8168_mdio_write(tp, 0x06, 0xbf84); + 
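+        /* Note the recurring "d1xx ... bf3bxx ... 022dc1" groupings in this
+         * stream; they look like parameterized calls into fixed firmware
+         * routines, though that is only a guess from the repetition. */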
rtl8168_mdio_write(tp, 0x06, 0x4402); + rtl8168_mdio_write(tp, 0x06, 0x2d21); + rtl8168_mdio_write(tp, 0x06, 0xae11); + rtl8168_mdio_write(tp, 0x06, 0xe0e1); + rtl8168_mdio_write(tp, 0x06, 0x4ee1); + rtl8168_mdio_write(tp, 0x06, 0xe14f); + rtl8168_mdio_write(tp, 0x06, 0xad2d); + rtl8168_mdio_write(tp, 0x06, 0x08bf); + rtl8168_mdio_write(tp, 0x06, 0x844f); + rtl8168_mdio_write(tp, 0x06, 0x022d); + rtl8168_mdio_write(tp, 0x06, 0x21f6); + rtl8168_mdio_write(tp, 0x06, 0x06ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0x4502); + rtl8168_mdio_write(tp, 0x06, 0x83a2); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe001); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x1fd1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x843b); + rtl8168_mdio_write(tp, 0x06, 0x022d); + rtl8168_mdio_write(tp, 0x06, 0xc1e0); + rtl8168_mdio_write(tp, 0x06, 0xe020); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x21ad); + rtl8168_mdio_write(tp, 0x06, 0x200e); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf84); + rtl8168_mdio_write(tp, 0x06, 0x3b02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xbf3b); + rtl8168_mdio_write(tp, 0x06, 0x9602); + rtl8168_mdio_write(tp, 0x06, 0x2d21); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x204c); + rtl8168_mdio_write(tp, 0x06, 0xd200); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x0058); + rtl8168_mdio_write(tp, 0x06, 0x010c); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 0x5810); + rtl8168_mdio_write(tp, 0x06, 0x1e20); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x3658); + rtl8168_mdio_write(tp, 0x06, 0x031e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xe01e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0x8b64); + rtl8168_mdio_write(tp, 0x06, 0x1f02); + rtl8168_mdio_write(tp, 0x06, 0x9e22); + rtl8168_mdio_write(tp, 0x06, 0xe68b); + rtl8168_mdio_write(tp, 0x06, 0x64ad); + rtl8168_mdio_write(tp, 0x06, 0x3214); + rtl8168_mdio_write(tp, 0x06, 0xad34); + rtl8168_mdio_write(tp, 0x06, 0x11ef); + rtl8168_mdio_write(tp, 0x06, 0x0258); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x07ad); + rtl8168_mdio_write(tp, 0x06, 0x3508); + rtl8168_mdio_write(tp, 0x06, 0x5ac0); + rtl8168_mdio_write(tp, 0x06, 0x9f04); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xae02); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf84); + rtl8168_mdio_write(tp, 0x06, 0x3e02); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 
0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfbe0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x22e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x23e2); + rtl8168_mdio_write(tp, 0x06, 0xe036); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0x375a); + rtl8168_mdio_write(tp, 0x06, 0xc40d); + rtl8168_mdio_write(tp, 0x06, 0x0158); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x20e3); + rtl8168_mdio_write(tp, 0x06, 0x8ae7); + rtl8168_mdio_write(tp, 0x06, 0xac31); + rtl8168_mdio_write(tp, 0x06, 0x60ac); + rtl8168_mdio_write(tp, 0x06, 0x3a08); + rtl8168_mdio_write(tp, 0x06, 0xac3e); + rtl8168_mdio_write(tp, 0x06, 0x26ae); + rtl8168_mdio_write(tp, 0x06, 0x67af); + rtl8168_mdio_write(tp, 0x06, 0x8437); + rtl8168_mdio_write(tp, 0x06, 0xad37); + rtl8168_mdio_write(tp, 0x06, 0x61e0); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xe91b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x51d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x8441); + rtl8168_mdio_write(tp, 0x06, 0x022d); + rtl8168_mdio_write(tp, 0x06, 0xc1ee); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x43ad); + rtl8168_mdio_write(tp, 0x06, 0x3627); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xeee1); + rtl8168_mdio_write(tp, 0x06, 0x8aef); + rtl8168_mdio_write(tp, 0x06, 0xef74); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xeae1); + rtl8168_mdio_write(tp, 0x06, 0x8aeb); + rtl8168_mdio_write(tp, 0x06, 0x1b74); + rtl8168_mdio_write(tp, 0x06, 0x9e2e); + rtl8168_mdio_write(tp, 0x06, 0x14e4); + rtl8168_mdio_write(tp, 0x06, 0x8aea); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xebef); + rtl8168_mdio_write(tp, 0x06, 0x74e0); + rtl8168_mdio_write(tp, 0x06, 0x8aee); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xef1b); + rtl8168_mdio_write(tp, 0x06, 0x479e); + rtl8168_mdio_write(tp, 0x06, 0x0fae); + rtl8168_mdio_write(tp, 0x06, 0x19ee); + rtl8168_mdio_write(tp, 0x06, 0x8aea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8aeb); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x0fac); + rtl8168_mdio_write(tp, 0x06, 0x390c); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf84); + rtl8168_mdio_write(tp, 0x06, 0x4102); + rtl8168_mdio_write(tp, 0x06, 0x2dc1); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe800); + rtl8168_mdio_write(tp, 0x06, 0xe68a); + rtl8168_mdio_write(tp, 0x06, 0xe7ff); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x0400); + rtl8168_mdio_write(tp, 0x06, 0xe234); + rtl8168_mdio_write(tp, 0x06, 0xcce2); + rtl8168_mdio_write(tp, 0x06, 0x0088); + rtl8168_mdio_write(tp, 0x06, 0xe200); + rtl8168_mdio_write(tp, 0x06, 0xa725); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x1de5); + rtl8168_mdio_write(tp, 0x06, 0x0a2c); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x6de5); + rtl8168_mdio_write(tp, 0x06, 0x0a1d); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 
0x06, 0x1ce5); + rtl8168_mdio_write(tp, 0x06, 0x0a2d); + rtl8168_mdio_write(tp, 0x06, 0xa755); + rtl8168_mdio_write(tp, 0x05, 0x8b64); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8b94); + rtl8168_mdio_write(tp, 0x06, 0x82cd); + rtl8168_mdio_write(tp, 0x05, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0x2000); + rtl8168_mdio_write(tp, 0x05, 0x8aee); + rtl8168_mdio_write(tp, 0x06, 0x03b8); + rtl8168_mdio_write(tp, 0x05, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x01); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x01, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~(BIT_2); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0028); + rtl8168_mdio_write(tp, 0x15, 0x0010); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0041); + rtl8168_mdio_write(tp, 0x15, 0x0802); + rtl8168_mdio_write(tp, 0x16, 0x2185); + rtl8168_mdio_write(tp, 0x1f, 0x0000); +} + +static void +rtl8168_set_phy_mcu_8168e_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val,i; + + if (rtl8168_efuse_read(tp, 0x22) == 0x0c) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x1800); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x17, 0x0117); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + rtl8168_mdio_write(tp, 0x1B, 0x5000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x4104); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1E); + gphy_val &= 0x03FF; + if (gphy_val==0x000C) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x07); + if ((gphy_val & BIT_5) == 0) + break; + } + gphy_val = rtl8168_mdio_read(tp, 0x07); + if (gphy_val & BIT_5) { + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x00a1); + rtl8168_mdio_write(tp, 0x17, 0x1000); + rtl8168_mdio_write(tp, 0x17, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2000); + rtl8168_mdio_write(tp, 0x1e, 0x002f); + rtl8168_mdio_write(tp, 0x18, 0x9bfb); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x07, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x08); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x08, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + rtl8168_mdio_write(tp, 0x15, 0x000e); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15, 
0x0010); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x0018); + rtl8168_mdio_write(tp, 0x19, 0x4801); + rtl8168_mdio_write(tp, 0x15, 0x0019); + rtl8168_mdio_write(tp, 0x19, 0x6801); + rtl8168_mdio_write(tp, 0x15, 0x001a); + rtl8168_mdio_write(tp, 0x19, 0x66a1); + rtl8168_mdio_write(tp, 0x15, 0x001f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0020); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0021); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0022); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0023); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0024); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0025); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0026); + rtl8168_mdio_write(tp, 0x19, 0x40ea); + rtl8168_mdio_write(tp, 0x15, 0x0027); + rtl8168_mdio_write(tp, 0x19, 0x4503); + rtl8168_mdio_write(tp, 0x15, 0x0028); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0029); + rtl8168_mdio_write(tp, 0x19, 0xa631); + rtl8168_mdio_write(tp, 0x15, 0x002a); + rtl8168_mdio_write(tp, 0x19, 0x9717); + rtl8168_mdio_write(tp, 0x15, 0x002b); + rtl8168_mdio_write(tp, 0x19, 0x302c); + rtl8168_mdio_write(tp, 0x15, 0x002c); + rtl8168_mdio_write(tp, 0x19, 0x4802); + rtl8168_mdio_write(tp, 0x15, 0x002d); + rtl8168_mdio_write(tp, 0x19, 0x58da); + rtl8168_mdio_write(tp, 0x15, 0x002e); + rtl8168_mdio_write(tp, 0x19, 0x400d); + rtl8168_mdio_write(tp, 0x15, 0x002f); + rtl8168_mdio_write(tp, 0x19, 0x4488); + rtl8168_mdio_write(tp, 0x15, 0x0030); + rtl8168_mdio_write(tp, 0x19, 0x9e00); + rtl8168_mdio_write(tp, 0x15, 0x0031); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0032); + rtl8168_mdio_write(tp, 0x19, 0x6481); + rtl8168_mdio_write(tp, 0x15, 0x0033); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0034); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0035); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0036); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0037); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0038); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0039); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x003a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x003b); + rtl8168_mdio_write(tp, 0x19, 0x63e8); + rtl8168_mdio_write(tp, 0x15, 0x003c); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x003d); + rtl8168_mdio_write(tp, 0x19, 0x59d4); + rtl8168_mdio_write(tp, 0x15, 0x003e); + rtl8168_mdio_write(tp, 0x19, 0x63f8); + rtl8168_mdio_write(tp, 0x15, 0x0040); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0041); + rtl8168_mdio_write(tp, 0x19, 0x30de); + rtl8168_mdio_write(tp, 0x15, 0x0044); + rtl8168_mdio_write(tp, 0x19, 0x480f); + rtl8168_mdio_write(tp, 0x15, 0x0045); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x0046); + rtl8168_mdio_write(tp, 0x19, 0x6680); + rtl8168_mdio_write(tp, 0x15, 0x0047); + rtl8168_mdio_write(tp, 0x19, 0x7c10); + rtl8168_mdio_write(tp, 0x15, 0x0048); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0049); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004a); + rtl8168_mdio_write(tp, 
0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004f); + rtl8168_mdio_write(tp, 0x19, 0x40ea); + rtl8168_mdio_write(tp, 0x15, 0x0050); + rtl8168_mdio_write(tp, 0x19, 0x4503); + rtl8168_mdio_write(tp, 0x15, 0x0051); + rtl8168_mdio_write(tp, 0x19, 0x58ca); + rtl8168_mdio_write(tp, 0x15, 0x0052); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0053); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x0054); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x0055); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0056); + rtl8168_mdio_write(tp, 0x19, 0x3000); + rtl8168_mdio_write(tp, 0x15, 0x00a1); + rtl8168_mdio_write(tp, 0x19, 0x3044); + rtl8168_mdio_write(tp, 0x15, 0x00ab); + rtl8168_mdio_write(tp, 0x19, 0x5820); + rtl8168_mdio_write(tp, 0x15, 0x00ac); + rtl8168_mdio_write(tp, 0x19, 0x5e04); + rtl8168_mdio_write(tp, 0x15, 0x00ad); + rtl8168_mdio_write(tp, 0x19, 0xb60c); + rtl8168_mdio_write(tp, 0x15, 0x00af); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15, 0x00b2); + rtl8168_mdio_write(tp, 0x19, 0x30b9); + rtl8168_mdio_write(tp, 0x15, 0x00b9); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x00ba); + rtl8168_mdio_write(tp, 0x19, 0x480b); + rtl8168_mdio_write(tp, 0x15, 0x00bb); + rtl8168_mdio_write(tp, 0x19, 0x5e00); + rtl8168_mdio_write(tp, 0x15, 0x00bc); + rtl8168_mdio_write(tp, 0x19, 0x405f); + rtl8168_mdio_write(tp, 0x15, 0x00bd); + rtl8168_mdio_write(tp, 0x19, 0x4448); + rtl8168_mdio_write(tp, 0x15, 0x00be); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x00bf); + rtl8168_mdio_write(tp, 0x19, 0x4468); + rtl8168_mdio_write(tp, 0x15, 0x00c0); + rtl8168_mdio_write(tp, 0x19, 0x9c02); + rtl8168_mdio_write(tp, 0x15, 0x00c1); + rtl8168_mdio_write(tp, 0x19, 0x58a0); + rtl8168_mdio_write(tp, 0x15, 0x00c2); + rtl8168_mdio_write(tp, 0x19, 0xb605); + rtl8168_mdio_write(tp, 0x15, 0x00c3); + rtl8168_mdio_write(tp, 0x19, 0xc0d3); + rtl8168_mdio_write(tp, 0x15, 0x00c4); + rtl8168_mdio_write(tp, 0x19, 0x00e6); + rtl8168_mdio_write(tp, 0x15, 0x00c5); + rtl8168_mdio_write(tp, 0x19, 0xdaec); + rtl8168_mdio_write(tp, 0x15, 0x00c6); + rtl8168_mdio_write(tp, 0x19, 0x00fa); + rtl8168_mdio_write(tp, 0x15, 0x00c7); + rtl8168_mdio_write(tp, 0x19, 0x9df9); + rtl8168_mdio_write(tp, 0x15, 0x0112); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0113); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0114); + rtl8168_mdio_write(tp, 0x19, 0x63f0); + rtl8168_mdio_write(tp, 0x15, 0x0115); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0116); + rtl8168_mdio_write(tp, 0x19, 0x4418); + rtl8168_mdio_write(tp, 0x15, 0x0117); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x15, 0x0118); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x0119); + rtl8168_mdio_write(tp, 0x19, 0x64e1); + rtl8168_mdio_write(tp, 0x15, 0x011a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0150); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0151); + rtl8168_mdio_write(tp, 0x19, 0x6461); + 
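+ /*
+  * Editor's note (interpretation): the 0x16, 0x0306 / 0x16, 0x0307
+  * writes above appear to open a table-programming mode.  Within it,
+  * each write to 0x15 selects an entry address and the following write
+  * to 0x19 supplies that entry's value; the mode is closed again later
+  * with 0x16, 0x0306 followed by 0x16, 0x0300.
+  */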
rtl8168_mdio_write(tp, 0x15, 0x0152); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0153); + rtl8168_mdio_write(tp, 0x19, 0x4540); + rtl8168_mdio_write(tp, 0x15, 0x0154); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0155); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x15, 0x0156); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0157); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0158); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0159); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x015a); + rtl8168_mdio_write(tp, 0x19, 0x30fe); + rtl8168_mdio_write(tp, 0x15, 0x029c); + rtl8168_mdio_write(tp, 0x19, 0x0070); + rtl8168_mdio_write(tp, 0x15, 0x02b2); + rtl8168_mdio_write(tp, 0x19, 0x005a); + rtl8168_mdio_write(tp, 0x15, 0x02bd); + rtl8168_mdio_write(tp, 0x19, 0xa522); + rtl8168_mdio_write(tp, 0x15, 0x02ce); + rtl8168_mdio_write(tp, 0x19, 0xb63e); + rtl8168_mdio_write(tp, 0x15, 0x02d9); + rtl8168_mdio_write(tp, 0x19, 0x32df); + rtl8168_mdio_write(tp, 0x15, 0x02df); + rtl8168_mdio_write(tp, 0x19, 0x4500); + rtl8168_mdio_write(tp, 0x15, 0x02e7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02f4); + rtl8168_mdio_write(tp, 0x19, 0xb618); + rtl8168_mdio_write(tp, 0x15, 0x02fb); + rtl8168_mdio_write(tp, 0x19, 0xb900); + rtl8168_mdio_write(tp, 0x15, 0x02fc); + rtl8168_mdio_write(tp, 0x19, 0x49b5); + rtl8168_mdio_write(tp, 0x15, 0x02fd); + rtl8168_mdio_write(tp, 0x19, 0x6812); + rtl8168_mdio_write(tp, 0x15, 0x02fe); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x02ff); + rtl8168_mdio_write(tp, 0x19, 0x9900); + rtl8168_mdio_write(tp, 0x15, 0x0300); + rtl8168_mdio_write(tp, 0x19, 0x64a0); + rtl8168_mdio_write(tp, 0x15, 0x0301); + rtl8168_mdio_write(tp, 0x19, 0x3316); + rtl8168_mdio_write(tp, 0x15, 0x0308); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030c); + rtl8168_mdio_write(tp, 0x19, 0x3000); + rtl8168_mdio_write(tp, 0x15, 0x0312); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0313); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0314); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0315); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0316); + rtl8168_mdio_write(tp, 0x19, 0x49b5); + rtl8168_mdio_write(tp, 0x15, 0x0317); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x0318); + rtl8168_mdio_write(tp, 0x19, 0x4d00); + rtl8168_mdio_write(tp, 0x15, 0x0319); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x031a); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x031b); + rtl8168_mdio_write(tp, 0x19, 0x4925); + rtl8168_mdio_write(tp, 0x15, 0x031c); + rtl8168_mdio_write(tp, 0x19, 0x403b); + rtl8168_mdio_write(tp, 0x15, 0x031d); + rtl8168_mdio_write(tp, 0x19, 0xa602); + rtl8168_mdio_write(tp, 0x15, 0x031e); + rtl8168_mdio_write(tp, 0x19, 0x402f); + rtl8168_mdio_write(tp, 0x15, 0x031f); + rtl8168_mdio_write(tp, 0x19, 0x4484); + rtl8168_mdio_write(tp, 0x15, 0x0320); + rtl8168_mdio_write(tp, 0x19, 0x40c8); + rtl8168_mdio_write(tp, 0x15, 0x0321); + rtl8168_mdio_write(tp, 0x19, 0x44c4); + rtl8168_mdio_write(tp, 0x15, 0x0322); + rtl8168_mdio_write(tp, 0x19, 0x404f); + rtl8168_mdio_write(tp, 0x15, 0x0323); + rtl8168_mdio_write(tp, 0x19, 0x44c8); + rtl8168_mdio_write(tp, 0x15, 
0x0324); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0325); + rtl8168_mdio_write(tp, 0x19, 0x00e7); + rtl8168_mdio_write(tp, 0x15, 0x0326); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0327); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x0328); + rtl8168_mdio_write(tp, 0x19, 0x4d48); + rtl8168_mdio_write(tp, 0x15, 0x0329); + rtl8168_mdio_write(tp, 0x19, 0x332b); + rtl8168_mdio_write(tp, 0x15, 0x032a); + rtl8168_mdio_write(tp, 0x19, 0x4d40); + rtl8168_mdio_write(tp, 0x15, 0x032c); + rtl8168_mdio_write(tp, 0x19, 0x00f8); + rtl8168_mdio_write(tp, 0x15, 0x032d); + rtl8168_mdio_write(tp, 0x19, 0x82b2); + rtl8168_mdio_write(tp, 0x15, 0x032f); + rtl8168_mdio_write(tp, 0x19, 0x00b0); + rtl8168_mdio_write(tp, 0x15, 0x0332); + rtl8168_mdio_write(tp, 0x19, 0x91f2); + rtl8168_mdio_write(tp, 0x15, 0x033f); + rtl8168_mdio_write(tp, 0x19, 0xb6cd); + rtl8168_mdio_write(tp, 0x15, 0x0340); + rtl8168_mdio_write(tp, 0x19, 0x9e01); + rtl8168_mdio_write(tp, 0x15, 0x0341); + rtl8168_mdio_write(tp, 0x19, 0xd11d); + rtl8168_mdio_write(tp, 0x15, 0x0342); + rtl8168_mdio_write(tp, 0x19, 0x009d); + rtl8168_mdio_write(tp, 0x15, 0x0343); + rtl8168_mdio_write(tp, 0x19, 0xbb1c); + rtl8168_mdio_write(tp, 0x15, 0x0344); + rtl8168_mdio_write(tp, 0x19, 0x8102); + rtl8168_mdio_write(tp, 0x15, 0x0345); + rtl8168_mdio_write(tp, 0x19, 0x3348); + rtl8168_mdio_write(tp, 0x15, 0x0346); + rtl8168_mdio_write(tp, 0x19, 0xa231); + rtl8168_mdio_write(tp, 0x15, 0x0347); + rtl8168_mdio_write(tp, 0x19, 0x335b); + rtl8168_mdio_write(tp, 0x15, 0x0348); + rtl8168_mdio_write(tp, 0x19, 0x91f7); + rtl8168_mdio_write(tp, 0x15, 0x0349); + rtl8168_mdio_write(tp, 0x19, 0xc218); + rtl8168_mdio_write(tp, 0x15, 0x034a); + rtl8168_mdio_write(tp, 0x19, 0x00f5); + rtl8168_mdio_write(tp, 0x15, 0x034b); + rtl8168_mdio_write(tp, 0x19, 0x335b); + rtl8168_mdio_write(tp, 0x15, 0x034c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x034d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x034e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x034f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0350); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x035b); + rtl8168_mdio_write(tp, 0x19, 0xa23c); + rtl8168_mdio_write(tp, 0x15, 0x035c); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x035d); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x035e); + rtl8168_mdio_write(tp, 0x19, 0x3397); + rtl8168_mdio_write(tp, 0x15, 0x0363); + rtl8168_mdio_write(tp, 0x19, 0xb6a9); + rtl8168_mdio_write(tp, 0x15, 0x0366); + rtl8168_mdio_write(tp, 0x19, 0x00f5); + rtl8168_mdio_write(tp, 0x15, 0x0382); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0388); + rtl8168_mdio_write(tp, 0x19, 0x0084); + rtl8168_mdio_write(tp, 0x15, 0x0389); + rtl8168_mdio_write(tp, 0x19, 0xdd17); + rtl8168_mdio_write(tp, 0x15, 0x038a); + rtl8168_mdio_write(tp, 0x19, 0x000b); + rtl8168_mdio_write(tp, 0x15, 0x038b); + rtl8168_mdio_write(tp, 0x19, 0xa10a); + rtl8168_mdio_write(tp, 0x15, 0x038c); + rtl8168_mdio_write(tp, 0x19, 0x337e); + rtl8168_mdio_write(tp, 0x15, 0x038d); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x038e); + rtl8168_mdio_write(tp, 0x19, 0xa107); + rtl8168_mdio_write(tp, 0x15, 0x038f); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x0390); + rtl8168_mdio_write(tp, 
0x19, 0xc017); + rtl8168_mdio_write(tp, 0x15, 0x0391); + rtl8168_mdio_write(tp, 0x19, 0x0004); + rtl8168_mdio_write(tp, 0x15, 0x0392); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0393); + rtl8168_mdio_write(tp, 0x19, 0x00f4); + rtl8168_mdio_write(tp, 0x15, 0x0397); + rtl8168_mdio_write(tp, 0x19, 0x4098); + rtl8168_mdio_write(tp, 0x15, 0x0398); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x0399); + rtl8168_mdio_write(tp, 0x19, 0x55bf); + rtl8168_mdio_write(tp, 0x15, 0x039a); + rtl8168_mdio_write(tp, 0x19, 0x4bb9); + rtl8168_mdio_write(tp, 0x15, 0x039b); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x039c); + rtl8168_mdio_write(tp, 0x19, 0x4b29); + rtl8168_mdio_write(tp, 0x15, 0x039d); + rtl8168_mdio_write(tp, 0x19, 0x4041); + rtl8168_mdio_write(tp, 0x15, 0x039e); + rtl8168_mdio_write(tp, 0x19, 0x442a); + rtl8168_mdio_write(tp, 0x15, 0x039f); + rtl8168_mdio_write(tp, 0x19, 0x4029); + rtl8168_mdio_write(tp, 0x15, 0x03aa); + rtl8168_mdio_write(tp, 0x19, 0x33b8); + rtl8168_mdio_write(tp, 0x15, 0x03b6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b8); + rtl8168_mdio_write(tp, 0x19, 0x543f); + rtl8168_mdio_write(tp, 0x15, 0x03b9); + rtl8168_mdio_write(tp, 0x19, 0x499a); + rtl8168_mdio_write(tp, 0x15, 0x03ba); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x03bb); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03bc); + rtl8168_mdio_write(tp, 0x19, 0x490a); + rtl8168_mdio_write(tp, 0x15, 0x03bd); + rtl8168_mdio_write(tp, 0x19, 0x405e); + rtl8168_mdio_write(tp, 0x15, 0x03c2); + rtl8168_mdio_write(tp, 0x19, 0x9a03); + rtl8168_mdio_write(tp, 0x15, 0x03c4); + rtl8168_mdio_write(tp, 0x19, 0x0015); + rtl8168_mdio_write(tp, 0x15, 0x03c5); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x03c8); + rtl8168_mdio_write(tp, 0x19, 0x9cf7); + rtl8168_mdio_write(tp, 0x15, 0x03c9); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03ca); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03cb); + rtl8168_mdio_write(tp, 0x19, 0x4458); + rtl8168_mdio_write(tp, 0x15, 0x03cd); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03ce); + rtl8168_mdio_write(tp, 0x19, 0x33bf); + rtl8168_mdio_write(tp, 0x15, 0x03cf); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d0); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d1); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d9); + rtl8168_mdio_write(tp, 0x19, 0x49bb); + rtl8168_mdio_write(tp, 0x15, 0x03da); + rtl8168_mdio_write(tp, 0x19, 0x4478); + rtl8168_mdio_write(tp, 0x15, 0x03db); + rtl8168_mdio_write(tp, 0x19, 0x492b); + rtl8168_mdio_write(tp, 0x15, 0x03dc); + rtl8168_mdio_write(tp, 0x19, 0x7c01); + rtl8168_mdio_write(tp, 0x15, 0x03dd); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x03de); + rtl8168_mdio_write(tp, 0x19, 0xbd1a); + rtl8168_mdio_write(tp, 0x15, 0x03df); + rtl8168_mdio_write(tp, 0x19, 0xc428); + 
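+ /*
+  * Editor's illustrative sketch, not part of the original patch: the
+  * 0x15/0x19 runs above are pure address/data pairs and could, under
+  * the same assumptions, be expressed as a const table walked in a
+  * loop.  `phy_init_tbl` is an invented name; the sample values are
+  * copied from the sequence above; rtl8168_mdio_write() is the
+  * driver's own accessor.  Kept inside #if 0 so it cannot alter
+  * behavior.
+  */
+ #if 0
+ static const struct { u16 addr, data; } phy_init_tbl[] = {
+         { 0x0152, 0x4003 }, { 0x0153, 0x4540 }, { 0x0154, 0x9f00 },
+         /* ... remaining pairs elided ... */
+ };
+ {
+         size_t n;
+         for (n = 0; n < ARRAY_SIZE(phy_init_tbl); n++) {
+                 rtl8168_mdio_write(tp, 0x15, phy_init_tbl[n].addr); /* entry address */
+                 rtl8168_mdio_write(tp, 0x19, phy_init_tbl[n].data); /* entry value */
+         }
+ }
+ #endif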
rtl8168_mdio_write(tp, 0x15, 0x03e0); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x03e1); + rtl8168_mdio_write(tp, 0x19, 0x9cfd); + rtl8168_mdio_write(tp, 0x15, 0x03e2); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03e3); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03e4); + rtl8168_mdio_write(tp, 0x19, 0x4458); + rtl8168_mdio_write(tp, 0x15, 0x03e5); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03e6); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03e7); + rtl8168_mdio_write(tp, 0x19, 0x33de); + rtl8168_mdio_write(tp, 0x15, 0x03e8); + rtl8168_mdio_write(tp, 0x19, 0xc218); + rtl8168_mdio_write(tp, 0x15, 0x03e9); + rtl8168_mdio_write(tp, 0x19, 0x0002); + rtl8168_mdio_write(tp, 0x15, 0x03ea); + rtl8168_mdio_write(tp, 0x19, 0x32df); + rtl8168_mdio_write(tp, 0x15, 0x03eb); + rtl8168_mdio_write(tp, 0x19, 0x3316); + rtl8168_mdio_write(tp, 0x15, 0x03ec); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ed); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ee); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ef); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03f7); + rtl8168_mdio_write(tp, 0x19, 0x330c); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0xf602); + rtl8168_mdio_write(tp, 0x06, 0x0200); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x9002); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x0202); + rtl8168_mdio_write(tp, 0x06, 0x3402); + rtl8168_mdio_write(tp, 0x06, 0x027f); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0xa602); + rtl8168_mdio_write(tp, 0x06, 0x80bf); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe600); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xee03); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xefb8); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe902); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8285); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8520); + rtl8168_mdio_write(tp, 0x06, 
0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8701); + rtl8168_mdio_write(tp, 0x06, 0xd481); + rtl8168_mdio_write(tp, 0x06, 0x35e4); + rtl8168_mdio_write(tp, 0x06, 0x8b94); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x95bf); + rtl8168_mdio_write(tp, 0x06, 0x8b88); + rtl8168_mdio_write(tp, 0x06, 0xec00); + rtl8168_mdio_write(tp, 0x06, 0x19a9); + rtl8168_mdio_write(tp, 0x06, 0x8b90); + rtl8168_mdio_write(tp, 0x06, 0xf9ee); + rtl8168_mdio_write(tp, 0x06, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x41f7); + rtl8168_mdio_write(tp, 0x06, 0x2ff6); + rtl8168_mdio_write(tp, 0x06, 0x28e4); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xe5e1); + rtl8168_mdio_write(tp, 0x06, 0x4104); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x0dee); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x82f4); + rtl8168_mdio_write(tp, 0x06, 0x021f); + rtl8168_mdio_write(tp, 0x06, 0x4102); + rtl8168_mdio_write(tp, 0x06, 0x2812); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x10ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x139d); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xd602); + rtl8168_mdio_write(tp, 0x06, 0x1f99); + rtl8168_mdio_write(tp, 0x06, 0x0227); + rtl8168_mdio_write(tp, 0x06, 0xeafc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2014); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x8104); + rtl8168_mdio_write(tp, 0x06, 0x021b); + rtl8168_mdio_write(tp, 0x06, 0xf402); + rtl8168_mdio_write(tp, 0x06, 0x2c9c); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x7902); + rtl8168_mdio_write(tp, 0x06, 0x8443); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x11f6); + rtl8168_mdio_write(tp, 0x06, 0x22e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x022c); + rtl8168_mdio_write(tp, 0x06, 0x4602); + rtl8168_mdio_write(tp, 0x06, 0x2ac5); + rtl8168_mdio_write(tp, 0x06, 0x0229); + rtl8168_mdio_write(tp, 0x06, 0x2002); + rtl8168_mdio_write(tp, 0x06, 0x2b91); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x11f6); + rtl8168_mdio_write(tp, 0x06, 0x25e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0xe202); + rtl8168_mdio_write(tp, 0x06, 0x043a); + rtl8168_mdio_write(tp, 0x06, 0x021a); + rtl8168_mdio_write(tp, 0x06, 0x5902); + rtl8168_mdio_write(tp, 0x06, 0x2bfc); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe001); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x1fd1); + rtl8168_mdio_write(tp, 
0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8638); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50e0); + rtl8168_mdio_write(tp, 0x06, 0xe020); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x21ad); + rtl8168_mdio_write(tp, 0x06, 0x200e); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xbf3d); + rtl8168_mdio_write(tp, 0x06, 0x3902); + rtl8168_mdio_write(tp, 0x06, 0x2eb0); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x0402); + rtl8168_mdio_write(tp, 0x06, 0x8591); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x3c05); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xfee1); + rtl8168_mdio_write(tp, 0x06, 0xe2ff); + rtl8168_mdio_write(tp, 0x06, 0xad2d); + rtl8168_mdio_write(tp, 0x06, 0x1ae0); + rtl8168_mdio_write(tp, 0x06, 0xe14e); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x4fac); + rtl8168_mdio_write(tp, 0x06, 0x2d22); + rtl8168_mdio_write(tp, 0x06, 0xf603); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x36f7); + rtl8168_mdio_write(tp, 0x06, 0x03f7); + rtl8168_mdio_write(tp, 0x06, 0x06bf); + rtl8168_mdio_write(tp, 0x06, 0x8622); + rtl8168_mdio_write(tp, 0x06, 0x022e); + rtl8168_mdio_write(tp, 0x06, 0xb0ae); + rtl8168_mdio_write(tp, 0x06, 0x11e0); + rtl8168_mdio_write(tp, 0x06, 0xe14e); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x4fad); + rtl8168_mdio_write(tp, 0x06, 0x2d08); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x2d02); + rtl8168_mdio_write(tp, 0x06, 0x2eb0); + rtl8168_mdio_write(tp, 0x06, 0xf606); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x204c); + rtl8168_mdio_write(tp, 0x06, 0xd200); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x0058); + rtl8168_mdio_write(tp, 0x06, 0x010c); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 0x5810); + rtl8168_mdio_write(tp, 0x06, 0x1e20); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x3658); + rtl8168_mdio_write(tp, 0x06, 0x031e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xe01e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0x8ae6); + rtl8168_mdio_write(tp, 0x06, 0x1f02); + rtl8168_mdio_write(tp, 0x06, 0x9e22); + rtl8168_mdio_write(tp, 0x06, 0xe68a); + rtl8168_mdio_write(tp, 0x06, 0xe6ad); + rtl8168_mdio_write(tp, 0x06, 0x3214); + rtl8168_mdio_write(tp, 0x06, 0xad34); + rtl8168_mdio_write(tp, 0x06, 0x11ef); + rtl8168_mdio_write(tp, 0x06, 0x0258); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x07ad); + rtl8168_mdio_write(tp, 0x06, 0x3508); + rtl8168_mdio_write(tp, 0x06, 0x5ac0); + rtl8168_mdio_write(tp, 0x06, 0x9f04); + 
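+ /*
+  * Editor's note: readiness polling in these loaders is bounded.  Each
+  * poll loop runs 200 iterations of udelay(100), giving the PHY MCU at
+  * most ~20 ms to flag ready (e.g. BIT_7 of register 0x00 on page
+  * 0x0005) before the driver proceeds regardless; no error is reported
+  * on timeout.
+  */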
rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xae02); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x3e02); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfae0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac26); + rtl8168_mdio_write(tp, 0x06, 0x0ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xac24); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x6bee); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xe0eb); + rtl8168_mdio_write(tp, 0x06, 0x00e2); + rtl8168_mdio_write(tp, 0x06, 0xe07c); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0x7da5); + rtl8168_mdio_write(tp, 0x06, 0x1111); + rtl8168_mdio_write(tp, 0x06, 0x15d2); + rtl8168_mdio_write(tp, 0x06, 0x60d6); + rtl8168_mdio_write(tp, 0x06, 0x6666); + rtl8168_mdio_write(tp, 0x06, 0x0207); + rtl8168_mdio_write(tp, 0x06, 0xf9d2); + rtl8168_mdio_write(tp, 0x06, 0xa0d6); + rtl8168_mdio_write(tp, 0x06, 0xaaaa); + rtl8168_mdio_write(tp, 0x06, 0x0207); + rtl8168_mdio_write(tp, 0x06, 0xf902); + rtl8168_mdio_write(tp, 0x06, 0x825c); + rtl8168_mdio_write(tp, 0x06, 0xae44); + rtl8168_mdio_write(tp, 0x06, 0xa566); + rtl8168_mdio_write(tp, 0x06, 0x6602); + rtl8168_mdio_write(tp, 0x06, 0xae38); + rtl8168_mdio_write(tp, 0x06, 0xa5aa); + rtl8168_mdio_write(tp, 0x06, 0xaa02); + rtl8168_mdio_write(tp, 0x06, 0xae32); + rtl8168_mdio_write(tp, 0x06, 0xeee0); + rtl8168_mdio_write(tp, 0x06, 0xea04); + rtl8168_mdio_write(tp, 0x06, 0xeee0); + rtl8168_mdio_write(tp, 0x06, 0xeb06); + rtl8168_mdio_write(tp, 0x06, 0xe2e0); + rtl8168_mdio_write(tp, 0x06, 0x7ce3); + rtl8168_mdio_write(tp, 0x06, 0xe07d); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x38e1); + rtl8168_mdio_write(tp, 0x06, 0xe039); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x21ad); + rtl8168_mdio_write(tp, 0x06, 0x3f13); + rtl8168_mdio_write(tp, 0x06, 0xe0e4); + rtl8168_mdio_write(tp, 0x06, 0x14e1); + rtl8168_mdio_write(tp, 0x06, 0xe415); + rtl8168_mdio_write(tp, 0x06, 0x6880); + rtl8168_mdio_write(tp, 0x06, 0xe4e4); + rtl8168_mdio_write(tp, 0x06, 0x14e5); + rtl8168_mdio_write(tp, 0x06, 0xe415); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x5cae); + rtl8168_mdio_write(tp, 0x06, 0x0bac); + rtl8168_mdio_write(tp, 0x06, 0x3e02); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x8602); + rtl8168_mdio_write(tp, 0x06, 0x82b0); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2605); + rtl8168_mdio_write(tp, 0x06, 0x0221); + rtl8168_mdio_write(tp, 0x06, 0xf3f7); + rtl8168_mdio_write(tp, 0x06, 0x28e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xad21); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x22f8); + rtl8168_mdio_write(tp, 0x06, 
0xf729); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2405); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xebf7); + rtl8168_mdio_write(tp, 0x06, 0x2ae5); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x2134); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2109); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x2eac); + rtl8168_mdio_write(tp, 0x06, 0x2003); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0x52e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x09e0); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x8337); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2608); + rtl8168_mdio_write(tp, 0x06, 0xe085); + rtl8168_mdio_write(tp, 0x06, 0xd2ad); + rtl8168_mdio_write(tp, 0x06, 0x2502); + rtl8168_mdio_write(tp, 0x06, 0xf628); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x210a); + rtl8168_mdio_write(tp, 0x06, 0xe086); + rtl8168_mdio_write(tp, 0x06, 0x0af6); + rtl8168_mdio_write(tp, 0x06, 0x27a0); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0xf629); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2408); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xedad); + rtl8168_mdio_write(tp, 0x06, 0x2002); + rtl8168_mdio_write(tp, 0x06, 0xf62a); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x2ea1); + rtl8168_mdio_write(tp, 0x06, 0x0003); + rtl8168_mdio_write(tp, 0x06, 0x0221); + rtl8168_mdio_write(tp, 0x06, 0x11fc); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x8aed); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8aec); + rtl8168_mdio_write(tp, 0x06, 0x0004); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x3ae0); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xeb58); + rtl8168_mdio_write(tp, 0x06, 0xf8d1); + rtl8168_mdio_write(tp, 0x06, 0x01e4); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0xebe0); + rtl8168_mdio_write(tp, 0x06, 0xe07c); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x7d5c); + rtl8168_mdio_write(tp, 0x06, 0x00ff); + rtl8168_mdio_write(tp, 0x06, 0x3c00); + rtl8168_mdio_write(tp, 0x06, 0x1eab); + rtl8168_mdio_write(tp, 0x06, 0x1ce0); + rtl8168_mdio_write(tp, 0x06, 0xe04c); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x4d58); + rtl8168_mdio_write(tp, 0x06, 0xc1e4); + rtl8168_mdio_write(tp, 0x06, 0xe04c); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 
0x06, 0x4de0); + rtl8168_mdio_write(tp, 0x06, 0xe0ee); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0x3ce4); + rtl8168_mdio_write(tp, 0x06, 0xe0ee); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0xeffc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2412); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0xeee1); + rtl8168_mdio_write(tp, 0x06, 0xe0ef); + rtl8168_mdio_write(tp, 0x06, 0x59c3); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0xeee5); + rtl8168_mdio_write(tp, 0x06, 0xe0ef); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xed01); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac25); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x8363); + rtl8168_mdio_write(tp, 0x06, 0xae03); + rtl8168_mdio_write(tp, 0x06, 0x0225); + rtl8168_mdio_write(tp, 0x06, 0x16fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfae0); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0xa000); + rtl8168_mdio_write(tp, 0x06, 0x19e0); + rtl8168_mdio_write(tp, 0x06, 0x860b); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x331b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x04aa); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x06ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0xe602); + rtl8168_mdio_write(tp, 0x06, 0x241e); + rtl8168_mdio_write(tp, 0x06, 0xae14); + rtl8168_mdio_write(tp, 0x06, 0xa001); + rtl8168_mdio_write(tp, 0x06, 0x1402); + rtl8168_mdio_write(tp, 0x06, 0x2426); + rtl8168_mdio_write(tp, 0x06, 0xbf26); + rtl8168_mdio_write(tp, 0x06, 0x6d02); + rtl8168_mdio_write(tp, 0x06, 0x2eb0); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0b00); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0a02); + rtl8168_mdio_write(tp, 0x06, 0xaf84); + rtl8168_mdio_write(tp, 0x06, 0x3ca0); + rtl8168_mdio_write(tp, 0x06, 0x0252); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0400); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0500); + rtl8168_mdio_write(tp, 0x06, 0xe086); + rtl8168_mdio_write(tp, 0x06, 0x0be1); + rtl8168_mdio_write(tp, 0x06, 0x8b32); + rtl8168_mdio_write(tp, 0x06, 0x1b10); + rtl8168_mdio_write(tp, 0x06, 0x9e04); + rtl8168_mdio_write(tp, 0x06, 0xaa02); + rtl8168_mdio_write(tp, 0x06, 0xaecb); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0b00); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x3ae2); + rtl8168_mdio_write(tp, 0x06, 0x8604); + rtl8168_mdio_write(tp, 0x06, 0xe386); + rtl8168_mdio_write(tp, 0x06, 0x05ef); + rtl8168_mdio_write(tp, 0x06, 0x65e2); + rtl8168_mdio_write(tp, 0x06, 0x8606); + rtl8168_mdio_write(tp, 0x06, 0xe386); + rtl8168_mdio_write(tp, 0x06, 0x071b); + rtl8168_mdio_write(tp, 0x06, 0x56aa); + rtl8168_mdio_write(tp, 0x06, 0x0eef); + rtl8168_mdio_write(tp, 0x06, 0x56e6); + rtl8168_mdio_write(tp, 0x06, 0x8606); + rtl8168_mdio_write(tp, 0x06, 0xe786); + 
rtl8168_mdio_write(tp, 0x06, 0x07e2); + rtl8168_mdio_write(tp, 0x06, 0x8609); + rtl8168_mdio_write(tp, 0x06, 0xe686); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8609); + rtl8168_mdio_write(tp, 0x06, 0xa000); + rtl8168_mdio_write(tp, 0x06, 0x07ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x03af); + rtl8168_mdio_write(tp, 0x06, 0x8369); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x2426); + rtl8168_mdio_write(tp, 0x06, 0xae48); + rtl8168_mdio_write(tp, 0x06, 0xa003); + rtl8168_mdio_write(tp, 0x06, 0x21e0); + rtl8168_mdio_write(tp, 0x06, 0x8608); + rtl8168_mdio_write(tp, 0x06, 0xe186); + rtl8168_mdio_write(tp, 0x06, 0x091b); + rtl8168_mdio_write(tp, 0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x0caa); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x249d); + rtl8168_mdio_write(tp, 0x06, 0xaee7); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x8eae); + rtl8168_mdio_write(tp, 0x06, 0xe2ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x860b); + rtl8168_mdio_write(tp, 0x06, 0x00af); + rtl8168_mdio_write(tp, 0x06, 0x8369); + rtl8168_mdio_write(tp, 0x06, 0xa004); + rtl8168_mdio_write(tp, 0x06, 0x15e0); + rtl8168_mdio_write(tp, 0x06, 0x860b); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x341b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x05aa); + rtl8168_mdio_write(tp, 0x06, 0x03af); + rtl8168_mdio_write(tp, 0x06, 0x8383); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0a05); + rtl8168_mdio_write(tp, 0x06, 0xae0c); + rtl8168_mdio_write(tp, 0x06, 0xa005); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x0702); + rtl8168_mdio_write(tp, 0x06, 0x2309); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0a00); + rtl8168_mdio_write(tp, 0x06, 0xfeef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfbe0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x22e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x23e2); + rtl8168_mdio_write(tp, 0x06, 0xe036); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0x375a); + rtl8168_mdio_write(tp, 0x06, 0xc40d); + rtl8168_mdio_write(tp, 0x06, 0x0158); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x20e3); + rtl8168_mdio_write(tp, 0x06, 0x8ae7); + rtl8168_mdio_write(tp, 0x06, 0xac31); + rtl8168_mdio_write(tp, 0x06, 0x60ac); + rtl8168_mdio_write(tp, 0x06, 0x3a08); + rtl8168_mdio_write(tp, 0x06, 0xac3e); + rtl8168_mdio_write(tp, 0x06, 0x26ae); + rtl8168_mdio_write(tp, 0x06, 0x67af); + rtl8168_mdio_write(tp, 0x06, 0x84db); + rtl8168_mdio_write(tp, 0x06, 0xad37); + rtl8168_mdio_write(tp, 0x06, 0x61e0); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xe91b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x51d1); + rtl8168_mdio_write(tp, 0x06, 
0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x863b); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50ee); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x43ad); + rtl8168_mdio_write(tp, 0x06, 0x3627); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xeee1); + rtl8168_mdio_write(tp, 0x06, 0x8aef); + rtl8168_mdio_write(tp, 0x06, 0xef74); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xeae1); + rtl8168_mdio_write(tp, 0x06, 0x8aeb); + rtl8168_mdio_write(tp, 0x06, 0x1b74); + rtl8168_mdio_write(tp, 0x06, 0x9e2e); + rtl8168_mdio_write(tp, 0x06, 0x14e4); + rtl8168_mdio_write(tp, 0x06, 0x8aea); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xebef); + rtl8168_mdio_write(tp, 0x06, 0x74e0); + rtl8168_mdio_write(tp, 0x06, 0x8aee); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xef1b); + rtl8168_mdio_write(tp, 0x06, 0x479e); + rtl8168_mdio_write(tp, 0x06, 0x0fae); + rtl8168_mdio_write(tp, 0x06, 0x19ee); + rtl8168_mdio_write(tp, 0x06, 0x8aea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8aeb); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x0fac); + rtl8168_mdio_write(tp, 0x06, 0x390c); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x3b02); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe800); + rtl8168_mdio_write(tp, 0x06, 0xe68a); + rtl8168_mdio_write(tp, 0x06, 0xe7ff); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xc4e1); + rtl8168_mdio_write(tp, 0x06, 0x8b6e); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e24); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x6ead); + rtl8168_mdio_write(tp, 0x06, 0x2218); + rtl8168_mdio_write(tp, 0x06, 0xac27); + rtl8168_mdio_write(tp, 0x06, 0x0dac); + rtl8168_mdio_write(tp, 0x06, 0x2605); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x8fae); + rtl8168_mdio_write(tp, 0x06, 0x1302); + rtl8168_mdio_write(tp, 0x06, 0x03c8); + rtl8168_mdio_write(tp, 0x06, 0xae0e); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0xe102); + rtl8168_mdio_write(tp, 0x06, 0x8520); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x8f02); + rtl8168_mdio_write(tp, 0x06, 0x8566); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x82ad); + rtl8168_mdio_write(tp, 0x06, 0x2737); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4402); + rtl8168_mdio_write(tp, 0x06, 0x2f23); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x2ed1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8647); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 
0x06, 0x50bf); + rtl8168_mdio_write(tp, 0x06, 0x8641); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x23e5); + rtl8168_mdio_write(tp, 0x06, 0x8af0); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x22e1); + rtl8168_mdio_write(tp, 0x06, 0xe023); + rtl8168_mdio_write(tp, 0x06, 0xac2e); + rtl8168_mdio_write(tp, 0x06, 0x04d1); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0x02d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x8641); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50d1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8644); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4702); + rtl8168_mdio_write(tp, 0x06, 0x2f23); + rtl8168_mdio_write(tp, 0x06, 0xad28); + rtl8168_mdio_write(tp, 0x06, 0x19d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x8644); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50e1); + rtl8168_mdio_write(tp, 0x06, 0x8af0); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4102); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4702); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xfee1); + rtl8168_mdio_write(tp, 0x06, 0xe2ff); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x63e0); + rtl8168_mdio_write(tp, 0x06, 0xe038); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x39ad); + rtl8168_mdio_write(tp, 0x06, 0x2f10); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xf726); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xae0e); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e1); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xf728); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e5); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xf72b); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xd07d); + rtl8168_mdio_write(tp, 0x06, 0xb0fe); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xf62b); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xf626); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + 
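+ /*
+  * Editor's note (interpretation): the run of zero words after the
+  * final 0xae20 word below pads out the patch image, and the
+  * subsequent read-modify-writes that set BIT_0 in registers 0x01 and
+  * 0x00 appear to hand control back to (restart) the patched MCU.
+  */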
rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e1); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xf628); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e5); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xae20); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0xa725); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x1de5); + rtl8168_mdio_write(tp, 0x06, 0x0a2c); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x6de5); + rtl8168_mdio_write(tp, 0x06, 0x0a1d); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x1ce5); + rtl8168_mdio_write(tp, 0x06, 0x0a2d); + rtl8168_mdio_write(tp, 0x06, 0xa755); + rtl8168_mdio_write(tp, 0x06, 0x00e2); + rtl8168_mdio_write(tp, 0x06, 0x3488); + rtl8168_mdio_write(tp, 0x06, 0xe200); + rtl8168_mdio_write(tp, 0x06, 0xcce2); + rtl8168_mdio_write(tp, 0x06, 0x0055); + rtl8168_mdio_write(tp, 0x06, 0xe020); + rtl8168_mdio_write(tp, 0x06, 0x55e2); + rtl8168_mdio_write(tp, 0x06, 0xd600); + rtl8168_mdio_write(tp, 0x06, 0xe24a); + gphy_val = rtl8168_mdio_read(tp, 0x01); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x01, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2179); + rtl8168_mdio_write(tp, 0x1f, 0x0001); + rtl8168_mdio_write(tp, 0x10, 0xf274); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0042); + rtl8168_mdio_write(tp, 0x15, 0x0f00); + rtl8168_mdio_write(tp, 0x15, 0x0f00); + rtl8168_mdio_write(tp, 0x16, 0x7408); + rtl8168_mdio_write(tp, 0x15, 0x0e00); + rtl8168_mdio_write(tp, 0x15, 0x0f00); + rtl8168_mdio_write(tp, 0x15, 0x0f01); + rtl8168_mdio_write(tp, 0x16, 0x4000); + rtl8168_mdio_write(tp, 0x15, 0x0e01); + rtl8168_mdio_write(tp, 0x15, 0x0f01); + rtl8168_mdio_write(tp, 0x15, 0x0f02); + rtl8168_mdio_write(tp, 0x16, 0x9400); + rtl8168_mdio_write(tp, 0x15, 0x0e02); + rtl8168_mdio_write(tp, 0x15, 0x0f02); + rtl8168_mdio_write(tp, 0x15, 0x0f03); + rtl8168_mdio_write(tp, 0x16, 0x7408); + rtl8168_mdio_write(tp, 0x15, 0x0e03); + rtl8168_mdio_write(tp, 0x15, 0x0f03); + rtl8168_mdio_write(tp, 0x15, 0x0f04); + rtl8168_mdio_write(tp, 0x16, 0x4008); + rtl8168_mdio_write(tp, 0x15, 0x0e04); + rtl8168_mdio_write(tp, 0x15, 0x0f04); + rtl8168_mdio_write(tp, 0x15, 0x0f05); + rtl8168_mdio_write(tp, 0x16, 0x9400); + rtl8168_mdio_write(tp, 0x15, 0x0e05); + rtl8168_mdio_write(tp, 0x15, 0x0f05); + rtl8168_mdio_write(tp, 0x15, 0x0f06); + rtl8168_mdio_write(tp, 0x16, 0x0803); + rtl8168_mdio_write(tp, 0x15, 0x0e06); + rtl8168_mdio_write(tp, 0x15, 0x0f06); + rtl8168_mdio_write(tp, 
0x15, 0x0d00); + rtl8168_mdio_write(tp, 0x15, 0x0100); + rtl8168_mdio_write(tp, 0x1f, 0x0001); + rtl8168_mdio_write(tp, 0x10, 0xf074); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2149); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~(BIT_2); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val |= BIT_14; + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1e, 0x0020); + gphy_val = rtl8168_mdio_read(tp, 0x1b); + gphy_val |= BIT_7; + rtl8168_mdio_write(tp, 0x1b, gphy_val); + rtl8168_mdio_write(tp, 0x1e, 0x0041); + rtl8168_mdio_write(tp, 0x15, 0x0e02); + rtl8168_mdio_write(tp, 0x1e, 0x0028); + gphy_val = rtl8168_mdio_read(tp, 0x19); + gphy_val |= BIT_15; + rtl8168_mdio_write(tp, 0x19, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } else { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x1800); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x17, 0x0117); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + rtl8168_mdio_write(tp, 0x1B, 0x5000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x4104); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1E); + gphy_val &= 0x03FF; + if (gphy_val==0x000C) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x07); + if ((gphy_val & BIT_5) == 0) + break; + } + gphy_val = rtl8168_mdio_read(tp, 0x07); + if (gphy_val & BIT_5) { + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x00a1); + rtl8168_mdio_write(tp, 0x17, 0x1000); + rtl8168_mdio_write(tp, 0x17, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2000); + rtl8168_mdio_write(tp, 0x1e, 0x002f); + rtl8168_mdio_write(tp, 0x18, 0x9bfb); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x07, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x08); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x08, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + rtl8168_mdio_write(tp, 0x15, 0x000e); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15, 0x0010); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x0018); + rtl8168_mdio_write(tp, 0x19, 0x4801); + rtl8168_mdio_write(tp, 0x15, 0x0019); + rtl8168_mdio_write(tp, 0x19, 0x6801); + rtl8168_mdio_write(tp, 0x15, 0x001a); + rtl8168_mdio_write(tp, 0x19, 0x66a1); + rtl8168_mdio_write(tp, 0x15, 0x001f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 
0x15, 0x0020); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0021); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0022); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0023); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0024); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0025); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0026); + rtl8168_mdio_write(tp, 0x19, 0x40ea); + rtl8168_mdio_write(tp, 0x15, 0x0027); + rtl8168_mdio_write(tp, 0x19, 0x4503); + rtl8168_mdio_write(tp, 0x15, 0x0028); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0029); + rtl8168_mdio_write(tp, 0x19, 0xa631); + rtl8168_mdio_write(tp, 0x15, 0x002a); + rtl8168_mdio_write(tp, 0x19, 0x9717); + rtl8168_mdio_write(tp, 0x15, 0x002b); + rtl8168_mdio_write(tp, 0x19, 0x302c); + rtl8168_mdio_write(tp, 0x15, 0x002c); + rtl8168_mdio_write(tp, 0x19, 0x4802); + rtl8168_mdio_write(tp, 0x15, 0x002d); + rtl8168_mdio_write(tp, 0x19, 0x58da); + rtl8168_mdio_write(tp, 0x15, 0x002e); + rtl8168_mdio_write(tp, 0x19, 0x400d); + rtl8168_mdio_write(tp, 0x15, 0x002f); + rtl8168_mdio_write(tp, 0x19, 0x4488); + rtl8168_mdio_write(tp, 0x15, 0x0030); + rtl8168_mdio_write(tp, 0x19, 0x9e00); + rtl8168_mdio_write(tp, 0x15, 0x0031); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0032); + rtl8168_mdio_write(tp, 0x19, 0x6481); + rtl8168_mdio_write(tp, 0x15, 0x0033); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0034); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0035); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0036); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0037); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0038); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0039); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x003a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x003b); + rtl8168_mdio_write(tp, 0x19, 0x63e8); + rtl8168_mdio_write(tp, 0x15, 0x003c); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x003d); + rtl8168_mdio_write(tp, 0x19, 0x59d4); + rtl8168_mdio_write(tp, 0x15, 0x003e); + rtl8168_mdio_write(tp, 0x19, 0x63f8); + rtl8168_mdio_write(tp, 0x15, 0x0040); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0041); + rtl8168_mdio_write(tp, 0x19, 0x30de); + rtl8168_mdio_write(tp, 0x15, 0x0044); + rtl8168_mdio_write(tp, 0x19, 0x480f); + rtl8168_mdio_write(tp, 0x15, 0x0045); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x0046); + rtl8168_mdio_write(tp, 0x19, 0x6680); + rtl8168_mdio_write(tp, 0x15, 0x0047); + rtl8168_mdio_write(tp, 0x19, 0x7c10); + rtl8168_mdio_write(tp, 0x15, 0x0048); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0049); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004f); + 
rtl8168_mdio_write(tp, 0x19, 0x40ea); + rtl8168_mdio_write(tp, 0x15, 0x0050); + rtl8168_mdio_write(tp, 0x19, 0x4503); + rtl8168_mdio_write(tp, 0x15, 0x0051); + rtl8168_mdio_write(tp, 0x19, 0x58ca); + rtl8168_mdio_write(tp, 0x15, 0x0052); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x0053); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x0054); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x0055); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0056); + rtl8168_mdio_write(tp, 0x19, 0x3000); + rtl8168_mdio_write(tp, 0x15, 0x00a1); + rtl8168_mdio_write(tp, 0x19, 0x3044); + rtl8168_mdio_write(tp, 0x15, 0x00ab); + rtl8168_mdio_write(tp, 0x19, 0x5820); + rtl8168_mdio_write(tp, 0x15, 0x00ac); + rtl8168_mdio_write(tp, 0x19, 0x5e04); + rtl8168_mdio_write(tp, 0x15, 0x00ad); + rtl8168_mdio_write(tp, 0x19, 0xb60c); + rtl8168_mdio_write(tp, 0x15, 0x00af); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15, 0x00b2); + rtl8168_mdio_write(tp, 0x19, 0x30b9); + rtl8168_mdio_write(tp, 0x15, 0x00b9); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x00ba); + rtl8168_mdio_write(tp, 0x19, 0x480b); + rtl8168_mdio_write(tp, 0x15, 0x00bb); + rtl8168_mdio_write(tp, 0x19, 0x5e00); + rtl8168_mdio_write(tp, 0x15, 0x00bc); + rtl8168_mdio_write(tp, 0x19, 0x405f); + rtl8168_mdio_write(tp, 0x15, 0x00bd); + rtl8168_mdio_write(tp, 0x19, 0x4448); + rtl8168_mdio_write(tp, 0x15, 0x00be); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x00bf); + rtl8168_mdio_write(tp, 0x19, 0x4468); + rtl8168_mdio_write(tp, 0x15, 0x00c0); + rtl8168_mdio_write(tp, 0x19, 0x9c02); + rtl8168_mdio_write(tp, 0x15, 0x00c1); + rtl8168_mdio_write(tp, 0x19, 0x58a0); + rtl8168_mdio_write(tp, 0x15, 0x00c2); + rtl8168_mdio_write(tp, 0x19, 0xb605); + rtl8168_mdio_write(tp, 0x15, 0x00c3); + rtl8168_mdio_write(tp, 0x19, 0xc0d3); + rtl8168_mdio_write(tp, 0x15, 0x00c4); + rtl8168_mdio_write(tp, 0x19, 0x00e6); + rtl8168_mdio_write(tp, 0x15, 0x00c5); + rtl8168_mdio_write(tp, 0x19, 0xdaec); + rtl8168_mdio_write(tp, 0x15, 0x00c6); + rtl8168_mdio_write(tp, 0x19, 0x00fa); + rtl8168_mdio_write(tp, 0x15, 0x00c7); + rtl8168_mdio_write(tp, 0x19, 0x9df9); + rtl8168_mdio_write(tp, 0x15, 0x0112); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0113); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0114); + rtl8168_mdio_write(tp, 0x19, 0x63f0); + rtl8168_mdio_write(tp, 0x15, 0x0115); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0116); + rtl8168_mdio_write(tp, 0x19, 0x4418); + rtl8168_mdio_write(tp, 0x15, 0x0117); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x15, 0x0118); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x0119); + rtl8168_mdio_write(tp, 0x19, 0x64e1); + rtl8168_mdio_write(tp, 0x15, 0x011a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0150); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0151); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x0152); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0153); + rtl8168_mdio_write(tp, 0x19, 0x4540); + rtl8168_mdio_write(tp, 0x15, 0x0154); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0155); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x15, 0x0156); + rtl8168_mdio_write(tp, 0x19, 
0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0157); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0158); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0159); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x015a); + rtl8168_mdio_write(tp, 0x19, 0x30fe); + rtl8168_mdio_write(tp, 0x15, 0x029c); + rtl8168_mdio_write(tp, 0x19, 0x0070); + rtl8168_mdio_write(tp, 0x15, 0x02b2); + rtl8168_mdio_write(tp, 0x19, 0x005a); + rtl8168_mdio_write(tp, 0x15, 0x02bd); + rtl8168_mdio_write(tp, 0x19, 0xa522); + rtl8168_mdio_write(tp, 0x15, 0x02ce); + rtl8168_mdio_write(tp, 0x19, 0xb63e); + rtl8168_mdio_write(tp, 0x15, 0x02d9); + rtl8168_mdio_write(tp, 0x19, 0x32df); + rtl8168_mdio_write(tp, 0x15, 0x02df); + rtl8168_mdio_write(tp, 0x19, 0x4500); + rtl8168_mdio_write(tp, 0x15, 0x02f4); + rtl8168_mdio_write(tp, 0x19, 0xb618); + rtl8168_mdio_write(tp, 0x15, 0x02fb); + rtl8168_mdio_write(tp, 0x19, 0xb900); + rtl8168_mdio_write(tp, 0x15, 0x02fc); + rtl8168_mdio_write(tp, 0x19, 0x49b5); + rtl8168_mdio_write(tp, 0x15, 0x02fd); + rtl8168_mdio_write(tp, 0x19, 0x6812); + rtl8168_mdio_write(tp, 0x15, 0x02fe); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x02ff); + rtl8168_mdio_write(tp, 0x19, 0x9900); + rtl8168_mdio_write(tp, 0x15, 0x0300); + rtl8168_mdio_write(tp, 0x19, 0x64a0); + rtl8168_mdio_write(tp, 0x15, 0x0301); + rtl8168_mdio_write(tp, 0x19, 0x3316); + rtl8168_mdio_write(tp, 0x15, 0x0308); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030c); + rtl8168_mdio_write(tp, 0x19, 0x3000); + rtl8168_mdio_write(tp, 0x15, 0x0312); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0313); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0314); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0315); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0316); + rtl8168_mdio_write(tp, 0x19, 0x49b5); + rtl8168_mdio_write(tp, 0x15, 0x0317); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x0318); + rtl8168_mdio_write(tp, 0x19, 0x4d00); + rtl8168_mdio_write(tp, 0x15, 0x0319); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x031a); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x031b); + rtl8168_mdio_write(tp, 0x19, 0x4925); + rtl8168_mdio_write(tp, 0x15, 0x031c); + rtl8168_mdio_write(tp, 0x19, 0x403b); + rtl8168_mdio_write(tp, 0x15, 0x031d); + rtl8168_mdio_write(tp, 0x19, 0xa602); + rtl8168_mdio_write(tp, 0x15, 0x031e); + rtl8168_mdio_write(tp, 0x19, 0x402f); + rtl8168_mdio_write(tp, 0x15, 0x031f); + rtl8168_mdio_write(tp, 0x19, 0x4484); + rtl8168_mdio_write(tp, 0x15, 0x0320); + rtl8168_mdio_write(tp, 0x19, 0x40c8); + rtl8168_mdio_write(tp, 0x15, 0x0321); + rtl8168_mdio_write(tp, 0x19, 0x44c4); + rtl8168_mdio_write(tp, 0x15, 0x0322); + rtl8168_mdio_write(tp, 0x19, 0x404f); + rtl8168_mdio_write(tp, 0x15, 0x0323); + rtl8168_mdio_write(tp, 0x19, 0x44c8); + rtl8168_mdio_write(tp, 0x15, 0x0324); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0325); + rtl8168_mdio_write(tp, 0x19, 0x00e7); + rtl8168_mdio_write(tp, 0x15, 0x0326); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0327); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x0328); + rtl8168_mdio_write(tp, 0x19, 0x4d48); + rtl8168_mdio_write(tp, 0x15, 0x0329); + rtl8168_mdio_write(tp, 0x19, 0x332b); + rtl8168_mdio_write(tp, 
0x15, 0x032a); + rtl8168_mdio_write(tp, 0x19, 0x4d40); + rtl8168_mdio_write(tp, 0x15, 0x032c); + rtl8168_mdio_write(tp, 0x19, 0x00f8); + rtl8168_mdio_write(tp, 0x15, 0x032d); + rtl8168_mdio_write(tp, 0x19, 0x82b2); + rtl8168_mdio_write(tp, 0x15, 0x032f); + rtl8168_mdio_write(tp, 0x19, 0x00b0); + rtl8168_mdio_write(tp, 0x15, 0x0332); + rtl8168_mdio_write(tp, 0x19, 0x91f2); + rtl8168_mdio_write(tp, 0x15, 0x033f); + rtl8168_mdio_write(tp, 0x19, 0xb6cd); + rtl8168_mdio_write(tp, 0x15, 0x0340); + rtl8168_mdio_write(tp, 0x19, 0x9e01); + rtl8168_mdio_write(tp, 0x15, 0x0341); + rtl8168_mdio_write(tp, 0x19, 0xd11d); + rtl8168_mdio_write(tp, 0x15, 0x0342); + rtl8168_mdio_write(tp, 0x19, 0x009d); + rtl8168_mdio_write(tp, 0x15, 0x0343); + rtl8168_mdio_write(tp, 0x19, 0xbb1c); + rtl8168_mdio_write(tp, 0x15, 0x0344); + rtl8168_mdio_write(tp, 0x19, 0x8102); + rtl8168_mdio_write(tp, 0x15, 0x0345); + rtl8168_mdio_write(tp, 0x19, 0x3348); + rtl8168_mdio_write(tp, 0x15, 0x0346); + rtl8168_mdio_write(tp, 0x19, 0xa231); + rtl8168_mdio_write(tp, 0x15, 0x0347); + rtl8168_mdio_write(tp, 0x19, 0x335b); + rtl8168_mdio_write(tp, 0x15, 0x0348); + rtl8168_mdio_write(tp, 0x19, 0x91f7); + rtl8168_mdio_write(tp, 0x15, 0x0349); + rtl8168_mdio_write(tp, 0x19, 0xc218); + rtl8168_mdio_write(tp, 0x15, 0x034a); + rtl8168_mdio_write(tp, 0x19, 0x00f5); + rtl8168_mdio_write(tp, 0x15, 0x034b); + rtl8168_mdio_write(tp, 0x19, 0x335b); + rtl8168_mdio_write(tp, 0x15, 0x034c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x034d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x034e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x034f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0350); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x035b); + rtl8168_mdio_write(tp, 0x19, 0xa23c); + rtl8168_mdio_write(tp, 0x15, 0x035c); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x035d); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x035e); + rtl8168_mdio_write(tp, 0x19, 0x3397); + rtl8168_mdio_write(tp, 0x15, 0x0363); + rtl8168_mdio_write(tp, 0x19, 0xb6a9); + rtl8168_mdio_write(tp, 0x15, 0x0366); + rtl8168_mdio_write(tp, 0x19, 0x00f5); + rtl8168_mdio_write(tp, 0x15, 0x0382); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0388); + rtl8168_mdio_write(tp, 0x19, 0x0084); + rtl8168_mdio_write(tp, 0x15, 0x0389); + rtl8168_mdio_write(tp, 0x19, 0xdd17); + rtl8168_mdio_write(tp, 0x15, 0x038a); + rtl8168_mdio_write(tp, 0x19, 0x000b); + rtl8168_mdio_write(tp, 0x15, 0x038b); + rtl8168_mdio_write(tp, 0x19, 0xa10a); + rtl8168_mdio_write(tp, 0x15, 0x038c); + rtl8168_mdio_write(tp, 0x19, 0x337e); + rtl8168_mdio_write(tp, 0x15, 0x038d); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x038e); + rtl8168_mdio_write(tp, 0x19, 0xa107); + rtl8168_mdio_write(tp, 0x15, 0x038f); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x0390); + rtl8168_mdio_write(tp, 0x19, 0xc017); + rtl8168_mdio_write(tp, 0x15, 0x0391); + rtl8168_mdio_write(tp, 0x19, 0x0004); + rtl8168_mdio_write(tp, 0x15, 0x0392); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0393); + rtl8168_mdio_write(tp, 0x19, 0x00f4); + rtl8168_mdio_write(tp, 0x15, 0x0397); + rtl8168_mdio_write(tp, 0x19, 0x4098); + rtl8168_mdio_write(tp, 0x15, 0x0398); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x0399); + 
rtl8168_mdio_write(tp, 0x19, 0x55bf); + rtl8168_mdio_write(tp, 0x15, 0x039a); + rtl8168_mdio_write(tp, 0x19, 0x4bb9); + rtl8168_mdio_write(tp, 0x15, 0x039b); + rtl8168_mdio_write(tp, 0x19, 0x6810); + rtl8168_mdio_write(tp, 0x15, 0x039c); + rtl8168_mdio_write(tp, 0x19, 0x4b29); + rtl8168_mdio_write(tp, 0x15, 0x039d); + rtl8168_mdio_write(tp, 0x19, 0x4041); + rtl8168_mdio_write(tp, 0x15, 0x039e); + rtl8168_mdio_write(tp, 0x19, 0x442a); + rtl8168_mdio_write(tp, 0x15, 0x039f); + rtl8168_mdio_write(tp, 0x19, 0x4029); + rtl8168_mdio_write(tp, 0x15, 0x03aa); + rtl8168_mdio_write(tp, 0x19, 0x33b8); + rtl8168_mdio_write(tp, 0x15, 0x03b6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b8); + rtl8168_mdio_write(tp, 0x19, 0x543f); + rtl8168_mdio_write(tp, 0x15, 0x03b9); + rtl8168_mdio_write(tp, 0x19, 0x499a); + rtl8168_mdio_write(tp, 0x15, 0x03ba); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x03bb); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03bc); + rtl8168_mdio_write(tp, 0x19, 0x490a); + rtl8168_mdio_write(tp, 0x15, 0x03bd); + rtl8168_mdio_write(tp, 0x19, 0x405e); + rtl8168_mdio_write(tp, 0x15, 0x03c2); + rtl8168_mdio_write(tp, 0x19, 0x9a03); + rtl8168_mdio_write(tp, 0x15, 0x03c4); + rtl8168_mdio_write(tp, 0x19, 0x0015); + rtl8168_mdio_write(tp, 0x15, 0x03c5); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x03c8); + rtl8168_mdio_write(tp, 0x19, 0x9cf7); + rtl8168_mdio_write(tp, 0x15, 0x03c9); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03ca); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03cb); + rtl8168_mdio_write(tp, 0x19, 0x4458); + rtl8168_mdio_write(tp, 0x15, 0x03cd); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03ce); + rtl8168_mdio_write(tp, 0x19, 0x33bf); + rtl8168_mdio_write(tp, 0x15, 0x03cf); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d0); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d1); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03d9); + rtl8168_mdio_write(tp, 0x19, 0x49bb); + rtl8168_mdio_write(tp, 0x15, 0x03da); + rtl8168_mdio_write(tp, 0x19, 0x4478); + rtl8168_mdio_write(tp, 0x15, 0x03db); + rtl8168_mdio_write(tp, 0x19, 0x492b); + rtl8168_mdio_write(tp, 0x15, 0x03dc); + rtl8168_mdio_write(tp, 0x19, 0x7c01); + rtl8168_mdio_write(tp, 0x15, 0x03dd); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x03de); + rtl8168_mdio_write(tp, 0x19, 0xbd1a); + rtl8168_mdio_write(tp, 0x15, 0x03df); + rtl8168_mdio_write(tp, 0x19, 0xc428); + rtl8168_mdio_write(tp, 0x15, 0x03e0); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x03e1); + rtl8168_mdio_write(tp, 0x19, 0x9cfd); + rtl8168_mdio_write(tp, 0x15, 0x03e2); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03e3); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03e4); + rtl8168_mdio_write(tp, 0x19, 0x4458); + rtl8168_mdio_write(tp, 0x15, 0x03e5); + rtl8168_mdio_write(tp, 0x19, 
0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03e6); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03e7); + rtl8168_mdio_write(tp, 0x19, 0x33de); + rtl8168_mdio_write(tp, 0x15, 0x03e8); + rtl8168_mdio_write(tp, 0x19, 0xc218); + rtl8168_mdio_write(tp, 0x15, 0x03e9); + rtl8168_mdio_write(tp, 0x19, 0x0002); + rtl8168_mdio_write(tp, 0x15, 0x03ea); + rtl8168_mdio_write(tp, 0x19, 0x32df); + rtl8168_mdio_write(tp, 0x15, 0x03eb); + rtl8168_mdio_write(tp, 0x19, 0x3316); + rtl8168_mdio_write(tp, 0x15, 0x03ec); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ed); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ee); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ef); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03f7); + rtl8168_mdio_write(tp, 0x19, 0x330c); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0xf602); + rtl8168_mdio_write(tp, 0x06, 0x0200); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x9002); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x0202); + rtl8168_mdio_write(tp, 0x06, 0x3402); + rtl8168_mdio_write(tp, 0x06, 0x027f); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0xa602); + rtl8168_mdio_write(tp, 0x06, 0x80bf); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe600); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xee03); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xefb8); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe902); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8285); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8520); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8701); + rtl8168_mdio_write(tp, 0x06, 0xd481); + rtl8168_mdio_write(tp, 0x06, 0x35e4); + rtl8168_mdio_write(tp, 0x06, 0x8b94); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x95bf); + rtl8168_mdio_write(tp, 0x06, 0x8b88); + rtl8168_mdio_write(tp, 0x06, 0xec00); + rtl8168_mdio_write(tp, 0x06, 0x19a9); + rtl8168_mdio_write(tp, 0x06, 0x8b90); + rtl8168_mdio_write(tp, 0x06, 0xf9ee); + 
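+ /*
+  * Hedged reading of the writes above: with page 0x0005 selected via reg
+  * 0x1f, reg 0x05 is written with an address in the PHY MCU's patch space
+  * (0xfff6, then 0x8000), and the writes to reg 0x06 that follow appear to
+  * stream 16-bit words of vendor patch code into that region, the data
+  * register advancing an internal pointer. The words themselves are an
+  * opaque Realtek-supplied blob.
+  */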
rtl8168_mdio_write(tp, 0x06, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x41f7); + rtl8168_mdio_write(tp, 0x06, 0x2ff6); + rtl8168_mdio_write(tp, 0x06, 0x28e4); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xe5e1); + rtl8168_mdio_write(tp, 0x06, 0x4104); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x0dee); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x82f4); + rtl8168_mdio_write(tp, 0x06, 0x021f); + rtl8168_mdio_write(tp, 0x06, 0x4102); + rtl8168_mdio_write(tp, 0x06, 0x2812); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x10ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x139d); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xd602); + rtl8168_mdio_write(tp, 0x06, 0x1f99); + rtl8168_mdio_write(tp, 0x06, 0x0227); + rtl8168_mdio_write(tp, 0x06, 0xeafc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2014); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x8104); + rtl8168_mdio_write(tp, 0x06, 0x021b); + rtl8168_mdio_write(tp, 0x06, 0xf402); + rtl8168_mdio_write(tp, 0x06, 0x2c9c); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x7902); + rtl8168_mdio_write(tp, 0x06, 0x8443); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x11f6); + rtl8168_mdio_write(tp, 0x06, 0x22e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x022c); + rtl8168_mdio_write(tp, 0x06, 0x4602); + rtl8168_mdio_write(tp, 0x06, 0x2ac5); + rtl8168_mdio_write(tp, 0x06, 0x0229); + rtl8168_mdio_write(tp, 0x06, 0x2002); + rtl8168_mdio_write(tp, 0x06, 0x2b91); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x11f6); + rtl8168_mdio_write(tp, 0x06, 0x25e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0xe202); + rtl8168_mdio_write(tp, 0x06, 0x043a); + rtl8168_mdio_write(tp, 0x06, 0x021a); + rtl8168_mdio_write(tp, 0x06, 0x5902); + rtl8168_mdio_write(tp, 0x06, 0x2bfc); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe001); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x1fd1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8638); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50e0); + rtl8168_mdio_write(tp, 0x06, 0xe020); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x21ad); + rtl8168_mdio_write(tp, 0x06, 0x200e); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 
0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xbf3d); + rtl8168_mdio_write(tp, 0x06, 0x3902); + rtl8168_mdio_write(tp, 0x06, 0x2eb0); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x0402); + rtl8168_mdio_write(tp, 0x06, 0x8591); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x3c05); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xfee1); + rtl8168_mdio_write(tp, 0x06, 0xe2ff); + rtl8168_mdio_write(tp, 0x06, 0xad2d); + rtl8168_mdio_write(tp, 0x06, 0x1ae0); + rtl8168_mdio_write(tp, 0x06, 0xe14e); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x4fac); + rtl8168_mdio_write(tp, 0x06, 0x2d22); + rtl8168_mdio_write(tp, 0x06, 0xf603); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x36f7); + rtl8168_mdio_write(tp, 0x06, 0x03f7); + rtl8168_mdio_write(tp, 0x06, 0x06bf); + rtl8168_mdio_write(tp, 0x06, 0x8622); + rtl8168_mdio_write(tp, 0x06, 0x022e); + rtl8168_mdio_write(tp, 0x06, 0xb0ae); + rtl8168_mdio_write(tp, 0x06, 0x11e0); + rtl8168_mdio_write(tp, 0x06, 0xe14e); + rtl8168_mdio_write(tp, 0x06, 0xe1e1); + rtl8168_mdio_write(tp, 0x06, 0x4fad); + rtl8168_mdio_write(tp, 0x06, 0x2d08); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x2d02); + rtl8168_mdio_write(tp, 0x06, 0x2eb0); + rtl8168_mdio_write(tp, 0x06, 0xf606); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x204c); + rtl8168_mdio_write(tp, 0x06, 0xd200); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x0058); + rtl8168_mdio_write(tp, 0x06, 0x010c); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 0x5810); + rtl8168_mdio_write(tp, 0x06, 0x1e20); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x3658); + rtl8168_mdio_write(tp, 0x06, 0x031e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xe01e); + rtl8168_mdio_write(tp, 0x06, 0x20e0); + rtl8168_mdio_write(tp, 0x06, 0x8ae6); + rtl8168_mdio_write(tp, 0x06, 0x1f02); + rtl8168_mdio_write(tp, 0x06, 0x9e22); + rtl8168_mdio_write(tp, 0x06, 0xe68a); + rtl8168_mdio_write(tp, 0x06, 0xe6ad); + rtl8168_mdio_write(tp, 0x06, 0x3214); + rtl8168_mdio_write(tp, 0x06, 0xad34); + rtl8168_mdio_write(tp, 0x06, 0x11ef); + rtl8168_mdio_write(tp, 0x06, 0x0258); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x07ad); + rtl8168_mdio_write(tp, 0x06, 0x3508); + rtl8168_mdio_write(tp, 0x06, 0x5ac0); + rtl8168_mdio_write(tp, 0x06, 0x9f04); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xae02); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x3e02); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfae0); + rtl8168_mdio_write(tp, 
0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac26); + rtl8168_mdio_write(tp, 0x06, 0x0ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xac24); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x6bee); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xe0eb); + rtl8168_mdio_write(tp, 0x06, 0x00e2); + rtl8168_mdio_write(tp, 0x06, 0xe07c); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0x7da5); + rtl8168_mdio_write(tp, 0x06, 0x1111); + rtl8168_mdio_write(tp, 0x06, 0x15d2); + rtl8168_mdio_write(tp, 0x06, 0x60d6); + rtl8168_mdio_write(tp, 0x06, 0x6666); + rtl8168_mdio_write(tp, 0x06, 0x0207); + rtl8168_mdio_write(tp, 0x06, 0xf9d2); + rtl8168_mdio_write(tp, 0x06, 0xa0d6); + rtl8168_mdio_write(tp, 0x06, 0xaaaa); + rtl8168_mdio_write(tp, 0x06, 0x0207); + rtl8168_mdio_write(tp, 0x06, 0xf902); + rtl8168_mdio_write(tp, 0x06, 0x825c); + rtl8168_mdio_write(tp, 0x06, 0xae44); + rtl8168_mdio_write(tp, 0x06, 0xa566); + rtl8168_mdio_write(tp, 0x06, 0x6602); + rtl8168_mdio_write(tp, 0x06, 0xae38); + rtl8168_mdio_write(tp, 0x06, 0xa5aa); + rtl8168_mdio_write(tp, 0x06, 0xaa02); + rtl8168_mdio_write(tp, 0x06, 0xae32); + rtl8168_mdio_write(tp, 0x06, 0xeee0); + rtl8168_mdio_write(tp, 0x06, 0xea04); + rtl8168_mdio_write(tp, 0x06, 0xeee0); + rtl8168_mdio_write(tp, 0x06, 0xeb06); + rtl8168_mdio_write(tp, 0x06, 0xe2e0); + rtl8168_mdio_write(tp, 0x06, 0x7ce3); + rtl8168_mdio_write(tp, 0x06, 0xe07d); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x38e1); + rtl8168_mdio_write(tp, 0x06, 0xe039); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x21ad); + rtl8168_mdio_write(tp, 0x06, 0x3f13); + rtl8168_mdio_write(tp, 0x06, 0xe0e4); + rtl8168_mdio_write(tp, 0x06, 0x14e1); + rtl8168_mdio_write(tp, 0x06, 0xe415); + rtl8168_mdio_write(tp, 0x06, 0x6880); + rtl8168_mdio_write(tp, 0x06, 0xe4e4); + rtl8168_mdio_write(tp, 0x06, 0x14e5); + rtl8168_mdio_write(tp, 0x06, 0xe415); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x5cae); + rtl8168_mdio_write(tp, 0x06, 0x0bac); + rtl8168_mdio_write(tp, 0x06, 0x3e02); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x8602); + rtl8168_mdio_write(tp, 0x06, 0x82b0); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2605); + rtl8168_mdio_write(tp, 0x06, 0x0221); + rtl8168_mdio_write(tp, 0x06, 0xf3f7); + rtl8168_mdio_write(tp, 0x06, 0x28e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xad21); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x22f8); + rtl8168_mdio_write(tp, 0x06, 0xf729); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2405); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xebf7); + rtl8168_mdio_write(tp, 0x06, 0x2ae5); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xad26); + 
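+ /*
+  * Illustrative only, not part of this driver: a blob of consecutive
+  * data-register writes like this one is sometimes kept as a table and
+  * replayed in a loop, e.g.
+  *
+  *     static const u16 patch[] = { 0x0280, 0x48f7, ... };
+  *     for (i = 0; i < ARRAY_SIZE(patch); i++)
+  *             rtl8168_mdio_write(tp, 0x06, patch[i]);
+  *
+  * Realtek's reference driver open-codes every write, as here, which keeps
+  * the source self-contained at the cost of size.
+  */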
rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x2134); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2109); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x2eac); + rtl8168_mdio_write(tp, 0x06, 0x2003); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0x52e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x09e0); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x8337); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8b2e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2608); + rtl8168_mdio_write(tp, 0x06, 0xe085); + rtl8168_mdio_write(tp, 0x06, 0xd2ad); + rtl8168_mdio_write(tp, 0x06, 0x2502); + rtl8168_mdio_write(tp, 0x06, 0xf628); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x210a); + rtl8168_mdio_write(tp, 0x06, 0xe086); + rtl8168_mdio_write(tp, 0x06, 0x0af6); + rtl8168_mdio_write(tp, 0x06, 0x27a0); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0xf629); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2408); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xedad); + rtl8168_mdio_write(tp, 0x06, 0x2002); + rtl8168_mdio_write(tp, 0x06, 0xf62a); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x2ea1); + rtl8168_mdio_write(tp, 0x06, 0x0003); + rtl8168_mdio_write(tp, 0x06, 0x0221); + rtl8168_mdio_write(tp, 0x06, 0x11fc); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x8aed); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8aec); + rtl8168_mdio_write(tp, 0x06, 0x0004); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x3ae0); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xeb58); + rtl8168_mdio_write(tp, 0x06, 0xf8d1); + rtl8168_mdio_write(tp, 0x06, 0x01e4); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0xebe0); + rtl8168_mdio_write(tp, 0x06, 0xe07c); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x7d5c); + rtl8168_mdio_write(tp, 0x06, 0x00ff); + rtl8168_mdio_write(tp, 0x06, 0x3c00); + rtl8168_mdio_write(tp, 0x06, 0x1eab); + rtl8168_mdio_write(tp, 0x06, 0x1ce0); + rtl8168_mdio_write(tp, 0x06, 0xe04c); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x4d58); + rtl8168_mdio_write(tp, 0x06, 0xc1e4); + rtl8168_mdio_write(tp, 0x06, 0xe04c); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0x4de0); + rtl8168_mdio_write(tp, 0x06, 0xe0ee); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0x3ce4); + rtl8168_mdio_write(tp, 0x06, 0xe0ee); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0xeffc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 
0x2412); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0xeee1); + rtl8168_mdio_write(tp, 0x06, 0xe0ef); + rtl8168_mdio_write(tp, 0x06, 0x59c3); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0xeee5); + rtl8168_mdio_write(tp, 0x06, 0xe0ef); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xed01); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac25); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x8363); + rtl8168_mdio_write(tp, 0x06, 0xae03); + rtl8168_mdio_write(tp, 0x06, 0x0225); + rtl8168_mdio_write(tp, 0x06, 0x16fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfae0); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0xa000); + rtl8168_mdio_write(tp, 0x06, 0x19e0); + rtl8168_mdio_write(tp, 0x06, 0x860b); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x331b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x04aa); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x06ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0xe602); + rtl8168_mdio_write(tp, 0x06, 0x241e); + rtl8168_mdio_write(tp, 0x06, 0xae14); + rtl8168_mdio_write(tp, 0x06, 0xa001); + rtl8168_mdio_write(tp, 0x06, 0x1402); + rtl8168_mdio_write(tp, 0x06, 0x2426); + rtl8168_mdio_write(tp, 0x06, 0xbf26); + rtl8168_mdio_write(tp, 0x06, 0x6d02); + rtl8168_mdio_write(tp, 0x06, 0x2eb0); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0b00); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0a02); + rtl8168_mdio_write(tp, 0x06, 0xaf84); + rtl8168_mdio_write(tp, 0x06, 0x3ca0); + rtl8168_mdio_write(tp, 0x06, 0x0252); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0400); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0500); + rtl8168_mdio_write(tp, 0x06, 0xe086); + rtl8168_mdio_write(tp, 0x06, 0x0be1); + rtl8168_mdio_write(tp, 0x06, 0x8b32); + rtl8168_mdio_write(tp, 0x06, 0x1b10); + rtl8168_mdio_write(tp, 0x06, 0x9e04); + rtl8168_mdio_write(tp, 0x06, 0xaa02); + rtl8168_mdio_write(tp, 0x06, 0xaecb); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0b00); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x3ae2); + rtl8168_mdio_write(tp, 0x06, 0x8604); + rtl8168_mdio_write(tp, 0x06, 0xe386); + rtl8168_mdio_write(tp, 0x06, 0x05ef); + rtl8168_mdio_write(tp, 0x06, 0x65e2); + rtl8168_mdio_write(tp, 0x06, 0x8606); + rtl8168_mdio_write(tp, 0x06, 0xe386); + rtl8168_mdio_write(tp, 0x06, 0x071b); + rtl8168_mdio_write(tp, 0x06, 0x56aa); + rtl8168_mdio_write(tp, 0x06, 0x0eef); + rtl8168_mdio_write(tp, 0x06, 0x56e6); + rtl8168_mdio_write(tp, 0x06, 0x8606); + rtl8168_mdio_write(tp, 0x06, 0xe786); + rtl8168_mdio_write(tp, 0x06, 0x07e2); + rtl8168_mdio_write(tp, 0x06, 0x8609); + rtl8168_mdio_write(tp, 0x06, 0xe686); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8609); + rtl8168_mdio_write(tp, 0x06, 0xa000); + rtl8168_mdio_write(tp, 0x06, 0x07ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x03af); + rtl8168_mdio_write(tp, 0x06, 0x8369); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 
0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x2426); + rtl8168_mdio_write(tp, 0x06, 0xae48); + rtl8168_mdio_write(tp, 0x06, 0xa003); + rtl8168_mdio_write(tp, 0x06, 0x21e0); + rtl8168_mdio_write(tp, 0x06, 0x8608); + rtl8168_mdio_write(tp, 0x06, 0xe186); + rtl8168_mdio_write(tp, 0x06, 0x091b); + rtl8168_mdio_write(tp, 0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x0caa); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x249d); + rtl8168_mdio_write(tp, 0x06, 0xaee7); + rtl8168_mdio_write(tp, 0x06, 0x0224); + rtl8168_mdio_write(tp, 0x06, 0x8eae); + rtl8168_mdio_write(tp, 0x06, 0xe2ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x860b); + rtl8168_mdio_write(tp, 0x06, 0x00af); + rtl8168_mdio_write(tp, 0x06, 0x8369); + rtl8168_mdio_write(tp, 0x06, 0xa004); + rtl8168_mdio_write(tp, 0x06, 0x15e0); + rtl8168_mdio_write(tp, 0x06, 0x860b); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x341b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x05aa); + rtl8168_mdio_write(tp, 0x06, 0x03af); + rtl8168_mdio_write(tp, 0x06, 0x8383); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0a05); + rtl8168_mdio_write(tp, 0x06, 0xae0c); + rtl8168_mdio_write(tp, 0x06, 0xa005); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x0702); + rtl8168_mdio_write(tp, 0x06, 0x2309); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0a00); + rtl8168_mdio_write(tp, 0x06, 0xfeef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfbe0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x22e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x23e2); + rtl8168_mdio_write(tp, 0x06, 0xe036); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0x375a); + rtl8168_mdio_write(tp, 0x06, 0xc40d); + rtl8168_mdio_write(tp, 0x06, 0x0158); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x20e3); + rtl8168_mdio_write(tp, 0x06, 0x8ae7); + rtl8168_mdio_write(tp, 0x06, 0xac31); + rtl8168_mdio_write(tp, 0x06, 0x60ac); + rtl8168_mdio_write(tp, 0x06, 0x3a08); + rtl8168_mdio_write(tp, 0x06, 0xac3e); + rtl8168_mdio_write(tp, 0x06, 0x26ae); + rtl8168_mdio_write(tp, 0x06, 0x67af); + rtl8168_mdio_write(tp, 0x06, 0x84db); + rtl8168_mdio_write(tp, 0x06, 0xad37); + rtl8168_mdio_write(tp, 0x06, 0x61e0); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xe91b); + rtl8168_mdio_write(tp, 0x06, 0x109e); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x51d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x863b); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50ee); + rtl8168_mdio_write(tp, 0x06, 0x8ae8); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x43ad); + rtl8168_mdio_write(tp, 0x06, 0x3627); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xeee1); + rtl8168_mdio_write(tp, 0x06, 0x8aef); + rtl8168_mdio_write(tp, 0x06, 0xef74); + 
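+ /*
+  * The remaining reg 0x06 writes below complete the patch image. The
+  * sequence then ends by setting BIT_0 in regs 0x01 and 0x00 (still on the
+  * firmware page), which looks like a patch-ready handshake, before
+  * restoring page 0 with reg 0x1f = 0x0000 and polling reg 0x00 for BIT_7.
+  */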
rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xeae1); + rtl8168_mdio_write(tp, 0x06, 0x8aeb); + rtl8168_mdio_write(tp, 0x06, 0x1b74); + rtl8168_mdio_write(tp, 0x06, 0x9e2e); + rtl8168_mdio_write(tp, 0x06, 0x14e4); + rtl8168_mdio_write(tp, 0x06, 0x8aea); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xebef); + rtl8168_mdio_write(tp, 0x06, 0x74e0); + rtl8168_mdio_write(tp, 0x06, 0x8aee); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xef1b); + rtl8168_mdio_write(tp, 0x06, 0x479e); + rtl8168_mdio_write(tp, 0x06, 0x0fae); + rtl8168_mdio_write(tp, 0x06, 0x19ee); + rtl8168_mdio_write(tp, 0x06, 0x8aea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8aeb); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0x0fac); + rtl8168_mdio_write(tp, 0x06, 0x390c); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x3b02); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xe800); + rtl8168_mdio_write(tp, 0x06, 0xe68a); + rtl8168_mdio_write(tp, 0x06, 0xe7ff); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xc4e1); + rtl8168_mdio_write(tp, 0x06, 0x8b6e); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e24); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x6ead); + rtl8168_mdio_write(tp, 0x06, 0x2218); + rtl8168_mdio_write(tp, 0x06, 0xac27); + rtl8168_mdio_write(tp, 0x06, 0x0dac); + rtl8168_mdio_write(tp, 0x06, 0x2605); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x8fae); + rtl8168_mdio_write(tp, 0x06, 0x1302); + rtl8168_mdio_write(tp, 0x06, 0x03c8); + rtl8168_mdio_write(tp, 0x06, 0xae0e); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0xe102); + rtl8168_mdio_write(tp, 0x06, 0x8520); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x8f02); + rtl8168_mdio_write(tp, 0x06, 0x8566); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x82ad); + rtl8168_mdio_write(tp, 0x06, 0x2737); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4402); + rtl8168_mdio_write(tp, 0x06, 0x2f23); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x2ed1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8647); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50bf); + rtl8168_mdio_write(tp, 0x06, 0x8641); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x23e5); + rtl8168_mdio_write(tp, 0x06, 0x8af0); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x22e1); + rtl8168_mdio_write(tp, 0x06, 0xe023); + rtl8168_mdio_write(tp, 0x06, 0xac2e); + rtl8168_mdio_write(tp, 0x06, 0x04d1); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 
0x02d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x8641); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50d1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8644); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4702); + rtl8168_mdio_write(tp, 0x06, 0x2f23); + rtl8168_mdio_write(tp, 0x06, 0xad28); + rtl8168_mdio_write(tp, 0x06, 0x19d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x8644); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x50e1); + rtl8168_mdio_write(tp, 0x06, 0x8af0); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4102); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x4702); + rtl8168_mdio_write(tp, 0x06, 0x2f50); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xfee1); + rtl8168_mdio_write(tp, 0x06, 0xe2ff); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x63e0); + rtl8168_mdio_write(tp, 0x06, 0xe038); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x39ad); + rtl8168_mdio_write(tp, 0x06, 0x2f10); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xf726); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xae0e); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e1); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xf728); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e5); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xf72b); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xd07d); + rtl8168_mdio_write(tp, 0x06, 0xb0fe); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xf62b); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xf626); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe035); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e1); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xf628); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0xd6e5); + rtl8168_mdio_write(tp, 0x06, 0xe2d7); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xae20); + rtl8168_mdio_write(tp, 
0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0xa725); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x1de5); + rtl8168_mdio_write(tp, 0x06, 0x0a2c); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x6de5); + rtl8168_mdio_write(tp, 0x06, 0x0a1d); + rtl8168_mdio_write(tp, 0x06, 0xe50a); + rtl8168_mdio_write(tp, 0x06, 0x1ce5); + rtl8168_mdio_write(tp, 0x06, 0x0a2d); + rtl8168_mdio_write(tp, 0x06, 0xa755); + rtl8168_mdio_write(tp, 0x06, 0x00e2); + rtl8168_mdio_write(tp, 0x06, 0x3488); + rtl8168_mdio_write(tp, 0x06, 0xe200); + rtl8168_mdio_write(tp, 0x06, 0xcce2); + rtl8168_mdio_write(tp, 0x06, 0x0055); + rtl8168_mdio_write(tp, 0x06, 0xe020); + rtl8168_mdio_write(tp, 0x06, 0x55e2); + rtl8168_mdio_write(tp, 0x06, 0xd600); + rtl8168_mdio_write(tp, 0x06, 0xe24a); + gphy_val = rtl8168_mdio_read(tp, 0x01); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x01, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~(BIT_2); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } +} + +static void +rtl8168_set_phy_mcu_8168evl_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val,i; + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x1800); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val &= ~(BIT_12); + rtl8168_mdio_write(tp, 0x15, gphy_val); + mdelay(20); + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + if ((gphy_val & BIT_11) == 0x0000) { + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x17, gphy_val); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x17); + if (gphy_val & BIT_11) + break; + } + } + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + rtl8168_mdio_write(tp, 0x1B, 0x5000); + rtl8168_mdio_write(tp, 0x1E, 0x002d); + rtl8168_mdio_write(tp, 0x19, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1E); + if ((gphy_val & 0x03FF) == 0x0014) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = 
rtl8168_mdio_read(tp, 0x07); + if ((gphy_val & BIT_5) == 0) + break; + } + gphy_val = rtl8168_mdio_read(tp, 0x07); + if (gphy_val & BIT_5) { + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x00a1); + rtl8168_mdio_write(tp, 0x17, 0x1000); + rtl8168_mdio_write(tp, 0x17, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2000); + rtl8168_mdio_write(tp, 0x1e, 0x002f); + rtl8168_mdio_write(tp, 0x18, 0x9bfb); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x07, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + gphy_val = rtl8168_mdio_read(tp, 0x00); + gphy_val &= ~(BIT_7); + rtl8168_mdio_write(tp, 0x00, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + rtl8168_mdio_write(tp, 0x15, 0x0000); + rtl8168_mdio_write(tp, 0x19, 0x407d); + rtl8168_mdio_write(tp, 0x15, 0x0001); + rtl8168_mdio_write(tp, 0x19, 0x440f); + rtl8168_mdio_write(tp, 0x15, 0x0002); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0003); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x0004); + rtl8168_mdio_write(tp, 0x19, 0xc4d5); + rtl8168_mdio_write(tp, 0x15, 0x0005); + rtl8168_mdio_write(tp, 0x19, 0x00ff); + rtl8168_mdio_write(tp, 0x15, 0x0006); + rtl8168_mdio_write(tp, 0x19, 0x74f0); + rtl8168_mdio_write(tp, 0x15, 0x0007); + rtl8168_mdio_write(tp, 0x19, 0x4880); + rtl8168_mdio_write(tp, 0x15, 0x0008); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x0009); + rtl8168_mdio_write(tp, 0x19, 0x4800); + rtl8168_mdio_write(tp, 0x15, 0x000a); + rtl8168_mdio_write(tp, 0x19, 0x5000); + rtl8168_mdio_write(tp, 0x15, 0x000b); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x000c); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x000d); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x15, 0x000e); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x000f); + rtl8168_mdio_write(tp, 0x19, 0x7010); + rtl8168_mdio_write(tp, 0x15, 0x0010); + rtl8168_mdio_write(tp, 0x19, 0x6804); + rtl8168_mdio_write(tp, 0x15, 0x0011); + rtl8168_mdio_write(tp, 0x19, 0x64a0); + rtl8168_mdio_write(tp, 0x15, 0x0012); + rtl8168_mdio_write(tp, 0x19, 0x63da); + rtl8168_mdio_write(tp, 0x15, 0x0013); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x0014); + rtl8168_mdio_write(tp, 0x19, 0x6f05); + rtl8168_mdio_write(tp, 0x15, 0x0015); + rtl8168_mdio_write(tp, 0x19, 0x5420); + rtl8168_mdio_write(tp, 0x15, 0x0016); + rtl8168_mdio_write(tp, 0x19, 0x58ce); + rtl8168_mdio_write(tp, 0x15, 0x0017); + rtl8168_mdio_write(tp, 0x19, 0x5cf3); + rtl8168_mdio_write(tp, 0x15, 0x0018); + rtl8168_mdio_write(tp, 0x19, 0xb600); + rtl8168_mdio_write(tp, 0x15, 0x0019); + rtl8168_mdio_write(tp, 0x19, 0xc659); + rtl8168_mdio_write(tp, 0x15, 0x001a); + rtl8168_mdio_write(tp, 0x19, 0x0018); + rtl8168_mdio_write(tp, 0x15, 0x001b); + rtl8168_mdio_write(tp, 0x19, 0xc403); + rtl8168_mdio_write(tp, 0x15, 0x001c); + rtl8168_mdio_write(tp, 0x19, 0x0016); + rtl8168_mdio_write(tp, 0x15, 0x001d); + rtl8168_mdio_write(tp, 0x19, 0xaa05); + rtl8168_mdio_write(tp, 0x15, 0x001e); + rtl8168_mdio_write(tp, 0x19, 0xc503); + 
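+ /*
+  * As in the routines above, this table load for the 8168E-VL uses
+  * Realtek's paged-MDIO convention: reg 0x1f selects page 0x0007 and reg
+  * 0x1e selects extension page 0x0023, after which each write to reg 0x15
+  * appears to pick a table address and the following write to reg 0x19
+  * supplies its 16-bit value, i.e. roughly:
+  *
+  *     rtl8168_mdio_write(tp, 0x15, addr);
+  *     rtl8168_mdio_write(tp, 0x19, data);
+  *
+  * The addr/data values are an opaque vendor-supplied PHY parameter patch.
+  */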
rtl8168_mdio_write(tp, 0x15, 0x001f); + rtl8168_mdio_write(tp, 0x19, 0x0003); + rtl8168_mdio_write(tp, 0x15, 0x0020); + rtl8168_mdio_write(tp, 0x19, 0x89f8); + rtl8168_mdio_write(tp, 0x15, 0x0021); + rtl8168_mdio_write(tp, 0x19, 0x32ae); + rtl8168_mdio_write(tp, 0x15, 0x0022); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0023); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x0024); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0025); + rtl8168_mdio_write(tp, 0x19, 0x6801); + rtl8168_mdio_write(tp, 0x15, 0x0026); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x0027); + rtl8168_mdio_write(tp, 0x19, 0xa300); + rtl8168_mdio_write(tp, 0x15, 0x0028); + rtl8168_mdio_write(tp, 0x19, 0x64a0); + rtl8168_mdio_write(tp, 0x15, 0x0029); + rtl8168_mdio_write(tp, 0x19, 0x76f0); + rtl8168_mdio_write(tp, 0x15, 0x002a); + rtl8168_mdio_write(tp, 0x19, 0x7670); + rtl8168_mdio_write(tp, 0x15, 0x002b); + rtl8168_mdio_write(tp, 0x19, 0x7630); + rtl8168_mdio_write(tp, 0x15, 0x002c); + rtl8168_mdio_write(tp, 0x19, 0x31a6); + rtl8168_mdio_write(tp, 0x15, 0x002d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x002e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x002f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0030); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0031); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0032); + rtl8168_mdio_write(tp, 0x19, 0x4801); + rtl8168_mdio_write(tp, 0x15, 0x0033); + rtl8168_mdio_write(tp, 0x19, 0x6803); + rtl8168_mdio_write(tp, 0x15, 0x0034); + rtl8168_mdio_write(tp, 0x19, 0x66a1); + rtl8168_mdio_write(tp, 0x15, 0x0035); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0036); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x0037); + rtl8168_mdio_write(tp, 0x19, 0xa300); + rtl8168_mdio_write(tp, 0x15, 0x0038); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0039); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x003a); + rtl8168_mdio_write(tp, 0x19, 0x74f8); + rtl8168_mdio_write(tp, 0x15, 0x003b); + rtl8168_mdio_write(tp, 0x19, 0x63d0); + rtl8168_mdio_write(tp, 0x15, 0x003c); + rtl8168_mdio_write(tp, 0x19, 0x7ff0); + rtl8168_mdio_write(tp, 0x15, 0x003d); + rtl8168_mdio_write(tp, 0x19, 0x77f0); + rtl8168_mdio_write(tp, 0x15, 0x003e); + rtl8168_mdio_write(tp, 0x19, 0x7ff0); + rtl8168_mdio_write(tp, 0x15, 0x003f); + rtl8168_mdio_write(tp, 0x19, 0x7750); + rtl8168_mdio_write(tp, 0x15, 0x0040); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x0041); + rtl8168_mdio_write(tp, 0x19, 0x7cf0); + rtl8168_mdio_write(tp, 0x15, 0x0042); + rtl8168_mdio_write(tp, 0x19, 0x7708); + rtl8168_mdio_write(tp, 0x15, 0x0043); + rtl8168_mdio_write(tp, 0x19, 0xa654); + rtl8168_mdio_write(tp, 0x15, 0x0044); + rtl8168_mdio_write(tp, 0x19, 0x304a); + rtl8168_mdio_write(tp, 0x15, 0x0045); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0046); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0047); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0048); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0049); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x004a); + rtl8168_mdio_write(tp, 0x19, 0x4802); + rtl8168_mdio_write(tp, 0x15, 
0x004b); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x004c); + rtl8168_mdio_write(tp, 0x19, 0x4440); + rtl8168_mdio_write(tp, 0x15, 0x004d); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x004e); + rtl8168_mdio_write(tp, 0x19, 0x6481); + rtl8168_mdio_write(tp, 0x15, 0x004f); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x15, 0x0050); + rtl8168_mdio_write(tp, 0x19, 0x63e8); + rtl8168_mdio_write(tp, 0x15, 0x0051); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x0052); + rtl8168_mdio_write(tp, 0x19, 0x5900); + rtl8168_mdio_write(tp, 0x15, 0x0053); + rtl8168_mdio_write(tp, 0x19, 0x63f8); + rtl8168_mdio_write(tp, 0x15, 0x0054); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0055); + rtl8168_mdio_write(tp, 0x19, 0x3116); + rtl8168_mdio_write(tp, 0x15, 0x0056); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0057); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0058); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0059); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x005a); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x005b); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x005c); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x005d); + rtl8168_mdio_write(tp, 0x19, 0x6000); + rtl8168_mdio_write(tp, 0x15, 0x005e); + rtl8168_mdio_write(tp, 0x19, 0x59ce); + rtl8168_mdio_write(tp, 0x15, 0x005f); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x0060); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x0061); + rtl8168_mdio_write(tp, 0x19, 0x72b0); + rtl8168_mdio_write(tp, 0x15, 0x0062); + rtl8168_mdio_write(tp, 0x19, 0x400e); + rtl8168_mdio_write(tp, 0x15, 0x0063); + rtl8168_mdio_write(tp, 0x19, 0x4440); + rtl8168_mdio_write(tp, 0x15, 0x0064); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x15, 0x0065); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x15, 0x0066); + rtl8168_mdio_write(tp, 0x19, 0x70b0); + rtl8168_mdio_write(tp, 0x15, 0x0067); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0068); + rtl8168_mdio_write(tp, 0x19, 0x6008); + rtl8168_mdio_write(tp, 0x15, 0x0069); + rtl8168_mdio_write(tp, 0x19, 0x7cf0); + rtl8168_mdio_write(tp, 0x15, 0x006a); + rtl8168_mdio_write(tp, 0x19, 0x7750); + rtl8168_mdio_write(tp, 0x15, 0x006b); + rtl8168_mdio_write(tp, 0x19, 0x4007); + rtl8168_mdio_write(tp, 0x15, 0x006c); + rtl8168_mdio_write(tp, 0x19, 0x4500); + rtl8168_mdio_write(tp, 0x15, 0x006d); + rtl8168_mdio_write(tp, 0x19, 0x4023); + rtl8168_mdio_write(tp, 0x15, 0x006e); + rtl8168_mdio_write(tp, 0x19, 0x4580); + rtl8168_mdio_write(tp, 0x15, 0x006f); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x0070); + rtl8168_mdio_write(tp, 0x19, 0xcd78); + rtl8168_mdio_write(tp, 0x15, 0x0071); + rtl8168_mdio_write(tp, 0x19, 0x0003); + rtl8168_mdio_write(tp, 0x15, 0x0072); + rtl8168_mdio_write(tp, 0x19, 0xbe02); + rtl8168_mdio_write(tp, 0x15, 0x0073); + rtl8168_mdio_write(tp, 0x19, 0x3070); + rtl8168_mdio_write(tp, 0x15, 0x0074); + rtl8168_mdio_write(tp, 0x19, 0x7cf0); + rtl8168_mdio_write(tp, 0x15, 0x0075); + rtl8168_mdio_write(tp, 0x19, 0x77f0); + rtl8168_mdio_write(tp, 0x15, 0x0076); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x0077); + rtl8168_mdio_write(tp, 
0x19, 0x4007); + rtl8168_mdio_write(tp, 0x15, 0x0078); + rtl8168_mdio_write(tp, 0x19, 0x4500); + rtl8168_mdio_write(tp, 0x15, 0x0079); + rtl8168_mdio_write(tp, 0x19, 0x4023); + rtl8168_mdio_write(tp, 0x15, 0x007a); + rtl8168_mdio_write(tp, 0x19, 0x4580); + rtl8168_mdio_write(tp, 0x15, 0x007b); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x007c); + rtl8168_mdio_write(tp, 0x19, 0xce80); + rtl8168_mdio_write(tp, 0x15, 0x007d); + rtl8168_mdio_write(tp, 0x19, 0x0004); + rtl8168_mdio_write(tp, 0x15, 0x007e); + rtl8168_mdio_write(tp, 0x19, 0xce80); + rtl8168_mdio_write(tp, 0x15, 0x007f); + rtl8168_mdio_write(tp, 0x19, 0x0002); + rtl8168_mdio_write(tp, 0x15, 0x0080); + rtl8168_mdio_write(tp, 0x19, 0x307c); + rtl8168_mdio_write(tp, 0x15, 0x0081); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x0082); + rtl8168_mdio_write(tp, 0x19, 0x480f); + rtl8168_mdio_write(tp, 0x15, 0x0083); + rtl8168_mdio_write(tp, 0x19, 0x6802); + rtl8168_mdio_write(tp, 0x15, 0x0084); + rtl8168_mdio_write(tp, 0x19, 0x6680); + rtl8168_mdio_write(tp, 0x15, 0x0085); + rtl8168_mdio_write(tp, 0x19, 0x7c10); + rtl8168_mdio_write(tp, 0x15, 0x0086); + rtl8168_mdio_write(tp, 0x19, 0x6010); + rtl8168_mdio_write(tp, 0x15, 0x0087); + rtl8168_mdio_write(tp, 0x19, 0x400a); + rtl8168_mdio_write(tp, 0x15, 0x0088); + rtl8168_mdio_write(tp, 0x19, 0x4580); + rtl8168_mdio_write(tp, 0x15, 0x0089); + rtl8168_mdio_write(tp, 0x19, 0x9e00); + rtl8168_mdio_write(tp, 0x15, 0x008a); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x008b); + rtl8168_mdio_write(tp, 0x19, 0x5800); + rtl8168_mdio_write(tp, 0x15, 0x008c); + rtl8168_mdio_write(tp, 0x19, 0x63c8); + rtl8168_mdio_write(tp, 0x15, 0x008d); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x008e); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x008f); + rtl8168_mdio_write(tp, 0x19, 0x8300); + rtl8168_mdio_write(tp, 0x15, 0x0090); + rtl8168_mdio_write(tp, 0x19, 0x7ff0); + rtl8168_mdio_write(tp, 0x15, 0x0091); + rtl8168_mdio_write(tp, 0x19, 0x74f0); + rtl8168_mdio_write(tp, 0x15, 0x0092); + rtl8168_mdio_write(tp, 0x19, 0x3006); + rtl8168_mdio_write(tp, 0x15, 0x0093); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0094); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0095); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0096); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0097); + rtl8168_mdio_write(tp, 0x19, 0x4803); + rtl8168_mdio_write(tp, 0x15, 0x0098); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0099); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x009a); + rtl8168_mdio_write(tp, 0x19, 0xa203); + rtl8168_mdio_write(tp, 0x15, 0x009b); + rtl8168_mdio_write(tp, 0x19, 0x64b1); + rtl8168_mdio_write(tp, 0x15, 0x009c); + rtl8168_mdio_write(tp, 0x19, 0x309e); + rtl8168_mdio_write(tp, 0x15, 0x009d); + rtl8168_mdio_write(tp, 0x19, 0x64b3); + rtl8168_mdio_write(tp, 0x15, 0x009e); + rtl8168_mdio_write(tp, 0x19, 0x4030); + rtl8168_mdio_write(tp, 0x15, 0x009f); + rtl8168_mdio_write(tp, 0x19, 0x440e); + rtl8168_mdio_write(tp, 0x15, 0x00a0); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x00a1); + rtl8168_mdio_write(tp, 0x19, 0x4419); + rtl8168_mdio_write(tp, 0x15, 0x00a2); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x00a3); + rtl8168_mdio_write(tp, 0x19, 0xc520); + 
rtl8168_mdio_write(tp, 0x15, 0x00a4); + rtl8168_mdio_write(tp, 0x19, 0x000b); + rtl8168_mdio_write(tp, 0x15, 0x00a5); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x00a6); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x00a7); + rtl8168_mdio_write(tp, 0x19, 0x58a4); + rtl8168_mdio_write(tp, 0x15, 0x00a8); + rtl8168_mdio_write(tp, 0x19, 0x63da); + rtl8168_mdio_write(tp, 0x15, 0x00a9); + rtl8168_mdio_write(tp, 0x19, 0x5cb0); + rtl8168_mdio_write(tp, 0x15, 0x00aa); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x00ab); + rtl8168_mdio_write(tp, 0x19, 0x72b0); + rtl8168_mdio_write(tp, 0x15, 0x00ac); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x15, 0x00ad); + rtl8168_mdio_write(tp, 0x19, 0x70b0); + rtl8168_mdio_write(tp, 0x15, 0x00ae); + rtl8168_mdio_write(tp, 0x19, 0x30b8); + rtl8168_mdio_write(tp, 0x15, 0x00af); + rtl8168_mdio_write(tp, 0x19, 0x4060); + rtl8168_mdio_write(tp, 0x15, 0x00b0); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x00b1); + rtl8168_mdio_write(tp, 0x19, 0x7e00); + rtl8168_mdio_write(tp, 0x15, 0x00b2); + rtl8168_mdio_write(tp, 0x19, 0x72b0); + rtl8168_mdio_write(tp, 0x15, 0x00b3); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x15, 0x00b4); + rtl8168_mdio_write(tp, 0x19, 0x73b0); + rtl8168_mdio_write(tp, 0x15, 0x00b5); + rtl8168_mdio_write(tp, 0x19, 0x58a0); + rtl8168_mdio_write(tp, 0x15, 0x00b6); + rtl8168_mdio_write(tp, 0x19, 0x63d2); + rtl8168_mdio_write(tp, 0x15, 0x00b7); + rtl8168_mdio_write(tp, 0x19, 0x5c00); + rtl8168_mdio_write(tp, 0x15, 0x00b8); + rtl8168_mdio_write(tp, 0x19, 0x5780); + rtl8168_mdio_write(tp, 0x15, 0x00b9); + rtl8168_mdio_write(tp, 0x19, 0xb60d); + rtl8168_mdio_write(tp, 0x15, 0x00ba); + rtl8168_mdio_write(tp, 0x19, 0x9bff); + rtl8168_mdio_write(tp, 0x15, 0x00bb); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x00bc); + rtl8168_mdio_write(tp, 0x19, 0x6001); + rtl8168_mdio_write(tp, 0x15, 0x00bd); + rtl8168_mdio_write(tp, 0x19, 0xc020); + rtl8168_mdio_write(tp, 0x15, 0x00be); + rtl8168_mdio_write(tp, 0x19, 0x002b); + rtl8168_mdio_write(tp, 0x15, 0x00bf); + rtl8168_mdio_write(tp, 0x19, 0xc137); + rtl8168_mdio_write(tp, 0x15, 0x00c0); + rtl8168_mdio_write(tp, 0x19, 0x0006); + rtl8168_mdio_write(tp, 0x15, 0x00c1); + rtl8168_mdio_write(tp, 0x19, 0x9af8); + rtl8168_mdio_write(tp, 0x15, 0x00c2); + rtl8168_mdio_write(tp, 0x19, 0x30c6); + rtl8168_mdio_write(tp, 0x15, 0x00c3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00c4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00c5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00c6); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x00c7); + rtl8168_mdio_write(tp, 0x19, 0x70b0); + rtl8168_mdio_write(tp, 0x15, 0x00c8); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x00c9); + rtl8168_mdio_write(tp, 0x19, 0x4804); + rtl8168_mdio_write(tp, 0x15, 0x00ca); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x00cb); + rtl8168_mdio_write(tp, 0x19, 0x5c80); + rtl8168_mdio_write(tp, 0x15, 0x00cc); + rtl8168_mdio_write(tp, 0x19, 0x4010); + rtl8168_mdio_write(tp, 0x15, 0x00cd); + rtl8168_mdio_write(tp, 0x19, 0x4415); + rtl8168_mdio_write(tp, 0x15, 0x00ce); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x15, 0x00cf); + rtl8168_mdio_write(tp, 0x19, 0x7f00); + rtl8168_mdio_write(tp, 0x15, 
0x00d0); + rtl8168_mdio_write(tp, 0x19, 0x70b0); + rtl8168_mdio_write(tp, 0x15, 0x00d1); + rtl8168_mdio_write(tp, 0x19, 0x3177); + rtl8168_mdio_write(tp, 0x15, 0x00d2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00d3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00d4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00d5); + rtl8168_mdio_write(tp, 0x19, 0x4808); + rtl8168_mdio_write(tp, 0x15, 0x00d6); + rtl8168_mdio_write(tp, 0x19, 0x4007); + rtl8168_mdio_write(tp, 0x15, 0x00d7); + rtl8168_mdio_write(tp, 0x19, 0x4420); + rtl8168_mdio_write(tp, 0x15, 0x00d8); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x00d9); + rtl8168_mdio_write(tp, 0x19, 0xb608); + rtl8168_mdio_write(tp, 0x15, 0x00da); + rtl8168_mdio_write(tp, 0x19, 0xbcbd); + rtl8168_mdio_write(tp, 0x15, 0x00db); + rtl8168_mdio_write(tp, 0x19, 0xc60b); + rtl8168_mdio_write(tp, 0x15, 0x00dc); + rtl8168_mdio_write(tp, 0x19, 0x00fd); + rtl8168_mdio_write(tp, 0x15, 0x00dd); + rtl8168_mdio_write(tp, 0x19, 0x30e1); + rtl8168_mdio_write(tp, 0x15, 0x00de); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00df); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00e0); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00e1); + rtl8168_mdio_write(tp, 0x19, 0x4809); + rtl8168_mdio_write(tp, 0x15, 0x00e2); + rtl8168_mdio_write(tp, 0x19, 0x7e40); + rtl8168_mdio_write(tp, 0x15, 0x00e3); + rtl8168_mdio_write(tp, 0x19, 0x5a40); + rtl8168_mdio_write(tp, 0x15, 0x00e4); + rtl8168_mdio_write(tp, 0x19, 0x305a); + rtl8168_mdio_write(tp, 0x15, 0x00e5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00e6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00e7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00e8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00e9); + rtl8168_mdio_write(tp, 0x19, 0x480a); + rtl8168_mdio_write(tp, 0x15, 0x00ea); + rtl8168_mdio_write(tp, 0x19, 0x5820); + rtl8168_mdio_write(tp, 0x15, 0x00eb); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x00ec); + rtl8168_mdio_write(tp, 0x19, 0xb60a); + rtl8168_mdio_write(tp, 0x15, 0x00ed); + rtl8168_mdio_write(tp, 0x19, 0xda07); + rtl8168_mdio_write(tp, 0x15, 0x00ee); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x00ef); + rtl8168_mdio_write(tp, 0x19, 0xc60b); + rtl8168_mdio_write(tp, 0x15, 0x00f0); + rtl8168_mdio_write(tp, 0x19, 0x00fc); + rtl8168_mdio_write(tp, 0x15, 0x00f1); + rtl8168_mdio_write(tp, 0x19, 0x30f6); + rtl8168_mdio_write(tp, 0x15, 0x00f2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00f3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00f4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00f5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x00f6); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x00f7); + rtl8168_mdio_write(tp, 0x19, 0x480b); + rtl8168_mdio_write(tp, 0x15, 0x00f8); + rtl8168_mdio_write(tp, 0x19, 0x6f03); + rtl8168_mdio_write(tp, 0x15, 0x00f9); + rtl8168_mdio_write(tp, 0x19, 0x405f); + rtl8168_mdio_write(tp, 0x15, 0x00fa); + rtl8168_mdio_write(tp, 0x19, 0x4448); + rtl8168_mdio_write(tp, 0x15, 0x00fb); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x00fc); + rtl8168_mdio_write(tp, 
0x19, 0x4468); + rtl8168_mdio_write(tp, 0x15, 0x00fd); + rtl8168_mdio_write(tp, 0x19, 0x9c03); + rtl8168_mdio_write(tp, 0x15, 0x00fe); + rtl8168_mdio_write(tp, 0x19, 0x6f07); + rtl8168_mdio_write(tp, 0x15, 0x00ff); + rtl8168_mdio_write(tp, 0x19, 0x58a0); + rtl8168_mdio_write(tp, 0x15, 0x0100); + rtl8168_mdio_write(tp, 0x19, 0xd6d1); + rtl8168_mdio_write(tp, 0x15, 0x0101); + rtl8168_mdio_write(tp, 0x19, 0x0004); + rtl8168_mdio_write(tp, 0x15, 0x0102); + rtl8168_mdio_write(tp, 0x19, 0xc137); + rtl8168_mdio_write(tp, 0x15, 0x0103); + rtl8168_mdio_write(tp, 0x19, 0x0002); + rtl8168_mdio_write(tp, 0x15, 0x0104); + rtl8168_mdio_write(tp, 0x19, 0xa0e5); + rtl8168_mdio_write(tp, 0x15, 0x0105); + rtl8168_mdio_write(tp, 0x19, 0x9df8); + rtl8168_mdio_write(tp, 0x15, 0x0106); + rtl8168_mdio_write(tp, 0x19, 0x30c6); + rtl8168_mdio_write(tp, 0x15, 0x0107); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0108); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0109); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x010a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x010b); + rtl8168_mdio_write(tp, 0x19, 0x4808); + rtl8168_mdio_write(tp, 0x15, 0x010c); + rtl8168_mdio_write(tp, 0x19, 0xc32d); + rtl8168_mdio_write(tp, 0x15, 0x010d); + rtl8168_mdio_write(tp, 0x19, 0x0003); + rtl8168_mdio_write(tp, 0x15, 0x010e); + rtl8168_mdio_write(tp, 0x19, 0xc8b3); + rtl8168_mdio_write(tp, 0x15, 0x010f); + rtl8168_mdio_write(tp, 0x19, 0x00fc); + rtl8168_mdio_write(tp, 0x15, 0x0110); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x0111); + rtl8168_mdio_write(tp, 0x19, 0x3116); + rtl8168_mdio_write(tp, 0x15, 0x0112); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0113); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0114); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0115); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0116); + rtl8168_mdio_write(tp, 0x19, 0x4803); + rtl8168_mdio_write(tp, 0x15, 0x0117); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0118); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x0119); + rtl8168_mdio_write(tp, 0x19, 0x7c04); + rtl8168_mdio_write(tp, 0x15, 0x011a); + rtl8168_mdio_write(tp, 0x19, 0x6000); + rtl8168_mdio_write(tp, 0x15, 0x011b); + rtl8168_mdio_write(tp, 0x19, 0x5cf7); + rtl8168_mdio_write(tp, 0x15, 0x011c); + rtl8168_mdio_write(tp, 0x19, 0x7c2a); + rtl8168_mdio_write(tp, 0x15, 0x011d); + rtl8168_mdio_write(tp, 0x19, 0x5800); + rtl8168_mdio_write(tp, 0x15, 0x011e); + rtl8168_mdio_write(tp, 0x19, 0x5400); + rtl8168_mdio_write(tp, 0x15, 0x011f); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0120); + rtl8168_mdio_write(tp, 0x19, 0x74f0); + rtl8168_mdio_write(tp, 0x15, 0x0121); + rtl8168_mdio_write(tp, 0x19, 0x4019); + rtl8168_mdio_write(tp, 0x15, 0x0122); + rtl8168_mdio_write(tp, 0x19, 0x440d); + rtl8168_mdio_write(tp, 0x15, 0x0123); + rtl8168_mdio_write(tp, 0x19, 0xb6c1); + rtl8168_mdio_write(tp, 0x15, 0x0124); + rtl8168_mdio_write(tp, 0x19, 0xc05b); + rtl8168_mdio_write(tp, 0x15, 0x0125); + rtl8168_mdio_write(tp, 0x19, 0x00bf); + rtl8168_mdio_write(tp, 0x15, 0x0126); + rtl8168_mdio_write(tp, 0x19, 0xc025); + rtl8168_mdio_write(tp, 0x15, 0x0127); + rtl8168_mdio_write(tp, 0x19, 0x00bd); + rtl8168_mdio_write(tp, 0x15, 0x0128); + rtl8168_mdio_write(tp, 0x19, 0xc603); + 
rtl8168_mdio_write(tp, 0x15, 0x0129); + rtl8168_mdio_write(tp, 0x19, 0x00bb); + rtl8168_mdio_write(tp, 0x15, 0x012a); + rtl8168_mdio_write(tp, 0x19, 0x8805); + rtl8168_mdio_write(tp, 0x15, 0x012b); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x012c); + rtl8168_mdio_write(tp, 0x19, 0x4001); + rtl8168_mdio_write(tp, 0x15, 0x012d); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x012e); + rtl8168_mdio_write(tp, 0x19, 0xa3dd); + rtl8168_mdio_write(tp, 0x15, 0x012f); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0130); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x0131); + rtl8168_mdio_write(tp, 0x19, 0x8407); + rtl8168_mdio_write(tp, 0x15, 0x0132); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0133); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x0134); + rtl8168_mdio_write(tp, 0x19, 0xd9b8); + rtl8168_mdio_write(tp, 0x15, 0x0135); + rtl8168_mdio_write(tp, 0x19, 0x0003); + rtl8168_mdio_write(tp, 0x15, 0x0136); + rtl8168_mdio_write(tp, 0x19, 0xc240); + rtl8168_mdio_write(tp, 0x15, 0x0137); + rtl8168_mdio_write(tp, 0x19, 0x0015); + rtl8168_mdio_write(tp, 0x15, 0x0138); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0139); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x013a); + rtl8168_mdio_write(tp, 0x19, 0x9ae9); + rtl8168_mdio_write(tp, 0x15, 0x013b); + rtl8168_mdio_write(tp, 0x19, 0x3140); + rtl8168_mdio_write(tp, 0x15, 0x013c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x013d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x013e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x013f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0140); + rtl8168_mdio_write(tp, 0x19, 0x4807); + rtl8168_mdio_write(tp, 0x15, 0x0141); + rtl8168_mdio_write(tp, 0x19, 0x4004); + rtl8168_mdio_write(tp, 0x15, 0x0142); + rtl8168_mdio_write(tp, 0x19, 0x4410); + rtl8168_mdio_write(tp, 0x15, 0x0143); + rtl8168_mdio_write(tp, 0x19, 0x7c0c); + rtl8168_mdio_write(tp, 0x15, 0x0144); + rtl8168_mdio_write(tp, 0x19, 0x600c); + rtl8168_mdio_write(tp, 0x15, 0x0145); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x15, 0x0146); + rtl8168_mdio_write(tp, 0x19, 0xa68f); + rtl8168_mdio_write(tp, 0x15, 0x0147); + rtl8168_mdio_write(tp, 0x19, 0x3116); + rtl8168_mdio_write(tp, 0x15, 0x0148); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0149); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x014a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x014b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x014c); + rtl8168_mdio_write(tp, 0x19, 0x4804); + rtl8168_mdio_write(tp, 0x15, 0x014d); + rtl8168_mdio_write(tp, 0x19, 0x54c0); + rtl8168_mdio_write(tp, 0x15, 0x014e); + rtl8168_mdio_write(tp, 0x19, 0xb703); + rtl8168_mdio_write(tp, 0x15, 0x014f); + rtl8168_mdio_write(tp, 0x19, 0x5cff); + rtl8168_mdio_write(tp, 0x15, 0x0150); + rtl8168_mdio_write(tp, 0x19, 0x315f); + rtl8168_mdio_write(tp, 0x15, 0x0151); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0152); + rtl8168_mdio_write(tp, 0x19, 0x74f8); + rtl8168_mdio_write(tp, 0x15, 0x0153); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0154); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 
0x0155); + rtl8168_mdio_write(tp, 0x19, 0x6000); + rtl8168_mdio_write(tp, 0x15, 0x0156); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x0157); + rtl8168_mdio_write(tp, 0x19, 0x4418); + rtl8168_mdio_write(tp, 0x15, 0x0158); + rtl8168_mdio_write(tp, 0x19, 0x9b00); + rtl8168_mdio_write(tp, 0x15, 0x0159); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x015a); + rtl8168_mdio_write(tp, 0x19, 0x64e1); + rtl8168_mdio_write(tp, 0x15, 0x015b); + rtl8168_mdio_write(tp, 0x19, 0x7c20); + rtl8168_mdio_write(tp, 0x15, 0x015c); + rtl8168_mdio_write(tp, 0x19, 0x5820); + rtl8168_mdio_write(tp, 0x15, 0x015d); + rtl8168_mdio_write(tp, 0x19, 0x5ccf); + rtl8168_mdio_write(tp, 0x15, 0x015e); + rtl8168_mdio_write(tp, 0x19, 0x7050); + rtl8168_mdio_write(tp, 0x15, 0x015f); + rtl8168_mdio_write(tp, 0x19, 0xd9b8); + rtl8168_mdio_write(tp, 0x15, 0x0160); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x0161); + rtl8168_mdio_write(tp, 0x19, 0xdab1); + rtl8168_mdio_write(tp, 0x15, 0x0162); + rtl8168_mdio_write(tp, 0x19, 0x0015); + rtl8168_mdio_write(tp, 0x15, 0x0163); + rtl8168_mdio_write(tp, 0x19, 0xc244); + rtl8168_mdio_write(tp, 0x15, 0x0164); + rtl8168_mdio_write(tp, 0x19, 0x0013); + rtl8168_mdio_write(tp, 0x15, 0x0165); + rtl8168_mdio_write(tp, 0x19, 0xc021); + rtl8168_mdio_write(tp, 0x15, 0x0166); + rtl8168_mdio_write(tp, 0x19, 0x00f9); + rtl8168_mdio_write(tp, 0x15, 0x0167); + rtl8168_mdio_write(tp, 0x19, 0x3177); + rtl8168_mdio_write(tp, 0x15, 0x0168); + rtl8168_mdio_write(tp, 0x19, 0x5cf7); + rtl8168_mdio_write(tp, 0x15, 0x0169); + rtl8168_mdio_write(tp, 0x19, 0x4010); + rtl8168_mdio_write(tp, 0x15, 0x016a); + rtl8168_mdio_write(tp, 0x19, 0x4428); + rtl8168_mdio_write(tp, 0x15, 0x016b); + rtl8168_mdio_write(tp, 0x19, 0x9c00); + rtl8168_mdio_write(tp, 0x15, 0x016c); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x016d); + rtl8168_mdio_write(tp, 0x19, 0x6008); + rtl8168_mdio_write(tp, 0x15, 0x016e); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x016f); + rtl8168_mdio_write(tp, 0x19, 0x74f0); + rtl8168_mdio_write(tp, 0x15, 0x0170); + rtl8168_mdio_write(tp, 0x19, 0x6461); + rtl8168_mdio_write(tp, 0x15, 0x0171); + rtl8168_mdio_write(tp, 0x19, 0x6421); + rtl8168_mdio_write(tp, 0x15, 0x0172); + rtl8168_mdio_write(tp, 0x19, 0x64a1); + rtl8168_mdio_write(tp, 0x15, 0x0173); + rtl8168_mdio_write(tp, 0x19, 0x3116); + rtl8168_mdio_write(tp, 0x15, 0x0174); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0175); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0176); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0177); + rtl8168_mdio_write(tp, 0x19, 0x4805); + rtl8168_mdio_write(tp, 0x15, 0x0178); + rtl8168_mdio_write(tp, 0x19, 0xa103); + rtl8168_mdio_write(tp, 0x15, 0x0179); + rtl8168_mdio_write(tp, 0x19, 0x7c02); + rtl8168_mdio_write(tp, 0x15, 0x017a); + rtl8168_mdio_write(tp, 0x19, 0x6002); + rtl8168_mdio_write(tp, 0x15, 0x017b); + rtl8168_mdio_write(tp, 0x19, 0x7e00); + rtl8168_mdio_write(tp, 0x15, 0x017c); + rtl8168_mdio_write(tp, 0x19, 0x5400); + rtl8168_mdio_write(tp, 0x15, 0x017d); + rtl8168_mdio_write(tp, 0x19, 0x7c6b); + rtl8168_mdio_write(tp, 0x15, 0x017e); + rtl8168_mdio_write(tp, 0x19, 0x5c63); + rtl8168_mdio_write(tp, 0x15, 0x017f); + rtl8168_mdio_write(tp, 0x19, 0x407d); + rtl8168_mdio_write(tp, 0x15, 0x0180); + rtl8168_mdio_write(tp, 0x19, 0xa602); + rtl8168_mdio_write(tp, 0x15, 0x0181); + rtl8168_mdio_write(tp, 
0x19, 0x4001); + rtl8168_mdio_write(tp, 0x15, 0x0182); + rtl8168_mdio_write(tp, 0x19, 0x4420); + rtl8168_mdio_write(tp, 0x15, 0x0183); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x0184); + rtl8168_mdio_write(tp, 0x19, 0x44a1); + rtl8168_mdio_write(tp, 0x15, 0x0185); + rtl8168_mdio_write(tp, 0x19, 0xd6e0); + rtl8168_mdio_write(tp, 0x15, 0x0186); + rtl8168_mdio_write(tp, 0x19, 0x0009); + rtl8168_mdio_write(tp, 0x15, 0x0187); + rtl8168_mdio_write(tp, 0x19, 0x9efe); + rtl8168_mdio_write(tp, 0x15, 0x0188); + rtl8168_mdio_write(tp, 0x19, 0x7c02); + rtl8168_mdio_write(tp, 0x15, 0x0189); + rtl8168_mdio_write(tp, 0x19, 0x6000); + rtl8168_mdio_write(tp, 0x15, 0x018a); + rtl8168_mdio_write(tp, 0x19, 0x9c00); + rtl8168_mdio_write(tp, 0x15, 0x018b); + rtl8168_mdio_write(tp, 0x19, 0x318f); + rtl8168_mdio_write(tp, 0x15, 0x018c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x018d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x018e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x018f); + rtl8168_mdio_write(tp, 0x19, 0x4806); + rtl8168_mdio_write(tp, 0x15, 0x0190); + rtl8168_mdio_write(tp, 0x19, 0x7c10); + rtl8168_mdio_write(tp, 0x15, 0x0191); + rtl8168_mdio_write(tp, 0x19, 0x5c10); + rtl8168_mdio_write(tp, 0x15, 0x0192); + rtl8168_mdio_write(tp, 0x19, 0x40fa); + rtl8168_mdio_write(tp, 0x15, 0x0193); + rtl8168_mdio_write(tp, 0x19, 0xa602); + rtl8168_mdio_write(tp, 0x15, 0x0194); + rtl8168_mdio_write(tp, 0x19, 0x4010); + rtl8168_mdio_write(tp, 0x15, 0x0195); + rtl8168_mdio_write(tp, 0x19, 0x4440); + rtl8168_mdio_write(tp, 0x15, 0x0196); + rtl8168_mdio_write(tp, 0x19, 0x9d00); + rtl8168_mdio_write(tp, 0x15, 0x0197); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x0198); + rtl8168_mdio_write(tp, 0x19, 0x6400); + rtl8168_mdio_write(tp, 0x15, 0x0199); + rtl8168_mdio_write(tp, 0x19, 0x4003); + rtl8168_mdio_write(tp, 0x15, 0x019a); + rtl8168_mdio_write(tp, 0x19, 0x4540); + rtl8168_mdio_write(tp, 0x15, 0x019b); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x019c); + rtl8168_mdio_write(tp, 0x19, 0x6008); + rtl8168_mdio_write(tp, 0x15, 0x019d); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x019e); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x019f); + rtl8168_mdio_write(tp, 0x19, 0x6400); + rtl8168_mdio_write(tp, 0x15, 0x01a0); + rtl8168_mdio_write(tp, 0x19, 0x7c80); + rtl8168_mdio_write(tp, 0x15, 0x01a1); + rtl8168_mdio_write(tp, 0x19, 0x6480); + rtl8168_mdio_write(tp, 0x15, 0x01a2); + rtl8168_mdio_write(tp, 0x19, 0x3140); + rtl8168_mdio_write(tp, 0x15, 0x01a3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01a4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01a5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01a6); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x01a7); + rtl8168_mdio_write(tp, 0x19, 0x7c0b); + rtl8168_mdio_write(tp, 0x15, 0x01a8); + rtl8168_mdio_write(tp, 0x19, 0x6c01); + rtl8168_mdio_write(tp, 0x15, 0x01a9); + rtl8168_mdio_write(tp, 0x19, 0x64a8); + rtl8168_mdio_write(tp, 0x15, 0x01aa); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x01ab); + rtl8168_mdio_write(tp, 0x19, 0x5cf0); + rtl8168_mdio_write(tp, 0x15, 0x01ac); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x01ad); + rtl8168_mdio_write(tp, 0x19, 0xb628); + 
rtl8168_mdio_write(tp, 0x15, 0x01ae); + rtl8168_mdio_write(tp, 0x19, 0xc053); + rtl8168_mdio_write(tp, 0x15, 0x01af); + rtl8168_mdio_write(tp, 0x19, 0x0026); + rtl8168_mdio_write(tp, 0x15, 0x01b0); + rtl8168_mdio_write(tp, 0x19, 0xc02d); + rtl8168_mdio_write(tp, 0x15, 0x01b1); + rtl8168_mdio_write(tp, 0x19, 0x0024); + rtl8168_mdio_write(tp, 0x15, 0x01b2); + rtl8168_mdio_write(tp, 0x19, 0xc603); + rtl8168_mdio_write(tp, 0x15, 0x01b3); + rtl8168_mdio_write(tp, 0x19, 0x0022); + rtl8168_mdio_write(tp, 0x15, 0x01b4); + rtl8168_mdio_write(tp, 0x19, 0x8cf9); + rtl8168_mdio_write(tp, 0x15, 0x01b5); + rtl8168_mdio_write(tp, 0x19, 0x31ba); + rtl8168_mdio_write(tp, 0x15, 0x01b6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01b7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01b8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01b9); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01ba); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x01bb); + rtl8168_mdio_write(tp, 0x19, 0x5420); + rtl8168_mdio_write(tp, 0x15, 0x01bc); + rtl8168_mdio_write(tp, 0x19, 0x4811); + rtl8168_mdio_write(tp, 0x15, 0x01bd); + rtl8168_mdio_write(tp, 0x19, 0x5000); + rtl8168_mdio_write(tp, 0x15, 0x01be); + rtl8168_mdio_write(tp, 0x19, 0x4801); + rtl8168_mdio_write(tp, 0x15, 0x01bf); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x01c0); + rtl8168_mdio_write(tp, 0x19, 0x31f5); + rtl8168_mdio_write(tp, 0x15, 0x01c1); + rtl8168_mdio_write(tp, 0x19, 0xb614); + rtl8168_mdio_write(tp, 0x15, 0x01c2); + rtl8168_mdio_write(tp, 0x19, 0x8ce4); + rtl8168_mdio_write(tp, 0x15, 0x01c3); + rtl8168_mdio_write(tp, 0x19, 0xb30c); + rtl8168_mdio_write(tp, 0x15, 0x01c4); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x01c5); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x01c6); + rtl8168_mdio_write(tp, 0x19, 0x8206); + rtl8168_mdio_write(tp, 0x15, 0x01c7); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x01c8); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x01c9); + rtl8168_mdio_write(tp, 0x19, 0x7c04); + rtl8168_mdio_write(tp, 0x15, 0x01ca); + rtl8168_mdio_write(tp, 0x19, 0x7404); + rtl8168_mdio_write(tp, 0x15, 0x01cb); + rtl8168_mdio_write(tp, 0x19, 0x31c0); + rtl8168_mdio_write(tp, 0x15, 0x01cc); + rtl8168_mdio_write(tp, 0x19, 0x7c04); + rtl8168_mdio_write(tp, 0x15, 0x01cd); + rtl8168_mdio_write(tp, 0x19, 0x7400); + rtl8168_mdio_write(tp, 0x15, 0x01ce); + rtl8168_mdio_write(tp, 0x19, 0x31c0); + rtl8168_mdio_write(tp, 0x15, 0x01cf); + rtl8168_mdio_write(tp, 0x19, 0x8df1); + rtl8168_mdio_write(tp, 0x15, 0x01d0); + rtl8168_mdio_write(tp, 0x19, 0x3248); + rtl8168_mdio_write(tp, 0x15, 0x01d1); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01d2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01d3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01d4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01d5); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x01d6); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x01d7); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x01d8); + rtl8168_mdio_write(tp, 0x19, 0x7670); + rtl8168_mdio_write(tp, 0x15, 0x01d9); + rtl8168_mdio_write(tp, 0x19, 0x4023); + rtl8168_mdio_write(tp, 0x15, 
0x01da); + rtl8168_mdio_write(tp, 0x19, 0x4500); + rtl8168_mdio_write(tp, 0x15, 0x01db); + rtl8168_mdio_write(tp, 0x19, 0x4069); + rtl8168_mdio_write(tp, 0x15, 0x01dc); + rtl8168_mdio_write(tp, 0x19, 0x4580); + rtl8168_mdio_write(tp, 0x15, 0x01dd); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x01de); + rtl8168_mdio_write(tp, 0x19, 0xcff5); + rtl8168_mdio_write(tp, 0x15, 0x01df); + rtl8168_mdio_write(tp, 0x19, 0x00ff); + rtl8168_mdio_write(tp, 0x15, 0x01e0); + rtl8168_mdio_write(tp, 0x19, 0x76f0); + rtl8168_mdio_write(tp, 0x15, 0x01e1); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x01e2); + rtl8168_mdio_write(tp, 0x19, 0x4023); + rtl8168_mdio_write(tp, 0x15, 0x01e3); + rtl8168_mdio_write(tp, 0x19, 0x4500); + rtl8168_mdio_write(tp, 0x15, 0x01e4); + rtl8168_mdio_write(tp, 0x19, 0x4069); + rtl8168_mdio_write(tp, 0x15, 0x01e5); + rtl8168_mdio_write(tp, 0x19, 0x4580); + rtl8168_mdio_write(tp, 0x15, 0x01e6); + rtl8168_mdio_write(tp, 0x19, 0x9f00); + rtl8168_mdio_write(tp, 0x15, 0x01e7); + rtl8168_mdio_write(tp, 0x19, 0xd0f5); + rtl8168_mdio_write(tp, 0x15, 0x01e8); + rtl8168_mdio_write(tp, 0x19, 0x00ff); + rtl8168_mdio_write(tp, 0x15, 0x01e9); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x01ea); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x01eb); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x01ec); + rtl8168_mdio_write(tp, 0x19, 0x66a0); + rtl8168_mdio_write(tp, 0x15, 0x01ed); + rtl8168_mdio_write(tp, 0x19, 0x8300); + rtl8168_mdio_write(tp, 0x15, 0x01ee); + rtl8168_mdio_write(tp, 0x19, 0x74f0); + rtl8168_mdio_write(tp, 0x15, 0x01ef); + rtl8168_mdio_write(tp, 0x19, 0x3006); + rtl8168_mdio_write(tp, 0x15, 0x01f0); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01f1); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01f2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01f3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01f4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x01f5); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x01f6); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x01f7); + rtl8168_mdio_write(tp, 0x19, 0x409d); + rtl8168_mdio_write(tp, 0x15, 0x01f8); + rtl8168_mdio_write(tp, 0x19, 0x7c87); + rtl8168_mdio_write(tp, 0x15, 0x01f9); + rtl8168_mdio_write(tp, 0x19, 0xae14); + rtl8168_mdio_write(tp, 0x15, 0x01fa); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x01fb); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x01fc); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x01fd); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x01fe); + rtl8168_mdio_write(tp, 0x19, 0x980e); + rtl8168_mdio_write(tp, 0x15, 0x01ff); + rtl8168_mdio_write(tp, 0x19, 0x930c); + rtl8168_mdio_write(tp, 0x15, 0x0200); + rtl8168_mdio_write(tp, 0x19, 0x9206); + rtl8168_mdio_write(tp, 0x15, 0x0201); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0202); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0203); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x0204); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0205); + rtl8168_mdio_write(tp, 0x19, 0x320c); + rtl8168_mdio_write(tp, 0x15, 0x0206); + rtl8168_mdio_write(tp, 
0x19, 0x4000); + rtl8168_mdio_write(tp, 0x15, 0x0207); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0208); + rtl8168_mdio_write(tp, 0x19, 0x588d); + rtl8168_mdio_write(tp, 0x15, 0x0209); + rtl8168_mdio_write(tp, 0x19, 0x5500); + rtl8168_mdio_write(tp, 0x15, 0x020a); + rtl8168_mdio_write(tp, 0x19, 0x320c); + rtl8168_mdio_write(tp, 0x15, 0x020b); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x020c); + rtl8168_mdio_write(tp, 0x19, 0x3220); + rtl8168_mdio_write(tp, 0x15, 0x020d); + rtl8168_mdio_write(tp, 0x19, 0x4480); + rtl8168_mdio_write(tp, 0x15, 0x020e); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x020f); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0210); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x0211); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x0212); + rtl8168_mdio_write(tp, 0x19, 0x980e); + rtl8168_mdio_write(tp, 0x15, 0x0213); + rtl8168_mdio_write(tp, 0x19, 0x930c); + rtl8168_mdio_write(tp, 0x15, 0x0214); + rtl8168_mdio_write(tp, 0x19, 0x9206); + rtl8168_mdio_write(tp, 0x15, 0x0215); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x15, 0x0216); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0217); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x0218); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0219); + rtl8168_mdio_write(tp, 0x19, 0x3220); + rtl8168_mdio_write(tp, 0x15, 0x021a); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x021b); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x021c); + rtl8168_mdio_write(tp, 0x19, 0x588d); + rtl8168_mdio_write(tp, 0x15, 0x021d); + rtl8168_mdio_write(tp, 0x19, 0x5540); + rtl8168_mdio_write(tp, 0x15, 0x021e); + rtl8168_mdio_write(tp, 0x19, 0x3220); + rtl8168_mdio_write(tp, 0x15, 0x021f); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x15, 0x0220); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0221); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0222); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0223); + rtl8168_mdio_write(tp, 0x19, 0x3231); + rtl8168_mdio_write(tp, 0x15, 0x0224); + rtl8168_mdio_write(tp, 0x19, 0xab06); + rtl8168_mdio_write(tp, 0x15, 0x0225); + rtl8168_mdio_write(tp, 0x19, 0xbf08); + rtl8168_mdio_write(tp, 0x15, 0x0226); + rtl8168_mdio_write(tp, 0x19, 0x4076); + rtl8168_mdio_write(tp, 0x15, 0x0227); + rtl8168_mdio_write(tp, 0x19, 0x7d07); + rtl8168_mdio_write(tp, 0x15, 0x0228); + rtl8168_mdio_write(tp, 0x19, 0x4502); + rtl8168_mdio_write(tp, 0x15, 0x0229); + rtl8168_mdio_write(tp, 0x19, 0x3231); + rtl8168_mdio_write(tp, 0x15, 0x022a); + rtl8168_mdio_write(tp, 0x19, 0x7d80); + rtl8168_mdio_write(tp, 0x15, 0x022b); + rtl8168_mdio_write(tp, 0x19, 0x5180); + rtl8168_mdio_write(tp, 0x15, 0x022c); + rtl8168_mdio_write(tp, 0x19, 0x322f); + rtl8168_mdio_write(tp, 0x15, 0x022d); + rtl8168_mdio_write(tp, 0x19, 0x7d80); + rtl8168_mdio_write(tp, 0x15, 0x022e); + rtl8168_mdio_write(tp, 0x19, 0x5000); + rtl8168_mdio_write(tp, 0x15, 0x022f); + rtl8168_mdio_write(tp, 0x19, 0x7d07); + rtl8168_mdio_write(tp, 0x15, 0x0230); + rtl8168_mdio_write(tp, 0x19, 0x4402); + rtl8168_mdio_write(tp, 0x15, 0x0231); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0232); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + 
rtl8168_mdio_write(tp, 0x15, 0x0233); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0234); + rtl8168_mdio_write(tp, 0x19, 0xb309); + rtl8168_mdio_write(tp, 0x15, 0x0235); + rtl8168_mdio_write(tp, 0x19, 0xb204); + rtl8168_mdio_write(tp, 0x15, 0x0236); + rtl8168_mdio_write(tp, 0x19, 0xb105); + rtl8168_mdio_write(tp, 0x15, 0x0237); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0238); + rtl8168_mdio_write(tp, 0x19, 0x31c1); + rtl8168_mdio_write(tp, 0x15, 0x0239); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x023a); + rtl8168_mdio_write(tp, 0x19, 0x3261); + rtl8168_mdio_write(tp, 0x15, 0x023b); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x023c); + rtl8168_mdio_write(tp, 0x19, 0x3250); + rtl8168_mdio_write(tp, 0x15, 0x023d); + rtl8168_mdio_write(tp, 0x19, 0xb203); + rtl8168_mdio_write(tp, 0x15, 0x023e); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x023f); + rtl8168_mdio_write(tp, 0x19, 0x327a); + rtl8168_mdio_write(tp, 0x15, 0x0240); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0241); + rtl8168_mdio_write(tp, 0x19, 0x3293); + rtl8168_mdio_write(tp, 0x15, 0x0242); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0243); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0244); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0245); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0246); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0247); + rtl8168_mdio_write(tp, 0x19, 0x32a3); + rtl8168_mdio_write(tp, 0x15, 0x0248); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0249); + rtl8168_mdio_write(tp, 0x19, 0x403d); + rtl8168_mdio_write(tp, 0x15, 0x024a); + rtl8168_mdio_write(tp, 0x19, 0x440c); + rtl8168_mdio_write(tp, 0x15, 0x024b); + rtl8168_mdio_write(tp, 0x19, 0x4812); + rtl8168_mdio_write(tp, 0x15, 0x024c); + rtl8168_mdio_write(tp, 0x19, 0x5001); + rtl8168_mdio_write(tp, 0x15, 0x024d); + rtl8168_mdio_write(tp, 0x19, 0x4802); + rtl8168_mdio_write(tp, 0x15, 0x024e); + rtl8168_mdio_write(tp, 0x19, 0x6880); + rtl8168_mdio_write(tp, 0x15, 0x024f); + rtl8168_mdio_write(tp, 0x19, 0x31f5); + rtl8168_mdio_write(tp, 0x15, 0x0250); + rtl8168_mdio_write(tp, 0x19, 0xb685); + rtl8168_mdio_write(tp, 0x15, 0x0251); + rtl8168_mdio_write(tp, 0x19, 0x801c); + rtl8168_mdio_write(tp, 0x15, 0x0252); + rtl8168_mdio_write(tp, 0x19, 0xbaf5); + rtl8168_mdio_write(tp, 0x15, 0x0253); + rtl8168_mdio_write(tp, 0x19, 0xc07c); + rtl8168_mdio_write(tp, 0x15, 0x0254); + rtl8168_mdio_write(tp, 0x19, 0x00fb); + rtl8168_mdio_write(tp, 0x15, 0x0255); + rtl8168_mdio_write(tp, 0x19, 0x325a); + rtl8168_mdio_write(tp, 0x15, 0x0256); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0257); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0258); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0259); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x025a); + rtl8168_mdio_write(tp, 0x19, 0x481a); + rtl8168_mdio_write(tp, 0x15, 0x025b); + rtl8168_mdio_write(tp, 0x19, 0x5001); + rtl8168_mdio_write(tp, 0x15, 0x025c); + rtl8168_mdio_write(tp, 0x19, 0x401b); + rtl8168_mdio_write(tp, 0x15, 0x025d); + rtl8168_mdio_write(tp, 0x19, 0x480a); + rtl8168_mdio_write(tp, 0x15, 0x025e); + rtl8168_mdio_write(tp, 0x19, 0x4418); + rtl8168_mdio_write(tp, 0x15, 
0x025f); + rtl8168_mdio_write(tp, 0x19, 0x6900); + rtl8168_mdio_write(tp, 0x15, 0x0260); + rtl8168_mdio_write(tp, 0x19, 0x31f5); + rtl8168_mdio_write(tp, 0x15, 0x0261); + rtl8168_mdio_write(tp, 0x19, 0xb64b); + rtl8168_mdio_write(tp, 0x15, 0x0262); + rtl8168_mdio_write(tp, 0x19, 0xdb00); + rtl8168_mdio_write(tp, 0x15, 0x0263); + rtl8168_mdio_write(tp, 0x19, 0x0048); + rtl8168_mdio_write(tp, 0x15, 0x0264); + rtl8168_mdio_write(tp, 0x19, 0xdb7d); + rtl8168_mdio_write(tp, 0x15, 0x0265); + rtl8168_mdio_write(tp, 0x19, 0x0002); + rtl8168_mdio_write(tp, 0x15, 0x0266); + rtl8168_mdio_write(tp, 0x19, 0xa0fa); + rtl8168_mdio_write(tp, 0x15, 0x0267); + rtl8168_mdio_write(tp, 0x19, 0x4408); + rtl8168_mdio_write(tp, 0x15, 0x0268); + rtl8168_mdio_write(tp, 0x19, 0x3248); + rtl8168_mdio_write(tp, 0x15, 0x0269); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x026a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x026b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x026c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x026d); + rtl8168_mdio_write(tp, 0x19, 0xb806); + rtl8168_mdio_write(tp, 0x15, 0x026e); + rtl8168_mdio_write(tp, 0x19, 0x588d); + rtl8168_mdio_write(tp, 0x15, 0x026f); + rtl8168_mdio_write(tp, 0x19, 0x5500); + rtl8168_mdio_write(tp, 0x15, 0x0270); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x0271); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0272); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0273); + rtl8168_mdio_write(tp, 0x19, 0x4814); + rtl8168_mdio_write(tp, 0x15, 0x0274); + rtl8168_mdio_write(tp, 0x19, 0x500b); + rtl8168_mdio_write(tp, 0x15, 0x0275); + rtl8168_mdio_write(tp, 0x19, 0x4804); + rtl8168_mdio_write(tp, 0x15, 0x0276); + rtl8168_mdio_write(tp, 0x19, 0x40c4); + rtl8168_mdio_write(tp, 0x15, 0x0277); + rtl8168_mdio_write(tp, 0x19, 0x4425); + rtl8168_mdio_write(tp, 0x15, 0x0278); + rtl8168_mdio_write(tp, 0x19, 0x6a00); + rtl8168_mdio_write(tp, 0x15, 0x0279); + rtl8168_mdio_write(tp, 0x19, 0x31f5); + rtl8168_mdio_write(tp, 0x15, 0x027a); + rtl8168_mdio_write(tp, 0x19, 0xb632); + rtl8168_mdio_write(tp, 0x15, 0x027b); + rtl8168_mdio_write(tp, 0x19, 0xdc03); + rtl8168_mdio_write(tp, 0x15, 0x027c); + rtl8168_mdio_write(tp, 0x19, 0x0027); + rtl8168_mdio_write(tp, 0x15, 0x027d); + rtl8168_mdio_write(tp, 0x19, 0x80fc); + rtl8168_mdio_write(tp, 0x15, 0x027e); + rtl8168_mdio_write(tp, 0x19, 0x3283); + rtl8168_mdio_write(tp, 0x15, 0x027f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0280); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0281); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0282); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0283); + rtl8168_mdio_write(tp, 0x19, 0xb806); + rtl8168_mdio_write(tp, 0x15, 0x0284); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x0285); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0286); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x0287); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x15, 0x0288); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0289); + rtl8168_mdio_write(tp, 0x19, 0x4818); + rtl8168_mdio_write(tp, 0x15, 0x028a); + rtl8168_mdio_write(tp, 0x19, 0x5051); + rtl8168_mdio_write(tp, 0x15, 0x028b); + rtl8168_mdio_write(tp, 
0x19, 0x4808); + rtl8168_mdio_write(tp, 0x15, 0x028c); + rtl8168_mdio_write(tp, 0x19, 0x4050); + rtl8168_mdio_write(tp, 0x15, 0x028d); + rtl8168_mdio_write(tp, 0x19, 0x4462); + rtl8168_mdio_write(tp, 0x15, 0x028e); + rtl8168_mdio_write(tp, 0x19, 0x40c4); + rtl8168_mdio_write(tp, 0x15, 0x028f); + rtl8168_mdio_write(tp, 0x19, 0x4473); + rtl8168_mdio_write(tp, 0x15, 0x0290); + rtl8168_mdio_write(tp, 0x19, 0x5041); + rtl8168_mdio_write(tp, 0x15, 0x0291); + rtl8168_mdio_write(tp, 0x19, 0x6b00); + rtl8168_mdio_write(tp, 0x15, 0x0292); + rtl8168_mdio_write(tp, 0x19, 0x31f5); + rtl8168_mdio_write(tp, 0x15, 0x0293); + rtl8168_mdio_write(tp, 0x19, 0xb619); + rtl8168_mdio_write(tp, 0x15, 0x0294); + rtl8168_mdio_write(tp, 0x19, 0x80d9); + rtl8168_mdio_write(tp, 0x15, 0x0295); + rtl8168_mdio_write(tp, 0x19, 0xbd06); + rtl8168_mdio_write(tp, 0x15, 0x0296); + rtl8168_mdio_write(tp, 0x19, 0xbb0d); + rtl8168_mdio_write(tp, 0x15, 0x0297); + rtl8168_mdio_write(tp, 0x19, 0xaf14); + rtl8168_mdio_write(tp, 0x15, 0x0298); + rtl8168_mdio_write(tp, 0x19, 0x8efa); + rtl8168_mdio_write(tp, 0x15, 0x0299); + rtl8168_mdio_write(tp, 0x19, 0x5049); + rtl8168_mdio_write(tp, 0x15, 0x029a); + rtl8168_mdio_write(tp, 0x19, 0x3248); + rtl8168_mdio_write(tp, 0x15, 0x029b); + rtl8168_mdio_write(tp, 0x19, 0x4c10); + rtl8168_mdio_write(tp, 0x15, 0x029c); + rtl8168_mdio_write(tp, 0x19, 0x44b0); + rtl8168_mdio_write(tp, 0x15, 0x029d); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x029e); + rtl8168_mdio_write(tp, 0x19, 0x3292); + rtl8168_mdio_write(tp, 0x15, 0x029f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02a0); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02a1); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02a2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02a3); + rtl8168_mdio_write(tp, 0x19, 0x481f); + rtl8168_mdio_write(tp, 0x15, 0x02a4); + rtl8168_mdio_write(tp, 0x19, 0x5005); + rtl8168_mdio_write(tp, 0x15, 0x02a5); + rtl8168_mdio_write(tp, 0x19, 0x480f); + rtl8168_mdio_write(tp, 0x15, 0x02a6); + rtl8168_mdio_write(tp, 0x19, 0xac00); + rtl8168_mdio_write(tp, 0x15, 0x02a7); + rtl8168_mdio_write(tp, 0x19, 0x31a6); + rtl8168_mdio_write(tp, 0x15, 0x02a8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02a9); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02aa); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02ab); + rtl8168_mdio_write(tp, 0x19, 0x31ba); + rtl8168_mdio_write(tp, 0x15, 0x02ac); + rtl8168_mdio_write(tp, 0x19, 0x31d5); + rtl8168_mdio_write(tp, 0x15, 0x02ad); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02ae); + rtl8168_mdio_write(tp, 0x19, 0x5cf0); + rtl8168_mdio_write(tp, 0x15, 0x02af); + rtl8168_mdio_write(tp, 0x19, 0x588c); + rtl8168_mdio_write(tp, 0x15, 0x02b0); + rtl8168_mdio_write(tp, 0x19, 0x542f); + rtl8168_mdio_write(tp, 0x15, 0x02b1); + rtl8168_mdio_write(tp, 0x19, 0x7ffb); + rtl8168_mdio_write(tp, 0x15, 0x02b2); + rtl8168_mdio_write(tp, 0x19, 0x6ff8); + rtl8168_mdio_write(tp, 0x15, 0x02b3); + rtl8168_mdio_write(tp, 0x19, 0x64a4); + rtl8168_mdio_write(tp, 0x15, 0x02b4); + rtl8168_mdio_write(tp, 0x19, 0x64a0); + rtl8168_mdio_write(tp, 0x15, 0x02b5); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x02b6); + rtl8168_mdio_write(tp, 0x19, 0x4400); + rtl8168_mdio_write(tp, 0x15, 0x02b7); + rtl8168_mdio_write(tp, 0x19, 0x4020); + 
rtl8168_mdio_write(tp, 0x15, 0x02b8); + rtl8168_mdio_write(tp, 0x19, 0x4480); + rtl8168_mdio_write(tp, 0x15, 0x02b9); + rtl8168_mdio_write(tp, 0x19, 0x9e00); + rtl8168_mdio_write(tp, 0x15, 0x02ba); + rtl8168_mdio_write(tp, 0x19, 0x4891); + rtl8168_mdio_write(tp, 0x15, 0x02bb); + rtl8168_mdio_write(tp, 0x19, 0x4cc0); + rtl8168_mdio_write(tp, 0x15, 0x02bc); + rtl8168_mdio_write(tp, 0x19, 0x4801); + rtl8168_mdio_write(tp, 0x15, 0x02bd); + rtl8168_mdio_write(tp, 0x19, 0xa609); + rtl8168_mdio_write(tp, 0x15, 0x02be); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x02bf); + rtl8168_mdio_write(tp, 0x19, 0x004e); + rtl8168_mdio_write(tp, 0x15, 0x02c0); + rtl8168_mdio_write(tp, 0x19, 0x87fe); + rtl8168_mdio_write(tp, 0x15, 0x02c1); + rtl8168_mdio_write(tp, 0x19, 0x32c6); + rtl8168_mdio_write(tp, 0x15, 0x02c2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02c3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02c4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02c5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02c6); + rtl8168_mdio_write(tp, 0x19, 0x48b2); + rtl8168_mdio_write(tp, 0x15, 0x02c7); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x02c8); + rtl8168_mdio_write(tp, 0x19, 0x4822); + rtl8168_mdio_write(tp, 0x15, 0x02c9); + rtl8168_mdio_write(tp, 0x19, 0x4488); + rtl8168_mdio_write(tp, 0x15, 0x02ca); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x02cb); + rtl8168_mdio_write(tp, 0x19, 0x0042); + rtl8168_mdio_write(tp, 0x15, 0x02cc); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x02cd); + rtl8168_mdio_write(tp, 0x19, 0x4cc8); + rtl8168_mdio_write(tp, 0x15, 0x02ce); + rtl8168_mdio_write(tp, 0x19, 0x32d0); + rtl8168_mdio_write(tp, 0x15, 0x02cf); + rtl8168_mdio_write(tp, 0x19, 0x4cc0); + rtl8168_mdio_write(tp, 0x15, 0x02d0); + rtl8168_mdio_write(tp, 0x19, 0xc4d4); + rtl8168_mdio_write(tp, 0x15, 0x02d1); + rtl8168_mdio_write(tp, 0x19, 0x00f9); + rtl8168_mdio_write(tp, 0x15, 0x02d2); + rtl8168_mdio_write(tp, 0x19, 0xa51a); + rtl8168_mdio_write(tp, 0x15, 0x02d3); + rtl8168_mdio_write(tp, 0x19, 0x32d9); + rtl8168_mdio_write(tp, 0x15, 0x02d4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02d5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02d6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02d7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02d8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02d9); + rtl8168_mdio_write(tp, 0x19, 0x48b3); + rtl8168_mdio_write(tp, 0x15, 0x02da); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x02db); + rtl8168_mdio_write(tp, 0x19, 0x4823); + rtl8168_mdio_write(tp, 0x15, 0x02dc); + rtl8168_mdio_write(tp, 0x19, 0x4410); + rtl8168_mdio_write(tp, 0x15, 0x02dd); + rtl8168_mdio_write(tp, 0x19, 0xb630); + rtl8168_mdio_write(tp, 0x15, 0x02de); + rtl8168_mdio_write(tp, 0x19, 0x7dc8); + rtl8168_mdio_write(tp, 0x15, 0x02df); + rtl8168_mdio_write(tp, 0x19, 0x8203); + rtl8168_mdio_write(tp, 0x15, 0x02e0); + rtl8168_mdio_write(tp, 0x19, 0x4c48); + rtl8168_mdio_write(tp, 0x15, 0x02e1); + rtl8168_mdio_write(tp, 0x19, 0x32e3); + rtl8168_mdio_write(tp, 0x15, 0x02e2); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x02e3); + rtl8168_mdio_write(tp, 0x19, 0x9bfa); + rtl8168_mdio_write(tp, 0x15, 
0x02e4); + rtl8168_mdio_write(tp, 0x19, 0x84ca); + rtl8168_mdio_write(tp, 0x15, 0x02e5); + rtl8168_mdio_write(tp, 0x19, 0x85f8); + rtl8168_mdio_write(tp, 0x15, 0x02e6); + rtl8168_mdio_write(tp, 0x19, 0x32ec); + rtl8168_mdio_write(tp, 0x15, 0x02e7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02e8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02e9); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02ea); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02eb); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x02ec); + rtl8168_mdio_write(tp, 0x19, 0x48d4); + rtl8168_mdio_write(tp, 0x15, 0x02ed); + rtl8168_mdio_write(tp, 0x19, 0x4020); + rtl8168_mdio_write(tp, 0x15, 0x02ee); + rtl8168_mdio_write(tp, 0x19, 0x4844); + rtl8168_mdio_write(tp, 0x15, 0x02ef); + rtl8168_mdio_write(tp, 0x19, 0x4420); + rtl8168_mdio_write(tp, 0x15, 0x02f0); + rtl8168_mdio_write(tp, 0x19, 0x6800); + rtl8168_mdio_write(tp, 0x15, 0x02f1); + rtl8168_mdio_write(tp, 0x19, 0x7dc0); + rtl8168_mdio_write(tp, 0x15, 0x02f2); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x02f3); + rtl8168_mdio_write(tp, 0x19, 0x7c0b); + rtl8168_mdio_write(tp, 0x15, 0x02f4); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x02f5); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x02f6); + rtl8168_mdio_write(tp, 0x19, 0x9cfd); + rtl8168_mdio_write(tp, 0x15, 0x02f7); + rtl8168_mdio_write(tp, 0x19, 0xb616); + rtl8168_mdio_write(tp, 0x15, 0x02f8); + rtl8168_mdio_write(tp, 0x19, 0xc42b); + rtl8168_mdio_write(tp, 0x15, 0x02f9); + rtl8168_mdio_write(tp, 0x19, 0x00e0); + rtl8168_mdio_write(tp, 0x15, 0x02fa); + rtl8168_mdio_write(tp, 0x19, 0xc455); + rtl8168_mdio_write(tp, 0x15, 0x02fb); + rtl8168_mdio_write(tp, 0x19, 0x00b3); + rtl8168_mdio_write(tp, 0x15, 0x02fc); + rtl8168_mdio_write(tp, 0x19, 0xb20a); + rtl8168_mdio_write(tp, 0x15, 0x02fd); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x02fe); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x02ff); + rtl8168_mdio_write(tp, 0x19, 0x8204); + rtl8168_mdio_write(tp, 0x15, 0x0300); + rtl8168_mdio_write(tp, 0x19, 0x7c04); + rtl8168_mdio_write(tp, 0x15, 0x0301); + rtl8168_mdio_write(tp, 0x19, 0x7404); + rtl8168_mdio_write(tp, 0x15, 0x0302); + rtl8168_mdio_write(tp, 0x19, 0x32f3); + rtl8168_mdio_write(tp, 0x15, 0x0303); + rtl8168_mdio_write(tp, 0x19, 0x7c04); + rtl8168_mdio_write(tp, 0x15, 0x0304); + rtl8168_mdio_write(tp, 0x19, 0x7400); + rtl8168_mdio_write(tp, 0x15, 0x0305); + rtl8168_mdio_write(tp, 0x19, 0x32f3); + rtl8168_mdio_write(tp, 0x15, 0x0306); + rtl8168_mdio_write(tp, 0x19, 0xefed); + rtl8168_mdio_write(tp, 0x15, 0x0307); + rtl8168_mdio_write(tp, 0x19, 0x3342); + rtl8168_mdio_write(tp, 0x15, 0x0308); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0309); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030d); + rtl8168_mdio_write(tp, 0x19, 0x3006); + rtl8168_mdio_write(tp, 0x15, 0x030e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x030f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0310); + rtl8168_mdio_write(tp, 
0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0311); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0312); + rtl8168_mdio_write(tp, 0x19, 0xa207); + rtl8168_mdio_write(tp, 0x15, 0x0313); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x0314); + rtl8168_mdio_write(tp, 0x19, 0x3322); + rtl8168_mdio_write(tp, 0x15, 0x0315); + rtl8168_mdio_write(tp, 0x19, 0x4041); + rtl8168_mdio_write(tp, 0x15, 0x0316); + rtl8168_mdio_write(tp, 0x19, 0x7d07); + rtl8168_mdio_write(tp, 0x15, 0x0317); + rtl8168_mdio_write(tp, 0x19, 0x4502); + rtl8168_mdio_write(tp, 0x15, 0x0318); + rtl8168_mdio_write(tp, 0x19, 0x3322); + rtl8168_mdio_write(tp, 0x15, 0x0319); + rtl8168_mdio_write(tp, 0x19, 0x4c08); + rtl8168_mdio_write(tp, 0x15, 0x031a); + rtl8168_mdio_write(tp, 0x19, 0x3322); + rtl8168_mdio_write(tp, 0x15, 0x031b); + rtl8168_mdio_write(tp, 0x19, 0x7d80); + rtl8168_mdio_write(tp, 0x15, 0x031c); + rtl8168_mdio_write(tp, 0x19, 0x5180); + rtl8168_mdio_write(tp, 0x15, 0x031d); + rtl8168_mdio_write(tp, 0x19, 0x3320); + rtl8168_mdio_write(tp, 0x15, 0x031e); + rtl8168_mdio_write(tp, 0x19, 0x7d80); + rtl8168_mdio_write(tp, 0x15, 0x031f); + rtl8168_mdio_write(tp, 0x19, 0x5000); + rtl8168_mdio_write(tp, 0x15, 0x0320); + rtl8168_mdio_write(tp, 0x19, 0x7d07); + rtl8168_mdio_write(tp, 0x15, 0x0321); + rtl8168_mdio_write(tp, 0x19, 0x4402); + rtl8168_mdio_write(tp, 0x15, 0x0322); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0323); + rtl8168_mdio_write(tp, 0x19, 0x6c02); + rtl8168_mdio_write(tp, 0x15, 0x0324); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x0325); + rtl8168_mdio_write(tp, 0x19, 0xb30c); + rtl8168_mdio_write(tp, 0x15, 0x0326); + rtl8168_mdio_write(tp, 0x19, 0xb206); + rtl8168_mdio_write(tp, 0x15, 0x0327); + rtl8168_mdio_write(tp, 0x19, 0xb103); + rtl8168_mdio_write(tp, 0x15, 0x0328); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0329); + rtl8168_mdio_write(tp, 0x19, 0x32f6); + rtl8168_mdio_write(tp, 0x15, 0x032a); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x032b); + rtl8168_mdio_write(tp, 0x19, 0x3352); + rtl8168_mdio_write(tp, 0x15, 0x032c); + rtl8168_mdio_write(tp, 0x19, 0xb103); + rtl8168_mdio_write(tp, 0x15, 0x032d); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x032e); + rtl8168_mdio_write(tp, 0x19, 0x336a); + rtl8168_mdio_write(tp, 0x15, 0x032f); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0330); + rtl8168_mdio_write(tp, 0x19, 0x3382); + rtl8168_mdio_write(tp, 0x15, 0x0331); + rtl8168_mdio_write(tp, 0x19, 0xb206); + rtl8168_mdio_write(tp, 0x15, 0x0332); + rtl8168_mdio_write(tp, 0x19, 0xb103); + rtl8168_mdio_write(tp, 0x15, 0x0333); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0334); + rtl8168_mdio_write(tp, 0x19, 0x3395); + rtl8168_mdio_write(tp, 0x15, 0x0335); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0336); + rtl8168_mdio_write(tp, 0x19, 0x33c6); + rtl8168_mdio_write(tp, 0x15, 0x0337); + rtl8168_mdio_write(tp, 0x19, 0xb103); + rtl8168_mdio_write(tp, 0x15, 0x0338); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x0339); + rtl8168_mdio_write(tp, 0x19, 0x33d7); + rtl8168_mdio_write(tp, 0x15, 0x033a); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x033b); + rtl8168_mdio_write(tp, 0x19, 0x33f2); + rtl8168_mdio_write(tp, 0x15, 0x033c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + 
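+ /*
+  * Hedged note on the long run of paired writes above and below: it appears
+  * to follow an address/data convention, with register 0x15 taking a
+  * steadily incrementing patch address (0x02e4, 0x02e5, ...) and register
+  * 0x19 the 16-bit word stored there. The values themselves are opaque
+  * vendor firmware for the PHY's embedded MCU and must be copied verbatim;
+  * the address/data reading is an inference from the visible pattern, not
+  * documented behaviour.
+  */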
rtl8168_mdio_write(tp, 0x15, 0x033d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x033e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x033f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0340); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0341); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0342); + rtl8168_mdio_write(tp, 0x19, 0x49b5); + rtl8168_mdio_write(tp, 0x15, 0x0343); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x15, 0x0344); + rtl8168_mdio_write(tp, 0x19, 0x4d00); + rtl8168_mdio_write(tp, 0x15, 0x0345); + rtl8168_mdio_write(tp, 0x19, 0x6880); + rtl8168_mdio_write(tp, 0x15, 0x0346); + rtl8168_mdio_write(tp, 0x19, 0x7c08); + rtl8168_mdio_write(tp, 0x15, 0x0347); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x0348); + rtl8168_mdio_write(tp, 0x19, 0x4925); + rtl8168_mdio_write(tp, 0x15, 0x0349); + rtl8168_mdio_write(tp, 0x19, 0x403b); + rtl8168_mdio_write(tp, 0x15, 0x034a); + rtl8168_mdio_write(tp, 0x19, 0xa602); + rtl8168_mdio_write(tp, 0x15, 0x034b); + rtl8168_mdio_write(tp, 0x19, 0x402f); + rtl8168_mdio_write(tp, 0x15, 0x034c); + rtl8168_mdio_write(tp, 0x19, 0x4484); + rtl8168_mdio_write(tp, 0x15, 0x034d); + rtl8168_mdio_write(tp, 0x19, 0x40c8); + rtl8168_mdio_write(tp, 0x15, 0x034e); + rtl8168_mdio_write(tp, 0x19, 0x44c4); + rtl8168_mdio_write(tp, 0x15, 0x034f); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x0350); + rtl8168_mdio_write(tp, 0x19, 0x00bd); + rtl8168_mdio_write(tp, 0x15, 0x0351); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x0352); + rtl8168_mdio_write(tp, 0x19, 0xc8ed); + rtl8168_mdio_write(tp, 0x15, 0x0353); + rtl8168_mdio_write(tp, 0x19, 0x00fc); + rtl8168_mdio_write(tp, 0x15, 0x0354); + rtl8168_mdio_write(tp, 0x19, 0x8221); + rtl8168_mdio_write(tp, 0x15, 0x0355); + rtl8168_mdio_write(tp, 0x19, 0xd11d); + rtl8168_mdio_write(tp, 0x15, 0x0356); + rtl8168_mdio_write(tp, 0x19, 0x001f); + rtl8168_mdio_write(tp, 0x15, 0x0357); + rtl8168_mdio_write(tp, 0x19, 0xde18); + rtl8168_mdio_write(tp, 0x15, 0x0358); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x0359); + rtl8168_mdio_write(tp, 0x19, 0x91f6); + rtl8168_mdio_write(tp, 0x15, 0x035a); + rtl8168_mdio_write(tp, 0x19, 0x3360); + rtl8168_mdio_write(tp, 0x15, 0x035b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x035c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x035d); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x035e); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x035f); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0360); + rtl8168_mdio_write(tp, 0x19, 0x4bb6); + rtl8168_mdio_write(tp, 0x15, 0x0361); + rtl8168_mdio_write(tp, 0x19, 0x4064); + rtl8168_mdio_write(tp, 0x15, 0x0362); + rtl8168_mdio_write(tp, 0x19, 0x4b26); + rtl8168_mdio_write(tp, 0x15, 0x0363); + rtl8168_mdio_write(tp, 0x19, 0x4410); + rtl8168_mdio_write(tp, 0x15, 0x0364); + rtl8168_mdio_write(tp, 0x19, 0x4006); + rtl8168_mdio_write(tp, 0x15, 0x0365); + rtl8168_mdio_write(tp, 0x19, 0x4490); + rtl8168_mdio_write(tp, 0x15, 0x0366); + rtl8168_mdio_write(tp, 0x19, 0x6900); + rtl8168_mdio_write(tp, 0x15, 0x0367); + rtl8168_mdio_write(tp, 0x19, 0xb6a6); + rtl8168_mdio_write(tp, 0x15, 0x0368); + rtl8168_mdio_write(tp, 0x19, 0x9e02); + rtl8168_mdio_write(tp, 0x15, 
0x0369); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x036a); + rtl8168_mdio_write(tp, 0x19, 0xd11d); + rtl8168_mdio_write(tp, 0x15, 0x036b); + rtl8168_mdio_write(tp, 0x19, 0x000a); + rtl8168_mdio_write(tp, 0x15, 0x036c); + rtl8168_mdio_write(tp, 0x19, 0xbb0f); + rtl8168_mdio_write(tp, 0x15, 0x036d); + rtl8168_mdio_write(tp, 0x19, 0x8102); + rtl8168_mdio_write(tp, 0x15, 0x036e); + rtl8168_mdio_write(tp, 0x19, 0x3371); + rtl8168_mdio_write(tp, 0x15, 0x036f); + rtl8168_mdio_write(tp, 0x19, 0xa21e); + rtl8168_mdio_write(tp, 0x15, 0x0370); + rtl8168_mdio_write(tp, 0x19, 0x33b6); + rtl8168_mdio_write(tp, 0x15, 0x0371); + rtl8168_mdio_write(tp, 0x19, 0x91f6); + rtl8168_mdio_write(tp, 0x15, 0x0372); + rtl8168_mdio_write(tp, 0x19, 0xc218); + rtl8168_mdio_write(tp, 0x15, 0x0373); + rtl8168_mdio_write(tp, 0x19, 0x00f4); + rtl8168_mdio_write(tp, 0x15, 0x0374); + rtl8168_mdio_write(tp, 0x19, 0x33b6); + rtl8168_mdio_write(tp, 0x15, 0x0375); + rtl8168_mdio_write(tp, 0x19, 0x32ec); + rtl8168_mdio_write(tp, 0x15, 0x0376); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0377); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0378); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x0379); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x037a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x037b); + rtl8168_mdio_write(tp, 0x19, 0x4b97); + rtl8168_mdio_write(tp, 0x15, 0x037c); + rtl8168_mdio_write(tp, 0x19, 0x402b); + rtl8168_mdio_write(tp, 0x15, 0x037d); + rtl8168_mdio_write(tp, 0x19, 0x4b07); + rtl8168_mdio_write(tp, 0x15, 0x037e); + rtl8168_mdio_write(tp, 0x19, 0x4422); + rtl8168_mdio_write(tp, 0x15, 0x037f); + rtl8168_mdio_write(tp, 0x19, 0x6980); + rtl8168_mdio_write(tp, 0x15, 0x0380); + rtl8168_mdio_write(tp, 0x19, 0xb608); + rtl8168_mdio_write(tp, 0x15, 0x0381); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x0382); + rtl8168_mdio_write(tp, 0x19, 0xbc05); + rtl8168_mdio_write(tp, 0x15, 0x0383); + rtl8168_mdio_write(tp, 0x19, 0xc21c); + rtl8168_mdio_write(tp, 0x15, 0x0384); + rtl8168_mdio_write(tp, 0x19, 0x0032); + rtl8168_mdio_write(tp, 0x15, 0x0385); + rtl8168_mdio_write(tp, 0x19, 0xa1fb); + rtl8168_mdio_write(tp, 0x15, 0x0386); + rtl8168_mdio_write(tp, 0x19, 0x338d); + rtl8168_mdio_write(tp, 0x15, 0x0387); + rtl8168_mdio_write(tp, 0x19, 0x32ae); + rtl8168_mdio_write(tp, 0x15, 0x0388); + rtl8168_mdio_write(tp, 0x19, 0x330d); + rtl8168_mdio_write(tp, 0x15, 0x0389); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x038a); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x038b); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x038c); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x038d); + rtl8168_mdio_write(tp, 0x19, 0x4b97); + rtl8168_mdio_write(tp, 0x15, 0x038e); + rtl8168_mdio_write(tp, 0x19, 0x6a08); + rtl8168_mdio_write(tp, 0x15, 0x038f); + rtl8168_mdio_write(tp, 0x19, 0x4b07); + rtl8168_mdio_write(tp, 0x15, 0x0390); + rtl8168_mdio_write(tp, 0x19, 0x40ac); + rtl8168_mdio_write(tp, 0x15, 0x0391); + rtl8168_mdio_write(tp, 0x19, 0x4445); + rtl8168_mdio_write(tp, 0x15, 0x0392); + rtl8168_mdio_write(tp, 0x19, 0x404e); + rtl8168_mdio_write(tp, 0x15, 0x0393); + rtl8168_mdio_write(tp, 0x19, 0x4461); + rtl8168_mdio_write(tp, 0x15, 0x0394); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x0395); + rtl8168_mdio_write(tp, 
0x19, 0x9c0a); + rtl8168_mdio_write(tp, 0x15, 0x0396); + rtl8168_mdio_write(tp, 0x19, 0x63da); + rtl8168_mdio_write(tp, 0x15, 0x0397); + rtl8168_mdio_write(tp, 0x19, 0x6f0c); + rtl8168_mdio_write(tp, 0x15, 0x0398); + rtl8168_mdio_write(tp, 0x19, 0x5440); + rtl8168_mdio_write(tp, 0x15, 0x0399); + rtl8168_mdio_write(tp, 0x19, 0x4b98); + rtl8168_mdio_write(tp, 0x15, 0x039a); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x039b); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x039c); + rtl8168_mdio_write(tp, 0x19, 0x4b08); + rtl8168_mdio_write(tp, 0x15, 0x039d); + rtl8168_mdio_write(tp, 0x19, 0x63d8); + rtl8168_mdio_write(tp, 0x15, 0x039e); + rtl8168_mdio_write(tp, 0x19, 0x33a5); + rtl8168_mdio_write(tp, 0x15, 0x039f); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x03a0); + rtl8168_mdio_write(tp, 0x19, 0x00e8); + rtl8168_mdio_write(tp, 0x15, 0x03a1); + rtl8168_mdio_write(tp, 0x19, 0x820e); + rtl8168_mdio_write(tp, 0x15, 0x03a2); + rtl8168_mdio_write(tp, 0x19, 0xa10d); + rtl8168_mdio_write(tp, 0x15, 0x03a3); + rtl8168_mdio_write(tp, 0x19, 0x9df1); + rtl8168_mdio_write(tp, 0x15, 0x03a4); + rtl8168_mdio_write(tp, 0x19, 0x33af); + rtl8168_mdio_write(tp, 0x15, 0x03a5); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x03a6); + rtl8168_mdio_write(tp, 0x19, 0x00f9); + rtl8168_mdio_write(tp, 0x15, 0x03a7); + rtl8168_mdio_write(tp, 0x19, 0xc017); + rtl8168_mdio_write(tp, 0x15, 0x03a8); + rtl8168_mdio_write(tp, 0x19, 0x0007); + rtl8168_mdio_write(tp, 0x15, 0x03a9); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x03aa); + rtl8168_mdio_write(tp, 0x19, 0x6c03); + rtl8168_mdio_write(tp, 0x15, 0x03ab); + rtl8168_mdio_write(tp, 0x19, 0xa104); + rtl8168_mdio_write(tp, 0x15, 0x03ac); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x03ad); + rtl8168_mdio_write(tp, 0x19, 0x6c00); + rtl8168_mdio_write(tp, 0x15, 0x03ae); + rtl8168_mdio_write(tp, 0x19, 0x9df7); + rtl8168_mdio_write(tp, 0x15, 0x03af); + rtl8168_mdio_write(tp, 0x19, 0x7c03); + rtl8168_mdio_write(tp, 0x15, 0x03b0); + rtl8168_mdio_write(tp, 0x19, 0x6c08); + rtl8168_mdio_write(tp, 0x15, 0x03b1); + rtl8168_mdio_write(tp, 0x19, 0x33b6); + rtl8168_mdio_write(tp, 0x15, 0x03b2); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b3); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b4); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03b6); + rtl8168_mdio_write(tp, 0x19, 0x55af); + rtl8168_mdio_write(tp, 0x15, 0x03b7); + rtl8168_mdio_write(tp, 0x19, 0x7ff0); + rtl8168_mdio_write(tp, 0x15, 0x03b8); + rtl8168_mdio_write(tp, 0x19, 0x6ff0); + rtl8168_mdio_write(tp, 0x15, 0x03b9); + rtl8168_mdio_write(tp, 0x19, 0x4bb9); + rtl8168_mdio_write(tp, 0x15, 0x03ba); + rtl8168_mdio_write(tp, 0x19, 0x6a80); + rtl8168_mdio_write(tp, 0x15, 0x03bb); + rtl8168_mdio_write(tp, 0x19, 0x4b29); + rtl8168_mdio_write(tp, 0x15, 0x03bc); + rtl8168_mdio_write(tp, 0x19, 0x4041); + rtl8168_mdio_write(tp, 0x15, 0x03bd); + rtl8168_mdio_write(tp, 0x19, 0x440a); + rtl8168_mdio_write(tp, 0x15, 0x03be); + rtl8168_mdio_write(tp, 0x19, 0x4029); + rtl8168_mdio_write(tp, 0x15, 0x03bf); + rtl8168_mdio_write(tp, 0x19, 0x4418); + rtl8168_mdio_write(tp, 0x15, 0x03c0); + rtl8168_mdio_write(tp, 0x19, 0x4090); + rtl8168_mdio_write(tp, 0x15, 0x03c1); + rtl8168_mdio_write(tp, 0x19, 0x4438); + 
rtl8168_mdio_write(tp, 0x15, 0x03c2); + rtl8168_mdio_write(tp, 0x19, 0x40c4); + rtl8168_mdio_write(tp, 0x15, 0x03c3); + rtl8168_mdio_write(tp, 0x19, 0x447b); + rtl8168_mdio_write(tp, 0x15, 0x03c4); + rtl8168_mdio_write(tp, 0x19, 0xb6c4); + rtl8168_mdio_write(tp, 0x15, 0x03c5); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x03c6); + rtl8168_mdio_write(tp, 0x19, 0x9bfe); + rtl8168_mdio_write(tp, 0x15, 0x03c7); + rtl8168_mdio_write(tp, 0x19, 0x33cc); + rtl8168_mdio_write(tp, 0x15, 0x03c8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03c9); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03ca); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03cb); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03cc); + rtl8168_mdio_write(tp, 0x19, 0x542f); + rtl8168_mdio_write(tp, 0x15, 0x03cd); + rtl8168_mdio_write(tp, 0x19, 0x499a); + rtl8168_mdio_write(tp, 0x15, 0x03ce); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x03cf); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03d0); + rtl8168_mdio_write(tp, 0x19, 0x490a); + rtl8168_mdio_write(tp, 0x15, 0x03d1); + rtl8168_mdio_write(tp, 0x19, 0x405e); + rtl8168_mdio_write(tp, 0x15, 0x03d2); + rtl8168_mdio_write(tp, 0x19, 0x44f8); + rtl8168_mdio_write(tp, 0x15, 0x03d3); + rtl8168_mdio_write(tp, 0x19, 0x6b00); + rtl8168_mdio_write(tp, 0x15, 0x03d4); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x03d5); + rtl8168_mdio_write(tp, 0x19, 0x0028); + rtl8168_mdio_write(tp, 0x15, 0x03d6); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x03d7); + rtl8168_mdio_write(tp, 0x19, 0xbd27); + rtl8168_mdio_write(tp, 0x15, 0x03d8); + rtl8168_mdio_write(tp, 0x19, 0x9cfc); + rtl8168_mdio_write(tp, 0x15, 0x03d9); + rtl8168_mdio_write(tp, 0x19, 0xc639); + rtl8168_mdio_write(tp, 0x15, 0x03da); + rtl8168_mdio_write(tp, 0x19, 0x000f); + rtl8168_mdio_write(tp, 0x15, 0x03db); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x03dc); + rtl8168_mdio_write(tp, 0x19, 0x7c01); + rtl8168_mdio_write(tp, 0x15, 0x03dd); + rtl8168_mdio_write(tp, 0x19, 0x4c01); + rtl8168_mdio_write(tp, 0x15, 0x03de); + rtl8168_mdio_write(tp, 0x19, 0x9af6); + rtl8168_mdio_write(tp, 0x15, 0x03df); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03e0); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03e1); + rtl8168_mdio_write(tp, 0x19, 0x4470); + rtl8168_mdio_write(tp, 0x15, 0x03e2); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03e3); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03e4); + rtl8168_mdio_write(tp, 0x19, 0x33d4); + rtl8168_mdio_write(tp, 0x15, 0x03e5); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03e6); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03e7); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03e8); + rtl8168_mdio_write(tp, 0x19, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x03e9); + rtl8168_mdio_write(tp, 0x19, 0x49bb); + rtl8168_mdio_write(tp, 0x15, 0x03ea); + rtl8168_mdio_write(tp, 0x19, 0x4478); + rtl8168_mdio_write(tp, 0x15, 0x03eb); + rtl8168_mdio_write(tp, 0x19, 0x492b); + rtl8168_mdio_write(tp, 0x15, 0x03ec); + rtl8168_mdio_write(tp, 0x19, 0x6b80); + rtl8168_mdio_write(tp, 0x15, 0x03ed); + rtl8168_mdio_write(tp, 0x19, 0x7c01); + rtl8168_mdio_write(tp, 0x15, 
0x03ee); + rtl8168_mdio_write(tp, 0x19, 0x4c00); + rtl8168_mdio_write(tp, 0x15, 0x03ef); + rtl8168_mdio_write(tp, 0x19, 0xd64f); + rtl8168_mdio_write(tp, 0x15, 0x03f0); + rtl8168_mdio_write(tp, 0x19, 0x000d); + rtl8168_mdio_write(tp, 0x15, 0x03f1); + rtl8168_mdio_write(tp, 0x19, 0x3311); + rtl8168_mdio_write(tp, 0x15, 0x03f2); + rtl8168_mdio_write(tp, 0x19, 0xbd0c); + rtl8168_mdio_write(tp, 0x15, 0x03f3); + rtl8168_mdio_write(tp, 0x19, 0xc428); + rtl8168_mdio_write(tp, 0x15, 0x03f4); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x15, 0x03f5); + rtl8168_mdio_write(tp, 0x19, 0x9afa); + rtl8168_mdio_write(tp, 0x15, 0x03f6); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03f7); + rtl8168_mdio_write(tp, 0x19, 0x4c52); + rtl8168_mdio_write(tp, 0x15, 0x03f8); + rtl8168_mdio_write(tp, 0x19, 0x4470); + rtl8168_mdio_write(tp, 0x15, 0x03f9); + rtl8168_mdio_write(tp, 0x19, 0x7c12); + rtl8168_mdio_write(tp, 0x15, 0x03fa); + rtl8168_mdio_write(tp, 0x19, 0x4c40); + rtl8168_mdio_write(tp, 0x15, 0x03fb); + rtl8168_mdio_write(tp, 0x19, 0x33ef); + rtl8168_mdio_write(tp, 0x15, 0x03fc); + rtl8168_mdio_write(tp, 0x19, 0x3342); + rtl8168_mdio_write(tp, 0x15, 0x03fd); + rtl8168_mdio_write(tp, 0x19, 0x330d); + rtl8168_mdio_write(tp, 0x15, 0x03fe); + rtl8168_mdio_write(tp, 0x19, 0x32ae); + rtl8168_mdio_write(tp, 0x15, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0xf602); + rtl8168_mdio_write(tp, 0x06, 0x0112); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x1f02); + rtl8168_mdio_write(tp, 0x06, 0x012c); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x3c02); + rtl8168_mdio_write(tp, 0x06, 0x0156); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x6d02); + rtl8168_mdio_write(tp, 0x06, 0x809d); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xc702); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd105); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xcd02); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 
0x06, 0xca02); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd105); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xd002); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd481); + rtl8168_mdio_write(tp, 0x06, 0xc9e4); + rtl8168_mdio_write(tp, 0x06, 0x8b90); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x91d4); + rtl8168_mdio_write(tp, 0x06, 0x81b8); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x92e5); + rtl8168_mdio_write(tp, 0x06, 0x8b93); + rtl8168_mdio_write(tp, 0x06, 0xbf8b); + rtl8168_mdio_write(tp, 0x06, 0x88ec); + rtl8168_mdio_write(tp, 0x06, 0x0019); + rtl8168_mdio_write(tp, 0x06, 0xa98b); + rtl8168_mdio_write(tp, 0x06, 0x90f9); + rtl8168_mdio_write(tp, 0x06, 0xeeff); + rtl8168_mdio_write(tp, 0x06, 0xf600); + rtl8168_mdio_write(tp, 0x06, 0xeeff); + rtl8168_mdio_write(tp, 0x06, 0xf7fc); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xc102); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xc402); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x201a); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x824b); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x1902); + rtl8168_mdio_write(tp, 0x06, 0x2c9d); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x9602); + rtl8168_mdio_write(tp, 0x06, 0x0473); + rtl8168_mdio_write(tp, 0x06, 0x022e); + rtl8168_mdio_write(tp, 0x06, 0x3902); + rtl8168_mdio_write(tp, 0x06, 0x044d); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x210b); + rtl8168_mdio_write(tp, 0x06, 0xf621); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x0416); + rtl8168_mdio_write(tp, 0x06, 0x021b); + rtl8168_mdio_write(tp, 0x06, 0xa4e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x22e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2305); + rtl8168_mdio_write(tp, 0x06, 0xf623); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x24e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2505); + rtl8168_mdio_write(tp, 0x06, 0xf625); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x26e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xdae0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad27); + 
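+ /*
+  * Hedged note: this stream of writes to register 0x06 follows the earlier
+  * selection of extension page 0x0005 and the address seed written through
+  * register 0x05 (0xfff6, then 0x8000). Register 0x06 appears to act as an
+  * auto-incrementing data port, so the patch image is pushed one word at a
+  * time; as above, the payload words are opaque vendor data.
+  */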
rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x27e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x5cfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad21); + rtl8168_mdio_write(tp, 0x06, 0x57e0); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x2358); + rtl8168_mdio_write(tp, 0x06, 0xc059); + rtl8168_mdio_write(tp, 0x06, 0x021e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b3c); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e44); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x3cad); + rtl8168_mdio_write(tp, 0x06, 0x211d); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x84f7); + rtl8168_mdio_write(tp, 0x06, 0x29e5); + rtl8168_mdio_write(tp, 0x06, 0x8b84); + rtl8168_mdio_write(tp, 0x06, 0xac27); + rtl8168_mdio_write(tp, 0x06, 0x0dac); + rtl8168_mdio_write(tp, 0x06, 0x2605); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x7fae); + rtl8168_mdio_write(tp, 0x06, 0x2b02); + rtl8168_mdio_write(tp, 0x06, 0x2c23); + rtl8168_mdio_write(tp, 0x06, 0xae26); + rtl8168_mdio_write(tp, 0x06, 0x022c); + rtl8168_mdio_write(tp, 0x06, 0x41ae); + rtl8168_mdio_write(tp, 0x06, 0x21e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x18e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0x58fc); + rtl8168_mdio_write(tp, 0x06, 0xe4ff); + rtl8168_mdio_write(tp, 0x06, 0xf7d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x2eee); + rtl8168_mdio_write(tp, 0x06, 0x0232); + rtl8168_mdio_write(tp, 0x06, 0x0ad1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x82e8); + rtl8168_mdio_write(tp, 0x06, 0x0232); + rtl8168_mdio_write(tp, 0x06, 0x0a02); + rtl8168_mdio_write(tp, 0x06, 0x2bdf); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x04d0); + rtl8168_mdio_write(tp, 0x06, 0x0202); + rtl8168_mdio_write(tp, 0x06, 0x1e97); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2228); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xd302); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd10c); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xd602); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd104); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xd902); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xe802); + rtl8168_mdio_write(tp, 0x06, 0x320a); + rtl8168_mdio_write(tp, 0x06, 0xe0ff); + rtl8168_mdio_write(tp, 0x06, 0xf768); + rtl8168_mdio_write(tp, 0x06, 0x03e4); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xd004); + rtl8168_mdio_write(tp, 0x06, 0x0228); + rtl8168_mdio_write(tp, 0x06, 0x7a04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0xe234); + rtl8168_mdio_write(tp, 0x06, 0xe1e2); + rtl8168_mdio_write(tp, 0x06, 
0x35f6); + rtl8168_mdio_write(tp, 0x06, 0x2be4); + rtl8168_mdio_write(tp, 0x06, 0xe234); + rtl8168_mdio_write(tp, 0x06, 0xe5e2); + rtl8168_mdio_write(tp, 0x06, 0x35fc); + rtl8168_mdio_write(tp, 0x06, 0x05f8); + rtl8168_mdio_write(tp, 0x06, 0xe0e2); + rtl8168_mdio_write(tp, 0x06, 0x34e1); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xf72b); + rtl8168_mdio_write(tp, 0x06, 0xe4e2); + rtl8168_mdio_write(tp, 0x06, 0x34e5); + rtl8168_mdio_write(tp, 0x06, 0xe235); + rtl8168_mdio_write(tp, 0x06, 0xfc05); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69ac); + rtl8168_mdio_write(tp, 0x06, 0x1b4c); + rtl8168_mdio_write(tp, 0x06, 0xbf2e); + rtl8168_mdio_write(tp, 0x06, 0x3002); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 0x06, 0xef01); + rtl8168_mdio_write(tp, 0x06, 0xe28a); + rtl8168_mdio_write(tp, 0x06, 0x76e4); + rtl8168_mdio_write(tp, 0x06, 0x8a76); + rtl8168_mdio_write(tp, 0x06, 0x1f12); + rtl8168_mdio_write(tp, 0x06, 0x9e3a); + rtl8168_mdio_write(tp, 0x06, 0xef12); + rtl8168_mdio_write(tp, 0x06, 0x5907); + rtl8168_mdio_write(tp, 0x06, 0x9f12); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf721); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40d0); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x287a); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x34fc); + rtl8168_mdio_write(tp, 0x06, 0xa000); + rtl8168_mdio_write(tp, 0x06, 0x1002); + rtl8168_mdio_write(tp, 0x06, 0x2dc3); + rtl8168_mdio_write(tp, 0x06, 0x022e); + rtl8168_mdio_write(tp, 0x06, 0x21e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf621); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40ae); + rtl8168_mdio_write(tp, 0x06, 0x0fbf); + rtl8168_mdio_write(tp, 0x06, 0x3fa5); + rtl8168_mdio_write(tp, 0x06, 0x0231); + rtl8168_mdio_write(tp, 0x06, 0x6cbf); + rtl8168_mdio_write(tp, 0x06, 0x3fa2); + rtl8168_mdio_write(tp, 0x06, 0x0231); + rtl8168_mdio_write(tp, 0x06, 0x6c02); + rtl8168_mdio_write(tp, 0x06, 0x2dc3); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0xe2f4); + rtl8168_mdio_write(tp, 0x06, 0xe1e2); + rtl8168_mdio_write(tp, 0x06, 0xf5e4); + rtl8168_mdio_write(tp, 0x06, 0x8a78); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0x79ee); + rtl8168_mdio_write(tp, 0x06, 0xe2f4); + rtl8168_mdio_write(tp, 0x06, 0xd8ee); + rtl8168_mdio_write(tp, 0x06, 0xe2f5); + rtl8168_mdio_write(tp, 0x06, 0x20fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2065); + rtl8168_mdio_write(tp, 0x06, 0xd200); + rtl8168_mdio_write(tp, 0x06, 0xbf2e); + rtl8168_mdio_write(tp, 0x06, 0xe802); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 0x06, 0x1e21); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xdf02); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 0x06, 0x0c11); + rtl8168_mdio_write(tp, 0x06, 0x1e21); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xe202); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 
0x06, 0x0c12); + rtl8168_mdio_write(tp, 0x06, 0x1e21); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xe502); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 0x06, 0x0c13); + rtl8168_mdio_write(tp, 0x06, 0x1e21); + rtl8168_mdio_write(tp, 0x06, 0xbf1f); + rtl8168_mdio_write(tp, 0x06, 0x5302); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 0x06, 0x0c14); + rtl8168_mdio_write(tp, 0x06, 0x1e21); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0xeb02); + rtl8168_mdio_write(tp, 0x06, 0x31dd); + rtl8168_mdio_write(tp, 0x06, 0x0c16); + rtl8168_mdio_write(tp, 0x06, 0x1e21); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0xe01f); + rtl8168_mdio_write(tp, 0x06, 0x029e); + rtl8168_mdio_write(tp, 0x06, 0x22e6); + rtl8168_mdio_write(tp, 0x06, 0x83e0); + rtl8168_mdio_write(tp, 0x06, 0xad31); + rtl8168_mdio_write(tp, 0x06, 0x14ad); + rtl8168_mdio_write(tp, 0x06, 0x3011); + rtl8168_mdio_write(tp, 0x06, 0xef02); + rtl8168_mdio_write(tp, 0x06, 0x580c); + rtl8168_mdio_write(tp, 0x06, 0x9e07); + rtl8168_mdio_write(tp, 0x06, 0xad36); + rtl8168_mdio_write(tp, 0x06, 0x085a); + rtl8168_mdio_write(tp, 0x06, 0x309f); + rtl8168_mdio_write(tp, 0x06, 0x04d1); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0x02d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x82dc); + rtl8168_mdio_write(tp, 0x06, 0x0232); + rtl8168_mdio_write(tp, 0x06, 0x0aef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x0400); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0x77e1); + rtl8168_mdio_write(tp, 0x06, 0x4010); + rtl8168_mdio_write(tp, 0x06, 0xe150); + rtl8168_mdio_write(tp, 0x06, 0x32e1); + rtl8168_mdio_write(tp, 0x06, 0x5030); + rtl8168_mdio_write(tp, 0x06, 0xe144); + rtl8168_mdio_write(tp, 0x06, 0x74e1); + rtl8168_mdio_write(tp, 0x06, 0x44bb); + rtl8168_mdio_write(tp, 0x06, 0xe2d2); + rtl8168_mdio_write(tp, 0x06, 0x40e0); + rtl8168_mdio_write(tp, 0x06, 0x2cfc); + rtl8168_mdio_write(tp, 0x06, 0xe2cc); + rtl8168_mdio_write(tp, 0x06, 0xcce2); + rtl8168_mdio_write(tp, 0x06, 0x00cc); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 0x99e0); + rtl8168_mdio_write(tp, 0x06, 0x3688); + rtl8168_mdio_write(tp, 0x06, 0xe036); + rtl8168_mdio_write(tp, 0x06, 0x99e1); + rtl8168_mdio_write(tp, 0x06, 0x40dd); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x05, 0xe142); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x05, 0xe140); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + gphy_val |= BIT_2; + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + rtl8168_mdio_write(tp, 0x1f, 0x0000); +} + +static void +rtl8168_set_phy_mcu_8168evl_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + unsigned int gphy_val,i; + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + 
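+ /*
+  * rtl8168_set_phy_mcu_8168evl_2() follows the same apparent skeleton as
+  * its siblings: park the PHY MCU, wait for a ready indication, upload the
+  * patch tables, then restart. The two loops just below are the wait
+  * steps, polling register 0x1c until bit 0x0080 is set and register 0x17
+  * until bit 0x0001 clears, with 100 us delays and bounded retry counts.
+  * Further down, a few extra writes are gated on PCI subsystem IDs
+  * 0x144d:0xc0a6 (0x144d is Samsung's PCI vendor ID), i.e. a
+  * board-specific quirk rather than part of the generic patch sequence.
+  */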
rtl8168_mdio_write(tp, 0x00, 0x1800); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val &= ~(BIT_12); + rtl8168_mdio_write(tp, 0x15, gphy_val); + rtl8168_mdio_write(tp, 0x00, 0x4800); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x002f); + for (i = 0; i < 1000; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1c); + if ((gphy_val & 0x0080) == 0x0080) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x1800); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x17); + if (!(gphy_val & 0x0001)) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + rtl8168_mdio_write(tp, 0x15, 0x00AF); + rtl8168_mdio_write(tp, 0x19, 0x4060); + rtl8168_mdio_write(tp, 0x15, 0x00B0); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x00B1); + rtl8168_mdio_write(tp, 0x19, 0x7e00); + rtl8168_mdio_write(tp, 0x15, 0x00B2); + rtl8168_mdio_write(tp, 0x19, 0x72B0); + rtl8168_mdio_write(tp, 0x15, 0x00B3); + rtl8168_mdio_write(tp, 0x19, 0x7F00); + rtl8168_mdio_write(tp, 0x15, 0x00B4); + rtl8168_mdio_write(tp, 0x19, 0x73B0); + rtl8168_mdio_write(tp, 0x15, 0x0101); + rtl8168_mdio_write(tp, 0x19, 0x0005); + rtl8168_mdio_write(tp, 0x15, 0x0103); + rtl8168_mdio_write(tp, 0x19, 0x0003); + rtl8168_mdio_write(tp, 0x15, 0x0105); + rtl8168_mdio_write(tp, 0x19, 0x30FD); + rtl8168_mdio_write(tp, 0x15, 0x0106); + rtl8168_mdio_write(tp, 0x19, 0x9DF7); + rtl8168_mdio_write(tp, 0x15, 0x0107); + rtl8168_mdio_write(tp, 0x19, 0x30C6); + rtl8168_mdio_write(tp, 0x15, 0x0098); + rtl8168_mdio_write(tp, 0x19, 0x7c0b); + rtl8168_mdio_write(tp, 0x15, 0x0099); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00eb); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00f8); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00fe); + rtl8168_mdio_write(tp, 0x19, 0x6f0f); + rtl8168_mdio_write(tp, 0x15, 0x00db); + rtl8168_mdio_write(tp, 0x19, 0x6f09); + rtl8168_mdio_write(tp, 0x15, 0x00dc); + rtl8168_mdio_write(tp, 0x19, 0xaefd); + rtl8168_mdio_write(tp, 0x15, 0x00dd); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00de); + rtl8168_mdio_write(tp, 0x19, 0xc60b); + rtl8168_mdio_write(tp, 0x15, 0x00df); + rtl8168_mdio_write(tp, 0x19, 0x00fa); + rtl8168_mdio_write(tp, 0x15, 0x00e0); + rtl8168_mdio_write(tp, 0x19, 0x30e1); + rtl8168_mdio_write(tp, 0x15, 0x020c); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x020e); + rtl8168_mdio_write(tp, 0x19, 0x9813); + rtl8168_mdio_write(tp, 0x15, 0x020f); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x0210); + rtl8168_mdio_write(tp, 0x19, 0x930f); + rtl8168_mdio_write(tp, 0x15, 0x0211); + rtl8168_mdio_write(tp, 0x19, 0x9206); + rtl8168_mdio_write(tp, 0x15, 0x0212); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0213); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0214); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x0215); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0216); + rtl8168_mdio_write(tp, 0x19, 0x3224); + 
rtl8168_mdio_write(tp, 0x15, 0x0217); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0218); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0219); + rtl8168_mdio_write(tp, 0x19, 0x588d); + rtl8168_mdio_write(tp, 0x15, 0x021a); + rtl8168_mdio_write(tp, 0x19, 0x5540); + rtl8168_mdio_write(tp, 0x15, 0x021b); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x021c); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x021d); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x021e); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x021f); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0220); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x0221); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x0222); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0223); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x0224); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0225); + rtl8168_mdio_write(tp, 0x19, 0x3231); + rtl8168_mdio_write(tp, 0x15, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2160); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0040); + rtl8168_mdio_write(tp, 0x18, 0x0004); + if (pdev->subsystem_vendor == 0x144d && + pdev->subsystem_device == 0xc0a6) { + rtl8168_mdio_write(tp, 0x18, 0x0724); + rtl8168_mdio_write(tp, 0x19, 0xfe00); + rtl8168_mdio_write(tp, 0x18, 0x0734); + rtl8168_mdio_write(tp, 0x19, 0xfd00); + rtl8168_mdio_write(tp, 0x18, 0x1824); + rtl8168_mdio_write(tp, 0x19, 0xfc00); + rtl8168_mdio_write(tp, 0x18, 0x1834); + rtl8168_mdio_write(tp, 0x19, 0xfd00); + } + rtl8168_mdio_write(tp, 0x18, 0x09d4); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x09e4); + rtl8168_mdio_write(tp, 0x19, 0x0800); + rtl8168_mdio_write(tp, 0x18, 0x09f4); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x0a04); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x0a14); + rtl8168_mdio_write(tp, 0x19, 0x0c00); + rtl8168_mdio_write(tp, 0x18, 0x0a24); + rtl8168_mdio_write(tp, 0x19, 0xff00); + rtl8168_mdio_write(tp, 0x18, 0x0a74); + rtl8168_mdio_write(tp, 0x19, 0xf600); + rtl8168_mdio_write(tp, 0x18, 0x1a24); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x18, 0x1a64); + rtl8168_mdio_write(tp, 0x19, 0x0500); + rtl8168_mdio_write(tp, 0x18, 0x1a74); + rtl8168_mdio_write(tp, 0x19, 0x9500); + rtl8168_mdio_write(tp, 0x18, 0x1a84); + rtl8168_mdio_write(tp, 0x19, 0x8000); + rtl8168_mdio_write(tp, 0x18, 0x1a94); + rtl8168_mdio_write(tp, 0x19, 0x7d00); + rtl8168_mdio_write(tp, 0x18, 0x1aa4); + rtl8168_mdio_write(tp, 0x19, 0x9600); + rtl8168_mdio_write(tp, 0x18, 0x1ac4); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x1ad4); + rtl8168_mdio_write(tp, 0x19, 0x0800); + rtl8168_mdio_write(tp, 0x18, 0x1af4); + rtl8168_mdio_write(tp, 0x19, 0xc400); + rtl8168_mdio_write(tp, 0x18, 0x1b04); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x1b14); + rtl8168_mdio_write(tp, 0x19, 0x0800); + rtl8168_mdio_write(tp, 0x18, 0x1b24); + rtl8168_mdio_write(tp, 0x19, 0xfd00); + rtl8168_mdio_write(tp, 0x18, 
0x1b34); + rtl8168_mdio_write(tp, 0x19, 0x4000); + rtl8168_mdio_write(tp, 0x18, 0x1b44); + rtl8168_mdio_write(tp, 0x19, 0x0400); + rtl8168_mdio_write(tp, 0x18, 0x1b94); + rtl8168_mdio_write(tp, 0x19, 0xf100); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x17, 0x2100); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0040); + rtl8168_mdio_write(tp, 0x18, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0xf602); + rtl8168_mdio_write(tp, 0x06, 0x0115); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x2202); + rtl8168_mdio_write(tp, 0x06, 0x80a0); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x3f02); + rtl8168_mdio_write(tp, 0x06, 0x0159); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0xbd02); + rtl8168_mdio_write(tp, 0x06, 0x80da); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xd481); + rtl8168_mdio_write(tp, 0x06, 0xd2e4); + rtl8168_mdio_write(tp, 0x06, 0x8b92); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x93d1); + rtl8168_mdio_write(tp, 0x06, 0x03bf); + rtl8168_mdio_write(tp, 0x06, 0x859e); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23d1); + rtl8168_mdio_write(tp, 0x06, 0x02bf); + rtl8168_mdio_write(tp, 0x06, 0x85a1); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23ee); + rtl8168_mdio_write(tp, 0x06, 0x8608); + rtl8168_mdio_write(tp, 0x06, 0x03ee); + rtl8168_mdio_write(tp, 0x06, 0x860a); + rtl8168_mdio_write(tp, 0x06, 0x60ee); + rtl8168_mdio_write(tp, 0x06, 0x8610); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8611); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x07ee); + rtl8168_mdio_write(tp, 0x06, 0x8abf); + rtl8168_mdio_write(tp, 0x06, 0x73ee); + rtl8168_mdio_write(tp, 0x06, 0x8a95); + rtl8168_mdio_write(tp, 0x06, 0x02bf); + rtl8168_mdio_write(tp, 0x06, 0x8b88); + rtl8168_mdio_write(tp, 0x06, 0xec00); + rtl8168_mdio_write(tp, 0x06, 0x19a9); + rtl8168_mdio_write(tp, 0x06, 0x8b90); + rtl8168_mdio_write(tp, 0x06, 0xf9ee); + rtl8168_mdio_write(tp, 0x06, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xfed1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 
0x06, 0x8595); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23d1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x8598); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x2304); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8a); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x14ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8a); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x1f9a); + rtl8168_mdio_write(tp, 0x06, 0xe0e4); + rtl8168_mdio_write(tp, 0x06, 0x26e1); + rtl8168_mdio_write(tp, 0x06, 0xe427); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x2623); + rtl8168_mdio_write(tp, 0x06, 0xe5e4); + rtl8168_mdio_write(tp, 0x06, 0x27fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8dad); + rtl8168_mdio_write(tp, 0x06, 0x2014); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8d00); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0x5a78); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x0902); + rtl8168_mdio_write(tp, 0x06, 0x05db); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x7b02); + rtl8168_mdio_write(tp, 0x06, 0x3231); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x1df6); + rtl8168_mdio_write(tp, 0x06, 0x20e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x5c02); + rtl8168_mdio_write(tp, 0x06, 0x2bcb); + rtl8168_mdio_write(tp, 0x06, 0x022d); + rtl8168_mdio_write(tp, 0x06, 0x2902); + rtl8168_mdio_write(tp, 0x06, 0x03b4); + rtl8168_mdio_write(tp, 0x06, 0x0285); + rtl8168_mdio_write(tp, 0x06, 0x6402); + rtl8168_mdio_write(tp, 0x06, 0x2eca); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0xcd02); + rtl8168_mdio_write(tp, 0x06, 0x046f); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x210b); + rtl8168_mdio_write(tp, 0x06, 0xf621); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x8520); + rtl8168_mdio_write(tp, 0x06, 0x021b); + rtl8168_mdio_write(tp, 0x06, 0xe8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x22e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2308); + rtl8168_mdio_write(tp, 0x06, 0xf623); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x311c); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2405); + rtl8168_mdio_write(tp, 0x06, 0xf624); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x25e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2608); + 
rtl8168_mdio_write(tp, 0x06, 0xf626); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x2df5); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2705); + rtl8168_mdio_write(tp, 0x06, 0xf627); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x037a); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x65d2); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x2fe9); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf61e); + rtl8168_mdio_write(tp, 0x06, 0x21bf); + rtl8168_mdio_write(tp, 0x06, 0x2ff5); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf60c); + rtl8168_mdio_write(tp, 0x06, 0x111e); + rtl8168_mdio_write(tp, 0x06, 0x21bf); + rtl8168_mdio_write(tp, 0x06, 0x2ff8); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf60c); + rtl8168_mdio_write(tp, 0x06, 0x121e); + rtl8168_mdio_write(tp, 0x06, 0x21bf); + rtl8168_mdio_write(tp, 0x06, 0x2ffb); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf60c); + rtl8168_mdio_write(tp, 0x06, 0x131e); + rtl8168_mdio_write(tp, 0x06, 0x21bf); + rtl8168_mdio_write(tp, 0x06, 0x1f97); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf60c); + rtl8168_mdio_write(tp, 0x06, 0x141e); + rtl8168_mdio_write(tp, 0x06, 0x21bf); + rtl8168_mdio_write(tp, 0x06, 0x859b); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf60c); + rtl8168_mdio_write(tp, 0x06, 0x161e); + rtl8168_mdio_write(tp, 0x06, 0x21e0); + rtl8168_mdio_write(tp, 0x06, 0x8a8c); + rtl8168_mdio_write(tp, 0x06, 0x1f02); + rtl8168_mdio_write(tp, 0x06, 0x9e22); + rtl8168_mdio_write(tp, 0x06, 0xe68a); + rtl8168_mdio_write(tp, 0x06, 0x8cad); + rtl8168_mdio_write(tp, 0x06, 0x3114); + rtl8168_mdio_write(tp, 0x06, 0xad30); + rtl8168_mdio_write(tp, 0x06, 0x11ef); + rtl8168_mdio_write(tp, 0x06, 0x0258); + rtl8168_mdio_write(tp, 0x06, 0x0c9e); + rtl8168_mdio_write(tp, 0x06, 0x07ad); + rtl8168_mdio_write(tp, 0x06, 0x3608); + rtl8168_mdio_write(tp, 0x06, 0x5a30); + rtl8168_mdio_write(tp, 0x06, 0x9f04); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xae02); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf2f); + rtl8168_mdio_write(tp, 0x06, 0xf202); + rtl8168_mdio_write(tp, 0x06, 0x3723); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xface); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69fa); + rtl8168_mdio_write(tp, 0x06, 0xd401); + rtl8168_mdio_write(tp, 0x06, 0x55b4); + rtl8168_mdio_write(tp, 0x06, 0xfebf); + rtl8168_mdio_write(tp, 0x06, 0x85a7); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf6ac); + rtl8168_mdio_write(tp, 0x06, 0x280b); + rtl8168_mdio_write(tp, 0x06, 0xbf85); + rtl8168_mdio_write(tp, 0x06, 0xa402); + rtl8168_mdio_write(tp, 0x06, 0x36f6); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x49ae); + rtl8168_mdio_write(tp, 0x06, 0x64bf); + rtl8168_mdio_write(tp, 0x06, 
0x85a4); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf6ac); + rtl8168_mdio_write(tp, 0x06, 0x285b); + rtl8168_mdio_write(tp, 0x06, 0xd000); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x60ac); + rtl8168_mdio_write(tp, 0x06, 0x2105); + rtl8168_mdio_write(tp, 0x06, 0xac22); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x4ebf); + rtl8168_mdio_write(tp, 0x06, 0xe0c4); + rtl8168_mdio_write(tp, 0x06, 0xbe86); + rtl8168_mdio_write(tp, 0x06, 0x14d2); + rtl8168_mdio_write(tp, 0x06, 0x04d8); + rtl8168_mdio_write(tp, 0x06, 0x19d9); + rtl8168_mdio_write(tp, 0x06, 0x1907); + rtl8168_mdio_write(tp, 0x06, 0xdc19); + rtl8168_mdio_write(tp, 0x06, 0xdd19); + rtl8168_mdio_write(tp, 0x06, 0x0789); + rtl8168_mdio_write(tp, 0x06, 0x89ef); + rtl8168_mdio_write(tp, 0x06, 0x645e); + rtl8168_mdio_write(tp, 0x06, 0x07ff); + rtl8168_mdio_write(tp, 0x06, 0x0d65); + rtl8168_mdio_write(tp, 0x06, 0x5cf8); + rtl8168_mdio_write(tp, 0x06, 0x001e); + rtl8168_mdio_write(tp, 0x06, 0x46dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0x19b2); + rtl8168_mdio_write(tp, 0x06, 0xe2d4); + rtl8168_mdio_write(tp, 0x06, 0x0001); + rtl8168_mdio_write(tp, 0x06, 0xbf85); + rtl8168_mdio_write(tp, 0x06, 0xa402); + rtl8168_mdio_write(tp, 0x06, 0x3723); + rtl8168_mdio_write(tp, 0x06, 0xae1d); + rtl8168_mdio_write(tp, 0x06, 0xbee0); + rtl8168_mdio_write(tp, 0x06, 0xc4bf); + rtl8168_mdio_write(tp, 0x06, 0x8614); + rtl8168_mdio_write(tp, 0x06, 0xd204); + rtl8168_mdio_write(tp, 0x06, 0xd819); + rtl8168_mdio_write(tp, 0x06, 0xd919); + rtl8168_mdio_write(tp, 0x06, 0x07dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0x1907); + rtl8168_mdio_write(tp, 0x06, 0xb2f4); + rtl8168_mdio_write(tp, 0x06, 0xd400); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x85a4); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23fe); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfec6); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc05); + rtl8168_mdio_write(tp, 0x06, 0xf9e2); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0xeb5a); + rtl8168_mdio_write(tp, 0x06, 0x070c); + rtl8168_mdio_write(tp, 0x06, 0x031e); + rtl8168_mdio_write(tp, 0x06, 0x20e6); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe7e0); + rtl8168_mdio_write(tp, 0x06, 0xebe0); + rtl8168_mdio_write(tp, 0x06, 0xe0fc); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xfdfd); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac26); + rtl8168_mdio_write(tp, 0x06, 0x1ae0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x14e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xac20); + rtl8168_mdio_write(tp, 0x06, 0x0ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xac23); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xac24); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0x1ab5); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x1c04); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 
0x06, 0x1d04); + rtl8168_mdio_write(tp, 0x06, 0xe2e0); + rtl8168_mdio_write(tp, 0x06, 0x7ce3); + rtl8168_mdio_write(tp, 0x06, 0xe07d); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x38e1); + rtl8168_mdio_write(tp, 0x06, 0xe039); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x1bad); + rtl8168_mdio_write(tp, 0x06, 0x390d); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf21); + rtl8168_mdio_write(tp, 0x06, 0xd502); + rtl8168_mdio_write(tp, 0x06, 0x3723); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xd8ae); + rtl8168_mdio_write(tp, 0x06, 0x0bac); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0x1802); + rtl8168_mdio_write(tp, 0x06, 0x8360); + rtl8168_mdio_write(tp, 0x06, 0x021a); + rtl8168_mdio_write(tp, 0x06, 0xc6fd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2605); + rtl8168_mdio_write(tp, 0x06, 0x0222); + rtl8168_mdio_write(tp, 0x06, 0xa4f7); + rtl8168_mdio_write(tp, 0x06, 0x28e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xad21); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x23a9); + rtl8168_mdio_write(tp, 0x06, 0xf729); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2005); + rtl8168_mdio_write(tp, 0x06, 0x0214); + rtl8168_mdio_write(tp, 0x06, 0xabf7); + rtl8168_mdio_write(tp, 0x06, 0x2ae0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad23); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x12e7); + rtl8168_mdio_write(tp, 0x06, 0xf72b); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2405); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0xbcf7); + rtl8168_mdio_write(tp, 0x06, 0x2ce5); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x21e5); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2109); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xf4ac); + rtl8168_mdio_write(tp, 0x06, 0x2003); + rtl8168_mdio_write(tp, 0x06, 0x0223); + rtl8168_mdio_write(tp, 0x06, 0x98e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x09e0); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x13fb); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2309); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xf4ac); + rtl8168_mdio_write(tp, 0x06, 0x2203); + rtl8168_mdio_write(tp, 0x06, 0x0212); + rtl8168_mdio_write(tp, 0x06, 0xfae0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x09e0); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + 
rtl8168_mdio_write(tp, 0x06, 0xac23); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x83c1); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2608); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0xd2ad); + rtl8168_mdio_write(tp, 0x06, 0x2502); + rtl8168_mdio_write(tp, 0x06, 0xf628); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x210a); + rtl8168_mdio_write(tp, 0x06, 0xe084); + rtl8168_mdio_write(tp, 0x06, 0x0af6); + rtl8168_mdio_write(tp, 0x06, 0x27a0); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0xf629); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2008); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xe8ad); + rtl8168_mdio_write(tp, 0x06, 0x2102); + rtl8168_mdio_write(tp, 0x06, 0xf62a); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2308); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x20a0); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0xf62b); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2408); + rtl8168_mdio_write(tp, 0x06, 0xe086); + rtl8168_mdio_write(tp, 0x06, 0x02a0); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0xf62c); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xf4a1); + rtl8168_mdio_write(tp, 0x06, 0x0008); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf21); + rtl8168_mdio_write(tp, 0x06, 0xd502); + rtl8168_mdio_write(tp, 0x06, 0x3723); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x0200); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x241e); + rtl8168_mdio_write(tp, 0x06, 0xe086); + rtl8168_mdio_write(tp, 0x06, 0x02a0); + rtl8168_mdio_write(tp, 0x06, 0x0005); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0xe8ae); + rtl8168_mdio_write(tp, 0x06, 0xf5a0); + rtl8168_mdio_write(tp, 0x06, 0x0105); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0xf8ae); + rtl8168_mdio_write(tp, 0x06, 0x0ba0); + rtl8168_mdio_write(tp, 0x06, 0x0205); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0x14ae); + rtl8168_mdio_write(tp, 0x06, 0x03a0); + rtl8168_mdio_write(tp, 0x06, 0x0300); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0x2bee); + rtl8168_mdio_write(tp, 0x06, 0x8602); + rtl8168_mdio_write(tp, 0x06, 0x01ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8ee); + rtl8168_mdio_write(tp, 0x06, 0x8609); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x8461); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xae10); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 
0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8608); + rtl8168_mdio_write(tp, 0x06, 0xe186); + rtl8168_mdio_write(tp, 0x06, 0x091f); + rtl8168_mdio_write(tp, 0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x0611); + rtl8168_mdio_write(tp, 0x06, 0xe586); + rtl8168_mdio_write(tp, 0x06, 0x09ae); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x8602); + rtl8168_mdio_write(tp, 0x06, 0x01fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfbbf); + rtl8168_mdio_write(tp, 0x06, 0x8604); + rtl8168_mdio_write(tp, 0x06, 0xef79); + rtl8168_mdio_write(tp, 0x06, 0xd200); + rtl8168_mdio_write(tp, 0x06, 0xd400); + rtl8168_mdio_write(tp, 0x06, 0x221e); + rtl8168_mdio_write(tp, 0x06, 0x02bf); + rtl8168_mdio_write(tp, 0x06, 0x2fec); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23bf); + rtl8168_mdio_write(tp, 0x06, 0x13f2); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf60d); + rtl8168_mdio_write(tp, 0x06, 0x4559); + rtl8168_mdio_write(tp, 0x06, 0x1fef); + rtl8168_mdio_write(tp, 0x06, 0x97dd); + rtl8168_mdio_write(tp, 0x06, 0xd308); + rtl8168_mdio_write(tp, 0x06, 0x1a93); + rtl8168_mdio_write(tp, 0x06, 0xdd12); + rtl8168_mdio_write(tp, 0x06, 0x17a2); + rtl8168_mdio_write(tp, 0x06, 0x04de); + rtl8168_mdio_write(tp, 0x06, 0xffef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfbee); + rtl8168_mdio_write(tp, 0x06, 0x8602); + rtl8168_mdio_write(tp, 0x06, 0x03d5); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x06, 0xbf86); + rtl8168_mdio_write(tp, 0x06, 0x04ef); + rtl8168_mdio_write(tp, 0x06, 0x79ef); + rtl8168_mdio_write(tp, 0x06, 0x45bf); + rtl8168_mdio_write(tp, 0x06, 0x2fec); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23bf); + rtl8168_mdio_write(tp, 0x06, 0x13f2); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf6ad); + rtl8168_mdio_write(tp, 0x06, 0x2702); + rtl8168_mdio_write(tp, 0x06, 0x78ff); + rtl8168_mdio_write(tp, 0x06, 0xe186); + rtl8168_mdio_write(tp, 0x06, 0x0a1b); + rtl8168_mdio_write(tp, 0x06, 0x01aa); + rtl8168_mdio_write(tp, 0x06, 0x2eef); + rtl8168_mdio_write(tp, 0x06, 0x97d9); + rtl8168_mdio_write(tp, 0x06, 0x7900); + rtl8168_mdio_write(tp, 0x06, 0x9e2b); + rtl8168_mdio_write(tp, 0x06, 0x81dd); + rtl8168_mdio_write(tp, 0x06, 0xbf85); + rtl8168_mdio_write(tp, 0x06, 0xad02); + rtl8168_mdio_write(tp, 0x06, 0x3723); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xef02); + rtl8168_mdio_write(tp, 0x06, 0x100c); + rtl8168_mdio_write(tp, 0x06, 0x11b0); + rtl8168_mdio_write(tp, 0x06, 0xfc0d); + rtl8168_mdio_write(tp, 0x06, 0x11bf); + rtl8168_mdio_write(tp, 0x06, 0x85aa); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x85aa); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x23ee); + rtl8168_mdio_write(tp, 0x06, 0x8602); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 
0x06, 0x0413); + rtl8168_mdio_write(tp, 0x06, 0xa38b); + rtl8168_mdio_write(tp, 0x06, 0xb4d3); + rtl8168_mdio_write(tp, 0x06, 0x8012); + rtl8168_mdio_write(tp, 0x06, 0x17a2); + rtl8168_mdio_write(tp, 0x06, 0x04ad); + rtl8168_mdio_write(tp, 0x06, 0xffef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x48e0); + rtl8168_mdio_write(tp, 0x06, 0x8a96); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0x977c); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x9e35); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x9600); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x9700); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xbee1); + rtl8168_mdio_write(tp, 0x06, 0x8abf); + rtl8168_mdio_write(tp, 0x06, 0xe286); + rtl8168_mdio_write(tp, 0x06, 0x10e3); + rtl8168_mdio_write(tp, 0x06, 0x8611); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0x1aad); + rtl8168_mdio_write(tp, 0x06, 0x2012); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x9603); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x97b7); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x1000); + rtl8168_mdio_write(tp, 0x06, 0xee86); + rtl8168_mdio_write(tp, 0x06, 0x1100); + rtl8168_mdio_write(tp, 0x06, 0xae11); + rtl8168_mdio_write(tp, 0x06, 0x15e6); + rtl8168_mdio_write(tp, 0x06, 0x8610); + rtl8168_mdio_write(tp, 0x06, 0xe786); + rtl8168_mdio_write(tp, 0x06, 0x11ae); + rtl8168_mdio_write(tp, 0x06, 0x08ee); + rtl8168_mdio_write(tp, 0x06, 0x8610); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8611); + rtl8168_mdio_write(tp, 0x06, 0x00fd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe001); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x32e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf720); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40bf); + rtl8168_mdio_write(tp, 0x06, 0x31f5); + rtl8168_mdio_write(tp, 0x06, 0x0236); + rtl8168_mdio_write(tp, 0x06, 0xf6ad); + rtl8168_mdio_write(tp, 0x06, 0x2821); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x20e1); + rtl8168_mdio_write(tp, 0x06, 0xe021); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x18e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40ee); + rtl8168_mdio_write(tp, 0x06, 0x8b3b); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0x8a8a); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0x8be4); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0x01ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x80ad); + 
rtl8168_mdio_write(tp, 0x06, 0x2722); + rtl8168_mdio_write(tp, 0x06, 0xbf44); + rtl8168_mdio_write(tp, 0x06, 0xfc02); + rtl8168_mdio_write(tp, 0x06, 0x36f6); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x441f); + rtl8168_mdio_write(tp, 0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x15e5); + rtl8168_mdio_write(tp, 0x06, 0x8b44); + rtl8168_mdio_write(tp, 0x06, 0xad29); + rtl8168_mdio_write(tp, 0x06, 0x07ac); + rtl8168_mdio_write(tp, 0x06, 0x2804); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xae02); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf85); + rtl8168_mdio_write(tp, 0x06, 0xb002); + rtl8168_mdio_write(tp, 0x06, 0x3723); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x0400); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0x77e1); + rtl8168_mdio_write(tp, 0x06, 0x40dd); + rtl8168_mdio_write(tp, 0x06, 0xe022); + rtl8168_mdio_write(tp, 0x06, 0x32e1); + rtl8168_mdio_write(tp, 0x06, 0x5074); + rtl8168_mdio_write(tp, 0x06, 0xe144); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0xdaff); + rtl8168_mdio_write(tp, 0x06, 0xe0c0); + rtl8168_mdio_write(tp, 0x06, 0x52e0); + rtl8168_mdio_write(tp, 0x06, 0xeed9); + rtl8168_mdio_write(tp, 0x06, 0xe04c); + rtl8168_mdio_write(tp, 0x06, 0xbbe0); + rtl8168_mdio_write(tp, 0x06, 0x2a00); + rtl8168_mdio_write(tp, 0x05, 0xe142); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x05, 0xe140); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0042); + rtl8168_mdio_write(tp, 0x18, 0x2300); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + if (tp->RequiredSecLanDonglePatch) { + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~BIT_2; + rtl8168_mdio_write(tp, 0x17, gphy_val); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); +} + +static void +rtl8168_set_phy_mcu_8168f_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val,i; + + rtl8168_mdio_write(tp,0x1f, 0x0000); + rtl8168_mdio_write(tp,0x00, 0x1800); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val &= ~(BIT_12); + rtl8168_mdio_write(tp,0x15, gphy_val); + rtl8168_mdio_write(tp,0x00, 0x4800); + rtl8168_mdio_write(tp,0x1f, 0x0007); + rtl8168_mdio_write(tp,0x1e, 0x002f); + for (i = 0; i < 1000; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1c); + if (gphy_val & 0x0080) + break; + } + rtl8168_mdio_write(tp,0x1f, 0x0000); + rtl8168_mdio_write(tp,0x00, 0x1800); + rtl8168_mdio_write(tp,0x1f, 0x0007); + rtl8168_mdio_write(tp,0x1e, 0x0023); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x18); + if (!(gphy_val & 0x0001)) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + 
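+ /*
+  * Editor's note (inference): the 0x15/0x19 pairs that follow behave like
+  * an address/data port into the PHY's patch RAM: 0x15 selects an entry
+  * and 0x19 stores its value. A hypothetical table-driven sketch of the
+  * same stream, for illustration only:
+  *
+  *	static const struct { u16 addr, data; } patch[] = {
+  *		{ 0x0194, 0x407d }, { 0x0098, 0x7c0b },
+  *	};
+  *	for (i = 0; i < ARRAY_SIZE(patch); i++) {
+  *		rtl8168_mdio_write(tp, 0x15, patch[i].addr);
+  *		rtl8168_mdio_write(tp, 0x19, patch[i].data);
+  *	}
+  */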
rtl8168_mdio_write(tp, 0x15, 0x0194); + rtl8168_mdio_write(tp, 0x19, 0x407D); + rtl8168_mdio_write(tp, 0x15, 0x0098); + rtl8168_mdio_write(tp, 0x19, 0x7c0b); + rtl8168_mdio_write(tp, 0x15, 0x0099); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00eb); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00f8); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00fe); + rtl8168_mdio_write(tp, 0x19, 0x6f0f); + rtl8168_mdio_write(tp, 0x15, 0x00db); + rtl8168_mdio_write(tp, 0x19, 0x6f09); + rtl8168_mdio_write(tp, 0x15, 0x00dc); + rtl8168_mdio_write(tp, 0x19, 0xaefd); + rtl8168_mdio_write(tp, 0x15, 0x00dd); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00de); + rtl8168_mdio_write(tp, 0x19, 0xc60b); + rtl8168_mdio_write(tp, 0x15, 0x00df); + rtl8168_mdio_write(tp, 0x19, 0x00fa); + rtl8168_mdio_write(tp, 0x15, 0x00e0); + rtl8168_mdio_write(tp, 0x19, 0x30e1); + rtl8168_mdio_write(tp, 0x15, 0x020c); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x020e); + rtl8168_mdio_write(tp, 0x19, 0x9813); + rtl8168_mdio_write(tp, 0x15, 0x020f); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x0210); + rtl8168_mdio_write(tp, 0x19, 0x930f); + rtl8168_mdio_write(tp, 0x15, 0x0211); + rtl8168_mdio_write(tp, 0x19, 0x9206); + rtl8168_mdio_write(tp, 0x15, 0x0212); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0213); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0214); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x0215); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0216); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x0217); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0218); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0219); + rtl8168_mdio_write(tp, 0x19, 0x588d); + rtl8168_mdio_write(tp, 0x15, 0x021a); + rtl8168_mdio_write(tp, 0x19, 0x5540); + rtl8168_mdio_write(tp, 0x15, 0x021b); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x021c); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x021d); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x021e); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x021f); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0220); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x0221); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x0222); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0223); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x0224); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0225); + rtl8168_mdio_write(tp, 0x19, 0x3231); + rtl8168_mdio_write(tp, 0x15, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 
0xf602); + rtl8168_mdio_write(tp, 0x06, 0x0118); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x2502); + rtl8168_mdio_write(tp, 0x06, 0x8090); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x4202); + rtl8168_mdio_write(tp, 0x06, 0x015c); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0xad02); + rtl8168_mdio_write(tp, 0x06, 0x80ca); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xd484); + rtl8168_mdio_write(tp, 0x06, 0x3ce4); + rtl8168_mdio_write(tp, 0x06, 0x8b92); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x93ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac8); + rtl8168_mdio_write(tp, 0x06, 0x03ee); + rtl8168_mdio_write(tp, 0x06, 0x8aca); + rtl8168_mdio_write(tp, 0x06, 0x60ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac0); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac1); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8abe); + rtl8168_mdio_write(tp, 0x06, 0x07ee); + rtl8168_mdio_write(tp, 0x06, 0x8abf); + rtl8168_mdio_write(tp, 0x06, 0x73ee); + rtl8168_mdio_write(tp, 0x06, 0x8a95); + rtl8168_mdio_write(tp, 0x06, 0x02bf); + rtl8168_mdio_write(tp, 0x06, 0x8b88); + rtl8168_mdio_write(tp, 0x06, 0xec00); + rtl8168_mdio_write(tp, 0x06, 0x19a9); + rtl8168_mdio_write(tp, 0x06, 0x8b90); + rtl8168_mdio_write(tp, 0x06, 0xf9ee); + rtl8168_mdio_write(tp, 0x06, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xfed1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x85a4); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7dd1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x85a7); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7d04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8a); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x14ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8a); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x204b); + rtl8168_mdio_write(tp, 0x06, 0xe0e4); + rtl8168_mdio_write(tp, 0x06, 0x26e1); + rtl8168_mdio_write(tp, 0x06, 0xe427); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x2623); + rtl8168_mdio_write(tp, 0x06, 0xe5e4); + rtl8168_mdio_write(tp, 0x06, 0x27fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8dad); + rtl8168_mdio_write(tp, 0x06, 0x2014); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8d00); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0x5a78); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 
0x06, 0x0902); + rtl8168_mdio_write(tp, 0x06, 0x05e8); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x4f02); + rtl8168_mdio_write(tp, 0x06, 0x326c); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x1df6); + rtl8168_mdio_write(tp, 0x06, 0x20e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x0902); + rtl8168_mdio_write(tp, 0x06, 0x2ab0); + rtl8168_mdio_write(tp, 0x06, 0x0285); + rtl8168_mdio_write(tp, 0x06, 0x1602); + rtl8168_mdio_write(tp, 0x06, 0x03ba); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0xe502); + rtl8168_mdio_write(tp, 0x06, 0x2df1); + rtl8168_mdio_write(tp, 0x06, 0x0283); + rtl8168_mdio_write(tp, 0x06, 0x8302); + rtl8168_mdio_write(tp, 0x06, 0x0475); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x210b); + rtl8168_mdio_write(tp, 0x06, 0xf621); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x83f8); + rtl8168_mdio_write(tp, 0x06, 0x021c); + rtl8168_mdio_write(tp, 0x06, 0x99e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x22e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0235); + rtl8168_mdio_write(tp, 0x06, 0x63e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad23); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x23e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0231); + rtl8168_mdio_write(tp, 0x06, 0x57e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x24e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2505); + rtl8168_mdio_write(tp, 0x06, 0xf625); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x26e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x022d); + rtl8168_mdio_write(tp, 0x06, 0x1ce0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x27e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x80fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac26); + rtl8168_mdio_write(tp, 0x06, 0x1ae0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x14e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xac20); + rtl8168_mdio_write(tp, 0x06, 0x0ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xac23); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xac24); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + 
rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0x1ac2); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x1c04); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x1d04); + rtl8168_mdio_write(tp, 0x06, 0xe2e0); + rtl8168_mdio_write(tp, 0x06, 0x7ce3); + rtl8168_mdio_write(tp, 0x06, 0xe07d); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x38e1); + rtl8168_mdio_write(tp, 0x06, 0xe039); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x1bad); + rtl8168_mdio_write(tp, 0x06, 0x390d); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf22); + rtl8168_mdio_write(tp, 0x06, 0x7a02); + rtl8168_mdio_write(tp, 0x06, 0x387d); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xacae); + rtl8168_mdio_write(tp, 0x06, 0x0bac); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xe902); + rtl8168_mdio_write(tp, 0x06, 0x822e); + rtl8168_mdio_write(tp, 0x06, 0x021a); + rtl8168_mdio_write(tp, 0x06, 0xd3fd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2602); + rtl8168_mdio_write(tp, 0x06, 0xf728); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2105); + rtl8168_mdio_write(tp, 0x06, 0x0222); + rtl8168_mdio_write(tp, 0x06, 0x8ef7); + rtl8168_mdio_write(tp, 0x06, 0x29e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x14b8); + rtl8168_mdio_write(tp, 0x06, 0xf72a); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2305); + rtl8168_mdio_write(tp, 0x06, 0x0212); + rtl8168_mdio_write(tp, 0x06, 0xf4f7); + rtl8168_mdio_write(tp, 0x06, 0x2be0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0x8284); + rtl8168_mdio_write(tp, 0x06, 0xf72c); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xf4fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2600); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2109); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xf4ac); + rtl8168_mdio_write(tp, 0x06, 0x2003); + rtl8168_mdio_write(tp, 0x06, 0x0222); + rtl8168_mdio_write(tp, 0x06, 0x7de0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x09e0); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x1408); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2309); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xf4ac); + rtl8168_mdio_write(tp, 0x06, 0x2203); + rtl8168_mdio_write(tp, 0x06, 0x0213); + rtl8168_mdio_write(tp, 0x06, 0x07e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 
0xad24); + rtl8168_mdio_write(tp, 0x06, 0x09e0); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xac23); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0x8289); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e1); + rtl8168_mdio_write(tp, 0x06, 0x8af4); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x2602); + rtl8168_mdio_write(tp, 0x06, 0xf628); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ad); + rtl8168_mdio_write(tp, 0x06, 0x210a); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0xecf6); + rtl8168_mdio_write(tp, 0x06, 0x27a0); + rtl8168_mdio_write(tp, 0x06, 0x0502); + rtl8168_mdio_write(tp, 0x06, 0xf629); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2008); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xe8ad); + rtl8168_mdio_write(tp, 0x06, 0x2102); + rtl8168_mdio_write(tp, 0x06, 0xf62a); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ad); + rtl8168_mdio_write(tp, 0x06, 0x2308); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x20a0); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0xf62b); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x2408); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xc2a0); + rtl8168_mdio_write(tp, 0x06, 0x0302); + rtl8168_mdio_write(tp, 0x06, 0xf62c); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xf4a1); + rtl8168_mdio_write(tp, 0x06, 0x0008); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf22); + rtl8168_mdio_write(tp, 0x06, 0x7a02); + rtl8168_mdio_write(tp, 0x06, 0x387d); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xc200); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ad); + rtl8168_mdio_write(tp, 0x06, 0x241e); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xc2a0); + rtl8168_mdio_write(tp, 0x06, 0x0005); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xb0ae); + rtl8168_mdio_write(tp, 0x06, 0xf5a0); + rtl8168_mdio_write(tp, 0x06, 0x0105); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xc0ae); + rtl8168_mdio_write(tp, 0x06, 0x0ba0); + rtl8168_mdio_write(tp, 0x06, 0x0205); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xcaae); + rtl8168_mdio_write(tp, 0x06, 0x03a0); + rtl8168_mdio_write(tp, 0x06, 0x0300); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xe1ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac2); + rtl8168_mdio_write(tp, 0x06, 0x01ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac9); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x8317); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8ac8); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xc91f); + rtl8168_mdio_write(tp, 
0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x0611); + rtl8168_mdio_write(tp, 0x06, 0xe58a); + rtl8168_mdio_write(tp, 0x06, 0xc9ae); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac2); + rtl8168_mdio_write(tp, 0x06, 0x01fc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfbbf); + rtl8168_mdio_write(tp, 0x06, 0x8ac4); + rtl8168_mdio_write(tp, 0x06, 0xef79); + rtl8168_mdio_write(tp, 0x06, 0xd200); + rtl8168_mdio_write(tp, 0x06, 0xd400); + rtl8168_mdio_write(tp, 0x06, 0x221e); + rtl8168_mdio_write(tp, 0x06, 0x02bf); + rtl8168_mdio_write(tp, 0x06, 0x3024); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7dbf); + rtl8168_mdio_write(tp, 0x06, 0x13ff); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x500d); + rtl8168_mdio_write(tp, 0x06, 0x4559); + rtl8168_mdio_write(tp, 0x06, 0x1fef); + rtl8168_mdio_write(tp, 0x06, 0x97dd); + rtl8168_mdio_write(tp, 0x06, 0xd308); + rtl8168_mdio_write(tp, 0x06, 0x1a93); + rtl8168_mdio_write(tp, 0x06, 0xdd12); + rtl8168_mdio_write(tp, 0x06, 0x17a2); + rtl8168_mdio_write(tp, 0x06, 0x04de); + rtl8168_mdio_write(tp, 0x06, 0xffef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xfbee); + rtl8168_mdio_write(tp, 0x06, 0x8ac2); + rtl8168_mdio_write(tp, 0x06, 0x03d5); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x06, 0xbf8a); + rtl8168_mdio_write(tp, 0x06, 0xc4ef); + rtl8168_mdio_write(tp, 0x06, 0x79ef); + rtl8168_mdio_write(tp, 0x06, 0x45bf); + rtl8168_mdio_write(tp, 0x06, 0x3024); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7dbf); + rtl8168_mdio_write(tp, 0x06, 0x13ff); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x50ad); + rtl8168_mdio_write(tp, 0x06, 0x2702); + rtl8168_mdio_write(tp, 0x06, 0x78ff); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0xca1b); + rtl8168_mdio_write(tp, 0x06, 0x01aa); + rtl8168_mdio_write(tp, 0x06, 0x2eef); + rtl8168_mdio_write(tp, 0x06, 0x97d9); + rtl8168_mdio_write(tp, 0x06, 0x7900); + rtl8168_mdio_write(tp, 0x06, 0x9e2b); + rtl8168_mdio_write(tp, 0x06, 0x81dd); + rtl8168_mdio_write(tp, 0x06, 0xbf85); + rtl8168_mdio_write(tp, 0x06, 0xad02); + rtl8168_mdio_write(tp, 0x06, 0x387d); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xef02); + rtl8168_mdio_write(tp, 0x06, 0x100c); + rtl8168_mdio_write(tp, 0x06, 0x11b0); + rtl8168_mdio_write(tp, 0x06, 0xfc0d); + rtl8168_mdio_write(tp, 0x06, 0x11bf); + rtl8168_mdio_write(tp, 0x06, 0x85aa); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7dd1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x85aa); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7dee); + rtl8168_mdio_write(tp, 0x06, 0x8ac2); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x0413); + rtl8168_mdio_write(tp, 0x06, 0xa38b); + rtl8168_mdio_write(tp, 0x06, 0xb4d3); + rtl8168_mdio_write(tp, 0x06, 0x8012); + rtl8168_mdio_write(tp, 0x06, 0x17a2); + rtl8168_mdio_write(tp, 0x06, 0x04ad); + rtl8168_mdio_write(tp, 0x06, 0xffef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + 
rtl8168_mdio_write(tp, 0x06, 0xf9e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x48e0); + rtl8168_mdio_write(tp, 0x06, 0x8a96); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0x977c); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x9e35); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x9600); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x9700); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0xbee1); + rtl8168_mdio_write(tp, 0x06, 0x8abf); + rtl8168_mdio_write(tp, 0x06, 0xe28a); + rtl8168_mdio_write(tp, 0x06, 0xc0e3); + rtl8168_mdio_write(tp, 0x06, 0x8ac1); + rtl8168_mdio_write(tp, 0x06, 0x0237); + rtl8168_mdio_write(tp, 0x06, 0x74ad); + rtl8168_mdio_write(tp, 0x06, 0x2012); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x9603); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0x97b7); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xc000); + rtl8168_mdio_write(tp, 0x06, 0xee8a); + rtl8168_mdio_write(tp, 0x06, 0xc100); + rtl8168_mdio_write(tp, 0x06, 0xae11); + rtl8168_mdio_write(tp, 0x06, 0x15e6); + rtl8168_mdio_write(tp, 0x06, 0x8ac0); + rtl8168_mdio_write(tp, 0x06, 0xe78a); + rtl8168_mdio_write(tp, 0x06, 0xc1ae); + rtl8168_mdio_write(tp, 0x06, 0x08ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac0); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8ac1); + rtl8168_mdio_write(tp, 0x06, 0x00fd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xae20); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe001); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x32e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf720); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40bf); + rtl8168_mdio_write(tp, 0x06, 0x3230); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x50ad); + rtl8168_mdio_write(tp, 0x06, 0x2821); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x20e1); + rtl8168_mdio_write(tp, 0x06, 0xe021); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x18e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40ee); + rtl8168_mdio_write(tp, 0x06, 0x8b3b); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0x8a8a); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0x8be4); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 
0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0x01ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xface); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69fa); + rtl8168_mdio_write(tp, 0x06, 0xd401); + rtl8168_mdio_write(tp, 0x06, 0x55b4); + rtl8168_mdio_write(tp, 0x06, 0xfebf); + rtl8168_mdio_write(tp, 0x06, 0x1c1e); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x50ac); + rtl8168_mdio_write(tp, 0x06, 0x280b); + rtl8168_mdio_write(tp, 0x06, 0xbf1c); + rtl8168_mdio_write(tp, 0x06, 0x1b02); + rtl8168_mdio_write(tp, 0x06, 0x3850); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x49ae); + rtl8168_mdio_write(tp, 0x06, 0x64bf); + rtl8168_mdio_write(tp, 0x06, 0x1c1b); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x50ac); + rtl8168_mdio_write(tp, 0x06, 0x285b); + rtl8168_mdio_write(tp, 0x06, 0xd000); + rtl8168_mdio_write(tp, 0x06, 0x0284); + rtl8168_mdio_write(tp, 0x06, 0xcaac); + rtl8168_mdio_write(tp, 0x06, 0x2105); + rtl8168_mdio_write(tp, 0x06, 0xac22); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x4ebf); + rtl8168_mdio_write(tp, 0x06, 0xe0c4); + rtl8168_mdio_write(tp, 0x06, 0xbe85); + rtl8168_mdio_write(tp, 0x06, 0xf6d2); + rtl8168_mdio_write(tp, 0x06, 0x04d8); + rtl8168_mdio_write(tp, 0x06, 0x19d9); + rtl8168_mdio_write(tp, 0x06, 0x1907); + rtl8168_mdio_write(tp, 0x06, 0xdc19); + rtl8168_mdio_write(tp, 0x06, 0xdd19); + rtl8168_mdio_write(tp, 0x06, 0x0789); + rtl8168_mdio_write(tp, 0x06, 0x89ef); + rtl8168_mdio_write(tp, 0x06, 0x645e); + rtl8168_mdio_write(tp, 0x06, 0x07ff); + rtl8168_mdio_write(tp, 0x06, 0x0d65); + rtl8168_mdio_write(tp, 0x06, 0x5cf8); + rtl8168_mdio_write(tp, 0x06, 0x001e); + rtl8168_mdio_write(tp, 0x06, 0x46dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0x19b2); + rtl8168_mdio_write(tp, 0x06, 0xe2d4); + rtl8168_mdio_write(tp, 0x06, 0x0001); + rtl8168_mdio_write(tp, 0x06, 0xbf1c); + rtl8168_mdio_write(tp, 0x06, 0x1b02); + rtl8168_mdio_write(tp, 0x06, 0x387d); + rtl8168_mdio_write(tp, 0x06, 0xae1d); + rtl8168_mdio_write(tp, 0x06, 0xbee0); + rtl8168_mdio_write(tp, 0x06, 0xc4bf); + rtl8168_mdio_write(tp, 0x06, 0x85f6); + rtl8168_mdio_write(tp, 0x06, 0xd204); + rtl8168_mdio_write(tp, 0x06, 0xd819); + rtl8168_mdio_write(tp, 0x06, 0xd919); + rtl8168_mdio_write(tp, 0x06, 0x07dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0x1907); + rtl8168_mdio_write(tp, 0x06, 0xb2f4); + rtl8168_mdio_write(tp, 0x06, 0xd400); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x1c1b); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7dfe); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfec6); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc05); + rtl8168_mdio_write(tp, 0x06, 0xf9e2); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0xeb5a); + rtl8168_mdio_write(tp, 0x06, 0x070c); + rtl8168_mdio_write(tp, 0x06, 0x031e); + rtl8168_mdio_write(tp, 0x06, 0x20e6); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe7e0); + rtl8168_mdio_write(tp, 0x06, 0xebe0); + rtl8168_mdio_write(tp, 0x06, 0xe0fc); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xfdfd); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 
0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0x8b80); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x22bf); + rtl8168_mdio_write(tp, 0x06, 0x4616); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x50e0); + rtl8168_mdio_write(tp, 0x06, 0x8b44); + rtl8168_mdio_write(tp, 0x06, 0x1f01); + rtl8168_mdio_write(tp, 0x06, 0x9e15); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x44ad); + rtl8168_mdio_write(tp, 0x06, 0x2907); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x04d1); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0x02d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x85b0); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7def); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x30e0); + rtl8168_mdio_write(tp, 0x06, 0xe036); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x37e1); + rtl8168_mdio_write(tp, 0x06, 0x8b3f); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e23); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x3fac); + rtl8168_mdio_write(tp, 0x06, 0x200b); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x0dac); + rtl8168_mdio_write(tp, 0x06, 0x250f); + rtl8168_mdio_write(tp, 0x06, 0xac27); + rtl8168_mdio_write(tp, 0x06, 0x11ae); + rtl8168_mdio_write(tp, 0x06, 0x1202); + rtl8168_mdio_write(tp, 0x06, 0x2c47); + rtl8168_mdio_write(tp, 0x06, 0xae0d); + rtl8168_mdio_write(tp, 0x06, 0x0285); + rtl8168_mdio_write(tp, 0x06, 0x4fae); + rtl8168_mdio_write(tp, 0x06, 0x0802); + rtl8168_mdio_write(tp, 0x06, 0x2c69); + rtl8168_mdio_write(tp, 0x06, 0xae03); + rtl8168_mdio_write(tp, 0x06, 0x022c); + rtl8168_mdio_write(tp, 0x06, 0x7cfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x6902); + rtl8168_mdio_write(tp, 0x06, 0x856c); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x14e1); + rtl8168_mdio_write(tp, 0x06, 0xe015); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x08d1); + rtl8168_mdio_write(tp, 0x06, 0x1ebf); + rtl8168_mdio_write(tp, 0x06, 0x2cd9); + rtl8168_mdio_write(tp, 0x06, 0x0238); + rtl8168_mdio_write(tp, 0x06, 0x7def); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x2fd0); + rtl8168_mdio_write(tp, 0x06, 0x0b02); + rtl8168_mdio_write(tp, 0x06, 0x3682); + rtl8168_mdio_write(tp, 0x06, 0x5882); + rtl8168_mdio_write(tp, 0x06, 0x7882); + rtl8168_mdio_write(tp, 0x06, 0x9f24); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x32e1); + rtl8168_mdio_write(tp, 0x06, 0x8b33); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e1a); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x8b32); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x28e1); + rtl8168_mdio_write(tp, 0x06, 0xe029); + rtl8168_mdio_write(tp, 0x06, 0xf72c); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x28e5); + 
rtl8168_mdio_write(tp, 0x06, 0xe029); + rtl8168_mdio_write(tp, 0x06, 0xf62c); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x28e5); + rtl8168_mdio_write(tp, 0x06, 0xe029); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0x4077); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0x52e0); + rtl8168_mdio_write(tp, 0x06, 0xeed9); + rtl8168_mdio_write(tp, 0x06, 0xe04c); + rtl8168_mdio_write(tp, 0x06, 0xbbe0); + rtl8168_mdio_write(tp, 0x06, 0x2a00); + rtl8168_mdio_write(tp, 0x05, 0xe142); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp,0x06, gphy_val); + rtl8168_mdio_write(tp, 0x05, 0xe140); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp,0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp,0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val |= BIT_1; + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~BIT_2; + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x09, 0xA20F); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0003); + rtl8168_mdio_write(tp, 0x01, 0x328A); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); +} + +static void +rtl8168_set_phy_mcu_8168f_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val,i; + + rtl8168_mdio_write(tp,0x1f, 0x0000); + rtl8168_mdio_write(tp,0x00, 0x1800); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val &= ~(BIT_12); + rtl8168_mdio_write(tp,0x15, gphy_val); + rtl8168_mdio_write(tp,0x00, 0x4800); + rtl8168_mdio_write(tp,0x1f, 0x0007); + rtl8168_mdio_write(tp,0x1e, 0x002f); + for (i = 0; i < 1000; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1c); + if (gphy_val & 0x0080) + break; + } + rtl8168_mdio_write(tp,0x1f, 0x0000); + rtl8168_mdio_write(tp,0x00, 0x1800); + rtl8168_mdio_write(tp,0x1f, 0x0007); + rtl8168_mdio_write(tp,0x1e, 0x0023); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x18); + if (!(gphy_val & 0x0001)) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + rtl8168_mdio_write(tp, 0x15, 0x0098); + rtl8168_mdio_write(tp, 0x19, 0x7c0b); + rtl8168_mdio_write(tp, 0x15, 0x0099); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00eb); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00f8); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00fe); + rtl8168_mdio_write(tp, 0x19, 0x6f0f); + rtl8168_mdio_write(tp, 0x15, 0x00db); + rtl8168_mdio_write(tp, 0x19, 0x6f09); + rtl8168_mdio_write(tp, 0x15, 0x00dc); + rtl8168_mdio_write(tp, 0x19, 0xaefd); + rtl8168_mdio_write(tp, 0x15, 0x00dd); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00de); + rtl8168_mdio_write(tp, 0x19, 0xc60b); + rtl8168_mdio_write(tp, 0x15, 
0x00df); + rtl8168_mdio_write(tp, 0x19, 0x00fa); + rtl8168_mdio_write(tp, 0x15, 0x00e0); + rtl8168_mdio_write(tp, 0x19, 0x30e1); + rtl8168_mdio_write(tp, 0x15, 0x020c); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x020e); + rtl8168_mdio_write(tp, 0x19, 0x9813); + rtl8168_mdio_write(tp, 0x15, 0x020f); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x0210); + rtl8168_mdio_write(tp, 0x19, 0x930f); + rtl8168_mdio_write(tp, 0x15, 0x0211); + rtl8168_mdio_write(tp, 0x19, 0x9206); + rtl8168_mdio_write(tp, 0x15, 0x0212); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0213); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0214); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x0215); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0216); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x0217); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0218); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0219); + rtl8168_mdio_write(tp, 0x19, 0x588d); + rtl8168_mdio_write(tp, 0x15, 0x021a); + rtl8168_mdio_write(tp, 0x19, 0x5540); + rtl8168_mdio_write(tp, 0x15, 0x021b); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x021c); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x021d); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x021e); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x021f); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0220); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x0221); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x0222); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0223); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x0224); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0225); + rtl8168_mdio_write(tp, 0x19, 0x3231); + rtl8168_mdio_write(tp, 0x15, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0xf602); + rtl8168_mdio_write(tp, 0x06, 0x011b); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x2802); + rtl8168_mdio_write(tp, 0x06, 0x0135); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x4502); + rtl8168_mdio_write(tp, 0x06, 0x015f); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x6b02); + rtl8168_mdio_write(tp, 0x06, 0x80e5); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 
0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xbf8b); + rtl8168_mdio_write(tp, 0x06, 0x88ec); + rtl8168_mdio_write(tp, 0x06, 0x0019); + rtl8168_mdio_write(tp, 0x06, 0xa98b); + rtl8168_mdio_write(tp, 0x06, 0x90f9); + rtl8168_mdio_write(tp, 0x06, 0xeeff); + rtl8168_mdio_write(tp, 0x06, 0xf600); + rtl8168_mdio_write(tp, 0x06, 0xeeff); + rtl8168_mdio_write(tp, 0x06, 0xf7fe); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf81); + rtl8168_mdio_write(tp, 0x06, 0x9802); + rtl8168_mdio_write(tp, 0x06, 0x39f3); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf81); + rtl8168_mdio_write(tp, 0x06, 0x9b02); + rtl8168_mdio_write(tp, 0x06, 0x39f3); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8dad); + rtl8168_mdio_write(tp, 0x06, 0x2014); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8d00); + rtl8168_mdio_write(tp, 0x06, 0xe08a); + rtl8168_mdio_write(tp, 0x06, 0x5a78); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x0902); + rtl8168_mdio_write(tp, 0x06, 0x05fc); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x8802); + rtl8168_mdio_write(tp, 0x06, 0x32dd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ac); + rtl8168_mdio_write(tp, 0x06, 0x261a); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x81ac); + rtl8168_mdio_write(tp, 0x06, 0x2114); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ac); + rtl8168_mdio_write(tp, 0x06, 0x200e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x85ac); + rtl8168_mdio_write(tp, 0x06, 0x2308); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x87ac); + rtl8168_mdio_write(tp, 0x06, 0x2402); + rtl8168_mdio_write(tp, 0x06, 0xae38); + rtl8168_mdio_write(tp, 0x06, 0x021a); + rtl8168_mdio_write(tp, 0x06, 0xd6ee); + rtl8168_mdio_write(tp, 0x06, 0xe41c); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0xe41d); + rtl8168_mdio_write(tp, 0x06, 0x04e2); + rtl8168_mdio_write(tp, 0x06, 0xe07c); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0x7de0); + rtl8168_mdio_write(tp, 0x06, 0xe038); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x39ad); + rtl8168_mdio_write(tp, 0x06, 0x2e1b); + rtl8168_mdio_write(tp, 0x06, 0xad39); + rtl8168_mdio_write(tp, 0x06, 0x0dd1); + rtl8168_mdio_write(tp, 0x06, 0x01bf); + rtl8168_mdio_write(tp, 0x06, 0x22c8); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xf302); + rtl8168_mdio_write(tp, 0x06, 0x21f0); + rtl8168_mdio_write(tp, 0x06, 0xae0b); + rtl8168_mdio_write(tp, 0x06, 0xac38); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x0602); + rtl8168_mdio_write(tp, 0x06, 0x222d); + rtl8168_mdio_write(tp, 0x06, 0x0222); + rtl8168_mdio_write(tp, 0x06, 0x7202); + rtl8168_mdio_write(tp, 0x06, 0x1ae7); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + 
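+ /*
+  * Editor's note (inference): rtl8168_set_phy_mcu_8168f_2 repeats the
+  * structure of the 8168f_1 variant above (halt the PHY MCU, poll for
+  * ready, load the 0x15/0x19 table, stream the 0x06 patch image, then set
+  * BIT_0 via the 0xe142/0xe140 window entries, presumably to arm the
+  * patch); only the payload bytes differ for this chip revision.
+  */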
rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x201a); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x2afe); + rtl8168_mdio_write(tp, 0x06, 0x022c); + rtl8168_mdio_write(tp, 0x06, 0x5c02); + rtl8168_mdio_write(tp, 0x06, 0x03c5); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x6702); + rtl8168_mdio_write(tp, 0x06, 0x2e4f); + rtl8168_mdio_write(tp, 0x06, 0x0204); + rtl8168_mdio_write(tp, 0x06, 0x8902); + rtl8168_mdio_write(tp, 0x06, 0x2f7a); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x210b); + rtl8168_mdio_write(tp, 0x06, 0xf621); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x0445); + rtl8168_mdio_write(tp, 0x06, 0x021c); + rtl8168_mdio_write(tp, 0x06, 0xb8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad22); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x22e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0235); + rtl8168_mdio_write(tp, 0x06, 0xd4e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad23); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x23e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0231); + rtl8168_mdio_write(tp, 0x06, 0xc8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad24); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x24e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2505); + rtl8168_mdio_write(tp, 0x06, 0xf625); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x08f6); + rtl8168_mdio_write(tp, 0x06, 0x26e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x022d); + rtl8168_mdio_write(tp, 0x06, 0x6ae0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x27e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0x8bfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0x8b80); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x22bf); + rtl8168_mdio_write(tp, 0x06, 0x479a); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xc6e0); + rtl8168_mdio_write(tp, 0x06, 0x8b44); + rtl8168_mdio_write(tp, 0x06, 0x1f01); + rtl8168_mdio_write(tp, 0x06, 0x9e15); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x44ad); + rtl8168_mdio_write(tp, 0x06, 0x2907); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x04d1); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0x02d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x819e); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xf3ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 
0x00e1); + rtl8168_mdio_write(tp, 0x06, 0x4077); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xbbe0); + rtl8168_mdio_write(tp, 0x06, 0x2a00); + rtl8168_mdio_write(tp, 0x05, 0xe142); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x05, 0xe140); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp,0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val |= BIT_1; + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~BIT_2; + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); +} + +static void +rtl8168_set_phy_mcu_8411_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val,i; + + rtl8168_mdio_write(tp,0x1f, 0x0000); + rtl8168_mdio_write(tp,0x00, 0x1800); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val &= ~(BIT_12); + rtl8168_mdio_write(tp,0x15, gphy_val); + rtl8168_mdio_write(tp,0x00, 0x4800); + rtl8168_mdio_write(tp,0x1f, 0x0007); + rtl8168_mdio_write(tp,0x1e, 0x002f); + for (i = 0; i < 1000; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x1c); + if (gphy_val & 0x0080) + break; + } + rtl8168_mdio_write(tp,0x1f, 0x0000); + rtl8168_mdio_write(tp,0x00, 0x1800); + rtl8168_mdio_write(tp,0x1f, 0x0007); + rtl8168_mdio_write(tp,0x1e, 0x0023); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x18); + if (!(gphy_val & 0x0001)) + break; + } + rtl8168_mdio_write(tp,0x1f, 0x0005); + rtl8168_mdio_write(tp,0x05, 0xfff6); + rtl8168_mdio_write(tp,0x06, 0x0080); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0307); + rtl8168_mdio_write(tp, 0x15, 0x0098); + rtl8168_mdio_write(tp, 0x19, 0x7c0b); + rtl8168_mdio_write(tp, 0x15, 0x0099); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00eb); + rtl8168_mdio_write(tp, 0x19, 0x6c0b); + rtl8168_mdio_write(tp, 0x15, 0x00f8); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00fe); + rtl8168_mdio_write(tp, 0x19, 0x6f0f); + rtl8168_mdio_write(tp, 0x15, 0x00db); + rtl8168_mdio_write(tp, 0x19, 0x6f09); + rtl8168_mdio_write(tp, 0x15, 0x00dc); + rtl8168_mdio_write(tp, 0x19, 0xaefd); + rtl8168_mdio_write(tp, 0x15, 0x00dd); + rtl8168_mdio_write(tp, 0x19, 0x6f0b); + rtl8168_mdio_write(tp, 0x15, 0x00de); + rtl8168_mdio_write(tp, 0x19, 0xc60b); + rtl8168_mdio_write(tp, 0x15, 0x00df); + rtl8168_mdio_write(tp, 0x19, 0x00fa); + rtl8168_mdio_write(tp, 0x15, 0x00e0); + rtl8168_mdio_write(tp, 0x19, 0x30e1); + rtl8168_mdio_write(tp, 0x15, 0x020c); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x020e); + rtl8168_mdio_write(tp, 0x19, 0x9813); + rtl8168_mdio_write(tp, 0x15, 0x020f); + rtl8168_mdio_write(tp, 0x19, 0x7801); + rtl8168_mdio_write(tp, 0x15, 0x0210); + rtl8168_mdio_write(tp, 0x19, 0x930f); + rtl8168_mdio_write(tp, 0x15, 0x0211); + rtl8168_mdio_write(tp, 0x19, 0x9206); + rtl8168_mdio_write(tp, 0x15, 0x0212); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 
0x15, 0x0213); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0214); + rtl8168_mdio_write(tp, 0x19, 0x588f); + rtl8168_mdio_write(tp, 0x15, 0x0215); + rtl8168_mdio_write(tp, 0x19, 0x5520); + rtl8168_mdio_write(tp, 0x15, 0x0216); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x0217); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0218); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0219); + rtl8168_mdio_write(tp, 0x19, 0x588d); + rtl8168_mdio_write(tp, 0x15, 0x021a); + rtl8168_mdio_write(tp, 0x19, 0x5540); + rtl8168_mdio_write(tp, 0x15, 0x021b); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x021c); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x021d); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x021e); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x021f); + rtl8168_mdio_write(tp, 0x19, 0x4002); + rtl8168_mdio_write(tp, 0x15, 0x0220); + rtl8168_mdio_write(tp, 0x19, 0x3224); + rtl8168_mdio_write(tp, 0x15, 0x0221); + rtl8168_mdio_write(tp, 0x19, 0x9e03); + rtl8168_mdio_write(tp, 0x15, 0x0222); + rtl8168_mdio_write(tp, 0x19, 0x7c40); + rtl8168_mdio_write(tp, 0x15, 0x0223); + rtl8168_mdio_write(tp, 0x19, 0x6840); + rtl8168_mdio_write(tp, 0x15, 0x0224); + rtl8168_mdio_write(tp, 0x19, 0x7800); + rtl8168_mdio_write(tp, 0x15, 0x0225); + rtl8168_mdio_write(tp, 0x19, 0x3231); + rtl8168_mdio_write(tp, 0x15, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x0306); + rtl8168_mdio_write(tp, 0x16, 0x0300); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x48f7); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xfff7); + rtl8168_mdio_write(tp, 0x06, 0xa080); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0xf602); + rtl8168_mdio_write(tp, 0x06, 0x011e); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x2b02); + rtl8168_mdio_write(tp, 0x06, 0x8077); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x4802); + rtl8168_mdio_write(tp, 0x06, 0x0162); + rtl8168_mdio_write(tp, 0x06, 0x0280); + rtl8168_mdio_write(tp, 0x06, 0x9402); + rtl8168_mdio_write(tp, 0x06, 0x810e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x88e1); + rtl8168_mdio_write(tp, 0x06, 0x8b89); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8a1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8b); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8c1e); + rtl8168_mdio_write(tp, 0x06, 0x01e1); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x1e01); + rtl8168_mdio_write(tp, 0x06, 0xe18b); + rtl8168_mdio_write(tp, 0x06, 0x8e1e); + rtl8168_mdio_write(tp, 0x06, 0x01a0); + rtl8168_mdio_write(tp, 0x06, 0x00c7); + rtl8168_mdio_write(tp, 0x06, 0xaebb); + rtl8168_mdio_write(tp, 0x06, 0xd481); + rtl8168_mdio_write(tp, 0x06, 0xd4e4); + rtl8168_mdio_write(tp, 0x06, 0x8b92); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x9302); + rtl8168_mdio_write(tp, 0x06, 0x2e5a); + rtl8168_mdio_write(tp, 0x06, 0xbf8b); + 
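+ /* Same two-stage pattern as the helper above: the 0x15/0x19 pairs filled an address/data patch table (bracketed by the 0x16 writes), and after re-selecting 0x05 = 0x8000 the reg-0x06 writes stream the main code image. */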
rtl8168_mdio_write(tp, 0x06, 0x88ec); + rtl8168_mdio_write(tp, 0x06, 0x0019); + rtl8168_mdio_write(tp, 0x06, 0xa98b); + rtl8168_mdio_write(tp, 0x06, 0x90f9); + rtl8168_mdio_write(tp, 0x06, 0xeeff); + rtl8168_mdio_write(tp, 0x06, 0xf600); + rtl8168_mdio_write(tp, 0x06, 0xeeff); + rtl8168_mdio_write(tp, 0x06, 0xf7fc); + rtl8168_mdio_write(tp, 0x06, 0xd100); + rtl8168_mdio_write(tp, 0x06, 0xbf83); + rtl8168_mdio_write(tp, 0x06, 0x3c02); + rtl8168_mdio_write(tp, 0x06, 0x3a21); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf83); + rtl8168_mdio_write(tp, 0x06, 0x3f02); + rtl8168_mdio_write(tp, 0x06, 0x3a21); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8aad); + rtl8168_mdio_write(tp, 0x06, 0x2014); + rtl8168_mdio_write(tp, 0x06, 0xee8b); + rtl8168_mdio_write(tp, 0x06, 0x8a00); + rtl8168_mdio_write(tp, 0x06, 0x0220); + rtl8168_mdio_write(tp, 0x06, 0x8be0); + rtl8168_mdio_write(tp, 0x06, 0xe426); + rtl8168_mdio_write(tp, 0x06, 0xe1e4); + rtl8168_mdio_write(tp, 0x06, 0x27ee); + rtl8168_mdio_write(tp, 0x06, 0xe426); + rtl8168_mdio_write(tp, 0x06, 0x23e5); + rtl8168_mdio_write(tp, 0x06, 0xe427); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x14ee); + rtl8168_mdio_write(tp, 0x06, 0x8b8d); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0x8a5a); + rtl8168_mdio_write(tp, 0x06, 0x7803); + rtl8168_mdio_write(tp, 0x06, 0x9e09); + rtl8168_mdio_write(tp, 0x06, 0x0206); + rtl8168_mdio_write(tp, 0x06, 0x2802); + rtl8168_mdio_write(tp, 0x06, 0x80b1); + rtl8168_mdio_write(tp, 0x06, 0x0232); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xf9e0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac26); + rtl8168_mdio_write(tp, 0x06, 0x1ae0); + rtl8168_mdio_write(tp, 0x06, 0x8b81); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x14e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xac20); + rtl8168_mdio_write(tp, 0x06, 0x0ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xac23); + rtl8168_mdio_write(tp, 0x06, 0x08e0); + rtl8168_mdio_write(tp, 0x06, 0x8b87); + rtl8168_mdio_write(tp, 0x06, 0xac24); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0x1b02); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x1c04); + rtl8168_mdio_write(tp, 0x06, 0xeee4); + rtl8168_mdio_write(tp, 0x06, 0x1d04); + rtl8168_mdio_write(tp, 0x06, 0xe2e0); + rtl8168_mdio_write(tp, 0x06, 0x7ce3); + rtl8168_mdio_write(tp, 0x06, 0xe07d); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x38e1); + rtl8168_mdio_write(tp, 0x06, 0xe039); + rtl8168_mdio_write(tp, 0x06, 0xad2e); + rtl8168_mdio_write(tp, 0x06, 0x1bad); + rtl8168_mdio_write(tp, 0x06, 0x390d); + rtl8168_mdio_write(tp, 0x06, 0xd101); + rtl8168_mdio_write(tp, 0x06, 0xbf22); + rtl8168_mdio_write(tp, 0x06, 0xe802); + rtl8168_mdio_write(tp, 0x06, 0x3a21); + rtl8168_mdio_write(tp, 0x06, 0x0222); + rtl8168_mdio_write(tp, 0x06, 0x10ae); + rtl8168_mdio_write(tp, 0x06, 0x0bac); + rtl8168_mdio_write(tp, 0x06, 0x3802); + rtl8168_mdio_write(tp, 0x06, 0xae06); + rtl8168_mdio_write(tp, 0x06, 0x0222); + rtl8168_mdio_write(tp, 0x06, 0x4d02); + rtl8168_mdio_write(tp, 0x06, 
0x2292); + rtl8168_mdio_write(tp, 0x06, 0x021b); + rtl8168_mdio_write(tp, 0x06, 0x13fd); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x1af6); + rtl8168_mdio_write(tp, 0x06, 0x20e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x022b); + rtl8168_mdio_write(tp, 0x06, 0x1e02); + rtl8168_mdio_write(tp, 0x06, 0x82ae); + rtl8168_mdio_write(tp, 0x06, 0x0203); + rtl8168_mdio_write(tp, 0x06, 0xc002); + rtl8168_mdio_write(tp, 0x06, 0x827d); + rtl8168_mdio_write(tp, 0x06, 0x022e); + rtl8168_mdio_write(tp, 0x06, 0x6f02); + rtl8168_mdio_write(tp, 0x06, 0x047b); + rtl8168_mdio_write(tp, 0x06, 0x022f); + rtl8168_mdio_write(tp, 0x06, 0x9ae0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad21); + rtl8168_mdio_write(tp, 0x06, 0x0bf6); + rtl8168_mdio_write(tp, 0x06, 0x21e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0x9002); + rtl8168_mdio_write(tp, 0x06, 0x1cd9); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2208); + rtl8168_mdio_write(tp, 0x06, 0xf622); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x35f4); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2308); + rtl8168_mdio_write(tp, 0x06, 0xf623); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x31e8); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2405); + rtl8168_mdio_write(tp, 0x06, 0xf624); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8ee0); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xad25); + rtl8168_mdio_write(tp, 0x06, 0x05f6); + rtl8168_mdio_write(tp, 0x06, 0x25e4); + rtl8168_mdio_write(tp, 0x06, 0x8b8e); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2608); + rtl8168_mdio_write(tp, 0x06, 0xf626); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x2d8a); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x8ead); + rtl8168_mdio_write(tp, 0x06, 0x2705); + rtl8168_mdio_write(tp, 0x06, 0xf627); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x8e02); + rtl8168_mdio_write(tp, 0x06, 0x0386); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xef69); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0xe001); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x32e0); + rtl8168_mdio_write(tp, 0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf720); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40bf); + rtl8168_mdio_write(tp, 0x06, 0x32c1); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xf4ad); + rtl8168_mdio_write(tp, 0x06, 0x2821); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x20e1); + rtl8168_mdio_write(tp, 0x06, 0xe021); + rtl8168_mdio_write(tp, 0x06, 0xad20); + rtl8168_mdio_write(tp, 0x06, 0x18e0); + rtl8168_mdio_write(tp, 
0x06, 0x8b40); + rtl8168_mdio_write(tp, 0x06, 0xf620); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x40ee); + rtl8168_mdio_write(tp, 0x06, 0x8b3b); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0x8a8a); + rtl8168_mdio_write(tp, 0x06, 0xe18a); + rtl8168_mdio_write(tp, 0x06, 0x8be4); + rtl8168_mdio_write(tp, 0x06, 0xe000); + rtl8168_mdio_write(tp, 0x06, 0xe5e0); + rtl8168_mdio_write(tp, 0x06, 0x01ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xface); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69fa); + rtl8168_mdio_write(tp, 0x06, 0xd401); + rtl8168_mdio_write(tp, 0x06, 0x55b4); + rtl8168_mdio_write(tp, 0x06, 0xfebf); + rtl8168_mdio_write(tp, 0x06, 0x1c5e); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xf4ac); + rtl8168_mdio_write(tp, 0x06, 0x280b); + rtl8168_mdio_write(tp, 0x06, 0xbf1c); + rtl8168_mdio_write(tp, 0x06, 0x5b02); + rtl8168_mdio_write(tp, 0x06, 0x39f4); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x49ae); + rtl8168_mdio_write(tp, 0x06, 0x64bf); + rtl8168_mdio_write(tp, 0x06, 0x1c5b); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xf4ac); + rtl8168_mdio_write(tp, 0x06, 0x285b); + rtl8168_mdio_write(tp, 0x06, 0xd000); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x62ac); + rtl8168_mdio_write(tp, 0x06, 0x2105); + rtl8168_mdio_write(tp, 0x06, 0xac22); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x4ebf); + rtl8168_mdio_write(tp, 0x06, 0xe0c4); + rtl8168_mdio_write(tp, 0x06, 0xbe85); + rtl8168_mdio_write(tp, 0x06, 0xecd2); + rtl8168_mdio_write(tp, 0x06, 0x04d8); + rtl8168_mdio_write(tp, 0x06, 0x19d9); + rtl8168_mdio_write(tp, 0x06, 0x1907); + rtl8168_mdio_write(tp, 0x06, 0xdc19); + rtl8168_mdio_write(tp, 0x06, 0xdd19); + rtl8168_mdio_write(tp, 0x06, 0x0789); + rtl8168_mdio_write(tp, 0x06, 0x89ef); + rtl8168_mdio_write(tp, 0x06, 0x645e); + rtl8168_mdio_write(tp, 0x06, 0x07ff); + rtl8168_mdio_write(tp, 0x06, 0x0d65); + rtl8168_mdio_write(tp, 0x06, 0x5cf8); + rtl8168_mdio_write(tp, 0x06, 0x001e); + rtl8168_mdio_write(tp, 0x06, 0x46dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0x19b2); + rtl8168_mdio_write(tp, 0x06, 0xe2d4); + rtl8168_mdio_write(tp, 0x06, 0x0001); + rtl8168_mdio_write(tp, 0x06, 0xbf1c); + rtl8168_mdio_write(tp, 0x06, 0x5b02); + rtl8168_mdio_write(tp, 0x06, 0x3a21); + rtl8168_mdio_write(tp, 0x06, 0xae1d); + rtl8168_mdio_write(tp, 0x06, 0xbee0); + rtl8168_mdio_write(tp, 0x06, 0xc4bf); + rtl8168_mdio_write(tp, 0x06, 0x85ec); + rtl8168_mdio_write(tp, 0x06, 0xd204); + rtl8168_mdio_write(tp, 0x06, 0xd819); + rtl8168_mdio_write(tp, 0x06, 0xd919); + rtl8168_mdio_write(tp, 0x06, 0x07dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0x1907); + rtl8168_mdio_write(tp, 0x06, 0xb2f4); + rtl8168_mdio_write(tp, 0x06, 0xd400); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x1c5b); + rtl8168_mdio_write(tp, 0x06, 0x023a); + rtl8168_mdio_write(tp, 0x06, 0x21fe); + rtl8168_mdio_write(tp, 0x06, 0xef96); + rtl8168_mdio_write(tp, 0x06, 0xfec6); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc05); + rtl8168_mdio_write(tp, 0x06, 0xf9e2); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe3e0); + rtl8168_mdio_write(tp, 0x06, 0xeb5a); + 
rtl8168_mdio_write(tp, 0x06, 0x070c); + rtl8168_mdio_write(tp, 0x06, 0x031e); + rtl8168_mdio_write(tp, 0x06, 0x20e6); + rtl8168_mdio_write(tp, 0x06, 0xe0ea); + rtl8168_mdio_write(tp, 0x06, 0xe7e0); + rtl8168_mdio_write(tp, 0x06, 0xebe0); + rtl8168_mdio_write(tp, 0x06, 0xe0fc); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0xfdfd); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x69e0); + rtl8168_mdio_write(tp, 0x06, 0x8b80); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x22bf); + rtl8168_mdio_write(tp, 0x06, 0x47ba); + rtl8168_mdio_write(tp, 0x06, 0x0239); + rtl8168_mdio_write(tp, 0x06, 0xf4e0); + rtl8168_mdio_write(tp, 0x06, 0x8b44); + rtl8168_mdio_write(tp, 0x06, 0x1f01); + rtl8168_mdio_write(tp, 0x06, 0x9e15); + rtl8168_mdio_write(tp, 0x06, 0xe58b); + rtl8168_mdio_write(tp, 0x06, 0x44ad); + rtl8168_mdio_write(tp, 0x06, 0x2907); + rtl8168_mdio_write(tp, 0x06, 0xac28); + rtl8168_mdio_write(tp, 0x06, 0x04d1); + rtl8168_mdio_write(tp, 0x06, 0x01ae); + rtl8168_mdio_write(tp, 0x06, 0x02d1); + rtl8168_mdio_write(tp, 0x06, 0x00bf); + rtl8168_mdio_write(tp, 0x06, 0x8342); + rtl8168_mdio_write(tp, 0x06, 0x023a); + rtl8168_mdio_write(tp, 0x06, 0x21ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x30e0); + rtl8168_mdio_write(tp, 0x06, 0xe036); + rtl8168_mdio_write(tp, 0x06, 0xe1e0); + rtl8168_mdio_write(tp, 0x06, 0x37e1); + rtl8168_mdio_write(tp, 0x06, 0x8b3f); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e23); + rtl8168_mdio_write(tp, 0x06, 0xe48b); + rtl8168_mdio_write(tp, 0x06, 0x3fac); + rtl8168_mdio_write(tp, 0x06, 0x200b); + rtl8168_mdio_write(tp, 0x06, 0xac21); + rtl8168_mdio_write(tp, 0x06, 0x0dac); + rtl8168_mdio_write(tp, 0x06, 0x250f); + rtl8168_mdio_write(tp, 0x06, 0xac27); + rtl8168_mdio_write(tp, 0x06, 0x11ae); + rtl8168_mdio_write(tp, 0x06, 0x1202); + rtl8168_mdio_write(tp, 0x06, 0x2cb5); + rtl8168_mdio_write(tp, 0x06, 0xae0d); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0xe7ae); + rtl8168_mdio_write(tp, 0x06, 0x0802); + rtl8168_mdio_write(tp, 0x06, 0x2cd7); + rtl8168_mdio_write(tp, 0x06, 0xae03); + rtl8168_mdio_write(tp, 0x06, 0x022c); + rtl8168_mdio_write(tp, 0x06, 0xeafc); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x6902); + rtl8168_mdio_write(tp, 0x06, 0x8304); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x14e1); + rtl8168_mdio_write(tp, 0x06, 0xe015); + rtl8168_mdio_write(tp, 0x06, 0xad26); + rtl8168_mdio_write(tp, 0x06, 0x08d1); + rtl8168_mdio_write(tp, 0x06, 0x1ebf); + rtl8168_mdio_write(tp, 0x06, 0x2d47); + rtl8168_mdio_write(tp, 0x06, 0x023a); + rtl8168_mdio_write(tp, 0x06, 0x21ef); + rtl8168_mdio_write(tp, 0x06, 0x96fe); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0x8b85); + rtl8168_mdio_write(tp, 0x06, 0xad27); + rtl8168_mdio_write(tp, 0x06, 0x2fd0); + rtl8168_mdio_write(tp, 0x06, 0x0b02); + rtl8168_mdio_write(tp, 0x06, 0x3826); + rtl8168_mdio_write(tp, 0x06, 0x5882); + rtl8168_mdio_write(tp, 0x06, 0x7882); + rtl8168_mdio_write(tp, 0x06, 0x9f24); + rtl8168_mdio_write(tp, 0x06, 0xe08b); + rtl8168_mdio_write(tp, 0x06, 0x32e1); + rtl8168_mdio_write(tp, 0x06, 
0x8b33); + rtl8168_mdio_write(tp, 0x06, 0x1f10); + rtl8168_mdio_write(tp, 0x06, 0x9e1a); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x8b32); + rtl8168_mdio_write(tp, 0x06, 0xe0e0); + rtl8168_mdio_write(tp, 0x06, 0x28e1); + rtl8168_mdio_write(tp, 0x06, 0xe029); + rtl8168_mdio_write(tp, 0x06, 0xf72c); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x28e5); + rtl8168_mdio_write(tp, 0x06, 0xe029); + rtl8168_mdio_write(tp, 0x06, 0xf62c); + rtl8168_mdio_write(tp, 0x06, 0xe4e0); + rtl8168_mdio_write(tp, 0x06, 0x28e5); + rtl8168_mdio_write(tp, 0x06, 0xe029); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0x00e1); + rtl8168_mdio_write(tp, 0x06, 0x4077); + rtl8168_mdio_write(tp, 0x06, 0xe140); + rtl8168_mdio_write(tp, 0x06, 0xbbe0); + rtl8168_mdio_write(tp, 0x06, 0x2a00); + rtl8168_mdio_write(tp, 0x05, 0xe142); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp,0x06, gphy_val); + rtl8168_mdio_write(tp, 0x05, 0xe140); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp,0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp,0x1f, 0x0005); + for (i = 0; i < 200; i++) { + udelay(100); + gphy_val = rtl8168_mdio_read(tp, 0x00); + if (gphy_val & BIT_7) + break; + } + rtl8168_mdio_write(tp,0x1f, 0x0007); + rtl8168_mdio_write(tp,0x1e, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val |= BIT_1; + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~BIT_2; + rtl8168_mdio_write(tp,0x17, gphy_val); + rtl8168_mdio_write(tp,0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x09, 0xA20F); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0003); + rtl8168_mdio_write(tp, 0x01, 0x328A); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp,0x1f, 0x0000); + rtl8168_mdio_write(tp,0x00, 0x9200); +} + +static void +rtl8168_set_phy_mcu_8168g_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val; + + rtl8168_set_phy_mcu_patch_request(tp); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8146); + rtl8168_mdio_write(tp, 0x14, 0x2300); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0210); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0290); + rtl8168_mdio_write(tp, 0x13, 0xA012); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xA014); + rtl8168_mdio_write(tp, 0x14, 0x2c04); + rtl8168_mdio_write(tp, 0x14, 0x2c0c); + rtl8168_mdio_write(tp, 0x14, 0x2c6c); + rtl8168_mdio_write(tp, 0x14, 0x2d0d); + rtl8168_mdio_write(tp, 0x14, 0x31ce); + rtl8168_mdio_write(tp, 0x14, 0x506d); + rtl8168_mdio_write(tp, 0x14, 0xd708); + rtl8168_mdio_write(tp, 0x14, 0x3108); + rtl8168_mdio_write(tp, 0x14, 0x106d); + rtl8168_mdio_write(tp, 0x14, 0x1560); + rtl8168_mdio_write(tp, 0x14, 0x15a9); + rtl8168_mdio_write(tp, 0x14, 0x206e); + rtl8168_mdio_write(tp, 0x14, 0x175b); + rtl8168_mdio_write(tp, 0x14, 0x6062); + rtl8168_mdio_write(tp, 0x14, 0xd700); + rtl8168_mdio_write(tp, 0x14, 0x5fae); + rtl8168_mdio_write(tp, 0x14, 0xd708); + rtl8168_mdio_write(tp, 0x14, 0x3107); + rtl8168_mdio_write(tp, 0x14, 0x4c1e); + rtl8168_mdio_write(tp, 0x14, 0x4169); + rtl8168_mdio_write(tp, 0x14, 0x316a); + rtl8168_mdio_write(tp, 0x14, 0x0c19); + rtl8168_mdio_write(tp, 0x14, 0x31aa); + rtl8168_mdio_write(tp, 0x14, 0x0c19); + 
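+ /* rtl8168_set_phy_mcu_8168g_1 above switches to the 8168G-style indirect interface: on page 0x0A43, reg 0x13 latches an address (0xB820 = 0x0290 opens patch mode, 0xA014 selects the code area) and repeated reg-0x14 writes stream the patch words. */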
rtl8168_mdio_write(tp, 0x14, 0x2c1b); + rtl8168_mdio_write(tp, 0x14, 0x5e62); + rtl8168_mdio_write(tp, 0x14, 0x26b5); + rtl8168_mdio_write(tp, 0x14, 0x31ab); + rtl8168_mdio_write(tp, 0x14, 0x5c1e); + rtl8168_mdio_write(tp, 0x14, 0x2c0c); + rtl8168_mdio_write(tp, 0x14, 0xc040); + rtl8168_mdio_write(tp, 0x14, 0x8808); + rtl8168_mdio_write(tp, 0x14, 0xc520); + rtl8168_mdio_write(tp, 0x14, 0xc421); + rtl8168_mdio_write(tp, 0x14, 0xd05a); + rtl8168_mdio_write(tp, 0x14, 0xd19a); + rtl8168_mdio_write(tp, 0x14, 0xd709); + rtl8168_mdio_write(tp, 0x14, 0x608f); + rtl8168_mdio_write(tp, 0x14, 0xd06b); + rtl8168_mdio_write(tp, 0x14, 0xd18a); + rtl8168_mdio_write(tp, 0x14, 0x2c2c); + rtl8168_mdio_write(tp, 0x14, 0xd0be); + rtl8168_mdio_write(tp, 0x14, 0xd188); + rtl8168_mdio_write(tp, 0x14, 0x2c2c); + rtl8168_mdio_write(tp, 0x14, 0xd708); + rtl8168_mdio_write(tp, 0x14, 0x4072); + rtl8168_mdio_write(tp, 0x14, 0xc104); + rtl8168_mdio_write(tp, 0x14, 0x2c3e); + rtl8168_mdio_write(tp, 0x14, 0x4076); + rtl8168_mdio_write(tp, 0x14, 0xc110); + rtl8168_mdio_write(tp, 0x14, 0x2c3e); + rtl8168_mdio_write(tp, 0x14, 0x4071); + rtl8168_mdio_write(tp, 0x14, 0xc102); + rtl8168_mdio_write(tp, 0x14, 0x2c3e); + rtl8168_mdio_write(tp, 0x14, 0x4070); + rtl8168_mdio_write(tp, 0x14, 0xc101); + rtl8168_mdio_write(tp, 0x14, 0x2c3e); + rtl8168_mdio_write(tp, 0x14, 0x175b); + rtl8168_mdio_write(tp, 0x14, 0xd709); + rtl8168_mdio_write(tp, 0x14, 0x3390); + rtl8168_mdio_write(tp, 0x14, 0x5c39); + rtl8168_mdio_write(tp, 0x14, 0x2c4e); + rtl8168_mdio_write(tp, 0x14, 0x175b); + rtl8168_mdio_write(tp, 0x14, 0xd708); + rtl8168_mdio_write(tp, 0x14, 0x6193); + rtl8168_mdio_write(tp, 0x14, 0xd709); + rtl8168_mdio_write(tp, 0x14, 0x5f9d); + rtl8168_mdio_write(tp, 0x14, 0x408b); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x6042); + rtl8168_mdio_write(tp, 0x14, 0xb401); + rtl8168_mdio_write(tp, 0x14, 0x175b); + rtl8168_mdio_write(tp, 0x14, 0xd708); + rtl8168_mdio_write(tp, 0x14, 0x6073); + rtl8168_mdio_write(tp, 0x14, 0x5fbc); + rtl8168_mdio_write(tp, 0x14, 0x2c4d); + rtl8168_mdio_write(tp, 0x14, 0x26ed); + rtl8168_mdio_write(tp, 0x14, 0xb280); + rtl8168_mdio_write(tp, 0x14, 0xa841); + rtl8168_mdio_write(tp, 0x14, 0x9420); + rtl8168_mdio_write(tp, 0x14, 0x8710); + rtl8168_mdio_write(tp, 0x14, 0xd709); + rtl8168_mdio_write(tp, 0x14, 0x42ec); + rtl8168_mdio_write(tp, 0x14, 0x606d); + rtl8168_mdio_write(tp, 0x14, 0xd207); + rtl8168_mdio_write(tp, 0x14, 0x2c57); + rtl8168_mdio_write(tp, 0x14, 0xd203); + rtl8168_mdio_write(tp, 0x14, 0x33ff); + rtl8168_mdio_write(tp, 0x14, 0x563b); + rtl8168_mdio_write(tp, 0x14, 0x3275); + rtl8168_mdio_write(tp, 0x14, 0x7c5e); + rtl8168_mdio_write(tp, 0x14, 0xb240); + rtl8168_mdio_write(tp, 0x14, 0xb402); + rtl8168_mdio_write(tp, 0x14, 0x263b); + rtl8168_mdio_write(tp, 0x14, 0x6096); + rtl8168_mdio_write(tp, 0x14, 0xb240); + rtl8168_mdio_write(tp, 0x14, 0xb406); + rtl8168_mdio_write(tp, 0x14, 0x263b); + rtl8168_mdio_write(tp, 0x14, 0x31d7); + rtl8168_mdio_write(tp, 0x14, 0x7c67); + rtl8168_mdio_write(tp, 0x14, 0xb240); + rtl8168_mdio_write(tp, 0x14, 0xb40e); + rtl8168_mdio_write(tp, 0x14, 0x263b); + rtl8168_mdio_write(tp, 0x14, 0xb410); + rtl8168_mdio_write(tp, 0x14, 0x8802); + rtl8168_mdio_write(tp, 0x14, 0xb240); + rtl8168_mdio_write(tp, 0x14, 0x940e); + rtl8168_mdio_write(tp, 0x14, 0x263b); + rtl8168_mdio_write(tp, 0x14, 0xba04); + rtl8168_mdio_write(tp, 0x14, 0x1cd6); + rtl8168_mdio_write(tp, 0x14, 0xa902); + rtl8168_mdio_write(tp, 0x14, 0xd711); + rtl8168_mdio_write(tp, 0x14, 
0x4045); + rtl8168_mdio_write(tp, 0x14, 0xa980); + rtl8168_mdio_write(tp, 0x14, 0x3003); + rtl8168_mdio_write(tp, 0x14, 0x59b1); + rtl8168_mdio_write(tp, 0x14, 0xa540); + rtl8168_mdio_write(tp, 0x14, 0xa601); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4043); + rtl8168_mdio_write(tp, 0x14, 0xa910); + rtl8168_mdio_write(tp, 0x14, 0xd711); + rtl8168_mdio_write(tp, 0x14, 0x60a0); + rtl8168_mdio_write(tp, 0x14, 0xca33); + rtl8168_mdio_write(tp, 0x14, 0xcb33); + rtl8168_mdio_write(tp, 0x14, 0xa941); + rtl8168_mdio_write(tp, 0x14, 0x2c82); + rtl8168_mdio_write(tp, 0x14, 0xcaff); + rtl8168_mdio_write(tp, 0x14, 0xcbff); + rtl8168_mdio_write(tp, 0x14, 0xa921); + rtl8168_mdio_write(tp, 0x14, 0xce02); + rtl8168_mdio_write(tp, 0x14, 0xe070); + rtl8168_mdio_write(tp, 0x14, 0x0f10); + rtl8168_mdio_write(tp, 0x14, 0xaf01); + rtl8168_mdio_write(tp, 0x14, 0x8f01); + rtl8168_mdio_write(tp, 0x14, 0x1766); + rtl8168_mdio_write(tp, 0x14, 0x8e02); + rtl8168_mdio_write(tp, 0x14, 0x1787); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x609c); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fa4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0x1ce9); + rtl8168_mdio_write(tp, 0x14, 0xce04); + rtl8168_mdio_write(tp, 0x14, 0xe070); + rtl8168_mdio_write(tp, 0x14, 0x0f20); + rtl8168_mdio_write(tp, 0x14, 0xaf01); + rtl8168_mdio_write(tp, 0x14, 0x8f01); + rtl8168_mdio_write(tp, 0x14, 0x1766); + rtl8168_mdio_write(tp, 0x14, 0x8e04); + rtl8168_mdio_write(tp, 0x14, 0x6044); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0xa520); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4043); + rtl8168_mdio_write(tp, 0x14, 0x2cc1); + rtl8168_mdio_write(tp, 0x14, 0xe00f); + rtl8168_mdio_write(tp, 0x14, 0x0501); + rtl8168_mdio_write(tp, 0x14, 0x1cef); + rtl8168_mdio_write(tp, 0x14, 0xb801); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x4060); + rtl8168_mdio_write(tp, 0x14, 0x7fc4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0x1cf5); + rtl8168_mdio_write(tp, 0x14, 0xe00f); + rtl8168_mdio_write(tp, 0x14, 0x0502); + rtl8168_mdio_write(tp, 0x14, 0x1cef); + rtl8168_mdio_write(tp, 0x14, 0xb802); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x4061); + rtl8168_mdio_write(tp, 0x14, 0x7fc4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0x1cf5); + rtl8168_mdio_write(tp, 0x14, 0xe00f); + rtl8168_mdio_write(tp, 0x14, 0x0504); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6099); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fa4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0xc17f); + rtl8168_mdio_write(tp, 0x14, 0xc200); + rtl8168_mdio_write(tp, 0x14, 0xc43f); + rtl8168_mdio_write(tp, 0x14, 0xcc03); + rtl8168_mdio_write(tp, 0x14, 0xa701); + rtl8168_mdio_write(tp, 0x14, 0xa510); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4018); + rtl8168_mdio_write(tp, 0x14, 0x9910); + rtl8168_mdio_write(tp, 0x14, 0x8510); + rtl8168_mdio_write(tp, 0x14, 0x2860); + rtl8168_mdio_write(tp, 0x14, 0xe00f); + rtl8168_mdio_write(tp, 0x14, 0x0504); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6099); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fa4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0xa608); + rtl8168_mdio_write(tp, 
0x14, 0xc17d); + rtl8168_mdio_write(tp, 0x14, 0xc200); + rtl8168_mdio_write(tp, 0x14, 0xc43f); + rtl8168_mdio_write(tp, 0x14, 0xcc03); + rtl8168_mdio_write(tp, 0x14, 0xa701); + rtl8168_mdio_write(tp, 0x14, 0xa510); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4018); + rtl8168_mdio_write(tp, 0x14, 0x9910); + rtl8168_mdio_write(tp, 0x14, 0x8510); + rtl8168_mdio_write(tp, 0x14, 0x2926); + rtl8168_mdio_write(tp, 0x14, 0x1792); + rtl8168_mdio_write(tp, 0x14, 0x27db); + rtl8168_mdio_write(tp, 0x14, 0xc000); + rtl8168_mdio_write(tp, 0x14, 0xc100); + rtl8168_mdio_write(tp, 0x14, 0xc200); + rtl8168_mdio_write(tp, 0x14, 0xc300); + rtl8168_mdio_write(tp, 0x14, 0xc400); + rtl8168_mdio_write(tp, 0x14, 0xc500); + rtl8168_mdio_write(tp, 0x14, 0xc600); + rtl8168_mdio_write(tp, 0x14, 0xc7c1); + rtl8168_mdio_write(tp, 0x14, 0xc800); + rtl8168_mdio_write(tp, 0x14, 0xcc00); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xca0f); + rtl8168_mdio_write(tp, 0x14, 0xcbff); + rtl8168_mdio_write(tp, 0x14, 0xa901); + rtl8168_mdio_write(tp, 0x14, 0x8902); + rtl8168_mdio_write(tp, 0x14, 0xc900); + rtl8168_mdio_write(tp, 0x14, 0xca00); + rtl8168_mdio_write(tp, 0x14, 0xcb00); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xb804); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x6044); + rtl8168_mdio_write(tp, 0x14, 0x9804); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6099); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fa4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xa510); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6098); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fa4); + rtl8168_mdio_write(tp, 0x14, 0x2cd4); + rtl8168_mdio_write(tp, 0x14, 0x8510); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xd711); + rtl8168_mdio_write(tp, 0x14, 0x3003); + rtl8168_mdio_write(tp, 0x14, 0x1d01); + rtl8168_mdio_write(tp, 0x14, 0x2d0b); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x60be); + rtl8168_mdio_write(tp, 0x14, 0xe060); + rtl8168_mdio_write(tp, 0x14, 0x0920); + rtl8168_mdio_write(tp, 0x14, 0x1cd6); + rtl8168_mdio_write(tp, 0x14, 0x2c89); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x3063); + rtl8168_mdio_write(tp, 0x14, 0x1948); + rtl8168_mdio_write(tp, 0x14, 0x288a); + rtl8168_mdio_write(tp, 0x14, 0x1cd6); + rtl8168_mdio_write(tp, 0x14, 0x29bd); + rtl8168_mdio_write(tp, 0x14, 0xa802); + rtl8168_mdio_write(tp, 0x14, 0xa303); + rtl8168_mdio_write(tp, 0x14, 0x843f); + rtl8168_mdio_write(tp, 0x14, 0x81ff); + rtl8168_mdio_write(tp, 0x14, 0x8208); + rtl8168_mdio_write(tp, 0x14, 0xa201); + rtl8168_mdio_write(tp, 0x14, 0xc001); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x30a0); + rtl8168_mdio_write(tp, 0x14, 0x0d1c); + rtl8168_mdio_write(tp, 0x14, 0x30a0); + rtl8168_mdio_write(tp, 0x14, 0x3d13); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7f4c); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0xe003); + rtl8168_mdio_write(tp, 0x14, 0x0202); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6090); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fac); + 
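+ /* The handler table continues; judging by the layout, each short block ends in an 0x0800 word, which reads like a return/terminator in these Realtek MCU blobs. */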
rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0xa20c); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6091); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fac); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0x820e); + rtl8168_mdio_write(tp, 0x14, 0xa3e0); + rtl8168_mdio_write(tp, 0x14, 0xa520); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x609d); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fac); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0x8520); + rtl8168_mdio_write(tp, 0x14, 0x6703); + rtl8168_mdio_write(tp, 0x14, 0x2d34); + rtl8168_mdio_write(tp, 0x14, 0xa13e); + rtl8168_mdio_write(tp, 0x14, 0xc001); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 0x6046); + rtl8168_mdio_write(tp, 0x14, 0x2d0d); + rtl8168_mdio_write(tp, 0x14, 0xa43f); + rtl8168_mdio_write(tp, 0x14, 0xa101); + rtl8168_mdio_write(tp, 0x14, 0xc020); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x3121); + rtl8168_mdio_write(tp, 0x14, 0x0d45); + rtl8168_mdio_write(tp, 0x14, 0x30c0); + rtl8168_mdio_write(tp, 0x14, 0x3d0d); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7f4c); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0xa540); + rtl8168_mdio_write(tp, 0x14, 0xc001); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4001); + rtl8168_mdio_write(tp, 0x14, 0xe00f); + rtl8168_mdio_write(tp, 0x14, 0x0501); + rtl8168_mdio_write(tp, 0x14, 0x1dac); + rtl8168_mdio_write(tp, 0x14, 0xc1c4); + rtl8168_mdio_write(tp, 0x14, 0xa268); + rtl8168_mdio_write(tp, 0x14, 0xa303); + rtl8168_mdio_write(tp, 0x14, 0x8420); + rtl8168_mdio_write(tp, 0x14, 0xe00f); + rtl8168_mdio_write(tp, 0x14, 0x0502); + rtl8168_mdio_write(tp, 0x14, 0x1dac); + rtl8168_mdio_write(tp, 0x14, 0xc002); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 0x8208); + rtl8168_mdio_write(tp, 0x14, 0x8410); + rtl8168_mdio_write(tp, 0x14, 0xa121); + rtl8168_mdio_write(tp, 0x14, 0xc002); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 0x8120); + rtl8168_mdio_write(tp, 0x14, 0x8180); + rtl8168_mdio_write(tp, 0x14, 0x1d97); + rtl8168_mdio_write(tp, 0x14, 0xa180); + rtl8168_mdio_write(tp, 0x14, 0xa13a); + rtl8168_mdio_write(tp, 0x14, 0x8240); + rtl8168_mdio_write(tp, 0x14, 0xa430); + rtl8168_mdio_write(tp, 0x14, 0xc010); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x30e1); + rtl8168_mdio_write(tp, 0x14, 0x0abc); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7f8c); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0xa480); + rtl8168_mdio_write(tp, 0x14, 0xa230); + rtl8168_mdio_write(tp, 0x14, 0xa303); + rtl8168_mdio_write(tp, 0x14, 0xc001); + rtl8168_mdio_write(tp, 0x14, 0xd70c); + rtl8168_mdio_write(tp, 0x14, 0x4124); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x6120); + rtl8168_mdio_write(tp, 0x14, 0xd711); + rtl8168_mdio_write(tp, 0x14, 0x3128); + rtl8168_mdio_write(tp, 0x14, 0x3d76); + rtl8168_mdio_write(tp, 0x14, 0x2d70); + rtl8168_mdio_write(tp, 0x14, 0xa801); + rtl8168_mdio_write(tp, 0x14, 0x2d6c); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 
0xe018); + rtl8168_mdio_write(tp, 0x14, 0x0208); + rtl8168_mdio_write(tp, 0x14, 0xa1f8); + rtl8168_mdio_write(tp, 0x14, 0x8480); + rtl8168_mdio_write(tp, 0x14, 0xc004); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 0x6046); + rtl8168_mdio_write(tp, 0x14, 0x2d0d); + rtl8168_mdio_write(tp, 0x14, 0xa43f); + rtl8168_mdio_write(tp, 0x14, 0xa105); + rtl8168_mdio_write(tp, 0x14, 0x8228); + rtl8168_mdio_write(tp, 0x14, 0xc004); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 0x81bc); + rtl8168_mdio_write(tp, 0x14, 0xa220); + rtl8168_mdio_write(tp, 0x14, 0x1d97); + rtl8168_mdio_write(tp, 0x14, 0x8220); + rtl8168_mdio_write(tp, 0x14, 0xa1bc); + rtl8168_mdio_write(tp, 0x14, 0xc040); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x30e1); + rtl8168_mdio_write(tp, 0x14, 0x0abc); + rtl8168_mdio_write(tp, 0x14, 0x30e1); + rtl8168_mdio_write(tp, 0x14, 0x3d0d); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7f4c); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0xa802); + rtl8168_mdio_write(tp, 0x14, 0xd70c); + rtl8168_mdio_write(tp, 0x14, 0x4244); + rtl8168_mdio_write(tp, 0x14, 0xa301); + rtl8168_mdio_write(tp, 0x14, 0xc004); + rtl8168_mdio_write(tp, 0x14, 0xd711); + rtl8168_mdio_write(tp, 0x14, 0x3128); + rtl8168_mdio_write(tp, 0x14, 0x3da5); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x5f80); + rtl8168_mdio_write(tp, 0x14, 0xd711); + rtl8168_mdio_write(tp, 0x14, 0x3109); + rtl8168_mdio_write(tp, 0x14, 0x3da7); + rtl8168_mdio_write(tp, 0x14, 0x2dab); + rtl8168_mdio_write(tp, 0x14, 0xa801); + rtl8168_mdio_write(tp, 0x14, 0x2d9a); + rtl8168_mdio_write(tp, 0x14, 0xa802); + rtl8168_mdio_write(tp, 0x14, 0xc004); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x4000); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x14, 0xa510); + rtl8168_mdio_write(tp, 0x14, 0xd710); + rtl8168_mdio_write(tp, 0x14, 0x609a); + rtl8168_mdio_write(tp, 0x14, 0xd71e); + rtl8168_mdio_write(tp, 0x14, 0x7fac); + rtl8168_mdio_write(tp, 0x14, 0x2ab6); + rtl8168_mdio_write(tp, 0x14, 0x8510); + rtl8168_mdio_write(tp, 0x14, 0x0800); + rtl8168_mdio_write(tp, 0x13, 0xA01A); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xA006); + rtl8168_mdio_write(tp, 0x14, 0x0ad6); + rtl8168_mdio_write(tp, 0x13, 0xA004); + rtl8168_mdio_write(tp, 0x14, 0x07f5); + rtl8168_mdio_write(tp, 0x13, 0xA002); + rtl8168_mdio_write(tp, 0x14, 0x06a9); + rtl8168_mdio_write(tp, 0x13, 0xA000); + rtl8168_mdio_write(tp, 0x14, 0xf069); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0210); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x83a0); + rtl8168_mdio_write(tp, 0x14, 0xaf83); + rtl8168_mdio_write(tp, 0x14, 0xacaf); + rtl8168_mdio_write(tp, 0x14, 0x83b8); + rtl8168_mdio_write(tp, 0x14, 0xaf83); + rtl8168_mdio_write(tp, 0x14, 0xcdaf); + rtl8168_mdio_write(tp, 0x14, 0x83d3); + rtl8168_mdio_write(tp, 0x14, 0x0204); + rtl8168_mdio_write(tp, 0x14, 0x9a02); + rtl8168_mdio_write(tp, 0x14, 0x09a9); + rtl8168_mdio_write(tp, 0x14, 0x0284); + rtl8168_mdio_write(tp, 0x14, 0x61af); + rtl8168_mdio_write(tp, 0x14, 0x02fc); + rtl8168_mdio_write(tp, 0x14, 0xad20); + rtl8168_mdio_write(tp, 0x14, 0x0302); + rtl8168_mdio_write(tp, 0x14, 0x867c); + rtl8168_mdio_write(tp, 0x14, 0xad21); + rtl8168_mdio_write(tp, 0x14, 0x0302); + 
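+ /* Above, the 0xA01A..0xA000 entries appear to set the patch entry points, and 0xB820 = 0x0210 closes the patch-write mode opened with 0x0290; the block starting at 0x13 = 0x83a0 then streams a further firmware region through reg 0x14. */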
rtl8168_mdio_write(tp, 0x14, 0x85c9); + rtl8168_mdio_write(tp, 0x14, 0xad22); + rtl8168_mdio_write(tp, 0x14, 0x0302); + rtl8168_mdio_write(tp, 0x14, 0x1bc0); + rtl8168_mdio_write(tp, 0x14, 0xaf17); + rtl8168_mdio_write(tp, 0x14, 0xe302); + rtl8168_mdio_write(tp, 0x14, 0x8703); + rtl8168_mdio_write(tp, 0x14, 0xaf18); + rtl8168_mdio_write(tp, 0x14, 0x6201); + rtl8168_mdio_write(tp, 0x14, 0x06e0); + rtl8168_mdio_write(tp, 0x14, 0x8148); + rtl8168_mdio_write(tp, 0x14, 0xaf3c); + rtl8168_mdio_write(tp, 0x14, 0x69f8); + rtl8168_mdio_write(tp, 0x14, 0xf9fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0x10f7); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0x131f); + rtl8168_mdio_write(tp, 0x14, 0xd104); + rtl8168_mdio_write(tp, 0x14, 0xbf87); + rtl8168_mdio_write(tp, 0x14, 0xf302); + rtl8168_mdio_write(tp, 0x14, 0x4259); + rtl8168_mdio_write(tp, 0x14, 0x0287); + rtl8168_mdio_write(tp, 0x14, 0x88bf); + rtl8168_mdio_write(tp, 0x14, 0x87cf); + rtl8168_mdio_write(tp, 0x14, 0xd7b8); + rtl8168_mdio_write(tp, 0x14, 0x22d0); + rtl8168_mdio_write(tp, 0x14, 0x0c02); + rtl8168_mdio_write(tp, 0x14, 0x4252); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xcda0); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xce8b); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xd1f5); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xd2a9); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xd30a); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xf010); + rtl8168_mdio_write(tp, 0x14, 0xee80); + rtl8168_mdio_write(tp, 0x14, 0xf38f); + rtl8168_mdio_write(tp, 0x14, 0xee81); + rtl8168_mdio_write(tp, 0x14, 0x011e); + rtl8168_mdio_write(tp, 0x14, 0xee81); + rtl8168_mdio_write(tp, 0x14, 0x0b4a); + rtl8168_mdio_write(tp, 0x14, 0xee81); + rtl8168_mdio_write(tp, 0x14, 0x0c7c); + rtl8168_mdio_write(tp, 0x14, 0xee81); + rtl8168_mdio_write(tp, 0x14, 0x127f); + rtl8168_mdio_write(tp, 0x14, 0xd100); + rtl8168_mdio_write(tp, 0x14, 0x0210); + rtl8168_mdio_write(tp, 0x14, 0xb5ee); + rtl8168_mdio_write(tp, 0x14, 0x8088); + rtl8168_mdio_write(tp, 0x14, 0xa4ee); + rtl8168_mdio_write(tp, 0x14, 0x8089); + rtl8168_mdio_write(tp, 0x14, 0x44ee); + rtl8168_mdio_write(tp, 0x14, 0x809a); + rtl8168_mdio_write(tp, 0x14, 0xa4ee); + rtl8168_mdio_write(tp, 0x14, 0x809b); + rtl8168_mdio_write(tp, 0x14, 0x44ee); + rtl8168_mdio_write(tp, 0x14, 0x809c); + rtl8168_mdio_write(tp, 0x14, 0xa7ee); + rtl8168_mdio_write(tp, 0x14, 0x80a5); + rtl8168_mdio_write(tp, 0x14, 0xa7d2); + rtl8168_mdio_write(tp, 0x14, 0x0002); + rtl8168_mdio_write(tp, 0x14, 0x0e66); + rtl8168_mdio_write(tp, 0x14, 0x0285); + rtl8168_mdio_write(tp, 0x14, 0xc0ee); + rtl8168_mdio_write(tp, 0x14, 0x87fc); + rtl8168_mdio_write(tp, 0x14, 0x00e0); + rtl8168_mdio_write(tp, 0x14, 0x8245); + rtl8168_mdio_write(tp, 0x14, 0xf622); + rtl8168_mdio_write(tp, 0x14, 0xe482); + rtl8168_mdio_write(tp, 0x14, 0x45ef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfdfc); + rtl8168_mdio_write(tp, 0x14, 0x0402); + rtl8168_mdio_write(tp, 0x14, 0x847a); + rtl8168_mdio_write(tp, 0x14, 0x0284); + rtl8168_mdio_write(tp, 0x14, 0xb302); + rtl8168_mdio_write(tp, 0x14, 0x0cab); + rtl8168_mdio_write(tp, 0x14, 0x020c); + rtl8168_mdio_write(tp, 0x14, 0xc402); + rtl8168_mdio_write(tp, 0x14, 0x0cef); + rtl8168_mdio_write(tp, 0x14, 0x020d); + rtl8168_mdio_write(tp, 0x14, 
0x0802); + rtl8168_mdio_write(tp, 0x14, 0x0d33); + rtl8168_mdio_write(tp, 0x14, 0x020c); + rtl8168_mdio_write(tp, 0x14, 0x3d04); + rtl8168_mdio_write(tp, 0x14, 0xf8fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xe182); + rtl8168_mdio_write(tp, 0x14, 0x2fac); + rtl8168_mdio_write(tp, 0x14, 0x291a); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x24ac); + rtl8168_mdio_write(tp, 0x14, 0x2102); + rtl8168_mdio_write(tp, 0x14, 0xae22); + rtl8168_mdio_write(tp, 0x14, 0x0210); + rtl8168_mdio_write(tp, 0x14, 0x57f6); + rtl8168_mdio_write(tp, 0x14, 0x21e4); + rtl8168_mdio_write(tp, 0x14, 0x8224); + rtl8168_mdio_write(tp, 0x14, 0xd101); + rtl8168_mdio_write(tp, 0x14, 0xbf44); + rtl8168_mdio_write(tp, 0x14, 0xd202); + rtl8168_mdio_write(tp, 0x14, 0x4259); + rtl8168_mdio_write(tp, 0x14, 0xae10); + rtl8168_mdio_write(tp, 0x14, 0x0212); + rtl8168_mdio_write(tp, 0x14, 0x4cf6); + rtl8168_mdio_write(tp, 0x14, 0x29e5); + rtl8168_mdio_write(tp, 0x14, 0x822f); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x24f6); + rtl8168_mdio_write(tp, 0x14, 0x21e4); + rtl8168_mdio_write(tp, 0x14, 0x8224); + rtl8168_mdio_write(tp, 0x14, 0xef96); + rtl8168_mdio_write(tp, 0x14, 0xfefc); + rtl8168_mdio_write(tp, 0x14, 0x04f8); + rtl8168_mdio_write(tp, 0x14, 0xe182); + rtl8168_mdio_write(tp, 0x14, 0x2fac); + rtl8168_mdio_write(tp, 0x14, 0x2a18); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x24ac); + rtl8168_mdio_write(tp, 0x14, 0x2202); + rtl8168_mdio_write(tp, 0x14, 0xae26); + rtl8168_mdio_write(tp, 0x14, 0x0284); + rtl8168_mdio_write(tp, 0x14, 0xf802); + rtl8168_mdio_write(tp, 0x14, 0x8565); + rtl8168_mdio_write(tp, 0x14, 0xd101); + rtl8168_mdio_write(tp, 0x14, 0xbf44); + rtl8168_mdio_write(tp, 0x14, 0xd502); + rtl8168_mdio_write(tp, 0x14, 0x4259); + rtl8168_mdio_write(tp, 0x14, 0xae0e); + rtl8168_mdio_write(tp, 0x14, 0x0284); + rtl8168_mdio_write(tp, 0x14, 0xea02); + rtl8168_mdio_write(tp, 0x14, 0x85a9); + rtl8168_mdio_write(tp, 0x14, 0xe182); + rtl8168_mdio_write(tp, 0x14, 0x2ff6); + rtl8168_mdio_write(tp, 0x14, 0x2ae5); + rtl8168_mdio_write(tp, 0x14, 0x822f); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x24f6); + rtl8168_mdio_write(tp, 0x14, 0x22e4); + rtl8168_mdio_write(tp, 0x14, 0x8224); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xf9e2); + rtl8168_mdio_write(tp, 0x14, 0x8011); + rtl8168_mdio_write(tp, 0x14, 0xad31); + rtl8168_mdio_write(tp, 0x14, 0x05d2); + rtl8168_mdio_write(tp, 0x14, 0x0002); + rtl8168_mdio_write(tp, 0x14, 0x0e66); + rtl8168_mdio_write(tp, 0x14, 0xfd04); + rtl8168_mdio_write(tp, 0x14, 0xf8f9); + rtl8168_mdio_write(tp, 0x14, 0xfaef); + rtl8168_mdio_write(tp, 0x14, 0x69e0); + rtl8168_mdio_write(tp, 0x14, 0x8011); + rtl8168_mdio_write(tp, 0x14, 0xad21); + rtl8168_mdio_write(tp, 0x14, 0x5cbf); + rtl8168_mdio_write(tp, 0x14, 0x43be); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97ac); + rtl8168_mdio_write(tp, 0x14, 0x281b); + rtl8168_mdio_write(tp, 0x14, 0xbf43); + rtl8168_mdio_write(tp, 0x14, 0xc102); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0xac28); + rtl8168_mdio_write(tp, 0x14, 0x12bf); + rtl8168_mdio_write(tp, 0x14, 0x43c7); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97ac); + rtl8168_mdio_write(tp, 0x14, 0x2804); + rtl8168_mdio_write(tp, 0x14, 0xd300); + rtl8168_mdio_write(tp, 0x14, 0xae07); + rtl8168_mdio_write(tp, 0x14, 0xd306); + rtl8168_mdio_write(tp, 
0x14, 0xaf85); + rtl8168_mdio_write(tp, 0x14, 0x56d3); + rtl8168_mdio_write(tp, 0x14, 0x03e0); + rtl8168_mdio_write(tp, 0x14, 0x8011); + rtl8168_mdio_write(tp, 0x14, 0xad26); + rtl8168_mdio_write(tp, 0x14, 0x25bf); + rtl8168_mdio_write(tp, 0x14, 0x4559); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97e2); + rtl8168_mdio_write(tp, 0x14, 0x8073); + rtl8168_mdio_write(tp, 0x14, 0x0d21); + rtl8168_mdio_write(tp, 0x14, 0xf637); + rtl8168_mdio_write(tp, 0x14, 0x0d11); + rtl8168_mdio_write(tp, 0x14, 0xf62f); + rtl8168_mdio_write(tp, 0x14, 0x1b21); + rtl8168_mdio_write(tp, 0x14, 0xaa02); + rtl8168_mdio_write(tp, 0x14, 0xae10); + rtl8168_mdio_write(tp, 0x14, 0xe280); + rtl8168_mdio_write(tp, 0x14, 0x740d); + rtl8168_mdio_write(tp, 0x14, 0x21f6); + rtl8168_mdio_write(tp, 0x14, 0x371b); + rtl8168_mdio_write(tp, 0x14, 0x21aa); + rtl8168_mdio_write(tp, 0x14, 0x0313); + rtl8168_mdio_write(tp, 0x14, 0xae02); + rtl8168_mdio_write(tp, 0x14, 0x2b02); + rtl8168_mdio_write(tp, 0x14, 0x020e); + rtl8168_mdio_write(tp, 0x14, 0x5102); + rtl8168_mdio_write(tp, 0x14, 0x0e66); + rtl8168_mdio_write(tp, 0x14, 0x020f); + rtl8168_mdio_write(tp, 0x14, 0xa3ef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfdfc); + rtl8168_mdio_write(tp, 0x14, 0x04f8); + rtl8168_mdio_write(tp, 0x14, 0xf9fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xe080); + rtl8168_mdio_write(tp, 0x14, 0x12ad); + rtl8168_mdio_write(tp, 0x14, 0x2733); + rtl8168_mdio_write(tp, 0x14, 0xbf43); + rtl8168_mdio_write(tp, 0x14, 0xbe02); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0xac28); + rtl8168_mdio_write(tp, 0x14, 0x09bf); + rtl8168_mdio_write(tp, 0x14, 0x43c1); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97ad); + rtl8168_mdio_write(tp, 0x14, 0x2821); + rtl8168_mdio_write(tp, 0x14, 0xbf45); + rtl8168_mdio_write(tp, 0x14, 0x5902); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0xe387); + rtl8168_mdio_write(tp, 0x14, 0xffd2); + rtl8168_mdio_write(tp, 0x14, 0x001b); + rtl8168_mdio_write(tp, 0x14, 0x45ac); + rtl8168_mdio_write(tp, 0x14, 0x2711); + rtl8168_mdio_write(tp, 0x14, 0xe187); + rtl8168_mdio_write(tp, 0x14, 0xfebf); + rtl8168_mdio_write(tp, 0x14, 0x87e4); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x590d); + rtl8168_mdio_write(tp, 0x14, 0x11bf); + rtl8168_mdio_write(tp, 0x14, 0x87e7); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59ef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfdfc); + rtl8168_mdio_write(tp, 0x14, 0x04f8); + rtl8168_mdio_write(tp, 0x14, 0xfaef); + rtl8168_mdio_write(tp, 0x14, 0x69d1); + rtl8168_mdio_write(tp, 0x14, 0x00bf); + rtl8168_mdio_write(tp, 0x14, 0x87e4); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59bf); + rtl8168_mdio_write(tp, 0x14, 0x87e7); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59ef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xee87); + rtl8168_mdio_write(tp, 0x14, 0xff46); + rtl8168_mdio_write(tp, 0x14, 0xee87); + rtl8168_mdio_write(tp, 0x14, 0xfe01); + rtl8168_mdio_write(tp, 0x14, 0x04f8); + rtl8168_mdio_write(tp, 0x14, 0xfaef); + rtl8168_mdio_write(tp, 0x14, 0x69e0); + rtl8168_mdio_write(tp, 0x14, 0x8241); + rtl8168_mdio_write(tp, 0x14, 0xa000); + rtl8168_mdio_write(tp, 0x14, 0x0502); + rtl8168_mdio_write(tp, 0x14, 0x85eb); + 
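+ /* Opaque vendor bytes; the recurring 0xbfXX.. / 0x0242, 0x4297-style pairs look like the load-pointer-then-call idiom seen throughout these patch tables. */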
rtl8168_mdio_write(tp, 0x14, 0xae0e); + rtl8168_mdio_write(tp, 0x14, 0xa001); + rtl8168_mdio_write(tp, 0x14, 0x0502); + rtl8168_mdio_write(tp, 0x14, 0x1a5a); + rtl8168_mdio_write(tp, 0x14, 0xae06); + rtl8168_mdio_write(tp, 0x14, 0xa002); + rtl8168_mdio_write(tp, 0x14, 0x0302); + rtl8168_mdio_write(tp, 0x14, 0x1ae6); + rtl8168_mdio_write(tp, 0x14, 0xef96); + rtl8168_mdio_write(tp, 0x14, 0xfefc); + rtl8168_mdio_write(tp, 0x14, 0x04f8); + rtl8168_mdio_write(tp, 0x14, 0xf9fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x29f6); + rtl8168_mdio_write(tp, 0x14, 0x21e4); + rtl8168_mdio_write(tp, 0x14, 0x8229); + rtl8168_mdio_write(tp, 0x14, 0xe080); + rtl8168_mdio_write(tp, 0x14, 0x10ac); + rtl8168_mdio_write(tp, 0x14, 0x2202); + rtl8168_mdio_write(tp, 0x14, 0xae76); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x27f7); + rtl8168_mdio_write(tp, 0x14, 0x21e4); + rtl8168_mdio_write(tp, 0x14, 0x8227); + rtl8168_mdio_write(tp, 0x14, 0xbf43); + rtl8168_mdio_write(tp, 0x14, 0x1302); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0xef21); + rtl8168_mdio_write(tp, 0x14, 0xbf43); + rtl8168_mdio_write(tp, 0x14, 0x1602); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0x0c11); + rtl8168_mdio_write(tp, 0x14, 0x1e21); + rtl8168_mdio_write(tp, 0x14, 0xbf43); + rtl8168_mdio_write(tp, 0x14, 0x1902); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0x0c12); + rtl8168_mdio_write(tp, 0x14, 0x1e21); + rtl8168_mdio_write(tp, 0x14, 0xe682); + rtl8168_mdio_write(tp, 0x14, 0x43a2); + rtl8168_mdio_write(tp, 0x14, 0x000a); + rtl8168_mdio_write(tp, 0x14, 0xe182); + rtl8168_mdio_write(tp, 0x14, 0x27f6); + rtl8168_mdio_write(tp, 0x14, 0x29e5); + rtl8168_mdio_write(tp, 0x14, 0x8227); + rtl8168_mdio_write(tp, 0x14, 0xae42); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x44f7); + rtl8168_mdio_write(tp, 0x14, 0x21e4); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x0246); + rtl8168_mdio_write(tp, 0x14, 0xaebf); + rtl8168_mdio_write(tp, 0x14, 0x4325); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97ef); + rtl8168_mdio_write(tp, 0x14, 0x21bf); + rtl8168_mdio_write(tp, 0x14, 0x431c); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x970c); + rtl8168_mdio_write(tp, 0x14, 0x121e); + rtl8168_mdio_write(tp, 0x14, 0x21bf); + rtl8168_mdio_write(tp, 0x14, 0x431f); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x970c); + rtl8168_mdio_write(tp, 0x14, 0x131e); + rtl8168_mdio_write(tp, 0x14, 0x21bf); + rtl8168_mdio_write(tp, 0x14, 0x4328); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x970c); + rtl8168_mdio_write(tp, 0x14, 0x141e); + rtl8168_mdio_write(tp, 0x14, 0x21bf); + rtl8168_mdio_write(tp, 0x14, 0x44b1); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x970c); + rtl8168_mdio_write(tp, 0x14, 0x161e); + rtl8168_mdio_write(tp, 0x14, 0x21e6); + rtl8168_mdio_write(tp, 0x14, 0x8242); + rtl8168_mdio_write(tp, 0x14, 0xee82); + rtl8168_mdio_write(tp, 0x14, 0x4101); + rtl8168_mdio_write(tp, 0x14, 0xef96); + rtl8168_mdio_write(tp, 0x14, 0xfefd); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xf8fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x46a0); + rtl8168_mdio_write(tp, 0x14, 0x0005); + rtl8168_mdio_write(tp, 0x14, 
0x0286); + rtl8168_mdio_write(tp, 0x14, 0x96ae); + rtl8168_mdio_write(tp, 0x14, 0x06a0); + rtl8168_mdio_write(tp, 0x14, 0x0103); + rtl8168_mdio_write(tp, 0x14, 0x0219); + rtl8168_mdio_write(tp, 0x14, 0x19ef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xf8fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x29f6); + rtl8168_mdio_write(tp, 0x14, 0x20e4); + rtl8168_mdio_write(tp, 0x14, 0x8229); + rtl8168_mdio_write(tp, 0x14, 0xe080); + rtl8168_mdio_write(tp, 0x14, 0x10ac); + rtl8168_mdio_write(tp, 0x14, 0x2102); + rtl8168_mdio_write(tp, 0x14, 0xae54); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x27f7); + rtl8168_mdio_write(tp, 0x14, 0x20e4); + rtl8168_mdio_write(tp, 0x14, 0x8227); + rtl8168_mdio_write(tp, 0x14, 0xbf42); + rtl8168_mdio_write(tp, 0x14, 0xe602); + rtl8168_mdio_write(tp, 0x14, 0x4297); + rtl8168_mdio_write(tp, 0x14, 0xac28); + rtl8168_mdio_write(tp, 0x14, 0x22bf); + rtl8168_mdio_write(tp, 0x14, 0x430d); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97e5); + rtl8168_mdio_write(tp, 0x14, 0x8247); + rtl8168_mdio_write(tp, 0x14, 0xac28); + rtl8168_mdio_write(tp, 0x14, 0x20d1); + rtl8168_mdio_write(tp, 0x14, 0x03bf); + rtl8168_mdio_write(tp, 0x14, 0x4307); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59ee); + rtl8168_mdio_write(tp, 0x14, 0x8246); + rtl8168_mdio_write(tp, 0x14, 0x00e1); + rtl8168_mdio_write(tp, 0x14, 0x8227); + rtl8168_mdio_write(tp, 0x14, 0xf628); + rtl8168_mdio_write(tp, 0x14, 0xe582); + rtl8168_mdio_write(tp, 0x14, 0x27ae); + rtl8168_mdio_write(tp, 0x14, 0x21d1); + rtl8168_mdio_write(tp, 0x14, 0x04bf); + rtl8168_mdio_write(tp, 0x14, 0x4307); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59ae); + rtl8168_mdio_write(tp, 0x14, 0x08d1); + rtl8168_mdio_write(tp, 0x14, 0x05bf); + rtl8168_mdio_write(tp, 0x14, 0x4307); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59e0); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0xf720); + rtl8168_mdio_write(tp, 0x14, 0xe482); + rtl8168_mdio_write(tp, 0x14, 0x4402); + rtl8168_mdio_write(tp, 0x14, 0x46ae); + rtl8168_mdio_write(tp, 0x14, 0xee82); + rtl8168_mdio_write(tp, 0x14, 0x4601); + rtl8168_mdio_write(tp, 0x14, 0xef96); + rtl8168_mdio_write(tp, 0x14, 0xfefc); + rtl8168_mdio_write(tp, 0x14, 0x04f8); + rtl8168_mdio_write(tp, 0x14, 0xfaef); + rtl8168_mdio_write(tp, 0x14, 0x69e0); + rtl8168_mdio_write(tp, 0x14, 0x8013); + rtl8168_mdio_write(tp, 0x14, 0xad24); + rtl8168_mdio_write(tp, 0x14, 0x1cbf); + rtl8168_mdio_write(tp, 0x14, 0x87f0); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x97ad); + rtl8168_mdio_write(tp, 0x14, 0x2813); + rtl8168_mdio_write(tp, 0x14, 0xe087); + rtl8168_mdio_write(tp, 0x14, 0xfca0); + rtl8168_mdio_write(tp, 0x14, 0x0005); + rtl8168_mdio_write(tp, 0x14, 0x0287); + rtl8168_mdio_write(tp, 0x14, 0x36ae); + rtl8168_mdio_write(tp, 0x14, 0x10a0); + rtl8168_mdio_write(tp, 0x14, 0x0105); + rtl8168_mdio_write(tp, 0x14, 0x0287); + rtl8168_mdio_write(tp, 0x14, 0x48ae); + rtl8168_mdio_write(tp, 0x14, 0x08e0); + rtl8168_mdio_write(tp, 0x14, 0x8230); + rtl8168_mdio_write(tp, 0x14, 0xf626); + rtl8168_mdio_write(tp, 0x14, 0xe482); + rtl8168_mdio_write(tp, 0x14, 0x30ef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xf8e0); + rtl8168_mdio_write(tp, 
0x14, 0x8245); + rtl8168_mdio_write(tp, 0x14, 0xf722); + rtl8168_mdio_write(tp, 0x14, 0xe482); + rtl8168_mdio_write(tp, 0x14, 0x4502); + rtl8168_mdio_write(tp, 0x14, 0x46ae); + rtl8168_mdio_write(tp, 0x14, 0xee87); + rtl8168_mdio_write(tp, 0x14, 0xfc01); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xf8fa); + rtl8168_mdio_write(tp, 0x14, 0xef69); + rtl8168_mdio_write(tp, 0x14, 0xfb02); + rtl8168_mdio_write(tp, 0x14, 0x46d3); + rtl8168_mdio_write(tp, 0x14, 0xad50); + rtl8168_mdio_write(tp, 0x14, 0x2fbf); + rtl8168_mdio_write(tp, 0x14, 0x87ed); + rtl8168_mdio_write(tp, 0x14, 0xd101); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59bf); + rtl8168_mdio_write(tp, 0x14, 0x87ed); + rtl8168_mdio_write(tp, 0x14, 0xd100); + rtl8168_mdio_write(tp, 0x14, 0x0242); + rtl8168_mdio_write(tp, 0x14, 0x59e0); + rtl8168_mdio_write(tp, 0x14, 0x8245); + rtl8168_mdio_write(tp, 0x14, 0xf622); + rtl8168_mdio_write(tp, 0x14, 0xe482); + rtl8168_mdio_write(tp, 0x14, 0x4502); + rtl8168_mdio_write(tp, 0x14, 0x46ae); + rtl8168_mdio_write(tp, 0x14, 0xd100); + rtl8168_mdio_write(tp, 0x14, 0xbf87); + rtl8168_mdio_write(tp, 0x14, 0xf002); + rtl8168_mdio_write(tp, 0x14, 0x4259); + rtl8168_mdio_write(tp, 0x14, 0xee87); + rtl8168_mdio_write(tp, 0x14, 0xfc00); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x30f6); + rtl8168_mdio_write(tp, 0x14, 0x26e4); + rtl8168_mdio_write(tp, 0x14, 0x8230); + rtl8168_mdio_write(tp, 0x14, 0xffef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xfc04); + rtl8168_mdio_write(tp, 0x14, 0xf8f9); + rtl8168_mdio_write(tp, 0x14, 0xface); + rtl8168_mdio_write(tp, 0x14, 0xfaef); + rtl8168_mdio_write(tp, 0x14, 0x69fb); + rtl8168_mdio_write(tp, 0x14, 0xbf87); + rtl8168_mdio_write(tp, 0x14, 0xb3d7); + rtl8168_mdio_write(tp, 0x14, 0x001c); + rtl8168_mdio_write(tp, 0x14, 0xd819); + rtl8168_mdio_write(tp, 0x14, 0xd919); + rtl8168_mdio_write(tp, 0x14, 0xda19); + rtl8168_mdio_write(tp, 0x14, 0xdb19); + rtl8168_mdio_write(tp, 0x14, 0x07ef); + rtl8168_mdio_write(tp, 0x14, 0x9502); + rtl8168_mdio_write(tp, 0x14, 0x4259); + rtl8168_mdio_write(tp, 0x14, 0x073f); + rtl8168_mdio_write(tp, 0x14, 0x0004); + rtl8168_mdio_write(tp, 0x14, 0x9fec); + rtl8168_mdio_write(tp, 0x14, 0xffef); + rtl8168_mdio_write(tp, 0x14, 0x96fe); + rtl8168_mdio_write(tp, 0x14, 0xc6fe); + rtl8168_mdio_write(tp, 0x14, 0xfdfc); + rtl8168_mdio_write(tp, 0x14, 0x0400); + rtl8168_mdio_write(tp, 0x14, 0x0145); + rtl8168_mdio_write(tp, 0x14, 0x7d00); + rtl8168_mdio_write(tp, 0x14, 0x0345); + rtl8168_mdio_write(tp, 0x14, 0x5c00); + rtl8168_mdio_write(tp, 0x14, 0x0143); + rtl8168_mdio_write(tp, 0x14, 0x4f00); + rtl8168_mdio_write(tp, 0x14, 0x0387); + rtl8168_mdio_write(tp, 0x14, 0xdb00); + rtl8168_mdio_write(tp, 0x14, 0x0987); + rtl8168_mdio_write(tp, 0x14, 0xde00); + rtl8168_mdio_write(tp, 0x14, 0x0987); + rtl8168_mdio_write(tp, 0x14, 0xe100); + rtl8168_mdio_write(tp, 0x14, 0x0087); + rtl8168_mdio_write(tp, 0x14, 0xeaa4); + rtl8168_mdio_write(tp, 0x14, 0x00b8); + rtl8168_mdio_write(tp, 0x14, 0x20c4); + rtl8168_mdio_write(tp, 0x14, 0x1600); + rtl8168_mdio_write(tp, 0x14, 0x000f); + rtl8168_mdio_write(tp, 0x14, 0xf800); + rtl8168_mdio_write(tp, 0x14, 0x7098); + rtl8168_mdio_write(tp, 0x14, 0xa58a); + rtl8168_mdio_write(tp, 0x14, 0xb6a8); + rtl8168_mdio_write(tp, 0x14, 0x3e50); + rtl8168_mdio_write(tp, 0x14, 0xa83e); + rtl8168_mdio_write(tp, 0x14, 0x33bc); + rtl8168_mdio_write(tp, 0x14, 0xc622); + rtl8168_mdio_write(tp, 0x14, 0xbcc6); + 
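+ /* Tail of the patch image: data words keep streaming through the paged 0x13 (address) / 0x14 (data) window, which appears to auto-increment the target address after each data write. */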
rtl8168_mdio_write(tp, 0x14, 0xaaa4); + rtl8168_mdio_write(tp, 0x14, 0x42ff); + rtl8168_mdio_write(tp, 0x14, 0xc408); + rtl8168_mdio_write(tp, 0x14, 0x00c4); + rtl8168_mdio_write(tp, 0x14, 0x16a8); + rtl8168_mdio_write(tp, 0x14, 0xbcc0); + rtl8168_mdio_write(tp, 0x13, 0xb818); + rtl8168_mdio_write(tp, 0x14, 0x02f3); + rtl8168_mdio_write(tp, 0x13, 0xb81a); + rtl8168_mdio_write(tp, 0x14, 0x17d1); + rtl8168_mdio_write(tp, 0x13, 0xb81c); + rtl8168_mdio_write(tp, 0x14, 0x185a); + rtl8168_mdio_write(tp, 0x13, 0xb81e); + rtl8168_mdio_write(tp, 0x14, 0x3c66); + rtl8168_mdio_write(tp, 0x13, 0xb820); + rtl8168_mdio_write(tp, 0x14, 0x021f); + rtl8168_mdio_write(tp, 0x13, 0xc416); + rtl8168_mdio_write(tp, 0x14, 0x0500); + rtl8168_mdio_write(tp, 0x13, 0xb82e); + rtl8168_mdio_write(tp, 0x14, 0xfffc); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + gphy_val = rtl8168_mdio_read(tp, 0x10); + gphy_val &= ~(BIT_9); + rtl8168_mdio_write(tp, 0x10, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8146); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_clear_phy_mcu_patch_request(tp); +} + +static void +rtl8168_set_phy_mcu_8168gu_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val; + + rtl8168_set_phy_mcu_patch_request(tp); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8146); + rtl8168_mdio_write(tp, 0x14, 0x0300); + rtl8168_mdio_write(tp, 0x13, 0xB82E); + rtl8168_mdio_write(tp, 0x14, 0x0001); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0xb820); + rtl8168_mdio_write(tp, 0x14, 0x0290); + rtl8168_mdio_write(tp, 0x13, 0xa012); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xa014); + rtl8168_mdio_write(tp, 0x14, 0x2c04); + rtl8168_mdio_write(tp, 0x14, 0x2c07); + rtl8168_mdio_write(tp, 0x14, 0x2c07); + rtl8168_mdio_write(tp, 0x14, 0x2c07); + rtl8168_mdio_write(tp, 0x14, 0xa304); + rtl8168_mdio_write(tp, 0x14, 0xa301); + rtl8168_mdio_write(tp, 0x14, 0x207e); + rtl8168_mdio_write(tp, 0x13, 0xa01a); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xa006); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xa004); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xa002); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xa000); + rtl8168_mdio_write(tp, 0x14, 0x107c); + rtl8168_mdio_write(tp, 0x13, 0xb820); + rtl8168_mdio_write(tp, 0x14, 0x0210); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8146); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_clear_phy_mcu_patch_request(tp); +} + +static void +rtl8168_set_phy_mcu_8411b_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val; + + rtl8168_set_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp,0x1f, 0x0A43); + rtl8168_mdio_write(tp,0x13, 0x8146); + rtl8168_mdio_write(tp,0x14, 0x0100); + rtl8168_mdio_write(tp,0x13, 0xB82E); + rtl8168_mdio_write(tp,0x14, 0x0001); + + + rtl8168_mdio_write(tp,0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0xb820); + rtl8168_mdio_write(tp, 
0x14, 0x0290); + rtl8168_mdio_write(tp, 0x13, 0xa012); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xa014); + rtl8168_mdio_write(tp, 0x14, 0x2c04); + rtl8168_mdio_write(tp, 0x14, 0x2c07); + rtl8168_mdio_write(tp, 0x14, 0x2c07); + rtl8168_mdio_write(tp, 0x14, 0x2c07); + rtl8168_mdio_write(tp, 0x14, 0xa304); + rtl8168_mdio_write(tp, 0x14, 0xa301); + rtl8168_mdio_write(tp, 0x14, 0x207e); + rtl8168_mdio_write(tp, 0x13, 0xa01a); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xa006); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xa004); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xa002); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xa000); + rtl8168_mdio_write(tp, 0x14, 0x107c); + rtl8168_mdio_write(tp, 0x13, 0xb820); + rtl8168_mdio_write(tp, 0x14, 0x0210); + + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8146); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_clear_phy_mcu_patch_request(tp); +} + +static void +rtl8168_set_phy_mcu_8168ep_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val; + + rtl8168_set_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp,0x1f, 0x0A43); + rtl8168_mdio_write(tp,0x13, 0x8146); + rtl8168_mdio_write(tp,0x14, 0x8700); + rtl8168_mdio_write(tp,0x13, 0xB82E); + rtl8168_mdio_write(tp,0x14, 0x0001); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + + rtl8168_mdio_write(tp, 0x13, 0x83DD); + rtl8168_mdio_write(tp, 0x14, 0xAF83); + rtl8168_mdio_write(tp, 0x14, 0xE9AF); + rtl8168_mdio_write(tp, 0x14, 0x83EE); + rtl8168_mdio_write(tp, 0x14, 0xAF83); + rtl8168_mdio_write(tp, 0x14, 0xF1A1); + rtl8168_mdio_write(tp, 0x14, 0x83F4); + rtl8168_mdio_write(tp, 0x14, 0xD149); + rtl8168_mdio_write(tp, 0x14, 0xAF06); + rtl8168_mdio_write(tp, 0x14, 0x47AF); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0xAF00); + rtl8168_mdio_write(tp, 0x14, 0x00AF); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_mdio_write(tp, 0x13, 0xB818); + rtl8168_mdio_write(tp, 0x14, 0x0645); + + rtl8168_mdio_write(tp, 0x13, 0xB81A); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_mdio_write(tp, 0x13, 0xB81C); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_mdio_write(tp, 0x13, 0xB81E); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_mdio_write(tp, 0x13, 0xB832); + rtl8168_mdio_write(tp, 0x14, 0x0001); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8146); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_clear_phy_mcu_patch_request(tp); +} + +static void +rtl8168_set_phy_mcu_8168h_1(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val; + + rtl8168_set_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8028); + rtl8168_mdio_write(tp, 0x14, 0x6200); + rtl8168_mdio_write(tp, 0x13, 0xB82E); + rtl8168_mdio_write(tp, 0x14, 
0x0001); + + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0290); + rtl8168_mdio_write(tp, 0x13, 0xA012); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xA014); + rtl8168_mdio_write(tp, 0x14, 0x2c04); + rtl8168_mdio_write(tp, 0x14, 0x2c10); + rtl8168_mdio_write(tp, 0x14, 0x2c10); + rtl8168_mdio_write(tp, 0x14, 0x2c10); + rtl8168_mdio_write(tp, 0x14, 0xa210); + rtl8168_mdio_write(tp, 0x14, 0xa101); + rtl8168_mdio_write(tp, 0x14, 0xce10); + rtl8168_mdio_write(tp, 0x14, 0xe070); + rtl8168_mdio_write(tp, 0x14, 0x0f40); + rtl8168_mdio_write(tp, 0x14, 0xaf01); + rtl8168_mdio_write(tp, 0x14, 0x8f01); + rtl8168_mdio_write(tp, 0x14, 0x183e); + rtl8168_mdio_write(tp, 0x14, 0x8e10); + rtl8168_mdio_write(tp, 0x14, 0x8101); + rtl8168_mdio_write(tp, 0x14, 0x8210); + rtl8168_mdio_write(tp, 0x14, 0x28da); + rtl8168_mdio_write(tp, 0x13, 0xA01A); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xA006); + rtl8168_mdio_write(tp, 0x14, 0x0017); + rtl8168_mdio_write(tp, 0x13, 0xA004); + rtl8168_mdio_write(tp, 0x14, 0x0015); + rtl8168_mdio_write(tp, 0x13, 0xA002); + rtl8168_mdio_write(tp, 0x14, 0x0013); + rtl8168_mdio_write(tp, 0x13, 0xA000); + rtl8168_mdio_write(tp, 0x14, 0x18d1); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0210); + + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8028); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_clear_phy_mcu_patch_request(tp); +} + +static void +rtl8168_set_phy_mcu_8168h_2(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int gphy_val; + + rtl8168_set_phy_mcu_patch_request(tp); + + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8028); + rtl8168_mdio_write(tp, 0x14, 0x6201); + rtl8168_mdio_write(tp, 0x13, 0xB82E); + rtl8168_mdio_write(tp, 0x14, 0x0001); + + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0290); + rtl8168_mdio_write(tp, 0x13, 0xA012); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xA014); + rtl8168_mdio_write(tp, 0x14, 0x2c04); + rtl8168_mdio_write(tp, 0x14, 0x2c09); + rtl8168_mdio_write(tp, 0x14, 0x2c09); + rtl8168_mdio_write(tp, 0x14, 0x2c09); + rtl8168_mdio_write(tp, 0x14, 0xad01); + rtl8168_mdio_write(tp, 0x14, 0xad01); + rtl8168_mdio_write(tp, 0x14, 0xad01); + rtl8168_mdio_write(tp, 0x14, 0xad01); + rtl8168_mdio_write(tp, 0x14, 0x236c); + rtl8168_mdio_write(tp, 0x13, 0xA01A); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x13, 0xA006); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xA004); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xA002); + rtl8168_mdio_write(tp, 0x14, 0x0fff); + rtl8168_mdio_write(tp, 0x13, 0xA000); + rtl8168_mdio_write(tp, 0x14, 0x136b); + rtl8168_mdio_write(tp, 0x13, 0xB820); + rtl8168_mdio_write(tp, 0x14, 0x0210); + + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8323); + rtl8168_mdio_write(tp, 0x14, 0xaf83); + rtl8168_mdio_write(tp, 0x14, 0x2faf); + rtl8168_mdio_write(tp, 0x14, 0x853d); + rtl8168_mdio_write(tp, 0x14, 0xaf85); + 
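+ /* The 0x14 values here are raw PHY MCU instruction words, not individual register settings; they are only meaningful as a block loaded starting at the address selected by the preceding 0x13 write (0x8323). */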
rtl8168_mdio_write(tp, 0x14, 0x3daf); + rtl8168_mdio_write(tp, 0x14, 0x853d); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x45ad); + rtl8168_mdio_write(tp, 0x14, 0x2052); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7ae3); + rtl8168_mdio_write(tp, 0x14, 0x85fe); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f6); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7a1b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fa); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7be3); + rtl8168_mdio_write(tp, 0x14, 0x85fe); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f7); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7b1b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fb); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7ce3); + rtl8168_mdio_write(tp, 0x14, 0x85fe); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f8); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7c1b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fc); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7de3); + rtl8168_mdio_write(tp, 0x14, 0x85fe); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f9); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7d1b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fd); + rtl8168_mdio_write(tp, 0x14, 0xae50); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7ee3); + rtl8168_mdio_write(tp, 0x14, 0x85ff); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f6); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7e1b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fa); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7fe3); + rtl8168_mdio_write(tp, 0x14, 0x85ff); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f7); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x7f1b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fb); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x80e3); + rtl8168_mdio_write(tp, 0x14, 0x85ff); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f8); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x801b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fc); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x81e3); + rtl8168_mdio_write(tp, 0x14, 0x85ff); + rtl8168_mdio_write(tp, 0x14, 0x1a03); + rtl8168_mdio_write(tp, 0x14, 0x10e4); + rtl8168_mdio_write(tp, 0x14, 0x85f9); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x811b); + rtl8168_mdio_write(tp, 0x14, 0x03e4); + rtl8168_mdio_write(tp, 0x14, 0x85fd); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf6ad); + rtl8168_mdio_write(tp, 0x14, 
0x2404); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xf610); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf7ad); + rtl8168_mdio_write(tp, 0x14, 0x2404); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xf710); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf8ad); + rtl8168_mdio_write(tp, 0x14, 0x2404); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xf810); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf9ad); + rtl8168_mdio_write(tp, 0x14, 0x2404); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xf910); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfaad); + rtl8168_mdio_write(tp, 0x14, 0x2704); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xfa00); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfbad); + rtl8168_mdio_write(tp, 0x14, 0x2704); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xfb00); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfcad); + rtl8168_mdio_write(tp, 0x14, 0x2704); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xfc00); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfdad); + rtl8168_mdio_write(tp, 0x14, 0x2704); + rtl8168_mdio_write(tp, 0x14, 0xee85); + rtl8168_mdio_write(tp, 0x14, 0xfd00); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x44ad); + rtl8168_mdio_write(tp, 0x14, 0x203f); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf6e4); + rtl8168_mdio_write(tp, 0x14, 0x8288); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfae4); + rtl8168_mdio_write(tp, 0x14, 0x8289); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x440d); + rtl8168_mdio_write(tp, 0x14, 0x0458); + rtl8168_mdio_write(tp, 0x14, 0x01bf); + rtl8168_mdio_write(tp, 0x14, 0x8264); + rtl8168_mdio_write(tp, 0x14, 0x0215); + rtl8168_mdio_write(tp, 0x14, 0x38bf); + rtl8168_mdio_write(tp, 0x14, 0x824e); + rtl8168_mdio_write(tp, 0x14, 0x0213); + rtl8168_mdio_write(tp, 0x14, 0x06a0); + rtl8168_mdio_write(tp, 0x14, 0x010f); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x44f6); + rtl8168_mdio_write(tp, 0x14, 0x20e4); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x580f); + rtl8168_mdio_write(tp, 0x14, 0xe582); + rtl8168_mdio_write(tp, 0x14, 0x5aae); + rtl8168_mdio_write(tp, 0x14, 0x0ebf); + rtl8168_mdio_write(tp, 0x14, 0x825e); + rtl8168_mdio_write(tp, 0x14, 0xe382); + rtl8168_mdio_write(tp, 0x14, 0x44f7); + rtl8168_mdio_write(tp, 0x14, 0x3ce7); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x0212); + rtl8168_mdio_write(tp, 0x14, 0xf0ad); + rtl8168_mdio_write(tp, 0x14, 0x213f); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf7e4); + rtl8168_mdio_write(tp, 0x14, 0x8288); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfbe4); + rtl8168_mdio_write(tp, 0x14, 0x8289); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x440d); + rtl8168_mdio_write(tp, 0x14, 0x0558); + rtl8168_mdio_write(tp, 0x14, 0x01bf); + rtl8168_mdio_write(tp, 0x14, 0x826b); + rtl8168_mdio_write(tp, 0x14, 0x0215); + rtl8168_mdio_write(tp, 0x14, 0x38bf); + rtl8168_mdio_write(tp, 0x14, 0x824f); + rtl8168_mdio_write(tp, 0x14, 0x0213); + rtl8168_mdio_write(tp, 
0x14, 0x06a0); + rtl8168_mdio_write(tp, 0x14, 0x010f); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x44f6); + rtl8168_mdio_write(tp, 0x14, 0x21e4); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x580f); + rtl8168_mdio_write(tp, 0x14, 0xe582); + rtl8168_mdio_write(tp, 0x14, 0x5bae); + rtl8168_mdio_write(tp, 0x14, 0x0ebf); + rtl8168_mdio_write(tp, 0x14, 0x8265); + rtl8168_mdio_write(tp, 0x14, 0xe382); + rtl8168_mdio_write(tp, 0x14, 0x44f7); + rtl8168_mdio_write(tp, 0x14, 0x3de7); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x0212); + rtl8168_mdio_write(tp, 0x14, 0xf0ad); + rtl8168_mdio_write(tp, 0x14, 0x223f); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf8e4); + rtl8168_mdio_write(tp, 0x14, 0x8288); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfce4); + rtl8168_mdio_write(tp, 0x14, 0x8289); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x440d); + rtl8168_mdio_write(tp, 0x14, 0x0658); + rtl8168_mdio_write(tp, 0x14, 0x01bf); + rtl8168_mdio_write(tp, 0x14, 0x8272); + rtl8168_mdio_write(tp, 0x14, 0x0215); + rtl8168_mdio_write(tp, 0x14, 0x38bf); + rtl8168_mdio_write(tp, 0x14, 0x8250); + rtl8168_mdio_write(tp, 0x14, 0x0213); + rtl8168_mdio_write(tp, 0x14, 0x06a0); + rtl8168_mdio_write(tp, 0x14, 0x010f); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x44f6); + rtl8168_mdio_write(tp, 0x14, 0x22e4); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x580f); + rtl8168_mdio_write(tp, 0x14, 0xe582); + rtl8168_mdio_write(tp, 0x14, 0x5cae); + rtl8168_mdio_write(tp, 0x14, 0x0ebf); + rtl8168_mdio_write(tp, 0x14, 0x826c); + rtl8168_mdio_write(tp, 0x14, 0xe382); + rtl8168_mdio_write(tp, 0x14, 0x44f7); + rtl8168_mdio_write(tp, 0x14, 0x3ee7); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x0212); + rtl8168_mdio_write(tp, 0x14, 0xf0ad); + rtl8168_mdio_write(tp, 0x14, 0x233f); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xf9e4); + rtl8168_mdio_write(tp, 0x14, 0x8288); + rtl8168_mdio_write(tp, 0x14, 0xe085); + rtl8168_mdio_write(tp, 0x14, 0xfde4); + rtl8168_mdio_write(tp, 0x14, 0x8289); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x440d); + rtl8168_mdio_write(tp, 0x14, 0x0758); + rtl8168_mdio_write(tp, 0x14, 0x01bf); + rtl8168_mdio_write(tp, 0x14, 0x8279); + rtl8168_mdio_write(tp, 0x14, 0x0215); + rtl8168_mdio_write(tp, 0x14, 0x38bf); + rtl8168_mdio_write(tp, 0x14, 0x8251); + rtl8168_mdio_write(tp, 0x14, 0x0213); + rtl8168_mdio_write(tp, 0x14, 0x06a0); + rtl8168_mdio_write(tp, 0x14, 0x010f); + rtl8168_mdio_write(tp, 0x14, 0xe082); + rtl8168_mdio_write(tp, 0x14, 0x44f6); + rtl8168_mdio_write(tp, 0x14, 0x23e4); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x580f); + rtl8168_mdio_write(tp, 0x14, 0xe582); + rtl8168_mdio_write(tp, 0x14, 0x5dae); + rtl8168_mdio_write(tp, 0x14, 0x0ebf); + rtl8168_mdio_write(tp, 0x14, 0x8273); + rtl8168_mdio_write(tp, 0x14, 0xe382); + rtl8168_mdio_write(tp, 0x14, 0x44f7); + rtl8168_mdio_write(tp, 0x14, 0x3fe7); + rtl8168_mdio_write(tp, 0x14, 0x8244); + rtl8168_mdio_write(tp, 0x14, 0x0212); + rtl8168_mdio_write(tp, 0x14, 0xf0ee); + rtl8168_mdio_write(tp, 0x14, 0x8288); + rtl8168_mdio_write(tp, 0x14, 0x10ee); + rtl8168_mdio_write(tp, 0x14, 0x8289); + rtl8168_mdio_write(tp, 0x14, 0x00af); + rtl8168_mdio_write(tp, 0x14, 0x14aa); + rtl8168_mdio_write(tp, 0x13, 0xb818); + 
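+ /* The 0xB818..0xB81E entries loaded next look like patch check-point (trap) addresses for the image above; the write to 0xB832 afterwards presumably arms them before the patch request is released. */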
rtl8168_mdio_write(tp, 0x14, 0x13cf); + rtl8168_mdio_write(tp, 0x13, 0xb81a); + rtl8168_mdio_write(tp, 0x14, 0xfffd); + rtl8168_mdio_write(tp, 0x13, 0xb81c); + rtl8168_mdio_write(tp, 0x14, 0xfffd); + rtl8168_mdio_write(tp, 0x13, 0xb81e); + rtl8168_mdio_write(tp, 0x14, 0xfffd); + rtl8168_mdio_write(tp, 0x13, 0xb832); + rtl8168_mdio_write(tp, 0x14, 0x0001); + + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x0000); + rtl8168_mdio_write(tp, 0x14, 0x0000); + rtl8168_mdio_write(tp, 0x1f, 0x0B82); + gphy_val = rtl8168_mdio_read(tp, 0x17); + gphy_val &= ~(BIT_0); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8028); + rtl8168_mdio_write(tp, 0x14, 0x0000); + + rtl8168_clear_phy_mcu_patch_request(tp); + + if (tp->RequiredSecLanDonglePatch) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + gphy_val = rtl8168_mdio_read(tp, 0x11); + gphy_val &= ~BIT_6; + rtl8168_mdio_write(tp, 0x11, gphy_val); + } +} + +static void +rtl8168_init_hw_phy_mcu(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u8 require_disable_phy_disable_mode = FALSE; + + if (tp->NotWrRamCodeToMicroP == TRUE) return; + if (rtl8168_check_hw_phy_mcu_code_ver(dev)) return; + + if (FALSE == rtl8168_phy_ram_code_check(dev)) { + rtl8168_set_phy_ram_code_check_fail_flag(dev); + return; + } + + if (HW_SUPPORT_CHECK_PHY_DISABLE_MODE(tp) && rtl8168_is_in_phy_disable_mode(dev)) + require_disable_phy_disable_mode = TRUE; + + if (require_disable_phy_disable_mode) + rtl8168_disable_phy_disable_mode(dev); + + switch (tp->mcfg) { + case CFG_METHOD_14: + rtl8168_set_phy_mcu_8168e_1(dev); + break; + case CFG_METHOD_15: + rtl8168_set_phy_mcu_8168e_2(dev); + break; + case CFG_METHOD_16: + rtl8168_set_phy_mcu_8168evl_1(dev); + break; + case CFG_METHOD_17: + rtl8168_set_phy_mcu_8168evl_2(dev); + break; + case CFG_METHOD_18: + rtl8168_set_phy_mcu_8168f_1(dev); + break; + case CFG_METHOD_19: + rtl8168_set_phy_mcu_8168f_2(dev); + break; + case CFG_METHOD_20: + rtl8168_set_phy_mcu_8411_1(dev); + break; + case CFG_METHOD_21: + rtl8168_set_phy_mcu_8168g_1(dev); + break; + case CFG_METHOD_25: + rtl8168_set_phy_mcu_8168gu_2(dev); + break; + case CFG_METHOD_26: + rtl8168_set_phy_mcu_8411b_1(dev); + break; + case CFG_METHOD_28: + rtl8168_set_phy_mcu_8168ep_2(dev); + break; + case CFG_METHOD_29: + rtl8168_set_phy_mcu_8168h_1(dev); + break; + case CFG_METHOD_30: + rtl8168_set_phy_mcu_8168h_2(dev); + break; + } + + if (require_disable_phy_disable_mode) + rtl8168_enable_phy_disable_mode(dev); + + rtl8168_write_hw_phy_mcu_code_ver(dev); + + rtl8168_mdio_write(tp,0x1F, 0x0000); + + tp->HwHasWrRamCodeToMicroP = TRUE; +} +#endif + +static void +rtl8168_hw_phy_config(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + u16 gphy_val; + unsigned int i; + + tp->phy_reset_enable(dev); + + if (HW_DASH_SUPPORT_TYPE_3(tp) && tp->HwPkgDet == 0x06) return; + +#ifndef ENABLE_USE_FIRMWARE_FILE + if (!tp->rtl_fw) { + rtl8168_init_hw_phy_mcu(dev); + } +#endif + + if (tp->mcfg == CFG_METHOD_1) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0B, 0x94B0); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x12, 0x6096); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x0D, 0xF8A0); + } else if (tp->mcfg == CFG_METHOD_2) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0B, 0x94B0); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + 
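+ /* Recurring idiom throughout rtl8168_hw_phy_config(): select a register page by writing the page number to reg 0x1F, program registers on that page, then return to page 0 via (0x1F, 0x0000). */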
rtl8168_mdio_write(tp, 0x12, 0x6096); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_3) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0B, 0x94B0); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x12, 0x6096); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_4) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x12, 0x2300); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x16, 0x000A); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x12, 0xC096); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x00, 0x88DE); + rtl8168_mdio_write(tp, 0x01, 0x82B1); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x08, 0x9E30); + rtl8168_mdio_write(tp, 0x09, 0x01F0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0A, 0x5500); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x03, 0x7002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0C, 0x00C8); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | (1 << 5)); + rtl8168_mdio_write(tp, 0x0D, rtl8168_mdio_read(tp, 0x0D) & ~(1 << 5)); + } else if (tp->mcfg == CFG_METHOD_5) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x12, 0x2300); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x16, 0x0F0A); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x00, 0x88DE); + rtl8168_mdio_write(tp, 0x01, 0x82B1); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0C, 0x7EB8); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x0761); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x03, 0x802F); + rtl8168_mdio_write(tp, 0x02, 0x4F02); + rtl8168_mdio_write(tp, 0x01, 0x0409); + rtl8168_mdio_write(tp, 0x00, 0xF099); + rtl8168_mdio_write(tp, 0x04, 0x9800); + rtl8168_mdio_write(tp, 0x04, 0x9000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x16, rtl8168_mdio_read(tp, 0x16) | (1 << 0)); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | (1 << 5)); + rtl8168_mdio_write(tp, 0x0D, rtl8168_mdio_read(tp, 0x0D) & ~(1 << 5)); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x1D, 0x3D98); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_6) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x12, 0x2300); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x16, 0x0F0A); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x00, 0x88DE); + rtl8168_mdio_write(tp, 0x01, 0x82B1); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 
0x0002); + rtl8168_mdio_write(tp, 0x0C, 0x7EB8); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x5461); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x5461); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x16, rtl8168_mdio_read(tp, 0x16) | (1 << 0)); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | (1 << 5)); + rtl8168_mdio_write(tp, 0x0D, rtl8168_mdio_read(tp, 0x0D) & ~(1 << 5)); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x1D, 0x3D98); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_7) { + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | (1 << 5)); + rtl8168_mdio_write(tp, 0x0D, rtl8168_mdio_read(tp, 0x0D) & ~(1 << 5)); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x1D, 0x3D98); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x14, 0xCAA3); + rtl8168_mdio_write(tp, 0x1C, 0x000A); + rtl8168_mdio_write(tp, 0x18, 0x65D0); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x17, 0xB580); + rtl8168_mdio_write(tp, 0x18, 0xFF54); + rtl8168_mdio_write(tp, 0x19, 0x3954); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0D, 0x310C); + rtl8168_mdio_write(tp, 0x0E, 0x310C); + rtl8168_mdio_write(tp, 0x0F, 0x311C); + rtl8168_mdio_write(tp, 0x06, 0x0761); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x18, 0xFF55); + rtl8168_mdio_write(tp, 0x19, 0x3955); + rtl8168_mdio_write(tp, 0x18, 0xFF54); + rtl8168_mdio_write(tp, 0x19, 0x3954); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_8) { + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | (1 << 5)); + rtl8168_mdio_write(tp, 0x0D, rtl8168_mdio_read(tp, 0x0D) & ~(1 << 5)); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x14, 0xCAA3); + rtl8168_mdio_write(tp, 0x1C, 0x000A); + rtl8168_mdio_write(tp, 0x18, 0x65D0); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x17, 0xB580); + rtl8168_mdio_write(tp, 0x18, 0xFF54); + rtl8168_mdio_write(tp, 0x19, 0x3954); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0D, 0x310C); + rtl8168_mdio_write(tp, 0x0E, 0x310C); + rtl8168_mdio_write(tp, 0x0F, 0x311C); + rtl8168_mdio_write(tp, 0x06, 0x0761); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x18, 0xFF55); + rtl8168_mdio_write(tp, 0x19, 0x3955); + rtl8168_mdio_write(tp, 0x18, 0xFF54); + rtl8168_mdio_write(tp, 0x19, 0x3954); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x16, rtl8168_mdio_read(tp, 0x16) | (1 << 0)); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_9) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x06, 0x4064); + rtl8168_mdio_write(tp, 0x07, 0x2863); + rtl8168_mdio_write(tp, 0x08, 0x059C); + rtl8168_mdio_write(tp, 0x09, 0x26B4); + rtl8168_mdio_write(tp, 0x0A, 0x6A19); + rtl8168_mdio_write(tp, 0x0B, 0xDCC8); + 
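+ /* CFG_METHOD_9 (apparently an RTL8168D revision): plain paged register writes tuning PHY analog/AFE parameters before any microcode load is attempted. */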
rtl8168_mdio_write(tp, 0x10, 0xF06D); + rtl8168_mdio_write(tp, 0x14, 0x7F68); + rtl8168_mdio_write(tp, 0x18, 0x7FD9); + rtl8168_mdio_write(tp, 0x1C, 0xF0FF); + rtl8168_mdio_write(tp, 0x1D, 0x3D9C); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x12, 0xF49F); + rtl8168_mdio_write(tp, 0x13, 0x070B); + rtl8168_mdio_write(tp, 0x1A, 0x05AD); + rtl8168_mdio_write(tp, 0x14, 0x94C0); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0B) & 0xFF00; + gphy_val |= 0x10; + rtl8168_mdio_write(tp, 0x0B, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x0C) & 0x00FF; + gphy_val |= 0xA200; + rtl8168_mdio_write(tp, 0x0C, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x5561); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8332); + rtl8168_mdio_write(tp, 0x06, 0x5561); + + if (rtl8168_efuse_read(tp, 0x01) == 0xb1) { + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x05, 0x669A); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8330); + rtl8168_mdio_write(tp, 0x06, 0x669A); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0D); + if ((gphy_val & 0x00FF) != 0x006C) { + gphy_val &= 0xFF00; + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0065); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0066); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0067); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0068); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0069); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x006A); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x006B); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x006C); + } + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x05, 0x6662); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8330); + rtl8168_mdio_write(tp, 0x06, 0x6662); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0D); + gphy_val |= BIT_9; + gphy_val |= BIT_8; + rtl8168_mdio_write(tp, 0x0D, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x0F); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x0F, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x02); + gphy_val &= ~BIT_10; + gphy_val &= ~BIT_9; + gphy_val |= BIT_8; + rtl8168_mdio_write(tp, 0x02, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x03); + gphy_val &= ~BIT_15; + gphy_val &= ~BIT_14; + gphy_val &= ~BIT_13; + rtl8168_mdio_write(tp, 0x03, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x001B); + if (rtl8168_mdio_read(tp, 0x06) == 0xBF00) { + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + rtl8168_mdio_write(tp, 0x06, 0xfaef); + rtl8168_mdio_write(tp, 0x06, 0x59ee); + rtl8168_mdio_write(tp, 0x06, 0xf8ea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xf8eb); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0xf87c); + rtl8168_mdio_write(tp, 0x06, 0xe1f8); + rtl8168_mdio_write(tp, 0x06, 0x7d59); + rtl8168_mdio_write(tp, 0x06, 0x0fef); + rtl8168_mdio_write(tp, 0x06, 0x0139); + rtl8168_mdio_write(tp, 0x06, 0x029e); + rtl8168_mdio_write(tp, 0x06, 0x06ef); + rtl8168_mdio_write(tp, 0x06, 0x1039); + rtl8168_mdio_write(tp, 0x06, 
0x089f); + rtl8168_mdio_write(tp, 0x06, 0x2aee); + rtl8168_mdio_write(tp, 0x06, 0xf8ea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xf8eb); + rtl8168_mdio_write(tp, 0x06, 0x01e0); + rtl8168_mdio_write(tp, 0x06, 0xf87c); + rtl8168_mdio_write(tp, 0x06, 0xe1f8); + rtl8168_mdio_write(tp, 0x06, 0x7d58); + rtl8168_mdio_write(tp, 0x06, 0x409e); + rtl8168_mdio_write(tp, 0x06, 0x0f39); + rtl8168_mdio_write(tp, 0x06, 0x46aa); + rtl8168_mdio_write(tp, 0x06, 0x0bbf); + rtl8168_mdio_write(tp, 0x06, 0x8290); + rtl8168_mdio_write(tp, 0x06, 0xd682); + rtl8168_mdio_write(tp, 0x06, 0x9802); + rtl8168_mdio_write(tp, 0x06, 0x014f); + rtl8168_mdio_write(tp, 0x06, 0xae09); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0x98d6); + rtl8168_mdio_write(tp, 0x06, 0x82a0); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x4fef); + rtl8168_mdio_write(tp, 0x06, 0x95fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x05f8); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xeef8); + rtl8168_mdio_write(tp, 0x06, 0xea00); + rtl8168_mdio_write(tp, 0x06, 0xeef8); + rtl8168_mdio_write(tp, 0x06, 0xeb00); + rtl8168_mdio_write(tp, 0x06, 0xe2f8); + rtl8168_mdio_write(tp, 0x06, 0x7ce3); + rtl8168_mdio_write(tp, 0x06, 0xf87d); + rtl8168_mdio_write(tp, 0x06, 0xa511); + rtl8168_mdio_write(tp, 0x06, 0x1112); + rtl8168_mdio_write(tp, 0x06, 0xd240); + rtl8168_mdio_write(tp, 0x06, 0xd644); + rtl8168_mdio_write(tp, 0x06, 0x4402); + rtl8168_mdio_write(tp, 0x06, 0x8217); + rtl8168_mdio_write(tp, 0x06, 0xd2a0); + rtl8168_mdio_write(tp, 0x06, 0xd6aa); + rtl8168_mdio_write(tp, 0x06, 0xaa02); + rtl8168_mdio_write(tp, 0x06, 0x8217); + rtl8168_mdio_write(tp, 0x06, 0xae0f); + rtl8168_mdio_write(tp, 0x06, 0xa544); + rtl8168_mdio_write(tp, 0x06, 0x4402); + rtl8168_mdio_write(tp, 0x06, 0xae4d); + rtl8168_mdio_write(tp, 0x06, 0xa5aa); + rtl8168_mdio_write(tp, 0x06, 0xaa02); + rtl8168_mdio_write(tp, 0x06, 0xae47); + rtl8168_mdio_write(tp, 0x06, 0xaf82); + rtl8168_mdio_write(tp, 0x06, 0x13ee); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0x0fee); + rtl8168_mdio_write(tp, 0x06, 0x834c); + rtl8168_mdio_write(tp, 0x06, 0x0fee); + rtl8168_mdio_write(tp, 0x06, 0x834f); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8351); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x834a); + rtl8168_mdio_write(tp, 0x06, 0xffee); + rtl8168_mdio_write(tp, 0x06, 0x834b); + rtl8168_mdio_write(tp, 0x06, 0xffe0); + rtl8168_mdio_write(tp, 0x06, 0x8330); + rtl8168_mdio_write(tp, 0x06, 0xe183); + rtl8168_mdio_write(tp, 0x06, 0x3158); + rtl8168_mdio_write(tp, 0x06, 0xfee4); + rtl8168_mdio_write(tp, 0x06, 0xf88a); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x8be0); + rtl8168_mdio_write(tp, 0x06, 0x8332); + rtl8168_mdio_write(tp, 0x06, 0xe183); + rtl8168_mdio_write(tp, 0x06, 0x3359); + rtl8168_mdio_write(tp, 0x06, 0x0fe2); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0x0c24); + rtl8168_mdio_write(tp, 0x06, 0x5af0); + rtl8168_mdio_write(tp, 0x06, 0x1e12); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x8ce5); + rtl8168_mdio_write(tp, 0x06, 0xf88d); + rtl8168_mdio_write(tp, 0x06, 0xaf82); + rtl8168_mdio_write(tp, 0x06, 0x13e0); + rtl8168_mdio_write(tp, 0x06, 0x834f); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 
0x06, 0x834f); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x009f); + rtl8168_mdio_write(tp, 0x06, 0x0ae0); + rtl8168_mdio_write(tp, 0x06, 0x834f); + rtl8168_mdio_write(tp, 0x06, 0xa010); + rtl8168_mdio_write(tp, 0x06, 0xa5ee); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x01e0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7805); + rtl8168_mdio_write(tp, 0x06, 0x9e9a); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x049e); + rtl8168_mdio_write(tp, 0x06, 0x10e0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7803); + rtl8168_mdio_write(tp, 0x06, 0x9e0f); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x05ae); + rtl8168_mdio_write(tp, 0x06, 0x0caf); + rtl8168_mdio_write(tp, 0x06, 0x81f8); + rtl8168_mdio_write(tp, 0x06, 0xaf81); + rtl8168_mdio_write(tp, 0x06, 0xa3af); + rtl8168_mdio_write(tp, 0x06, 0x81dc); + rtl8168_mdio_write(tp, 0x06, 0xaf82); + rtl8168_mdio_write(tp, 0x06, 0x13ee); + rtl8168_mdio_write(tp, 0x06, 0x8348); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0x8349); + rtl8168_mdio_write(tp, 0x06, 0x00e0); + rtl8168_mdio_write(tp, 0x06, 0x8351); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x8351); + rtl8168_mdio_write(tp, 0x06, 0x5801); + rtl8168_mdio_write(tp, 0x06, 0x9fea); + rtl8168_mdio_write(tp, 0x06, 0xd000); + rtl8168_mdio_write(tp, 0x06, 0xd180); + rtl8168_mdio_write(tp, 0x06, 0x1f66); + rtl8168_mdio_write(tp, 0x06, 0xe2f8); + rtl8168_mdio_write(tp, 0x06, 0xeae3); + rtl8168_mdio_write(tp, 0x06, 0xf8eb); + rtl8168_mdio_write(tp, 0x06, 0x5af8); + rtl8168_mdio_write(tp, 0x06, 0x1e20); + rtl8168_mdio_write(tp, 0x06, 0xe6f8); + rtl8168_mdio_write(tp, 0x06, 0xeae5); + rtl8168_mdio_write(tp, 0x06, 0xf8eb); + rtl8168_mdio_write(tp, 0x06, 0xd302); + rtl8168_mdio_write(tp, 0x06, 0xb3fe); + rtl8168_mdio_write(tp, 0x06, 0xe2f8); + rtl8168_mdio_write(tp, 0x06, 0x7cef); + rtl8168_mdio_write(tp, 0x06, 0x325b); + rtl8168_mdio_write(tp, 0x06, 0x80e3); + rtl8168_mdio_write(tp, 0x06, 0xf87d); + rtl8168_mdio_write(tp, 0x06, 0x9e03); + rtl8168_mdio_write(tp, 0x06, 0x7dff); + rtl8168_mdio_write(tp, 0x06, 0xff0d); + rtl8168_mdio_write(tp, 0x06, 0x581c); + rtl8168_mdio_write(tp, 0x06, 0x551a); + rtl8168_mdio_write(tp, 0x06, 0x6511); + rtl8168_mdio_write(tp, 0x06, 0xa190); + rtl8168_mdio_write(tp, 0x06, 0xd3e2); + rtl8168_mdio_write(tp, 0x06, 0x8348); + rtl8168_mdio_write(tp, 0x06, 0xe383); + rtl8168_mdio_write(tp, 0x06, 0x491b); + rtl8168_mdio_write(tp, 0x06, 0x56ab); + rtl8168_mdio_write(tp, 0x06, 0x08ef); + rtl8168_mdio_write(tp, 0x06, 0x56e6); + rtl8168_mdio_write(tp, 0x06, 0x8348); + rtl8168_mdio_write(tp, 0x06, 0xe783); + rtl8168_mdio_write(tp, 0x06, 0x4910); + rtl8168_mdio_write(tp, 0x06, 0xd180); + rtl8168_mdio_write(tp, 0x06, 0x1f66); + rtl8168_mdio_write(tp, 0x06, 0xa004); + rtl8168_mdio_write(tp, 0x06, 0xb9e2); + rtl8168_mdio_write(tp, 0x06, 0x8348); + rtl8168_mdio_write(tp, 0x06, 0xe383); + rtl8168_mdio_write(tp, 0x06, 0x49ef); + rtl8168_mdio_write(tp, 0x06, 0x65e2); + rtl8168_mdio_write(tp, 0x06, 0x834a); + rtl8168_mdio_write(tp, 0x06, 0xe383); + rtl8168_mdio_write(tp, 0x06, 0x4b1b); + rtl8168_mdio_write(tp, 0x06, 0x56aa); + rtl8168_mdio_write(tp, 0x06, 0x0eef); + rtl8168_mdio_write(tp, 0x06, 0x56e6); + 
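+ /* Microcode continues through the page-5 reg 0x05 (address) / 0x06 (data) window; the earlier probe of 0x05 = 0x001B against 0xBF00 appears to be a signature check deciding whether this image gets loaded at all. */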
rtl8168_mdio_write(tp, 0x06, 0x834a); + rtl8168_mdio_write(tp, 0x06, 0xe783); + rtl8168_mdio_write(tp, 0x06, 0x4be2); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0xe683); + rtl8168_mdio_write(tp, 0x06, 0x4ce0); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0xa000); + rtl8168_mdio_write(tp, 0x06, 0x0caf); + rtl8168_mdio_write(tp, 0x06, 0x81dc); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4d10); + rtl8168_mdio_write(tp, 0x06, 0xe483); + rtl8168_mdio_write(tp, 0x06, 0x4dae); + rtl8168_mdio_write(tp, 0x06, 0x0480); + rtl8168_mdio_write(tp, 0x06, 0xe483); + rtl8168_mdio_write(tp, 0x06, 0x4de0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7803); + rtl8168_mdio_write(tp, 0x06, 0x9e0b); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x049e); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x02e0); + rtl8168_mdio_write(tp, 0x06, 0x8332); + rtl8168_mdio_write(tp, 0x06, 0xe183); + rtl8168_mdio_write(tp, 0x06, 0x3359); + rtl8168_mdio_write(tp, 0x06, 0x0fe2); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0x0c24); + rtl8168_mdio_write(tp, 0x06, 0x5af0); + rtl8168_mdio_write(tp, 0x06, 0x1e12); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x8ce5); + rtl8168_mdio_write(tp, 0x06, 0xf88d); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x30e1); + rtl8168_mdio_write(tp, 0x06, 0x8331); + rtl8168_mdio_write(tp, 0x06, 0x6801); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x8ae5); + rtl8168_mdio_write(tp, 0x06, 0xf88b); + rtl8168_mdio_write(tp, 0x06, 0xae37); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4e03); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4ce1); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0x1b01); + rtl8168_mdio_write(tp, 0x06, 0x9e04); + rtl8168_mdio_write(tp, 0x06, 0xaaa1); + rtl8168_mdio_write(tp, 0x06, 0xaea8); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4e04); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4f00); + rtl8168_mdio_write(tp, 0x06, 0xaeab); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4f78); + rtl8168_mdio_write(tp, 0x06, 0x039f); + rtl8168_mdio_write(tp, 0x06, 0x14ee); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x05d2); + rtl8168_mdio_write(tp, 0x06, 0x40d6); + rtl8168_mdio_write(tp, 0x06, 0x5554); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x17d2); + rtl8168_mdio_write(tp, 0x06, 0xa0d6); + rtl8168_mdio_write(tp, 0x06, 0xba00); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x17fe); + rtl8168_mdio_write(tp, 0x06, 0xfdfc); + rtl8168_mdio_write(tp, 0x06, 0x05f8); + rtl8168_mdio_write(tp, 0x06, 0xe0f8); + rtl8168_mdio_write(tp, 0x06, 0x60e1); + rtl8168_mdio_write(tp, 0x06, 0xf861); + rtl8168_mdio_write(tp, 0x06, 0x6802); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x60e5); + rtl8168_mdio_write(tp, 0x06, 0xf861); + rtl8168_mdio_write(tp, 0x06, 0xe0f8); + rtl8168_mdio_write(tp, 0x06, 0x48e1); + rtl8168_mdio_write(tp, 0x06, 0xf849); + rtl8168_mdio_write(tp, 0x06, 0x580f); + rtl8168_mdio_write(tp, 0x06, 0x1e02); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 
0x48e5); + rtl8168_mdio_write(tp, 0x06, 0xf849); + rtl8168_mdio_write(tp, 0x06, 0xd000); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x5bbf); + rtl8168_mdio_write(tp, 0x06, 0x8350); + rtl8168_mdio_write(tp, 0x06, 0xef46); + rtl8168_mdio_write(tp, 0x06, 0xdc19); + rtl8168_mdio_write(tp, 0x06, 0xddd0); + rtl8168_mdio_write(tp, 0x06, 0x0102); + rtl8168_mdio_write(tp, 0x06, 0x825b); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x77e0); + rtl8168_mdio_write(tp, 0x06, 0xf860); + rtl8168_mdio_write(tp, 0x06, 0xe1f8); + rtl8168_mdio_write(tp, 0x06, 0x6158); + rtl8168_mdio_write(tp, 0x06, 0xfde4); + rtl8168_mdio_write(tp, 0x06, 0xf860); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x61fc); + rtl8168_mdio_write(tp, 0x06, 0x04f9); + rtl8168_mdio_write(tp, 0x06, 0xfafb); + rtl8168_mdio_write(tp, 0x06, 0xc6bf); + rtl8168_mdio_write(tp, 0x06, 0xf840); + rtl8168_mdio_write(tp, 0x06, 0xbe83); + rtl8168_mdio_write(tp, 0x06, 0x50a0); + rtl8168_mdio_write(tp, 0x06, 0x0101); + rtl8168_mdio_write(tp, 0x06, 0x071b); + rtl8168_mdio_write(tp, 0x06, 0x89cf); + rtl8168_mdio_write(tp, 0x06, 0xd208); + rtl8168_mdio_write(tp, 0x06, 0xebdb); + rtl8168_mdio_write(tp, 0x06, 0x19b2); + rtl8168_mdio_write(tp, 0x06, 0xfbff); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0x04f8); + rtl8168_mdio_write(tp, 0x06, 0xe0f8); + rtl8168_mdio_write(tp, 0x06, 0x48e1); + rtl8168_mdio_write(tp, 0x06, 0xf849); + rtl8168_mdio_write(tp, 0x06, 0x6808); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x48e5); + rtl8168_mdio_write(tp, 0x06, 0xf849); + rtl8168_mdio_write(tp, 0x06, 0x58f7); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x48e5); + rtl8168_mdio_write(tp, 0x06, 0xf849); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0x4d20); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x4e22); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x4ddf); + rtl8168_mdio_write(tp, 0x06, 0xff01); + rtl8168_mdio_write(tp, 0x06, 0x4edd); + rtl8168_mdio_write(tp, 0x06, 0xff01); + rtl8168_mdio_write(tp, 0x06, 0xf8fa); + rtl8168_mdio_write(tp, 0x06, 0xfbef); + rtl8168_mdio_write(tp, 0x06, 0x79bf); + rtl8168_mdio_write(tp, 0x06, 0xf822); + rtl8168_mdio_write(tp, 0x06, 0xd819); + rtl8168_mdio_write(tp, 0x06, 0xd958); + rtl8168_mdio_write(tp, 0x06, 0x849f); + rtl8168_mdio_write(tp, 0x06, 0x09bf); + rtl8168_mdio_write(tp, 0x06, 0x82be); + rtl8168_mdio_write(tp, 0x06, 0xd682); + rtl8168_mdio_write(tp, 0x06, 0xc602); + rtl8168_mdio_write(tp, 0x06, 0x014f); + rtl8168_mdio_write(tp, 0x06, 0xef97); + rtl8168_mdio_write(tp, 0x06, 0xfffe); + rtl8168_mdio_write(tp, 0x06, 0xfc05); + rtl8168_mdio_write(tp, 0x06, 0x17ff); + rtl8168_mdio_write(tp, 0x06, 0xfe01); + rtl8168_mdio_write(tp, 0x06, 0x1700); + rtl8168_mdio_write(tp, 0x06, 0x0102); + rtl8168_mdio_write(tp, 0x05, 0x83d8); + rtl8168_mdio_write(tp, 0x06, 0x8051); + rtl8168_mdio_write(tp, 0x05, 0x83d6); + rtl8168_mdio_write(tp, 0x06, 0x82a0); + rtl8168_mdio_write(tp, 0x05, 0x83d4); + rtl8168_mdio_write(tp, 0x06, 0x8000); + rtl8168_mdio_write(tp, 0x02, 0x2010); + rtl8168_mdio_write(tp, 0x03, 0xdc00); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x0b, 0x0600); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00fc); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + 
rtl8168_mdio_write(tp, 0x0D, 0xF880); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_10) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x06, 0x4064); + rtl8168_mdio_write(tp, 0x07, 0x2863); + rtl8168_mdio_write(tp, 0x08, 0x059C); + rtl8168_mdio_write(tp, 0x09, 0x26B4); + rtl8168_mdio_write(tp, 0x0A, 0x6A19); + rtl8168_mdio_write(tp, 0x0B, 0xDCC8); + rtl8168_mdio_write(tp, 0x10, 0xF06D); + rtl8168_mdio_write(tp, 0x14, 0x7F68); + rtl8168_mdio_write(tp, 0x18, 0x7FD9); + rtl8168_mdio_write(tp, 0x1C, 0xF0FF); + rtl8168_mdio_write(tp, 0x1D, 0x3D9C); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x12, 0xF49F); + rtl8168_mdio_write(tp, 0x13, 0x070B); + rtl8168_mdio_write(tp, 0x1A, 0x05AD); + rtl8168_mdio_write(tp, 0x14, 0x94C0); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x5561); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8332); + rtl8168_mdio_write(tp, 0x06, 0x5561); + + if (rtl8168_efuse_read(tp, 0x01) == 0xb1) { + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x05, 0x669A); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8330); + rtl8168_mdio_write(tp, 0x06, 0x669A); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0D); + if ((gphy_val & 0x00FF) != 0x006C) { + gphy_val &= 0xFF00; + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0065); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0066); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0067); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0068); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x0069); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x006A); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x006B); + rtl8168_mdio_write(tp, 0x0D, gphy_val | 0x006C); + } + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x05, 0x2642); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8330); + rtl8168_mdio_write(tp, 0x06, 0x2642); + } + + if (rtl8168_efuse_read(tp, 0x30) == 0x98) { + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) & ~BIT_1); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x01, rtl8168_mdio_read(tp, 0x01) | BIT_9); + } else if (rtl8168_efuse_read(tp, 0x30) == 0x90) { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x01, rtl8168_mdio_read(tp, 0x01) & ~BIT_9); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x16, 0x5101); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x02); + gphy_val &= ~BIT_10; + gphy_val &= ~BIT_9; + gphy_val |= BIT_8; + rtl8168_mdio_write(tp, 0x02, gphy_val); + gphy_val = rtl8168_mdio_read(tp, 0x03); + gphy_val &= ~BIT_15; + gphy_val &= ~BIT_14; + gphy_val &= ~BIT_13; + rtl8168_mdio_write(tp, 0x03, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0F); + gphy_val |= BIT_4; + gphy_val |= BIT_2; + gphy_val |= BIT_1; + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x0F, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x001B); + if (rtl8168_mdio_read(tp, 0x06) == 0xB300) { + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x0080); + rtl8168_mdio_write(tp, 0x05, 0x8000); + rtl8168_mdio_write(tp, 0x06, 0xf8f9); + 
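+ /* Same load pattern as the CFG_METHOD_9 branch above, but gated on the 0xB300 signature and carrying a different microcode image for this chip revision. */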
rtl8168_mdio_write(tp, 0x06, 0xfaee); + rtl8168_mdio_write(tp, 0x06, 0xf8ea); + rtl8168_mdio_write(tp, 0x06, 0x00ee); + rtl8168_mdio_write(tp, 0x06, 0xf8eb); + rtl8168_mdio_write(tp, 0x06, 0x00e2); + rtl8168_mdio_write(tp, 0x06, 0xf87c); + rtl8168_mdio_write(tp, 0x06, 0xe3f8); + rtl8168_mdio_write(tp, 0x06, 0x7da5); + rtl8168_mdio_write(tp, 0x06, 0x1111); + rtl8168_mdio_write(tp, 0x06, 0x12d2); + rtl8168_mdio_write(tp, 0x06, 0x40d6); + rtl8168_mdio_write(tp, 0x06, 0x4444); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xc6d2); + rtl8168_mdio_write(tp, 0x06, 0xa0d6); + rtl8168_mdio_write(tp, 0x06, 0xaaaa); + rtl8168_mdio_write(tp, 0x06, 0x0281); + rtl8168_mdio_write(tp, 0x06, 0xc6ae); + rtl8168_mdio_write(tp, 0x06, 0x0fa5); + rtl8168_mdio_write(tp, 0x06, 0x4444); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x4da5); + rtl8168_mdio_write(tp, 0x06, 0xaaaa); + rtl8168_mdio_write(tp, 0x06, 0x02ae); + rtl8168_mdio_write(tp, 0x06, 0x47af); + rtl8168_mdio_write(tp, 0x06, 0x81c2); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4e00); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4d0f); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4c0f); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4f00); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x5100); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4aff); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4bff); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x30e1); + rtl8168_mdio_write(tp, 0x06, 0x8331); + rtl8168_mdio_write(tp, 0x06, 0x58fe); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x8ae5); + rtl8168_mdio_write(tp, 0x06, 0xf88b); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x32e1); + rtl8168_mdio_write(tp, 0x06, 0x8333); + rtl8168_mdio_write(tp, 0x06, 0x590f); + rtl8168_mdio_write(tp, 0x06, 0xe283); + rtl8168_mdio_write(tp, 0x06, 0x4d0c); + rtl8168_mdio_write(tp, 0x06, 0x245a); + rtl8168_mdio_write(tp, 0x06, 0xf01e); + rtl8168_mdio_write(tp, 0x06, 0x12e4); + rtl8168_mdio_write(tp, 0x06, 0xf88c); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x8daf); + rtl8168_mdio_write(tp, 0x06, 0x81c2); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4f10); + rtl8168_mdio_write(tp, 0x06, 0xe483); + rtl8168_mdio_write(tp, 0x06, 0x4fe0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7800); + rtl8168_mdio_write(tp, 0x06, 0x9f0a); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4fa0); + rtl8168_mdio_write(tp, 0x06, 0x10a5); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4e01); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x059e); + rtl8168_mdio_write(tp, 0x06, 0x9ae0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7804); + rtl8168_mdio_write(tp, 0x06, 0x9e10); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x0fe0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7801); + rtl8168_mdio_write(tp, 0x06, 0x9e05); + rtl8168_mdio_write(tp, 0x06, 0xae0c); + rtl8168_mdio_write(tp, 0x06, 0xaf81); + rtl8168_mdio_write(tp, 0x06, 
0xa7af); + rtl8168_mdio_write(tp, 0x06, 0x8152); + rtl8168_mdio_write(tp, 0x06, 0xaf81); + rtl8168_mdio_write(tp, 0x06, 0x8baf); + rtl8168_mdio_write(tp, 0x06, 0x81c2); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4800); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4900); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x5110); + rtl8168_mdio_write(tp, 0x06, 0xe483); + rtl8168_mdio_write(tp, 0x06, 0x5158); + rtl8168_mdio_write(tp, 0x06, 0x019f); + rtl8168_mdio_write(tp, 0x06, 0xead0); + rtl8168_mdio_write(tp, 0x06, 0x00d1); + rtl8168_mdio_write(tp, 0x06, 0x801f); + rtl8168_mdio_write(tp, 0x06, 0x66e2); + rtl8168_mdio_write(tp, 0x06, 0xf8ea); + rtl8168_mdio_write(tp, 0x06, 0xe3f8); + rtl8168_mdio_write(tp, 0x06, 0xeb5a); + rtl8168_mdio_write(tp, 0x06, 0xf81e); + rtl8168_mdio_write(tp, 0x06, 0x20e6); + rtl8168_mdio_write(tp, 0x06, 0xf8ea); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0xebd3); + rtl8168_mdio_write(tp, 0x06, 0x02b3); + rtl8168_mdio_write(tp, 0x06, 0xfee2); + rtl8168_mdio_write(tp, 0x06, 0xf87c); + rtl8168_mdio_write(tp, 0x06, 0xef32); + rtl8168_mdio_write(tp, 0x06, 0x5b80); + rtl8168_mdio_write(tp, 0x06, 0xe3f8); + rtl8168_mdio_write(tp, 0x06, 0x7d9e); + rtl8168_mdio_write(tp, 0x06, 0x037d); + rtl8168_mdio_write(tp, 0x06, 0xffff); + rtl8168_mdio_write(tp, 0x06, 0x0d58); + rtl8168_mdio_write(tp, 0x06, 0x1c55); + rtl8168_mdio_write(tp, 0x06, 0x1a65); + rtl8168_mdio_write(tp, 0x06, 0x11a1); + rtl8168_mdio_write(tp, 0x06, 0x90d3); + rtl8168_mdio_write(tp, 0x06, 0xe283); + rtl8168_mdio_write(tp, 0x06, 0x48e3); + rtl8168_mdio_write(tp, 0x06, 0x8349); + rtl8168_mdio_write(tp, 0x06, 0x1b56); + rtl8168_mdio_write(tp, 0x06, 0xab08); + rtl8168_mdio_write(tp, 0x06, 0xef56); + rtl8168_mdio_write(tp, 0x06, 0xe683); + rtl8168_mdio_write(tp, 0x06, 0x48e7); + rtl8168_mdio_write(tp, 0x06, 0x8349); + rtl8168_mdio_write(tp, 0x06, 0x10d1); + rtl8168_mdio_write(tp, 0x06, 0x801f); + rtl8168_mdio_write(tp, 0x06, 0x66a0); + rtl8168_mdio_write(tp, 0x06, 0x04b9); + rtl8168_mdio_write(tp, 0x06, 0xe283); + rtl8168_mdio_write(tp, 0x06, 0x48e3); + rtl8168_mdio_write(tp, 0x06, 0x8349); + rtl8168_mdio_write(tp, 0x06, 0xef65); + rtl8168_mdio_write(tp, 0x06, 0xe283); + rtl8168_mdio_write(tp, 0x06, 0x4ae3); + rtl8168_mdio_write(tp, 0x06, 0x834b); + rtl8168_mdio_write(tp, 0x06, 0x1b56); + rtl8168_mdio_write(tp, 0x06, 0xaa0e); + rtl8168_mdio_write(tp, 0x06, 0xef56); + rtl8168_mdio_write(tp, 0x06, 0xe683); + rtl8168_mdio_write(tp, 0x06, 0x4ae7); + rtl8168_mdio_write(tp, 0x06, 0x834b); + rtl8168_mdio_write(tp, 0x06, 0xe283); + rtl8168_mdio_write(tp, 0x06, 0x4de6); + rtl8168_mdio_write(tp, 0x06, 0x834c); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4da0); + rtl8168_mdio_write(tp, 0x06, 0x000c); + rtl8168_mdio_write(tp, 0x06, 0xaf81); + rtl8168_mdio_write(tp, 0x06, 0x8be0); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0x10e4); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0xae04); + rtl8168_mdio_write(tp, 0x06, 0x80e4); + rtl8168_mdio_write(tp, 0x06, 0x834d); + rtl8168_mdio_write(tp, 0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x4e78); + rtl8168_mdio_write(tp, 0x06, 0x039e); + rtl8168_mdio_write(tp, 0x06, 0x0be0); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x7804); + rtl8168_mdio_write(tp, 0x06, 0x9e04); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4e02); + rtl8168_mdio_write(tp, 
0x06, 0xe083); + rtl8168_mdio_write(tp, 0x06, 0x32e1); + rtl8168_mdio_write(tp, 0x06, 0x8333); + rtl8168_mdio_write(tp, 0x06, 0x590f); + rtl8168_mdio_write(tp, 0x06, 0xe283); + rtl8168_mdio_write(tp, 0x06, 0x4d0c); + rtl8168_mdio_write(tp, 0x06, 0x245a); + rtl8168_mdio_write(tp, 0x06, 0xf01e); + rtl8168_mdio_write(tp, 0x06, 0x12e4); + rtl8168_mdio_write(tp, 0x06, 0xf88c); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x8de0); + rtl8168_mdio_write(tp, 0x06, 0x8330); + rtl8168_mdio_write(tp, 0x06, 0xe183); + rtl8168_mdio_write(tp, 0x06, 0x3168); + rtl8168_mdio_write(tp, 0x06, 0x01e4); + rtl8168_mdio_write(tp, 0x06, 0xf88a); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x8bae); + rtl8168_mdio_write(tp, 0x06, 0x37ee); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x03e0); + rtl8168_mdio_write(tp, 0x06, 0x834c); + rtl8168_mdio_write(tp, 0x06, 0xe183); + rtl8168_mdio_write(tp, 0x06, 0x4d1b); + rtl8168_mdio_write(tp, 0x06, 0x019e); + rtl8168_mdio_write(tp, 0x06, 0x04aa); + rtl8168_mdio_write(tp, 0x06, 0xa1ae); + rtl8168_mdio_write(tp, 0x06, 0xa8ee); + rtl8168_mdio_write(tp, 0x06, 0x834e); + rtl8168_mdio_write(tp, 0x06, 0x04ee); + rtl8168_mdio_write(tp, 0x06, 0x834f); + rtl8168_mdio_write(tp, 0x06, 0x00ae); + rtl8168_mdio_write(tp, 0x06, 0xabe0); + rtl8168_mdio_write(tp, 0x06, 0x834f); + rtl8168_mdio_write(tp, 0x06, 0x7803); + rtl8168_mdio_write(tp, 0x06, 0x9f14); + rtl8168_mdio_write(tp, 0x06, 0xee83); + rtl8168_mdio_write(tp, 0x06, 0x4e05); + rtl8168_mdio_write(tp, 0x06, 0xd240); + rtl8168_mdio_write(tp, 0x06, 0xd655); + rtl8168_mdio_write(tp, 0x06, 0x5402); + rtl8168_mdio_write(tp, 0x06, 0x81c6); + rtl8168_mdio_write(tp, 0x06, 0xd2a0); + rtl8168_mdio_write(tp, 0x06, 0xd6ba); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x81c6); + rtl8168_mdio_write(tp, 0x06, 0xfefd); + rtl8168_mdio_write(tp, 0x06, 0xfc05); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0xf860); + rtl8168_mdio_write(tp, 0x06, 0xe1f8); + rtl8168_mdio_write(tp, 0x06, 0x6168); + rtl8168_mdio_write(tp, 0x06, 0x02e4); + rtl8168_mdio_write(tp, 0x06, 0xf860); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x61e0); + rtl8168_mdio_write(tp, 0x06, 0xf848); + rtl8168_mdio_write(tp, 0x06, 0xe1f8); + rtl8168_mdio_write(tp, 0x06, 0x4958); + rtl8168_mdio_write(tp, 0x06, 0x0f1e); + rtl8168_mdio_write(tp, 0x06, 0x02e4); + rtl8168_mdio_write(tp, 0x06, 0xf848); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x49d0); + rtl8168_mdio_write(tp, 0x06, 0x0002); + rtl8168_mdio_write(tp, 0x06, 0x820a); + rtl8168_mdio_write(tp, 0x06, 0xbf83); + rtl8168_mdio_write(tp, 0x06, 0x50ef); + rtl8168_mdio_write(tp, 0x06, 0x46dc); + rtl8168_mdio_write(tp, 0x06, 0x19dd); + rtl8168_mdio_write(tp, 0x06, 0xd001); + rtl8168_mdio_write(tp, 0x06, 0x0282); + rtl8168_mdio_write(tp, 0x06, 0x0a02); + rtl8168_mdio_write(tp, 0x06, 0x8226); + rtl8168_mdio_write(tp, 0x06, 0xe0f8); + rtl8168_mdio_write(tp, 0x06, 0x60e1); + rtl8168_mdio_write(tp, 0x06, 0xf861); + rtl8168_mdio_write(tp, 0x06, 0x58fd); + rtl8168_mdio_write(tp, 0x06, 0xe4f8); + rtl8168_mdio_write(tp, 0x06, 0x60e5); + rtl8168_mdio_write(tp, 0x06, 0xf861); + rtl8168_mdio_write(tp, 0x06, 0xfc04); + rtl8168_mdio_write(tp, 0x06, 0xf9fa); + rtl8168_mdio_write(tp, 0x06, 0xfbc6); + rtl8168_mdio_write(tp, 0x06, 0xbff8); + rtl8168_mdio_write(tp, 0x06, 0x40be); + rtl8168_mdio_write(tp, 0x06, 0x8350); + rtl8168_mdio_write(tp, 0x06, 0xa001); + 
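+        /*
+         * The words streamed into reg 0x06 here are PHY MCU machine code
+         * (what appear to be opcode/operand pairs); they are opaque without
+         * Realtek's firmware documentation and must be kept exactly as
+         * supplied by the vendor.
+         */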
rtl8168_mdio_write(tp, 0x06, 0x0107); + rtl8168_mdio_write(tp, 0x06, 0x1b89); + rtl8168_mdio_write(tp, 0x06, 0xcfd2); + rtl8168_mdio_write(tp, 0x06, 0x08eb); + rtl8168_mdio_write(tp, 0x06, 0xdb19); + rtl8168_mdio_write(tp, 0x06, 0xb2fb); + rtl8168_mdio_write(tp, 0x06, 0xfffe); + rtl8168_mdio_write(tp, 0x06, 0xfd04); + rtl8168_mdio_write(tp, 0x06, 0xf8e0); + rtl8168_mdio_write(tp, 0x06, 0xf848); + rtl8168_mdio_write(tp, 0x06, 0xe1f8); + rtl8168_mdio_write(tp, 0x06, 0x4968); + rtl8168_mdio_write(tp, 0x06, 0x08e4); + rtl8168_mdio_write(tp, 0x06, 0xf848); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x4958); + rtl8168_mdio_write(tp, 0x06, 0xf7e4); + rtl8168_mdio_write(tp, 0x06, 0xf848); + rtl8168_mdio_write(tp, 0x06, 0xe5f8); + rtl8168_mdio_write(tp, 0x06, 0x49fc); + rtl8168_mdio_write(tp, 0x06, 0x044d); + rtl8168_mdio_write(tp, 0x06, 0x2000); + rtl8168_mdio_write(tp, 0x06, 0x024e); + rtl8168_mdio_write(tp, 0x06, 0x2200); + rtl8168_mdio_write(tp, 0x06, 0x024d); + rtl8168_mdio_write(tp, 0x06, 0xdfff); + rtl8168_mdio_write(tp, 0x06, 0x014e); + rtl8168_mdio_write(tp, 0x06, 0xddff); + rtl8168_mdio_write(tp, 0x06, 0x01f8); + rtl8168_mdio_write(tp, 0x06, 0xfafb); + rtl8168_mdio_write(tp, 0x06, 0xef79); + rtl8168_mdio_write(tp, 0x06, 0xbff8); + rtl8168_mdio_write(tp, 0x06, 0x22d8); + rtl8168_mdio_write(tp, 0x06, 0x19d9); + rtl8168_mdio_write(tp, 0x06, 0x5884); + rtl8168_mdio_write(tp, 0x06, 0x9f09); + rtl8168_mdio_write(tp, 0x06, 0xbf82); + rtl8168_mdio_write(tp, 0x06, 0x6dd6); + rtl8168_mdio_write(tp, 0x06, 0x8275); + rtl8168_mdio_write(tp, 0x06, 0x0201); + rtl8168_mdio_write(tp, 0x06, 0x4fef); + rtl8168_mdio_write(tp, 0x06, 0x97ff); + rtl8168_mdio_write(tp, 0x06, 0xfefc); + rtl8168_mdio_write(tp, 0x06, 0x0517); + rtl8168_mdio_write(tp, 0x06, 0xfffe); + rtl8168_mdio_write(tp, 0x06, 0x0117); + rtl8168_mdio_write(tp, 0x06, 0x0001); + rtl8168_mdio_write(tp, 0x06, 0x0200); + rtl8168_mdio_write(tp, 0x05, 0x83d8); + rtl8168_mdio_write(tp, 0x06, 0x8000); + rtl8168_mdio_write(tp, 0x05, 0x83d6); + rtl8168_mdio_write(tp, 0x06, 0x824f); + rtl8168_mdio_write(tp, 0x02, 0x2010); + rtl8168_mdio_write(tp, 0x03, 0xdc00); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x0b, 0x0600); + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0xfff6); + rtl8168_mdio_write(tp, 0x06, 0x00fc); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x0D, 0xF880); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_11) { + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x10, 0x0008); + rtl8168_mdio_write(tp, 0x0D, 0x006C); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0B, 0xA4D8); + rtl8168_mdio_write(tp, 0x09, 0x281C); + rtl8168_mdio_write(tp, 0x07, 0x2883); + rtl8168_mdio_write(tp, 0x0A, 0x6B35); + rtl8168_mdio_write(tp, 0x1D, 0x3DA4); + rtl8168_mdio_write(tp, 0x1C, 0xEFFD); + rtl8168_mdio_write(tp, 0x14, 0x7F52); + rtl8168_mdio_write(tp, 0x18, 0x7FC6); + rtl8168_mdio_write(tp, 0x08, 0x0601); + rtl8168_mdio_write(tp, 0x06, 0x4063); + rtl8168_mdio_write(tp, 0x10, 0xF074); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x13, 0x0789); + rtl8168_mdio_write(tp, 0x12, 0xF4BD); + rtl8168_mdio_write(tp, 0x1A, 0x04FD); + rtl8168_mdio_write(tp, 0x14, 0x84B0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x00, 0x9200); + + 
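+        /*
+         * With page 0x0000 selected, reg 0x00 is the standard MII BMCR;
+         * 0x9200 = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART, i.e.
+         * soft-reset the PHY and restart auto-negotiation after the tuning
+         * writes above.
+         */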
rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x01, 0x0340); + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x04, 0x4000); + rtl8168_mdio_write(tp, 0x03, 0x1D21); + rtl8168_mdio_write(tp, 0x02, 0x0C32); + rtl8168_mdio_write(tp, 0x01, 0x0200); + rtl8168_mdio_write(tp, 0x00, 0x5554); + rtl8168_mdio_write(tp, 0x04, 0x4800); + rtl8168_mdio_write(tp, 0x04, 0x4000); + rtl8168_mdio_write(tp, 0x04, 0xF000); + rtl8168_mdio_write(tp, 0x03, 0xDF01); + rtl8168_mdio_write(tp, 0x02, 0xDF20); + rtl8168_mdio_write(tp, 0x01, 0x101A); + rtl8168_mdio_write(tp, 0x00, 0xA0FF); + rtl8168_mdio_write(tp, 0x04, 0xF800); + rtl8168_mdio_write(tp, 0x04, 0xF000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0023); + rtl8168_mdio_write(tp, 0x16, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + gphy_val = rtl8168_mdio_read(tp, 0x0D); + gphy_val |= BIT_5; + rtl8168_mdio_write(tp, 0x0D, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0C); + gphy_val |= BIT_10; + rtl8168_mdio_write(tp, 0x0C, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_12) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x17, 0x0CC0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x0D); + gphy_val |= BIT_5; + rtl8168_mdio_write(tp, 0x0D, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0C); + gphy_val |= BIT_10; + rtl8168_mdio_write(tp, 0x0C, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + rtl8168_mdio_write(tp, 0x15, 0x035D); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x01, 0x0300); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_13) { + rtl8168_mdio_write(tp, 0x1F, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x0D); + gphy_val |= BIT_5; + rtl8168_mdio_write(tp, 0x0D, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x0C); + gphy_val |= BIT_10; + rtl8168_mdio_write(tp, 0x0C, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_14 || tp->mcfg == CFG_METHOD_15) { + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0023); + gphy_val = rtl8168_mdio_read(tp, 0x17) | BIT_1; + if (tp->RequiredSecLanDonglePatch) + gphy_val &= ~(BIT_2); + else + gphy_val |= (BIT_2); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b80); + rtl8168_mdio_write(tp, 0x06, 0xc896); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0B, 0x6C20); + rtl8168_mdio_write(tp, 0x07, 0x2872); + rtl8168_mdio_write(tp, 0x1C, 0xEFFF); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x14, 0x6420); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + gphy_val = rtl8168_mdio_read(tp, 0x08) & 0x00FF; + rtl8168_mdio_write(tp, 0x08, gphy_val | 0x8000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + rtl8168_mdio_write(tp, 0x18, gphy_val | 0x0010); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x14); + 
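+        /*
+         * Open-coded read-modify-write: gphy_val was just read from reg 0x14
+         * and is written back below with bit 15 set. The same select-page /
+         * read / mask / write-back pattern recurs in nearly every per-mcfg
+         * branch of this function.
+         */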
rtl8168_mdio_write(tp, 0x14, gphy_val | 0x8000); + + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x00, 0x080B); + rtl8168_mdio_write(tp, 0x0B, 0x09D7); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x1006); + } + } + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x19, 0x7F46); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8AD2); + rtl8168_mdio_write(tp, 0x06, 0x6810); + rtl8168_mdio_write(tp, 0x05, 0x8AD4); + rtl8168_mdio_write(tp, 0x06, 0x8002); + rtl8168_mdio_write(tp, 0x05, 0x8ADE); + rtl8168_mdio_write(tp, 0x06, 0x8025); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002F); + rtl8168_mdio_write(tp, 0x15, 0x1919); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + rtl8168_mdio_write(tp, 0x18, gphy_val | 0x0040); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + rtl8168_mdio_write(tp, 0x06, gphy_val | 0x0001); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x00AC); + rtl8168_mdio_write(tp, 0x18, 0x0006); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_16) { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_2 | BIT_1; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x18, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x14); + gphy_val |= BIT_15; + rtl8168_mdio_write(tp, 0x14, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x15, 0x1006); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0B, 0x6C14); + rtl8168_mdio_write(tp, 0x14, 0x7F3D); + rtl8168_mdio_write(tp, 0x1C, 0xFAFE); + rtl8168_mdio_write(tp, 0x08, 0x07C5); + rtl8168_mdio_write(tp, 0x10, 0xF090); + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x14, 0x641A); + rtl8168_mdio_write(tp, 0x1A, 0x0606); + rtl8168_mdio_write(tp, 0x12, 0xF480); + rtl8168_mdio_write(tp, 0x13, 0x0747); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0004); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0078); + rtl8168_mdio_write(tp, 0x15, 0xA408); + rtl8168_mdio_write(tp, 0x17, 0x5100); + rtl8168_mdio_write(tp, 0x19, 0x0008); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x0D, 0x0207); + rtl8168_mdio_write(tp, 0x02, 0x5FD0); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0004); + rtl8168_mdio_write(tp, 0x1F, 
0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x00A1); + gphy_val = rtl8168_mdio_read(tp, 0x1A); + gphy_val &= ~BIT_2; + rtl8168_mdio_write(tp, 0x1A, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0004); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x16); + gphy_val |= BIT_5; + rtl8168_mdio_write(tp, 0x16, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0004); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x00AC); + rtl8168_mdio_write(tp, 0x18, 0x0006); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x09, 0xA20F); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B5B); + rtl8168_mdio_write(tp, 0x06, 0x9222); + rtl8168_mdio_write(tp, 0x05, 0x8B6D); + rtl8168_mdio_write(tp, 0x06, 0x8000); + rtl8168_mdio_write(tp, 0x05, 0x8B76); + rtl8168_mdio_write(tp, 0x06, 0x8000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (pdev->subsystem_vendor == 0x1043 && + pdev->subsystem_device == 0x13F7) { + + static const u16 evl_phy_value[] = { + 0x8B56, 0x8B5F, 0x8B68, 0x8B71, + 0x8B7A, 0x8A7B, 0x8A7E, 0x8A81, + 0x8A84, 0x8A87 + }; + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + for (i = 0; i < ARRAY_SIZE(evl_phy_value); i++) { + rtl8168_mdio_write(tp, 0x05, evl_phy_value[i]); + gphy_val = (0xAA << 8) | (rtl8168_mdio_read(tp, 0x06) & 0xFF); + rtl8168_mdio_write(tp, 0x06, gphy_val); + } + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0078); + rtl8168_mdio_write(tp, 0x17, 0x51AA); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B54); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8B5D); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8A7C); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A7F); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A82); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A88); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + gphy_val = rtl8168_mdio_read(tp, 0x06) | BIT_14 | BIT_15; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } else if (tp->mcfg == CFG_METHOD_17) { + if (pdev->subsystem_vendor == 0x144d && + pdev->subsystem_device == 0xc0a6) { + rtl8168_mdio_write(tp, 0x1F, 0x0001); + rtl8168_mdio_write(tp, 0x0e, 0x6b7f); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } else { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= 
BIT_2 | BIT_1; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val &= ~BIT_4; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0004); + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x18, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0002); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x14); + gphy_val |= BIT_15; + rtl8168_mdio_write(tp, 0x14, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0004); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x00AC); + rtl8168_mdio_write(tp, 0x18, 0x0006); + rtl8168_mdio_write(tp, 0x1F, 0x0002); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x09, 0xA20F); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + gphy_val = rtl8168_mdio_read(tp, 0x06) | BIT_14 | BIT_15; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B5B); + rtl8168_mdio_write(tp, 0x06, 0x9222); + rtl8168_mdio_write(tp, 0x05, 0x8B6D); + rtl8168_mdio_write(tp, 0x06, 0x8000); + rtl8168_mdio_write(tp, 0x05, 0x8B76); + rtl8168_mdio_write(tp, 0x06, 0x8000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (pdev->subsystem_vendor == 0x1043 && + pdev->subsystem_device == 0x13F7) { + + static const u16 evl_phy_value[] = { + 0x8B56, 0x8B5F, 0x8B68, 0x8B71, + 0x8B7A, 0x8A7B, 0x8A7E, 0x8A81, + 0x8A84, 0x8A87 + }; + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + for (i = 0; i < ARRAY_SIZE(evl_phy_value); i++) { + rtl8168_mdio_write(tp, 0x05, evl_phy_value[i]); + gphy_val = (0xAA << 8) | (rtl8168_mdio_read(tp, 0x06) & 0xFF); + rtl8168_mdio_write(tp, 0x06, gphy_val); + } + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0078); + rtl8168_mdio_write(tp, 0x17, 0x51AA); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B54); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8B5D); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8A7C); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A7F); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A82); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A88); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val |= BIT_12; + rtl8168_mdio_write(tp, 
0x15, gphy_val); + } + } + } else if (tp->mcfg == CFG_METHOD_18) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_2 | BIT_1; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x18, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x14); + gphy_val |= BIT_15; + rtl8168_mdio_write(tp, 0x14, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_14; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x09, 0xA20F); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B55); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8B5E); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8B67); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8B70); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0078); + rtl8168_mdio_write(tp, 0x17, 0x0000); + rtl8168_mdio_write(tp, 0x19, 0x00FB); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B79); + rtl8168_mdio_write(tp, 0x06, 0xAA00); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0003); + rtl8168_mdio_write(tp, 0x01, 0x328A); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B54); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8B5D); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8A7C); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A7F); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A82); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A88); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_15); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val |= BIT_12; + rtl8168_mdio_write(tp, 0x15, gphy_val); + } + } + } else if (tp->mcfg == CFG_METHOD_19) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + 
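+        /*
+         * The 0x8Bxx offsets reached through page 0x0005 / reg 0x05 live in
+         * PHY MCU RAM, which is presumably why every access to them sits
+         * behind the HW_HAS_WRITE_PHY_MCU_RAM_CODE() capability check.
+         */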
rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_2 | BIT_1; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x18, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x14); + gphy_val |= BIT_15; + rtl8168_mdio_write(tp, 0x14, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B54); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8B5D); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8A7C); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A7F); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A82); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A88); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_15); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val |= BIT_12; + rtl8168_mdio_write(tp, 0x15, gphy_val); + } + } + } else if (tp->mcfg == CFG_METHOD_20) { + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_2 | BIT_1; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + rtl8168_mdio_write(tp, 0x1f, 0x0007); + rtl8168_mdio_write(tp, 0x1e, 0x002D); + gphy_val = rtl8168_mdio_read(tp, 0x18); + gphy_val |= BIT_4; + rtl8168_mdio_write(tp, 0x18, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x14); + gphy_val |= BIT_15; + rtl8168_mdio_write(tp, 0x14, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B86); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_0; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B85); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_14; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0003); + rtl8168_mdio_write(tp, 0x09, 0xA20F); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B55); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8B5E); + rtl8168_mdio_write(tp, 0x06, 0x0000); + 
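+        /*
+         * This run zeroes MCU RAM words 0x8B55, 0x8B5E, 0x8B67 and 0x8B70 --
+         * four entries spaced nine apart, consistent with clearing one field
+         * in four fixed-size records.
+         */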
rtl8168_mdio_write(tp, 0x05, 0x8B67); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x05, 0x8B70); + rtl8168_mdio_write(tp, 0x06, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x0078); + rtl8168_mdio_write(tp, 0x17, 0x0000); + rtl8168_mdio_write(tp, 0x19, 0x00FB); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B79); + rtl8168_mdio_write(tp, 0x06, 0xAA00); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B54); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8B5D); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_11); + rtl8168_mdio_write(tp, 0x05, 0x8A7C); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A7F); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A82); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x05, 0x8A88); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8b85); + rtl8168_mdio_write(tp, 0x06, rtl8168_mdio_read(tp, 0x06) | BIT_15); + rtl8168_mdio_write(tp, 0x1f, 0x0000); + } + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1f, 0x0000); + gphy_val = rtl8168_mdio_read(tp, 0x15); + gphy_val |= BIT_12; + rtl8168_mdio_write(tp, 0x15, gphy_val); + } + } + } else if (tp->mcfg == CFG_METHOD_21) { + rtl8168_mdio_write(tp, 0x1F, 0x0A46); + gphy_val = rtl8168_mdio_read(tp, 0x10); + rtl8168_mdio_write(tp, 0x1F, 0x0BCC); + if (gphy_val & BIT_8) + rtl8168_clear_eth_phy_bit(tp, 0x12, BIT_15); + else + rtl8168_set_eth_phy_bit(tp, 0x12, BIT_15); + rtl8168_mdio_write(tp, 0x1F, 0x0A46); + gphy_val = rtl8168_mdio_read(tp, 0x13); + rtl8168_mdio_write(tp, 0x1F, 0x0C41); + if (gphy_val & BIT_8) + rtl8168_set_eth_phy_bit(tp, 0x15, BIT_1); + else + rtl8168_clear_eth_phy_bit(tp, 0x15, BIT_1); + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_2 | BIT_3); + + rtl8168_mdio_write(tp, 0x1F, 0x0BCC); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_7); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_6); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8084); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~(BIT_14 | BIT_13)); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_12); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_1); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_0); + + rtl8168_mdio_write(tp, 0x1F, 0x0A4B); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_2); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8012); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | BIT_15); + + rtl8168_mdio_write(tp, 0x1F, 0x0C42); + gphy_val = rtl8168_mdio_read(tp, 0x11); + gphy_val |= BIT_14; + gphy_val 
&= ~BIT_13; + rtl8168_mdio_write(tp, 0x11, gphy_val); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x809A); + rtl8168_mdio_write(tp, 0x14, 0x8022); + rtl8168_mdio_write(tp, 0x13, 0x80A0); + gphy_val = rtl8168_mdio_read(tp, 0x14) & 0x00FF; + gphy_val |= 0x1000; + rtl8168_mdio_write(tp, 0x14, gphy_val); + rtl8168_mdio_write(tp, 0x13, 0x8088); + rtl8168_mdio_write(tp, 0x14, 0x9222); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_2); + } + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_22) { + //do nothing + } else if (tp->mcfg == CFG_METHOD_23) { + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | (BIT_3 | BIT_2)); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0BCC); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_7); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_6); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8084); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~(BIT_14 | BIT_13)); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_12); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_1); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_0); + + rtl8168_mdio_write(tp, 0x1F, 0x0A4B); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_2); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8012); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | BIT_15); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0C42); + ClearAndSetEthPhyBit(tp, + 0x11, + BIT_13, + BIT_14 + ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_2); + } + } + } else if (tp->mcfg == CFG_METHOD_24) { + rtl8168_mdio_write(tp, 0x1F, 0x0BCC); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_7); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_6); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8084); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~(BIT_14 | BIT_13)); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_12); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_1); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_0); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8012); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | BIT_15); + + rtl8168_mdio_write(tp, 0x1F, 0x0C42); + gphy_val = rtl8168_mdio_read(tp, 0x11); + gphy_val |= BIT_14; + gphy_val &= ~BIT_13; + rtl8168_mdio_write(tp, 0x11, gphy_val); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_2); + } + } + } else if (tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26) { + rtl8168_mdio_write(tp, 0x1F, 0x0BCC); + rtl8168_mdio_write(tp, 0x14, 
rtl8168_mdio_read(tp, 0x14) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_7); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_6); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8084); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~(BIT_14 | BIT_13)); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_12); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_1); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_0); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8012); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | BIT_15); + + rtl8168_mdio_write(tp, 0x1F, 0x0BCE); + rtl8168_mdio_write(tp, 0x12, 0x8860); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x80F3); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x8B00); + rtl8168_mdio_write(tp, 0x13, 0x80F0); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x3A00); + rtl8168_mdio_write(tp, 0x13, 0x80EF); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x0500); + rtl8168_mdio_write(tp, 0x13, 0x80F6); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x6E00); + rtl8168_mdio_write(tp, 0x13, 0x80EC); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x6800); + rtl8168_mdio_write(tp, 0x13, 0x80ED); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x7C00); + rtl8168_mdio_write(tp, 0x13, 0x80F2); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xF400); + rtl8168_mdio_write(tp, 0x13, 0x80F4); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x8500); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8110); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xA800); + rtl8168_mdio_write(tp, 0x13, 0x810F); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x1D00); + rtl8168_mdio_write(tp, 0x13, 0x8111); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xF500); + rtl8168_mdio_write(tp, 0x13, 0x8113); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x6100); + rtl8168_mdio_write(tp, 0x13, 0x8115); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x9200); + rtl8168_mdio_write(tp, 0x13, 0x810E); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x0400); + rtl8168_mdio_write(tp, 0x13, 0x810C); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x7C00); + rtl8168_mdio_write(tp, 0x13, 0x810B); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x5A00); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x80D1); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xFF00); + rtl8168_mdio_write(tp, 0x13, 0x80CD); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x9E00); + rtl8168_mdio_write(tp, 0x13, 0x80D3); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x0E00); + rtl8168_mdio_write(tp, 0x13, 0x80D5); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xCA00); + rtl8168_mdio_write(tp, 0x13, 0x80D7); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x8400); + + if (aspm) { + if 
(HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_2); + } + } + } else if (tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28) { + rtl8168_mdio_write(tp, 0x1F, 0x0BCC); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~BIT_8); + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_7); + rtl8168_mdio_write(tp, 0x11, rtl8168_mdio_read(tp, 0x11) | BIT_6); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8084); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) & ~(BIT_14 | BIT_13)); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_12); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_1); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_0); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8012); + rtl8168_mdio_write(tp, 0x14, rtl8168_mdio_read(tp, 0x14) | BIT_15); + + rtl8168_mdio_write(tp, 0x1F, 0x0C42); + rtl8168_mdio_write(tp, 0x11, (rtl8168_mdio_read(tp, 0x11) & ~BIT_13) | BIT_14); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x80F3); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x8B00); + rtl8168_mdio_write(tp, 0x13, 0x80F0); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x3A00); + rtl8168_mdio_write(tp, 0x13, 0x80EF); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x0500); + rtl8168_mdio_write(tp, 0x13, 0x80F6); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x6E00); + rtl8168_mdio_write(tp, 0x13, 0x80EC); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x6800); + rtl8168_mdio_write(tp, 0x13, 0x80ED); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x7C00); + rtl8168_mdio_write(tp, 0x13, 0x80F2); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xF400); + rtl8168_mdio_write(tp, 0x13, 0x80F4); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x8500); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8110); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xA800); + rtl8168_mdio_write(tp, 0x13, 0x810F); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x1D00); + rtl8168_mdio_write(tp, 0x13, 0x8111); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xF500); + rtl8168_mdio_write(tp, 0x13, 0x8113); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x6100); + rtl8168_mdio_write(tp, 0x13, 0x8115); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x9200); + rtl8168_mdio_write(tp, 0x13, 0x810E); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x0400); + rtl8168_mdio_write(tp, 0x13, 0x810C); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x7C00); + rtl8168_mdio_write(tp, 0x13, 0x810B); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x5A00); + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x80D1); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xFF00); + rtl8168_mdio_write(tp, 0x13, 0x80CD); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x9E00); + rtl8168_mdio_write(tp, 0x13, 0x80D3); + rtl8168_mdio_write(tp, 0x14, 
(rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x0E00); + rtl8168_mdio_write(tp, 0x13, 0x80D5); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0xCA00); + rtl8168_mdio_write(tp, 0x13, 0x80D7); + rtl8168_mdio_write(tp, 0x14, (rtl8168_mdio_read(tp, 0x14) & ~0xFF00) | 0x8400); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x10, rtl8168_mdio_read(tp, 0x10) | BIT_2); + } + } + } else if (tp->mcfg == CFG_METHOD_29) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x809b); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xF800 , + 0x8000 + ); + rtl8168_mdio_write(tp, 0x13, 0x80A2); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x8000 + ); + rtl8168_mdio_write(tp, 0x13, 0x80A4); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x8500 + ); + rtl8168_mdio_write(tp, 0x13, 0x809C); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0xbd00 + ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x80AD); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xF800 , + 0x7000 + ); + rtl8168_mdio_write(tp, 0x13, 0x80B4); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x5000 + ); + rtl8168_mdio_write(tp, 0x13, 0x80AC); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x4000 + ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x808E); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x1200 + ); + rtl8168_mdio_write(tp, 0x13, 0x8090); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0xE500 + ); + rtl8168_mdio_write(tp, 0x13, 0x8092); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x9F00 + ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + u16 dout_tapbin; + + dout_tapbin = 0x0000; + rtl8168_mdio_write( tp, 0x1F, 0x0A46 ); + gphy_val = rtl8168_mdio_read( tp, 0x13 ); + gphy_val &= (BIT_1|BIT_0); + gphy_val <<= 2; + dout_tapbin |= gphy_val; + + gphy_val = rtl8168_mdio_read( tp, 0x12 ); + gphy_val &= (BIT_15|BIT_14); + gphy_val >>= 14; + dout_tapbin |= gphy_val; + + dout_tapbin = ~( dout_tapbin^BIT_3 ); + dout_tapbin <<= 12; + dout_tapbin &= 0xF000; + + rtl8168_mdio_write( tp, 0x1F, 0x0A43 ); + + rtl8168_mdio_write( tp, 0x13, 0x827A ); + ClearAndSetEthPhyBit( tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_12, + dout_tapbin + ); + + + rtl8168_mdio_write( tp, 0x13, 0x827B ); + ClearAndSetEthPhyBit( tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_12, + dout_tapbin + ); + + + rtl8168_mdio_write( tp, 0x13, 0x827C ); + ClearAndSetEthPhyBit( tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_12, + dout_tapbin + ); + + + rtl8168_mdio_write( tp, 0x13, 0x827D ); + ClearAndSetEthPhyBit( tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_12, + dout_tapbin + ); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_11); + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + rtl8168_set_eth_phy_bit(tp, 0x16, BIT_1); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_set_eth_phy_bit( tp, 0x11, BIT_11 ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + + rtl8168_mdio_write(tp, 0x1F, 0x0BCA); + ClearAndSetEthPhyBit( tp, + 0x17, + (BIT_13 | BIT_12) , + BIT_14 + ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x803F); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + rtl8168_mdio_write(tp, 0x13, 0x8047); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + 
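+        /*
+         * ClearAndSetEthPhyBit() and the rtl8168_set/clear_eth_phy_bit()
+         * wrappers used above are read-modify-write helpers defined elsewhere
+         * in this file; a minimal sketch, assuming the natural reading of
+         * their arguments:
+         *
+         *     static void ClearAndSetEthPhyBit(struct rtl8168_private *tp,
+         *                                      u8 addr, u16 clearmask,
+         *                                      u16 setmask)
+         *     {
+         *             u16 val = rtl8168_mdio_read(tp, addr);
+         *
+         *             val &= ~clearmask;
+         *             val |= setmask;
+         *             rtl8168_mdio_write(tp, addr, val);
+         *     }
+         */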
rtl8168_mdio_write(tp, 0x13, 0x804F); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + rtl8168_mdio_write(tp, 0x13, 0x8057); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + rtl8168_mdio_write(tp, 0x13, 0x805F); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + rtl8168_mdio_write(tp, 0x13, 0x8067 ); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + rtl8168_mdio_write(tp, 0x13, 0x806F ); + rtl8168_clear_eth_phy_bit( tp, 0x14, (BIT_13 | BIT_12)); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_set_eth_phy_bit( tp, 0x10, BIT_2 ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + } + } else if (tp->mcfg == CFG_METHOD_30) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x808A); + ClearAndSetEthPhyBit( tp, + 0x14, + BIT_5 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0, + 0x0A ); + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_11); + rtl8168_mdio_write(tp, 0x1F, 0x0A42); + rtl8168_set_eth_phy_bit(tp, 0x16, BIT_1); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_set_eth_phy_bit( tp, 0x11, BIT_11 ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (tp->RequireAdcBiasPatch) { + rtl8168_mdio_write(tp, 0x1F, 0x0BCF); + rtl8168_mdio_write(tp, 0x16, tp->AdcBiasPatchIoffset); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + { + u16 rlen; + + rtl8168_mdio_write(tp, 0x1F, 0x0BCD); + gphy_val = rtl8168_mdio_read( tp, 0x16 ); + gphy_val &= 0x000F; + + if ( gphy_val > 3 ) { + rlen = gphy_val - 3; + } else { + rlen = 0; + } + + gphy_val = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12); + + rtl8168_mdio_write(tp, 0x1F, 0x0BCD); + rtl8168_mdio_write(tp, 0x17, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x85FE); + ClearAndSetEthPhyBit( + tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_12|BIT_11|BIT_10|BIT_8, + BIT_9); + rtl8168_mdio_write(tp, 0x13, 0x85FF); + ClearAndSetEthPhyBit( + tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_12, + BIT_11|BIT_10|BIT_9|BIT_8); + rtl8168_mdio_write(tp, 0x13, 0x814B); + ClearAndSetEthPhyBit( + tp, + 0x14, + BIT_15|BIT_14|BIT_13|BIT_11|BIT_10|BIT_9|BIT_8, + BIT_12); + } + + if (aspm) { + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_set_eth_phy_bit( tp, 0x10, BIT_2 ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + } + } else if (tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x808E); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x4800 + ); + rtl8168_mdio_write(tp, 0x13, 0x8090); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0xCC00 + ); + rtl8168_mdio_write(tp, 0x13, 0x8092); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0xB000 + ); + rtl8168_mdio_write(tp, 0x13, 0x8088); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x6000 + ); + rtl8168_mdio_write(tp, 0x13, 0x808B); + ClearAndSetEthPhyBit( tp, + 0x14, + 0x3F00 , + 0x0B00 + ); + rtl8168_mdio_write(tp, 0x13, 0x808D); + ClearAndSetEthPhyBit( tp, + 0x14, + 0x1F00 , + 0x0600 + ); + rtl8168_mdio_write(tp, 0x13, 0x808C); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0xB000 + ); + + rtl8168_mdio_write(tp, 0x13, 0x80A0); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x2800 + ); + 
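+        /*
+         * On these OCP-paged PHYs (pages 0x0A43 and friends), reg 0x13
+         * latches an extended register address and reg 0x14 is its data
+         * window, so each 0x13/0x14 pair in this table is one address/data
+         * access patching a bit-field of an extended register.
+         */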
rtl8168_mdio_write(tp, 0x13, 0x80A2); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x5000 + ); + rtl8168_mdio_write(tp, 0x13, 0x809B); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xF800 , + 0xB000 + ); + rtl8168_mdio_write(tp, 0x13, 0x809A); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x4B00 + ); + rtl8168_mdio_write(tp, 0x13, 0x809D); + ClearAndSetEthPhyBit( tp, + 0x14, + 0x3F00 , + 0x0800 + ); + rtl8168_mdio_write(tp, 0x13, 0x80A1); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x7000 + ); + rtl8168_mdio_write(tp, 0x13, 0x809F); + ClearAndSetEthPhyBit( tp, + 0x14, + 0x1F00 , + 0x0300 + ); + rtl8168_mdio_write(tp, 0x13, 0x809E); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x8800 + ); + + rtl8168_mdio_write(tp, 0x13, 0x80B2); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x2200 + ); + rtl8168_mdio_write(tp, 0x13, 0x80AD); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xF800 , + 0x9800 + ); + rtl8168_mdio_write(tp, 0x13, 0x80AF); + ClearAndSetEthPhyBit( tp, + 0x14, + 0x3F00 , + 0x0800 + ); + rtl8168_mdio_write(tp, 0x13, 0x80B3); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x6F00 + ); + rtl8168_mdio_write(tp, 0x13, 0x80B1); + ClearAndSetEthPhyBit( tp, + 0x14, + 0x1F00 , + 0x0300 + ); + rtl8168_mdio_write(tp, 0x13, 0x80B0); + ClearAndSetEthPhyBit( tp, + 0x14, + 0xFF00 , + 0x9300 + ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8011); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_11); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_set_eth_phy_bit(tp, 0x11, BIT_11); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_mdio_write(tp, 0x13, 0x8016); + rtl8168_set_eth_phy_bit(tp, 0x14, BIT_10); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (aspm) { + if (!HW_SUPP_SERDES_PHY(tp) && + HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + rtl8168_mdio_write(tp, 0x1F, 0x0A43); + rtl8168_set_eth_phy_bit( tp, 0x10, BIT_2 ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + } + } + +#ifdef ENABLE_FIBER_SUPPORT + if (HW_FIBER_MODE_ENABLED(tp)) + rtl8168_hw_fiber_phy_config(dev); +#endif //ENABLE_FIBER_SUPPORT + + //EthPhyPPSW + if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_24 || tp->mcfg == CFG_METHOD_25 || + tp->mcfg == CFG_METHOD_26) { + //disable EthPhyPPSW + rtl8168_mdio_write(tp, 0x1F, 0x0BCD); + rtl8168_mdio_write(tp, 0x14, 0x5065); + rtl8168_mdio_write(tp, 0x14, 0xD065); + rtl8168_mdio_write(tp, 0x1F, 0x0BC8); + rtl8168_mdio_write(tp, 0x11, 0x5655); + rtl8168_mdio_write(tp, 0x1F, 0x0BCD); + rtl8168_mdio_write(tp, 0x14, 0x1065); + rtl8168_mdio_write(tp, 0x14, 0x9065); + rtl8168_mdio_write(tp, 0x14, 0x1065); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } else if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + //enable EthPhyPPSW + rtl8168_mdio_write(tp, 0x1F, 0x0A44); + rtl8168_set_eth_phy_bit( tp, 0x11, BIT_7 ); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + + /*ocp phy power saving*/ + if (tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + if (aspm) + rtl8168_enable_ocp_phy_power_saving(dev); + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if (HW_HAS_WRITE_PHY_MCU_RAM_CODE(tp)) { + if (tp->eee_enabled) 
+ rtl8168_enable_EEE(tp); + else + rtl8168_disable_EEE(tp); + } +} + +static inline void rtl8168_delete_esd_timer(struct net_device *dev, struct timer_list *timer) +{ + del_timer_sync(timer); +} + +static inline void rtl8168_request_esd_timer(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct timer_list *timer = &tp->esd_timer; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + setup_timer(timer, rtl8168_esd_timer, (unsigned long)dev); +#else + timer_setup(timer, rtl8168_esd_timer, 0); +#endif + mod_timer(timer, jiffies + RTL8168_ESD_TIMEOUT); +} + +static inline void rtl8168_delete_link_timer(struct net_device *dev, struct timer_list *timer) +{ + del_timer_sync(timer); +} + +static inline void rtl8168_request_link_timer(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct timer_list *timer = &tp->link_timer; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + setup_timer(timer, rtl8168_link_timer, (unsigned long)dev); +#else + timer_setup(timer, rtl8168_link_timer, 0); +#endif + mod_timer(timer, jiffies + RTL8168_LINK_TIMEOUT); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +/* + * Polling 'interrupt' - used by things like netconsole to send skbs + * without having to re-enable interrupts. It's not called while + * the interrupt routine is executing. + */ +static void +rtl8168_netpoll(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + disable_irq(pdev->irq); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) + rtl8168_interrupt(pdev->irq, dev, NULL); +#else + rtl8168_interrupt(pdev->irq, dev); +#endif + enable_irq(pdev->irq); +} +#endif + +static void +rtl8168_get_bios_setting(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->bios_setting = RTL_R32(tp, 0x8c); + break; + } +} + +static void +rtl8168_set_bios_setting(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W32(tp, 0x8C, tp->bios_setting); + break; + } +} + +static void +rtl8168_init_software_variable(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + rtl8168_get_bios_setting(dev); + + switch (tp->mcfg) { + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + tp->HwSuppDashVer = 1; + break; + case CFG_METHOD_23: + case CFG_METHOD_27: + case 
CFG_METHOD_28: + tp->HwSuppDashVer = 2; + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppDashVer = 3; + break; + default: + tp->HwSuppDashVer = 0; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwPkgDet = rtl8168_mac_ocp_read(tp, 0xDC00); + tp->HwPkgDet = (tp->HwPkgDet >> 3) & 0x0F; + break; + } + + if (HW_SUPP_SERDES_PHY(tp)) + eee_enable = 0; + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppNowIsOobVer = 1; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppPhyOcpVer = 1; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppUpsVer = 1; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwPcieSNOffset = 0x16C; + break; + case CFG_METHOD_DEFAULT: + tp->HwPcieSNOffset = 0; + break; + default: + tp->HwPcieSNOffset = 0x164; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppAspmClkIntrLock = 1; + break; + } + + if (!aspm || !tp->HwSuppAspmClkIntrLock) + dynamic_aspm = 0; + +#ifdef ENABLE_REALWOW_SUPPORT + rtl8168_get_realwow_hw_version(dev); +#endif //ENABLE_REALWOW_SUPPORT + + if (HW_DASH_SUPPORT_DASH(tp) && rtl8168_check_dash(tp)) + tp->DASH = 1; + else + tp->DASH = 0; + + if (tp->DASH) { + if (HW_DASH_SUPPORT_TYPE_3(tp)) { + u64 CmacMemPhysAddress; + void __iomem *cmac_ioaddr = NULL; + struct pci_dev *pdev_cmac; + + pdev_cmac = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0)); + + //map CMAC IO space + CmacMemPhysAddress = pci_resource_start(pdev_cmac, 2); + + /* ioremap MMIO region */ + cmac_ioaddr = ioremap(CmacMemPhysAddress, R8168_REGS_SIZE); + + if (cmac_ioaddr == NULL) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "cannot remap CMAC MMIO, aborting\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + tp->DASH = 0; + } else { + tp->mapped_cmac_ioaddr = cmac_ioaddr; + } + } + + eee_enable = 0; + } + +#ifdef ENABLE_DASH_SUPPORT +#ifdef ENABLE_DASH_PRINTER_SUPPORT + if (tp->DASH) { + if (HW_DASH_SUPPORT_TYPE_3(tp) && tp->HwPkgDet == 0x0F) + tp->dash_printer_enabled = 1; + else if (HW_DASH_SUPPORT_TYPE_2(tp)) + tp->dash_printer_enabled = 1; + } +#endif //ENABLE_DASH_PRINTER_SUPPORT +#endif //ENABLE_DASH_SUPPORT + + if (HW_DASH_SUPPORT_TYPE_2(tp)) + tp->cmac_ioaddr = tp->mmio_addr; + else if (HW_DASH_SUPPORT_TYPE_3(tp)) + tp->cmac_ioaddr = tp->mapped_cmac_ioaddr; + + switch (tp->mcfg) 
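+ /*
+  * Editor's note (added annotation, not original driver text): the switch
+  * below picks the interrupt sources each chip generation unmasks. Only
+  * CFG_METHOD_1 watches RxFIFOOver, methods 2-4 still watch TxDescUnavail,
+  * and everything newer keeps just the basic RxDescUnavail/TxOK/RxOK/SWInt
+  * set; PCSTimeout lives in the separate timer_intr_mask used by the
+  * driver's timer-driven interrupt path.
+  */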
{ + case CFG_METHOD_1: + tp->intr_mask = RxDescUnavail | RxFIFOOver | TxDescUnavail | TxOK | RxOK | SWInt; + tp->timer_intr_mask = PCSTimeout | RxFIFOOver; + break; + case CFG_METHOD_2: + case CFG_METHOD_3: + case CFG_METHOD_4: + tp->intr_mask = RxDescUnavail | TxDescUnavail | TxOK | RxOK | SWInt; + tp->timer_intr_mask = PCSTimeout; + break; + default: + tp->intr_mask = RxDescUnavail | TxOK | RxOK | SWInt; + tp->timer_intr_mask = PCSTimeout; + break; + } + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) { + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + tp->timer_intr_mask |= ( ISRIMR_DASH_INTR_EN | ISRIMR_DASH_INTR_CMAC_RESET); + tp->intr_mask |= ( ISRIMR_DASH_INTR_EN | ISRIMR_DASH_INTR_CMAC_RESET); + } else { + tp->timer_intr_mask |= ( ISRIMR_DP_DASH_OK | ISRIMR_DP_HOST_OK | ISRIMR_DP_REQSYS_OK ); + tp->intr_mask |= ( ISRIMR_DP_DASH_OK | ISRIMR_DP_HOST_OK | ISRIMR_DP_REQSYS_OK ); + } + } +#endif + if (aspm) { + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->org_pci_offset_99 = rtl8168_csi_fun0_read_byte(tp, 0x99); + tp->org_pci_offset_99 &= ~(BIT_5|BIT_6); + break; + } + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + tp->org_pci_offset_180 = rtl8168_csi_fun0_read_byte(tp, 0x180); + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->org_pci_offset_180 = rtl8168_csi_fun0_read_byte(tp, 0x214); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_27: + case CFG_METHOD_28: + if (tp->org_pci_offset_99 & BIT_2) + tp->issue_offset_99_event = TRUE; + break; + } + } + + pci_read_config_byte(pdev, 0x80, &tp->org_pci_offset_80); + pci_read_config_byte(pdev, 0x81, &tp->org_pci_offset_81); + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + if ((tp->features & RTL_FEATURE_MSI) && (tp->org_pci_offset_80 & BIT_1)) + tp->use_timer_interrrupt = FALSE; + else + tp->use_timer_interrrupt = TRUE; + break; + default: + tp->use_timer_interrrupt = TRUE; + break; + } + + if (timer_count == 0 || tp->mcfg == CFG_METHOD_DEFAULT) + tp->use_timer_interrrupt = FALSE; + + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + tp->ShortPacketSwChecksum = TRUE; + break; + case CFG_METHOD_16: + case CFG_METHOD_17: + tp->ShortPacketSwChecksum = TRUE; + tp->UseSwPaddingShortPkt = TRUE; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_30: { + u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0; + u16 TmpUshort; + + rtl8168_mac_ocp_write( tp, 0xDD02, 0x807D); + TmpUshort = rtl8168_mac_ocp_read( tp, 0xDD02 ); + ioffset_p3 = ( (TmpUshort & BIT_7) >>7 ); + ioffset_p3 <<= 3; + TmpUshort = rtl8168_mac_ocp_read( tp, 0xDD00 ); + + ioffset_p3 |= ((TmpUshort & (BIT_15 | BIT_14 | BIT_13))>>13); + + ioffset_p2 = ((TmpUshort & (BIT_12|BIT_11|BIT_10|BIT_9))>>9); + ioffset_p1 = ((TmpUshort & (BIT_8|BIT_7|BIT_6|BIT_5))>>5); + + 
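+ /*
+  * Editor's note (added annotation, not original driver text): the ADC
+  * bias "ioffset" is four 4-bit fields scattered over two MAC OCP words;
+  * from the masks used here:
+  *
+  *   ioffset_p3 = DD02[7] << 3 | DD00[15:13]
+  *   ioffset_p2 = DD00[12:9]
+  *   ioffset_p1 = DD00[8:5]
+  *   ioffset_p0 = DD00[4] << 3 | DD00[2:0]
+  *
+  * A value of 0xF in every field means "not programmed", so no patch is
+  * applied; otherwise the fields are repacked below into
+  * AdcBiasPatchIoffset as (p3<<12)|(p2<<8)|(p1<<4)|p0.
+  */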
ioffset_p0 = ( (TmpUshort & BIT_4) >>4 ); + ioffset_p0 <<= 3; + ioffset_p0 |= (TmpUshort & (BIT_2| BIT_1 | BIT_0)); + + if ((ioffset_p3 == 0x0F) && (ioffset_p2 == 0x0F) && (ioffset_p1 == 0x0F) && (ioffset_p0 == 0x0F)) { + tp->RequireAdcBiasPatch = FALSE; + } else { + tp->RequireAdcBiasPatch = TRUE; + tp->AdcBiasPatchIoffset = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0); + } + } + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: { + u16 rg_saw_cnt; + + rtl8168_mdio_write(tp, 0x1F, 0x0C42); + rg_saw_cnt = rtl8168_mdio_read(tp, 0x13); + rg_saw_cnt &= ~(BIT_15|BIT_14); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + if ( rg_saw_cnt > 0) { + tp->SwrCnt1msIni = 16000000/rg_saw_cnt; + tp->SwrCnt1msIni &= 0x0FFF; + + tp->RequireAdjustUpsTxLinkPulseTiming = TRUE; + } + } + break; + } + +#ifdef ENABLE_FIBER_SUPPORT + rtl8168_check_hw_fiber_mode_support(dev); +#endif //ENABLE_FIBER_SUPPORT + + switch(tp->mcfg) { + case CFG_METHOD_32: + case CFG_METHOD_33: + if (tp->HwPkgDet == 0x06) { + u8 tmpUchar = rtl8168_eri_read(tp, 0xE6, 1, ERIAR_ExGMAC); + if (tmpUchar == 0x02) + tp->HwSuppSerDesPhyVer = 1; + else if (tmpUchar == 0x00) + tp->HwSuppSerDesPhyVer = 2; + } + break; + } + + if (pdev->subsystem_vendor == 0x144d) { + if (pdev->subsystem_device == 0xc098 || + pdev->subsystem_device == 0xc0b1 || + pdev->subsystem_device == 0xc0b8) + hwoptimize |= HW_PATCH_SAMSUNG_LAN_DONGLE; + } + + if (hwoptimize & HW_PATCH_SAMSUNG_LAN_DONGLE) { + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_30: + tp->RequiredSecLanDonglePatch = TRUE; + break; + } + } + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppMagicPktVer = WAKEUP_MAGIC_PACKET_V2; + break; + case CFG_METHOD_DEFAULT: + tp->HwSuppMagicPktVer = WAKEUP_MAGIC_PACKET_NOT_SUPPORT; + break; + default: + tp->HwSuppMagicPktVer = WAKEUP_MAGIC_PACKET_V1; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_29: + case CFG_METHOD_30: + tp->HwSuppEsdVer = 2; + break; + default: + tp->HwSuppEsdVer = 1; + break; + } + + if (tp->HwSuppEsdVer == 2) { + rtl8168_mdio_write(tp, 0x1F, 0x0A46); + tp->BackupPhyFuseDout_15_0 = rtl8168_mdio_read(tp, 0x10); + tp->BackupPhyFuseDout_47_32 = rtl8168_mdio_read(tp, 0x12); + tp->BackupPhyFuseDout_63_48 = rtl8168_mdio_read(tp, 0x13); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + + tp->TestPhyOcpReg = TRUE; + } + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + tp->HwSuppCheckPhyDisableModeVer = 1; + break; + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_29: + case CFG_METHOD_30: + tp->HwSuppCheckPhyDisableModeVer = 2; + break; + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->HwSuppCheckPhyDisableModeVer = 3; + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + tp->sw_ram_code_ver = 
NIC_RAMCODE_VERSION_CFG_METHOD_14; + break; + case CFG_METHOD_16: + case CFG_METHOD_17: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_16; + break; + case CFG_METHOD_18: + case CFG_METHOD_19: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_18; + break; + case CFG_METHOD_20: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_20; + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_21; + break; + case CFG_METHOD_23: + case CFG_METHOD_27: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_23; + break; + case CFG_METHOD_24: + case CFG_METHOD_25: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_24; + break; + case CFG_METHOD_26: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_26; + break; + case CFG_METHOD_28: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_28; + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_29; + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + tp->sw_ram_code_ver = NIC_RAMCODE_VERSION_CFG_METHOD_31; + break; + } + + if (tp->HwIcVerUnknown) { + tp->NotWrRamCodeToMicroP = TRUE; + tp->NotWrMcuPatchCode = TRUE; + } + + tp->NicCustLedValue = RTL_R16(tp, CustomLED); + + rtl8168_get_hw_wol(dev); + + rtl8168_link_option((u8*)&autoneg_mode, (u32*)&speed_mode, (u8*)&duplex_mode, (u32*)&advertising_mode); + + tp->autoneg = autoneg_mode; + tp->speed = speed_mode; + tp->duplex = duplex_mode; + tp->advertising = advertising_mode; + + tp->max_jumbo_frame_size = rtl_chip_info[tp->chipset].jumbo_frame_sz; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) + /* MTU range: 60 - hw-specific max */ + dev->min_mtu = ETH_MIN_MTU; + dev->max_mtu = tp->max_jumbo_frame_size; +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) + tp->eee_enabled = eee_enable; + tp->eee_adv_t = MDIO_EEE_1000T | MDIO_EEE_100TX; + +#ifdef ENABLE_FIBER_SUPPORT + if (HW_FIBER_MODE_ENABLED(tp)) + rtl8168_set_fiber_mode_software_variable(dev); +#endif //ENABLE_FIBER_SUPPORT +} + +static void +rtl8168_release_board(struct pci_dev *pdev, + struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + rtl8168_set_bios_setting(dev); + rtl8168_rar_set(tp, tp->org_mac_addr); + tp->wol_enabled = WOL_DISABLED; + + if (!tp->DASH) + rtl8168_phy_power_down(dev); + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) + FreeAllocatedDashShareMemory(dev); +#endif + + if (tp->mapped_cmac_ioaddr != NULL) + iounmap(tp->mapped_cmac_ioaddr); + + iounmap(ioaddr); + pci_release_regions(pdev); + pci_clear_mwi(pdev); + pci_disable_device(pdev); + free_netdev(dev); +} + +static int +rtl8168_get_mac_address(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int i; + u8 mac_addr[MAC_ADDR_LEN]; + + for (i = 0; i < MAC_ADDR_LEN; i++) + mac_addr[i] = RTL_R8(tp, MAC0 + i); + + if (tp->mcfg == CFG_METHOD_18 || + tp->mcfg == CFG_METHOD_19 || + tp->mcfg == CFG_METHOD_20 || + tp->mcfg == CFG_METHOD_21 || + tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_23 || + tp->mcfg == CFG_METHOD_24 || + tp->mcfg == CFG_METHOD_25 || + tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || + tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || + tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || + tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + *(u32*)&mac_addr[0] = rtl8168_eri_read(tp, 0xE0, 4, ERIAR_ExGMAC); + *(u16*)&mac_addr[4] = rtl8168_eri_read(tp, 0xE4, 2, 
ERIAR_ExGMAC); + } else { + if (tp->eeprom_type != EEPROM_TYPE_NONE) { + u16 *pUshort = (u16*)mac_addr; + /* Get MAC address from EEPROM */ + if (tp->mcfg == CFG_METHOD_16 || + tp->mcfg == CFG_METHOD_17 || + tp->mcfg == CFG_METHOD_18 || + tp->mcfg == CFG_METHOD_19 || + tp->mcfg == CFG_METHOD_20 || + tp->mcfg == CFG_METHOD_21 || + tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_23 || + tp->mcfg == CFG_METHOD_24 || + tp->mcfg == CFG_METHOD_25 || + tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || + tp->mcfg == CFG_METHOD_28 || + tp->mcfg == CFG_METHOD_29 || + tp->mcfg == CFG_METHOD_30 || + tp->mcfg == CFG_METHOD_31 || + tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + *pUshort++ = rtl8168_eeprom_read_sc(tp, 1); + *pUshort++ = rtl8168_eeprom_read_sc(tp, 2); + *pUshort = rtl8168_eeprom_read_sc(tp, 3); + } else { + *pUshort++ = rtl8168_eeprom_read_sc(tp, 7); + *pUshort++ = rtl8168_eeprom_read_sc(tp, 8); + *pUshort = rtl8168_eeprom_read_sc(tp, 9); + } + } + } + + if (!is_valid_ether_addr(mac_addr)) { + netif_err(tp, probe, dev, "Invalid ether addr %pM\n", + mac_addr); + eth_hw_addr_random(dev); + ether_addr_copy(mac_addr, dev->dev_addr); + netif_info(tp, probe, dev, "Random ether addr %pM\n", + mac_addr); + tp->random_mac = 1; + } + + rtl8168_rar_set(tp, mac_addr); + + for (i = 0; i < MAC_ADDR_LEN; i++) { + dev->dev_addr[i] = RTL_R8(tp, MAC0 + i); + tp->org_mac_addr[i] = dev->dev_addr[i]; /* keep the original MAC address */ + } +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) + memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); +#endif +// memcpy(dev->dev_addr, dev->dev_addr, dev->addr_len); + + return 0; +} + +/** + * rtl8168_set_mac_address - Change the Ethernet Address of the NIC + * @dev: network interface device structure + * @p: pointer to an address structure + * + * Return 0 on success, negative on failure + **/ +static int +rtl8168_set_mac_address(struct net_device *dev, + void *p) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct sockaddr *addr = p; + unsigned long flags; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + spin_lock_irqsave(&tp->lock, flags); + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + rtl8168_rar_set(tp, dev->dev_addr); + + spin_unlock_irqrestore(&tp->lock, flags); + + return 0; +} + +/****************************************************************************** + * rtl8168_rar_set - Puts an ethernet address into a receive address register. 
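+ *
+ * Editor's note (added annotation): MAC0/MAC4 are the NIC's station
+ * address registers and take the six address bytes packed little-endian:
+ *
+ *   rar_low  = addr[3]<<24 | addr[2]<<16 | addr[1]<<8 | addr[0]  ->  MAC0
+ *   rar_high = addr[5]<<8  | addr[4]                             ->  MAC4
+ *
+ * The writes are bracketed by rtl8168_enable_cfg9346_write() /
+ * rtl8168_disable_cfg9346_write(), which unlock the otherwise
+ * write-protected configuration registers.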
+ * + * tp - The private data structure for the driver + * addr - Address to put into receive address register + *****************************************************************************/ +void +rtl8168_rar_set(struct rtl8168_private *tp, + uint8_t *addr) +{ + uint32_t rar_low = 0; + uint32_t rar_high = 0; + + rar_low = ((uint32_t) addr[0] | + ((uint32_t) addr[1] << 8) | + ((uint32_t) addr[2] << 16) | + ((uint32_t) addr[3] << 24)); + + rar_high = ((uint32_t) addr[4] | + ((uint32_t) addr[5] << 8)); + + rtl8168_enable_cfg9346_write(tp); + RTL_W32(tp, MAC0, rar_low); + RTL_W32(tp, MAC4, rar_high); + + switch (tp->mcfg) { + case CFG_METHOD_14: + case CFG_METHOD_15: + RTL_W32(tp, SecMAC0, rar_low); + RTL_W16(tp, SecMAC4, (uint16_t)rar_high); + break; + } + + if (tp->mcfg == CFG_METHOD_17) { + rtl8168_eri_write(tp, 0xf0, 4, rar_low << 16, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xf4, 4, rar_low >> 16 | rar_high << 16, ERIAR_ExGMAC); + } + + rtl8168_disable_cfg9346_write(tp); +} + +#ifdef ETHTOOL_OPS_COMPAT +static int ethtool_get_settings(struct net_device *dev, void *useraddr) +{ + struct ethtool_cmd cmd = { ETHTOOL_GSET }; + int err; + + if (!ethtool_ops->get_settings) + return -EOPNOTSUPP; + + err = ethtool_ops->get_settings(dev, &cmd); + if (err < 0) + return err; + + if (copy_to_user(useraddr, &cmd, sizeof(cmd))) + return -EFAULT; + return 0; +} + +static int ethtool_set_settings(struct net_device *dev, void *useraddr) +{ + struct ethtool_cmd cmd; + + if (!ethtool_ops->set_settings) + return -EOPNOTSUPP; + + if (copy_from_user(&cmd, useraddr, sizeof(cmd))) + return -EFAULT; + + return ethtool_ops->set_settings(dev, &cmd); +} + +static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr) +{ + struct ethtool_drvinfo info; + struct ethtool_ops *ops = ethtool_ops; + + if (!ops->get_drvinfo) + return -EOPNOTSUPP; + + memset(&info, 0, sizeof(info)); + info.cmd = ETHTOOL_GDRVINFO; + ops->get_drvinfo(dev, &info); + + if (ops->self_test_count) + info.testinfo_len = ops->self_test_count(dev); + if (ops->get_stats_count) + info.n_stats = ops->get_stats_count(dev); + if (ops->get_regs_len) + info.regdump_len = ops->get_regs_len(dev); + if (ops->get_eeprom_len) + info.eedump_len = ops->get_eeprom_len(dev); + + if (copy_to_user(useraddr, &info, sizeof(info))) + return -EFAULT; + return 0; +} + +static int ethtool_get_regs(struct net_device *dev, char *useraddr) +{ + struct ethtool_regs regs; + struct ethtool_ops *ops = ethtool_ops; + void *regbuf; + int reglen, ret; + + if (!ops->get_regs || !ops->get_regs_len) + return -EOPNOTSUPP; + + if (copy_from_user(&regs, useraddr, sizeof(regs))) + return -EFAULT; + + reglen = ops->get_regs_len(dev); + if (regs.len > reglen) + regs.len = reglen; + + regbuf = kmalloc(reglen, GFP_USER); + if (!regbuf) + return -ENOMEM; + + ops->get_regs(dev, &regs, regbuf); + + ret = -EFAULT; + if (copy_to_user(useraddr, &regs, sizeof(regs))) + goto out; + useraddr += offsetof(struct ethtool_regs, data); + if (copy_to_user(useraddr, regbuf, reglen)) + goto out; + ret = 0; + +out: + kfree(regbuf); + return ret; +} + +static int ethtool_get_wol(struct net_device *dev, char *useraddr) +{ + struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; + + if (!ethtool_ops->get_wol) + return -EOPNOTSUPP; + + ethtool_ops->get_wol(dev, &wol); + + if (copy_to_user(useraddr, &wol, sizeof(wol))) + return -EFAULT; + return 0; +} + +static int ethtool_set_wol(struct net_device *dev, char *useraddr) +{ + struct ethtool_wolinfo wol; + + if
(copy_from_user(&wol, useraddr, sizeof(wol))) + return -EFAULT; + + return ethtool_ops->set_wol(dev, &wol); +} + +static int ethtool_get_msglevel(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GMSGLVL }; + + if (!ethtool_ops->get_msglevel) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_msglevel(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_msglevel(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_msglevel) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + ethtool_ops->set_msglevel(dev, edata.data); + return 0; +} + +static int ethtool_nway_reset(struct net_device *dev) +{ + if (!ethtool_ops->nway_reset) + return -EOPNOTSUPP; + + return ethtool_ops->nway_reset(dev); +} + +static int ethtool_get_link(struct net_device *dev, void *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GLINK }; + + if (!ethtool_ops->get_link) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_link(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_get_eeprom(struct net_device *dev, void *useraddr) +{ + struct ethtool_eeprom eeprom; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->get_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + return -EINVAL; + + data = kmalloc(eeprom.len, GFP_USER); + if (!data) + return -ENOMEM; + + ret = -EFAULT; + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) + goto out; + + ret = ops->get_eeprom(dev, &eeprom, data); + if (ret) + goto out; + + ret = -EFAULT; + if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) + goto out; + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_set_eeprom(struct net_device *dev, void *useraddr) +{ + struct ethtool_eeprom eeprom; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->set_eeprom || !ops->get_eeprom_len) + return -EOPNOTSUPP; + + if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) + return -EFAULT; + + /* Check for wrap and zero */ + if (eeprom.offset + eeprom.len <= eeprom.offset) + return -EINVAL; + + /* Check for exceeding total eeprom len */ + if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) + return -EINVAL; + + data = kmalloc(eeprom.len, GFP_USER); + if (!data) + return -ENOMEM; + + ret = -EFAULT; + if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) + goto out; + + ret = ops->set_eeprom(dev, &eeprom, data); + if (ret) + goto out; + + if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) + ret = -EFAULT; + +out: + kfree(data); + return ret; +} + +static int ethtool_get_coalesce(struct net_device *dev, void *useraddr) +{ + struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; + + if (!ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + + ethtool_ops->get_coalesce(dev, &coalesce); + + if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) + return -EFAULT; + return 0; +} + +static int ethtool_set_coalesce(struct net_device *dev, void *useraddr) +{ + struct 
ethtool_coalesce coalesce; + + if (!ethtool_ops->get_coalesce) + return -EOPNOTSUPP; + + if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) + return -EFAULT; + + return ethtool_ops->set_coalesce(dev, &coalesce); +} + +static int ethtool_get_ringparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; + + if (!ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + ethtool_ops->get_ringparam(dev, &ringparam); + + if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_ringparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_ringparam ringparam; + + if (!ethtool_ops->get_ringparam) + return -EOPNOTSUPP; + + if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) + return -EFAULT; + + return ethtool_ops->set_ringparam(dev, &ringparam); +} + +static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; + + if (!ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + ethtool_ops->get_pauseparam(dev, &pauseparam); + + if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) + return -EFAULT; + return 0; +} + +static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr) +{ + struct ethtool_pauseparam pauseparam; + + if (!ethtool_ops->get_pauseparam) + return -EOPNOTSUPP; + + if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) + return -EFAULT; + + return ethtool_ops->set_pauseparam(dev, &pauseparam); +} + +static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GRXCSUM }; + + if (!ethtool_ops->get_rx_csum) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_rx_csum(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_rx_csum) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + ethtool_ops->set_rx_csum(dev, edata.data); + return 0; +} + +static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GTXCSUM }; + + if (!ethtool_ops->get_tx_csum) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_tx_csum(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_tx_csum) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_tx_csum(dev, edata.data); +} + +static int ethtool_get_sg(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GSG }; + + if (!ethtool_ops->get_sg) + return -EOPNOTSUPP; + + edata.data = ethtool_ops->get_sg(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_sg(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_sg) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_sg(dev, edata.data); +} + +static int ethtool_get_tso(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata = { ETHTOOL_GTSO }; + + if (!ethtool_ops->get_tso) + return 
-EOPNOTSUPP; + + edata.data = ethtool_ops->get_tso(dev); + + if (copy_to_user(useraddr, &edata, sizeof(edata))) + return -EFAULT; + return 0; +} + +static int ethtool_set_tso(struct net_device *dev, char *useraddr) +{ + struct ethtool_value edata; + + if (!ethtool_ops->set_tso) + return -EOPNOTSUPP; + + if (copy_from_user(&edata, useraddr, sizeof(edata))) + return -EFAULT; + + return ethtool_ops->set_tso(dev, edata.data); +} + +static int ethtool_self_test(struct net_device *dev, char *useraddr) +{ + struct ethtool_test test; + struct ethtool_ops *ops = ethtool_ops; + u64 *data; + int ret; + + if (!ops->self_test || !ops->self_test_count) + return -EOPNOTSUPP; + + if (copy_from_user(&test, useraddr, sizeof(test))) + return -EFAULT; + + test.len = ops->self_test_count(dev); + data = kmalloc(test.len * sizeof(u64), GFP_USER); + if (!data) + return -ENOMEM; + + ops->self_test(dev, &test, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &test, sizeof(test))) + goto out; + useraddr += sizeof(test); + if (copy_to_user(useraddr, data, test.len * sizeof(u64))) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_get_strings(struct net_device *dev, void *useraddr) +{ + struct ethtool_gstrings gstrings; + struct ethtool_ops *ops = ethtool_ops; + u8 *data; + int ret; + + if (!ops->get_strings) + return -EOPNOTSUPP; + + if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) + return -EFAULT; + + switch (gstrings.string_set) { + case ETH_SS_TEST: + if (!ops->self_test_count) + return -EOPNOTSUPP; + gstrings.len = ops->self_test_count(dev); + break; + case ETH_SS_STATS: + if (!ops->get_stats_count) + return -EOPNOTSUPP; + gstrings.len = ops->get_stats_count(dev); + break; + default: + return -EINVAL; + } + + data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); + if (!data) + return -ENOMEM; + + ops->get_strings(dev, gstrings.string_set, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) + goto out; + useraddr += sizeof(gstrings); + if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_phys_id(struct net_device *dev, void *useraddr) +{ + struct ethtool_value id; + + if (!ethtool_ops->phys_id) + return -EOPNOTSUPP; + + if (copy_from_user(&id, useraddr, sizeof(id))) + return -EFAULT; + + return ethtool_ops->phys_id(dev, id.data); +} + +static int ethtool_get_stats(struct net_device *dev, void *useraddr) +{ + struct ethtool_stats stats; + struct ethtool_ops *ops = ethtool_ops; + u64 *data; + int ret; + + if (!ops->get_ethtool_stats || !ops->get_stats_count) + return -EOPNOTSUPP; + + if (copy_from_user(&stats, useraddr, sizeof(stats))) + return -EFAULT; + + stats.n_stats = ops->get_stats_count(dev); + data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); + if (!data) + return -ENOMEM; + + ops->get_ethtool_stats(dev, &stats, data); + + ret = -EFAULT; + if (copy_to_user(useraddr, &stats, sizeof(stats))) + goto out; + useraddr += sizeof(stats); + if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) + goto out; + ret = 0; + +out: + kfree(data); + return ret; +} + +static int ethtool_ioctl(struct ifreq *ifr) +{ + struct net_device *dev = __dev_get_by_name(ifr->ifr_name); + void *useraddr = (void *) ifr->ifr_data; + u32 ethcmd; + + /* + * XXX: This can be pushed down into the ethtool_* handlers that + * need it. Keep existing behaviour for the moment. 
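+ *
+ * Editor's note (added annotation): this ETHTOOL_OPS_COMPAT path
+ * reimplements the kernel's ethtool dispatcher for old kernels; userspace
+ * reaches it through SIOCETHTOOL with the sub-command in the first u32 of
+ * ifr_data. An illustrative caller (sock_fd - any AF_INET datagram
+ * socket - and "eth0" are placeholders):
+ *
+ *   struct ethtool_value ev = { .cmd = ETHTOOL_GLINK };
+ *   struct ifreq ifr;
+ *
+ *   memset(&ifr, 0, sizeof(ifr));
+ *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *   ifr.ifr_data = (char *)&ev;
+ *   ioctl(sock_fd, SIOCETHTOOL, &ifr);    // ev.data: 1 = link up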
+ */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (!dev || !netif_device_present(dev)) + return -ENODEV; + + if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd))) + return -EFAULT; + + switch (ethcmd) { + case ETHTOOL_GSET: + return ethtool_get_settings(dev, useraddr); + case ETHTOOL_SSET: + return ethtool_set_settings(dev, useraddr); + case ETHTOOL_GDRVINFO: + return ethtool_get_drvinfo(dev, useraddr); + case ETHTOOL_GREGS: + return ethtool_get_regs(dev, useraddr); + case ETHTOOL_GWOL: + return ethtool_get_wol(dev, useraddr); + case ETHTOOL_SWOL: + return ethtool_set_wol(dev, useraddr); + case ETHTOOL_GMSGLVL: + return ethtool_get_msglevel(dev, useraddr); + case ETHTOOL_SMSGLVL: + return ethtool_set_msglevel(dev, useraddr); + case ETHTOOL_NWAY_RST: + return ethtool_nway_reset(dev); + case ETHTOOL_GLINK: + return ethtool_get_link(dev, useraddr); + case ETHTOOL_GEEPROM: + return ethtool_get_eeprom(dev, useraddr); + case ETHTOOL_SEEPROM: + return ethtool_set_eeprom(dev, useraddr); + case ETHTOOL_GCOALESCE: + return ethtool_get_coalesce(dev, useraddr); + case ETHTOOL_SCOALESCE: + return ethtool_set_coalesce(dev, useraddr); + case ETHTOOL_GRINGPARAM: + return ethtool_get_ringparam(dev, useraddr); + case ETHTOOL_SRINGPARAM: + return ethtool_set_ringparam(dev, useraddr); + case ETHTOOL_GPAUSEPARAM: + return ethtool_get_pauseparam(dev, useraddr); + case ETHTOOL_SPAUSEPARAM: + return ethtool_set_pauseparam(dev, useraddr); + case ETHTOOL_GRXCSUM: + return ethtool_get_rx_csum(dev, useraddr); + case ETHTOOL_SRXCSUM: + return ethtool_set_rx_csum(dev, useraddr); + case ETHTOOL_GTXCSUM: + return ethtool_get_tx_csum(dev, useraddr); + case ETHTOOL_STXCSUM: + return ethtool_set_tx_csum(dev, useraddr); + case ETHTOOL_GSG: + return ethtool_get_sg(dev, useraddr); + case ETHTOOL_SSG: + return ethtool_set_sg(dev, useraddr); + case ETHTOOL_GTSO: + return ethtool_get_tso(dev, useraddr); + case ETHTOOL_STSO: + return ethtool_set_tso(dev, useraddr); + case ETHTOOL_TEST: + return ethtool_self_test(dev, useraddr); + case ETHTOOL_GSTRINGS: + return ethtool_get_strings(dev, useraddr); + case ETHTOOL_PHYS_ID: + return ethtool_phys_id(dev, useraddr); + case ETHTOOL_GSTATS: + return ethtool_get_stats(dev, useraddr); + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} +#endif //ETHTOOL_OPS_COMPAT + +static int +rtl8168_do_ioctl(struct net_device *dev, + struct ifreq *ifr, + int cmd) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + int ret; + unsigned long flags; + + ret = 0; + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 32; /* Internal PHY */ + break; + + case SIOCGMIIREG: + spin_lock_irqsave(&tp->lock, flags); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + data->val_out = rtl8168_mdio_read(tp, data->reg_num); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + case SIOCSMIIREG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + spin_lock_irqsave(&tp->lock, flags); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + rtl8168_mdio_write(tp, data->reg_num, data->val_in); + spin_unlock_irqrestore(&tp->lock, flags); + break; + +#ifdef ETHTOOL_OPS_COMPAT + case SIOCETHTOOL: + ret = ethtool_ioctl(ifr); + break; +#endif + case SIOCDEVPRIVATE_RTLASF: + if (!netif_running(dev)) { + ret = -ENODEV; + break; + } + + ret = rtl8168_asf_ioctl(dev, ifr); + break; + +#ifdef ENABLE_DASH_SUPPORT + case SIOCDEVPRIVATE_RTLDASH: + if (!netif_running(dev)) { + ret = -ENODEV; + break; + } + if (!capable(CAP_NET_ADMIN)) { + ret = -EPERM; + break; + } + + ret =
rtl8168_dash_ioctl(dev, ifr); + break; +#endif + +#ifdef ENABLE_REALWOW_SUPPORT + case SIOCDEVPRIVATE_RTLREALWOW: + if (!netif_running(dev)) { + ret = -ENODEV; + break; + } + + ret = rtl8168_realwow_ioctl(dev, ifr); + break; +#endif + + case SIOCRTLTOOL: + ret = rtl8168_tool_ioctl(tp, ifr); + break; + + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static void +rtl8168_phy_power_up(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (rtl8168_is_in_phy_disable_mode(dev)) + return; + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + case CFG_METHOD_4: + case CFG_METHOD_5: + case CFG_METHOD_6: + case CFG_METHOD_7: + case CFG_METHOD_8: + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + rtl8168_mdio_write(tp, 0x0E, 0x0000); + break; + } + rtl8168_mdio_write(tp, MII_BMCR, BMCR_ANENABLE); + + //wait mdc/mdio ready + switch (tp->mcfg) { + case CFG_METHOD_23: + case CFG_METHOD_27: + case CFG_METHOD_28: + mdelay(10); + break; + } + + //wait ups resume (phy state 3) + if (HW_SUPPORT_UPS_MODE(tp)) + rtl8168_wait_phy_ups_resume(dev, HW_PHY_STATUS_LAN_ON); +} + +static void +rtl8168_phy_power_down(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 csi_tmp; + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + csi_tmp = rtl8168_eri_read(tp, 0x1AB, 1, ERIAR_ExGMAC); + csi_tmp &= ~( BIT_2 | BIT_3 | BIT_4 | BIT_5 | BIT_6 | BIT_7 ); + rtl8168_eri_write(tp, 0x1AB, 1, csi_tmp, ERIAR_ExGMAC); + break; + } + + rtl8168_mdio_write(tp, 0x1F, 0x0000); + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + case CFG_METHOD_4: + case CFG_METHOD_5: + case CFG_METHOD_6: + case CFG_METHOD_7: + case CFG_METHOD_8: + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + rtl8168_mdio_write(tp, 0x0E, 0x0200); + rtl8168_mdio_write(tp, MII_BMCR, BMCR_PDOWN); + break; + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_29: + case CFG_METHOD_30: + rtl8168_mdio_write(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); + break; + case CFG_METHOD_21: + case CFG_METHOD_22: + rtl8168_mdio_write(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); + break; + case CFG_METHOD_23: + case CFG_METHOD_24: + rtl8168_mdio_write(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); + break; + default: + rtl8168_mdio_write(tp, MII_BMCR, BMCR_PDOWN); + break; + } +} + +static int __devinit +rtl8168_init_board(struct pci_dev *pdev, + struct net_device **dev_out, + void __iomem **ioaddr_out) +{ + void __iomem *ioaddr; + struct net_device *dev; + struct rtl8168_private *tp; + int rc = -ENOMEM, i, pm_cap; + + assert(ioaddr_out != NULL); + + /* dev zeroed in alloc_etherdev */ + dev = alloc_etherdev(sizeof (*tp)); + if (dev == NULL) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_drv(&debug)) + dev_err(&pdev->dev, "unable to alloc new ethernet\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + goto err_out; + } + + SET_MODULE_OWNER(dev); + SET_NETDEV_DEV(dev, &pdev->dev); + tp = netdev_priv(dev); + tp->dev = dev; + tp->msg_enable = netif_msg_init(debug.msg_enable, R8168_MSG_DEFAULT); + + if (!aspm || tp->mcfg == CFG_METHOD_9) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); +#endif + } + 
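+ /*
+  * Editor's note (added annotation, not original driver text): when the
+  * "aspm" module parameter is 0 (or for the CFG_METHOD_9 exception above),
+  * pci_disable_link_state() asks the PCIe core to keep L0s/L1 and clock
+  * power management off for this device, trading power savings for
+  * stability on links that misbehave under ASPM.
+  */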
+ /* enable device (incl. PCI PM wakeup and hotplug setup) */ + rc = pci_enable_device(pdev); + if (rc < 0) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "enable failure\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + goto err_out_free_dev; + } + + if (pci_set_mwi(pdev) < 0) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_drv(&debug)) + dev_info(&pdev->dev, "Mem-Wr-Inval unavailable.\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + } + + /* save power state before pci_enable_device overwrites it */ + pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); + if (pm_cap) { + u16 pwr_command; + + pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command); + } else { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) { + dev_err(&pdev->dev, "PowerManagement capability not found.\n"); + } +#else + printk("PowerManagement capability not found.\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + + } + + /* make sure PCI base addr 1 is MMIO */ + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + rc = -ENODEV; + goto err_out_mwi; + } + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, 2) < R8168_REGS_SIZE) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + rc = -ENODEV; + goto err_out_mwi; + } + + rc = pci_request_regions(pdev, MODULENAME); + if (rc < 0) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "could not request regions.\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + goto err_out_mwi; + } + + if ((sizeof(dma_addr_t) > 4) && + use_dac && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && + !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) { + dev->features |= NETIF_F_HIGHDMA; + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "DMA configuration failed.\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + goto err_out_free_res; + } + } + + /* ioremap MMIO region */ + ioaddr = ioremap(pci_resource_start(pdev, 2), R8168_REGS_SIZE); + if (ioaddr == NULL) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_err(&pdev->dev, "cannot remap MMIO, aborting\n"); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + rc = -EIO; + goto err_out_free_res; + } + + tp->mmio_addr = ioaddr; + + /* Identify chip attached to board */ + rtl8168_get_mac_version(tp); + + rtl8168_print_mac_version(tp); + + for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) { + if (tp->mcfg == rtl_chip_info[i].mcfg) + break; + } + + if (i < 0) { + /* Unknown chip: assume array element #0, original RTL-8168 */ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (netif_msg_probe(tp)) + dev_printk(KERN_DEBUG, &pdev->dev, "unknown chip version, assuming %s\n", rtl_chip_info[0].name); +#else + printk("Realtek unknown chip version, assuming %s\n", rtl_chip_info[0].name); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) + i++; + } + + tp->chipset = i; + + *ioaddr_out = ioaddr; + *dev_out = dev; +out: + return rc; + 
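+ /*
+  * Editor's note (added annotation, not original driver text): the labels
+  * below unwind rtl8168_init_board() in reverse order of acquisition
+  * (regions -> MWI/PCI enable -> netdev), clear the output pointers and
+  * then jump back to the common "out" return above.
+  */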
+err_out_free_res: + pci_release_regions(pdev); +err_out_mwi: + pci_clear_mwi(pdev); + pci_disable_device(pdev); +err_out_free_dev: + free_netdev(dev); +err_out: + *ioaddr_out = NULL; + *dev_out = NULL; + goto out; +} + +static void +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) +rtl8168_esd_timer(unsigned long __opaque) +#else +rtl8168_esd_timer(struct timer_list *t) +#endif +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + struct net_device *dev = (struct net_device *)__opaque; + struct rtl8168_private *tp = netdev_priv(dev); + struct timer_list *timer = &tp->esd_timer; +#else + struct rtl8168_private *tp = from_timer(tp, t, esd_timer); + struct net_device *dev = tp->dev; + struct timer_list *timer = t; +#endif + struct pci_dev *pdev = tp->pci_dev; + unsigned long timeout = RTL8168_ESD_TIMEOUT; + unsigned long flags; + u8 cmd; + u16 io_base_l; + u16 mem_base_l; + u16 mem_base_h; + u8 ilr; + u16 resv_0x1c_h; + u16 resv_0x1c_l; + u16 resv_0x20_l; + u16 resv_0x20_h; + u16 resv_0x24_l; + u16 resv_0x24_h; + u16 resv_0x2c_h; + u16 resv_0x2c_l; + u32 pci_sn_l; + u32 pci_sn_h; + + spin_lock_irqsave(&tp->lock, flags); + + tp->esd_flag = 0; + + pci_read_config_byte(pdev, PCI_COMMAND, &cmd); + if (cmd != tp->pci_cfg_space.cmd) { + printk(KERN_ERR "%s: cmd = 0x%02x, should be 0x%02x.\n", dev->name, cmd, tp->pci_cfg_space.cmd); + pci_write_config_byte(pdev, PCI_COMMAND, tp->pci_cfg_space.cmd); + tp->esd_flag |= BIT_0; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_0, &io_base_l); + if (io_base_l != tp->pci_cfg_space.io_base_l) { + printk(KERN_ERR "%s: io_base_l = 0x%04x, should be 0x%04x.\n", dev->name, io_base_l, tp->pci_cfg_space.io_base_l); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_0, tp->pci_cfg_space.io_base_l); + tp->esd_flag |= BIT_1; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_2, &mem_base_l); + if (mem_base_l != tp->pci_cfg_space.mem_base_l) { + printk(KERN_ERR "%s: mem_base_l = 0x%04x, should be 0x%04x.\n", dev->name, mem_base_l, tp->pci_cfg_space.mem_base_l); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_2, tp->pci_cfg_space.mem_base_l); + tp->esd_flag |= BIT_2; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_2 + 2, &mem_base_h); + if (mem_base_h != tp->pci_cfg_space.mem_base_h) { + printk(KERN_ERR "%s: mem_base_h = 0x%04x, should be 0x%04x.\n", dev->name, mem_base_h, tp->pci_cfg_space.mem_base_h); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_2 + 2, tp->pci_cfg_space.mem_base_h); + tp->esd_flag |= BIT_3; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_3, &resv_0x1c_l); + if (resv_0x1c_l != tp->pci_cfg_space.resv_0x1c_l) { + printk(KERN_ERR "%s: resv_0x1c_l = 0x%04x, should be 0x%04x.\n", dev->name, resv_0x1c_l, tp->pci_cfg_space.resv_0x1c_l); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_3, tp->pci_cfg_space.resv_0x1c_l); + tp->esd_flag |= BIT_4; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_3 + 2, &resv_0x1c_h); + if (resv_0x1c_h != tp->pci_cfg_space.resv_0x1c_h) { + printk(KERN_ERR "%s: resv_0x1c_h = 0x%04x, should be 0x%04x.\n", dev->name, resv_0x1c_h, tp->pci_cfg_space.resv_0x1c_h); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_3 + 2, tp->pci_cfg_space.resv_0x1c_h); + tp->esd_flag |= BIT_5; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_4, &resv_0x20_l); + if (resv_0x20_l != tp->pci_cfg_space.resv_0x20_l) { + printk(KERN_ERR "%s: resv_0x20_l = 0x%04x, should be 0x%04x.\n", dev->name, resv_0x20_l, tp->pci_cfg_space.resv_0x20_l); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_4, tp->pci_cfg_space.resv_0x20_l); + tp->esd_flag |= BIT_6;
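+ /*
+  * Editor's note (added annotation, not original driver text): every
+  * check in this ESD timer follows the same template - re-read a PCI
+  * config-space word, compare it with the snapshot saved in
+  * tp->pci_cfg_space, and on mismatch restore the saved value and record
+  * a distinct BIT_n in tp->esd_flag so the recovery code at the end knows
+  * the configuration was corrupted.
+  */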
+ } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_4 + 2, &resv_0x20_h); + if (resv_0x20_h != tp->pci_cfg_space.resv_0x20_h) { + printk(KERN_ERR "%s: resv_0x20_h = 0x%04x, should be 0x%04x.\n", dev->name, resv_0x20_h, tp->pci_cfg_space.resv_0x20_h); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_4 + 2, tp->pci_cfg_space.resv_0x20_h); + tp->esd_flag |= BIT_7; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_5, &resv_0x24_l); + if (resv_0x24_l != tp->pci_cfg_space.resv_0x24_l) { + printk(KERN_ERR "%s: resv_0x24_l = 0x%04x, should be 0x%04x.\n", dev->name, resv_0x24_l, tp->pci_cfg_space.resv_0x24_l); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_5, tp->pci_cfg_space.resv_0x24_l); + tp->esd_flag |= BIT_8; + } + + pci_read_config_word(pdev, PCI_BASE_ADDRESS_5 + 2, &resv_0x24_h); + if (resv_0x24_h != tp->pci_cfg_space.resv_0x24_h) { + printk(KERN_ERR "%s: resv_0x24_h = 0x%04x, should be 0x%04x.\n", dev->name, resv_0x24_h, tp->pci_cfg_space.resv_0x24_h); + pci_write_config_word(pdev, PCI_BASE_ADDRESS_5 + 2, tp->pci_cfg_space.resv_0x24_h); + tp->esd_flag |= BIT_9; + } + + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &ilr); + if (ilr != tp->pci_cfg_space.ilr) { + printk(KERN_ERR "%s: ilr = 0x%02x, should be 0x%02x.\n", dev->name, ilr, tp->pci_cfg_space.ilr); + pci_write_config_byte(pdev, PCI_INTERRUPT_LINE, tp->pci_cfg_space.ilr); + tp->esd_flag |= BIT_10; + } + + pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &resv_0x2c_l); + if (resv_0x2c_l != tp->pci_cfg_space.resv_0x2c_l) { + printk(KERN_ERR "%s: resv_0x2c_l = 0x%04x, should be 0x%04x.\n", dev->name, resv_0x2c_l, tp->pci_cfg_space.resv_0x2c_l); + pci_write_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, tp->pci_cfg_space.resv_0x2c_l); + tp->esd_flag |= BIT_11; + } + + pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID + 2, &resv_0x2c_h); + if (resv_0x2c_h != tp->pci_cfg_space.resv_0x2c_h) { + printk(KERN_ERR "%s: resv_0x2c_h = 0x%04x, should be 0x%04x.\n", dev->name, resv_0x2c_h, tp->pci_cfg_space.resv_0x2c_h); + pci_write_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID + 2, tp->pci_cfg_space.resv_0x2c_h); + tp->esd_flag |= BIT_12; + } + + if (tp->HwPcieSNOffset > 0) { + pci_sn_l = rtl8168_csi_read(tp, tp->HwPcieSNOffset); + if (pci_sn_l != tp->pci_cfg_space.pci_sn_l) { + printk(KERN_ERR "%s: pci_sn_l = 0x%08x, should be 0x%08x.\n", dev->name, pci_sn_l, tp->pci_cfg_space.pci_sn_l); + rtl8168_csi_write(tp, tp->HwPcieSNOffset, tp->pci_cfg_space.pci_sn_l); + tp->esd_flag |= BIT_13; + } + + pci_sn_h = rtl8168_csi_read(tp, tp->HwPcieSNOffset + 4); + if (pci_sn_h != tp->pci_cfg_space.pci_sn_h) { + printk(KERN_ERR "%s: pci_sn_h = 0x%08x, should be 0x%08x.\n", dev->name, pci_sn_h, tp->pci_cfg_space.pci_sn_h); + rtl8168_csi_write(tp, tp->HwPcieSNOffset + 4, tp->pci_cfg_space.pci_sn_h); + tp->esd_flag |= BIT_14; + } + } + + if (tp->TestPhyOcpReg && rtl8168_test_phy_ocp(tp)) + tp->esd_flag |= BIT_15; + + if (tp->esd_flag != 0) { + printk(KERN_ERR "%s: esd_flag = 0x%04x.\n", dev->name, tp->esd_flag); + netif_stop_queue(dev); + netif_carrier_off(dev); + rtl8168_hw_reset(dev); + rtl8168_tx_clear(tp); + rtl8168_rx_clear(tp); + rtl8168_init_ring(dev); + rtl8168_hw_init(dev); + rtl8168_powerup_pll(dev); + rtl8168_hw_ephy_config(dev); + rtl8168_hw_phy_config(dev); + rtl8168_hw_config(dev); + rtl8168_set_speed(dev, tp->autoneg, tp->speed, tp->duplex, tp->advertising); + tp->esd_flag = 0; + } + spin_unlock_irqrestore(&tp->lock, flags); + + mod_timer(timer, jiffies + timeout); +} + +static void +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
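+ /*
+  * Editor's note (added annotation, not original driver text): the 4.14/4.15
+  * timer API rework passes the timer_list itself to the callback, and the
+  * owning structure is recovered with from_timer(), a container_of()
+  * wrapper - hence the dual signatures kept behind this version check:
+  *
+  *   timer_setup(&tp->link_timer, rtl8168_link_timer, 0);
+  *   ...
+  *   struct rtl8168_private *tp = from_timer(tp, t, link_timer);
+  */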
+rtl8168_link_timer(unsigned long __opaque) +#else +rtl8168_link_timer(struct timer_list *t) +#endif +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0) + struct net_device *dev = (struct net_device *)__opaque; + struct rtl8168_private *tp = netdev_priv(dev); + struct timer_list *timer = &tp->link_timer; +#else + struct rtl8168_private *tp = from_timer(tp, t, link_timer); + struct net_device *dev = tp->dev; + struct timer_list *timer = t; +#endif + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_check_link_status(dev); + spin_unlock_irqrestore(&tp->lock, flags); + + mod_timer(timer, jiffies + RTL8168_LINK_TIMEOUT); +} + +/* Cfg9346_Unlock assumed. */ +static unsigned rtl8168_try_msi(struct pci_dev *pdev, struct rtl8168_private *tp) +{ + unsigned msi = 0; + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) + switch (tp->mcfg) { + case CFG_METHOD_1: + case CFG_METHOD_2: + case CFG_METHOD_3: + case CFG_METHOD_4: + case CFG_METHOD_5: + case CFG_METHOD_6: + case CFG_METHOD_7: + case CFG_METHOD_8: + dev_info(&pdev->dev, "Default use INTx.\n"); + break; + default: + if (pci_enable_msi(pdev)) + dev_info(&pdev->dev, "no MSI. Back to INTx.\n"); + else + msi |= RTL_FEATURE_MSI; + break; + } +#endif + + return msi; +} + +static void rtl8168_disable_msi(struct pci_dev *pdev, struct rtl8168_private *tp) +{ + if (tp->features & RTL_FEATURE_MSI) { +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) + pci_disable_msi(pdev); +#endif + tp->features &= ~RTL_FEATURE_MSI; + } +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29) +static const struct net_device_ops rtl8168_netdev_ops = { + .ndo_open = rtl8168_open, + .ndo_stop = rtl8168_close, + .ndo_get_stats = rtl8168_get_stats, + .ndo_start_xmit = rtl8168_start_xmit, + .ndo_tx_timeout = rtl8168_tx_timeout, + .ndo_change_mtu = rtl8168_change_mtu, + .ndo_set_mac_address = rtl8168_set_mac_address, + .ndo_do_ioctl = rtl8168_do_ioctl, +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) + .ndo_set_multicast_list = rtl8168_set_rx_mode, +#else + .ndo_set_rx_mode = rtl8168_set_rx_mode, +#endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) +#ifdef CONFIG_R8168_VLAN + .ndo_vlan_rx_register = rtl8168_vlan_rx_register, +#endif +#else + .ndo_fix_features = rtl8168_fix_features, + .ndo_set_features = rtl8168_set_features, +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8168_netpoll, +#endif +}; +#endif + +static int __devinit +rtl8168_init_one(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct net_device *dev = NULL; + struct rtl8168_private *tp; + void __iomem *ioaddr = NULL; + static int board_idx = -1; + + int rc; + + assert(pdev != NULL); + assert(ent != NULL); + + board_idx++; + + if (netif_msg_drv(&debug)) + printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", + MODULENAME, RTL8168_VERSION); + + rc = rtl8168_init_board(pdev, &dev, &ioaddr); + if (rc) + goto out; + + tp = netdev_priv(dev); + assert(ioaddr != NULL); + + tp->set_speed = rtl8168_set_speed_xmii; + tp->get_settings = rtl8168_gset_xmii; + tp->phy_reset_enable = rtl8168_xmii_reset_enable; + tp->phy_reset_pending = rtl8168_xmii_reset_pending; + tp->link_ok = rtl8168_xmii_link_ok; + + tp->features |= rtl8168_try_msi(pdev, tp); + + RTL_NET_DEVICE_OPS(rtl8168_netdev_ops); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,4,22) + SET_ETHTOOL_OPS(dev, &rtl8168_ethtool_ops); +#endif + + dev->watchdog_timeo = RTL8168_TX_TIMEOUT; + dev->irq = pdev->irq; + dev->base_addr = (unsigned long) ioaddr; + +#ifdef CONFIG_R8168_NAPI + RTL_NAPI_CONFIG(dev, tp, 
rtl8168_poll, R8168_NAPI_WEIGHT); +#endif + +#ifdef CONFIG_R8168_VLAN + if (tp->mcfg != CFG_METHOD_DEFAULT) { + dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + dev->vlan_rx_kill_vid = rtl8168_vlan_rx_kill_vid; +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + } +#endif + + /* There has been a number of reports that using SG/TSO results in + * tx timeouts. However for a lot of people SG/TSO works fine. + * Therefore disable both features by default, but allow users to + * enable them. Use at own risk! + */ + tp->cp_cmd |= RTL_R16(tp, CPlusCmd); + if (tp->mcfg != CFG_METHOD_DEFAULT) { + dev->features |= NETIF_F_IP_CSUM; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + tp->cp_cmd |= RxChkSum; +#else + dev->features |= NETIF_F_RXCSUM; + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | + NETIF_F_HIGHDMA; + if ((tp->mcfg != CFG_METHOD_16) && (tp->mcfg != CFG_METHOD_17)) { + //dev->features |= NETIF_F_TSO; + dev->hw_features |= NETIF_F_TSO; + dev->vlan_features |= NETIF_F_TSO; + } +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(3,15,0) + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) + if ((tp->mcfg == CFG_METHOD_1) || (tp->mcfg == CFG_METHOD_2) || (tp->mcfg == CFG_METHOD_3)) { + dev->hw_features &= ~NETIF_F_IPV6_CSUM; + netif_set_gso_max_size(dev, LSO_32K); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0) + dev->gso_max_segs = NIC_MAX_PHYS_BUF_COUNT_LSO_64K; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) + dev->gso_min_segs = NIC_MIN_PHYS_BUF_COUNT; +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0) + } else { + dev->hw_features |= NETIF_F_IPV6_CSUM; + dev->features |= NETIF_F_IPV6_CSUM; + if ((tp->mcfg != CFG_METHOD_16) && (tp->mcfg != CFG_METHOD_17)) { + dev->hw_features |= NETIF_F_TSO6; + //dev->features |= NETIF_F_TSO6; + } + netif_set_gso_max_size(dev, LSO_64K); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0) + dev->gso_max_segs = NIC_MAX_PHYS_BUF_COUNT_LSO2; +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) + dev->gso_min_segs = NIC_MIN_PHYS_BUF_COUNT; +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0) +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(3,18,0) + } +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + } + + tp->pci_dev = pdev; + + spin_lock_init(&tp->lock); + + rtl8168_init_software_variable(dev); + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH) + AllocateDashShareMemory(dev); +#endif + + rtl8168_exit_oob(dev); + + rtl8168_hw_init(dev); + + rtl8168_hw_reset(dev); + + /* Get production from EEPROM */ + if (((tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_29 || + tp->mcfg == CFG_METHOD_30) && (rtl8168_mac_ocp_read(tp, 0xDC00) & BIT_3)) || + ((tp->mcfg == CFG_METHOD_26) && (rtl8168_mac_ocp_read(tp, 0xDC00) & BIT_4))) + tp->eeprom_type = EEPROM_TYPE_NONE; + else + rtl8168_eeprom_type(tp); + + if (tp->eeprom_type == EEPROM_TYPE_93C46 || tp->eeprom_type == EEPROM_TYPE_93C56) + rtl8168_set_eeprom_sel_low(tp); + + rtl8168_get_mac_address(dev); + + tp->fw_name = rtl_chip_fw_infos[tp->mcfg].fw_name; + +#if defined(ENABLE_DASH_PRINTER_SUPPORT) + 
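+ /*
+  * Editor's note (added annotation, not original driver text): the
+  * init_completion() calls below prepare kernel completion objects;
+  * judging by the names, the DASH printer path presumably blocks on them
+  * with wait_for_completion() until the firmware signals host-ok, ack and
+  * request events via complete().
+  */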
init_completion(&tp->fw_host_ok); + init_completion(&tp->fw_ack); + init_completion(&tp->fw_req); +#endif + + tp->tally_vaddr = dma_alloc_coherent(&pdev->dev, sizeof(*tp->tally_vaddr), + &tp->tally_paddr, GFP_KERNEL); + if (!tp->tally_vaddr) { + rc = -ENOMEM; + goto err_out; + } + + rtl8168_tally_counter_clear(tp); + + pci_set_drvdata(pdev, dev); + + rc = register_netdev(dev); + if (rc) + goto err_out; + + printk(KERN_INFO "%s: This product is covered by one or more of the following patents: US6,570,884, US6,115,776, and US6,327,625.\n", MODULENAME); + + rtl8168_disable_rxdvgate(dev); + + device_set_wakeup_enable(&pdev->dev, tp->wol_enabled); + + netif_carrier_off(dev); + + printk("%s", GPL_CLAIM); + +out: + return rc; + +err_out: + if (tp->tally_vaddr != NULL) { + dma_free_coherent(&pdev->dev, sizeof(*tp->tally_vaddr), tp->tally_vaddr, + tp->tally_paddr); + + tp->tally_vaddr = NULL; + } +#ifdef CONFIG_R8168_NAPI + RTL_NAPI_DEL(tp); +#endif + rtl8168_disable_msi(pdev, tp); + rtl8168_release_board(pdev, dev); + + goto out; +} + +static void __devexit +rtl8168_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8168_private *tp = netdev_priv(dev); + + assert(dev != NULL); + assert(tp != NULL); + +#ifdef CONFIG_R8168_NAPI + RTL_NAPI_DEL(tp); +#endif + if (HW_DASH_SUPPORT_DASH(tp)) + rtl8168_driver_stop(tp); + + unregister_netdev(dev); + rtl8168_disable_msi(pdev, tp); +#ifdef ENABLE_R8168_PROCFS + rtl8168_proc_remove(dev); +#endif + if (tp->tally_vaddr != NULL) { + dma_free_coherent(&pdev->dev, sizeof(*tp->tally_vaddr), tp->tally_vaddr, tp->tally_paddr); + tp->tally_vaddr = NULL; + } + + rtl8168_release_board(pdev, dev); + +#ifdef ENABLE_USE_FIRMWARE_FILE + rtl8168_release_firmware(tp); +#endif + + pci_set_drvdata(pdev, NULL); +} + +static void +rtl8168_set_rxbufsize(struct rtl8168_private *tp, + struct net_device *dev) +{ + unsigned int mtu = dev->mtu; + + tp->rx_buf_sz = (mtu > ETH_DATA_LEN) ? mtu + ETH_HLEN + 8 + 1 : RX_BUF_SIZE; +} + +#ifdef ENABLE_USE_FIRMWARE_FILE +static void rtl8168_request_firmware(struct rtl8168_private *tp) +{ + struct rtl8168_fw *rtl_fw; + + /* firmware loaded already or no firmware available */ + if (tp->rtl_fw || !tp->fw_name) + return; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + return; + + rtl_fw->phy_write = rtl8168_mdio_write; + rtl_fw->phy_read = rtl8168_mdio_read; + rtl_fw->mac_mcu_write = mac_mcu_write; + rtl_fw->mac_mcu_read = mac_mcu_read; + rtl_fw->fw_name = tp->fw_name; + rtl_fw->dev = tp_to_dev(tp); + + if (rtl8168_fw_request_firmware(rtl_fw)) + kfree(rtl_fw); + else + tp->rtl_fw = rtl_fw; +} +#endif + +static int rtl8168_open(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + unsigned long flags; + int retval; + + retval = -ENOMEM; + +#ifdef ENABLE_R8168_PROCFS + rtl8168_proc_init(dev); +#endif + rtl8168_set_rxbufsize(tp, dev); + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * pci_alloc_consistent provides more. 
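+ *
+ * Editor's note (added annotation): dma_alloc_coherent(), used below in
+ * place of the legacy pci_alloc_consistent(), returns buffers aligned to
+ * at least a page boundary, which comfortably satisfies the hardware's
+ * 256-byte descriptor-ring alignment - hence no manual alignment fixup.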
+ */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8168_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto err_free_all_allocated_mem; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8168_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_all_allocated_mem; + + retval = rtl8168_init_ring(dev); + if (retval < 0) + goto err_free_all_allocated_mem; + + retval = request_irq(dev->irq, rtl8168_interrupt, (tp->features & RTL_FEATURE_MSI) ? 0 : SA_SHIRQ, dev->name, dev); + if (retval<0) + goto err_free_all_allocated_mem; + + if (netif_msg_probe(tp)) { + printk(KERN_INFO "%s: 0x%lx, " + "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, " + "IRQ %d\n", + dev->name, + dev->base_addr, + dev->dev_addr[0], dev->dev_addr[1], + dev->dev_addr[2], dev->dev_addr[3], + dev->dev_addr[4], dev->dev_addr[5], dev->irq); + } + +#ifdef ENABLE_USE_FIRMWARE_FILE + rtl8168_request_firmware(tp); +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + INIT_WORK(&tp->task, rtl8168_reset_task, dev); +#else + INIT_DELAYED_WORK(&tp->task, rtl8168_reset_task); +#endif + + pci_set_master(pdev); + +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + RTL_NAPI_ENABLE(dev, &tp->napi); +#endif +#endif + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_exit_oob(dev); + + rtl8168_hw_init(dev); + + rtl8168_hw_reset(dev); + + rtl8168_powerup_pll(dev); + + rtl8168_hw_ephy_config(dev); + + rtl8168_hw_phy_config(dev); + + rtl8168_hw_config(dev); + + rtl8168_dsm(dev, DSM_IF_UP); + + rtl8168_set_speed(dev, tp->autoneg, tp->speed, tp->duplex, tp->advertising); + + spin_unlock_irqrestore(&tp->lock, flags); + + if (tp->esd_flag == 0) + rtl8168_request_esd_timer(dev); + + rtl8168_request_link_timer(dev); + +out: + + return retval; + +err_free_all_allocated_mem: + if (tp->RxDescArray != NULL) { + dma_free_coherent(&pdev->dev, R8168_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; + } + + if (tp->TxDescArray != NULL) { + dma_free_coherent(&pdev->dev, R8168_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + } + + goto out; +} + +static void +rtl8168_dsm(struct net_device *dev, int dev_state) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + switch (dev_state) { + case DSM_MAC_INIT: + if ((tp->mcfg == CFG_METHOD_5) || (tp->mcfg == CFG_METHOD_6)) { + if (RTL_R8(tp, MACDBG) & 0x80) + RTL_W8(tp, GPIO, RTL_R8(tp, GPIO) | GPIO_en); + else + RTL_W8(tp, GPIO, RTL_R8(tp, GPIO) & ~GPIO_en); + } + + break; + case DSM_NIC_GOTO_D3: + case DSM_IF_DOWN: + if ((tp->mcfg == CFG_METHOD_5) || (tp->mcfg == CFG_METHOD_6)) { + if (RTL_R8(tp, MACDBG) & 0x80) + RTL_W8(tp, GPIO, RTL_R8(tp, GPIO) & ~GPIO_en); + } + break; + + case DSM_NIC_RESUME_D3: + case DSM_IF_UP: + if ((tp->mcfg == CFG_METHOD_5) || (tp->mcfg == CFG_METHOD_6)) { + if (RTL_R8(tp, MACDBG) & 0x80) + RTL_W8(tp, GPIO, RTL_R8(tp, GPIO) | GPIO_en); + } + + break; + } + +} + +static void +set_offset70F(struct rtl8168_private *tp, u8 setting) +{ + u32 csi_tmp; + u32 temp = (u32)setting; + temp = temp << 24; + /*set PCI configuration space offset 0x70F to setting*/ + /*When the register offset of PCI configuration space larger than 0xff, use CSI to access it.*/ + + csi_tmp = rtl8168_csi_read(tp, 0x70c) & 0x00ffffff; + rtl8168_csi_write(tp, 0x70c, csi_tmp | temp); +} + +static void +set_offset79(struct rtl8168_private *tp, u8 setting) +{ + //Set PCI configuration space offset 0x79 to setting + + struct pci_dev *pdev = tp->pci_dev; + u8 device_control; + + if 
(hwoptimize & HW_PATCH_SOC_LAN) return; + + pci_read_config_byte(pdev, 0x79, &device_control); + device_control &= ~0x70; + device_control |= setting; + pci_write_config_byte(pdev, 0x79, device_control); +} + +static void +set_offset711(struct rtl8168_private *tp, u8 setting) +{ + u32 csi_tmp; + u32 temp = (u32)setting; + temp &= 0x0f; + temp = temp << 12; + /*set PCI configuration space offset 0x711 to setting*/ + + csi_tmp = rtl8168_csi_read(tp, 0x710) & 0xffff0fff; + rtl8168_csi_write(tp, 0x710, csi_tmp | temp); +} + +static void +rtl8168_hw_set_rx_packet_filter(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 mc_filter[2]; /* Multicast hash filter */ + int rx_mode; + u32 tmp = 0; + + if (dev->flags & IFF_PROMISC) { + /* Unconditionally log net taps. */ + if (netif_msg_link(tp)) + printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", + dev->name); + + rx_mode = + AcceptBroadcast | AcceptMulticast | AcceptMyPhys | + AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) + || (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. */ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) + struct dev_mc_list *mclist; + unsigned int i; + + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; + i++, mclist = mclist->next) { + int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } +#else + struct netdev_hw_addr *ha; + + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } +#endif + } + + if (dev->features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + + tmp = mc_filter[0]; + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(tmp); + + tp->rtl8168_rx_config = rtl_chip_info[tp->chipset].RCR_Cfg; + tmp = tp->rtl8168_rx_config | rx_mode | (RTL_R32(tp, RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask); + + RTL_W32(tp, RxConfig, tmp); + RTL_W32(tp, MAR0 + 0, mc_filter[0]); + RTL_W32(tp, MAR0 + 4, mc_filter[1]); +} + +static void +rtl8168_set_rx_mode(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_hw_set_rx_packet_filter(dev); + + spin_unlock_irqrestore(&tp->lock, flags); +} + +static void +rtl8168_hw_config(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + u8 device_control; + u16 mac_ocp_data; + u32 csi_tmp; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + if (dev->mtu > ETH_DATA_LEN) { + dev->features &= ~(NETIF_F_IP_CSUM); + } else { + dev->features |= NETIF_F_IP_CSUM; + } +#endif + + RTL_W32(tp, RxConfig, (RX_DMA_BURST << RxCfgDMAShift)); + + rtl8168_hw_reset(dev); + + rtl8168_enable_cfg9346_write(tp); + if (tp->HwSuppAspmClkIntrLock) { + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) & ~BIT_7); + rtl8168_hw_aspm_clkreq_enable(tp, false); + } + + //clear io_rdy_l23 + switch (tp->mcfg) { + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case 
CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~BIT_1); + break; + } + + //keep magic packet only + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + csi_tmp = rtl8168_eri_read(tp, 0xDE, 1, ERIAR_ExGMAC); + csi_tmp &= BIT_0; + rtl8168_eri_write(tp, 0xDE, 1, csi_tmp, ERIAR_ExGMAC); + break; + } + + RTL_W8(tp, MTPS, Reserved1_data); + + tp->cp_cmd |= INTT_1; + if (tp->use_timer_interrrupt) + tp->cp_cmd |= PktCntrDisable; + else + tp->cp_cmd &= ~PktCntrDisable; + + RTL_W16(tp, IntrMitigate, 0x5f51); + + rtl8168_tally_counter_addr_fill(tp); + + rtl8168_desc_addr_fill(tp); + + /* Set DMA burst size and Interframe Gap Time */ + if (tp->mcfg == CFG_METHOD_1) + RTL_W32(tp, TxConfig, (TX_DMA_BURST_512 << TxDMAShift) | + (InterFrameGap << TxInterFrameGapShift)); + else + RTL_W32(tp, TxConfig, (TX_DMA_BURST_unlimited << TxDMAShift) | + (InterFrameGap << TxInterFrameGapShift)); + + if (tp->mcfg == CFG_METHOD_4) { + set_offset70F(tp, 0x27); + + RTL_W8(tp, DBG_reg, (0x0E << 4) | Fix_Nak_1 | Fix_Nak_2); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + //disable clock request. + pci_write_config_byte(pdev, 0x81, 0x00); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + set_offset79(tp, 0x50); + } + + //rx checksum offload enable +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + tp->cp_cmd |= RxChkSum; +#else + dev->features |= NETIF_F_RXCSUM; +#endif + + tp->cp_cmd &= ~(EnableBist | Macdbgo_oe | Force_halfdup | + Force_rxflow_en | Force_txflow_en | Cxpl_dbg_sel | + ASF | PktCntrDisable | Macdbgo_sel); + } else if (tp->mcfg == CFG_METHOD_5) { + + set_offset70F(tp, 0x27); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + //disable clock request. + pci_write_config_byte(pdev, 0x81, 0x00); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + set_offset79(tp, 0x50); + } + + //rx checksum offload enable +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + tp->cp_cmd |= RxChkSum; +#else + dev->features |= NETIF_F_RXCSUM; +#endif + } else if (tp->mcfg == CFG_METHOD_6) { + set_offset70F(tp, 0x27); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + //disable clock request. 
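+                /*
+                 * Assuming the PCIe capability block sits at config offset
+                 * 0x70 (typical for these NICs): byte 0x81 is the high byte
+                 * of Link Control, so writing 0 clears Clock Power
+                 * Management (CLKREQ), and byte 0x79, patched by
+                 * set_offset79(), holds the Max_Read_Request_Size field,
+                 * so 0x20 selects 512-byte and 0x50 selects 4096-byte
+                 * reads for the jumbo/standard split below.
+                 */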
+ pci_write_config_byte(pdev, 0x81, 0x00); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + set_offset79(tp, 0x50); + } + + //rx checksum offload enable +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + tp->cp_cmd |= RxChkSum; +#else + dev->features |= NETIF_F_RXCSUM; +#endif + } else if (tp->mcfg == CFG_METHOD_7) { + set_offset70F(tp, 0x27); + + rtl8168_eri_write(tp, 0x1EC, 1, 0x07, ERIAR_ASF); + + //disable clock request. + pci_write_config_byte(pdev, 0x81, 0x00); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + + set_offset79(tp, 0x50); + } + } else if (tp->mcfg == CFG_METHOD_8) { + + set_offset70F(tp, 0x27); + + rtl8168_eri_write(tp, 0x1EC, 1, 0x07, ERIAR_ASF); + + //disable clock request. + pci_write_config_byte(pdev, 0x81, 0x00); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + RTL_W8(tp, 0xD1, 0x20); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + set_offset79(tp, 0x50); + } + } else if (tp->mcfg == CFG_METHOD_9) { + set_offset70F(tp, 0x27); + + /* disable clock request. */ + pci_write_config_byte(pdev, 0x81, 0x00); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~BIT_4); + RTL_W8(tp, DBG_reg, RTL_R8(tp, DBG_reg) | BIT_7 | BIT_1); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + set_offset79(tp, 0x50); + } + + RTL_W8(tp, TDFNR, 0x8); + + } else if (tp->mcfg == CFG_METHOD_10) { + set_offset70F(tp, 0x27); + + RTL_W8(tp, DBG_reg, RTL_R8(tp, DBG_reg) | BIT_7 | BIT_1); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1); + + set_offset79(tp, 0x20); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1); + + set_offset79(tp, 0x50); + } + + RTL_W8(tp, TDFNR, 0x8); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | 0x10); + + /* disable clock request. 
*/ + pci_write_config_byte(pdev, 0x81, 0x00); + } else if (tp->mcfg == CFG_METHOD_11 || tp->mcfg == CFG_METHOD_13) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + else + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + + pci_write_config_byte(pdev, 0x81, 0x00); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | 0x10); + + } else if (tp->mcfg == CFG_METHOD_12) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + else + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + + pci_write_config_byte(pdev, 0x81, 0x01); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) | 0x10); + + } else if (tp->mcfg == CFG_METHOD_14 || tp->mcfg == CFG_METHOD_15) { + + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + if (dev->mtu > ETH_DATA_LEN) { + RTL_W8(tp, MTPS, 0x24); + RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01); + } else { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0); + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01); + } + + RTL_W8(tp, 0xF3, RTL_R8(tp, 0xF3) | BIT_5); + RTL_W8(tp, 0xF3, RTL_R8(tp, 0xF3) & ~BIT_5); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_7 | BIT_6); + + RTL_W8(tp, 0xD1, RTL_R8(tp, 0xD1) | BIT_2 | BIT_3); + + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_6 | BIT_5 | BIT_4 | BIT_2 | BIT_1); + + RTL_W8(tp, TDFNR, 0x8); + + /* + if (aspm) + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_7); + */ + + RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~BIT_3); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + RTL_W8(tp, Config1, RTL_R8(tp, Config1) & ~0x10); + } else if (tp->mcfg == CFG_METHOD_16 || tp->mcfg == CFG_METHOD_17) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + rtl8168_eri_write(tp, 0xC0, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xB8, 4, 0x00000000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xC8, 4, 0x00100002, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xE8, 4, 0x00100006, ERIAR_ExGMAC); + csi_tmp = rtl8168_eri_read(tp, 0x1D0, 4, ERIAR_ExGMAC); + csi_tmp |= BIT_1; + rtl8168_eri_write(tp, 0x1D0, 1, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0xDC, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | BIT_7); + RTL_W8(tp, 0xD3, RTL_R8(tp, 0xD3) & ~BIT_7); + RTL_W8(tp, 0x1B, RTL_R8(tp, 0x1B) & ~0x07); + + if (tp->mcfg == CFG_METHOD_16) { + RTL_W32(tp, 0xB0, 0xEE480010); + RTL_W8(tp, 0x1A, RTL_R8(tp, 0x1A) & ~(BIT_2|BIT_3)); + rtl8168_eri_write(tp, 0x1DC, 1, 0x64, ERIAR_ExGMAC); + } else { + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp |= BIT_4; + rtl8168_eri_write(tp, 0x1B0, 1, csi_tmp, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xCC, 4, 0x00000050, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xD0, 4, 0x07ff0060, ERIAR_ExGMAC); + } + + RTL_W8(tp, TDFNR, 0x8); + + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~PMSTS_En); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_6); + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, MTPS, 0x27); + + /* disable clock request. 
*/ + pci_write_config_byte(pdev, 0x81, 0x00); + + } else if (tp->mcfg == CFG_METHOD_18 || tp->mcfg == CFG_METHOD_19) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + rtl8168_eri_write(tp, 0xC8, 4, 0x00100002, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xE8, 4, 0x00100006, ERIAR_ExGMAC); + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | BIT_7); + RTL_W8(tp, 0xD3, RTL_R8(tp, 0xD3) & ~BIT_7); + csi_tmp = rtl8168_eri_read(tp, 0xDC, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + + /* + if (aspm) + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_7); + */ + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, MTPS, 0x27); + + RTL_W8(tp, TDFNR, 0x8); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_6); + + rtl8168_eri_write(tp, 0xC0, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xB8, 4, 0x00000000, ERIAR_ExGMAC); + RTL_W8(tp, 0x1B,RTL_R8(tp, 0x1B) & ~0x07); + + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_4; + rtl8168_eri_write(tp, 0x1B0, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp = rtl8168_eri_read(tp, 0x1d0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_4 | BIT_1; + rtl8168_eri_write(tp, 0x1d0, 1, csi_tmp, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xCC, 4, 0x00000050, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xd0, 4, 0x00000060, ERIAR_ExGMAC); + } else if (tp->mcfg == CFG_METHOD_20) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + rtl8168_eri_write(tp, 0xC8, 4, 0x00100002, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xE8, 4, 0x00100006, ERIAR_ExGMAC); + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | BIT_7); + RTL_W8(tp, 0xD3, RTL_R8(tp, 0xD3) & ~BIT_7); + csi_tmp = rtl8168_eri_read(tp, 0xDC, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + + /* + if (aspm) + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_7); + */ + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, MTPS, 0x27); + + RTL_W8(tp, TDFNR, 0x8); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_6); + rtl8168_eri_write(tp, 0xC0, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xB8, 4, 0x00000000, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_4; + rtl8168_eri_write(tp, 0x1B0, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp = rtl8168_eri_read(tp, 0x1d0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_4 | BIT_1; + rtl8168_eri_write(tp, 0x1d0, 1, csi_tmp, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xCC, 4, 0x00000050, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xd0, 4, 0x00000060, ERIAR_ExGMAC); + } else if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_24 || tp->mcfg == CFG_METHOD_25 || + tp->mcfg == CFG_METHOD_26 || tp->mcfg == CFG_METHOD_29 || + tp->mcfg == CFG_METHOD_30) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22) + set_offset711(tp, 0x04); + + rtl8168_eri_write(tp, 0xC8, 4, 0x00080002, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xCC, 1, 0x38, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xD0, 1, 0x48, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xE8, 4, 0x00100006, ERIAR_ExGMAC); + + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | BIT_7); + + csi_tmp = rtl8168_eri_read(tp, 0xDC, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + 
rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + + if (tp->mcfg == CFG_METHOD_26) { + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD3C0); + mac_ocp_data &= ~(BIT_11 | BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0); + mac_ocp_data |= 0x0FFF; + rtl8168_mac_ocp_write(tp, 0xD3C0, mac_ocp_data); + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD3C2); + mac_ocp_data &= ~(BIT_7 | BIT_6 | BIT_5 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0); + rtl8168_mac_ocp_write(tp, 0xD3C2, mac_ocp_data); + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD3C4); + mac_ocp_data |= BIT_0; + rtl8168_mac_ocp_write(tp, 0xD3C4, mac_ocp_data); + } else if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30) { + + if (tp->RequireAdjustUpsTxLinkPulseTiming) { + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD412); + mac_ocp_data &= ~(0x0FFF); + mac_ocp_data |= tp->SwrCnt1msIni; + rtl8168_mac_ocp_write(tp, 0xD412, mac_ocp_data); + } + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xE056); + mac_ocp_data &= ~(BIT_7 | BIT_6 | BIT_5 | BIT_4); + //mac_ocp_data |= (BIT_6 | BIT_5 | BIT_4); + rtl8168_mac_ocp_write(tp, 0xE056, mac_ocp_data); + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xE052); + mac_ocp_data &= ~(BIT_15 | BIT_14 | BIT_13 | BIT_3); + mac_ocp_data |= BIT_15; + //mac_ocp_data |= BIT_3; + rtl8168_mac_ocp_write(tp, 0xE052, mac_ocp_data); + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD420); + mac_ocp_data &= ~(BIT_11 | BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0); + mac_ocp_data |= 0x45F; + rtl8168_mac_ocp_write(tp, 0xD420, mac_ocp_data); + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xE0D6); + mac_ocp_data &= ~(BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0); + mac_ocp_data |= 0x17F; + rtl8168_mac_ocp_write(tp, 0xE0D6, mac_ocp_data); + } + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + RTL_W8(tp, 0x1B, RTL_R8(tp, 0x1B) & ~0x07); + + RTL_W8(tp, TDFNR, 0x4); + + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~PMSTS_En); + + /* + if (aspm) + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_7); + */ + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, MTPS, 0x27); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_6); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_7); + + rtl8168_eri_write(tp, 0xC0, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xB8, 4, 0x00000000, ERIAR_ExGMAC); + + if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30) { + rtl8168_mac_ocp_write(tp, 0xE054, 0x0000); + + rtl8168_eri_write(tp, 0x5F0, 2, 0x4000, ERIAR_ExGMAC); + } else { + rtl8168_eri_write(tp, 0x5F0, 2, 0x4F87, ERIAR_ExGMAC); + } + + if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30) { + csi_tmp = rtl8168_eri_read(tp, 0xDC, 4, ERIAR_ExGMAC); + csi_tmp |= (BIT_2 | BIT_3 | BIT_4); + rtl8168_eri_write(tp, 0xDC, 4, csi_tmp, ERIAR_ExGMAC); + } + + if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_24 || tp->mcfg == CFG_METHOD_25) { + rtl8168_mac_ocp_write(tp, 0xC140, 0xFFFF); + } else if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30) { + rtl8168_mac_ocp_write(tp, 0xC140, 0xFFFF); + rtl8168_mac_ocp_write(tp, 0xC142, 0xFFFF); + } + + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp &= ~BIT_12; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + + if (tp->mcfg == CFG_METHOD_29 || tp->mcfg == CFG_METHOD_30) { + csi_tmp = rtl8168_eri_read(tp, 0x2FC, 1, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_2); + rtl8168_eri_write(tp, 0x2FC, 1, csi_tmp, ERIAR_ExGMAC); 
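+                        /*
+                         * The blocks above all follow one idiom: read an
+                         * ERI (extended GMAC) register, twiddle bits, write
+                         * it back.  As a sketch only (this helper does not
+                         * exist in the driver):
+                         *
+                         *   static void eri_set_bits(struct rtl8168_private *tp,
+                         *                            int addr, int len, u32 set)
+                         *   {
+                         *           u32 v = rtl8168_eri_read(tp, addr, len,
+                         *                                    ERIAR_ExGMAC);
+                         *           rtl8168_eri_write(tp, addr, len, v | set,
+                         *                             ERIAR_ExGMAC);
+                         *   }
+                         */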
+ } else { + csi_tmp = rtl8168_eri_read(tp, 0x2FC, 1, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_0 | BIT_1 | BIT_2); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0x2FC, 1, csi_tmp, ERIAR_ExGMAC); + } + + csi_tmp = rtl8168_eri_read(tp, 0x1D0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_1; + rtl8168_eri_write(tp, 0x1D0, 1, csi_tmp, ERIAR_ExGMAC); + } else if (tp->mcfg == CFG_METHOD_23 || tp->mcfg == CFG_METHOD_27 || + tp->mcfg == CFG_METHOD_28) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + rtl8168_eri_write(tp, 0xC8, 4, 0x00080002, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xCC, 1, 0x2F, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xD0, 1, 0x5F, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xE8, 4, 0x00100006, ERIAR_ExGMAC); + + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | BIT_7); + + csi_tmp = rtl8168_eri_read(tp, 0xDC, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_6); + + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_7); + + rtl8168_eri_write(tp, 0xC0, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xB8, 4, 0x00000000, ERIAR_ExGMAC); + RTL_W8(tp, 0x1B, RTL_R8(tp, 0x1B) & ~0x07); + + RTL_W8(tp, TDFNR, 0x4); + + /* + if (aspm) + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_7); + */ + + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp &= ~BIT_12; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x2FC, 1, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_0 | BIT_1 | BIT_2); + csi_tmp |= (BIT_0 | BIT_1); + rtl8168_eri_write(tp, 0x2FC, 1, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x1D0, 1, ERIAR_ExGMAC); + csi_tmp |= BIT_1; + rtl8168_eri_write(tp, 0x1D0, 1, csi_tmp, ERIAR_ExGMAC); + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, MTPS, 0x27); + + if (tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28) { + rtl8168_oob_mutex_lock(tp); + rtl8168_eri_write(tp, 0x5F0, 2, 0x4F87, ERIAR_ExGMAC); + rtl8168_oob_mutex_unlock(tp); + } + + rtl8168_mac_ocp_write(tp, 0xC140, 0xFFFF); + rtl8168_mac_ocp_write(tp, 0xC142, 0xFFFF); + + if (tp->mcfg == CFG_METHOD_28) { + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD3E2); + mac_ocp_data &= 0xF000; + mac_ocp_data |= 0xAFD; + rtl8168_mac_ocp_write(tp, 0xD3E2, mac_ocp_data); + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD3E4); + mac_ocp_data &= 0xFF00; + rtl8168_mac_ocp_write(tp, 0xD3E4, mac_ocp_data); + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xE860); + mac_ocp_data |= BIT_7; + rtl8168_mac_ocp_write(tp, 0xE860, mac_ocp_data); + } + } else if (tp->mcfg == CFG_METHOD_31 || tp->mcfg == CFG_METHOD_32 || + tp->mcfg == CFG_METHOD_33) { + set_offset70F(tp, 0x27); + set_offset79(tp, 0x50); + + rtl8168_eri_write(tp, 0xC8, 4, 0x00080002, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xCC, 1, 0x2F, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xD0, 1, 0x5F, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xE8, 4, 0x00100006, ERIAR_ExGMAC); + + RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | BIT_7); + + csi_tmp = rtl8168_eri_read(tp, 0xDC, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0xDC, 1, csi_tmp, ERIAR_ExGMAC); + + if (tp->RequireAdjustUpsTxLinkPulseTiming) { + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD412); + mac_ocp_data &= ~(0x0FFF); + mac_ocp_data |= tp->SwrCnt1msIni; + 
rtl8168_mac_ocp_write(tp, 0xD412, mac_ocp_data); + } + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xE056); + mac_ocp_data &= ~(BIT_7 | BIT_6 | BIT_5 | BIT_4); + rtl8168_mac_ocp_write(tp, 0xE056, mac_ocp_data); + if (FALSE == HW_SUPP_SERDES_PHY(tp)) + rtl8168_mac_ocp_write(tp, 0xEA80, 0x0003); + else + rtl8168_mac_ocp_write(tp, 0xEA80, 0x0000); + + rtl8168_oob_mutex_lock(tp); + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xE052); + mac_ocp_data &= ~(BIT_3 | BIT_0); + rtl8168_mac_ocp_write(tp, 0xE052, mac_ocp_data); + rtl8168_oob_mutex_unlock(tp); + + mac_ocp_data = rtl8168_mac_ocp_read(tp, 0xD420); + mac_ocp_data &= ~(BIT_11 | BIT_10 | BIT_9 | BIT_8 | BIT_7 | BIT_6 | BIT_5 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0); + mac_ocp_data |= 0x45F; + rtl8168_mac_ocp_write(tp, 0xD420, mac_ocp_data); + + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + RTL_W8(tp, 0x1B, RTL_R8(tp, 0x1B) & ~0x07); + + RTL_W8(tp, TDFNR, 0x4); + + RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~PMSTS_En); + + /* + if (aspm) + RTL_W8(tp, 0xF1, RTL_R8(tp, 0xF1) | BIT_7); + */ + + if (dev->mtu > ETH_DATA_LEN) + RTL_W8(tp, MTPS, 0x27); + + if (FALSE == HW_SUPP_SERDES_PHY(tp)) { + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) | BIT_6); + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) | BIT_7); + } else { + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) & ~BIT_6); + RTL_W8(tp, 0xF2, RTL_R8(tp, 0xF2) & ~BIT_6); + RTL_W8(tp, 0xD0, RTL_R8(tp, 0xD0) & ~BIT_7); + } + + rtl8168_eri_write(tp, 0xC0, 2, 0x0000, ERIAR_ExGMAC); + rtl8168_eri_write(tp, 0xB8, 4, 0x00000000, ERIAR_ExGMAC); + + rtl8168_oob_mutex_lock(tp); + rtl8168_eri_write(tp, 0x5F0, 2, 0x4000, ERIAR_ExGMAC); + rtl8168_oob_mutex_unlock(tp); + + if (tp->mcfg == CFG_METHOD_32 || tp->mcfg == CFG_METHOD_33) { + csi_tmp = rtl8168_eri_read(tp, 0xD4, 4, ERIAR_ExGMAC); + csi_tmp |= BIT_4; + rtl8168_eri_write(tp, 0xD4, 4, csi_tmp, ERIAR_ExGMAC); + } + + rtl8168_mac_ocp_write(tp, 0xC140, 0xFFFF); + rtl8168_mac_ocp_write(tp, 0xC142, 0xFFFF); + + csi_tmp = rtl8168_eri_read(tp, 0x1B0, 4, ERIAR_ExGMAC); + csi_tmp &= ~BIT_12; + rtl8168_eri_write(tp, 0x1B0, 4, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x2FC, 1, ERIAR_ExGMAC); + csi_tmp &= ~(BIT_0 | BIT_1); + csi_tmp |= BIT_0; + rtl8168_eri_write(tp, 0x2FC, 1, csi_tmp, ERIAR_ExGMAC); + + csi_tmp = rtl8168_eri_read(tp, 0x1D0, 1, ERIAR_ExGMAC); + csi_tmp &= ~BIT_1; + rtl8168_eri_write(tp, 0x1D0, 1, csi_tmp, ERIAR_ExGMAC); + } else if (tp->mcfg == CFG_METHOD_1) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + if (dev->mtu > ETH_DATA_LEN) { + pci_read_config_byte(pdev, 0x69, &device_control); + device_control &= ~0x70; + device_control |= 0x28; + pci_write_config_byte(pdev, 0x69, device_control); + } else { + pci_read_config_byte(pdev, 0x69, &device_control); + device_control &= ~0x70; + device_control |= 0x58; + pci_write_config_byte(pdev, 0x69, device_control); + } + } else if (tp->mcfg == CFG_METHOD_2) { + RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en); + + if (dev->mtu > ETH_DATA_LEN) { + pci_read_config_byte(pdev, 0x69, &device_control); + device_control &= ~0x70; + device_control |= 0x28; + pci_write_config_byte(pdev, 0x69, device_control); + + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); + } else { + pci_read_config_byte(pdev, 0x69, &device_control); + device_control &= ~0x70; + device_control |= 0x58; + pci_write_config_byte(pdev, 0x69, device_control); + + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); + } + } else if (tp->mcfg == CFG_METHOD_3) { + RTL_W8(tp, Config3, 
RTL_R8(tp, Config3) & ~Beacon_en); + + if (dev->mtu > ETH_DATA_LEN) { + pci_read_config_byte(pdev, 0x69, &device_control); + device_control &= ~0x70; + device_control |= 0x28; + pci_write_config_byte(pdev, 0x69, device_control); + + RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0)); + } else { + pci_read_config_byte(pdev, 0x69, &device_control); + device_control &= ~0x70; + device_control |= 0x58; + pci_write_config_byte(pdev, 0x69, device_control); + + RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0)); + } + } + + if ((tp->mcfg == CFG_METHOD_1) || (tp->mcfg == CFG_METHOD_2) || (tp->mcfg == CFG_METHOD_3)) { + /* csum offload command for RTL8168B/8111B */ + tp->tx_tcp_csum_cmd = TxTCPCS; + tp->tx_udp_csum_cmd = TxUDPCS; + tp->tx_ip_csum_cmd = TxIPCS; + tp->tx_ipv6_csum_cmd = 0; + } else { + /* csum offload command for RTL8168C/8111C and RTL8168CP/8111CP */ + tp->tx_tcp_csum_cmd = TxTCPCS_C; + tp->tx_udp_csum_cmd = TxUDPCS_C; + tp->tx_ip_csum_cmd = TxIPCS_C; + tp->tx_ipv6_csum_cmd = TxIPV6F_C; + } + + + //other hw parameters + if (tp->mcfg == CFG_METHOD_21 || tp->mcfg == CFG_METHOD_22 || + tp->mcfg == CFG_METHOD_23 || tp->mcfg == CFG_METHOD_24 || + tp->mcfg == CFG_METHOD_25 || tp->mcfg == CFG_METHOD_26 || + tp->mcfg == CFG_METHOD_27 || tp->mcfg == CFG_METHOD_28) + rtl8168_eri_write(tp, 0x2F8, 2, 0x1D8F, ERIAR_ExGMAC); + + if (tp->bios_setting & BIT_28) { + if (tp->mcfg == CFG_METHOD_18 || tp->mcfg == CFG_METHOD_19 || + tp->mcfg == CFG_METHOD_20) { + u32 gphy_val; + + rtl8168_mdio_write(tp, 0x1F, 0x0007); + rtl8168_mdio_write(tp, 0x1E, 0x002C); + gphy_val = rtl8168_mdio_read(tp, 0x16); + gphy_val |= BIT_10; + rtl8168_mdio_write(tp, 0x16, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0005); + rtl8168_mdio_write(tp, 0x05, 0x8B80); + gphy_val = rtl8168_mdio_read(tp, 0x06); + gphy_val |= BIT_7; + rtl8168_mdio_write(tp, 0x06, gphy_val); + rtl8168_mdio_write(tp, 0x1F, 0x0000); + } + } + + rtl8168_hw_clear_timer_int(dev); + + rtl8168_enable_exit_l1_mask(tp); + + switch (tp->mcfg) { + case CFG_METHOD_25: + rtl8168_mac_ocp_write(tp, 0xD3C0, 0x0B00); + rtl8168_mac_ocp_write(tp, 0xD3C2, 0x0000); + break; + case CFG_METHOD_29: + case CFG_METHOD_30: + rtl8168_mac_ocp_write(tp, 0xE098, 0x0AA2); + break; + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + rtl8168_mac_ocp_write(tp, 0xE098, 0xC302); + break; + } + + switch (tp->mcfg) { + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + if (aspm) { + rtl8168_init_pci_offset_99(tp); + } + break; + } + switch (tp->mcfg) { + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + if (aspm) { + rtl8168_init_pci_offset_180(tp); + } + break; + } + + tp->cp_cmd &= ~(EnableBist | Macdbgo_oe | Force_halfdup | + Force_rxflow_en | Force_txflow_en | Cxpl_dbg_sel | + ASF | Macdbgo_sel); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) + RTL_W16(tp, CPlusCmd, tp->cp_cmd); +#else + rtl8168_hw_set_features(dev, dev->features); +#endif + + switch (tp->mcfg) { + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case 
CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: { + int timeout; + for (timeout = 0; timeout < 10; timeout++) { + if ((rtl8168_eri_read(tp, 0x1AE, 2, ERIAR_ExGMAC) & BIT_13)==0) + break; + mdelay(1); + } + } + break; + } + + RTL_W16(tp, RxMaxSize, tp->rx_buf_sz); + + rtl8168_disable_rxdvgate(dev); + + if (tp->mcfg == CFG_METHOD_11 || tp->mcfg == CFG_METHOD_12) + rtl8168_mac_loopback_test(tp); + + if (!tp->pci_cfg_is_read) { + pci_read_config_byte(pdev, PCI_COMMAND, &tp->pci_cfg_space.cmd); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_0, &tp->pci_cfg_space.io_base_l); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_0 + 2, &tp->pci_cfg_space.io_base_h); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_2, &tp->pci_cfg_space.mem_base_l); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_2 + 2, &tp->pci_cfg_space.mem_base_h); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_3, &tp->pci_cfg_space.resv_0x1c_l); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_3 + 2, &tp->pci_cfg_space.resv_0x1c_h); + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tp->pci_cfg_space.ilr); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_4, &tp->pci_cfg_space.resv_0x20_l); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_4 + 2, &tp->pci_cfg_space.resv_0x20_h); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_5, &tp->pci_cfg_space.resv_0x24_l); + pci_read_config_word(pdev, PCI_BASE_ADDRESS_5 + 2, &tp->pci_cfg_space.resv_0x24_h); + pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &tp->pci_cfg_space.resv_0x2c_l); + pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID + 2, &tp->pci_cfg_space.resv_0x2c_h); + if (tp->HwPcieSNOffset > 0) { + tp->pci_cfg_space.pci_sn_l = rtl8168_csi_read(tp, tp->HwPcieSNOffset); + tp->pci_cfg_space.pci_sn_h = rtl8168_csi_read(tp, tp->HwPcieSNOffset + 4); + } + + tp->pci_cfg_is_read = 1; + } + + rtl8168_dsm(dev, DSM_MAC_INIT); + + /* Set Rx packet filter */ + rtl8168_hw_set_rx_packet_filter(dev); + +#ifdef ENABLE_DASH_SUPPORT + if (tp->DASH && !tp->dash_printer_enabled) + NICChkTypeEnableDashInterrupt(tp); +#endif + + if (tp->HwSuppAspmClkIntrLock) + rtl8168_hw_aspm_clkreq_enable(tp, true); + + rtl8168_disable_cfg9346_write(tp); + + udelay(10); +} + +static void +rtl8168_hw_start(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); + + rtl8168_enable_hw_interrupt(tp); +} + +static int +rtl8168_change_mtu(struct net_device *dev, + int new_mtu) +{ + struct rtl8168_private *tp = netdev_priv(dev); + int ret = 0; + unsigned long flags; + +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) + if (new_mtu < ETH_MIN_MTU) + return -EINVAL; + else if (new_mtu > tp->max_jumbo_frame_size) + new_mtu = tp->max_jumbo_frame_size; +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0) + + spin_lock_irqsave(&tp->lock, flags); + dev->mtu = new_mtu; + spin_unlock_irqrestore(&tp->lock, flags); + + if (!netif_running(dev)) + goto out; + + rtl8168_down(dev); + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_set_rxbufsize(tp, dev); + + ret = rtl8168_init_ring(dev); + + if (ret < 0) { + spin_unlock_irqrestore(&tp->lock, flags); + goto err_out; + } + +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + RTL_NAPI_ENABLE(dev, &tp->napi); +#endif +#endif//CONFIG_R8168_NAPI + + netif_stop_queue(dev); + netif_carrier_off(dev); + rtl8168_hw_config(dev); + spin_unlock_irqrestore(&tp->lock, flags); + + 
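+        /*
+         * On kernels >= 4.10 the manual clamp at the top of this function
+         * is compiled out because the networking core already validates
+         * new_mtu against dev->min_mtu and dev->max_mtu before
+         * ndo_change_mtu() is called.
+         */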
rtl8168_set_speed(dev, tp->autoneg, tp->speed, tp->duplex, tp->advertising); + + mod_timer(&tp->esd_timer, jiffies + RTL8168_ESD_TIMEOUT); + mod_timer(&tp->link_timer, jiffies + RTL8168_LINK_TIMEOUT); +out: +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) + netdev_update_features(dev); +#endif + +err_out: + return ret; +} + +static inline void +rtl8168_make_unusable_by_asic(struct RxDesc *desc) +{ + desc->addr = 0x0badbadbadbadbadull; + desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); +} + +static void +rtl8168_free_rx_skb(struct rtl8168_private *tp, + struct sk_buff **sk_buff, + struct RxDesc *desc) +{ + struct pci_dev *pdev = tp->pci_dev; + + dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), tp->rx_buf_sz, + DMA_FROM_DEVICE); + dev_kfree_skb(*sk_buff); + *sk_buff = NULL; + rtl8168_make_unusable_by_asic(desc); +} + +static inline void +rtl8168_mark_to_asic(struct RxDesc *desc, + u32 rx_buf_sz) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz); +} + +static inline void +rtl8168_map_to_asic(struct RxDesc *desc, + dma_addr_t mapping, + u32 rx_buf_sz) +{ + desc->addr = cpu_to_le64(mapping); + wmb(); + rtl8168_mark_to_asic(desc, rx_buf_sz); +} + +static int +rtl8168_alloc_rx_skb(struct rtl8168_private *tp, + struct sk_buff **sk_buff, + struct RxDesc *desc, + int rx_buf_sz, + u8 in_intr) +{ + struct sk_buff *skb; + dma_addr_t mapping; + int ret = 0; + + if (in_intr) + skb = RTL_ALLOC_SKB_INTR(tp, rx_buf_sz + RTK_RX_ALIGN); + else + skb = dev_alloc_skb(rx_buf_sz + RTK_RX_ALIGN); + + if (unlikely(!skb)) + goto err_out; + + skb_reserve(skb, RTK_RX_ALIGN); + + mapping = dma_map_single(tp_to_dev(tp), skb->data, rx_buf_sz, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(tp_to_dev(tp), mapping))) { + if (unlikely(net_ratelimit())) + netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n"); + goto err_out; + } + + *sk_buff = skb; + rtl8168_map_to_asic(desc, mapping, rx_buf_sz); +out: + return ret; + +err_out: + if (skb) + dev_kfree_skb(skb); + ret = -ENOMEM; + rtl8168_make_unusable_by_asic(desc); + goto out; +} + +static void +rtl8168_rx_clear(struct rtl8168_private *tp) +{ + int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + if (tp->Rx_skbuff[i]) + rtl8168_free_rx_skb(tp, tp->Rx_skbuff + i, + tp->RxDescArray + i); + } +} + +static u32 +rtl8168_rx_fill(struct rtl8168_private *tp, + struct net_device *dev, + u32 start, + u32 end, + u8 in_intr) +{ + u32 cur; + + for (cur = start; end - cur > 0; cur++) { + int ret, i = cur % NUM_RX_DESC; + + if (tp->Rx_skbuff[i]) + continue; + + ret = rtl8168_alloc_rx_skb(tp, tp->Rx_skbuff + i, + tp->RxDescArray + i, + tp->rx_buf_sz, + in_intr); + if (ret < 0) + break; + } + return cur - start; +} + +static inline void +rtl8168_mark_as_last_descriptor(struct RxDesc *desc) +{ + desc->opts1 |= cpu_to_le32(RingEnd); +} + +static void +rtl8168_desc_addr_fill(struct rtl8168_private *tp) +{ + if (!tp->TxPhyAddr || !tp->RxPhyAddr) + return; + + RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_BIT_MASK(32))); + RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32)); + RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_BIT_MASK(32))); + RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32)); +} + +static void +rtl8168_tx_desc_init(struct rtl8168_private *tp) +{ + int i = 0; + + memset(tp->TxDescArray, 0x0, NUM_TX_DESC * sizeof(struct TxDesc)); + + for (i = 0; i < NUM_TX_DESC; i++) { + if (i == (NUM_TX_DESC - 1)) + tp->TxDescArray[i].opts1 = cpu_to_le32(RingEnd); + } +} + +static void 
+rtl8168_rx_desc_offset0_init(struct rtl8168_private *tp, int own) +{ + int i = 0; + int ownbit = 0; + + if (tp->RxDescArray == NULL) return; + + if (own) + ownbit = DescOwn; + + for (i = 0; i < NUM_RX_DESC; i++) { + if (i == (NUM_RX_DESC - 1)) + tp->RxDescArray[i].opts1 = cpu_to_le32((ownbit | RingEnd) | (unsigned long)tp->rx_buf_sz); + else + tp->RxDescArray[i].opts1 = cpu_to_le32(ownbit | (unsigned long)tp->rx_buf_sz); + } +} + +static void +rtl8168_rx_desc_init(struct rtl8168_private *tp) +{ + memset(tp->RxDescArray, 0x0, NUM_RX_DESC * sizeof(struct RxDesc)); +} + +static int +rtl8168_init_ring(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + rtl8168_init_ring_indexes(tp); + + memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); + memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *)); + + rtl8168_tx_desc_init(tp); + rtl8168_rx_desc_init(tp); + + if (rtl8168_rx_fill(tp, dev, 0, NUM_RX_DESC, 0) != NUM_RX_DESC) + goto err_out; + + rtl8168_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); + + return 0; + +err_out: + rtl8168_rx_clear(tp); + return -ENOMEM; +} + +static void +rtl8168_unmap_tx_skb(struct pci_dev *pdev, + struct ring_info *tx_skb, + struct TxDesc *desc) +{ + unsigned int len = tx_skb->len; + + dma_unmap_single(&pdev->dev, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE); + + desc->opts1 = cpu_to_le32(RTK_MAGIC_DEBUG_VALUE); + desc->opts2 = 0x00; + desc->addr = 0x00; + tx_skb->len = 0; +} + +static void rtl8168_tx_clear_range(struct rtl8168_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) + struct net_device *dev = tp->dev; +#endif + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8168_unmap_tx_skb(tp->pci_dev, tx_skb, + tp->TxDescArray + entry); + if (skb) { + RTLDEV->stats.tx_dropped++; + dev_kfree_skb_any(skb); + tx_skb->skb = NULL; + } + } + } +} + +static void +rtl8168_tx_clear(struct rtl8168_private *tp) +{ + rtl8168_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + tp->cur_tx = tp->dirty_tx = 0; +} + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) +static void rtl8168_schedule_work(struct net_device *dev, void (*task)(void *)) +{ +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + struct rtl8168_private *tp = netdev_priv(dev); + + INIT_WORK(&tp->task, task, dev); + schedule_delayed_work(&tp->task, 4); +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) +} + +#define rtl8168_cancel_schedule_work(a) + +#else +static void rtl8168_schedule_work(struct net_device *dev, work_func_t task) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + INIT_DELAYED_WORK(&tp->task, task); + schedule_delayed_work(&tp->task, 4); +} + +static void rtl8168_cancel_schedule_work(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct work_struct *work = &tp->task.work; + + if (!work->func) return; + + cancel_delayed_work_sync(&tp->task); +} +#endif + +static void +rtl8168_wait_for_quiescence(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + synchronize_irq(dev->irq); + + /* Wait for any pending NAPI task to complete */ +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + RTL_NAPI_DISABLE(dev, &tp->napi); +#endif +#endif//CONFIG_R8168_NAPI + + rtl8168_irq_mask_and_ack(tp); + +#ifdef CONFIG_R8168_NAPI +#if 
LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + RTL_NAPI_ENABLE(dev, &tp->napi); +#endif +#endif//CONFIG_R8168_NAPI +} + +#if 0 +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) +static void rtl8168_reinit_task(void *_data) +#else +static void rtl8168_reinit_task(struct work_struct *work) +#endif +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) + struct net_device *dev = _data; +#else + struct rtl8168_private *tp = + container_of(work, struct rtl8168_private, task.work); + struct net_device *dev = tp->dev; +#endif + int ret; + + if (netif_running(dev)) { + rtl8168_wait_for_quiescence(dev); + rtl8168_close(dev); + } + + ret = rtl8168_open(dev); + if (unlikely(ret < 0)) { + if (unlikely(net_ratelimit())) { + struct rtl8168_private *tp = netdev_priv(dev); + + if (netif_msg_drv(tp)) { + printk(PFX KERN_ERR + "%s: reinit failure (status = %d)." + " Rescheduling.\n", dev->name, ret); + } + } + rtl8168_schedule_work(dev, rtl8168_reinit_task); + } +} +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) +static void rtl8168_reset_task(void *_data) +{ + struct net_device *dev = _data; + struct rtl8168_private *tp = netdev_priv(dev); +#else +static void rtl8168_reset_task(struct work_struct *work) +{ + struct rtl8168_private *tp = + container_of(work, struct rtl8168_private, task.work); + struct net_device *dev = tp->dev; +#endif + u32 budget = ~(u32)0; + unsigned long flags; + + if (!netif_running(dev)) + return; + + rtl8168_wait_for_quiescence(dev); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) + rtl8168_rx_interrupt(dev, tp, &budget); +#else + rtl8168_rx_interrupt(dev, tp, budget); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_tx_clear(tp); + + if (tp->dirty_rx == tp->cur_rx) { + rtl8168_rx_clear(tp); + rtl8168_init_ring(dev); + rtl8168_set_speed(dev, tp->autoneg, tp->speed, tp->duplex, tp->advertising); + spin_unlock_irqrestore(&tp->lock, flags); + } else { + spin_unlock_irqrestore(&tp->lock, flags); + if (unlikely(net_ratelimit())) { + struct rtl8168_private *tp = netdev_priv(dev); + + if (netif_msg_intr(tp)) { + printk(PFX KERN_EMERG + "%s: Rx buffers shortage\n", dev->name); + } + } + rtl8168_schedule_work(dev, rtl8168_reset_task); + } +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) +static void +rtl8168_tx_timeout(struct net_device *dev, unsigned int txqueue) +#else +static void +rtl8168_tx_timeout(struct net_device *dev) +#endif +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + spin_lock_irqsave(&tp->lock, flags); + netif_stop_queue(dev); + netif_carrier_off(dev); + rtl8168_hw_reset(dev); + spin_unlock_irqrestore(&tp->lock, flags); + + /* Let's wait a bit while any (async) irq lands on */ + rtl8168_schedule_work(dev, rtl8168_reset_task); +} + +static u32 +rtl8168_get_txd_opts1(u32 opts1, u32 len, unsigned int entry) +{ + u32 status = opts1 | len; + + if (entry == NUM_TX_DESC - 1) + status |= RingEnd; + + return status; +} + +static int +rtl8168_xmit_frags(struct rtl8168_private *tp, + struct sk_buff *skb, + const u32 *opts) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag, entry; + struct TxDesc *txd = NULL; + const unsigned char nr_frags = info->nr_frags; + + entry = tp->cur_tx; + for (cur_frag = 0; cur_frag < nr_frags; cur_frag++) { + skb_frag_t *frag = info->frags + cur_frag; + dma_addr_t mapping; + u32 status, len; + void *addr; + + entry = (entry + 1) % NUM_TX_DESC; + + txd = tp->TxDescArray + entry; +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) + len = 
frag->size; + addr = ((void *) page_address(frag->page)) + frag->page_offset; +#else + len = skb_frag_size(frag); + addr = skb_frag_address(frag); +#endif + mapping = dma_map_single(tp_to_dev(tp), addr, len, DMA_TO_DEVICE); + + if (unlikely(dma_mapping_error(tp_to_dev(tp), mapping))) { + if (unlikely(net_ratelimit())) + netif_err(tp, drv, tp->dev, + "Failed to map TX fragments DMA!\n"); + goto err_out; + } + + /* anti gcc 2.95.3 bugware (sic) */ + status = rtl8168_get_txd_opts1(opts[0], len, entry); + if (cur_frag == (nr_frags - 1)) { + tp->tx_skb[entry].skb = skb; + status |= LastFrag; + } + + txd->addr = cpu_to_le64(mapping); + + tp->tx_skb[entry].len = len; + + txd->opts2 = cpu_to_le32(opts[1]); + wmb(); + txd->opts1 = cpu_to_le32(status); + } + + return cur_frag; + +err_out: + rtl8168_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static inline +__be16 get_protocol(struct sk_buff *skb) +{ +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37) + return vlan_get_protocol(skb); +#else + __be16 protocol; + + if (skb->protocol == htons(ETH_P_8021Q)) + protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; + else + protocol = skb->protocol; + + return protocol; +#endif +} + +static bool rtl8168_skb_pad(struct sk_buff *skb) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) + if (skb_padto(skb, ETH_ZLEN)) + return false; + skb_put(skb, ETH_ZLEN - skb->len); + return true; +#else + return !eth_skb_pad(skb); +#endif +} + +static inline bool +rtl8168_tx_csum(struct sk_buff *skb, + struct net_device *dev, + u32 *opts) +{ + struct rtl8168_private *tp = netdev_priv(dev); + u32 csum_cmd = 0; + u8 sw_calc_csum = FALSE; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + const struct iphdr *ip = skb->nh.iph; + + if (dev->features & NETIF_F_IP_CSUM) { + if (ip->protocol == IPPROTO_TCP) + csum_cmd = tp->tx_ip_csum_cmd | tp->tx_tcp_csum_cmd; + else if (ip->protocol == IPPROTO_UDP) + csum_cmd = tp->tx_ip_csum_cmd | tp->tx_udp_csum_cmd; + else if (ip->protocol == IPPROTO_IP) + csum_cmd = tp->tx_ip_csum_cmd; + } +#else + u8 ip_protocol = IPPROTO_RAW; + + switch (get_protocol(skb)) { + case __constant_htons(ETH_P_IP): + if (dev->features & NETIF_F_IP_CSUM) { + ip_protocol = ip_hdr(skb)->protocol; + csum_cmd = tp->tx_ip_csum_cmd; + } + break; + case __constant_htons(ETH_P_IPV6): + if (dev->features & NETIF_F_IPV6_CSUM) { + u32 transport_offset = (u32)skb_transport_offset(skb); + if (transport_offset > 0 && transport_offset <= TCPHO_MAX) { + ip_protocol = ipv6_hdr(skb)->nexthdr; + csum_cmd = tp->tx_ipv6_csum_cmd; + csum_cmd |= transport_offset << TCPHO_SHIFT; + } + } + break; + default: + if (unlikely(net_ratelimit())) + dprintk("checksum_partial proto=%x!\n", skb->protocol); + break; + } + + if (ip_protocol == IPPROTO_TCP) + csum_cmd |= tp->tx_tcp_csum_cmd; + else if (ip_protocol == IPPROTO_UDP) + csum_cmd |= tp->tx_udp_csum_cmd; +#endif + if (csum_cmd == 0) { + sw_calc_csum = TRUE; +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + WARN_ON(1); /* we need a WARN() */ +#endif + } + } + + if (csum_cmd != 0) { + if (tp->ShortPacketSwChecksum && skb->len < ETH_ZLEN) { + sw_calc_csum = TRUE; + if (!rtl8168_skb_pad(skb)) + return false; + } else { + if ((tp->mcfg == CFG_METHOD_1) || (tp->mcfg == CFG_METHOD_2) || (tp->mcfg == CFG_METHOD_3)) + opts[0] |= csum_cmd; + else + opts[1] |= csum_cmd; + } + } + + if (tp->UseSwPaddingShortPkt && skb->len < ETH_ZLEN) + if (!rtl8168_skb_pad(skb)) + return false; + + if (sw_calc_csum) { +#if LINUX_VERSION_CODE < 
KERNEL_VERSION(2,6,10) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) + skb_checksum_help(&skb, 0); +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10) + skb_checksum_help(skb, 0); +#else + skb_checksum_help(skb); +#endif + } + + return true; +} + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) +/* r8169_csum_workaround() + * The hw limits the value the transport offset. When the offset is out of the + * range, calculate the checksum by sw. + */ +static void r8168_csum_workaround(struct rtl8168_private *tp, + struct sk_buff *skb) +{ + if (skb_shinfo(skb)->gso_size) { + netdev_features_t features = tp->dev->features; + struct sk_buff *segs, *nskb; + + features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6); + segs = skb_gso_segment(skb, features); + if (IS_ERR(segs) || !segs) + goto drop; + + do { + nskb = segs; + segs = segs->next; + nskb->next = NULL; + rtl8168_start_xmit(nskb, tp->dev); + } while (segs); + + dev_consume_skb_any(skb); + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb_checksum_help(skb) < 0) + goto drop; + + rtl8168_start_xmit(skb, tp->dev); + } else { + struct net_device_stats *stats; + +drop: + stats = &tp->dev->stats; + stats->tx_dropped++; + dev_kfree_skb_any(skb); + } +} + +/* msdn_giant_send_check() + * According to the document of microsoft, the TCP Pseudo Header excludes the + * packet length for IPv6 TCP large packets. + */ +static int msdn_giant_send_check(struct sk_buff *skb) +{ + const struct ipv6hdr *ipv6h; + struct tcphdr *th; + int ret; + + ret = skb_cow_head(skb, 0); + if (ret) + return ret; + + ipv6h = ipv6_hdr(skb); + th = tcp_hdr(skb); + + th->check = 0; + th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0); + + return ret; +} +#endif + +static bool rtl8168_tx_slots_avail(struct rtl8168_private *tp, + unsigned int nr_frags) +{ + unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx; + + /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ + return slots_avail > nr_frags; +} + +static netdev_tx_t +rtl8168_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned int entry; + struct TxDesc *txd; + dma_addr_t mapping; + u32 len; + u32 opts[2]; + netdev_tx_t ret = NETDEV_TX_OK; + unsigned long flags, large_send; + int frags; + + spin_lock_irqsave(&tp->lock, flags); + + if (unlikely(!rtl8168_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { + if (netif_msg_drv(tp)) { + printk(KERN_ERR + "%s: BUG! Tx Ring full when queue awake!\n", + dev->name); + } + goto err_stop; + } + + entry = tp->cur_tx % NUM_TX_DESC; + txd = tp->TxDescArray + entry; + + if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) { + if (netif_msg_drv(tp)) { + printk(KERN_ERR + "%s: BUG! 
Tx Desc is own by hardware!\n", + dev->name); + } + goto err_stop; + } + + opts[0] = DescOwn; + opts[1] = rtl8168_tx_vlan_tag(tp, skb); + + large_send = 0; +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + if (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) + u32 mss = skb_shinfo(skb)->tso_size; +#else + u32 mss = skb_shinfo(skb)->gso_size; +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) + + /* TCP Segmentation Offload (or TCP Large Send) */ + if (mss) { + if ((tp->mcfg == CFG_METHOD_1) || + (tp->mcfg == CFG_METHOD_2) || + (tp->mcfg == CFG_METHOD_3)) { + opts[0] |= LargeSend | (min(mss, MSS_MAX) << 16); + large_send = 1; + } else { + u32 transport_offset = (u32)skb_transport_offset(skb); + switch (get_protocol(skb)) { + case __constant_htons(ETH_P_IP): + if (transport_offset <= GTTCPHO_MAX) { + opts[0] |= GiantSendv4; + opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[1] |= min(mss, MSS_MAX) << 18; + large_send = 1; + } + break; + case __constant_htons(ETH_P_IPV6): +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) + if (msdn_giant_send_check(skb)) { + spin_unlock_irqrestore(&tp->lock, flags); + r8168_csum_workaround(tp, skb); + goto out; + } +#endif + if (transport_offset <= GTTCPHO_MAX) { + opts[0] |= GiantSendv6; + opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[1] |= min(mss, MSS_MAX) << 18; + large_send = 1; + } + break; + default: + if (unlikely(net_ratelimit())) + dprintk("tso proto=%x!\n", skb->protocol); + break; + } + } + + if (large_send == 0) + goto err_dma_0; + } + } +#endif //LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + + if (large_send == 0) { + if (unlikely(!rtl8168_tx_csum(skb, dev, opts))) + goto err_dma_0; + } + + frags = rtl8168_xmit_frags(tp, skb, opts); + if (unlikely(frags < 0)) + goto err_dma_0; + if (frags) { + len = skb_headlen(skb); + opts[0] |= FirstFrag; + } else { + len = skb->len; + + tp->tx_skb[entry].skb = skb; + + opts[0] |= FirstFrag | LastFrag; + } + + opts[0] = rtl8168_get_txd_opts1(opts[0], len, entry); + mapping = dma_map_single(tp_to_dev(tp), skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(tp_to_dev(tp), mapping))) { + if (unlikely(net_ratelimit())) + netif_err(tp, drv, dev, "Failed to map TX DMA!\n"); + goto err_dma_1; + } + tp->tx_skb[entry].len = len; + txd->addr = cpu_to_le64(mapping); + txd->opts2 = cpu_to_le32(opts[1]); + wmb(); + txd->opts1 = cpu_to_le32(opts[0]); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) + dev->trans_start = jiffies; +#else + skb_tx_timestamp(skb); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) + + tp->cur_tx += frags + 1; + + wmb(); + + RTL_W8(tp, TxPoll, NPQ); /* set polling bit */ + + if (!rtl8168_tx_slots_avail(tp, MAX_SKB_FRAGS)) { + netif_stop_queue(dev); + smp_rmb(); + if (rtl8168_tx_slots_avail(tp, MAX_SKB_FRAGS)) + netif_wake_queue(dev); + } + + spin_unlock_irqrestore(&tp->lock, flags); +out: + return ret; +err_dma_1: + tp->tx_skb[entry].skb = NULL; + rtl8168_tx_clear_range(tp, tp->cur_tx + 1, frags); +err_dma_0: + RTLDEV->stats.tx_dropped++; + spin_unlock_irqrestore(&tp->lock, flags); + dev_kfree_skb_any(skb); + ret = NETDEV_TX_OK; + goto out; +err_stop: + netif_stop_queue(dev); + ret = NETDEV_TX_BUSY; + RTLDEV->stats.tx_dropped++; + + spin_unlock_irqrestore(&tp->lock, flags); + goto out; +} + +static void +rtl8168_tx_interrupt(struct net_device *dev, + struct rtl8168_private *tp) +{ + unsigned int dirty_tx, tx_left; + + assert(dev != NULL); + assert(tp != NULL); + + dirty_tx = tp->dirty_tx; + smp_rmb(); + tx_left = 
tp->cur_tx - dirty_tx; + tp->dynamic_aspm_packet_count += tx_left; + + while (tx_left > 0) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + u32 len = tx_skb->len; + u32 status; + + rmb(); + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + RTLDEV->stats.tx_bytes += len; + RTLDEV->stats.tx_packets++; + + rtl8168_unmap_tx_skb(tp->pci_dev, + tx_skb, + tp->TxDescArray + entry); + + if (tx_skb->skb!=NULL) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0) + dev_consume_skb_any(tx_skb->skb); +#else + dev_kfree_skb_any(tx_skb->skb); +#endif + tx_skb->skb = NULL; + } + dirty_tx++; + tx_left--; + } + + tp->dynamic_aspm_packet_count -= tx_left; + + if (tp->dirty_tx != dirty_tx) { + tp->dirty_tx = dirty_tx; + smp_wmb(); + if (netif_queue_stopped(dev) && + (rtl8168_tx_slots_avail(tp, MAX_SKB_FRAGS))) { + netif_wake_queue(dev); + } + smp_rmb(); + if (tp->cur_tx != dirty_tx) + RTL_W8(tp, TxPoll, NPQ); + } +} + +static inline int +rtl8168_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void +rtl8168_rx_csum(struct rtl8168_private *tp, + struct sk_buff *skb, + struct RxDesc *desc) +{ + u32 opts1 = le32_to_cpu(desc->opts1); + u32 opts2 = le32_to_cpu(desc->opts2); + + if ((tp->mcfg == CFG_METHOD_1) || + (tp->mcfg == CFG_METHOD_2) || + (tp->mcfg == CFG_METHOD_3)) { + u32 status = opts1 & RxProtoMask; + + /* rx csum offload for RTL8168B/8111B */ + if (((status == RxProtoTCP) && !(opts1 & (RxTCPF | RxIPF))) || + ((status == RxProtoUDP) && !(opts1 & (RxUDPF | RxIPF)))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + } else { + /* rx csum offload for RTL8168C/8111C and RTL8168CP/8111CP */ + if (((opts2 & RxV4F) && !(opts1 & RxIPF)) || (opts2 & RxV6F)) { + if (((opts1 & RxTCPT) && !(opts1 & RxTCPF)) || + ((opts1 & RxUDPT) && !(opts1 & RxUDPF))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; + } else + skb->ip_summed = CHECKSUM_NONE; + } +} + +static inline int +rtl8168_try_rx_copy(struct rtl8168_private *tp, + struct sk_buff **sk_buff, + int pkt_size, + struct RxDesc *desc, + int rx_buf_sz) +{ + int ret = -1; + + if (pkt_size < rx_copybreak) { + struct sk_buff *skb; + + skb = RTL_ALLOC_SKB_INTR(tp, pkt_size + RTK_RX_ALIGN); + if (skb) { + u8 *data; + + data = sk_buff[0]->data; + skb_reserve(skb, RTK_RX_ALIGN); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,37) + prefetch(data - RTK_RX_ALIGN); +#endif + eth_copy_and_sum(skb, data, pkt_size, 0); + *sk_buff = skb; + rtl8168_mark_to_asic(desc, rx_buf_sz); + ret = 0; + } + } + return ret; +} + +static inline void +rtl8168_rx_skb(struct rtl8168_private *tp, + struct sk_buff *skb) +{ +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) + netif_receive_skb(skb); +#else + napi_gro_receive(&tp->napi, skb); +#endif +#else + netif_rx(skb); +#endif +} + +static int +rtl8168_rx_interrupt(struct net_device *dev, + struct rtl8168_private *tp, + napi_budget budget) +{ + unsigned int cur_rx, rx_left; + unsigned int delta, count = 0; + unsigned int entry; + struct RxDesc *desc; + u32 status; + u32 rx_quota; + + assert(dev != NULL); + assert(tp != NULL); + + if (tp->RxDescArray == NULL) + goto rx_out; + + rx_quota = RTL_RX_QUOTA(budget); + cur_rx = tp->cur_rx; + entry = cur_rx % NUM_RX_DESC; + desc = tp->RxDescArray + entry; + rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; + rx_left = rtl8168_rx_quota(rx_left, (u32)rx_quota); + + 
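+ /* Walk the receive ring: stop at the first descriptor still owned by the NIC (DescOwn), record error stats for bad frames, copy small packets into a fresh skb via rtl8168_try_rx_copy(), and hand everything else up the stack. */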
for (; rx_left > 0; rx_left--) { + rmb(); + status = le32_to_cpu(desc->opts1); + if (status & DescOwn) + break; + if (unlikely(status & RxRES)) { + if (netif_msg_rx_err(tp)) { + printk(KERN_INFO + "%s: Rx ERROR. status = %08x\n", + dev->name, status); + } + + RTLDEV->stats.rx_errors++; + + if (status & (RxRWT | RxRUNT)) + RTLDEV->stats.rx_length_errors++; + if (status & RxCRC) + RTLDEV->stats.rx_crc_errors++; + if (dev->features & NETIF_F_RXALL) + goto process_pkt; + + rtl8168_mark_to_asic(desc, tp->rx_buf_sz); + } else { + struct sk_buff *skb; + int pkt_size; + +process_pkt: + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size = (status & 0x00003fff) - 4; + else + pkt_size = status & 0x00003fff; + + /* + * The driver does not support incoming fragmented + * frames. They are seen as a symptom of over-mtu + * sized frames. + */ + if (unlikely(rtl8168_fragmented_frame(status))) { + RTLDEV->stats.rx_dropped++; + RTLDEV->stats.rx_length_errors++; + rtl8168_mark_to_asic(desc, tp->rx_buf_sz); + continue; + } + + skb = tp->Rx_skbuff[entry]; + + dma_sync_single_for_cpu(tp_to_dev(tp), + le64_to_cpu(desc->addr), tp->rx_buf_sz, + DMA_FROM_DEVICE); + + if (rtl8168_try_rx_copy(tp, &skb, pkt_size, + desc, tp->rx_buf_sz)) { + tp->Rx_skbuff[entry] = NULL; + dma_unmap_single(tp_to_dev(tp), le64_to_cpu(desc->addr), + tp->rx_buf_sz, DMA_FROM_DEVICE); + } else { + dma_sync_single_for_device(tp_to_dev(tp), le64_to_cpu(desc->addr), + tp->rx_buf_sz, DMA_FROM_DEVICE); + } + + if (tp->cp_cmd & RxChkSum) + rtl8168_rx_csum(tp, skb, desc); + + skb->dev = dev; + skb_put(skb, pkt_size); + skb->protocol = eth_type_trans(skb, dev); + + if (skb->pkt_type == PACKET_MULTICAST) + RTLDEV->stats.multicast++; + + if (rtl8168_rx_vlan_skb(tp, desc, skb) < 0) + rtl8168_rx_skb(tp, skb); +#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0) + dev->last_rx = jiffies; +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0) + RTLDEV->stats.rx_bytes += pkt_size; + RTLDEV->stats.rx_packets++; + } + + cur_rx++; + entry = cur_rx % NUM_RX_DESC; + desc = tp->RxDescArray + entry; +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,37) + prefetch(desc); +#endif + } + + count = cur_rx - tp->cur_rx; + tp->cur_rx = cur_rx; + + delta = rtl8168_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx, 1); + if (!delta && count && netif_msg_intr(tp)) + printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name); + tp->dirty_rx += delta; + + tp->dynamic_aspm_packet_count += delta; + + /* + * FIXME: until there is periodic timer to try and refill the ring, + * a temporary shortage may definitely kill the Rx process. + * - disable the asic to try and avoid an overflow and kick it again + * after refill ? + * - how do others driver handle this condition (Uh oh...). + */ + if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp)) + printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name); + +rx_out: + return count; +} + +/* + *The interrupt handler does all of the Rx thread work and cleans up after + *the Tx thread. 
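+ * On CONFIG_R8168_NAPI builds the handler only acknowledges the chip and + * schedules rtl8168_poll(); otherwise the RX and TX rings are drained + * directly in interrupt context.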
+ */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) +static irqreturn_t rtl8168_interrupt(int irq, void *dev_instance, struct pt_regs *regs) +#else +static irqreturn_t rtl8168_interrupt(int irq, void *dev_instance) +#endif +{ + struct net_device *dev = (struct net_device *) dev_instance; + struct rtl8168_private *tp = netdev_priv(dev); + int status; + int handled = 0; + + do { + status = RTL_R16(tp, IntrStatus); + + if (!(tp->features & RTL_FEATURE_MSI)) { + /* hotplug/major error/no more work/shared irq */ + if ((status == 0xFFFF) || !status) + break; + + if (!(status & (tp->intr_mask | tp->timer_intr_mask))) + break; + } + + handled = 1; + + rtl8168_disable_hw_interrupt(tp); + + switch (tp->mcfg) { + case CFG_METHOD_9: + case CFG_METHOD_10: + case CFG_METHOD_11: + case CFG_METHOD_12: + case CFG_METHOD_13: + case CFG_METHOD_14: + case CFG_METHOD_15: + case CFG_METHOD_16: + case CFG_METHOD_17: + case CFG_METHOD_18: + case CFG_METHOD_19: + case CFG_METHOD_20: + case CFG_METHOD_21: + case CFG_METHOD_22: + case CFG_METHOD_23: + case CFG_METHOD_24: + case CFG_METHOD_25: + case CFG_METHOD_26: + case CFG_METHOD_27: + case CFG_METHOD_28: + case CFG_METHOD_29: + case CFG_METHOD_30: + case CFG_METHOD_31: + case CFG_METHOD_32: + case CFG_METHOD_33: + /* RX_OVERFLOW RE-START mechanism now HW handles it automatically*/ + RTL_W16(tp, IntrStatus, status&~RxFIFOOver); + break; + default: + RTL_W16(tp, IntrStatus, status); + break; + } + + //Work around for rx fifo overflow + if (unlikely(status & RxFIFOOver)) { + if (tp->mcfg == CFG_METHOD_1) { + netif_stop_queue(dev); + udelay(300); + rtl8168_hw_reset(dev); + rtl8168_tx_clear(tp); + rtl8168_rx_clear(tp); + rtl8168_init_ring(dev); + rtl8168_hw_config(dev); + rtl8168_hw_start(dev); + netif_wake_queue(dev); + } + } + +#ifdef ENABLE_DASH_SUPPORT + if ( tp->DASH ) { + if (HW_DASH_SUPPORT_TYPE_2(tp) || HW_DASH_SUPPORT_TYPE_3(tp)) { + u8 DashIntType2Status; + + if (status & ISRIMR_DASH_INTR_CMAC_RESET) + tp->CmacResetIntr = TRUE; + + DashIntType2Status = RTL_CMAC_R8(tp, CMAC_IBISR0); + if (DashIntType2Status & ISRIMR_DASH_TYPE2_ROK) { + tp->RcvFwDashOkEvt = TRUE; + } + if (DashIntType2Status & ISRIMR_DASH_TYPE2_TOK) { + tp->SendFwHostOkEvt = TRUE; + } + if (DashIntType2Status & ISRIMR_DASH_TYPE2_RX_DISABLE_IDLE) { + tp->DashFwDisableRx = TRUE; + } + + RTL_CMAC_W8(tp, CMAC_IBISR0, DashIntType2Status); + } else { + if (status & ISRIMR_DP_REQSYS_OK) { + tp->RcvFwReqSysOkEvt = TRUE; + } + if (status & ISRIMR_DP_DASH_OK) { + tp->RcvFwDashOkEvt = TRUE; + } + if (status & ISRIMR_DP_HOST_OK) { + tp->SendFwHostOkEvt = TRUE; + } + } + } +#endif + +#ifdef CONFIG_R8168_NAPI + if (status & tp->intr_mask || tp->keep_intr_cnt-- > 0) { + if (status & tp->intr_mask) + tp->keep_intr_cnt = RTK_KEEP_INTERRUPT_COUNT; + + if (likely(RTL_NETIF_RX_SCHEDULE_PREP(dev, &tp->napi))) + __RTL_NETIF_RX_SCHEDULE(dev, &tp->napi); + else if (netif_msg_intr(tp)) + printk(KERN_INFO "%s: interrupt %04x in poll\n", + dev->name, status); + } else { + tp->keep_intr_cnt = RTK_KEEP_INTERRUPT_COUNT; + rtl8168_switch_to_hw_interrupt(tp); + } +#else + if (status & tp->intr_mask || tp->keep_intr_cnt-- > 0) { + u32 budget = ~(u32)0; + + if (status & tp->intr_mask) + tp->keep_intr_cnt = RTK_KEEP_INTERRUPT_COUNT; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) + rtl8168_rx_interrupt(dev, tp, &budget); +#else + rtl8168_rx_interrupt(dev, tp, budget); +#endif //LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) + rtl8168_tx_interrupt(dev, tp); + +#ifdef ENABLE_DASH_SUPPORT + if ( tp->DASH ) { + struct net_device 
*dev = tp->dev; + + HandleDashInterrupt(dev); + } +#endif + + rtl8168_switch_to_timer_interrupt(tp); + } else { + tp->keep_intr_cnt = RTK_KEEP_INTERRUPT_COUNT; + rtl8168_switch_to_hw_interrupt(tp); + } +#endif + + } while (false); + + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_R8168_NAPI +static int rtl8168_poll(napi_ptr napi, napi_budget budget) +{ + struct rtl8168_private *tp = RTL_GET_PRIV(napi, struct rtl8168_private); + RTL_GET_NETDEV(tp) + unsigned int work_to_do = RTL_NAPI_QUOTA(budget, dev); + unsigned int work_done; + unsigned long flags; + + work_done = rtl8168_rx_interrupt(dev, tp, budget); + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_tx_interrupt(dev, tp); + spin_unlock_irqrestore(&tp->lock, flags); + + RTL_NAPI_QUOTA_UPDATE(dev, work_done, budget); + + if (work_done < work_to_do) { +#ifdef ENABLE_DASH_SUPPORT + if ( tp->DASH ) { + struct net_device *dev = tp->dev; + + spin_lock_irqsave(&tp->lock, flags); + HandleDashInterrupt(dev); + spin_unlock_irqrestore(&tp->lock, flags); + } +#endif + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0) + if (RTL_NETIF_RX_COMPLETE(dev, napi, work_done) == FALSE) return RTL_NAPI_RETURN_VALUE; +#else + RTL_NETIF_RX_COMPLETE(dev, napi, work_done); +#endif + /* + * 20040426: the barrier is not strictly required but the + * behavior of the irq handler could be less predictable + * without it. Btw, the lack of flush for the posted pci + * write is safe - FR + */ + smp_wmb(); + + rtl8168_switch_to_timer_interrupt(tp); + } + + return RTL_NAPI_RETURN_VALUE; +} +#endif//CONFIG_R8168_NAPI + +static void rtl8168_sleep_rx_enable(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + + if (tp->wol_enabled != WOL_ENABLED) return; + + if ((tp->mcfg == CFG_METHOD_1) || (tp->mcfg == CFG_METHOD_2)) { + RTL_W8(tp, ChipCmd, CmdReset); + rtl8168_rx_desc_offset0_init(tp, 0); + RTL_W8(tp, ChipCmd, CmdRxEnb); + } else if (tp->mcfg == CFG_METHOD_14 || tp->mcfg == CFG_METHOD_15) { + rtl8168_ephy_write(tp, 0x19, 0xFF64); + RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + } +} + +static void rtl8168_down(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; + + rtl8168_delete_esd_timer(dev, &tp->esd_timer); + + rtl8168_delete_link_timer(dev, &tp->link_timer); + +#ifdef CONFIG_R8168_NAPI +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + RTL_NAPI_DISABLE(dev, &tp->napi); +#endif +#endif//CONFIG_R8168_NAPI + + netif_stop_queue(dev); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11) + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_rcu(); /* FIXME: should this be synchronize_irq()? 
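+ * (rtl8168_down() separately calls synchronize_irq() after the hardware reset below)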
*/ +#endif + + spin_lock_irqsave(&tp->lock, flags); + + netif_carrier_off(dev); + + rtl8168_dsm(dev, DSM_IF_DOWN); + + rtl8168_hw_reset(dev); + + spin_unlock_irqrestore(&tp->lock, flags); + + synchronize_irq(dev->irq); + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_tx_clear(tp); + + rtl8168_rx_clear(tp); + + spin_unlock_irqrestore(&tp->lock, flags); +} + +static int rtl8168_close(struct net_device *dev) +{ + struct rtl8168_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + unsigned long flags; + + if (tp->TxDescArray!=NULL && tp->RxDescArray!=NULL) { + rtl8168_cancel_schedule_work(dev); + + rtl8168_down(dev); + + pci_clear_master(tp->pci_dev); + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_hw_d3_para(dev); + + rtl8168_powerdown_pll(dev); + + rtl8168_sleep_rx_enable(dev); + + spin_unlock_irqrestore(&tp->lock, flags); + + free_irq(dev->irq, dev); + + dma_free_coherent(&pdev->dev, R8168_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8168_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + } else { + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_hw_d3_para(dev); + + rtl8168_powerdown_pll(dev); + + spin_unlock_irqrestore(&tp->lock, flags); + } + + return 0; +} + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11) +static void rtl8168_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8168_private *tp = netdev_priv(dev); + + if (HW_DASH_SUPPORT_DASH(tp)) + rtl8168_driver_stop(tp); + + rtl8168_set_bios_setting(dev); + if (s5_keep_curr_mac == 0 && tp->random_mac == 0) + rtl8168_rar_set(tp, tp->org_mac_addr); + +#ifdef ENABLE_FIBER_SUPPORT + rtl8168_hw_fiber_nic_d3_para(dev); +#endif //ENABLE_FIBER_SUPPORT + + if (s5wol == 0) + tp->wol_enabled = WOL_DISABLED; + + rtl8168_close(dev); + rtl8168_disable_msi(pdev, tp); + + if (system_state == SYSTEM_POWER_OFF) { + pci_clear_master(tp->pci_dev); + rtl8168_sleep_rx_enable(dev); + pci_wake_from_d3(pdev, tp->wol_enabled); + pci_set_power_state(pdev, PCI_D3hot); + } +} +#endif + +/** + * rtl8168_get_stats - Get rtl8168 read/write statistics + * @dev: The Ethernet Device to get statistics for + * + * Get TX/RX statistics for rtl8168 + */ +static struct +net_device_stats *rtl8168_get_stats(struct net_device *dev) +{ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) + struct rtl8168_private *tp = netdev_priv(dev); +#endif + if (netif_running(dev)) { +// spin_lock_irqsave(&tp->lock, flags); +// spin_unlock_irqrestore(&tp->lock, flags); + } + + return &RTLDEV->stats; +} + +#ifdef CONFIG_PM + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) +static int +rtl8168_suspend(struct pci_dev *pdev, u32 state) +#else +static int +rtl8168_suspend(struct pci_dev *pdev, pm_message_t state) +#endif +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8168_private *tp = netdev_priv(dev); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) + u32 pci_pm_state = pci_choose_state(pdev, state); +#endif + unsigned long flags; + + if (!netif_running(dev)) + goto out; + + rtl8168_cancel_schedule_work(dev); + + rtl8168_delete_esd_timer(dev, &tp->esd_timer); + + rtl8168_delete_link_timer(dev, &tp->link_timer); + + netif_stop_queue(dev); + + netif_carrier_off(dev); + + netif_device_detach(dev); + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_dsm(dev, DSM_NIC_GOTO_D3); + + rtl8168_hw_reset(dev); + + pci_clear_master(pdev); + + rtl8168_hw_d3_para(dev); + +#ifdef ENABLE_FIBER_SUPPORT + 
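+ /* fiber-NIC D3 parameters; compiled in only when ENABLE_FIBER_SUPPORT is defined */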
rtl8168_hw_fiber_nic_d3_para(dev); +#endif //ENABLE_FIBER_SUPPORT + + rtl8168_powerdown_pll(dev); + + rtl8168_sleep_rx_enable(dev); + + spin_unlock_irqrestore(&tp->lock, flags); + +out: + if (HW_DASH_SUPPORT_DASH(tp)) { + spin_lock_irqsave(&tp->lock, flags); + rtl8168_driver_stop(tp); + spin_unlock_irqrestore(&tp->lock, flags); + } + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) + pci_save_state(pdev, &pci_pm_state); +#else + pci_save_state(pdev); +#endif + pci_enable_wake(pdev, pci_choose_state(pdev, state), tp->wol_enabled); +// pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int +rtl8168_resume(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8168_private *tp = netdev_priv(dev); + unsigned long flags; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) + u32 pci_pm_state = PCI_D0; +#endif + + pci_set_power_state(pdev, PCI_D0); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) + pci_restore_state(pdev, &pci_pm_state); +#else + pci_restore_state(pdev); +#endif + pci_enable_wake(pdev, PCI_D0, 0); + + spin_lock_irqsave(&tp->lock, flags); + + /* restore last modified mac address */ + rtl8168_rar_set(tp, dev->dev_addr); + + spin_unlock_irqrestore(&tp->lock, flags); + + if (!netif_running(dev)) { + if (HW_DASH_SUPPORT_DASH(tp)) { + spin_lock_irqsave(&tp->lock, flags); + rtl8168_driver_start(tp); + spin_unlock_irqrestore(&tp->lock, flags); + } + goto out; + } + + pci_set_master(pdev); + + spin_lock_irqsave(&tp->lock, flags); + + rtl8168_exit_oob(dev); + + rtl8168_dsm(dev, DSM_NIC_RESUME_D3); + + rtl8168_hw_init(dev); + + rtl8168_powerup_pll(dev); + + rtl8168_hw_ephy_config(dev); + + rtl8168_hw_phy_config(dev); + + rtl8168_hw_config(dev); + + spin_unlock_irqrestore(&tp->lock, flags); + + rtl8168_schedule_work(dev, rtl8168_reset_task); + + netif_device_attach(dev); + + mod_timer(&tp->esd_timer, jiffies + RTL8168_ESD_TIMEOUT); + mod_timer(&tp->link_timer, jiffies + RTL8168_LINK_TIMEOUT); +out: + return 0; +} + +#endif /* CONFIG_PM */ + +static struct pci_driver rtl8168_pci_driver = { + .name = MODULENAME, + .id_table = rtl8168_pci_tbl, + .probe = rtl8168_init_one, + .remove = __devexit_p(rtl8168_remove_one), +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11) + .shutdown = rtl8168_shutdown, +#endif +#ifdef CONFIG_PM + .suspend = rtl8168_suspend, + .resume = rtl8168_resume, +#endif +}; + +static int __init +rtl8168_init_module(void) +{ +#ifdef ENABLE_R8168_PROCFS + rtl8168_proc_module_init(); +#endif +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) + return pci_register_driver(&rtl8168_pci_driver); +#else + return pci_module_init(&rtl8168_pci_driver); +#endif +} + +static void __exit +rtl8168_cleanup_module(void) +{ + pci_unregister_driver(&rtl8168_pci_driver); +#ifdef ENABLE_R8168_PROCFS + if (rtl8168_proc) { +#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) + remove_proc_subtree(MODULENAME, init_net.proc_net); +#else +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) + remove_proc_entry(MODULENAME, init_net.proc_net); +#else + remove_proc_entry(MODULENAME, proc_net); +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) +#endif //LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0) + rtl8168_proc = NULL; + } +#endif +} + +module_init(rtl8168_init_module); +module_exit(rtl8168_cleanup_module); diff --git a/addons/r8168/src/4.4.180/r8168_realwow.h b/addons/r8168/src/4.4.180/r8168_realwow.h new file mode 100644 index 00000000..28e2579e --- /dev/null +++ b/addons/r8168/src/4.4.180/r8168_realwow.h @@ -0,0 +1,118 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +#ifndef _LINUX_R8168_REALWOW_H +#define _LINUX_R8168_REALWOW_H + +#define SIOCDEVPRIVATE_RTLREALWOW SIOCDEVPRIVATE+3 + +#define MAX_RealWoW_KCP_SIZE (100) +#define MAX_RealWoW_Payload (64) + +#define KA_TX_PACKET_SIZE (100) +#define KA_WAKEUP_PATTERN_SIZE (120) + +//HwSuppKeepAliveOffloadVer +#define HW_SUPPORT_KCP_OFFLOAD(_M) ((_M)->HwSuppKCPOffloadVer > 0) + +enum rtl_realwow_cmd { + + RTL_REALWOW_SET_KCP_DISABLE=0, + RTL_REALWOW_SET_KCP_INFO, + RTL_REALWOW_SET_KCP_CONTENT, + + RTL_REALWOW_SET_KCP_ACKPKTINFO, + RTL_REALWOW_SET_KCP_WPINFO, + RTL_REALWOW_SET_KCPDHCP_TIMEOUT, + + RTLT_REALWOW_COMMAND_INVALID +}; + +struct rtl_realwow_ioctl_struct { + __u32 cmd; + __u32 offset; + __u32 len; + union { + __u32 data; + void *data_buffer; + }; +}; + +typedef struct _MP_KCPInfo { + u8 DIPv4[4]; + u8 MacID[6]; + u16 UdpPort[2]; + u8 PKTLEN[2]; + + u16 ackLostCnt; + u8 KCP_WakePattern[MAX_RealWoW_Payload]; + u8 KCP_AckPacket[MAX_RealWoW_Payload]; + u32 KCP_interval; + u8 KCP_WakePattern_Len; + u8 KCP_AckPacket_Len; + u8 KCP_TxPacket[2][KA_TX_PACKET_SIZE]; +} MP_KCP_INFO, *PMP_KCP_INFO; + +typedef struct _KCPInfo { + u32 nId; // = id + u8 DIPv4[4]; + u8 MacID[6]; + u16 UdpPort; + u16 PKTLEN; +} KCPInfo, *PKCPInfo; + +typedef struct _KCPContent { + u32 id; // = id + u32 mSec; // = msec + u32 size; // =size + u8 bPacket[MAX_RealWoW_KCP_SIZE]; // put packet here +} KCPContent, *PKCPContent; + +typedef struct _RealWoWAckPktInfo { + u16 ackLostCnt; + u16 patterntSize; + u8 pattern[MAX_RealWoW_Payload]; +} RealWoWAckPktInfo,*PRealWoWAckPktInfo; + +typedef struct _RealWoWWPInfo { + u16 patterntSize; + u8 pattern[MAX_RealWoW_Payload]; +} RealWoWWPInfo,*PRealWoWWPInfo; + +int rtl8168_realwow_ioctl(struct net_device *dev, struct ifreq *ifr); +void rtl8168_realwow_hw_init(struct net_device *dev); +void rtl8168_get_realwow_hw_version(struct net_device *dev); +void rtl8168_set_realwow_d3_para(struct net_device *dev); + +#endif /* _LINUX_R8168_REALWOW_H */ diff --git a/addons/r8168/src/4.4.180/rtl_eeprom.c b/addons/r8168/src/4.4.180/rtl_eeprom.c new file mode 100644 index 00000000..51bd3a12 --- /dev/null 
+++ b/addons/r8168/src/4.4.180/rtl_eeprom.c @@ -0,0 +1,289 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +#include +#include +#include +#include +#include +#include + +#include + +#include "r8168.h" +#include "rtl_eeprom.h" + +//------------------------------------------------------------------- +//rtl8168_eeprom_type(): +// tell the eeprom type +//return value: +// 0: the eeprom type is 93C46 +// 1: the eeprom type is 93C56 or 93C66 +//------------------------------------------------------------------- +void rtl8168_eeprom_type(struct rtl8168_private *tp) +{ + u16 magic = 0; + + if (tp->mcfg == CFG_METHOD_DEFAULT) + goto out_no_eeprom; + + if(RTL_R8(tp, 0xD2)&0x04) { + //not support + //tp->eeprom_type = EEPROM_TWSI; + //tp->eeprom_len = 256; + goto out_no_eeprom; + } else if(RTL_R32(tp, RxConfig) & RxCfg_9356SEL) { + tp->eeprom_type = EEPROM_TYPE_93C56; + tp->eeprom_len = 256; + } else { + tp->eeprom_type = EEPROM_TYPE_93C46; + tp->eeprom_len = 128; + } + + magic = rtl8168_eeprom_read_sc(tp, 0); + +out_no_eeprom: + if ((magic != 0x8129) && (magic != 0x8128)) { + tp->eeprom_type = EEPROM_TYPE_NONE; + tp->eeprom_len = 0; + } +} + +void rtl8168_eeprom_cleanup(struct rtl8168_private *tp) +{ + u8 x; + + x = RTL_R8(tp, Cfg9346); + x &= ~(Cfg9346_EEDI | Cfg9346_EECS); + + RTL_W8(tp, Cfg9346, x); + + rtl8168_raise_clock(tp, &x); + rtl8168_lower_clock(tp, &x); +} + +int rtl8168_eeprom_cmd_done(struct rtl8168_private *tp) +{ + u8 x; + int i; + + rtl8168_stand_by(tp); + + for (i = 0; i < 50000; i++) { + x = RTL_R8(tp, Cfg9346); + + if (x & Cfg9346_EEDO) { + udelay(RTL_CLOCK_RATE * 2 * 3); + return 0; + } + udelay(1); + } + + return -1; +} + +//------------------------------------------------------------------- +//rtl8168_eeprom_read_sc(): +// read one word from eeprom +//------------------------------------------------------------------- +u16 rtl8168_eeprom_read_sc(struct rtl8168_private *tp, u16 reg) +{ + int addr_sz = 6; + u8 x; + u16 data; + + if(tp->eeprom_type == EEPROM_TYPE_NONE) { + return -1; + } + + if (tp->eeprom_type==EEPROM_TYPE_93C46) + addr_sz = 6; + else if (tp->eeprom_type==EEPROM_TYPE_93C56) + addr_sz = 8; + + x = 
Cfg9346_EEM1 | Cfg9346_EECS; + RTL_W8(tp, Cfg9346, x); + + rtl8168_shift_out_bits(tp, RTL_EEPROM_READ_OPCODE, 3); + rtl8168_shift_out_bits(tp, reg, addr_sz); + + data = rtl8168_shift_in_bits(tp); + + rtl8168_eeprom_cleanup(tp); + + RTL_W8(tp, Cfg9346, 0); + + return data; +} + +//------------------------------------------------------------------- +//rtl8168_eeprom_write_sc(): +// write one word to a specific address in the eeprom +//------------------------------------------------------------------- +void rtl8168_eeprom_write_sc(struct rtl8168_private *tp, u16 reg, u16 data) +{ + u8 x; + int addr_sz = 6; + int w_dummy_addr = 4; + + if(tp->eeprom_type == EEPROM_TYPE_NONE) { + return ; + } + + if (tp->eeprom_type==EEPROM_TYPE_93C46) { + addr_sz = 6; + w_dummy_addr = 4; + } else if (tp->eeprom_type==EEPROM_TYPE_93C56) { + addr_sz = 8; + w_dummy_addr = 6; + } + + x = Cfg9346_EEM1 | Cfg9346_EECS; + RTL_W8(tp, Cfg9346, x); + + rtl8168_shift_out_bits(tp, RTL_EEPROM_EWEN_OPCODE, 5); + rtl8168_shift_out_bits(tp, reg, w_dummy_addr); + rtl8168_stand_by(tp); + + rtl8168_shift_out_bits(tp, RTL_EEPROM_ERASE_OPCODE, 3); + rtl8168_shift_out_bits(tp, reg, addr_sz); + if (rtl8168_eeprom_cmd_done(tp) < 0) { + return; + } + rtl8168_stand_by(tp); + + rtl8168_shift_out_bits(tp, RTL_EEPROM_WRITE_OPCODE, 3); + rtl8168_shift_out_bits(tp, reg, addr_sz); + rtl8168_shift_out_bits(tp, data, 16); + if (rtl8168_eeprom_cmd_done(tp) < 0) { + return; + } + rtl8168_stand_by(tp); + + rtl8168_shift_out_bits(tp, RTL_EEPROM_EWDS_OPCODE, 5); + rtl8168_shift_out_bits(tp, reg, w_dummy_addr); + + rtl8168_eeprom_cleanup(tp); + RTL_W8(tp, Cfg9346, 0); +} + +void rtl8168_raise_clock(struct rtl8168_private *tp, u8 *x) +{ + *x = *x | Cfg9346_EESK; + RTL_W8(tp, Cfg9346, *x); + udelay(RTL_CLOCK_RATE); +} + +void rtl8168_lower_clock(struct rtl8168_private *tp, u8 *x) +{ + + *x = *x & ~Cfg9346_EESK; + RTL_W8(tp, Cfg9346, *x); + udelay(RTL_CLOCK_RATE); +} + +void rtl8168_shift_out_bits(struct rtl8168_private *tp, int data, int count) +{ + u8 x; + int mask; + + mask = 0x01 << (count - 1); + x = RTL_R8(tp, Cfg9346); + x &= ~(Cfg9346_EEDI | Cfg9346_EEDO); + + do { + if (data & mask) + x |= Cfg9346_EEDI; + else + x &= ~Cfg9346_EEDI; + + RTL_W8(tp, Cfg9346, x); + udelay(RTL_CLOCK_RATE); + rtl8168_raise_clock(tp, &x); + rtl8168_lower_clock(tp, &x); + mask = mask >> 1; + } while(mask); + + x &= ~Cfg9346_EEDI; + RTL_W8(tp, Cfg9346, x); +} + +u16 rtl8168_shift_in_bits(struct rtl8168_private *tp) +{ + u8 x; + u16 d, i; + + x = RTL_R8(tp, Cfg9346); + x &= ~(Cfg9346_EEDI | Cfg9346_EEDO); + + d = 0; + + for (i = 0; i < 16; i++) { + d = d << 1; + rtl8168_raise_clock(tp, &x); + + x = RTL_R8(tp, Cfg9346); + x &= ~Cfg9346_EEDI; + + if (x & Cfg9346_EEDO) + d |= 1; + + rtl8168_lower_clock(tp, &x); + } + + return d; +} + +void rtl8168_stand_by(struct rtl8168_private *tp) +{ + u8 x; + + x = RTL_R8(tp, Cfg9346); + x &= ~(Cfg9346_EECS | Cfg9346_EESK); + RTL_W8(tp, Cfg9346, x); + udelay(RTL_CLOCK_RATE); + + x |= Cfg9346_EECS; + RTL_W8(tp, Cfg9346, x); +} + +void rtl8168_set_eeprom_sel_low(struct rtl8168_private *tp) +{ + RTL_W8(tp, Cfg9346, Cfg9346_EEM1); + RTL_W8(tp, Cfg9346, Cfg9346_EEM1 | Cfg9346_EESK); + + udelay(20); + + RTL_W8(tp, Cfg9346, Cfg9346_EEM1); +} diff --git a/addons/r8168/src/4.4.180/rtl_eeprom.h b/addons/r8168/src/4.4.180/rtl_eeprom.h new file mode 100644 index 00000000..861defd0 --- /dev/null +++ b/addons/r8168/src/4.4.180/rtl_eeprom.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* 
+################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +//EEPROM opcodes +#define RTL_EEPROM_READ_OPCODE 06 +#define RTL_EEPROM_WRITE_OPCODE 05 +#define RTL_EEPROM_ERASE_OPCODE 07 +#define RTL_EEPROM_EWEN_OPCODE 19 +#define RTL_EEPROM_EWDS_OPCODE 16 + +#define RTL_CLOCK_RATE 3 + +void rtl8168_eeprom_type(struct rtl8168_private *tp); +void rtl8168_eeprom_cleanup(struct rtl8168_private *tp); +u16 rtl8168_eeprom_read_sc(struct rtl8168_private *tp, u16 reg); +void rtl8168_eeprom_write_sc(struct rtl8168_private *tp, u16 reg, u16 data); +void rtl8168_shift_out_bits(struct rtl8168_private *tp, int data, int count); +u16 rtl8168_shift_in_bits(struct rtl8168_private *tp); +void rtl8168_raise_clock(struct rtl8168_private *tp, u8 *x); +void rtl8168_lower_clock(struct rtl8168_private *tp, u8 *x); +void rtl8168_stand_by(struct rtl8168_private *tp); +void rtl8168_set_eeprom_sel_low(struct rtl8168_private *tp); + + + diff --git a/addons/r8168/src/4.4.180/rtltool.c b/addons/r8168/src/4.4.180/rtltool.c new file mode 100644 index 00000000..2a1be083 --- /dev/null +++ b/addons/r8168/src/4.4.180/rtltool.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 
2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. + ***********************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "r8168.h" +#include "rtl_eeprom.h" +#include "rtltool.h" + +int rtl8168_tool_ioctl(struct rtl8168_private *tp, struct ifreq *ifr) +{ + struct rtltool_cmd my_cmd; + unsigned long flags; + int ret; + + if (copy_from_user(&my_cmd, ifr->ifr_data, sizeof(my_cmd))) + return -EFAULT; + + ret = 0; + switch (my_cmd.cmd) { + case RTLTOOL_READ_MAC: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (my_cmd.len==1) + my_cmd.data = readb(tp->mmio_addr+my_cmd.offset); + else if (my_cmd.len==2) + my_cmd.data = readw(tp->mmio_addr+(my_cmd.offset&~1)); + else if (my_cmd.len==4) + my_cmd.data = readl(tp->mmio_addr+(my_cmd.offset&~3)); + else { + ret = -EOPNOTSUPP; + break; + } + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + break; + + case RTLTOOL_WRITE_MAC: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (my_cmd.len==1) + writeb(my_cmd.data, tp->mmio_addr+my_cmd.offset); + else if (my_cmd.len==2) + writew(my_cmd.data, tp->mmio_addr+(my_cmd.offset&~1)); + else if (my_cmd.len==4) + writel(my_cmd.data, tp->mmio_addr+(my_cmd.offset&~3)); + else { + ret = -EOPNOTSUPP; + break; + } + + break; + + case RTLTOOL_READ_PHY: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + my_cmd.data = rtl8168_mdio_prot_read(tp, my_cmd.offset); + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + + break; + + case RTLTOOL_WRITE_PHY: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_mdio_prot_write(tp, my_cmd.offset, my_cmd.data); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + case RTLTOOL_READ_EPHY: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + my_cmd.data = rtl8168_ephy_read(tp, my_cmd.offset); + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + + break; + + case RTLTOOL_WRITE_EPHY: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_ephy_write(tp, my_cmd.offset, my_cmd.data); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + case RTLTOOL_READ_ERI: + my_cmd.data = 0; + if (my_cmd.len==1 || my_cmd.len==2 || my_cmd.len==4) { + spin_lock_irqsave(&tp->lock, flags); + my_cmd.data = rtl8168_eri_read(tp, my_cmd.offset, my_cmd.len, ERIAR_ExGMAC); + spin_unlock_irqrestore(&tp->lock, flags); + } else { + ret = -EOPNOTSUPP; + break; + } + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + + break; + + case RTLTOOL_WRITE_ERI: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (my_cmd.len==1 || my_cmd.len==2 || my_cmd.len==4) { + spin_lock_irqsave(&tp->lock, flags); + rtl8168_eri_write(tp, my_cmd.offset, my_cmd.len, my_cmd.data, ERIAR_ExGMAC); + spin_unlock_irqrestore(&tp->lock, flags); + } else { 
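+ /* ERI accesses are only defined for 1-, 2- and 4-byte widths */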
+ ret = -EOPNOTSUPP; + break; + } + break; + + case RTLTOOL_READ_PCI: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + my_cmd.data = 0; + if (my_cmd.len==1) + pci_read_config_byte(tp->pci_dev, my_cmd.offset, + (u8 *)&my_cmd.data); + else if (my_cmd.len==2) + pci_read_config_word(tp->pci_dev, my_cmd.offset, + (u16 *)&my_cmd.data); + else if (my_cmd.len==4) + pci_read_config_dword(tp->pci_dev, my_cmd.offset, + &my_cmd.data); + else { + ret = -EOPNOTSUPP; + break; + } + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + break; + + case RTLTOOL_WRITE_PCI: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (my_cmd.len==1) + pci_write_config_byte(tp->pci_dev, my_cmd.offset, + my_cmd.data); + else if (my_cmd.len==2) + pci_write_config_word(tp->pci_dev, my_cmd.offset, + my_cmd.data); + else if (my_cmd.len==4) + pci_write_config_dword(tp->pci_dev, my_cmd.offset, + my_cmd.data); + else { + ret = -EOPNOTSUPP; + break; + } + + break; + + case RTLTOOL_READ_EEPROM: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + my_cmd.data = rtl8168_eeprom_read_sc(tp, my_cmd.offset); + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + + break; + + case RTLTOOL_WRITE_EEPROM: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_eeprom_write_sc(tp, my_cmd.offset, my_cmd.data); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + case RTL_READ_OOB_MAC: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_oob_mutex_lock(tp); + my_cmd.data = rtl8168_ocp_read(tp, my_cmd.offset, 4); + rtl8168_oob_mutex_unlock(tp); + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + break; + + case RTL_WRITE_OOB_MAC: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (my_cmd.len == 0 || my_cmd.len > 4) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_oob_mutex_lock(tp); + rtl8168_ocp_write(tp, my_cmd.offset, my_cmd.len, my_cmd.data); + rtl8168_oob_mutex_unlock(tp); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + case RTL_ENABLE_PCI_DIAG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + tp->rtk_enable_diag = 1; + spin_unlock_irqrestore(&tp->lock, flags); + + dprintk("enable rtk diag\n"); + break; + + case RTL_DISABLE_PCI_DIAG: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + tp->rtk_enable_diag = 0; + spin_unlock_irqrestore(&tp->lock, flags); + + dprintk("disable rtk diag\n"); + break; + + case RTL_READ_MAC_OCP: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (my_cmd.offset % 2) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + my_cmd.data = rtl8168_mac_ocp_read(tp, my_cmd.offset); + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + break; + + case RTL_WRITE_MAC_OCP: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if ((my_cmd.offset % 2) || (my_cmd.len != 2)) + return -EOPNOTSUPP; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_mac_ocp_write(tp, my_cmd.offset, (u16)my_cmd.data); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + case RTL_DIRECT_READ_PHY_OCP: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + 
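+ /* direct PHY OCP register read, serialized by tp->lock like the other ioctl paths */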
my_cmd.data = rtl8168_mdio_prot_direct_read_phy_ocp(tp, my_cmd.offset); + spin_unlock_irqrestore(&tp->lock, flags); + + if (copy_to_user(ifr->ifr_data, &my_cmd, sizeof(my_cmd))) { + ret = -EFAULT; + break; + } + + break; + + case RTL_DIRECT_WRITE_PHY_OCP: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + spin_lock_irqsave(&tp->lock, flags); + rtl8168_mdio_prot_direct_write_phy_ocp(tp, my_cmd.offset, my_cmd.data); + spin_unlock_irqrestore(&tp->lock, flags); + break; + + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} diff --git a/addons/r8168/src/4.4.180/rtltool.h b/addons/r8168/src/4.4.180/rtltool.h new file mode 100644 index 00000000..d8731dff --- /dev/null +++ b/addons/r8168/src/4.4.180/rtltool.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* +################################################################################ +# +# r8168 is the Linux device driver released for Realtek Gigabit Ethernet +# controllers with PCI-Express interface. +# +# Copyright(c) 2021 Realtek Semiconductor Corp. All rights reserved. +# +# This program is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation; either version 2 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program; if not, see . +# +# Author: +# Realtek NIC software team +# No. 2, Innovation Road II, Hsinchu Science Park, Hsinchu 300, Taiwan +# +################################################################################ +*/ + +/************************************************************************************ + * This product is covered by one or more of the following patents: + * US6,570,884, US6,115,776, and US6,327,625. 
+ ***********************************************************************************/ + +#ifndef _LINUX_RTLTOOL_H +#define _LINUX_RTLTOOL_H + +#define SIOCRTLTOOL SIOCDEVPRIVATE+1 + +enum rtl_cmd { + RTLTOOL_READ_MAC=0, + RTLTOOL_WRITE_MAC, + RTLTOOL_READ_PHY, + RTLTOOL_WRITE_PHY, + RTLTOOL_READ_EPHY, + RTLTOOL_WRITE_EPHY, + RTLTOOL_READ_ERI, + RTLTOOL_WRITE_ERI, + RTLTOOL_READ_PCI, + RTLTOOL_WRITE_PCI, + RTLTOOL_READ_EEPROM, + RTLTOOL_WRITE_EEPROM, + + RTL_READ_OOB_MAC, + RTL_WRITE_OOB_MAC, + + RTL_ENABLE_PCI_DIAG, + RTL_DISABLE_PCI_DIAG, + + RTL_READ_MAC_OCP, + RTL_WRITE_MAC_OCP, + + RTL_DIRECT_READ_PHY_OCP, + RTL_DIRECT_WRITE_PHY_OCP, + + RTLTOOL_INVALID +}; + +struct rtltool_cmd { + __u32 cmd; + __u32 offset; + __u32 len; + __u32 data; +}; + +enum mode_access { + MODE_NONE=0, + MODE_READ, + MODE_WRITE +}; + +#ifdef __KERNEL__ +int rtl8168_tool_ioctl(struct rtl8168_private *tp, struct ifreq *ifr); +#endif + +#endif /* _LINUX_RTLTOOL_H */ diff --git a/addons/r8169/install.sh b/addons/r8169/install.sh new file mode 100644 index 00000000..d1e3f082 --- /dev/null +++ b/addons/r8169/install.sh @@ -0,0 +1,5 @@ +if [ "${1}" = "rd" ]; then + echo "Installing module for Realtek R8169 Ethernet adapter" + ${INSMOD} "/modules/mii.ko" + ${INSMOD} "/modules/r8169.ko" ${PARAMS} +fi diff --git a/addons/r8169/manifest.yml b/addons/r8169/manifest.yml new file mode 100644 index 00000000..d09dc879 --- /dev/null +++ b/addons/r8169/manifest.yml @@ -0,0 +1,30 @@ +version: 1 +name: r8169 +description: "Driver for Realtek R8169 Ethernet adapters" +conflits: + - r8168 +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true diff --git a/addons/r8169/src/3.10.108/Makefile b/addons/r8169/src/3.10.108/Makefile new file mode 100644 index 00000000..340f8cbe --- /dev/null +++ b/addons/r8169/src/3.10.108/Makefile @@ -0,0 +1,2 @@ +obj-m += r8169.o +obj-m += mii.o diff --git a/addons/r8169/src/3.10.108/mii.c b/addons/r8169/src/3.10.108/mii.c new file mode 100644 index 00000000..4a99c391 --- /dev/null +++ b/addons/r8169/src/3.10.108/mii.c @@ -0,0 +1,472 @@ +/* + + mii.c: MII interface library + + Maintained by Jeff Garzik + Copyright 2001,2002 Jeff Garzik + + Various code came from myson803.c and other files by + Donald Becker. Copyright: + + Written 1998-2002 by Donald Becker. + + This software may be used and distributed according + to the terms of the GNU General Public License (GPL), + incorporated herein by reference. Drivers based on + or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. + This file is not a complete program and may only be + used when the entire operating system is licensed + under the GPL. 
+ + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + + */ + +#include +#include +#include +#include +#include + +static u32 mii_get_an(struct mii_if_info *mii, u16 addr) +{ + int advert; + + advert = mii->mdio_read(mii->dev, mii->phy_id, addr); + + return mii_lpa_to_ethtool_lpa_t(advert); +} + +/** + * mii_ethtool_gset - get settings that are specified in @ecmd + * @mii: MII interface + * @ecmd: requested ethtool_cmd + * + * The @ecmd parameter is expected to have been cleared before calling + * mii_ethtool_gset(). + * + * Returns 0 for success, negative on error. + */ +int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) +{ + struct net_device *dev = mii->dev; + u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0; + u32 nego; + + ecmd->supported = + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); + if (mii->supports_gmii) + ecmd->supported |= SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + + /* only supports twisted-pair */ + ecmd->port = PORT_MII; + + /* only supports internal transceiver */ + ecmd->transceiver = XCVR_INTERNAL; + + /* this isn't fully supported at higher layers */ + ecmd->phy_address = mii->phy_id; + ecmd->mdio_support = ETH_MDIO_SUPPORTS_C22; + + ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; + + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR); + if (mii->supports_gmii) { + ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); + stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000); + } + if (bmcr & BMCR_ANENABLE) { + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; + + ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE); + if (mii->supports_gmii) + ecmd->advertising |= + mii_ctrl1000_to_ethtool_adv_t(ctrl1000); + + if (bmsr & BMSR_ANEGCOMPLETE) { + ecmd->lp_advertising = mii_get_an(mii, MII_LPA); + ecmd->lp_advertising |= + mii_stat1000_to_ethtool_lpa_t(stat1000); + } else { + ecmd->lp_advertising = 0; + } + + nego = ecmd->advertising & ecmd->lp_advertising; + + if (nego & (ADVERTISED_1000baseT_Full | + ADVERTISED_1000baseT_Half)) { + ethtool_cmd_speed_set(ecmd, SPEED_1000); + ecmd->duplex = !!(nego & ADVERTISED_1000baseT_Full); + } else if (nego & (ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half)) { + ethtool_cmd_speed_set(ecmd, SPEED_100); + ecmd->duplex = !!(nego & ADVERTISED_100baseT_Full); + } else { + ethtool_cmd_speed_set(ecmd, SPEED_10); + ecmd->duplex = !!(nego & ADVERTISED_10baseT_Full); + } + } else { + ecmd->autoneg = AUTONEG_DISABLE; + + ethtool_cmd_speed_set(ecmd, + ((bmcr & BMCR_SPEED1000 && + (bmcr & BMCR_SPEED100) == 0) ? + SPEED_1000 : + ((bmcr & BMCR_SPEED100) ? + SPEED_100 : SPEED_10))); + ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; + } + + mii->full_duplex = ecmd->duplex; + + /* ignore maxtxpkt, maxrxpkt for now */ + + return 0; +} + +/** + * mii_ethtool_sset - set settings that are specified in @ecmd + * @mii: MII interface + * @ecmd: requested ethtool_cmd + * + * Returns 0 for success, negative on error. 
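+ * + * The requested mode is applied by rewriting the PHY's ADVERTISE (and, for + * gigabit PHYs, CTRL1000) registers when autonegotiating, or BMCR for a + * forced speed/duplex.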
+ */ +int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) +{ + struct net_device *dev = mii->dev; + u32 speed = ethtool_cmd_speed(ecmd); + + if (speed != SPEED_10 && + speed != SPEED_100 && + speed != SPEED_1000) + return -EINVAL; + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + return -EINVAL; + if (ecmd->port != PORT_MII) + return -EINVAL; + if (ecmd->transceiver != XCVR_INTERNAL) + return -EINVAL; + if (ecmd->phy_address != mii->phy_id) + return -EINVAL; + if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + if ((speed == SPEED_1000) && (!mii->supports_gmii)) + return -EINVAL; + + /* ignore supported, maxtxpkt, maxrxpkt */ + + if (ecmd->autoneg == AUTONEG_ENABLE) { + u32 bmcr, advert, tmp; + u32 advert2 = 0, tmp2 = 0; + + if ((ecmd->advertising & (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) == 0) + return -EINVAL; + + /* advertise only what has been requested */ + advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); + tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + if (mii->supports_gmii) { + advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); + tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); + } + tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising); + + if (mii->supports_gmii) + tmp2 |= + ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising); + if (advert != tmp) { + mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); + mii->advertising = tmp; + } + if ((mii->supports_gmii) && (advert2 != tmp2)) + mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2); + + /* turn on autonegotiation, and force a renegotiate */ + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); + + mii->force_media = 0; + } else { + u32 bmcr, tmp; + + /* turn off auto negotiation, set speed and duplexity */ + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | + BMCR_SPEED1000 | BMCR_FULLDPLX); + if (speed == SPEED_1000) + tmp |= BMCR_SPEED1000; + else if (speed == SPEED_100) + tmp |= BMCR_SPEED100; + if (ecmd->duplex == DUPLEX_FULL) { + tmp |= BMCR_FULLDPLX; + mii->full_duplex = 1; + } else + mii->full_duplex = 0; + if (bmcr != tmp) + mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); + + mii->force_media = 1; + } + return 0; +} + +/** + * mii_check_gmii_support - check if the MII supports Gb interfaces + * @mii: the MII interface + */ +int mii_check_gmii_support(struct mii_if_info *mii) +{ + int reg; + + reg = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); + if (reg & BMSR_ESTATEN) { + reg = mii->mdio_read(mii->dev, mii->phy_id, MII_ESTATUS); + if (reg & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) + return 1; + } + + return 0; +} + +/** + * mii_link_ok - is link status up/ok + * @mii: the MII interface + * + * Returns 1 if the MII reports link status up/ok, 0 otherwise. + */ +int mii_link_ok (struct mii_if_info *mii) +{ + /* first, a dummy read, needed to latch some MII phys */ + mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); + if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) + return 1; + return 0; +} + +/** + * mii_nway_restart - restart NWay (autonegotiation) for this interface + * @mii: the MII interface + * + * Returns 0 on success, negative on error. 
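+ * Restart is requested by setting BMCR_ANRESTART; if autonegotiation is + * disabled in BMCR the call fails with -EINVAL.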
+ */ +int mii_nway_restart (struct mii_if_info *mii) +{ + int bmcr; + int r = -EINVAL; + + /* if autoneg is off, it's an error */ + bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); + + if (bmcr & BMCR_ANENABLE) { + bmcr |= BMCR_ANRESTART; + mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); + r = 0; + } + + return r; +} + +/** + * mii_check_link - check MII link status + * @mii: MII interface + * + * If the link status changed (previous != current), call + * netif_carrier_on() if current link status is Up or call + * netif_carrier_off() if current link status is Down. + */ +void mii_check_link (struct mii_if_info *mii) +{ + int cur_link = mii_link_ok(mii); + int prev_link = netif_carrier_ok(mii->dev); + + if (cur_link && !prev_link) + netif_carrier_on(mii->dev); + else if (prev_link && !cur_link) + netif_carrier_off(mii->dev); +} + +/** + * mii_check_media - check the MII interface for a duplex change + * @mii: the MII interface + * @ok_to_print: OK to print link up/down messages + * @init_media: OK to save duplex mode in @mii + * + * Returns 1 if the duplex mode changed, 0 if not. + * If the media type is forced, always returns 0. + */ +unsigned int mii_check_media (struct mii_if_info *mii, + unsigned int ok_to_print, + unsigned int init_media) +{ + unsigned int old_carrier, new_carrier; + int advertise, lpa, media, duplex; + int lpa2 = 0; + + /* if forced media, go no further */ + if (mii->force_media) + return 0; /* duplex did not change */ + + /* check current and old link status */ + old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0; + new_carrier = (unsigned int) mii_link_ok(mii); + + /* if carrier state did not change, this is a "bounce", + * just exit as everything is already set correctly + */ + if ((!init_media) && (old_carrier == new_carrier)) + return 0; /* duplex did not change */ + + /* no carrier, nothing much to do */ + if (!new_carrier) { + netif_carrier_off(mii->dev); + if (ok_to_print) + netdev_info(mii->dev, "link down\n"); + return 0; /* duplex did not change */ + } + + /* + * we have carrier, see who's on the other end + */ + netif_carrier_on(mii->dev); + + /* get MII advertise and LPA values */ + if ((!init_media) && (mii->advertising)) + advertise = mii->advertising; + else { + advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE); + mii->advertising = advertise; + } + lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA); + if (mii->supports_gmii) + lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000); + + /* figure out media and duplex from advertise and LPA values */ + media = mii_nway_result(lpa & advertise); + duplex = (media & ADVERTISE_FULL) ? 1 : 0; + if (lpa2 & LPA_1000FULL) + duplex = 1; + + if (ok_to_print) + netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n", + lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 : + media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? + 100 : 10, + duplex ? "full" : "half", + lpa); + + if ((init_media) || (mii->full_duplex != duplex)) { + mii->full_duplex = duplex; + return 1; /* duplex changed */ + } + + return 0; /* duplex did not change */ +} + +/** + * generic_mii_ioctl - main MII ioctl interface + * @mii_if: the MII interface + * @mii_data: MII ioctl data structure + * @cmd: MII ioctl command + * @duplex_chg_out: pointer to @duplex_changed status if there was no + * ioctl error + * + * Returns 0 on success, negative on error. 
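+ * Handles SIOCGMIIPHY, SIOCGMIIREG and SIOCSMIIREG; any other command + * returns -EOPNOTSUPP.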
+ */ +int generic_mii_ioctl(struct mii_if_info *mii_if, + struct mii_ioctl_data *mii_data, int cmd, + unsigned int *duplex_chg_out) +{ + int rc = 0; + unsigned int duplex_changed = 0; + + if (duplex_chg_out) + *duplex_chg_out = 0; + + mii_data->phy_id &= mii_if->phy_id_mask; + mii_data->reg_num &= mii_if->reg_num_mask; + + switch(cmd) { + case SIOCGMIIPHY: + mii_data->phy_id = mii_if->phy_id; + /* fall through */ + + case SIOCGMIIREG: + mii_data->val_out = + mii_if->mdio_read(mii_if->dev, mii_data->phy_id, + mii_data->reg_num); + break; + + case SIOCSMIIREG: { + u16 val = mii_data->val_in; + + if (mii_data->phy_id == mii_if->phy_id) { + switch(mii_data->reg_num) { + case MII_BMCR: { + unsigned int new_duplex = 0; + if (val & (BMCR_RESET|BMCR_ANENABLE)) + mii_if->force_media = 0; + else + mii_if->force_media = 1; + if (mii_if->force_media && + (val & BMCR_FULLDPLX)) + new_duplex = 1; + if (mii_if->full_duplex != new_duplex) { + duplex_changed = 1; + mii_if->full_duplex = new_duplex; + } + break; + } + case MII_ADVERTISE: + mii_if->advertising = val; + break; + default: + /* do nothing */ + break; + } + } + + mii_if->mdio_write(mii_if->dev, mii_data->phy_id, + mii_data->reg_num, val); + break; + } + + default: + rc = -EOPNOTSUPP; + break; + } + + if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) + *duplex_chg_out = 1; + + return rc; +} + +MODULE_AUTHOR ("Jeff Garzik "); +MODULE_DESCRIPTION ("MII hardware support library"); +MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL(mii_link_ok); +EXPORT_SYMBOL(mii_nway_restart); +EXPORT_SYMBOL(mii_ethtool_gset); +EXPORT_SYMBOL(mii_ethtool_sset); +EXPORT_SYMBOL(mii_check_link); +EXPORT_SYMBOL(mii_check_media); +EXPORT_SYMBOL(mii_check_gmii_support); +EXPORT_SYMBOL(generic_mii_ioctl); + diff --git a/addons/r8169/src/3.10.108/r8169.c b/addons/r8169/src/3.10.108/r8169.c new file mode 100644 index 00000000..2183c618 --- /dev/null +++ b/addons/r8169/src/3.10.108/r8169.c @@ -0,0 +1,7150 @@ +/* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#define RTL8169_VERSION "2.3LK-NAPI" +#define MODULENAME "r8169" +#define PFX MODULENAME ": " + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" +#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" +#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" + +#ifdef RTL8169_DEBUG +#define assert(expr) \ + if (!(expr)) { \ + printk( "Assertion failed! %s,%s,%s,line=%d\n", \ + #expr,__FILE__,__func__,__LINE__); \ + } +#define dprintk(fmt, args...) 
\ + do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) +#else +#define assert(expr) do {} while (0) +#define dprintk(fmt, args...) do {} while (0) +#endif /* RTL8169_DEBUG */ + +#define R8169_MSG_DEFAULT \ + (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) + +#define TX_SLOTS_AVAIL(tp) \ + (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx) + +/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ +#define TX_FRAGS_READY_FOR(tp,nr_frags) \ + (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1)) + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. */ +static const int multicast_filter_limit = 32; + +#define MAX_READ_REQUEST_SHIFT 12 +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +#define R8169_REGS_SIZE 256 +#define R8169_NAPI_WEIGHT 64 +#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */ +#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define RTL8169_TX_TIMEOUT (6*HZ) +#define RTL8169_PHY_TIMEOUT (10*HZ) + +/* write/read MMIO register */ +#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) +#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) +#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) +#define RTL_R8(reg) readb (ioaddr + (reg)) +#define RTL_R16(reg) readw (ioaddr + (reg)) +#define RTL_R32(reg) readl (ioaddr + (reg)) + +enum mac_version { + RTL_GIGA_MAC_VER_01 = 0, + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + RTL_GIGA_MAC_VER_12, + RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, + RTL_GIGA_MAC_VER_15, + RTL_GIGA_MAC_VER_16, + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + RTL_GIGA_MAC_VER_27, + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, + RTL_GIGA_MAC_VER_39, + RTL_GIGA_MAC_VER_40, + RTL_GIGA_MAC_VER_41, + RTL_GIGA_MAC_VER_42, + RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_NONE = 0xff, +}; + +enum rtl_tx_desc_version { + RTL_TD_0 = 0, + RTL_TD_1 = 1, +}; + +#define JUMBO_1K ETH_DATA_LEN +#define JUMBO_4K (4*1024 - ETH_HLEN - 2) +#define JUMBO_6K (6*1024 - ETH_HLEN - 2) +#define JUMBO_7K (7*1024 - ETH_HLEN - 2) +#define JUMBO_9K (9*1024 - ETH_HLEN - 2) + +#define _R(NAME,TD,FW,SZ,B) { \ + .name = NAME, \ + .txd_version = TD, \ + .fw_name = FW, \ + .jumbo_max = SZ, \ + .jumbo_tx_csum = B \ +} + +static const struct { + const char *name; + enum rtl_tx_desc_version txd_version; + const char *fw_name; + u16 jumbo_max; + bool jumbo_tx_csum; +} rtl_chip_infos[] = { + /* PCI devices. 
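+	   (Illustrative note, not part of the original source: each _R()
+	   entry that follows expands to a rtl_chip_infos initializer carrying
+	   the chip's marketing name, its Tx descriptor version, an optional
+	   firmware blob name, the largest usable jumbo frame size, and
+	   whether Tx checksumming still works at jumbo sizes.)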
*/ + [RTL_GIGA_MAC_VER_01] = + _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_02] = + _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_03] = + _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_04] = + _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_05] = + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_06] = + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + /* PCI-E devices. */ + [RTL_GIGA_MAC_VER_07] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_08] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_09] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_10] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_11] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_12] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_13] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_14] = + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_15] = + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_16] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_17] = + _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_18] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_19] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_20] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_21] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_22] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_23] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_24] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_25] = + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_26] = + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_27] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_28] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_29] = + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_30] = + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_31] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_32] = + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_33] = + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_34] = + _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_35] = + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_36] = + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_37] = + _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_38] = + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_39] = + _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_40] = + _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_41] = + _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_42] = + 
_R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_43] = + _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, + JUMBO_1K, true), +}; +#undef _R + +enum cfg_version { + RTL_CFG_0 = 0x00, + RTL_CFG_1, + RTL_CFG_2 +}; + +static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = { + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, + PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, + { 0x0001, 0x8168, + PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 }, + {0,}, +}; + +MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); + +static int rx_buf_sz = 16383; +static int use_dac; +static struct { + u32 msg_enable; +} debug = { -1 }; + +enum rtl_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. */ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, +#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ +#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, +#define RX128_INT_EN (1 << 15) /* 8111c and later */ +#define RX_MULTI_EN (1 << 14) /* 8111c only */ +#define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ +#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RX_EARLY_OFF (1 << 11) +#define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ +#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + RxMissed = 0x4c, + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + MultiIntr = 0x5c, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + +#define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. 
*/ + +#define TxPacketMax (8064 >> 7) +#define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + FuncForceEvent = 0xfc, +}; + +enum rtl8110_registers { + TBICSR = 0x64, + TBI_ANAR = 0x68, + TBI_LPAR = 0x6a, +}; + +enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, +#define CSIAR_FLAG 0x80000000 +#define CSIAR_WRITE_CMD 0x80000000 +#define CSIAR_BYTE_ENABLE 0x0f +#define CSIAR_BYTE_ENABLE_SHIFT 12 +#define CSIAR_ADDR_MASK 0x0fff +#define CSIAR_FUNC_CARD 0x00000000 +#define CSIAR_FUNC_SDIO 0x00010000 +#define CSIAR_FUNC_NIC 0x00020000 + PMCH = 0x6f, + EPHYAR = 0x80, +#define EPHYAR_FLAG 0x80000000 +#define EPHYAR_WRITE_CMD 0x80000000 +#define EPHYAR_REG_MASK 0x1f +#define EPHYAR_REG_SHIFT 16 +#define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PFM_EN (1 << 6) + DBG_REG = 0xd1, +#define FIX_NAK_1 (1 << 4) +#define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define NOW_IS_OOB (1 << 7) +#define TX_EMPTY (1 << 5) +#define RX_EMPTY (1 << 4) +#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) +#define LINK_LIST_RDY (1 << 1) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff +}; + +enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, +#define ERIAR_FLAG 0x80000000 +#define ERIAR_WRITE_CMD 0x80000000 +#define ERIAR_READ_CMD 0x00000000 +#define ERIAR_ADDR_BYTE_ALIGN 4 +#define ERIAR_TYPE_SHIFT 16 +#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) +#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) +#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_MASK_SHIFT 12 +#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ +#define OCPDR_WRITE_CMD 0x80000000 +#define OCPDR_READ_CMD 0x00000000 +#define OCPDR_REG_MASK 0x7f +#define OCPDR_GPHY_REG_SHIFT 16 +#define OCPDR_DATA_MASK 0xffff + OCPAR = 0xb4, +#define OCPAR_FLAG 0x80000000 +#define OCPAR_GPHY_WRITE_CMD 0x8000f060 +#define OCPAR_GPHY_READ_CMD 0x0000f060 + GPHY_OCP = 0xb8, + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. 
*/ +#define TXPLA_RST (1 << 29) +#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ +#define PWM_EN (1 << 22) +#define RXDV_GATED_EN (1 << 19) +#define EARLY_TALLY_EN (1 << 16) +}; + +enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxBOVF = (1 << 24), + RxFOVF = (1 << 23), + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +#define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* TBICSR p.28 */ + TBIReset = 0x80000000, + TBILoopback = 0x40000000, + TBINwEnable = 0x20000000, + TBINwRestart = 0x10000000, + TBILinkOk = 0x02000000, + TBINwComplete = 0x01000000, + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), + INTT_0 = 0x0000, // 8168 + INTT_1 = 0x0001, // 8168 + INTT_2 = 0x0002, // 8168 + INTT_3 = 0x0003, // 8168 + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* _TBICSRBit */ + TBILinkOK = 0x02000000, + + /* DumpCounterCommand */ + CounterDump = 0x8, +}; + +enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ +}; + +/* Generic case. */ +enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ +#define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ +}; + +/* 8169, 8168b and 810x except 8102e. */ +enum rtl_tx_desc_bit_0 { + /* First doubleword. */ +#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ +}; + +/* 8102e, 8168c and beyond. */ +enum rtl_tx_desc_bit_1 { + /* Second doubleword. 
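+	   (Illustrative note, not part of the original source: on RTL_TD_1
+	   parts the checksum-request bits moved from the first descriptor
+	   doubleword (opts1) to the second (opts2); the opts_offset field of
+	   tx_desc_info below records exactly that difference.)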
*/ +#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IP_CS = (1 << 29), /* Calculate IP checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ +}; + +static const struct rtl_tx_desc_info { + struct { + u32 udp; + u32 tcp; + } checksum; + u16 mss_shift; + u16 opts_offset; +} tx_desc_info [] = { + [RTL_TD_0] = { + .checksum = { + .udp = TD0_IP_CS | TD0_UDP_CS, + .tcp = TD0_IP_CS | TD0_TCP_CS + }, + .mss_shift = TD0_MSS_SHIFT, + .opts_offset = 0 + }, + [RTL_TD_1] = { + .checksum = { + .udp = TD1_IP_CS | TD1_UDP_CS, + .tcp = TD1_IP_CS | TD1_TCP_CS + }, + .mss_shift = TD1_MSS_SHIFT, + .opts_offset = 1 + } +}; + +enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 2/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + RxVlanTag = (1 << 16), /* VLAN tag available */ +}; + +#define RsvdMask 0x3fffc000 + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; + u8 __pad[sizeof(void *) - sizeof(u32)]; +}; + +enum features { + RTL_FEATURE_WOL = (1 << 0), + RTL_FEATURE_MSI = (1 << 1), + RTL_FEATURE_GMII = (1 << 2), +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; +}; + +enum rtl_flag { + RTL_FLAG_TASK_ENABLED, + RTL_FLAG_TASK_SLOW_PENDING, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_TASK_PHY_PENDING, + RTL_FLAG_MAX +}; + +struct rtl8169_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct napi_struct napi; + u32 msg_enable; + u16 txd_version; + u16 mac_version; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
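+			(That is, the next packet to be transmitted; note not
+			part of the original source: cur_tx and dirty_tx run
+			freely, so TX_SLOTS_AVAIL() above is a plain
+			subtraction with no wrap-around special case.)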
*/ + u32 dirty_tx; + struct rtl8169_stats rx_stats; + struct rtl8169_stats tx_stats; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + struct timer_list timer; + u16 cp_cmd; + + u16 event_slow; + + struct mdio_ops { + void (*write)(struct rtl8169_private *, int, int); + int (*read)(struct rtl8169_private *, int); + } mdio_ops; + + struct pll_power_ops { + void (*down)(struct rtl8169_private *); + void (*up)(struct rtl8169_private *); + } pll_power_ops; + + struct jumbo_ops { + void (*enable)(struct rtl8169_private *); + void (*disable)(struct rtl8169_private *); + } jumbo_ops; + + struct csi_ops { + void (*write)(struct rtl8169_private *, int, int); + u32 (*read)(struct rtl8169_private *, int); + } csi_ops; + + int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + void (*phy_reset_enable)(struct rtl8169_private *tp); + void (*hw_start)(struct net_device *); + unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); + unsigned int (*link_ok)(void __iomem *); + int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct mutex mutex; + struct work_struct work; + } wk; + + unsigned features; + + struct mii_if_info mii; + struct rtl8169_counters counters; + u32 saved_wolopts; + u32 opts1_mask; + + struct rtl_fw { + const struct firmware *fw; + +#define RTL_VER_SIZE 32 + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; + } *rtl_fw; +#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN) + + u32 ocp_base; +}; + +MODULE_AUTHOR("Realtek and the Linux r8169 crew "); +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); +module_param(use_dac, int, 0); +MODULE_PARM_DESC(use_dac, "Enable PCI DAC. 
Unsafe on 32 bit PCI slot."); +module_param_named(debug, debug.msg_enable, int, 0); +MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(RTL8169_VERSION); +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8105E_1); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8402_1); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8106E_1); +MODULE_FIRMWARE(FIRMWARE_8106E_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); + +static void rtl_lock_work(struct rtl8169_private *tp) +{ + mutex_lock(&tp->wk.mutex); +} + +static void rtl_unlock_work(struct rtl8169_private *tp) +{ + mutex_unlock(&tp->wk.mutex); +} + +static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) +{ + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, force); +} + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +static void rtl_udelay(unsigned int d) +{ + udelay(d); +} + +static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, + void (*delay)(unsigned int), unsigned int d, int n, + bool high) +{ + int i; + + for (i = 0; i < n; i++) { + delay(d); + if (c->check(tp) == high) + return true; + } + netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n", + c->msg, !high, n, d); + return false; +} + +static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, rtl_udelay, d, n, true); +} + +static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, rtl_udelay, d, n, false); +} + +static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, msleep, d, n, true); +} + +static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, msleep, d, n, false); +} + +#define DECLARE_RTL_COND(name) \ +static bool name ## _check(struct rtl8169_private *); \ + \ +static const struct rtl_cond name = { \ + .check = name ## _check, \ + .msg = #name \ +}; \ + \ +static bool name ## _check(struct rtl8169_private *tp) + +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(OCPAR) & OCPAR_FLAG; +} + +static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + + return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? 
+ RTL_R32(OCPDR) : ~0; +} + +static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(OCPDR, data); + RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + + rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +DECLARE_RTL_COND(rtl_eriar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(ERIAR) & ERIAR_FLAG; +} + +static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(ERIDR, cmd); + RTL_W32(ERIAR, 0x800010e8); + msleep(2); + + if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5)) + return; + + ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10; +} + +DECLARE_RTL_COND(rtl_ocp_read_cond) +{ + u16 reg; + + reg = rtl8168_get_ocp_reg(tp); + + return ocp_read(tp, 0x0f, reg) & 0x00000800; +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); + + rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10); +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); + + rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10); +} + +static int r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0; +} + +static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg) +{ + if (reg & 0xffff0001) { + netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg); + return true; + } + return false; +} + +DECLARE_RTL_COND(rtl_ocp_gphy_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(GPHY_OCP) & OCPAR_FLAG; +} + +static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + + rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); +} + +static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(GPHY_OCP, reg << 15); + + return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? + (RTL_R32(GPHY_OCP) & 0xffff) : ~0; +} + +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data); +} + +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(OCPDR, reg << 15); + + return RTL_R32(OCPDR); +} + +#define OCP_STD_PHY_BASE 0xa400 + +static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value ? 
value << 4 : OCP_STD_PHY_BASE; + return; + } + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); +} + +static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) +{ + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); +} + +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static int mac_mcu_read(struct rtl8169_private *tp, int reg) +{ + return r8168_mac_ocp_read(tp, tp->ocp_base + reg); +} + +DECLARE_RTL_COND(rtl_phyar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(PHYAR) & 0x80000000; +} + +static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + + rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); +} + +static int r8169_mdio_read(struct rtl8169_private *tp, int reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + int value; + + RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16); + + value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? + RTL_R32(PHYAR) & 0xffff : ~0; + + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. + */ + udelay(20); + + return value; +} + +static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(EPHY_RXER_NUM, 0); + + rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); +} + +static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_1_mdio_access(tp, reg, + OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); +} + +static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); + + mdelay(1); + RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(EPHY_RXER_NUM, 0); + + return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? 
+ RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0; +} + +#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + +static void r8168dp_2_mdio_start(void __iomem *ioaddr) +{ + RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_stop(void __iomem *ioaddr) +{ + RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168dp_2_mdio_start(ioaddr); + + r8169_mdio_write(tp, reg, value); + + r8168dp_2_mdio_stop(ioaddr); +} + +static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + int value; + + r8168dp_2_mdio_start(ioaddr); + + value = r8169_mdio_read(tp, reg); + + r8168dp_2_mdio_stop(ioaddr); + + return value; +} + +static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val) +{ + tp->mdio_ops.write(tp, location, val); +} + +static int rtl_readphy(struct rtl8169_private *tp, int location) +{ + return tp->mdio_ops.read(tp, location); +} + +static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value) +{ + rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value); +} + +static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) +{ + int val; + + val = rtl_readphy(tp, reg_addr); + rtl_writephy(tp, reg_addr, (val | p) & ~m); +} + +static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, + int val) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_writephy(tp, location, val); +} + +static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + return rtl_readphy(tp, location); +} + +DECLARE_RTL_COND(rtl_ephyar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(EPHYAR) & EPHYAR_FLAG; +} + +static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); + + udelay(10); +} + +static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? + RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0; +} + +static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) +{ + void __iomem *ioaddr = tp->mmio_addr; + + BUG_ON((addr & 3) || (mask == 0)); + RTL_W32(ERIDR, val); + RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr); + + rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); +} + +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); + + return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? 
+ RTL_R32(ERIDR) : ~0; +} + +static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, + u32 m, int type) +{ + u32 val; + + val = rtl_eri_read(tp, addr, type); + rtl_eri_write(tp, addr, mask, (val & ~m) | p, type); +} + +struct exgmac_reg { + u16 addr; + u16 mask; + u32 val; +}; + +static void rtl_write_exgmac_batch(struct rtl8169_private *tp, + const struct exgmac_reg *r, int len) +{ + while (len-- > 0) { + rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC); + r++; + } +} + +DECLARE_RTL_COND(rtl_efusear_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(EFUSEAR) & EFUSEAR_FLAG; +} + +static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? + RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0; +} + +static u16 rtl_get_events(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R16(IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrStatus, bits); + mmiowb(); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrMask, 0); + mmiowb(); +} + +static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrMask, bits); +} + +#define RTL_EVENT_NAPI_RX (RxOK | RxErr) +#define RTL_EVENT_NAPI_TX (TxOK | TxErr) +#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX) + +static void rtl_irq_enable_all(struct rtl8169_private *tp) +{ + rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow); +} + +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + rtl_irq_disable(tp); + rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow); + RTL_R8(ChipCmd); +} + +static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(TBICSR) & TBIReset; +} + +static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) +{ + return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; +} + +static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr) +{ + return RTL_R32(TBICSR) & TBILinkOk; +} + +static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr) +{ + return RTL_R8(PHYstatus) & LinkStatus; +} + +static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset); +} + +static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) +{ + unsigned int val; + + val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET; + rtl_writephy(tp, MII_BMCR, val & 0xffff); +} + +static void rtl_link_chg_patch(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct net_device *dev = tp->dev; + + if (!netif_running(dev)) + return; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { + if (RTL_R8(PHYstatus) & _1000bpsF) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); + } else if (RTL_R8(PHYstatus) & _100bps) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); + } else { + 
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, + ERIAR_EXGMAC); + } + /* Reset packet filter */ + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, + ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, + ERIAR_EXGMAC); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (RTL_R8(PHYstatus) & _1000bpsF) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, + ERIAR_EXGMAC); + } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (RTL_R8(PHYstatus) & _10bps) { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, + ERIAR_EXGMAC); + } else { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, + ERIAR_EXGMAC); + } + } +} + +static void __rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr, bool pm) +{ + if (tp->link_ok(ioaddr)) { + rtl_link_chg_patch(tp); + /* This is to cancel a scheduled suspend if there's one. */ + if (pm) + pm_request_resume(&tp->pci_dev->dev); + netif_carrier_on(dev); + if (net_ratelimit()) + netif_info(tp, ifup, dev, "link up\n"); + } else { + netif_carrier_off(dev); + netif_info(tp, ifdown, dev, "link down\n"); + if (pm) + pm_schedule_suspend(&tp->pci_dev->dev, 5000); + } +} + +static void rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr) +{ + __rtl8169_check_link_status(dev, tp, ioaddr, false); +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static u32 __rtl8169_get_wol(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u8 options; + u32 wolopts = 0; + + options = RTL_R8(Config1); + if (!(options & PMEnable)) + return 0; + + options = RTL_R8(Config3); + if (options & LinkUp) + wolopts |= WAKE_PHY; + if (options & MagicPacket) + wolopts |= WAKE_MAGIC; + + options = RTL_R8(Config5); + if (options & UWF) + wolopts |= WAKE_UCAST; + if (options & BWF) + wolopts |= WAKE_BCAST; + if (options & MWF) + wolopts |= WAKE_MCAST; + + return wolopts; +} + +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_lock_work(tp); + + wol->supported = WAKE_ANY; + wol->wolopts = __rtl8169_get_wol(tp); + + rtl_unlock_work(tp); +} + +static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) +{ + void __iomem *ioaddr = tp->mmio_addr; + unsigned int i; + static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_MAGIC, Config3, MagicPacket }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake } + }; + u8 options; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + for (i = 0; i < ARRAY_SIZE(cfg); i++) { + options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(cfg[i].reg, options); + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... 
RTL_GIGA_MAC_VER_17: + options = RTL_R8(Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(Config1, options); + break; + default: + options = RTL_R8(Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(Config2, options); + break; + } + + RTL_W8(Cfg9346, Cfg9346_Lock); +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_lock_work(tp); + + if (wol->wolopts) + tp->features |= RTL_FEATURE_WOL; + else + tp->features &= ~RTL_FEATURE_WOL; + __rtl8169_set_wol(tp, wol->wolopts); + + rtl_unlock_work(tp); + + device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); + + return 0; +} + +static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp) +{ + return rtl_chip_infos[tp->mac_version].fw_name; +} + +static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strlcpy(info->driver, MODULENAME, sizeof(info->driver)); + strlcpy(info->version, RTL8169_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (!IS_ERR_OR_NULL(rtl_fw)) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int rtl8169_get_regs_len(struct net_device *dev) +{ + return R8169_REGS_SIZE; +} + +static int rtl8169_set_speed_tbi(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 ignored) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + int ret = 0; + u32 reg; + + reg = RTL_R32(TBICSR); + if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && + (duplex == DUPLEX_FULL)) { + RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart)); + } else if (autoneg == AUTONEG_ENABLE) + RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart); + else { + netif_warn(tp, link, dev, + "incorrect speed setting refused in TBI mode\n"); + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int rtl8169_set_speed_xmii(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 adv) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int giga_ctrl, bmcr; + int rc = -EINVAL; + + rtl_writephy(tp, 0x1f, 0x0000); + + if (autoneg == AUTONEG_ENABLE) { + int auto_nego; + + auto_nego = rtl_readphy(tp, MII_ADVERTISE); + auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL); + + if (adv & ADVERTISED_10baseT_Half) + auto_nego |= ADVERTISE_10HALF; + if (adv & ADVERTISED_10baseT_Full) + auto_nego |= ADVERTISE_10FULL; + if (adv & ADVERTISED_100baseT_Half) + auto_nego |= ADVERTISE_100HALF; + if (adv & ADVERTISED_100baseT_Full) + auto_nego |= ADVERTISE_100FULL; + + auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + + giga_ctrl = rtl_readphy(tp, MII_CTRL1000); + giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); + + /* The 8100e/8101e/8102e do Fast Ethernet only. 
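+		 * (Illustrative note, not part of the original source: on a
+		 * gigabit-capable part, a call such as
+		 *
+		 *	rtl8169_set_speed_xmii(dev, AUTONEG_ENABLE, SPEED_1000,
+		 *			       DUPLEX_FULL,
+		 *			       ADVERTISED_100baseT_Full |
+		 *			       ADVERTISED_1000baseT_Full);
+		 *
+		 * ends up setting ADVERTISE_100FULL in MII_ADVERTISE and
+		 * ADVERTISE_1000FULL in MII_CTRL1000, then restarting
+		 * autonegotiation through MII_BMCR.)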
*/ + if (tp->mii.supports_gmii) { + if (adv & ADVERTISED_1000baseT_Half) + giga_ctrl |= ADVERTISE_1000HALF; + if (adv & ADVERTISED_1000baseT_Full) + giga_ctrl |= ADVERTISE_1000FULL; + } else if (adv & (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) { + netif_info(tp, link, dev, + "PHY does not support 1000Mbps\n"); + goto out; + } + + bmcr = BMCR_ANENABLE | BMCR_ANRESTART; + + rtl_writephy(tp, MII_ADVERTISE, auto_nego); + rtl_writephy(tp, MII_CTRL1000, giga_ctrl); + } else { + giga_ctrl = 0; + + if (speed == SPEED_10) + bmcr = 0; + else if (speed == SPEED_100) + bmcr = BMCR_SPEED100; + else + goto out; + + if (duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + } + + rtl_writephy(tp, MII_BMCR, bmcr); + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) { + if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) { + rtl_writephy(tp, 0x17, 0x2138); + rtl_writephy(tp, 0x0e, 0x0260); + } else { + rtl_writephy(tp, 0x17, 0x2108); + rtl_writephy(tp, 0x0e, 0x0000); + } + } + + rc = 0; +out: + return rc; +} + +static int rtl8169_set_speed(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 advertising) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = tp->set_speed(dev, autoneg, speed, duplex, advertising); + if (ret < 0) + goto out; + + if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) && + (advertising & ADVERTISED_1000baseT_Full)) { + mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); + } +out: + return ret; +} + +static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + del_timer_sync(&tp->timer); + + rtl_lock_work(tp); + ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), + cmd->duplex, cmd->advertising); + rtl_unlock_work(tp); + + return ret; +} + +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > JUMBO_1K && + !rtl_chip_infos[tp->mac_version].jumbo_tx_csum) + features &= ~NETIF_F_IP_CSUM; + + return features; +} + +static void __rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + netdev_features_t changed = features ^ dev->features; + void __iomem *ioaddr = tp->mmio_addr; + + if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_RX))) + return; + + if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX)) { + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + else + tp->cp_cmd &= ~RxChkSum; + + if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + + RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_R16(CPlusCmd); + } + if (changed & NETIF_F_RXALL) { + int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt)); + if (features & NETIF_F_RXALL) + tmp |= (AcceptErr | AcceptRunt); + RTL_W32(RxConfig, tmp); + } +} + +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_lock_work(tp); + __rtl8169_set_features(dev, features); + rtl_unlock_work(tp); + + return 0; +} + + +static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) +{ + return (vlan_tx_tag_present(skb)) ? 
+ TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00; +} + +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +} + +static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 status; + + cmd->supported = + SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_INTERNAL; + + status = RTL_R32(TBICSR); + cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0; + cmd->autoneg = !!(status & TBINwEnable); + + ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->duplex = DUPLEX_FULL; /* Always set */ + + return 0; +} + +static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + return mii_ethtool_gset(&tp->mii, cmd); +} + +static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int rc; + + rtl_lock_work(tp); + rc = tp->get_settings(dev, cmd); + rtl_unlock_work(tp); + + return rc; +} + +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (regs->len > R8169_REGS_SIZE) + regs->len = R8169_REGS_SIZE; + + rtl_lock_work(tp); + memcpy_fromio(p, tp->mmio_addr, regs->len); + rtl_unlock_work(tp); +} + +static u32 rtl8169_get_msglevel(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + return tp->msg_enable; +} + +static void rtl8169_set_msglevel(struct net_device *dev, u32 value) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + tp->msg_enable = value; +} + +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; + +static int rtl8169_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } +} + +DECLARE_RTL_COND(rtl_counters_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(CounterAddrLow) & CounterDump; +} + +static void rtl8169_update_counters(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct device *d = &tp->pci_dev->dev; + struct rtl8169_counters *counters; + dma_addr_t paddr; + u32 cmd; + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. 
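+	 *
+	 * (Descriptive note, not part of the original source: the dump below
+	 * hands the NIC a coherent DMA buffer, writing the upper address bits
+	 * to CounterAddrHigh and the lower bits ORed with CounterDump to
+	 * CounterAddrLow, then polls rtl_counters_cond until the hardware
+	 * clears CounterDump before the snapshot is copied into
+	 * tp->counters.)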
+ */ + if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) + return; + + counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL); + if (!counters) + return; + + RTL_W32(CounterAddrHigh, (u64)paddr >> 32); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(CounterAddrLow, cmd); + RTL_W32(CounterAddrLow, cmd | CounterDump); + + if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000)) + memcpy(&tp->counters, counters, sizeof(*counters)); + + RTL_W32(CounterAddrLow, 0); + RTL_W32(CounterAddrHigh, 0); + + dma_free_coherent(d, sizeof(*counters), counters, paddr); +} + +static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + ASSERT_RTNL(); + + rtl8169_update_counters(dev); + + data[0] = le64_to_cpu(tp->counters.tx_packets); + data[1] = le64_to_cpu(tp->counters.rx_packets); + data[2] = le64_to_cpu(tp->counters.tx_errors); + data[3] = le32_to_cpu(tp->counters.rx_errors); + data[4] = le16_to_cpu(tp->counters.rx_missed); + data[5] = le16_to_cpu(tp->counters.align_errors); + data[6] = le32_to_cpu(tp->counters.tx_one_collision); + data[7] = le32_to_cpu(tp->counters.tx_multi_collision); + data[8] = le64_to_cpu(tp->counters.rx_unicast); + data[9] = le64_to_cpu(tp->counters.rx_broadcast); + data[10] = le32_to_cpu(tp->counters.rx_multicast); + data[11] = le16_to_cpu(tp->counters.tx_aborted); + data[12] = le16_to_cpu(tp->counters.tx_underun); +} + +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } +} + +static const struct ethtool_ops rtl8169_ethtool_ops = { + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_settings = rtl8169_get_settings, + .set_settings = rtl8169_set_settings, + .get_msglevel = rtl8169_get_msglevel, + .set_msglevel = rtl8169_set_msglevel, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + .get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static void rtl8169_get_mac_version(struct rtl8169_private *tp, + struct net_device *dev, u8 default_version) +{ + void __iomem *ioaddr = tp->mmio_addr; + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u32 mask; + u32 val; + int mac_version; + } mac_info[] = { + /* 8168G family. */ + { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 }, + { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 }, + { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 }, + + /* 8168F family. */ + { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 }, + { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, + { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. */ + { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 }, + { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 }, + { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 }, + { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. 
*/ + { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, + { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, + { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. */ + { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 }, + { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 }, + { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 }, + { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 }, + { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 }, + { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 }, + { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 }, + { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 }, + { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 }, + { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 }, + { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 }, + { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 }, + { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 }, + { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. */ + { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 }, + { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 }, + { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 }, + { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, + { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 }, + { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 }, + { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 }, + { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 }, + { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 }, + { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 }, + /* FIXME: where did these entries come from ? -- FR */ + { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 }, + { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 }, + + /* 8110 family. */ + { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 }, + { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 }, + { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 }, + { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 }, + { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 }, + { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 }, + + /* Catch-all */ + { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + u32 reg; + + reg = RTL_R32(TxConfig); + while ((reg & p->mask) != p->val) + p++; + tp->mac_version = p->mac_version; + + if (tp->mac_version == RTL_GIGA_MAC_NONE) { + netif_notice(tp, probe, dev, + "unknown MAC, using family default\n"); + tp->mac_version = default_version; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) { + tp->mac_version = tp->mii.supports_gmii ? 
+ RTL_GIGA_MAC_VER_42 : + RTL_GIGA_MAC_VER_43; + } +} + +static void rtl8169_print_mac_version(struct rtl8169_private *tp) +{ + dprintk("mac_version = 0x%02x\n", tp->mac_version); +} + +struct phy_reg { + u16 reg; + u16 val; +}; + +static void rtl_writephy_batch(struct rtl8169_private *tp, + const struct phy_reg *regs, int len) +{ + while (len-- > 0) { + rtl_writephy(tp, regs->reg, regs->val); + regs++; + } +} + +#define PHY_READ 0x00000000 +#define PHY_DATA_OR 0x10000000 +#define PHY_DATA_AND 0x20000000 +#define PHY_BJMPN 0x30000000 +#define PHY_MDIO_CHG 0x40000000 +#define PHY_CLEAR_READCOUNT 0x70000000 +#define PHY_WRITE 0x80000000 +#define PHY_READCOUNT_EQ_SKIP 0x90000000 +#define PHY_COMP_EQ_SKIPN 0xa0000000 +#define PHY_COMP_NEQ_SKIPN 0xb0000000 +#define PHY_WRITE_PREVIOUS 0xc0000000 +#define PHY_SKIPN 0xd0000000 +#define PHY_DELAY_MS 0xe0000000 + +struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code)) + +static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + char *version = rtl_fw->version; + bool rc = false; + + if (fw->size < FW_OPCODE_SIZE) + goto out; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + goto out; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + goto out; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + goto out; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + goto out; + + memcpy(version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + goto out; + + strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + version[RTL_VER_SIZE - 1] = 0; + + rc = true; +out: + return rc; +} + +static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev, + struct rtl_fw_phy_action *pa) +{ + bool rc = false; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 regno = (action & 0x0fff0000) >> 16; + + switch(action & 0xf0000000) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_MDIO_CHG: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_BJMPN: + if (regno > index) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + + default: + netif_err(tp, ifup, tp->dev, + "Invalid action 0x%08x\n", action); + goto out; + } + } + rc = true; +out: + return rc; +} + +static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct net_device *dev = tp->dev; + int rc = -EINVAL; + + if (!rtl_fw_format_ok(tp, rtl_fw)) { + netif_err(tp, ifup, dev, "invalid firwmare\n"); + 
goto out; + } + + if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action)) + rc = 0; +out: + return rc; +} + +static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + struct mdio_ops org, *ops = &tp->mdio_ops; + u32 predata, count; + size_t index; + + predata = count = 0; + org.write = ops->write; + org.read = ops->read; + + for (index = 0; index < pa->size; ) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + if (!action) + break; + + switch(action & 0xf0000000) { + case PHY_READ: + predata = rtl_readphy(tp, regno); + count++; + index++; + break; + case PHY_DATA_OR: + predata |= data; + index++; + break; + case PHY_DATA_AND: + predata &= data; + index++; + break; + case PHY_BJMPN: + index -= regno; + break; + case PHY_MDIO_CHG: + if (data == 0) { + ops->write = org.write; + ops->read = org.read; + } else if (data == 1) { + ops->write = mac_mcu_write; + ops->read = mac_mcu_read; + } + + index++; + break; + case PHY_CLEAR_READCOUNT: + count = 0; + index++; + break; + case PHY_WRITE: + rtl_writephy(tp, regno, data); + index++; + break; + case PHY_READCOUNT_EQ_SKIP: + index += (count == data) ? 2 : 1; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + index++; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + index++; + break; + case PHY_WRITE_PREVIOUS: + rtl_writephy(tp, regno, predata); + index++; + break; + case PHY_SKIPN: + index += regno + 1; + break; + case PHY_DELAY_MS: + mdelay(data); + index++; + break; + + default: + BUG(); + } + } + + ops->write = org.write; + ops->read = org.read; +} + +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + if (!IS_ERR_OR_NULL(tp->rtl_fw)) { + release_firmware(tp->rtl_fw->fw); + kfree(tp->rtl_fw); + } + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; +} + +static void rtl_apply_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw = tp->rtl_fw; + + /* TODO: release firmware once rtl_phy_write_fw signals failures. 
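Until then the blob stays cached in tp->rtl_fw and is re-applied verbatim.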
*/ + if (!IS_ERR_OR_NULL(rtl_fw)) + rtl_phy_write_fw(tp, rtl_fw); +} + +static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) +{ + if (rtl_readphy(tp, reg) != val) + netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n"); + else + rtl_apply_firmware(tp); +} + +static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b, 0x0000 }, + { 0x00, 0x9200 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x01, 0x90d0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + + if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) || + (pdev->subsystem_device != 0xe000)) + return; + + rtl_writephy(tp, 0x1f, 0x0001); + rtl_writephy(tp, 0x10, 0xf01b); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl8169scd_hw_phy_config_quirk(tp); +} + +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 
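/* tail of the 8110SCe parameter table */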
0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x10, 0xf41b }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0001); + rtl_patchphy(tp, 0x16, 1 << 0); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0000 }, + { 0x1d, 0x0f00 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x1ec8 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0000); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x16, 1 << 0); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x16, 1 << 0); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void 
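/* rev.4 of the 8168C PHY init simply reuses the rev.3 sequence */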
rtl8168c_4_hw_phy_config(struct rtl8169_private *tp) +{ + rtl8168c_3_hw_phy_config(tp); +} + +static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } + }; + + rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef); + rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } + }; + int val; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = rtl_readphy(tp, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + rtl_writephy(tp, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + rtl_writephy(tp, 0x0d, val | set[i]); + } + } else { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x6662 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x6662 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + /* RSET couple improve */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_patchphy(tp, 0x0d, 0x0300); + rtl_patchphy(tp, 0x0f, 0x0010); + + /* Fine tune PLL performance */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x001b); + + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Can not link to 1Gbps with bad cable + * Decrease SNR threshold form 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } + }; + + rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + 
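/* same 0xb1-efuse tuning as in rtl8168d_1_hw_phy_config above */ +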
{ 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + + { 0x1f, 0x0002 } + }; + int val; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = rtl_readphy(tp, 0x0d); + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + rtl_writephy(tp, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + rtl_writephy(tp, 0x0d, val | set[i]); + } + } else { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x2642 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x2642 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + /* Fine tune PLL performance */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000); + + /* Switching regulator Slew rate */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_patchphy(tp, 0x0f, 0x0017); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x001b); + + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x0023 }, + { 0x16, 0x0000 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x002d }, + { 0x18, 0x0040 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_patchphy(tp, 0x0d, 1 << 5); +} + +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Enable Delay cap */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b80 }, + { 0x06, 0xc896 }, + { 0x1f, 0x0000 }, + + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + + /* Update PFM & 10M TX idle timer */ + { 0x1f, 0x0007 }, + { 0x1e, 0x002f }, + { 0x15, 0x1919 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x00ac }, + { 0x18, 0x0006 }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* DCO enable for 10M IDLE Power */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0023); + rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + 
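/* Each tweak below selects a vendor register page via reg 0x1f first. */ +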
/* For impedance matching */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100); + rtl_writephy(tp, 0x1f, 0x0006); + rtl_writephy(tp, 0x00, 0x5a00); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); +} + +static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr) +{ + const u16 w[] = { + addr[0] | (addr[1] << 8), + addr[2] | (addr[3] << 8), + addr[4] | (addr[5] << 8) + }; + const struct exgmac_reg e[] = { + { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) }, + { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] }, + { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 }, + { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) } + }; + + rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e)); +} + +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Enable Delay cap */ + { 0x1f, 0x0004 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x00ac }, + { 0x18, 0x0006 }, + { 0x1f, 0x0002 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + + /* Green Setting */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b5b }, + { 0x06, 0x9222 }, + { 0x05, 0x8b6d }, + { 0x06, 0x8000 }, + { 0x05, 0x8b76 }, + { 0x06, 0x8000 }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0002); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + /* improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* EEE setting */ + rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0002); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 
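/* MMD regs 13/14 sequence: clears dev 7, reg 0x3c (EEE advertisement) */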
0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); + + /* Green feature */ + rtl_writephy(tp, 0x1f, 0x0003); + rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); + rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */ + rtl_rar_exgmac_set(tp, tp->dev->dev_addr); +} + +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp) +{ + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + /* Improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + + /* Modify green table for giga & fnet */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b55 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b5e }, + { 0x06, 0x0000 }, + { 0x05, 0x8b67 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b70 }, + { 0x06, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x0078 }, + { 0x17, 0x0000 }, + { 0x19, 0x00fb }, + { 0x1f, 0x0000 }, + + /* Modify green table for 10M */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b79 }, + { 0x06, 0xaa00 }, + { 0x1f, 0x0000 }, + + /* Disable hiimpedance detection (RTCT) */ + { 0x1f, 0x0003 }, + { 0x01, 0x328a }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl8168f_hw_phy_config(tp); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + + /* Modify green table for giga & fnet */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b55 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b5e }, + { 0x06, 0x0000 }, + { 0x05, 0x8b67 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b70 }, + { 0x06, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x0078 }, + { 0x17, 0x0000 }, + { 0x19, 0x00aa }, + { 0x1f, 0x0000 }, + + /* Modify green table for 10M */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b79 }, + { 0x06, 0xaa00 }, + { 0x1f, 0x0000 }, + + /* Disable hiimpedance detection (RTCT) */ + { 0x1f, 0x0003 }, + { 0x01, 0x328a }, + { 0x1f, 0x0000 } + }; + + + rtl_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* Modify green table for giga */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b54); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800); + rtl_writephy(tp, 0x05, 
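/* next green-table entry */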
0x8b5d); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800); + rtl_writephy(tp, 0x05, 0x8a7c); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a7f); + rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000); + rtl_writephy(tp, 0x05, 0x8a82); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a85); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a88); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0000); + + /* uc same-seed solution */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* eee setting */ + rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); + + /* Green feature */ + rtl_writephy(tp, 0x1f, 0x0003); + rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001); + rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); + + rtl_writephy(tp, 0x1f, 0x0a46); + if (rtl_readphy(tp, 0x10) & 0x0100) { + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w1w0_phy(tp, 0x12, 0x0000, 0x8000); + } else { + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w1w0_phy(tp, 0x12, 0x8000, 0x0000); + } + + rtl_writephy(tp, 0x1f, 0x0a46); + if (rtl_readphy(tp, 0x13) & 0x0100) { + rtl_writephy(tp, 0x1f, 0x0c41); + rtl_w1w0_phy(tp, 0x15, 0x0002, 0x0000); + } else { + rtl_writephy(tp, 0x1f, 0x0c41); + rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0002); + } + + /* Enable PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w1w0_phy(tp, 0x11, 0x000c, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w1w0_phy(tp, 0x14, 0x0100, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w1w0_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8084); + rtl_w1w0_phy(tp, 0x14, 0x0000, 0x6000); + rtl_w1w0_phy(tp, 0x10, 0x1003, 0x0000); + + /* EEE auto-fallback function */ + rtl_writephy(tp, 0x1f, 0x0a4b); + rtl_w1w0_phy(tp, 0x11, 0x0004, 0x0000); + + /* Enable UC LPF tune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8012); + rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0c42); + rtl_w1w0_phy(tp, 0x11, 0x4000, 0x2000); + + /* Improve SWR Efficiency */ + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x5065); + rtl_writephy(tp, 0x14, 0xd065); + rtl_writephy(tp, 0x1f, 0x0bc8); + rtl_writephy(tp, 0x11, 0x5655); + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x1065); + rtl_writephy(tp, 0x14, 0x9065); + rtl_writephy(tp, 0x14, 0x1065); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); +} + +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 }, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; 
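+ /* rtl_patchphy() ORs the given bits into the register (read-modify-write) */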
+ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_patchphy(tp, 0x11, 1 << 12); + rtl_patchphy(tp, 0x19, 1 << 13); + rtl_patchphy(tp, 0x10, 1 << 15); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0005 }, + { 0x1a, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0004 }, + { 0x1c, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x15, 0x7701 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(100); + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8402_hw_phy_config(struct rtl8169_private *tp) +{ + /* Disable ALDPS before setting firmware */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(20); + + rtl_apply_firmware(tp); + + /* EEE setting */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x10, 0x401f); + rtl_writephy(tp, 0x19, 0x7030); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0004 }, + { 0x10, 0xc07f }, + { 0x19, 0x7030 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(100); + + rtl_apply_firmware(tp); + + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); +} + +static void rtl_hw_phy_config(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_print_mac_version(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + break; + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + rtl8169s_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_04: + rtl8169sb_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_05: + rtl8169scd_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_06: + rtl8169sce_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + rtl8102e_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_11: + rtl8168bb_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_12: + rtl8168bef_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_17: + rtl8168bef_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_18: + rtl8168cp_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_19: + rtl8168c_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_20: + rtl8168c_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_21: + rtl8168c_3_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_22: + rtl8168c_4_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + rtl8168cp_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_25: + rtl8168d_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_26: + rtl8168d_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_27: + rtl8168d_3_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_28: + rtl8168d_4_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + rtl8105e_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_31: + /* None. 
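The 8168DP PHY needs no host-side setup here.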
*/ + break; + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl8168e_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_34: + rtl8168e_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_35: + rtl8168f_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_36: + rtl8168f_2_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_37: + rtl8402_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_38: + rtl8411_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_39: + rtl8106e_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_40: + rtl8168g_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + rtl8168g_2_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_41: + default: + break; + } +} + +static void rtl_phy_work(struct rtl8169_private *tp) +{ + struct timer_list *timer = &tp->timer; + void __iomem *ioaddr = tp->mmio_addr; + unsigned long timeout = RTL8169_PHY_TIMEOUT; + + assert(tp->mac_version > RTL_GIGA_MAC_VER_01); + + if (tp->phy_reset_pending(tp)) { + /* + * A busy loop could burn quite a few cycles on nowadays CPU. + * Let's delay the execution of the timer for a few ticks. + */ + timeout = HZ/10; + goto out_mod_timer; + } + + if (tp->link_ok(ioaddr)) + return; + + netif_warn(tp, link, tp->dev, "PHY reset until link up\n"); + + tp->phy_reset_enable(tp); + +out_mod_timer: + mod_timer(timer, jiffies + timeout); +} + +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) +{ + if (!test_and_set_bit(flag, tp->wk.flags)) + schedule_work(&tp->wk.work); +} + +static void rtl8169_phy_timer(unsigned long __opaque) +{ + struct net_device *dev = (struct net_device *)__opaque; + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING); +} + +static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev, + void __iomem *ioaddr) +{ + iounmap(ioaddr); + pci_release_regions(pdev); + pci_clear_mwi(pdev); + pci_disable_device(pdev); + free_netdev(dev); +} + +DECLARE_RTL_COND(rtl_phy_reset_cond) +{ + return tp->phy_reset_pending(tp); +} + +static void rtl8169_phy_reset(struct net_device *dev, + struct rtl8169_private *tp) +{ + tp->phy_reset_enable(tp); + rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100); +} + +static bool rtl_tbi_enabled(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return (tp->mac_version == RTL_GIGA_MAC_VER_01) && + (RTL_R8(PHYstatus) & TBI_Enable); +} + +static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + rtl_hw_phy_config(dev); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8(0x82, 0x01); + } + + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + + if (tp->mac_version == RTL_GIGA_MAC_VER_02) { + dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8(0x82, 0x01); + dprintk("Set PHY Reg 0x0bh = 0x00h\n"); + rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 + } + + rtl8169_phy_reset(dev, tp); + + rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + (tp->mii.supports_gmii ? 
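/* gigabit modes only when the PHY is GMII-capable */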
+ ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0)); + + if (rtl_tbi_enabled(tp)) + netif_info(tp, link, dev, "TBI auto-negotiating\n"); +} + +static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + rtl_lock_work(tp); + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + RTL_W32(MAC4, addr[4] | addr[5] << 8); + RTL_R32(MAC4); + + RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); + RTL_R32(MAC0); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + rtl_rar_exgmac_set(tp, addr); + + RTL_W8(Cfg9346, Cfg9346_Lock); + + rtl_unlock_work(tp); +} + +static int rtl_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + rtl_rar_set(tp, dev->dev_addr); + + return 0; +} + +static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + + return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV; +} + +static int rtl_xmii_ioctl(struct rtl8169_private *tp, + struct mii_ioctl_data *data, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 32; /* Internal PHY */ + return 0; + + case SIOCGMIIREG: + data->val_out = rtl_readphy(tp, data->reg_num & 0x1f); + return 0; + + case SIOCSMIIREG: + rtl_writephy(tp, data->reg_num & 0x1f, data->val_in); + return 0; + } + return -EOPNOTSUPP; +} + +static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd) +{ + return -EOPNOTSUPP; +} + +static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp) +{ + if (tp->features & RTL_FEATURE_MSI) { + pci_disable_msi(pdev); + tp->features &= ~RTL_FEATURE_MSI; + } +} + +static void rtl_init_mdio_ops(struct rtl8169_private *tp) +{ + struct mdio_ops *ops = &tp->mdio_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + ops->write = r8168dp_1_mdio_write; + ops->read = r8168dp_1_mdio_read; + break; + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + ops->write = r8168dp_2_mdio_write; + ops->read = r8168dp_2_mdio_read; + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + ops->write = r8168g_mdio_write; + ops->read = r8168g_mdio_read; + break; + default: + ops->write = r8169_mdio_write; + ops->read = r8169_mdio_read; + break; + } +} + +static void rtl_speed_down(struct rtl8169_private *tp) +{ + u32 adv; + int lpa; + + rtl_writephy(tp, 0x1f, 0x0000); + lpa = rtl_readphy(tp, MII_LPA); + + if (lpa & (LPA_10HALF | LPA_10FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; + else if (lpa & (LPA_100HALF | LPA_100FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; + else + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + (tp->mii.supports_gmii ? 
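/* as above: advertise 1000BASE-T only on GMII-capable parts */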
+ ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0); + + rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + adv); +} + +static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + RTL_W32(RxConfig, RTL_R32(RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + break; + default: + break; + } +} + +static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) +{ + if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) + return false; + + rtl_speed_down(tp); + rtl_wol_suspend_quirk(tp); + + return true; +} + +static void r810x_phy_power_down(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); +} + +static void r810x_phy_power_up(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); +} + +static void r810x_pll_power_down(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_wol_pll_power_down(tp)) + return; + + r810x_phy_power_down(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_16: + break; + default: + RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + break; + } +} + +static void r810x_pll_power_up(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r810x_phy_power_up(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_16: + break; + default: + RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + break; + } +} + +static void r8168_phy_power_up(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_writephy(tp, 0x0e, 0x0000); + break; + default: + break; + } + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); +} + +static void r8168_phy_power_down(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); + break; + + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case 
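/* this arm writes 0x0e and then falls through to the BMCR_PDOWN below */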
RTL_GIGA_MAC_VER_31: + rtl_writephy(tp, 0x0e, 0x0200); + default: + rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); + break; + } +} + +static void r8168_pll_power_down(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) && + r8168dp_check_dash(tp)) { + return; + } + + if ((tp->mac_version == RTL_GIGA_MAC_VER_23 || + tp->mac_version == RTL_GIGA_MAC_VER_24) && + (RTL_R16(CPlusCmd) & ASF)) { + return; + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(tp, 0x19, 0xff64); + + if (rtl_wol_pll_power_down(tp)) + return; + + r8168_phy_power_down(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000, + 0xfc000000, ERIAR_EXGMAC); + break; + } +} + +static void r8168_pll_power_up(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + rtl_w1w0_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, + 0x00000000, ERIAR_EXGMAC); + break; + } + + r8168_phy_power_up(tp); +} + +static void rtl_generic_op(struct rtl8169_private *tp, + void (*op)(struct rtl8169_private *)) +{ + if (op) + op(tp); +} + +static void rtl_pll_power_down(struct rtl8169_private *tp) +{ + rtl_generic_op(tp, tp->pll_power_ops.down); +} + +static void rtl_pll_power_up(struct rtl8169_private *tp) +{ + rtl_generic_op(tp, tp->pll_power_ops.up); +} + +static void rtl_init_pll_power_ops(struct rtl8169_private *tp) +{ + struct pll_power_ops *ops = &tp->pll_power_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: + ops->down = r810x_pll_power_down; + ops->up = r810x_pll_power_up; + break; + + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + ops->down = r8168_pll_power_down; + ops->up = r8168_pll_power_up; + break; + + default: + ops->down = NULL; + ops->up = NULL; + break; + } +} + +static void rtl_init_rxcfg(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + switch (tp->mac_version) { 
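/* RX FIFO/DMA defaults are grouped by chip generation */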
+ case RTL_GIGA_MAC_VER_01: + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + case RTL_GIGA_MAC_VER_04: + case RTL_GIGA_MAC_VER_05: + case RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_14: + case RTL_GIGA_MAC_VER_15: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_17: + RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + default: + RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } +} + +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) +{ + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +} + +static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + rtl_generic_op(tp, tp->jumbo_ops.enable); + RTL_W8(Cfg9346, Cfg9346_Lock); +} + +static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + rtl_generic_op(tp, tp->jumbo_ops.disable); + RTL_W8(Cfg9346, Cfg9346_Lock); +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1); + rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT); +} + +static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1); + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(MaxTxPacketSize, 0x3f); + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) | 0x01); + rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(MaxTxPacketSize, 0x0c); + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) & ~0x01); + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); +} + +static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) +{ + rtl_tx_performance_tweak(tp->pci_dev, + (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); +} + +static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) +{ + rtl_tx_performance_tweak(tp->pci_dev, + (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) 
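/* rev.1 adds a Config4 bit on top of the rev.0 PCIe tweak */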
+{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168b_0_hw_jumbo_enable(tp); + + RTL_W8(Config4, RTL_R8(Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168b_0_hw_jumbo_disable(tp); + + RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); +} + +static void rtl_init_jumbo_ops(struct rtl8169_private *tp) +{ + struct jumbo_ops *ops = &tp->jumbo_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + ops->disable = r8168b_0_hw_jumbo_disable; + ops->enable = r8168b_0_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + ops->disable = r8168b_1_hw_jumbo_disable; + ops->enable = r8168b_1_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + ops->disable = r8168c_hw_jumbo_disable; + ops->enable = r8168c_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + ops->disable = r8168dp_hw_jumbo_disable; + ops->enable = r8168dp_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + ops->disable = r8168e_hw_jumbo_disable; + ops->enable = r8168e_hw_jumbo_enable; + break; + + /* + * No action needed for jumbo frames with 8169. + * No jumbo for 810x at all. + */ + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + default: + ops->disable = NULL; + ops->enable = NULL; + break; + } +} + +DECLARE_RTL_COND(rtl_chipcmd_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R8(ChipCmd) & CmdReset; +} + +static void rtl_hw_reset(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(ChipCmd, CmdReset); + + rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); +} + +static void rtl_request_uncached_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw; + const char *name; + int rc = -ENOMEM; + + name = rtl_lookup_firmware_name(tp); + if (!name) + goto out_no_firmware; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + goto err_warn; + + rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev); + if (rc < 0) + goto err_free; + + rc = rtl_check_firmware(tp, rtl_fw); + if (rc < 0) + goto err_release_firmware; + + tp->rtl_fw = rtl_fw; +out: + return; + +err_release_firmware: + release_firmware(rtl_fw->fw); +err_free: + kfree(rtl_fw); +err_warn: + netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n", + name, rc); +out_no_firmware: + tp->rtl_fw = NULL; + goto out; +} + +static void rtl_request_firmware(struct rtl8169_private *tp) +{ + if (IS_ERR(tp->rtl_fw)) + rtl_request_uncached_firmware(tp); +} + +static void rtl_rx_close(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} + +DECLARE_RTL_COND(rtl_npq_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R8(TxPoll) & NPQ; +} + +DECLARE_RTL_COND(rtl_txcfg_empty_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(TxConfig) & TXCFG_EMPTY; +} + +static void rtl8169_hw_reset(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = 
tp->mmio_addr; + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(tp); + + rtl_rx_close(tp); + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36 || + tp->mac_version == RTL_GIGA_MAC_VER_37 || + tp->mac_version == RTL_GIGA_MAC_VER_40 || + tp->mac_version == RTL_GIGA_MAC_VER_41 || + tp->mac_version == RTL_GIGA_MAC_VER_42 || + tp->mac_version == RTL_GIGA_MAC_VER_43 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { + RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); + } else { + RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + udelay(100); + } + + rtl_hw_reset(tp); +} + +static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* Set DMA burst size and Interframe Gap Time */ + RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) | + (InterFrameGap << TxInterFrameGapShift)); +} + +static void rtl_hw_start(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + tp->hw_start(dev); + + rtl_irq_enable_all(tp); +} + +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp, + void __iomem *ioaddr) +{ + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. + */ + RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); +} + +static u16 rtl_rw_cpluscmd(void __iomem *ioaddr) +{ + u16 cmd; + + cmd = RTL_R16(CPlusCmd); + RTL_W16(CPlusCmd, cmd); + return cmd; +} + +static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz) +{ + /* Low hurts. Let's disable the filtering. */ + RTL_W16(RxMaxSize, rx_buf_sz + 1); +} + +static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) +{ + static const struct rtl_cfg2_info { + u32 mac_version; + u32 clk; + u32 val; + } cfg2_info [] = { + { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd + { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff }, + { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe + { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff } + }; + const struct rtl_cfg2_info *p = cfg2_info; + unsigned int i; + u32 clk; + + clk = RTL_R8(Config2) & PCI_Clock_66MHz; + for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) { + if ((p->mac_version == mac_version) && (p->clk == clk)) { + RTL_W32(0x7c, p->val); + break; + } + } +} + +static void rtl_set_rx_mode(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 mc_filter[2]; /* Multicast hash filter */ + int rx_mode; + u32 tmp = 0; + + if (dev->flags & IFF_PROMISC) { + /* Unconditionally log net taps. 
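Promiscuous mode accepts every frame and fully opens the multicast hash.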
*/ + netif_notice(tp, link, dev, "Promiscuous mode enabled\n"); + rx_mode = + AcceptBroadcast | AcceptMulticast | AcceptMyPhys | + AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. */ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + struct netdev_hw_addr *ha; + + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } + } + + if (dev->features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + + tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + u32 data = mc_filter[0]; + + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(data); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_35) + mc_filter[1] = mc_filter[0] = 0xffffffff; + + RTL_W32(MAR0 + 4, mc_filter[1]); + RTL_W32(MAR0 + 0, mc_filter[0]); + + RTL_W32(RxConfig, tmp); +} + +static void rtl_hw_start_8169(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) { + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW); + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); + } + + RTL_W8(Cfg9346, Cfg9346_Unlock); + if (tp->mac_version == RTL_GIGA_MAC_VER_01 || + tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03 || + tp->mac_version == RTL_GIGA_MAC_VER_04) + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + + rtl_init_rxcfg(tp); + + RTL_W8(EarlyTxThres, NoEarlyTx); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + if (tp->mac_version == RTL_GIGA_MAC_VER_01 || + tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03 || + tp->mac_version == RTL_GIGA_MAC_VER_04) + rtl_set_rx_tx_config_registers(tp); + + tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) { + dprintk("Set MAC Reg C+CR Offset 0xE0. " + "Bit-3 and bit-14 MUST be 1\n"); + tp->cp_cmd |= (1 << 14); + } + + RTL_W16(CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(ioaddr, tp->mac_version); + + /* + * Undocumented corner. Supposedly: + * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets + */ + RTL_W16(IntrMitigate, 0x0000); + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + if (tp->mac_version != RTL_GIGA_MAC_VER_01 && + tp->mac_version != RTL_GIGA_MAC_VER_02 && + tp->mac_version != RTL_GIGA_MAC_VER_03 && + tp->mac_version != RTL_GIGA_MAC_VER_04) { + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_set_rx_tx_config_registers(tp); + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ + RTL_R8(IntrMask); + + RTL_W32(RxMissed, 0); + + rtl_set_rx_mode(dev); + + /* no early-rx interrupts */ + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); +} + +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + if (tp->csi_ops.write) + tp->csi_ops.write(tp, addr, value); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + return tp->csi_ops.read ? 
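/* ~0 means "no CSI support on this chip" */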
tp->csi_ops.read(tp, addr) : ~0; +} + +static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits) +{ + u32 csi; + + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | bits); +} + +static void rtl_csi_access_enable_1(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x17000000); +} + +static void rtl_csi_access_enable_2(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x27000000); +} + +DECLARE_RTL_COND(rtl_csiar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(CSIAR) & CSIAR_FLAG; +} + +static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 r8169_csi_read(struct rtl8169_private *tp, int addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(CSIDR) : ~0; +} + +static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | + CSIAR_FUNC_NIC); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 r8402_csi_read(struct rtl8169_private *tp, int addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? 
+ RTL_R32(CSIDR) : ~0; +} + +static void rtl_init_csi_ops(struct rtl8169_private *tp) +{ + struct csi_ops *ops = &tp->csi_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + case RTL_GIGA_MAC_VER_04: + case RTL_GIGA_MAC_VER_05: + case RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_14: + case RTL_GIGA_MAC_VER_15: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_17: + ops->write = NULL; + ops->read = NULL; + break; + + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + ops->write = r8402_csi_write; + ops->read = r8402_csi_read; + break; + + default: + ops->write = r8169_csi_write; + ops->read = r8169_csi_read; + break; + } +} + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e, + int len) +{ + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(tp, e->offset, w); + e++; + } +} + +static void rtl_disable_clock_request(struct pci_dev *pdev) +{ + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_enable_clock_request(struct pci_dev *pdev) +{ + pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +#define R8168_CPCMD_QUIRK_MASK (\ + EnableBist | \ + Mac_dbgo_oe | \ + Force_half_dup | \ + Force_rxflow_en | \ + Force_txflow_en | \ + Cxpl_dbg_sel | \ + ASF | \ + PktCntrDisable | \ + Mac_dbgo_sel) + +static void rtl_hw_start_8168bb(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + + if (tp->dev->mtu <= ETH_DATA_LEN) { + rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) | + PCI_EXP_DEVCTL_NOSNOOP_EN); + } +} + +static void rtl_hw_start_8168bef(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + rtl_hw_start_8168bb(tp); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); +} + +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(Config1, RTL_R8(Config1) | Speed_down); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_disable_clock_request(pdev); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_csi_access_enable_2(tp); + + rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void 
rtl_hw_start_8168cp_3(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + /* Magic. */ + RTL_W8(DBG_REG, 0x20); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_csi_access_enable_2(tp); + + RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0220 } + }; + + rtl_csi_access_enable_2(tp); + + rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8168c_2(tp); +} + +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) +{ + rtl_csi_access_enable_2(tp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168d(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + rtl_disable_clock_request(pdev); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168dp(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_1(tp); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_disable_clock_request(pdev); +} + +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, ~0, 0x48 }, + { 0x19, 0x20, 0x50 }, + { 0x0c, ~0, 0x20 } + }; + int i; + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) { + const struct ephy_info *e = e_info_8168d_4 + i; + u16 w; + + w = rtl_ephy_read(tp, e->offset); + rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits); + } + + rtl_enable_clock_request(pdev); +} + +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, + { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_csi_access_enable_2(tp); + + rtl_ephy_init(tp, e_info_8168e_1, 
ARRAY_SIZE(e_info_8168e_1)); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_disable_clock_request(pdev); + + /* Reset tx FIFO pointer */ + RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST); + RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST); + + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + static const struct ephy_info e_info_8168e_2[] = { + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_csi_access_enable_1(tp); + + rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_disable_clock_request(pdev); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168f(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); + + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_disable_clock_request(pdev); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + + rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) 
& ~0x07); +} + +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x1e, 0x0000, 0x4000 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + + rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC); +} + +static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); +} + +static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168g_2[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20eb } + }; + + rtl_hw_start_8168g_1(tp); + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2)); +} + +static void rtl_hw_start_8168(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; + + RTL_W16(CPlusCmd, tp->cp_cmd); + + RTL_W16(IntrMitigate, 0x5151); + + /* Work around for RxFIFO overflow. 
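+	 * On the 8168B (RTL_GIGA_MAC_VER_11) an Rx FIFO overflow can stall
+	 * the chip, so RxFIFOOver is promoted to a slow event (handled in
+	 * rtl_slow_event_work() with a full reset) and the plain RxOverflow
+	 * bit is ignored instead.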
*/ + if (tp->mac_version == RTL_GIGA_MAC_VER_11) { + tp->event_slow |= RxFIFOOver | PCSTimeout; + tp->event_slow &= ~RxOverflow; + } + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + rtl_set_rx_tx_config_registers(tp); + + RTL_R8(IntrMask); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + rtl_hw_start_8168bb(tp); + break; + + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + rtl_hw_start_8168bef(tp); + break; + + case RTL_GIGA_MAC_VER_18: + rtl_hw_start_8168cp_1(tp); + break; + + case RTL_GIGA_MAC_VER_19: + rtl_hw_start_8168c_1(tp); + break; + + case RTL_GIGA_MAC_VER_20: + rtl_hw_start_8168c_2(tp); + break; + + case RTL_GIGA_MAC_VER_21: + rtl_hw_start_8168c_3(tp); + break; + + case RTL_GIGA_MAC_VER_22: + rtl_hw_start_8168c_4(tp); + break; + + case RTL_GIGA_MAC_VER_23: + rtl_hw_start_8168cp_2(tp); + break; + + case RTL_GIGA_MAC_VER_24: + rtl_hw_start_8168cp_3(tp); + break; + + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + rtl_hw_start_8168d(tp); + break; + + case RTL_GIGA_MAC_VER_28: + rtl_hw_start_8168d_4(tp); + break; + + case RTL_GIGA_MAC_VER_31: + rtl_hw_start_8168dp(tp); + break; + + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl_hw_start_8168e_1(tp); + break; + case RTL_GIGA_MAC_VER_34: + rtl_hw_start_8168e_2(tp); + break; + + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + rtl_hw_start_8168f_1(tp); + break; + + case RTL_GIGA_MAC_VER_38: + rtl_hw_start_8411(tp); + break; + + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + rtl_hw_start_8168g_1(tp); + break; + case RTL_GIGA_MAC_VER_42: + rtl_hw_start_8168g_2(tp); + break; + + default: + printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", + dev->name, tp->mac_version); + break; + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + + rtl_set_rx_mode(dev); + + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); +} + +#define R810X_CPCMD_QUIRK_MASK (\ + EnableBist | \ + Mac_dbgo_oe | \ + Force_half_dup | \ + Force_rxflow_en | \ + Force_txflow_en | \ + Cxpl_dbg_sel | \ + ASF | \ + PktCntrDisable | \ + Mac_dbgo_sel) + +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_csi_access_enable_2(tp); + + RTL_W8(DBG_REG, FIX_NAK_1); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + cfg1 = RTL_R8(Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); +} + +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8102e_2(tp); + + rtl_ephy_write(tp, 0x03, 0xc2f9); +} + +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + 
static const struct ephy_info e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000); + + RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + + rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); +} + +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) +{ + rtl_hw_start_8105e_1(tp); + rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + rtl_csi_access_enable_2(tp); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); + + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); +} + +static void rtl_hw_start_8106(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); +} + +static void rtl_hw_start_8101(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + if (tp->mac_version >= RTL_GIGA_MAC_VER_30) + tp->event_slow &= ~RxFIFOOver; + + if (tp->mac_version == RTL_GIGA_MAC_VER_13 || + tp->mac_version == RTL_GIGA_MAC_VER_16) + pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_NOSNOOP_EN); + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; + RTL_W16(CPlusCmd, tp->cp_cmd); + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + rtl_set_rx_tx_config_registers(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + rtl_hw_start_8102e_1(tp); + break; + + case RTL_GIGA_MAC_VER_08: + rtl_hw_start_8102e_3(tp); + break; + + case RTL_GIGA_MAC_VER_09: + rtl_hw_start_8102e_2(tp); + break; + + case RTL_GIGA_MAC_VER_29: + rtl_hw_start_8105e_1(tp); + break; + case RTL_GIGA_MAC_VER_30: + rtl_hw_start_8105e_2(tp); + break; + + case RTL_GIGA_MAC_VER_37: + rtl_hw_start_8402(tp); + break; + + case RTL_GIGA_MAC_VER_39: + rtl_hw_start_8106(tp); + break; + case RTL_GIGA_MAC_VER_43: + rtl_hw_start_8168g_2(tp); + break; + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + 
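+	/*
+	 * Same undocumented IntrMitigate layout as noted in
+	 * rtl_hw_start_8169():
+	 *
+	 *   (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
+	 *
+	 * All four fields are zeroed here, so mitigation is off and every
+	 * Rx/Tx event raises an interrupt immediately.
+	 */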
RTL_W16(IntrMitigate, 0x0000); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + + rtl_set_rx_mode(dev); + + RTL_R8(IntrMask); + + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); +} + +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (new_mtu < ETH_ZLEN || + new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max) + return -EINVAL; + + if (new_mtu > ETH_DATA_LEN) + rtl_hw_jumbo_enable(tp); + else + rtl_hw_jumbo_disable(tp); + + dev->mtu = new_mtu; + netdev_update_features(dev); + + return 0; +} + +static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) +{ + desc->addr = cpu_to_le64(0x0badbadbadbadbadull); + desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); +} + +static void rtl8169_free_rx_databuff(struct rtl8169_private *tp, + void **data_buff, struct RxDesc *desc) +{ + dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz, + DMA_FROM_DEVICE); + + kfree(*data_buff); + *data_buff = NULL; + rtl8169_make_unusable_by_asic(desc); +} + +static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz); +} + +static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, + u32 rx_buf_sz) +{ + desc->addr = cpu_to_le64(mapping); + wmb(); + rtl8169_mark_to_asic(desc, rx_buf_sz); +} + +static inline void *rtl8169_align(void *data) +{ + return (void *)ALIGN((long)data, 16); +} + +static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) +{ + void *data; + dma_addr_t mapping; + struct device *d = &tp->pci_dev->dev; + struct net_device *dev = tp->dev; + int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1; + + data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + if (!data) + return NULL; + + if (rtl8169_align(data) != data) { + kfree(data); + data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node); + if (!data) + return NULL; + } + + mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n"); + goto err_out; + } + + rtl8169_map_to_asic(desc, mapping, rx_buf_sz); + return data; + +err_out: + kfree(data); + return NULL; +} + +static void rtl8169_rx_clear(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + if (tp->Rx_databuff[i]) { + rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i, + tp->RxDescArray + i); + } + } +} + +static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc) +{ + desc->opts1 |= cpu_to_le32(RingEnd); +} + +static int rtl8169_rx_fill(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + void *data; + + if (tp->Rx_databuff[i]) + continue; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + rtl8169_make_unusable_by_asic(tp->RxDescArray + i); + goto err_out; + } + tp->Rx_databuff[i] = data; + } + + rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); + return 0; + +err_out: + rtl8169_rx_clear(tp); + return -ENOMEM; +} + +static int rtl8169_init_ring(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); + memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *)); + + return rtl8169_rx_fill(tp); +} + +static void 
rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb, + struct TxDesc *desc) +{ + unsigned int len = tx_skb->len; + + dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE); + + desc->opts1 = 0x00; + desc->opts2 = 0x00; + desc->addr = 0x00; + tx_skb->len = 0; +} + +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, + tp->TxDescArray + entry); + if (skb) { + tp->dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + tx_skb->skb = NULL; + } + } + } +} + +static void rtl8169_tx_clear(struct rtl8169_private *tp) +{ + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + tp->cur_tx = tp->dirty_tx = 0; +} + +static void rtl_reset_work(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + int i; + + napi_disable(&tp->napi); + netif_stop_queue(dev); + synchronize_sched(); + + rtl8169_hw_reset(tp); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); + + rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); + + napi_enable(&tp->napi); + rtl_hw_start(dev); + netif_wake_queue(dev); + rtl8169_check_link_status(dev, tp, tp->mmio_addr); +} + +static void rtl8169_tx_timeout(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + u32 *opts) +{ + struct skb_shared_info *info = skb_shinfo(skb); + unsigned int cur_frag, entry; + struct TxDesc * uninitialized_var(txd); + struct device *d = &tp->pci_dev->dev; + + entry = tp->cur_tx; + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + dma_addr_t mapping; + u32 status, len; + void *addr; + + entry = (entry + 1) % NUM_TX_DESC; + + txd = tp->TxDescArray + entry; + len = skb_frag_size(frag); + addr = skb_frag_address(frag); + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, tp->dev, + "Failed to map TX fragments DMA!\n"); + goto err_out; + } + + /* Anti gcc 2.95.3 bugware (sic) */ + status = opts[0] | len | + (RingEnd * !((entry + 1) % NUM_TX_DESC)); + + txd->opts1 = cpu_to_le32(status); + txd->opts2 = cpu_to_le32(opts[1]); + txd->addr = cpu_to_le64(mapping); + + tp->tx_skb[entry].len = len; + } + + if (cur_frag) { + tp->tx_skb[entry].skb = skb; + txd->opts1 |= cpu_to_le32(LastFrag); + } + + return cur_frag; + +err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static bool rtl_skb_pad(struct sk_buff *skb) +{ + if (skb_padto(skb, ETH_ZLEN)) + return false; + skb_put(skb, ETH_ZLEN - skb->len); + return true; +} + +static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) +{ + return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; +} + +static inline bool rtl8169_tso_csum(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version; + u32 mss = skb_shinfo(skb)->gso_size; + int offset = info->opts_offset; + + if (mss) { + opts[0] |= TD_LSO; + opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift; + } 
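+	/*
+	 * No LSO requested: fall back to plain Tx checksum offload below,
+	 * flagging TCP or UDP in the descriptor when the stack handed us
+	 * a CHECKSUM_PARTIAL skb.
+	 */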
else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) + return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb); + + if (ip->protocol == IPPROTO_TCP) + opts[offset] |= info->checksum.tcp; + else if (ip->protocol == IPPROTO_UDP) + opts[offset] |= info->checksum.udp; + else + WARN_ON_ONCE(1); + } else { + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) + return rtl_skb_pad(skb); + } + return true; +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % NUM_TX_DESC; + struct TxDesc *txd = tp->TxDescArray + entry; + void __iomem *ioaddr = tp->mmio_addr; + struct device *d = &tp->pci_dev->dev; + dma_addr_t mapping; + u32 status, len; + u32 opts[2]; + int frags; + + if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { + netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) + goto err_stop_0; + + opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb)); + opts[0] = DescOwn; + + if (!rtl8169_tso_csum(tp, skb, opts)) + goto err_update_stats; + + len = skb_headlen(skb); + mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, dev, "Failed to map TX DMA!\n"); + goto err_dma_0; + } + + tp->tx_skb[entry].len = len; + txd->addr = cpu_to_le64(mapping); + + frags = rtl8169_xmit_frags(tp, skb, opts); + if (frags < 0) + goto err_dma_1; + else if (frags) + opts[0] |= FirstFrag; + else { + opts[0] |= FirstFrag | LastFrag; + tp->tx_skb[entry].skb = skb; + } + + txd->opts2 = cpu_to_le32(opts[1]); + + skb_tx_timestamp(skb); + + wmb(); + + /* Anti gcc 2.95.3 bugware (sic) */ + status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); + txd->opts1 = cpu_to_le32(status); + + tp->cur_tx += frags + 1; + + wmb(); + + RTL_W8(TxPoll, NPQ); + + mmiowb(); + + if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. + */ + smp_mb(); + if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) + netif_wake_queue(dev); + } + + return NETDEV_TX_OK; + +err_dma_1: + rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); +err_dma_0: + dev_kfree_skb_any(skb); +err_update_stats: + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + +err_stop_0: + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +static void rtl8169_pcierr_interrupt(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + u16 pci_status, pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + pci_read_config_word(pdev, PCI_STATUS, &pci_status); + + netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n", + pci_cmd, pci_status); + + /* + * The recovery sequence below admits a very elaborated explanation: + * - it seems to work; + * - I did not see what else could be done; + * - it makes iop3xx happy. + * + * Feel free to adjust to your needs. 
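+	 *
+	 * In short: parity error reporting is turned off on devices known
+	 * to report broken parity, the latched PCI_STATUS error bits are
+	 * cleared by writing them back, and the chip is reset with a full
+	 * driver reset scheduled afterwards.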
+ */ + if (pdev->broken_parity_status) + pci_cmd &= ~PCI_COMMAND_PARITY; + else + pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY; + + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + + pci_write_config_word(pdev, PCI_STATUS, + pci_status & (PCI_STATUS_DETECTED_PARITY | + PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT)); + + /* The infamous DAC f*ckup only happens at boot time */ + if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) { + void __iomem *ioaddr = tp->mmio_addr; + + netif_info(tp, intr, dev, "disabling PCI DAC\n"); + tp->cp_cmd &= ~PCIDAC; + RTL_W16(CPlusCmd, tp->cp_cmd); + dev->features &= ~NETIF_F_HIGHDMA; + } + + rtl8169_hw_reset(tp); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) +{ + unsigned int dirty_tx, tx_left; + + dirty_tx = tp->dirty_tx; + smp_rmb(); + tx_left = tp->cur_tx - dirty_tx; + + while (tx_left > 0) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + u32 status; + + rmb(); + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, + tp->TxDescArray + entry); + if (status & LastFrag) { + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets++; + tp->tx_stats.bytes += tx_skb->skb->len; + u64_stats_update_end(&tp->tx_stats.syncp); + dev_kfree_skb_any(tx_skb->skb); + tx_skb->skb = NULL; + } + dirty_tx++; + tx_left--; + } + + if (tp->dirty_tx != dirty_tx) { + tp->dirty_tx = dirty_tx; + /* Sync with rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read barrier) + * May the current thread miss the stopped queue condition, + * a racing xmit thread can only have a right view of the + * ring status. + */ + smp_mb(); + if (netif_queue_stopped(dev) && + TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { + netif_wake_queue(dev); + } + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). 
-- FR + */ + if (tp->cur_tx != dirty_tx) { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(TxPoll, NPQ); + } + } +} + +static inline int rtl8169_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) +{ + u32 status = opts1 & RxProtoMask; + + if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || + ((status == RxProtoUDP) && !(opts1 & UDPFail))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +static struct sk_buff *rtl8169_try_rx_copy(void *data, + struct rtl8169_private *tp, + int pkt_size, + dma_addr_t addr) +{ + struct sk_buff *skb; + struct device *d = &tp->pci_dev->dev; + + data = rtl8169_align(data); + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(data); + skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); + if (skb) + memcpy(skb->data, data, pkt_size); + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + return skb; +} + +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) +{ + unsigned int cur_rx, rx_left; + unsigned int count; + + cur_rx = tp->cur_rx; + + for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) { + unsigned int entry = cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + u32 status; + + rmb(); + status = le32_to_cpu(desc->opts1) & tp->opts1_mask; + + if (status & DescOwn) + break; + if (unlikely(status & RxRES)) { + netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n", + status); + dev->stats.rx_errors++; + if (status & (RxRWT | RxRUNT)) + dev->stats.rx_length_errors++; + if (status & RxCRC) + dev->stats.rx_crc_errors++; + if (status & RxFOVF) { + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); + dev->stats.rx_fifo_errors++; + } + if ((status & (RxRUNT | RxCRC)) && + !(status & (RxRWT | RxFOVF)) && + (dev->features & NETIF_F_RXALL)) + goto process_pkt; + } else { + struct sk_buff *skb; + dma_addr_t addr; + int pkt_size; + +process_pkt: + addr = le64_to_cpu(desc->addr); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size = (status & 0x00003fff) - 4; + else + pkt_size = status & 0x00003fff; + + /* + * The driver does not support incoming fragmented + * frames. They are seen as a symptom of over-mtu + * sized frames. 
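+	 * A complete packet carries both FirstFrag and LastFrag in its Rx
+	 * descriptor; rtl8169_fragmented_frame() flags anything else, and
+	 * such frames are dropped and accounted as length errors.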
+ */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + goto release_descriptor; + } + + skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], + tp, pkt_size, addr); + if (!skb) { + dev->stats.rx_dropped++; + goto release_descriptor; + } + + rtl8169_rx_csum(skb, status); + skb_put(skb, pkt_size); + skb->protocol = eth_type_trans(skb, dev); + + rtl8169_rx_vlan_tag(desc, skb); + + napi_gro_receive(&tp->napi, skb); + + u64_stats_update_begin(&tp->rx_stats.syncp); + tp->rx_stats.packets++; + tp->rx_stats.bytes += pkt_size; + u64_stats_update_end(&tp->rx_stats.syncp); + } +release_descriptor: + desc->opts2 = 0; + wmb(); + rtl8169_mark_to_asic(desc, rx_buf_sz); + } + + count = cur_rx - tp->cur_rx; + tp->cur_rx = cur_rx; + + return count; +} + +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct rtl8169_private *tp = netdev_priv(dev); + int handled = 0; + u16 status; + + status = rtl_get_events(tp); + if (status && status != 0xffff) { + status &= RTL_EVENT_NAPI | tp->event_slow; + if (status) { + handled = 1; + + rtl_irq_disable(tp); + napi_schedule(&tp->napi); + } + } + return IRQ_RETVAL(handled); +} + +/* + * Workqueue context. + */ +static void rtl_slow_event_work(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + u16 status; + + status = rtl_get_events(tp) & tp->event_slow; + rtl_ack_events(tp, status); + + if (unlikely(status & RxFIFOOver)) { + switch (tp->mac_version) { + /* Work around for rx fifo overflow */ + case RTL_GIGA_MAC_VER_11: + netif_stop_queue(dev); + /* XXX - Hack alert. See rtl_task(). */ + set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); + default: + break; + } + } + + if (unlikely(status & SYSErr)) + rtl8169_pcierr_interrupt(dev); + + if (status & LinkChg) + __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); + + rtl_irq_enable_all(tp); +} + +static void rtl_task(struct work_struct *work) +{ + static const struct { + int bitnr; + void (*action)(struct rtl8169_private *); + } rtl_work[] = { + /* XXX - keep rtl_slow_event_work() as first element. 
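+	 * Slow events (link change, PCI error, Rx FIFO overflow) must be
+	 * acknowledged before a pending reset or PHY action runs, and the
+	 * table below is walked strictly in order.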
*/ + { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work }, + { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work }, + { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work } + }; + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + struct net_device *dev = tp->dev; + int i; + + rtl_lock_work(tp); + + if (!netif_running(dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + for (i = 0; i < ARRAY_SIZE(rtl_work); i++) { + bool pending; + + pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags); + if (pending) + rtl_work[i].action(tp); + } + +out_unlock: + rtl_unlock_work(tp); +} + +static int rtl8169_poll(struct napi_struct *napi, int budget) +{ + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; + u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow; + int work_done= 0; + u16 status; + + status = rtl_get_events(tp); + rtl_ack_events(tp, status & ~tp->event_slow); + + if (status & RTL_EVENT_NAPI_RX) + work_done = rtl_rx(dev, tp, (u32) budget); + + if (status & RTL_EVENT_NAPI_TX) + rtl_tx(dev, tp); + + if (status & tp->event_slow) { + enable_mask &= ~tp->event_slow; + + rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING); + } + + if (work_done < budget) { + napi_complete(napi); + + rtl_irq_enable(tp, enable_mask); + mmiowb(); + } + + return work_done; +} + +static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) + return; + + dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff); + RTL_W32(RxMissed, 0); +} + +static void rtl8169_down(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + del_timer_sync(&tp->timer); + + napi_disable(&tp->napi); + netif_stop_queue(dev); + + rtl8169_hw_reset(tp); + /* + * At this point device interrupts can not be enabled in any function, + * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task) + * and napi is disabled (rtl8169_poll). + */ + rtl8169_rx_missed(dev, ioaddr); + + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_sched(); + + rtl8169_tx_clear(tp); + + rtl8169_rx_clear(tp); + + rtl_pll_power_down(tp); +} + +static int rtl8169_close(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + pm_runtime_get_sync(&pdev->dev); + + /* Update counters before going down */ + rtl8169_update_counters(dev); + + rtl_lock_work(tp); + clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + + rtl8169_down(dev); + rtl_unlock_work(tp); + + free_irq(pdev->irq, dev); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(tp->pci_dev->irq, dev); +} +#endif + +static int rtl_open(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. 
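+	 * (dma_alloc_coherent() returns at least page-aligned memory, so
+	 * the 256-byte constraint is met without any manual padding.)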
+ */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto err_pm_runtime_put; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(dev); + if (retval < 0) + goto err_free_rx_1; + + INIT_WORK(&tp->wk.work, rtl_task); + + smp_mb(); + + rtl_request_firmware(tp); + + retval = request_irq(pdev->irq, rtl8169_interrupt, + (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED, + dev->name, dev); + if (retval < 0) + goto err_release_fw_2; + + rtl_lock_work(tp); + + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + + napi_enable(&tp->napi); + + rtl8169_init_phy(dev, tp); + + __rtl8169_set_features(dev, dev->features); + + rtl_pll_power_up(tp); + + rtl_hw_start(dev); + + netif_start_queue(dev); + + rtl_unlock_work(tp); + + tp->saved_wolopts = 0; + pm_runtime_put_noidle(&pdev->dev); + + rtl8169_check_link_status(dev, tp, ioaddr); +out: + return retval; + +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; +err_pm_runtime_put: + pm_runtime_put_noidle(&pdev->dev); + goto out; +} + +static struct rtnl_link_stats64 * +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + unsigned int start; + + if (netif_running(dev)) + rtl8169_rx_missed(dev, ioaddr); + + do { + start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp); + stats->rx_packets = tp->rx_stats.packets; + stats->rx_bytes = tp->rx_stats.bytes; + } while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start)); + + + do { + start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp); + stats->tx_packets = tp->tx_stats.packets; + stats->tx_bytes = tp->tx_stats.bytes; + } while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start)); + + stats->rx_dropped = dev->stats.rx_dropped; + stats->tx_dropped = dev->stats.tx_dropped; + stats->rx_length_errors = dev->stats.rx_length_errors; + stats->rx_errors = dev->stats.rx_errors; + stats->rx_crc_errors = dev->stats.rx_crc_errors; + stats->rx_fifo_errors = dev->stats.rx_fifo_errors; + stats->rx_missed_errors = dev->stats.rx_missed_errors; + + return stats; +} + +static void rtl8169_net_suspend(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + netif_device_detach(dev); + netif_stop_queue(dev); + + rtl_lock_work(tp); + napi_disable(&tp->napi); + clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_unlock_work(tp); + + rtl_pll_power_down(tp); +} + +#ifdef CONFIG_PM + +static int rtl8169_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + + rtl8169_net_suspend(dev); + + return 0; +} + +static void __rtl8169_resume(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + netif_device_attach(dev); + + rtl_pll_power_up(tp); + + rtl_lock_work(tp); + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_unlock_work(tp); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_resume(struct device *device) +{ + struct pci_dev 
*pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_init_phy(dev, tp); + + if (netif_running(dev)) + __rtl8169_resume(dev); + + return 0; +} + +static int rtl8169_runtime_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (!tp->TxDescArray) + return 0; + + rtl_lock_work(tp); + tp->saved_wolopts = __rtl8169_get_wol(tp); + __rtl8169_set_wol(tp, WAKE_ANY); + rtl_unlock_work(tp); + + rtl8169_net_suspend(dev); + + return 0; +} + +static int rtl8169_runtime_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (!tp->TxDescArray) + return 0; + + rtl_lock_work(tp); + __rtl8169_set_wol(tp, tp->saved_wolopts); + tp->saved_wolopts = 0; + rtl_unlock_work(tp); + + rtl8169_init_phy(dev, tp); + + __rtl8169_resume(dev); + + return 0; +} + +static int rtl8169_runtime_idle(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + return tp->TxDescArray ? -EBUSY : 0; +} + +static const struct dev_pm_ops rtl8169_pm_ops = { + .suspend = rtl8169_suspend, + .resume = rtl8169_resume, + .freeze = rtl8169_suspend, + .thaw = rtl8169_resume, + .poweroff = rtl8169_suspend, + .restore = rtl8169_resume, + .runtime_suspend = rtl8169_runtime_suspend, + .runtime_resume = rtl8169_runtime_resume, + .runtime_idle = rtl8169_runtime_idle, +}; + +#define RTL8169_PM_OPS (&rtl8169_pm_ops) + +#else /* !CONFIG_PM */ + +#define RTL8169_PM_OPS NULL + +#endif /* !CONFIG_PM */ + +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* WoL fails with 8168b when the receiver is disabled. 
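+	 * Keep the receiver enabled (CmdRxEnb) with bus mastering off on
+	 * those chips so the wakeup logic can still see incoming frames.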
*/ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + pci_clear_master(tp->pci_dev); + + RTL_W8(ChipCmd, CmdRxEnb); + /* PCI commit */ + RTL_R8(ChipCmd); + break; + default: + break; + } +} + +static void rtl_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &pdev->dev; + + pm_runtime_get_sync(d); + + rtl8169_net_suspend(dev); + + /* Restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); + + rtl8169_hw_reset(tp); + + if (system_state == SYSTEM_POWER_OFF) { + if (__rtl8169_get_wol(tp) & WAKE_ANY) { + rtl_wol_suspend_quirk(tp); + rtl_wol_shutdown_quirk(tp); + } + + pci_wake_from_d3(pdev, true); + pci_set_power_state(pdev, PCI_D3hot); + } + + pm_runtime_put_noidle(d); +} + +static void rtl_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + rtl8168_driver_stop(tp); + } + + cancel_work_sync(&tp->wk.work); + + netif_napi_del(&tp->napi); + + unregister_netdev(dev); + + rtl_release_firmware(tp); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + /* restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); + + rtl_disable_msi(pdev, tp); + rtl8169_release_board(pdev, dev, tp->mmio_addr); + pci_set_drvdata(pdev, NULL); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_do_ioctl = rtl8169_ioctl, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static const struct rtl_cfg_info { + void (*hw_start)(struct net_device *); + unsigned int region; + unsigned int align; + u16 event_slow; + unsigned features; + u8 default_ver; +} rtl_cfg_infos [] = { + [RTL_CFG_0] = { + .hw_start = rtl_hw_start_8169, + .region = 1, + .align = 0, + .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver, + .features = RTL_FEATURE_GMII, + .default_ver = RTL_GIGA_MAC_VER_01, + }, + [RTL_CFG_1] = { + .hw_start = rtl_hw_start_8168, + .region = 2, + .align = 8, + .event_slow = SYSErr | LinkChg | RxOverflow, + .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_11, + }, + [RTL_CFG_2] = { + .hw_start = rtl_hw_start_8101, + .region = 2, + .align = 8, + .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver | + PCSTimeout, + .features = RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_13, + } +}; + +/* Cfg9346_Unlock assumed. */ +static unsigned rtl_try_msi(struct rtl8169_private *tp, + const struct rtl_cfg_info *cfg) +{ + void __iomem *ioaddr = tp->mmio_addr; + unsigned msi = 0; + u8 cfg2; + + cfg2 = RTL_R8(Config2) & ~MSIEnable; + if (cfg->features & RTL_FEATURE_MSI) { + if (pci_enable_msi(tp->pci_dev)) { + netif_info(tp, hw, tp->dev, "no MSI. 
Back to INTx.\n"); + } else { + cfg2 |= MSIEnable; + msi = RTL_FEATURE_MSI; + } + } + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + RTL_W8(Config2, cfg2); + return msi; +} + +DECLARE_RTL_COND(rtl_link_list_ready_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R8(MCU) & LINK_LIST_RDY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY; +} + +static void rtl_hw_init_8168g(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u32 data; + + tp->ocp_base = OCP_STD_PHY_BASE; + + RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42)) + return; + + if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42)) + return; + + RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + data = r8168_mac_ocp_read(tp, 0xe8de); + data &= ~(1 << 14); + r8168_mac_ocp_write(tp, 0xe8de, data); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) + return; + + data = r8168_mac_ocp_read(tp, 0xe8de); + data |= (1 << 15); + r8168_mac_ocp_write(tp, 0xe8de, data); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) + return; +} + +static void rtl_hw_initialize(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + rtl_hw_init_8168g(tp); + break; + + default: + break; + } +} + +static int +rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; + const unsigned int region = cfg->region; + struct rtl8169_private *tp; + struct mii_if_info *mii; + struct net_device *dev; + void __iomem *ioaddr; + int chipset, i; + int rc; + + if (netif_msg_drv(&debug)) { + printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", + MODULENAME, RTL8169_VERSION); + } + + dev = alloc_etherdev(sizeof (*tp)); + if (!dev) { + rc = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); + + mii = &tp->mii; + mii->dev = dev; + mii->mdio_read = rtl_mdio_read; + mii->mdio_write = rtl_mdio_write; + mii->phy_id_mask = 0x1f; + mii->reg_num_mask = 0x1f; + mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); + + /* disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ + rc = pci_enable_device(pdev); + if (rc < 0) { + netif_err(tp, probe, dev, "enable failure\n"); + goto err_out_free_dev_1; + } + + if (pci_set_mwi(pdev) < 0) + netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); + + /* make sure PCI base addr 1 is MMIO */ + if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { + netif_err(tp, probe, dev, + "region #%d not an MMIO resource, aborting\n", + region); + rc = -ENODEV; + goto err_out_mwi_2; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + netif_err(tp, probe, dev, + "Invalid PCI region size(s), aborting\n"); + rc = -ENODEV; + goto err_out_mwi_2; + } + + rc = pci_request_regions(pdev, MODULENAME); + if (rc < 0) { + netif_err(tp, probe, dev, "could not request regions\n"); + goto err_out_mwi_2; + } + + tp->cp_cmd = RxChkSum; + + if ((sizeof(dma_addr_t) > 4) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { + tp->cp_cmd |= PCIDAC; + dev->features |= NETIF_F_HIGHDMA; + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { + netif_err(tp, probe, dev, "DMA configuration failed\n"); + goto err_out_free_res_3; + } + } + + /* ioremap MMIO region */ + ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); + if (!ioaddr) { + netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); + rc = -EIO; + goto err_out_free_res_3; + } + tp->mmio_addr = ioaddr; + + if (!pci_is_pcie(pdev)) + netif_info(tp, probe, dev, "not PCI Express\n"); + + /* Identify chip attached to board */ + rtl8169_get_mac_version(tp, dev, cfg->default_ver); + + rtl_init_rxcfg(tp); + + rtl_irq_disable(tp); + + rtl_hw_initialize(tp); + + rtl_hw_reset(tp); + + rtl_ack_events(tp, 0xffff); + + pci_set_master(pdev); + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. 
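+	 * Forcing RxVlan keeps hardware VLAN stripping permanently enabled
+	 * on the 8110SCd; toggling it is disallowed further down by
+	 * clearing NETIF_F_HW_VLAN_CTAG_RX from hw_features.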
+ */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + tp->cp_cmd |= RxVlan; + + rtl_init_mdio_ops(tp); + rtl_init_pll_power_ops(tp); + rtl_init_jumbo_ops(tp); + rtl_init_csi_ops(tp); + + rtl8169_print_mac_version(tp); + + chipset = tp->mac_version; + tp->txd_version = rtl_chip_infos[chipset].txd_version; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(Config1, RTL_R8(Config1) | PMEnable); + RTL_W8(Config5, RTL_R8(Config5) & PMEStatus); + if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) + tp->features |= RTL_FEATURE_WOL; + if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) + tp->features |= RTL_FEATURE_WOL; + tp->features |= rtl_try_msi(tp, cfg); + RTL_W8(Cfg9346, Cfg9346_Lock); + + if (rtl_tbi_enabled(tp)) { + tp->set_speed = rtl8169_set_speed_tbi; + tp->get_settings = rtl8169_gset_tbi; + tp->phy_reset_enable = rtl8169_tbi_reset_enable; + tp->phy_reset_pending = rtl8169_tbi_reset_pending; + tp->link_ok = rtl8169_tbi_link_ok; + tp->do_ioctl = rtl_tbi_ioctl; + } else { + tp->set_speed = rtl8169_set_speed_xmii; + tp->get_settings = rtl8169_gset_xmii; + tp->phy_reset_enable = rtl8169_xmii_reset_enable; + tp->phy_reset_pending = rtl8169_xmii_reset_pending; + tp->link_ok = rtl8169_xmii_link_ok; + tp->do_ioctl = rtl_xmii_ioctl; + } + + mutex_init(&tp->wk.mutex); + + /* Get MAC address */ + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = RTL_R8(MAC0 + i); + + SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); + dev->watchdog_timeo = RTL8169_TX_TIMEOUT; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); + + /* don't enable SG, IP_CSUM and TSO by default - it might not work + * properly for all devices */ + dev->features |= NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HIGHDMA; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* 8110SCd requires hardware Rx VLAN - disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + tp->hw_start = cfg->hw_start; + tp->event_slow = cfg->event_slow; + + tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? + ~(RxBOVF | RxFOVF) : ~0; + + init_timer(&tp->timer); + tp->timer.data = (unsigned long) dev; + tp->timer.function = rtl8169_phy_timer; + + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; + + rc = register_netdev(dev); + if (rc < 0) + goto err_out_msi_4; + + pci_set_drvdata(pdev, dev); + + netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", + rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr, + (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq); + if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { + netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " + "tx checksumming: %s]\n", + rtl_chip_infos[chipset].jumbo_max, + rtl_chip_infos[chipset].jumbo_tx_csum ? 
"ok" : "ko"); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + rtl8168_driver_start(tp); + } + + device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + netif_carrier_off(dev); + +out: + return rc; + +err_out_msi_4: + netif_napi_del(&tp->napi); + rtl_disable_msi(pdev, tp); + iounmap(ioaddr); +err_out_free_res_3: + pci_release_regions(pdev); +err_out_mwi_2: + pci_clear_mwi(pdev); + pci_disable_device(pdev); +err_out_free_dev_1: + free_netdev(dev); + goto out; +} + +static struct pci_driver rtl8169_pci_driver = { + .name = MODULENAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl_init_one, + .remove = rtl_remove_one, + .shutdown = rtl_shutdown, + .driver.pm = RTL8169_PM_OPS, +}; + +module_pci_driver(rtl8169_pci_driver); diff --git a/addons/r8169/src/4.4.180/Makefile b/addons/r8169/src/4.4.180/Makefile new file mode 100644 index 00000000..340f8cbe --- /dev/null +++ b/addons/r8169/src/4.4.180/Makefile @@ -0,0 +1,2 @@ +obj-m += r8169.o +obj-m += mii.o diff --git a/addons/r8169/src/4.4.180/mii.c b/addons/r8169/src/4.4.180/mii.c new file mode 100644 index 00000000..993570b1 --- /dev/null +++ b/addons/r8169/src/4.4.180/mii.c @@ -0,0 +1,474 @@ +/* + + mii.c: MII interface library + + Maintained by Jeff Garzik + Copyright 2001,2002 Jeff Garzik + + Various code came from myson803.c and other files by + Donald Becker. Copyright: + + Written 1998-2002 by Donald Becker. + + This software may be used and distributed according + to the terms of the GNU General Public License (GPL), + incorporated herein by reference. Drivers based on + or derived from this code fall under the GPL and must + retain the authorship, copyright and license notice. + This file is not a complete program and may only be + used when the entire operating system is licensed + under the GPL. + + The author may be reached as becker@scyld.com, or C/O + Scyld Computing Corporation + 410 Severn Ave., Suite 210 + Annapolis MD 21403 + + + */ + +#include +#include +#include +#include +#include + +static u32 mii_get_an(struct mii_if_info *mii, u16 addr) +{ + int advert; + + advert = mii->mdio_read(mii->dev, mii->phy_id, addr); + + return mii_lpa_to_ethtool_lpa_t(advert); +} + +/** + * mii_ethtool_gset - get settings that are specified in @ecmd + * @mii: MII interface + * @ecmd: requested ethtool_cmd + * + * The @ecmd parameter is expected to have been cleared before calling + * mii_ethtool_gset(). + * + * Returns 0 for success, negative on error. 
+ */ +int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) +{ + struct net_device *dev = mii->dev; + u16 bmcr, bmsr, ctrl1000 = 0, stat1000 = 0; + u32 nego; + + ecmd->supported = + (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); + if (mii->supports_gmii) + ecmd->supported |= SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full; + + /* only supports twisted-pair */ + ecmd->port = PORT_MII; + + /* only supports internal transceiver */ + ecmd->transceiver = XCVR_INTERNAL; + + /* this isn't fully supported at higher layers */ + ecmd->phy_address = mii->phy_id; + ecmd->mdio_support = ETH_MDIO_SUPPORTS_C22; + + ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; + + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + bmsr = mii->mdio_read(dev, mii->phy_id, MII_BMSR); + if (mii->supports_gmii) { + ctrl1000 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); + stat1000 = mii->mdio_read(dev, mii->phy_id, MII_STAT1000); + } + if (bmcr & BMCR_ANENABLE) { + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; + + ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE); + if (mii->supports_gmii) + ecmd->advertising |= + mii_ctrl1000_to_ethtool_adv_t(ctrl1000); + + if (bmsr & BMSR_ANEGCOMPLETE) { + ecmd->lp_advertising = mii_get_an(mii, MII_LPA); + ecmd->lp_advertising |= + mii_stat1000_to_ethtool_lpa_t(stat1000); + } else { + ecmd->lp_advertising = 0; + } + + nego = ecmd->advertising & ecmd->lp_advertising; + + if (nego & (ADVERTISED_1000baseT_Full | + ADVERTISED_1000baseT_Half)) { + ethtool_cmd_speed_set(ecmd, SPEED_1000); + ecmd->duplex = !!(nego & ADVERTISED_1000baseT_Full); + } else if (nego & (ADVERTISED_100baseT_Full | + ADVERTISED_100baseT_Half)) { + ethtool_cmd_speed_set(ecmd, SPEED_100); + ecmd->duplex = !!(nego & ADVERTISED_100baseT_Full); + } else { + ethtool_cmd_speed_set(ecmd, SPEED_10); + ecmd->duplex = !!(nego & ADVERTISED_10baseT_Full); + } + } else { + ecmd->autoneg = AUTONEG_DISABLE; + + ethtool_cmd_speed_set(ecmd, + ((bmcr & BMCR_SPEED1000 && + (bmcr & BMCR_SPEED100) == 0) ? + SPEED_1000 : + ((bmcr & BMCR_SPEED100) ? + SPEED_100 : SPEED_10))); + ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; + } + + mii->full_duplex = ecmd->duplex; + + /* ignore maxtxpkt, maxrxpkt for now */ + + return 0; +} + +/** + * mii_ethtool_sset - set settings that are specified in @ecmd + * @mii: MII interface + * @ecmd: requested ethtool_cmd + * + * Returns 0 for success, negative on error. 
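+ *
+ * Hypothetical sketch of forcing 100 Mb/s full duplex through this
+ * helper (normally the ethtool core fills @ecmd from userspace):
+ *
+ *	struct ethtool_cmd cmd = {
+ *		.port		= PORT_MII,
+ *		.transceiver	= XCVR_INTERNAL,
+ *		.phy_address	= mii->phy_id,
+ *		.autoneg	= AUTONEG_DISABLE,
+ *		.duplex		= DUPLEX_FULL,
+ *	};
+ *
+ *	ethtool_cmd_speed_set(&cmd, SPEED_100);
+ *	err = mii_ethtool_sset(mii, &cmd);
+ *
+ * (Each of these fields is checked by the validation at the top of the
+ * function, so an inconsistent request fails with -EINVAL.)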
+ */ +int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) +{ + struct net_device *dev = mii->dev; + u32 speed = ethtool_cmd_speed(ecmd); + + if (speed != SPEED_10 && + speed != SPEED_100 && + speed != SPEED_1000) + return -EINVAL; + if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) + return -EINVAL; + if (ecmd->port != PORT_MII) + return -EINVAL; + if (ecmd->transceiver != XCVR_INTERNAL) + return -EINVAL; + if (ecmd->phy_address != mii->phy_id) + return -EINVAL; + if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) + return -EINVAL; + if ((speed == SPEED_1000) && (!mii->supports_gmii)) + return -EINVAL; + + /* ignore supported, maxtxpkt, maxrxpkt */ + + if (ecmd->autoneg == AUTONEG_ENABLE) { + u32 bmcr, advert, tmp; + u32 advert2 = 0, tmp2 = 0; + + if ((ecmd->advertising & (ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) == 0) + return -EINVAL; + + /* advertise only what has been requested */ + advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); + tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); + if (mii->supports_gmii) { + advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000); + tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL); + } + tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising); + + if (mii->supports_gmii) + tmp2 |= + ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising); + if (advert != tmp) { + mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); + mii->advertising = tmp; + } + if ((mii->supports_gmii) && (advert2 != tmp2)) + mii->mdio_write(dev, mii->phy_id, MII_CTRL1000, tmp2); + + /* turn on autonegotiation, and force a renegotiate */ + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); + mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); + + mii->force_media = 0; + } else { + u32 bmcr, tmp; + + /* turn off auto negotiation, set speed and duplexity */ + bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); + tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | + BMCR_SPEED1000 | BMCR_FULLDPLX); + if (speed == SPEED_1000) + tmp |= BMCR_SPEED1000; + else if (speed == SPEED_100) + tmp |= BMCR_SPEED100; + if (ecmd->duplex == DUPLEX_FULL) { + tmp |= BMCR_FULLDPLX; + mii->full_duplex = 1; + } else + mii->full_duplex = 0; + if (bmcr != tmp) + mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); + + mii->force_media = 1; + } + return 0; +} + +/** + * mii_check_gmii_support - check if the MII supports Gb interfaces + * @mii: the MII interface + */ +int mii_check_gmii_support(struct mii_if_info *mii) +{ + int reg; + + reg = mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); + if (reg & BMSR_ESTATEN) { + reg = mii->mdio_read(mii->dev, mii->phy_id, MII_ESTATUS); + if (reg & (ESTATUS_1000_TFULL | ESTATUS_1000_THALF)) + return 1; + } + + return 0; +} + +/** + * mii_link_ok - is link status up/ok + * @mii: the MII interface + * + * Returns 1 if the MII reports link status up/ok, 0 otherwise. + */ +int mii_link_ok (struct mii_if_info *mii) +{ + /* first, a dummy read, needed to latch some MII phys */ + mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); + if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) + return 1; + return 0; +} + +/** + * mii_nway_restart - restart NWay (autonegotiation) for this interface + * @mii: the MII interface + * + * Returns 0 on success, negative on error. 
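+ *
+ * Hypothetical caller sketch: after updating the advertisement bits,
+ * kick off renegotiation and report when autoneg was disabled:
+ *
+ *	if (mii_nway_restart(mii))
+ *		netdev_warn(mii->dev, "autoneg off, cannot renegotiate\n");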
+ */ +int mii_nway_restart (struct mii_if_info *mii) +{ + int bmcr; + int r = -EINVAL; + + /* if autoneg is off, it's an error */ + bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); + + if (bmcr & BMCR_ANENABLE) { + bmcr |= BMCR_ANRESTART; + mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); + r = 0; + } + + return r; +} + +/** + * mii_check_link - check MII link status + * @mii: MII interface + * + * If the link status changed (previous != current), call + * netif_carrier_on() if current link status is Up or call + * netif_carrier_off() if current link status is Down. + */ +void mii_check_link (struct mii_if_info *mii) +{ + int cur_link = mii_link_ok(mii); + int prev_link = netif_carrier_ok(mii->dev); + + if (cur_link && !prev_link) + netif_carrier_on(mii->dev); + else if (prev_link && !cur_link) + netif_carrier_off(mii->dev); +} + +/** + * mii_check_media - check the MII interface for a carrier/speed/duplex change + * @mii: the MII interface + * @ok_to_print: OK to print link up/down messages + * @init_media: OK to save duplex mode in @mii + * + * Returns 1 if the duplex mode changed, 0 if not. + * If the media type is forced, always returns 0. + */ +unsigned int mii_check_media (struct mii_if_info *mii, + unsigned int ok_to_print, + unsigned int init_media) +{ + unsigned int old_carrier, new_carrier; + int advertise, lpa, media, duplex; + int lpa2 = 0; + + /* check current and old link status */ + old_carrier = netif_carrier_ok(mii->dev) ? 1 : 0; + new_carrier = (unsigned int) mii_link_ok(mii); + + /* if carrier state did not change, this is a "bounce", + * just exit as everything is already set correctly + */ + if ((!init_media) && (old_carrier == new_carrier)) + return 0; /* duplex did not change */ + + /* no carrier, nothing much to do */ + if (!new_carrier) { + netif_carrier_off(mii->dev); + if (ok_to_print) + netdev_info(mii->dev, "link down\n"); + return 0; /* duplex did not change */ + } + + /* + * we have carrier, see who's on the other end + */ + netif_carrier_on(mii->dev); + + if (mii->force_media) { + if (ok_to_print) + netdev_info(mii->dev, "link up\n"); + return 0; /* duplex did not change */ + } + + /* get MII advertise and LPA values */ + if ((!init_media) && (mii->advertising)) + advertise = mii->advertising; + else { + advertise = mii->mdio_read(mii->dev, mii->phy_id, MII_ADVERTISE); + mii->advertising = advertise; + } + lpa = mii->mdio_read(mii->dev, mii->phy_id, MII_LPA); + if (mii->supports_gmii) + lpa2 = mii->mdio_read(mii->dev, mii->phy_id, MII_STAT1000); + + /* figure out media and duplex from advertise and LPA values */ + media = mii_nway_result(lpa & advertise); + duplex = (media & ADVERTISE_FULL) ? 1 : 0; + if (lpa2 & LPA_1000FULL) + duplex = 1; + + if (ok_to_print) + netdev_info(mii->dev, "link up, %uMbps, %s-duplex, lpa 0x%04X\n", + lpa2 & (LPA_1000FULL | LPA_1000HALF) ? 1000 : + media & (ADVERTISE_100FULL | ADVERTISE_100HALF) ? + 100 : 10, + duplex ? "full" : "half", + lpa); + + if ((init_media) || (mii->full_duplex != duplex)) { + mii->full_duplex = duplex; + return 1; /* duplex changed */ + } + + return 0; /* duplex did not change */ +} + +/** + * generic_mii_ioctl - main MII ioctl interface + * @mii_if: the MII interface + * @mii_data: MII ioctl data structure + * @cmd: MII ioctl command + * @duplex_chg_out: pointer to @duplex_changed status if there was no + * ioctl error + * + * Returns 0 on success, negative on error. 
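+ *
+ * Hypothetical wiring into a driver's ndo_do_ioctl (the foo_* names are
+ * made up; if_mii() just maps the ifreq onto MII ioctl data):
+ *
+ *	static int foo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+ *	{
+ *		struct foo_priv *priv = netdev_priv(dev);
+ *		unsigned int duplex_changed;
+ *		int rc;
+ *
+ *		rc = generic_mii_ioctl(&priv->mii, if_mii(rq), cmd,
+ *				       &duplex_changed);
+ *		if (!rc && duplex_changed)
+ *			foo_set_duplex(priv);
+ *		return rc;
+ *	}
+ *
+ * (foo_set_duplex() stands in for whatever MAC duplex reprogramming the
+ * driver needs after a renegotiation.)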
+ */ +int generic_mii_ioctl(struct mii_if_info *mii_if, + struct mii_ioctl_data *mii_data, int cmd, + unsigned int *duplex_chg_out) +{ + int rc = 0; + unsigned int duplex_changed = 0; + + if (duplex_chg_out) + *duplex_chg_out = 0; + + mii_data->phy_id &= mii_if->phy_id_mask; + mii_data->reg_num &= mii_if->reg_num_mask; + + switch(cmd) { + case SIOCGMIIPHY: + mii_data->phy_id = mii_if->phy_id; + /* fall through */ + + case SIOCGMIIREG: + mii_data->val_out = + mii_if->mdio_read(mii_if->dev, mii_data->phy_id, + mii_data->reg_num); + break; + + case SIOCSMIIREG: { + u16 val = mii_data->val_in; + + if (mii_data->phy_id == mii_if->phy_id) { + switch(mii_data->reg_num) { + case MII_BMCR: { + unsigned int new_duplex = 0; + if (val & (BMCR_RESET|BMCR_ANENABLE)) + mii_if->force_media = 0; + else + mii_if->force_media = 1; + if (mii_if->force_media && + (val & BMCR_FULLDPLX)) + new_duplex = 1; + if (mii_if->full_duplex != new_duplex) { + duplex_changed = 1; + mii_if->full_duplex = new_duplex; + } + break; + } + case MII_ADVERTISE: + mii_if->advertising = val; + break; + default: + /* do nothing */ + break; + } + } + + mii_if->mdio_write(mii_if->dev, mii_data->phy_id, + mii_data->reg_num, val); + break; + } + + default: + rc = -EOPNOTSUPP; + break; + } + + if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) + *duplex_chg_out = 1; + + return rc; +} + +MODULE_AUTHOR ("Jeff Garzik "); +MODULE_DESCRIPTION ("MII hardware support library"); +MODULE_LICENSE("GPL"); + +EXPORT_SYMBOL(mii_link_ok); +EXPORT_SYMBOL(mii_nway_restart); +EXPORT_SYMBOL(mii_ethtool_gset); +EXPORT_SYMBOL(mii_ethtool_sset); +EXPORT_SYMBOL(mii_check_link); +EXPORT_SYMBOL(mii_check_media); +EXPORT_SYMBOL(mii_check_gmii_support); +EXPORT_SYMBOL(generic_mii_ioctl); + diff --git a/addons/r8169/src/4.4.180/r8169.c b/addons/r8169/src/4.4.180/r8169.c new file mode 100644 index 00000000..8f40e121 --- /dev/null +++ b/addons/r8169/src/4.4.180/r8169.c @@ -0,0 +1,8482 @@ +/* + * r8169.c: RealTek 8169/8168/8101 ethernet driver. + * + * Copyright (c) 2002 ShuChen + * Copyright (c) 2003 - 2007 Francois Romieu + * Copyright (c) a lot of people too. Please respect their work. + * + * See MAINTAINERS file for support contact information. 
+ */ + +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/pci.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/if_vlan.h> +#include <linux/crc32.h> +#include <linux/in.h> +#include <linux/ip.h> +#include <linux/tcp.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/pm_runtime.h> +#include <linux/firmware.h> +#include <linux/pci-aspm.h> +#include <linux/prefetch.h> +#include <linux/ipv6.h> +#include <net/ip6_checksum.h> + +#include <asm/io.h> +#include <asm/irq.h> + +#define RTL8169_VERSION "2.3LK-NAPI" +#define MODULENAME "r8169" +#define PFX MODULENAME ": " + +#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw" +#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw" +#define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw" +#define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw" +#define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw" +#define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw" +#define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw" +#define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw" +#define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw" +#define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw" +#define FIRMWARE_8411_2 "rtl_nic/rtl8411-2.fw" +#define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw" +#define FIRMWARE_8106E_2 "rtl_nic/rtl8106e-2.fw" +#define FIRMWARE_8168G_2 "rtl_nic/rtl8168g-2.fw" +#define FIRMWARE_8168G_3 "rtl_nic/rtl8168g-3.fw" +#define FIRMWARE_8168H_1 "rtl_nic/rtl8168h-1.fw" +#define FIRMWARE_8168H_2 "rtl_nic/rtl8168h-2.fw" +#define FIRMWARE_8107E_1 "rtl_nic/rtl8107e-1.fw" +#define FIRMWARE_8107E_2 "rtl_nic/rtl8107e-2.fw" + +#ifdef RTL8169_DEBUG +#define assert(expr) \ + if (!(expr)) { \ + printk( "Assertion failed! %s,%s,%s,line=%d\n", \ + #expr,__FILE__,__func__,__LINE__); \ + } +#define dprintk(fmt, args...) \ + do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) +#else +#define assert(expr) do {} while (0) +#define dprintk(fmt, args...) do {} while (0) +#endif /* RTL8169_DEBUG */ + +#define R8169_MSG_DEFAULT \ + (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) + +#define TX_SLOTS_AVAIL(tp) \ + (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx) + +/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ +#define TX_FRAGS_READY_FOR(tp,nr_frags) \ + (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1)) + +/* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). + The RTL chips use a 64 element hash table based on the Ethernet CRC. 
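+   A sketch of the mapping, assuming the stock ether_crc() helper: for
+   each multicast list entry ha, the top six bits of the CRC of the
+   address select one of the 64 filter bits, i.e.
+
+	int bit = ether_crc(ETH_ALEN, ha->addr) >> 26;
+	mc_filter[bit >> 5] |= 1 << (bit & 31);
+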
*/ +static const int multicast_filter_limit = 32; + +#define MAX_READ_REQUEST_SHIFT 12 +#define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */ +#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ + +#define R8169_REGS_SIZE 256 +#define R8169_NAPI_WEIGHT 64 +#define NUM_TX_DESC 64 /* Number of Tx descriptor registers */ +#define NUM_RX_DESC 256U /* Number of Rx descriptor registers */ +#define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) +#define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) + +#define RTL8169_TX_TIMEOUT (6*HZ) +#define RTL8169_PHY_TIMEOUT (10*HZ) + +/* write/read MMIO register */ +#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg)) +#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg)) +#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg)) +#define RTL_R8(reg) readb (ioaddr + (reg)) +#define RTL_R16(reg) readw (ioaddr + (reg)) +#define RTL_R32(reg) readl (ioaddr + (reg)) + +enum mac_version { + RTL_GIGA_MAC_VER_01 = 0, + RTL_GIGA_MAC_VER_02, + RTL_GIGA_MAC_VER_03, + RTL_GIGA_MAC_VER_04, + RTL_GIGA_MAC_VER_05, + RTL_GIGA_MAC_VER_06, + RTL_GIGA_MAC_VER_07, + RTL_GIGA_MAC_VER_08, + RTL_GIGA_MAC_VER_09, + RTL_GIGA_MAC_VER_10, + RTL_GIGA_MAC_VER_11, + RTL_GIGA_MAC_VER_12, + RTL_GIGA_MAC_VER_13, + RTL_GIGA_MAC_VER_14, + RTL_GIGA_MAC_VER_15, + RTL_GIGA_MAC_VER_16, + RTL_GIGA_MAC_VER_17, + RTL_GIGA_MAC_VER_18, + RTL_GIGA_MAC_VER_19, + RTL_GIGA_MAC_VER_20, + RTL_GIGA_MAC_VER_21, + RTL_GIGA_MAC_VER_22, + RTL_GIGA_MAC_VER_23, + RTL_GIGA_MAC_VER_24, + RTL_GIGA_MAC_VER_25, + RTL_GIGA_MAC_VER_26, + RTL_GIGA_MAC_VER_27, + RTL_GIGA_MAC_VER_28, + RTL_GIGA_MAC_VER_29, + RTL_GIGA_MAC_VER_30, + RTL_GIGA_MAC_VER_31, + RTL_GIGA_MAC_VER_32, + RTL_GIGA_MAC_VER_33, + RTL_GIGA_MAC_VER_34, + RTL_GIGA_MAC_VER_35, + RTL_GIGA_MAC_VER_36, + RTL_GIGA_MAC_VER_37, + RTL_GIGA_MAC_VER_38, + RTL_GIGA_MAC_VER_39, + RTL_GIGA_MAC_VER_40, + RTL_GIGA_MAC_VER_41, + RTL_GIGA_MAC_VER_42, + RTL_GIGA_MAC_VER_43, + RTL_GIGA_MAC_VER_44, + RTL_GIGA_MAC_VER_45, + RTL_GIGA_MAC_VER_46, + RTL_GIGA_MAC_VER_47, + RTL_GIGA_MAC_VER_48, + RTL_GIGA_MAC_VER_49, + RTL_GIGA_MAC_VER_50, + RTL_GIGA_MAC_VER_51, + RTL_GIGA_MAC_NONE = 0xff, +}; + +enum rtl_tx_desc_version { + RTL_TD_0 = 0, + RTL_TD_1 = 1, +}; + +#define JUMBO_1K ETH_DATA_LEN +#define JUMBO_4K (4*1024 - ETH_HLEN - 2) +#define JUMBO_6K (6*1024 - ETH_HLEN - 2) +#define JUMBO_7K (7*1024 - ETH_HLEN - 2) +#define JUMBO_9K (9*1024 - ETH_HLEN - 2) + +#define _R(NAME,TD,FW,SZ,B) { \ + .name = NAME, \ + .txd_version = TD, \ + .fw_name = FW, \ + .jumbo_max = SZ, \ + .jumbo_tx_csum = B \ +} + +static const struct { + const char *name; + enum rtl_tx_desc_version txd_version; + const char *fw_name; + u16 jumbo_max; + bool jumbo_tx_csum; +} rtl_chip_infos[] = { + /* PCI devices. */ + [RTL_GIGA_MAC_VER_01] = + _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_02] = + _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_03] = + _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_04] = + _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_05] = + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + [RTL_GIGA_MAC_VER_06] = + _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true), + /* PCI-E devices. 
*/ + [RTL_GIGA_MAC_VER_07] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_08] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_09] = + _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_10] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_11] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_12] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_13] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_14] = + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_15] = + _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_16] = + _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), + [RTL_GIGA_MAC_VER_17] = + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), + [RTL_GIGA_MAC_VER_18] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_19] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_20] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_21] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_22] = + _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_23] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_24] = + _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), + [RTL_GIGA_MAC_VER_25] = + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_26] = + _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_27] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_28] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_29] = + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_30] = + _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_31] = + _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_32] = + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_33] = + _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_34] = + _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_35] = + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_36] = + _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_37] = + _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_38] = + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_39] = + _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_40] = + _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_41] = + _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false), + [RTL_GIGA_MAC_VER_42] = + _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_3, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_43] = + _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_2, + JUMBO_1K, true), + [RTL_GIGA_MAC_VER_44] = + _R("RTL8411", RTL_TD_1, FIRMWARE_8411_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_45] = + _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_1, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_46] = + _R("RTL8168h/8111h", RTL_TD_1, FIRMWARE_8168H_2, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_47] = + _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_1, + 
JUMBO_1K, false), + [RTL_GIGA_MAC_VER_48] = + _R("RTL8107e", RTL_TD_1, FIRMWARE_8107E_2, + JUMBO_1K, false), + [RTL_GIGA_MAC_VER_49] = + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_50] = + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, + JUMBO_9K, false), + [RTL_GIGA_MAC_VER_51] = + _R("RTL8168ep/8111ep", RTL_TD_1, NULL, + JUMBO_9K, false), +}; +#undef _R + +enum cfg_version { + RTL_CFG_0 = 0x00, + RTL_CFG_1, + RTL_CFG_2 +}; + +static const struct pci_device_id rtl8169_pci_tbl[] = { + { PCI_VDEVICE(REALTEK, 0x2502), RTL_CFG_1 }, + { PCI_VDEVICE(REALTEK, 0x2600), RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, + { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, + { PCI_VENDOR_ID_LINKSYS, 0x1032, + PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, + { 0x0001, 0x8168, + PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 }, + {0,}, +}; + +MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); + +static int rx_buf_sz = 16383; +static int use_dac; +static struct { + u32 msg_enable; +} debug = { -1 }; + +enum rtl_registers { + MAC0 = 0, /* Ethernet hardware address. */ + MAC4 = 4, + MAR0 = 8, /* Multicast filter. */ + CounterAddrLow = 0x10, + CounterAddrHigh = 0x14, + TxDescStartAddrLow = 0x20, + TxDescStartAddrHigh = 0x24, + TxHDescStartAddrLow = 0x28, + TxHDescStartAddrHigh = 0x2c, + FLASH = 0x30, + ERSR = 0x36, + ChipCmd = 0x37, + TxPoll = 0x38, + IntrMask = 0x3c, + IntrStatus = 0x3e, + + TxConfig = 0x40, +#define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */ +#define TXCFG_EMPTY (1 << 11) /* 8111e-vl */ + + RxConfig = 0x44, +#define RX128_INT_EN (1 << 15) /* 8111c and later */ +#define RX_MULTI_EN (1 << 14) /* 8111c only */ +#define RXCFG_FIFO_SHIFT 13 + /* No threshold before first PCI xfer */ +#define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT) +#define RX_EARLY_OFF (1 << 11) +#define RXCFG_DMA_SHIFT 8 + /* Unlimited maximum PCI burst. */ +#define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT) + + RxMissed = 0x4c, + Cfg9346 = 0x50, + Config0 = 0x51, + Config1 = 0x52, + Config2 = 0x53, +#define PME_SIGNAL (1 << 5) /* 8168c and later */ + + Config3 = 0x54, + Config4 = 0x55, + Config5 = 0x56, + MultiIntr = 0x5c, + PHYAR = 0x60, + PHYstatus = 0x6c, + RxMaxSize = 0xda, + CPlusCmd = 0xe0, + IntrMitigate = 0xe2, + RxDescAddrLow = 0xe4, + RxDescAddrHigh = 0xe8, + EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */ + +#define NoEarlyTx 0x3f /* Max value : no early transmit. */ + + MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. 
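+					   Worked example: TxPacketMax just
+					   below is 8064 >> 7 == 63, i.e. 63
+					   units of 128 bytes, an 8064-byte
+					   frame cap.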
*/ + +#define TxPacketMax (8064 >> 7) +#define EarlySize 0x27 + + FuncEvent = 0xf0, + FuncEventMask = 0xf4, + FuncPresetState = 0xf8, + IBCR0 = 0xf8, + IBCR2 = 0xf9, + IBIMR0 = 0xfa, + IBISR0 = 0xfb, + FuncForceEvent = 0xfc, +}; + +enum rtl8110_registers { + TBICSR = 0x64, + TBI_ANAR = 0x68, + TBI_LPAR = 0x6a, +}; + +enum rtl8168_8101_registers { + CSIDR = 0x64, + CSIAR = 0x68, +#define CSIAR_FLAG 0x80000000 +#define CSIAR_WRITE_CMD 0x80000000 +#define CSIAR_BYTE_ENABLE 0x0f +#define CSIAR_BYTE_ENABLE_SHIFT 12 +#define CSIAR_ADDR_MASK 0x0fff +#define CSIAR_FUNC_CARD 0x00000000 +#define CSIAR_FUNC_SDIO 0x00010000 +#define CSIAR_FUNC_NIC 0x00020000 +#define CSIAR_FUNC_NIC2 0x00010000 + PMCH = 0x6f, + EPHYAR = 0x80, +#define EPHYAR_FLAG 0x80000000 +#define EPHYAR_WRITE_CMD 0x80000000 +#define EPHYAR_REG_MASK 0x1f +#define EPHYAR_REG_SHIFT 16 +#define EPHYAR_DATA_MASK 0xffff + DLLPR = 0xd0, +#define PFM_EN (1 << 6) +#define TX_10M_PS_EN (1 << 7) + DBG_REG = 0xd1, +#define FIX_NAK_1 (1 << 4) +#define FIX_NAK_2 (1 << 3) + TWSI = 0xd2, + MCU = 0xd3, +#define NOW_IS_OOB (1 << 7) +#define TX_EMPTY (1 << 5) +#define RX_EMPTY (1 << 4) +#define RXTX_EMPTY (TX_EMPTY | RX_EMPTY) +#define EN_NDP (1 << 3) +#define EN_OOB_RESET (1 << 2) +#define LINK_LIST_RDY (1 << 1) + EFUSEAR = 0xdc, +#define EFUSEAR_FLAG 0x80000000 +#define EFUSEAR_WRITE_CMD 0x80000000 +#define EFUSEAR_READ_CMD 0x00000000 +#define EFUSEAR_REG_MASK 0x03ff +#define EFUSEAR_REG_SHIFT 8 +#define EFUSEAR_DATA_MASK 0xff + MISC_1 = 0xf2, +#define PFM_D3COLD_EN (1 << 6) +}; + +enum rtl8168_registers { + LED_FREQ = 0x1a, + EEE_LED = 0x1b, + ERIDR = 0x70, + ERIAR = 0x74, +#define ERIAR_FLAG 0x80000000 +#define ERIAR_WRITE_CMD 0x80000000 +#define ERIAR_READ_CMD 0x00000000 +#define ERIAR_ADDR_BYTE_ALIGN 4 +#define ERIAR_TYPE_SHIFT 16 +#define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT) +#define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT) +#define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_OOB (0x02 << ERIAR_TYPE_SHIFT) +#define ERIAR_MASK_SHIFT 12 +#define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0100 (0x4 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT) +#define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT) + EPHY_RXER_NUM = 0x7c, + OCPDR = 0xb0, /* OCP GPHY access */ +#define OCPDR_WRITE_CMD 0x80000000 +#define OCPDR_READ_CMD 0x00000000 +#define OCPDR_REG_MASK 0x7f +#define OCPDR_GPHY_REG_SHIFT 16 +#define OCPDR_DATA_MASK 0xffff + OCPAR = 0xb4, +#define OCPAR_FLAG 0x80000000 +#define OCPAR_GPHY_WRITE_CMD 0x8000f060 +#define OCPAR_GPHY_READ_CMD 0x0000f060 + GPHY_OCP = 0xb8, + RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */ + MISC = 0xf0, /* 8168e only. 
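+				 (RXDV_GATED_EN below is the bit that
+				 rtl_hw_init_8168g(), further up, sets to
+				 gate the receiver while re-initialising.)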
*/ +#define TXPLA_RST (1 << 29) +#define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */ +#define PWM_EN (1 << 22) +#define RXDV_GATED_EN (1 << 19) +#define EARLY_TALLY_EN (1 << 16) +}; + +enum rtl_register_content { + /* InterruptStatusBits */ + SYSErr = 0x8000, + PCSTimeout = 0x4000, + SWInt = 0x0100, + TxDescUnavail = 0x0080, + RxFIFOOver = 0x0040, + LinkChg = 0x0020, + RxOverflow = 0x0010, + TxErr = 0x0008, + TxOK = 0x0004, + RxErr = 0x0002, + RxOK = 0x0001, + + /* RxStatusDesc */ + RxBOVF = (1 << 24), + RxFOVF = (1 << 23), + RxRWT = (1 << 22), + RxRES = (1 << 21), + RxRUNT = (1 << 20), + RxCRC = (1 << 19), + + /* ChipCmdBits */ + StopReq = 0x80, + CmdReset = 0x10, + CmdRxEnb = 0x08, + CmdTxEnb = 0x04, + RxBufEmpty = 0x01, + + /* TXPoll register p.5 */ + HPQ = 0x80, /* Poll cmd on the high prio queue */ + NPQ = 0x40, /* Poll cmd on the low prio queue */ + FSWInt = 0x01, /* Forced software interrupt */ + + /* Cfg9346Bits */ + Cfg9346_Lock = 0x00, + Cfg9346_Unlock = 0xc0, + + /* rx_mode_bits */ + AcceptErr = 0x20, + AcceptRunt = 0x10, + AcceptBroadcast = 0x08, + AcceptMulticast = 0x04, + AcceptMyPhys = 0x02, + AcceptAllPhys = 0x01, +#define RX_CONFIG_ACCEPT_MASK 0x3f + + /* TxConfigBits */ + TxInterFrameGapShift = 24, + TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ + + /* Config1 register p.24 */ + LEDS1 = (1 << 7), + LEDS0 = (1 << 6), + Speed_down = (1 << 4), + MEMMAP = (1 << 3), + IOMAP = (1 << 2), + VPD = (1 << 1), + PMEnable = (1 << 0), /* Power Management Enable */ + + /* Config2 register p. 25 */ + ClkReqEn = (1 << 7), /* Clock Request Enable */ + MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */ + PCI_Clock_66MHz = 0x01, + PCI_Clock_33MHz = 0x00, + + /* Config3 register p.25 */ + MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ + LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ + Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ + Rdy_to_L23 = (1 << 1), /* L23 Enable */ + Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ + + /* Config4 register */ + Jumbo_En1 = (1 << 1), /* 8168 only. 
Reserved in the 8168b */ + + /* Config5 register p.27 */ + BWF = (1 << 6), /* Accept Broadcast wakeup frame */ + MWF = (1 << 5), /* Accept Multicast wakeup frame */ + UWF = (1 << 4), /* Accept Unicast wakeup frame */ + Spi_en = (1 << 3), + LanWake = (1 << 1), /* LanWake enable/disable */ + PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */ + ASPM_en = (1 << 0), /* ASPM enable */ + + /* TBICSR p.28 */ + TBIReset = 0x80000000, + TBILoopback = 0x40000000, + TBINwEnable = 0x20000000, + TBINwRestart = 0x10000000, + TBILinkOk = 0x02000000, + TBINwComplete = 0x01000000, + + /* CPlusCmd p.31 */ + EnableBist = (1 << 15), // 8168 8101 + Mac_dbgo_oe = (1 << 14), // 8168 8101 + Normal_mode = (1 << 13), // unused + Force_half_dup = (1 << 12), // 8168 8101 + Force_rxflow_en = (1 << 11), // 8168 8101 + Force_txflow_en = (1 << 10), // 8168 8101 + Cxpl_dbg_sel = (1 << 9), // 8168 8101 + ASF = (1 << 8), // 8168 8101 + PktCntrDisable = (1 << 7), // 8168 8101 + Mac_dbgo_sel = 0x001c, // 8168 + RxVlan = (1 << 6), + RxChkSum = (1 << 5), + PCIDAC = (1 << 4), + PCIMulRW = (1 << 3), + INTT_0 = 0x0000, // 8168 + INTT_1 = 0x0001, // 8168 + INTT_2 = 0x0002, // 8168 + INTT_3 = 0x0003, // 8168 + + /* rtl8169_PHYstatus */ + TBI_Enable = 0x80, + TxFlowCtrl = 0x40, + RxFlowCtrl = 0x20, + _1000bpsF = 0x10, + _100bps = 0x08, + _10bps = 0x04, + LinkStatus = 0x02, + FullDup = 0x01, + + /* _TBICSRBit */ + TBILinkOK = 0x02000000, + + /* ResetCounterCommand */ + CounterReset = 0x1, + + /* DumpCounterCommand */ + CounterDump = 0x8, + + /* magic enable v2 */ + MagicPacket_v2 = (1 << 16), /* Wake up when receives a Magic Packet */ +}; + +enum rtl_desc_bit { + /* First doubleword. */ + DescOwn = (1 << 31), /* Descriptor is owned by NIC */ + RingEnd = (1 << 30), /* End of descriptor ring */ + FirstFrag = (1 << 29), /* First segment of a packet */ + LastFrag = (1 << 28), /* Final segment of a packet */ +}; + +/* Generic case. */ +enum rtl_tx_desc_bit { + /* First doubleword. */ + TD_LSO = (1 << 27), /* Large Send Offload */ +#define TD_MSS_MAX 0x07ffu /* MSS value */ + + /* Second doubleword. */ + TxVlanTag = (1 << 17), /* Add VLAN tag */ +}; + +/* 8169, 8168b and 810x except 8102e. */ +enum rtl_tx_desc_bit_0 { + /* First doubleword. */ +#define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */ + TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */ + TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */ + TD0_IP_CS = (1 << 18), /* Calculate IP checksum */ +}; + +/* 8102e, 8168c and beyond. */ +enum rtl_tx_desc_bit_1 { + /* First doubleword. */ + TD1_GTSENV4 = (1 << 26), /* Giant Send for IPv4 */ + TD1_GTSENV6 = (1 << 25), /* Giant Send for IPv6 */ +#define GTTCPHO_SHIFT 18 +#define GTTCPHO_MAX 0x7fU + + /* Second doubleword. 
*/ +#define TCPHO_SHIFT 18 +#define TCPHO_MAX 0x3ffU +#define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */ + TD1_IPv6_CS = (1 << 28), /* Calculate IPv6 checksum */ + TD1_IPv4_CS = (1 << 29), /* Calculate IPv4 checksum */ + TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */ + TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */ +}; + +enum rtl_rx_desc_bit { + /* Rx private */ + PID1 = (1 << 18), /* Protocol ID bit 1/2 */ + PID0 = (1 << 17), /* Protocol ID bit 2/2 */ + +#define RxProtoUDP (PID1) +#define RxProtoTCP (PID0) +#define RxProtoIP (PID1 | PID0) +#define RxProtoMask RxProtoIP + + IPFail = (1 << 16), /* IP checksum failed */ + UDPFail = (1 << 15), /* UDP/IP checksum failed */ + TCPFail = (1 << 14), /* TCP/IP checksum failed */ + RxVlanTag = (1 << 16), /* VLAN tag available */ +}; + +#define RsvdMask 0x3fffc000 + +struct TxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct RxDesc { + __le32 opts1; + __le32 opts2; + __le64 addr; +}; + +struct ring_info { + struct sk_buff *skb; + u32 len; + u8 __pad[sizeof(void *) - sizeof(u32)]; +}; + +enum features { + RTL_FEATURE_WOL = (1 << 0), + RTL_FEATURE_MSI = (1 << 1), + RTL_FEATURE_GMII = (1 << 2), +}; + +struct rtl8169_counters { + __le64 tx_packets; + __le64 rx_packets; + __le64 tx_errors; + __le32 rx_errors; + __le16 rx_missed; + __le16 align_errors; + __le32 tx_one_collision; + __le32 tx_multi_collision; + __le64 rx_unicast; + __le64 rx_broadcast; + __le32 rx_multicast; + __le16 tx_aborted; + __le16 tx_underun; +}; + +struct rtl8169_tc_offsets { + bool inited; + __le64 tx_errors; + __le32 tx_multi_collision; + __le16 tx_aborted; +}; + +enum rtl_flag { + RTL_FLAG_TASK_ENABLED = 0, + RTL_FLAG_TASK_SLOW_PENDING, + RTL_FLAG_TASK_RESET_PENDING, + RTL_FLAG_TASK_PHY_PENDING, + RTL_FLAG_MAX +}; + +struct rtl8169_stats { + u64 packets; + u64 bytes; + struct u64_stats_sync syncp; +}; + +struct rtl8169_private { + void __iomem *mmio_addr; /* memory map physical address */ + struct pci_dev *pci_dev; + struct net_device *dev; + struct napi_struct napi; + u32 msg_enable; + u16 txd_version; + u16 mac_version; + u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */ + u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. 
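+				 (This in fact indexes the Tx ring; cur_tx
+				 and dirty_tx are free-running counters, and
+				 TX_SLOTS_AVAIL() above relies on unsigned
+				 wraparound of dirty_tx + NUM_TX_DESC - cur_tx.)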
*/ + u32 dirty_tx; + struct rtl8169_stats rx_stats; + struct rtl8169_stats tx_stats; + struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */ + struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */ + dma_addr_t TxPhyAddr; + dma_addr_t RxPhyAddr; + void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */ + struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ + struct timer_list timer; + u16 cp_cmd; + + u16 event_slow; + + struct mdio_ops { + void (*write)(struct rtl8169_private *, int, int); + int (*read)(struct rtl8169_private *, int); + } mdio_ops; + + struct pll_power_ops { + void (*down)(struct rtl8169_private *); + void (*up)(struct rtl8169_private *); + } pll_power_ops; + + struct jumbo_ops { + void (*enable)(struct rtl8169_private *); + void (*disable)(struct rtl8169_private *); + } jumbo_ops; + + struct csi_ops { + void (*write)(struct rtl8169_private *, int, int); + u32 (*read)(struct rtl8169_private *, int); + } csi_ops; + + int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv); + int (*get_settings)(struct net_device *, struct ethtool_cmd *); + void (*phy_reset_enable)(struct rtl8169_private *tp); + void (*hw_start)(struct net_device *); + unsigned int (*phy_reset_pending)(struct rtl8169_private *tp); + unsigned int (*link_ok)(void __iomem *); + int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); + bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *); + + struct { + DECLARE_BITMAP(flags, RTL_FLAG_MAX); + struct mutex mutex; + struct work_struct work; + } wk; + + unsigned features; + + struct mii_if_info mii; + dma_addr_t counters_phys_addr; + struct rtl8169_counters *counters; + struct rtl8169_tc_offsets tc_offset; + u32 saved_wolopts; + u32 opts1_mask; + + struct rtl_fw { + const struct firmware *fw; + +#define RTL_VER_SIZE 32 + + char version[RTL_VER_SIZE]; + + struct rtl_fw_phy_action { + __le32 *code; + size_t size; + } phy_action; + } *rtl_fw; +#define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN) + + u32 ocp_base; +}; + +MODULE_AUTHOR("Realtek and the Linux r8169 crew "); +MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); +module_param(use_dac, int, 0); +MODULE_PARM_DESC(use_dac, "Enable PCI DAC. 
Unsafe on 32 bit PCI slot."); +module_param_named(debug, debug.msg_enable, int, 0); +MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(RTL8169_VERSION); +MODULE_FIRMWARE(FIRMWARE_8168D_1); +MODULE_FIRMWARE(FIRMWARE_8168D_2); +MODULE_FIRMWARE(FIRMWARE_8168E_1); +MODULE_FIRMWARE(FIRMWARE_8168E_2); +MODULE_FIRMWARE(FIRMWARE_8168E_3); +MODULE_FIRMWARE(FIRMWARE_8105E_1); +MODULE_FIRMWARE(FIRMWARE_8168F_1); +MODULE_FIRMWARE(FIRMWARE_8168F_2); +MODULE_FIRMWARE(FIRMWARE_8402_1); +MODULE_FIRMWARE(FIRMWARE_8411_1); +MODULE_FIRMWARE(FIRMWARE_8411_2); +MODULE_FIRMWARE(FIRMWARE_8106E_1); +MODULE_FIRMWARE(FIRMWARE_8106E_2); +MODULE_FIRMWARE(FIRMWARE_8168G_2); +MODULE_FIRMWARE(FIRMWARE_8168G_3); +MODULE_FIRMWARE(FIRMWARE_8168H_1); +MODULE_FIRMWARE(FIRMWARE_8168H_2); +MODULE_FIRMWARE(FIRMWARE_8107E_1); +MODULE_FIRMWARE(FIRMWARE_8107E_2); + +static void rtl_lock_work(struct rtl8169_private *tp) +{ + mutex_lock(&tp->wk.mutex); +} + +static void rtl_unlock_work(struct rtl8169_private *tp) +{ + mutex_unlock(&tp->wk.mutex); +} + +static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force) +{ + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ, force); +} + +struct rtl_cond { + bool (*check)(struct rtl8169_private *); + const char *msg; +}; + +static void rtl_udelay(unsigned int d) +{ + udelay(d); +} + +static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c, + void (*delay)(unsigned int), unsigned int d, int n, + bool high) +{ + int i; + + for (i = 0; i < n; i++) { + delay(d); + if (c->check(tp) == high) + return true; + } + netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n", + c->msg, !high, n, d); + return false; +} + +static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, rtl_udelay, d, n, true); +} + +static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, rtl_udelay, d, n, false); +} + +static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, msleep, d, n, true); +} + +static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp, + const struct rtl_cond *c, + unsigned int d, int n) +{ + return rtl_loop_wait(tp, c, msleep, d, n, false); +} + +#define DECLARE_RTL_COND(name) \ +static bool name ## _check(struct rtl8169_private *); \ + \ +static const struct rtl_cond name = { \ + .check = name ## _check, \ + .msg = #name \ +}; \ + \ +static bool name ## _check(struct rtl8169_private *tp) + +static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg) +{ + if (reg & 0xffff0001) { + netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg); + return true; + } + return false; +} + +DECLARE_RTL_COND(rtl_ocp_gphy_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(GPHY_OCP) & OCPAR_FLAG; +} + +static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data); + + rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10); +} + +static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(GPHY_OCP, reg << 
15); + + return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ? + (RTL_R32(GPHY_OCP) & 0xffff) : ~0; +} + +static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return; + + RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data); +} + +static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_ocp_reg_failure(tp, reg)) + return 0; + + RTL_W32(OCPDR, reg << 15); + + return RTL_R32(OCPDR); +} + +#define OCP_STD_PHY_BASE 0xa400 + +static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE; + return; + } + + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value); +} + +static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) +{ + if (tp->ocp_base != OCP_STD_PHY_BASE) + reg -= 0x10; + + return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2); +} + +static void mac_mcu_write(struct rtl8169_private *tp, int reg, int value) +{ + if (reg == 0x1f) { + tp->ocp_base = value << 4; + return; + } + + r8168_mac_ocp_write(tp, tp->ocp_base + reg, value); +} + +static int mac_mcu_read(struct rtl8169_private *tp, int reg) +{ + return r8168_mac_ocp_read(tp, tp->ocp_base + reg); +} + +DECLARE_RTL_COND(rtl_phyar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(PHYAR) & 0x80000000; +} + +static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff)); + + rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20); + /* + * According to hardware specs a 20us delay is required after write + * complete indication, but before sending next command. + */ + udelay(20); +} + +static int r8169_mdio_read(struct rtl8169_private *tp, int reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + int value; + + RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16); + + value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ? + RTL_R32(PHYAR) & 0xffff : ~0; + + /* + * According to hardware specs a 20us delay is required after read + * complete indication, but before sending next command. + */ + udelay(20); + + return value; +} + +DECLARE_RTL_COND(rtl_ocpar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(OCPAR) & OCPAR_FLAG; +} + +static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT)); + RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD); + RTL_W32(EPHY_RXER_NUM, 0); + + rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100); +} + +static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + r8168dp_1_mdio_access(tp, reg, + OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK)); +} + +static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD); + + mdelay(1); + RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD); + RTL_W32(EPHY_RXER_NUM, 0); + + return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ? 
+ RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0; +} + +#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000 + +static void r8168dp_2_mdio_start(void __iomem *ioaddr) +{ + RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_stop(void __iomem *ioaddr) +{ + RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT); +} + +static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168dp_2_mdio_start(ioaddr); + + r8169_mdio_write(tp, reg, value); + + r8168dp_2_mdio_stop(ioaddr); +} + +static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + int value; + + r8168dp_2_mdio_start(ioaddr); + + value = r8169_mdio_read(tp, reg); + + r8168dp_2_mdio_stop(ioaddr); + + return value; +} + +static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val) +{ + tp->mdio_ops.write(tp, location, val); +} + +static int rtl_readphy(struct rtl8169_private *tp, int location) +{ + return tp->mdio_ops.read(tp, location); +} + +static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value) +{ + rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value); +} + +static void rtl_w0w1_phy(struct rtl8169_private *tp, int reg_addr, int p, int m) +{ + int val; + + val = rtl_readphy(tp, reg_addr); + rtl_writephy(tp, reg_addr, (val & ~m) | p); +} + +static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, + int val) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_writephy(tp, location, val); +} + +static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + return rtl_readphy(tp, location); +} + +DECLARE_RTL_COND(rtl_ephyar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(EPHYAR) & EPHYAR_FLAG; +} + +static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) | + (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100); + + udelay(10); +} + +static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ? + RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0; +} + +DECLARE_RTL_COND(rtl_eriar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(ERIAR) & ERIAR_FLAG; +} + +static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, + u32 val, int type) +{ + void __iomem *ioaddr = tp->mmio_addr; + + BUG_ON((addr & 3) || (mask == 0)); + RTL_W32(ERIDR, val); + RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr); + + rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); +} + +static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); + + return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? 
+ RTL_R32(ERIDR) : ~0; +} + +static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p, + u32 m, int type) +{ + u32 val; + + val = rtl_eri_read(tp, addr, type); + rtl_eri_write(tp, addr, mask, (val & ~m) | p, type); +} + +static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ? + RTL_R32(OCPDR) : ~0; +} + +static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + return rtl_eri_read(tp, reg, ERIAR_OOB); +} + +static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_ocp_read(tp, mask, reg); + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + return r8168ep_ocp_read(tp, mask, reg); + default: + BUG(); + return ~0; + } +} + +static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(OCPDR, data); + RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff)); + rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20); +} + +static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, + u32 data) +{ + rtl_eri_write(tp, reg, ((u32)mask & 0x0f) << ERIAR_MASK_SHIFT, + data, ERIAR_OOB); +} + +static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + r8168dp_ocp_write(tp, mask, reg, data); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + r8168ep_ocp_write(tp, mask, reg, data); + break; + default: + BUG(); + break; + } +} + +static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) +{ + rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC); + + ocp_write(tp, 0x1, 0x30, 0x00000001); +} + +#define OOB_CMD_RESET 0x00 +#define OOB_CMD_DRIVER_START 0x05 +#define OOB_CMD_DRIVER_STOP 0x06 + +static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) +{ + return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 
0xb8 : 0x10; +} + +DECLARE_RTL_COND(rtl_ocp_read_cond) +{ + u16 reg; + + reg = rtl8168_get_ocp_reg(tp); + + return ocp_read(tp, 0x0f, reg) & 0x00000800; +} + +DECLARE_RTL_COND(rtl_ep_ocp_read_cond) +{ + return ocp_read(tp, 0x0f, 0x124) & 0x00000001; +} + +DECLARE_RTL_COND(rtl_ocp_tx_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R8(IBISR0) & 0x20; +} + +static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01); + rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000); + RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20); + RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01); +} + +static void rtl8168dp_driver_start(struct rtl8169_private *tp) +{ + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); + rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10); +} + +static void rtl8168ep_driver_start(struct rtl8169_private *tp) +{ + ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); + rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10); +} + +static void rtl8168_driver_start(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl8168dp_driver_start(tp); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl8168ep_driver_start(tp); + break; + default: + BUG(); + break; + } +} + +static void rtl8168dp_driver_stop(struct rtl8169_private *tp) +{ + rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10); +} + +static void rtl8168ep_driver_stop(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); + ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); + rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10); +} + +static void rtl8168_driver_stop(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl8168dp_driver_stop(tp); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl8168ep_driver_stop(tp); + break; + default: + BUG(); + break; + } +} + +static int r8168dp_check_dash(struct rtl8169_private *tp) +{ + u16 reg = rtl8168_get_ocp_reg(tp); + + return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0; +} + +static int r8168ep_check_dash(struct rtl8169_private *tp) +{ + return (ocp_read(tp, 0x0f, 0x128) & 0x00000001) ? 
1 : 0; +} + +static int r8168_check_dash(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + return r8168dp_check_dash(tp); + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + return r8168ep_check_dash(tp); + default: + return 0; + } +} + +struct exgmac_reg { + u16 addr; + u16 mask; + u32 val; +}; + +static void rtl_write_exgmac_batch(struct rtl8169_private *tp, + const struct exgmac_reg *r, int len) +{ + while (len-- > 0) { + rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC); + r++; + } +} + +DECLARE_RTL_COND(rtl_efusear_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(EFUSEAR) & EFUSEAR_FLAG; +} + +static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ? + RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0; +} + +static u16 rtl_get_events(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R16(IntrStatus); +} + +static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrStatus, bits); + mmiowb(); +} + +static void rtl_irq_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrMask, 0); + mmiowb(); +} + +static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W16(IntrMask, bits); +} + +#define RTL_EVENT_NAPI_RX (RxOK | RxErr) +#define RTL_EVENT_NAPI_TX (TxOK | TxErr) +#define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX) + +static void rtl_irq_enable_all(struct rtl8169_private *tp) +{ + rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow); +} + +static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + rtl_irq_disable(tp); + rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow); + RTL_R8(ChipCmd); +} + +static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(TBICSR) & TBIReset; +} + +static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp) +{ + return rtl_readphy(tp, MII_BMCR) & BMCR_RESET; +} + +static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr) +{ + return RTL_R32(TBICSR) & TBILinkOk; +} + +static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr) +{ + return RTL_R8(PHYstatus) & LinkStatus; +} + +static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset); +} + +static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp) +{ + unsigned int val; + + val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET; + rtl_writephy(tp, MII_BMCR, val & 0xffff); +} + +static void rtl_link_chg_patch(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct net_device *dev = tp->dev; + + if (!netif_running(dev)) + return; + + if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_38) { + if (RTL_R8(PHYstatus) & _1000bpsF) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); + } else if (RTL_R8(PHYstatus) & _100bps) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 
0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, + ERIAR_EXGMAC); + } + /* Reset packet filter */ + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, + ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, + ERIAR_EXGMAC); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36) { + if (RTL_R8(PHYstatus) & _1000bpsF) { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005, + ERIAR_EXGMAC); + } else { + rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f, + ERIAR_EXGMAC); + } + } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) { + if (RTL_R8(PHYstatus) & _10bps) { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02, + ERIAR_EXGMAC); + rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060, + ERIAR_EXGMAC); + } else { + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, + ERIAR_EXGMAC); + } + } +} + +static void __rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr, bool pm) +{ + if (tp->link_ok(ioaddr)) { + rtl_link_chg_patch(tp); + /* This is to cancel a scheduled suspend if there's one. */ + if (pm) + pm_request_resume(&tp->pci_dev->dev); + netif_carrier_on(dev); + if (net_ratelimit()) + netif_info(tp, ifup, dev, "link up\n"); + } else { + netif_carrier_off(dev); + netif_info(tp, ifdown, dev, "link down\n"); + if (pm) + pm_schedule_suspend(&tp->pci_dev->dev, 5000); + } +} + +static void rtl8169_check_link_status(struct net_device *dev, + struct rtl8169_private *tp, + void __iomem *ioaddr) +{ + __rtl8169_check_link_status(dev, tp, ioaddr, false); +} + +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST) + +static u32 __rtl8169_get_wol(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u8 options; + u32 wolopts = 0; + + options = RTL_R8(Config1); + if (!(options & PMEnable)) + return 0; + + options = RTL_R8(Config3); + if (options & LinkUp) + wolopts |= WAKE_PHY; + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) + wolopts |= WAKE_MAGIC; + break; + default: + if (options & MagicPacket) + wolopts |= WAKE_MAGIC; + break; + } + + options = RTL_R8(Config5); + if (options & UWF) + wolopts |= WAKE_UCAST; + if (options & BWF) + wolopts |= WAKE_BCAST; + if (options & MWF) + wolopts |= WAKE_MCAST; + + return wolopts; +} + +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_lock_work(tp); + + wol->supported = WAKE_ANY; + wol->wolopts = __rtl8169_get_wol(tp); + + rtl_unlock_work(tp); +} + +static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) +{ + void __iomem *ioaddr = tp->mmio_addr; + unsigned int i, tmp; + 
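/* + * Each cfg[] entry below pairs an ethtool WAKE_* option with the + * Config register bit that arms it. The MagicPacket entry is kept + * last on purpose: chips that signal Magic Packet through the ERI + * MagicPacket_v2 bit (see the switch below) loop over only + * ARRAY_SIZE(cfg) - 1 entries and so skip it. + */ +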
static const struct { + u32 opt; + u16 reg; + u8 mask; + } cfg[] = { + { WAKE_PHY, Config3, LinkUp }, + { WAKE_UCAST, Config5, UWF }, + { WAKE_BCAST, Config5, BWF }, + { WAKE_MCAST, Config5, MWF }, + { WAKE_ANY, Config5, LanWake }, + { WAKE_MAGIC, Config3, MagicPacket } + }; + u8 options; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + tmp = ARRAY_SIZE(cfg) - 1; + if (wolopts & WAKE_MAGIC) + rtl_w0w1_eri(tp, + 0x0dc, + ERIAR_MASK_0100, + MagicPacket_v2, + 0x0000, + ERIAR_EXGMAC); + else + rtl_w0w1_eri(tp, + 0x0dc, + ERIAR_MASK_0100, + 0x0000, + MagicPacket_v2, + ERIAR_EXGMAC); + break; + default: + tmp = ARRAY_SIZE(cfg); + break; + } + + for (i = 0; i < tmp; i++) { + options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; + if (wolopts & cfg[i].opt) + options |= cfg[i].mask; + RTL_W8(cfg[i].reg, options); + } + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17: + options = RTL_R8(Config1) & ~PMEnable; + if (wolopts) + options |= PMEnable; + RTL_W8(Config1, options); + break; + default: + options = RTL_R8(Config2) & ~PME_SIGNAL; + if (wolopts) + options |= PME_SIGNAL; + RTL_W8(Config2, options); + break; + } + + RTL_W8(Cfg9346, Cfg9346_Lock); +} + +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_lock_work(tp); + + if (wol->wolopts) + tp->features |= RTL_FEATURE_WOL; + else + tp->features &= ~RTL_FEATURE_WOL; + __rtl8169_set_wol(tp, wol->wolopts); + + rtl_unlock_work(tp); + + device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); + + return 0; +} + +static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp) +{ + return rtl_chip_infos[tp->mac_version].fw_name; +} + +static void rtl8169_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl_fw *rtl_fw = tp->rtl_fw; + + strlcpy(info->driver, MODULENAME, sizeof(info->driver)); + strlcpy(info->version, RTL8169_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); + BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version)); + if (!IS_ERR_OR_NULL(rtl_fw)) + strlcpy(info->fw_version, rtl_fw->version, + sizeof(info->fw_version)); +} + +static int rtl8169_get_regs_len(struct net_device *dev) +{ + return R8169_REGS_SIZE; +} + +static int rtl8169_set_speed_tbi(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 ignored) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + int ret = 0; + u32 reg; + + reg = RTL_R32(TBICSR); + if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) && + (duplex == DUPLEX_FULL)) { + RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart)); + } else if (autoneg == AUTONEG_ENABLE) + RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart); + else { + netif_warn(tp, link, dev, + "incorrect speed setting refused in TBI mode\n"); + ret = -EOPNOTSUPP; + } + + return ret; +} + +static int rtl8169_set_speed_xmii(struct net_device *dev, + 
u8 autoneg, u16 speed, u8 duplex, u32 adv) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int giga_ctrl, bmcr; + int rc = -EINVAL; + + rtl_writephy(tp, 0x1f, 0x0000); + + if (autoneg == AUTONEG_ENABLE) { + int auto_nego; + + auto_nego = rtl_readphy(tp, MII_ADVERTISE); + auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL | + ADVERTISE_100HALF | ADVERTISE_100FULL); + + if (adv & ADVERTISED_10baseT_Half) + auto_nego |= ADVERTISE_10HALF; + if (adv & ADVERTISED_10baseT_Full) + auto_nego |= ADVERTISE_10FULL; + if (adv & ADVERTISED_100baseT_Half) + auto_nego |= ADVERTISE_100HALF; + if (adv & ADVERTISED_100baseT_Full) + auto_nego |= ADVERTISE_100FULL; + + auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; + + giga_ctrl = rtl_readphy(tp, MII_CTRL1000); + giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); + + /* The 8100e/8101e/8102e do Fast Ethernet only. */ + if (tp->mii.supports_gmii) { + if (adv & ADVERTISED_1000baseT_Half) + giga_ctrl |= ADVERTISE_1000HALF; + if (adv & ADVERTISED_1000baseT_Full) + giga_ctrl |= ADVERTISE_1000FULL; + } else if (adv & (ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full)) { + netif_info(tp, link, dev, + "PHY does not support 1000Mbps\n"); + goto out; + } + + bmcr = BMCR_ANENABLE | BMCR_ANRESTART; + + rtl_writephy(tp, MII_ADVERTISE, auto_nego); + rtl_writephy(tp, MII_CTRL1000, giga_ctrl); + } else { + giga_ctrl = 0; + + if (speed == SPEED_10) + bmcr = 0; + else if (speed == SPEED_100) + bmcr = BMCR_SPEED100; + else + goto out; + + if (duplex == DUPLEX_FULL) + bmcr |= BMCR_FULLDPLX; + } + + rtl_writephy(tp, MII_BMCR, bmcr); + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) { + if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) { + rtl_writephy(tp, 0x17, 0x2138); + rtl_writephy(tp, 0x0e, 0x0260); + } else { + rtl_writephy(tp, 0x17, 0x2108); + rtl_writephy(tp, 0x0e, 0x0000); + } + } + + rc = 0; +out: + return rc; +} + +static int rtl8169_set_speed(struct net_device *dev, + u8 autoneg, u16 speed, u8 duplex, u32 advertising) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + ret = tp->set_speed(dev, autoneg, speed, duplex, advertising); + if (ret < 0) + goto out; + + if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) && + (advertising & ADVERTISED_1000baseT_Full)) { + mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT); + } +out: + return ret; +} + +static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int ret; + + del_timer_sync(&tp->timer); + + rtl_lock_work(tp); + ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd), + cmd->duplex, cmd->advertising); + rtl_unlock_work(tp); + + return ret; +} + +static netdev_features_t rtl8169_fix_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (dev->mtu > TD_MSS_MAX) + features &= ~NETIF_F_ALL_TSO; + + if (dev->mtu > JUMBO_1K && + !rtl_chip_infos[tp->mac_version].jumbo_tx_csum) + features &= ~NETIF_F_IP_CSUM; + + return features; +} + +static void __rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 rx_config; + + rx_config = RTL_R32(RxConfig); + if (features & NETIF_F_RXALL) + rx_config |= (AcceptErr | AcceptRunt); + else + rx_config &= ~(AcceptErr | AcceptRunt); + + RTL_W32(RxConfig, rx_config); + + if (features & NETIF_F_RXCSUM) + tp->cp_cmd |= RxChkSum; + 
else + tp->cp_cmd &= ~RxChkSum; + + if (features & NETIF_F_HW_VLAN_CTAG_RX) + tp->cp_cmd |= RxVlan; + else + tp->cp_cmd &= ~RxVlan; + + tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum); + + RTL_W16(CPlusCmd, tp->cp_cmd); + RTL_R16(CPlusCmd); +} + +static int rtl8169_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + features &= NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX; + + rtl_lock_work(tp); + if (features ^ dev->features) + __rtl8169_set_features(dev, features); + rtl_unlock_work(tp); + + return 0; +} + + +static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb) +{ + return (skb_vlan_tag_present(skb)) ? + TxVlanTag | swab16(skb_vlan_tag_get(skb)) : 0x00; +} + +static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) +{ + u32 opts2 = le32_to_cpu(desc->opts2); + + if (opts2 & RxVlanTag) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), swab16(opts2 & 0xffff)); +} + +static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 status; + + cmd->supported = + SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE; + cmd->port = PORT_FIBRE; + cmd->transceiver = XCVR_INTERNAL; + + status = RTL_R32(TBICSR); + cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0; + cmd->autoneg = !!(status & TBINwEnable); + + ethtool_cmd_speed_set(cmd, SPEED_1000); + cmd->duplex = DUPLEX_FULL; /* Always set */ + + return 0; +} + +static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + return mii_ethtool_gset(&tp->mii, cmd); +} + +static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + int rc; + + rtl_lock_work(tp); + rc = tp->get_settings(dev, cmd); + rtl_unlock_work(tp); + + return rc; +} + +static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + u32 __iomem *data = tp->mmio_addr; + u32 *dw = p; + int i; + + rtl_lock_work(tp); + for (i = 0; i < R8169_REGS_SIZE; i += 4) + memcpy_fromio(dw++, data++, 4); + rtl_unlock_work(tp); +} + +static u32 rtl8169_get_msglevel(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + return tp->msg_enable; +} + +static void rtl8169_set_msglevel(struct net_device *dev, u32 value) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + tp->msg_enable = value; +} + +static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = { + "tx_packets", + "rx_packets", + "tx_errors", + "rx_errors", + "rx_missed", + "align_errors", + "tx_single_collisions", + "tx_multi_collisions", + "unicast", + "broadcast", + "multicast", + "tx_aborted", + "tx_underrun", +}; + +static int rtl8169_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(rtl8169_gstrings); + default: + return -EOPNOTSUPP; + } +} + +DECLARE_RTL_COND(rtl_counters_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(CounterAddrLow) & (CounterReset | CounterDump); +} + +static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + dma_addr_t paddr = tp->counters_phys_addr; + u32 cmd; + + RTL_W32(CounterAddrHigh, (u64)paddr >> 32); + 
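/* Read back to flush the posted write before issuing the command below. */ +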
RTL_R32(CounterAddrHigh); + cmd = (u64)paddr & DMA_BIT_MASK(32); + RTL_W32(CounterAddrLow, cmd); + RTL_W32(CounterAddrLow, cmd | counter_cmd); + + return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000); +} + +static bool rtl8169_reset_counters(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + /* + * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the + * tally counters. + */ + if (tp->mac_version < RTL_GIGA_MAC_VER_19) + return true; + + return rtl8169_do_counters(dev, CounterReset); +} + +static bool rtl8169_update_counters(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + /* + * Some chips are unable to dump tally counters when the receiver + * is disabled. + */ + if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0) + return true; + + return rtl8169_do_counters(dev, CounterDump); +} + +static bool rtl8169_init_counter_offsets(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_counters *counters = tp->counters; + bool ret = false; + + /* + * rtl8169_init_counter_offsets is called from rtl_open. On chip + * versions prior to RTL_GIGA_MAC_VER_19 the tally counters are only + * reset by a power cycle, while the counter values collected by the + * driver are reset at every driver unload/load cycle. + * + * To make sure the HW values returned by @get_stats64 match the SW + * values, we collect the initial values at first open(*) and use them + * as offsets to normalize the values returned by @get_stats64. + * + * (*) We can't call rtl8169_init_counter_offsets from rtl_init_one + * for the reason stated in rtl8169_update_counters; CmdRxEnb is only + * set at open time by rtl_hw_start. + */ + + if (tp->tc_offset.inited) + return true; + + /* If both reset and update fail, propagate the failure to the caller. 
*/ + if (rtl8169_reset_counters(dev)) + ret = true; + + if (rtl8169_update_counters(dev)) + ret = true; + + tp->tc_offset.tx_errors = counters->tx_errors; + tp->tc_offset.tx_multi_collision = counters->tx_multi_collision; + tp->tc_offset.tx_aborted = counters->tx_aborted; + tp->tc_offset.inited = true; + + return ret; +} + +static void rtl8169_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct rtl8169_counters *counters = tp->counters; + + ASSERT_RTNL(); + + rtl8169_update_counters(dev); + + data[0] = le64_to_cpu(counters->tx_packets); + data[1] = le64_to_cpu(counters->rx_packets); + data[2] = le64_to_cpu(counters->tx_errors); + data[3] = le32_to_cpu(counters->rx_errors); + data[4] = le16_to_cpu(counters->rx_missed); + data[5] = le16_to_cpu(counters->align_errors); + data[6] = le32_to_cpu(counters->tx_one_collision); + data[7] = le32_to_cpu(counters->tx_multi_collision); + data[8] = le64_to_cpu(counters->rx_unicast); + data[9] = le64_to_cpu(counters->rx_broadcast); + data[10] = le32_to_cpu(counters->rx_multicast); + data[11] = le16_to_cpu(counters->tx_aborted); + data[12] = le16_to_cpu(counters->tx_underun); +} + +static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + switch(stringset) { + case ETH_SS_STATS: + memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings)); + break; + } +} + +static const struct ethtool_ops rtl8169_ethtool_ops = { + .get_drvinfo = rtl8169_get_drvinfo, + .get_regs_len = rtl8169_get_regs_len, + .get_link = ethtool_op_get_link, + .get_settings = rtl8169_get_settings, + .set_settings = rtl8169_set_settings, + .get_msglevel = rtl8169_get_msglevel, + .set_msglevel = rtl8169_set_msglevel, + .get_regs = rtl8169_get_regs, + .get_wol = rtl8169_get_wol, + .set_wol = rtl8169_set_wol, + .get_strings = rtl8169_get_strings, + .get_sset_count = rtl8169_get_sset_count, + .get_ethtool_stats = rtl8169_get_ethtool_stats, + .get_ts_info = ethtool_op_get_ts_info, +}; + +static void rtl8169_get_mac_version(struct rtl8169_private *tp, + struct net_device *dev, u8 default_version) +{ + void __iomem *ioaddr = tp->mmio_addr; + /* + * The driver currently handles the 8168Bf and the 8168Be identically + * but they can be identified more specifically through the test below + * if needed: + * + * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be + * + * Same thing for the 8101Eb and the 8101Ec: + * + * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec + */ + static const struct rtl_mac_info { + u32 mask; + u32 val; + int mac_version; + } mac_info[] = { + /* 8168EP family. */ + { 0x7cf00000, 0x50200000, RTL_GIGA_MAC_VER_51 }, + { 0x7cf00000, 0x50100000, RTL_GIGA_MAC_VER_50 }, + { 0x7cf00000, 0x50000000, RTL_GIGA_MAC_VER_49 }, + + /* 8168H family. */ + { 0x7cf00000, 0x54100000, RTL_GIGA_MAC_VER_46 }, + { 0x7cf00000, 0x54000000, RTL_GIGA_MAC_VER_45 }, + + /* 8168G family. */ + { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 }, + { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 }, + { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 }, + { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 }, + + /* 8168F family. */ + { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 }, + { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, + { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, + + /* 8168E family. 
*/ + { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 }, + { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 }, + { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 }, + { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 }, + + /* 8168D family. */ + { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, + { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, + { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, + + /* 8168DP family. */ + { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 }, + { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 }, + { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 }, + + /* 8168C family. */ + { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 }, + { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 }, + { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 }, + { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 }, + { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 }, + { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 }, + { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 }, + { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 }, + { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 }, + + /* 8168B family. */ + { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 }, + { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 }, + { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 }, + { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, + + /* 8101 family. */ + { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 }, + { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 }, + { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 }, + { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, + { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, + { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 }, + { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 }, + { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 }, + { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 }, + { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 }, + { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 }, + { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 }, + { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 }, + { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 }, + /* FIXME: where did these entries come from ? -- FR */ + { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 }, + { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 }, + + /* 8110 family. */ + { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 }, + { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 }, + { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 }, + { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 }, + { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 }, + { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 }, + + /* Catch-all */ + { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE } + }; + const struct rtl_mac_info *p = mac_info; + u32 reg; + + reg = RTL_R32(TxConfig); + while ((reg & p->mask) != p->val) + p++; + tp->mac_version = p->mac_version; + + if (tp->mac_version == RTL_GIGA_MAC_NONE) { + netif_notice(tp, probe, dev, + "unknown MAC, using family default\n"); + tp->mac_version = default_version; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) { + tp->mac_version = tp->mii.supports_gmii ? + RTL_GIGA_MAC_VER_42 : + RTL_GIGA_MAC_VER_43; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) { + tp->mac_version = tp->mii.supports_gmii ? + RTL_GIGA_MAC_VER_45 : + RTL_GIGA_MAC_VER_47; + } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) { + tp->mac_version = tp->mii.supports_gmii ? 
+ RTL_GIGA_MAC_VER_46 : + RTL_GIGA_MAC_VER_48; + } +} + +static void rtl8169_print_mac_version(struct rtl8169_private *tp) +{ + dprintk("mac_version = 0x%02x\n", tp->mac_version); +} + +struct phy_reg { + u16 reg; + u16 val; +}; + +static void rtl_writephy_batch(struct rtl8169_private *tp, + const struct phy_reg *regs, int len) +{ + while (len-- > 0) { + rtl_writephy(tp, regs->reg, regs->val); + regs++; + } +} + +#define PHY_READ 0x00000000 +#define PHY_DATA_OR 0x10000000 +#define PHY_DATA_AND 0x20000000 +#define PHY_BJMPN 0x30000000 +#define PHY_MDIO_CHG 0x40000000 +#define PHY_CLEAR_READCOUNT 0x70000000 +#define PHY_WRITE 0x80000000 +#define PHY_READCOUNT_EQ_SKIP 0x90000000 +#define PHY_COMP_EQ_SKIPN 0xa0000000 +#define PHY_COMP_NEQ_SKIPN 0xb0000000 +#define PHY_WRITE_PREVIOUS 0xc0000000 +#define PHY_SKIPN 0xd0000000 +#define PHY_DELAY_MS 0xe0000000 + +struct fw_info { + u32 magic; + char version[RTL_VER_SIZE]; + __le32 fw_start; + __le32 fw_len; + u8 chksum; +} __packed; + +#define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code)) + +static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + const struct firmware *fw = rtl_fw->fw; + struct fw_info *fw_info = (struct fw_info *)fw->data; + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + char *version = rtl_fw->version; + bool rc = false; + + if (fw->size < FW_OPCODE_SIZE) + goto out; + + if (!fw_info->magic) { + size_t i, size, start; + u8 checksum = 0; + + if (fw->size < sizeof(*fw_info)) + goto out; + + for (i = 0; i < fw->size; i++) + checksum += fw->data[i]; + if (checksum != 0) + goto out; + + start = le32_to_cpu(fw_info->fw_start); + if (start > fw->size) + goto out; + + size = le32_to_cpu(fw_info->fw_len); + if (size > (fw->size - start) / FW_OPCODE_SIZE) + goto out; + + memcpy(version, fw_info->version, RTL_VER_SIZE); + + pa->code = (__le32 *)(fw->data + start); + pa->size = size; + } else { + if (fw->size % FW_OPCODE_SIZE) + goto out; + + strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE); + + pa->code = (__le32 *)fw->data; + pa->size = fw->size / FW_OPCODE_SIZE; + } + version[RTL_VER_SIZE - 1] = 0; + + rc = true; +out: + return rc; +} + +static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev, + struct rtl_fw_phy_action *pa) +{ + bool rc = false; + size_t index; + + for (index = 0; index < pa->size; index++) { + u32 action = le32_to_cpu(pa->code[index]); + u32 regno = (action & 0x0fff0000) >> 16; + + switch(action & 0xf0000000) { + case PHY_READ: + case PHY_DATA_OR: + case PHY_DATA_AND: + case PHY_MDIO_CHG: + case PHY_CLEAR_READCOUNT: + case PHY_WRITE: + case PHY_WRITE_PREVIOUS: + case PHY_DELAY_MS: + break; + + case PHY_BJMPN: + if (regno > index) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + case PHY_READCOUNT_EQ_SKIP: + if (index + 2 >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + case PHY_COMP_EQ_SKIPN: + case PHY_COMP_NEQ_SKIPN: + case PHY_SKIPN: + if (index + 1 + regno >= pa->size) { + netif_err(tp, ifup, tp->dev, + "Out of range of firmware\n"); + goto out; + } + break; + + default: + netif_err(tp, ifup, tp->dev, + "Invalid action 0x%08x\n", action); + goto out; + } + } + rc = true; +out: + return rc; +} + +static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct net_device *dev = tp->dev; + int rc = -EINVAL; + + if (!rtl_fw_format_ok(tp, rtl_fw)) { + netif_err(tp, ifup, dev, "invalid firmware\n"); + 
goto out; + } + + if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action)) + rc = 0; +out: + return rc; +} + +static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) +{ + struct rtl_fw_phy_action *pa = &rtl_fw->phy_action; + struct mdio_ops org, *ops = &tp->mdio_ops; + u32 predata, count; + size_t index; + + predata = count = 0; + org.write = ops->write; + org.read = ops->read; + + for (index = 0; index < pa->size; ) { + u32 action = le32_to_cpu(pa->code[index]); + u32 data = action & 0x0000ffff; + u32 regno = (action & 0x0fff0000) >> 16; + + if (!action) + break; + + switch(action & 0xf0000000) { + case PHY_READ: + predata = rtl_readphy(tp, regno); + count++; + index++; + break; + case PHY_DATA_OR: + predata |= data; + index++; + break; + case PHY_DATA_AND: + predata &= data; + index++; + break; + case PHY_BJMPN: + index -= regno; + break; + case PHY_MDIO_CHG: + if (data == 0) { + ops->write = org.write; + ops->read = org.read; + } else if (data == 1) { + ops->write = mac_mcu_write; + ops->read = mac_mcu_read; + } + + index++; + break; + case PHY_CLEAR_READCOUNT: + count = 0; + index++; + break; + case PHY_WRITE: + rtl_writephy(tp, regno, data); + index++; + break; + case PHY_READCOUNT_EQ_SKIP: + index += (count == data) ? 2 : 1; + break; + case PHY_COMP_EQ_SKIPN: + if (predata == data) + index += regno; + index++; + break; + case PHY_COMP_NEQ_SKIPN: + if (predata != data) + index += regno; + index++; + break; + case PHY_WRITE_PREVIOUS: + rtl_writephy(tp, regno, predata); + index++; + break; + case PHY_SKIPN: + index += regno + 1; + break; + case PHY_DELAY_MS: + mdelay(data); + index++; + break; + + default: + BUG(); + } + } + + ops->write = org.write; + ops->read = org.read; +} + +static void rtl_release_firmware(struct rtl8169_private *tp) +{ + if (!IS_ERR_OR_NULL(tp->rtl_fw)) { + release_firmware(tp->rtl_fw->fw); + kfree(tp->rtl_fw); + } + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; +} + +static void rtl_apply_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw = tp->rtl_fw; + + /* TODO: release firmware once rtl_phy_write_fw signals failures. 
*/ + if (!IS_ERR_OR_NULL(rtl_fw)) + rtl_phy_write_fw(tp, rtl_fw); +} + +static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val) +{ + if (rtl_readphy(tp, reg) != val) + netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n"); + else + rtl_apply_firmware(tp); +} + +static void rtl8169s_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x06, 0x006e }, + { 0x08, 0x0708 }, + { 0x15, 0x4000 }, + { 0x18, 0x65c7 }, + + { 0x1f, 0x0001 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x0000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf60 }, + { 0x01, 0x0140 }, + { 0x00, 0x0077 }, + { 0x04, 0x7800 }, + { 0x04, 0x7000 }, + + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf0f9 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xa000 }, + + { 0x03, 0xff41 }, + { 0x02, 0xdf20 }, + { 0x01, 0x0140 }, + { 0x00, 0x00bb }, + { 0x04, 0xb800 }, + { 0x04, 0xb000 }, + + { 0x03, 0xdf41 }, + { 0x02, 0xdc60 }, + { 0x01, 0x6340 }, + { 0x00, 0x007d }, + { 0x04, 0xd800 }, + { 0x04, 0xd000 }, + + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x100a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + + { 0x1f, 0x0000 }, + { 0x0b, 0x0000 }, + { 0x00, 0x9200 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x01, 0x90d0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp) +{ + struct pci_dev *pdev = tp->pci_dev; + + if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) || + (pdev->subsystem_device != 0xe000)) + return; + + rtl_writephy(tp, 0x1f, 0x0001); + rtl_writephy(tp, 0x10, 0xf01b); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x14, 0xfb54 }, + { 0x18, 0xf5c7 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl8169scd_hw_phy_config_quirk(tp); +} + +static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x04, 0x0000 }, + { 0x03, 0x00a1 }, + { 0x02, 0x0008 }, + { 0x01, 0x0120 }, + { 0x00, 0x1000 }, + { 0x04, 0x0800 }, + { 0x04, 0x9000 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0xa000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0xff95 }, + { 0x00, 0xba00 }, + { 0x04, 0xa800 }, + { 
0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0x8480 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x18, 0x67c7 }, + { 0x04, 0x2000 }, + { 0x03, 0x002f }, + { 0x02, 0x4360 }, + { 0x01, 0x0109 }, + { 0x00, 0x3022 }, + { 0x04, 0x2800 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x10, 0xf41b }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0001); + rtl_patchphy(tp, 0x16, 1 << 0); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x10, 0xf41b }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0000 }, + { 0x1d, 0x0f00 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x1ec8 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0000); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1f, 0x0002 }, + { 0x00, 0x88d4 }, + { 0x01, 0x82b1 }, + { 0x03, 0x7002 }, + { 0x08, 0x9e30 }, + { 0x09, 0x01f0 }, + { 0x0a, 0x5500 }, + { 0x0c, 0x00c8 }, + { 0x1f, 0x0003 }, + { 0x12, 0xc096 }, + { 0x16, 0x000a }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x09, 0x2000 }, + { 0x09, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x03, 0x802f }, + { 0x02, 0x4f02 }, + { 0x01, 0x0409 }, + { 0x00, 0xf099 }, + { 0x04, 0x9800 }, + { 0x04, 0x9000 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x0761 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x16, 1 << 0); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x12, 0x2300 }, + { 0x1d, 0x3d98 }, + { 0x1f, 0x0002 }, + { 0x0c, 0x7eb8 }, + { 0x06, 0x5461 }, + { 0x1f, 0x0003 }, + { 0x16, 0x0f0a }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_patchphy(tp, 0x16, 1 << 0); + rtl_patchphy(tp, 0x14, 1 << 5); + rtl_patchphy(tp, 0x0d, 1 << 5); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void 
rtl8168c_4_hw_phy_config(struct rtl8169_private *tp) +{ + rtl8168c_3_hw_phy_config(tp); +} + +static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Cannot link at 1Gbps with a bad cable + * Decrease SNR threshold from 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } + }; + + rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + + /* + * Rx Error Issue + * Fine Tune Switching regulator parameter + */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w0w1_phy(tp, 0x0b, 0x0010, 0x00ef); + rtl_w0w1_phy(tp, 0x0c, 0xa200, 0x5d00); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + { 0x1f, 0x0002 } + }; + int val; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = rtl_readphy(tp, 0x0d); + + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + rtl_writephy(tp, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + rtl_writephy(tp, 0x0d, val | set[i]); + } + } else { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x6662 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x6662 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + /* RSET couple improve */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_patchphy(tp, 0x0d, 0x0300); + rtl_patchphy(tp, 0x0f, 0x0010); + + /* Fine tune PLL performance */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x001b); + + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init_0[] = { + /* Channel Estimation */ + { 0x1f, 0x0001 }, + { 0x06, 0x4064 }, + { 0x07, 0x2863 }, + { 0x08, 0x059c }, + { 0x09, 0x26b4 }, + { 0x0a, 0x6a19 }, + { 0x0b, 0xdcc8 }, + { 0x10, 0xf06d }, + { 0x14, 0x7f68 }, + { 0x18, 0x7fd9 }, + { 0x1c, 0xf0ff }, + { 0x1d, 0x3d9c }, + { 0x1f, 0x0003 }, + { 0x12, 0xf49f }, + { 0x13, 0x070b }, + { 0x1a, 0x05ad }, + { 0x14, 0x94c0 }, + + /* + * Tx Error Issue + * Enhance line driver power + */ + { 0x1f, 0x0002 }, + { 0x06, 0x5561 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8332 }, + { 0x06, 0x5561 }, + + /* + * Cannot link at 1Gbps with a bad cable + * Decrease SNR threshold from 21.07dB to 19.04dB + */ + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 } + }; + + rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); + + if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x669a }, + 
{ 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x669a }, + + { 0x1f, 0x0002 } + }; + int val; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + val = rtl_readphy(tp, 0x0d); + if ((val & 0x00ff) != 0x006c) { + static const u32 set[] = { + 0x0065, 0x0066, 0x0067, 0x0068, + 0x0069, 0x006a, 0x006b, 0x006c + }; + int i; + + rtl_writephy(tp, 0x1f, 0x0002); + + val &= 0xff00; + for (i = 0; i < ARRAY_SIZE(set); i++) + rtl_writephy(tp, 0x0d, val | set[i]); + } + } else { + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x05, 0x2642 }, + { 0x1f, 0x0005 }, + { 0x05, 0x8330 }, + { 0x06, 0x2642 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + } + + /* Fine tune PLL performance */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w0w1_phy(tp, 0x02, 0x0100, 0x0600); + rtl_w0w1_phy(tp, 0x03, 0x0000, 0xe000); + + /* Switching regulator Slew rate */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_patchphy(tp, 0x0f, 0x0017); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x001b); + + rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0002 }, + { 0x10, 0x0008 }, + { 0x0d, 0x006c }, + + { 0x1f, 0x0000 }, + { 0x0d, 0xf880 }, + + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0001 }, + { 0x0b, 0xa4d8 }, + { 0x09, 0x281c }, + { 0x07, 0x2883 }, + { 0x0a, 0x6b35 }, + { 0x1d, 0x3da4 }, + { 0x1c, 0xeffd }, + { 0x14, 0x7f52 }, + { 0x18, 0x7fc6 }, + { 0x08, 0x0601 }, + { 0x06, 0x4063 }, + { 0x10, 0xf074 }, + { 0x1f, 0x0003 }, + { 0x13, 0x0789 }, + { 0x12, 0xf4bd }, + { 0x1a, 0x04fd }, + { 0x14, 0x84b0 }, + { 0x1f, 0x0000 }, + { 0x00, 0x9200 }, + + { 0x1f, 0x0005 }, + { 0x01, 0x0340 }, + { 0x1f, 0x0001 }, + { 0x04, 0x4000 }, + { 0x03, 0x1d21 }, + { 0x02, 0x0c32 }, + { 0x01, 0x0200 }, + { 0x00, 0x5554 }, + { 0x04, 0x4800 }, + { 0x04, 0x4000 }, + { 0x04, 0xf000 }, + { 0x03, 0xdf01 }, + { 0x02, 0xdf20 }, + { 0x01, 0x101a }, + { 0x00, 0xa0ff }, + { 0x04, 0xf800 }, + { 0x04, 0xf000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x0023 }, + { 0x16, 0x0000 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0001 }, + { 0x17, 0x0cc0 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x002d }, + { 0x18, 0x0040 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + rtl_patchphy(tp, 0x0d, 1 << 5); +} + +static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Enable Delay cap */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b80 }, + { 0x06, 0xc896 }, + { 0x1f, 0x0000 }, + + /* Channel estimation fine tune */ + { 0x1f, 0x0001 }, + { 0x0b, 0x6c20 }, + { 0x07, 0x2872 }, + { 0x1c, 0xefff }, + { 0x1f, 0x0003 }, + { 0x14, 0x6420 }, + { 0x1f, 0x0000 }, + + /* Update PFM & 10M TX idle timer */ + { 0x1f, 0x0007 }, + { 0x1e, 0x002f }, + { 0x15, 0x1919 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0007 }, + { 0x1e, 0x00ac }, + { 0x18, 0x0006 }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* DCO enable for 10M IDLE Power */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0023); + rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + 
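/* + * PHY register 0x1f selects the register page: the tuning blocks below + * pick a page, patch bits via rtl_w0w1_phy(reg, set, clear) and return + * to page 0x0000 when done. The trailing 0x0d/0x0e writes go through + * the clause-22 MMD access registers instead. + */ +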
/* For impedance matching */ + rtl_writephy(tp, 0x1f, 0x0002); + rtl_w0w1_phy(tp, 0x08, 0x8000, 0x7f00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w0w1_phy(tp, 0x18, 0x0050, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x1100); + rtl_writephy(tp, 0x1f, 0x0006); + rtl_writephy(tp, 0x00, 0x5a00); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); +} + +static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr) +{ + const u16 w[] = { + addr[0] | (addr[1] << 8), + addr[2] | (addr[3] << 8), + addr[4] | (addr[5] << 8) + }; + const struct exgmac_reg e[] = { + { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) }, + { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] }, + { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 }, + { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) } + }; + + rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e)); +} + +static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Enable Delay cap */ + { 0x1f, 0x0004 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x00ac }, + { 0x18, 0x0006 }, + { 0x1f, 0x0002 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0000 }, + + /* Green Setting */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b5b }, + { 0x06, 0x9222 }, + { 0x05, 0x8b6d }, + { 0x06, 0x8000 }, + { 0x05, 0x8b76 }, + { 0x06, 0x8000 }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w0w1_phy(tp, 0x17, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0002); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + + /* improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* EEE setting */ + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0002); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 
0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); + + /* Green feature */ + rtl_writephy(tp, 0x1f, 0x0003); + rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001); + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Broken BIOS workaround: feed GigaMAC registers with MAC address. */ + rtl_rar_exgmac_set(tp, tp->dev->dev_addr); +} + +static void rtl8168f_hw_phy_config(struct rtl8169_private *tp) +{ + /* For 4-corner performance improve */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b80); + rtl_w0w1_phy(tp, 0x06, 0x0006, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x002d); + rtl_w0w1_phy(tp, 0x18, 0x0010, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + + /* Improve 10M EEE waveform */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b86); + rtl_w0w1_phy(tp, 0x06, 0x0001, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + + /* Modify green table for giga & fnet */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b55 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b5e }, + { 0x06, 0x0000 }, + { 0x05, 0x8b67 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b70 }, + { 0x06, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x0078 }, + { 0x17, 0x0000 }, + { 0x19, 0x00fb }, + { 0x1f, 0x0000 }, + + /* Modify green table for 10M */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b79 }, + { 0x06, 0xaa00 }, + { 0x1f, 0x0000 }, + + /* Disable high-impedance detection (RTCT) */ + { 0x1f, 0x0003 }, + { 0x01, 0x328a }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl8168f_hw_phy_config(tp); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp); +} + +static void rtl8411_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + /* Channel estimation fine tune */ + { 0x1f, 0x0003 }, + { 0x09, 0xa20f }, + { 0x1f, 0x0000 }, + + /* Modify green table for giga & fnet */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b55 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b5e }, + { 0x06, 0x0000 }, + { 0x05, 0x8b67 }, + { 0x06, 0x0000 }, + { 0x05, 0x8b70 }, + { 0x06, 0x0000 }, + { 0x1f, 0x0000 }, + { 0x1f, 0x0007 }, + { 0x1e, 0x0078 }, + { 0x17, 0x0000 }, + { 0x19, 0x00aa }, + { 0x1f, 0x0000 }, + + /* Modify green table for 10M */ + { 0x1f, 0x0005 }, + { 0x05, 0x8b79 }, + { 0x06, 0xaa00 }, + { 0x1f, 0x0000 }, + + /* Disable high-impedance detection (RTCT) */ + { 0x1f, 0x0003 }, + { 0x01, 0x328a }, + { 0x1f, 0x0000 } + }; + + rtl_apply_firmware(tp); + + rtl8168f_hw_phy_config(tp); + + /* Improve 2-pair detection performance */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x4000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + /* Modify green table for giga */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b54); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800); + rtl_writephy(tp, 0x05, 
0x8b5d); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0800); + rtl_writephy(tp, 0x05, 0x8a7c); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a7f); + rtl_w0w1_phy(tp, 0x06, 0x0100, 0x0000); + rtl_writephy(tp, 0x05, 0x8a82); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a85); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x05, 0x8a88); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0000); + + /* uc same-seed solution */ + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* eee setting */ + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0005); + rtl_writephy(tp, 0x05, 0x8b85); + rtl_w0w1_phy(tp, 0x06, 0x0000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x1f, 0x0007); + rtl_writephy(tp, 0x1e, 0x0020); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x0d, 0x0007); + rtl_writephy(tp, 0x0e, 0x003c); + rtl_writephy(tp, 0x0d, 0x4007); + rtl_writephy(tp, 0x0e, 0x0000); + rtl_writephy(tp, 0x0d, 0x0000); + + /* Green feature */ + rtl_writephy(tp, 0x1f, 0x0003); + rtl_w0w1_phy(tp, 0x19, 0x0000, 0x0001); + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0400); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); + + rtl_writephy(tp, 0x1f, 0x0a46); + if (rtl_readphy(tp, 0x10) & 0x0100) { + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x12, 0x0000, 0x8000); + } else { + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x12, 0x8000, 0x0000); + } + + rtl_writephy(tp, 0x1f, 0x0a46); + if (rtl_readphy(tp, 0x13) & 0x0100) { + rtl_writephy(tp, 0x1f, 0x0c41); + rtl_w0w1_phy(tp, 0x15, 0x0002, 0x0000); + } else { + rtl_writephy(tp, 0x1f, 0x0c41); + rtl_w0w1_phy(tp, 0x15, 0x0000, 0x0002); + } + + /* Enable PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x14, 0x0100, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8084); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); + rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); + + /* EEE auto-fallback function */ + rtl_writephy(tp, 0x1f, 0x0a4b); + rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000); + + /* Enable UC LPF tune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8012); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0c42); + rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); + + /* Improve SWR Efficiency */ + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x5065); + rtl_writephy(tp, 0x14, 0xd065); + rtl_writephy(tp, 0x1f, 0x0bc8); + rtl_writephy(tp, 0x11, 0x5655); + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x1065); + rtl_writephy(tp, 0x14, 0x9065); + rtl_writephy(tp, 0x14, 0x1065); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168g_2_hw_phy_config(struct rtl8169_private *tp) +{ + rtl_apply_firmware(tp); +} + +static void rtl8168h_1_hw_phy_config(struct rtl8169_private *tp) +{ + u16 dout_tapbin; + u32 data; + + rtl_apply_firmware(tp); + + /* CHN EST parameters adjust - giga master */ + rtl_writephy(tp, 
0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x809b); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0xf800); + rtl_writephy(tp, 0x13, 0x80a2); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0xff00); + rtl_writephy(tp, 0x13, 0x80a4); + rtl_w0w1_phy(tp, 0x14, 0x8500, 0xff00); + rtl_writephy(tp, 0x13, 0x809c); + rtl_w0w1_phy(tp, 0x14, 0xbd00, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* CHN EST parameters adjust - giga slave */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x80ad); + rtl_w0w1_phy(tp, 0x14, 0x7000, 0xf800); + rtl_writephy(tp, 0x13, 0x80b4); + rtl_w0w1_phy(tp, 0x14, 0x5000, 0xff00); + rtl_writephy(tp, 0x13, 0x80ac); + rtl_w0w1_phy(tp, 0x14, 0x4000, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* CHN EST parameters adjust - fnet */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x808e); + rtl_w0w1_phy(tp, 0x14, 0x1200, 0xff00); + rtl_writephy(tp, 0x13, 0x8090); + rtl_w0w1_phy(tp, 0x14, 0xe500, 0xff00); + rtl_writephy(tp, 0x13, 0x8092); + rtl_w0w1_phy(tp, 0x14, 0x9f00, 0xff00); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable R-tune & PGA-retune function */ + dout_tapbin = 0; + rtl_writephy(tp, 0x1f, 0x0a46); + data = rtl_readphy(tp, 0x13); + data &= 3; + data <<= 2; + dout_tapbin |= data; + data = rtl_readphy(tp, 0x12); + data &= 0xc000; + data >>= 14; + dout_tapbin |= data; + dout_tapbin = ~(dout_tapbin^0x08); + dout_tapbin <<= 12; + dout_tapbin &= 0xf000; + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x827a); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827b); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827c); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + rtl_writephy(tp, 0x13, 0x827d); + rtl_w0w1_phy(tp, 0x14, dout_tapbin, 0xf000); + + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x0811); + rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a42); + rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable GPHY 10M */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* SAR ADC performance */ + rtl_writephy(tp, 0x1f, 0x0bca); + rtl_w0w1_phy(tp, 0x17, 0x4000, 0x3000); + rtl_writephy(tp, 0x1f, 0x0000); + + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x803f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8047); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x804f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8057); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x805f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x8067); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x13, 0x806f); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x3000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* disable phy pfm mode */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0080); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168h_2_hw_phy_config(struct rtl8169_private *tp) +{ + u16 ioffset_p3, ioffset_p2, ioffset_p1, ioffset_p0; + u16 rlen; + u32 data; + + rtl_apply_firmware(tp); + + /* CHN EST parameter update */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x808a); + rtl_w0w1_phy(tp, 0x14, 0x000a, 0x003f); + rtl_writephy(tp, 0x1f, 0x0000); + + /* 
enable R-tune & PGA-retune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x0811); + rtl_w0w1_phy(tp, 0x14, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a42); + rtl_w0w1_phy(tp, 0x16, 0x0002, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* enable GPHY 10M */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x0800, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + r8168_mac_ocp_write(tp, 0xdd02, 0x807d); + data = r8168_mac_ocp_read(tp, 0xdd02); + ioffset_p3 = ((data & 0x80)>>7); + ioffset_p3 <<= 3; + + data = r8168_mac_ocp_read(tp, 0xdd00); + ioffset_p3 |= ((data & (0xe000))>>13); + ioffset_p2 = ((data & (0x1e00))>>9); + ioffset_p1 = ((data & (0x01e0))>>5); + ioffset_p0 = ((data & 0x0010)>>4); + ioffset_p0 <<= 3; + ioffset_p0 |= (data & (0x07)); + data = (ioffset_p3<<12)|(ioffset_p2<<8)|(ioffset_p1<<4)|(ioffset_p0); + + if ((ioffset_p3 != 0x0f) || (ioffset_p2 != 0x0f) || + (ioffset_p1 != 0x0f) || (ioffset_p0 == 0x0f)) { + rtl_writephy(tp, 0x1f, 0x0bcf); + rtl_writephy(tp, 0x16, data); + rtl_writephy(tp, 0x1f, 0x0000); + } + + /* Modify rlen (TX LPF corner frequency) level */ + rtl_writephy(tp, 0x1f, 0x0bcd); + data = rtl_readphy(tp, 0x16); + data &= 0x000f; + rlen = 0; + if (data > 3) + rlen = data - 3; + data = rlen | (rlen<<4) | (rlen<<8) | (rlen<<12); + rtl_writephy(tp, 0x17, data); + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x1f, 0x0000); + + /* disable phy pfm mode */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0080); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168ep_1_hw_phy_config(struct rtl8169_private *tp) +{ + /* Enable PHY auto speed down */ + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x000c, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* patch 10M & ALDPS */ + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8084); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); + rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Enable EEE auto-fallback function */ + rtl_writephy(tp, 0x1f, 0x0a4b); + rtl_w0w1_phy(tp, 0x11, 0x0004, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Enable UC LPF tune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8012); + rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* set rg_sel_sdm_rate */ + rtl_writephy(tp, 0x1f, 0x0c42); + rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8168ep_2_hw_phy_config(struct rtl8169_private *tp) +{ + /* patch 10M & ALDPS */ + rtl_writephy(tp, 0x1f, 0x0bcc); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x0100); + rtl_writephy(tp, 0x1f, 0x0a44); + rtl_w0w1_phy(tp, 0x11, 0x00c0, 0x0000); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8084); + rtl_w0w1_phy(tp, 0x14, 0x0000, 0x6000); + rtl_w0w1_phy(tp, 0x10, 0x1003, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Enable UC LPF tune function */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8012); + 
rtl_w0w1_phy(tp, 0x14, 0x8000, 0x0000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Set rg_sel_sdm_rate */ + rtl_writephy(tp, 0x1f, 0x0c42); + rtl_w0w1_phy(tp, 0x11, 0x4000, 0x2000); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Channel estimation parameters */ + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x80f3); + rtl_w0w1_phy(tp, 0x14, 0x8b00, ~0x8bff); + rtl_writephy(tp, 0x13, 0x80f0); + rtl_w0w1_phy(tp, 0x14, 0x3a00, ~0x3aff); + rtl_writephy(tp, 0x13, 0x80ef); + rtl_w0w1_phy(tp, 0x14, 0x0500, ~0x05ff); + rtl_writephy(tp, 0x13, 0x80f6); + rtl_w0w1_phy(tp, 0x14, 0x6e00, ~0x6eff); + rtl_writephy(tp, 0x13, 0x80ec); + rtl_w0w1_phy(tp, 0x14, 0x6800, ~0x68ff); + rtl_writephy(tp, 0x13, 0x80ed); + rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff); + rtl_writephy(tp, 0x13, 0x80f2); + rtl_w0w1_phy(tp, 0x14, 0xf400, ~0xf4ff); + rtl_writephy(tp, 0x13, 0x80f4); + rtl_w0w1_phy(tp, 0x14, 0x8500, ~0x85ff); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x8110); + rtl_w0w1_phy(tp, 0x14, 0xa800, ~0xa8ff); + rtl_writephy(tp, 0x13, 0x810f); + rtl_w0w1_phy(tp, 0x14, 0x1d00, ~0x1dff); + rtl_writephy(tp, 0x13, 0x8111); + rtl_w0w1_phy(tp, 0x14, 0xf500, ~0xf5ff); + rtl_writephy(tp, 0x13, 0x8113); + rtl_w0w1_phy(tp, 0x14, 0x6100, ~0x61ff); + rtl_writephy(tp, 0x13, 0x8115); + rtl_w0w1_phy(tp, 0x14, 0x9200, ~0x92ff); + rtl_writephy(tp, 0x13, 0x810e); + rtl_w0w1_phy(tp, 0x14, 0x0400, ~0x04ff); + rtl_writephy(tp, 0x13, 0x810c); + rtl_w0w1_phy(tp, 0x14, 0x7c00, ~0x7cff); + rtl_writephy(tp, 0x13, 0x810b); + rtl_w0w1_phy(tp, 0x14, 0x5a00, ~0x5aff); + rtl_writephy(tp, 0x1f, 0x0a43); + rtl_writephy(tp, 0x13, 0x80d1); + rtl_w0w1_phy(tp, 0x14, 0xff00, ~0xffff); + rtl_writephy(tp, 0x13, 0x80cd); + rtl_w0w1_phy(tp, 0x14, 0x9e00, ~0x9eff); + rtl_writephy(tp, 0x13, 0x80d3); + rtl_w0w1_phy(tp, 0x14, 0x0e00, ~0x0eff); + rtl_writephy(tp, 0x13, 0x80d5); + rtl_w0w1_phy(tp, 0x14, 0xca00, ~0xcaff); + rtl_writephy(tp, 0x13, 0x80d7); + rtl_w0w1_phy(tp, 0x14, 0x8400, ~0x84ff); + + /* Force PWM-mode */ + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x5065); + rtl_writephy(tp, 0x14, 0xd065); + rtl_writephy(tp, 0x1f, 0x0bc8); + rtl_writephy(tp, 0x12, 0x00ed); + rtl_writephy(tp, 0x1f, 0x0bcd); + rtl_writephy(tp, 0x14, 0x1065); + rtl_writephy(tp, 0x14, 0x9065); + rtl_writephy(tp, 0x14, 0x1065); + rtl_writephy(tp, 0x1f, 0x0000); + + /* Check ALDPS bit, disable it if enabled */ + rtl_writephy(tp, 0x1f, 0x0a43); + if (rtl_readphy(tp, 0x10) & 0x0004) + rtl_w0w1_phy(tp, 0x10, 0x0000, 0x0004); + + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8102e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0003 }, + { 0x08, 0x441d }, + { 0x01, 0x9100 }, + { 0x1f, 0x0000 } + }; + + rtl_writephy(tp, 0x1f, 0x0000); + rtl_patchphy(tp, 0x11, 1 << 12); + rtl_patchphy(tp, 0x19, 1 << 13); + rtl_patchphy(tp, 0x10, 1 << 15); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8105e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0005 }, + { 0x1a, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0004 }, + { 0x1c, 0x0000 }, + { 0x1f, 0x0000 }, + + { 0x1f, 0x0001 }, + { 0x15, 0x7701 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(100); + + rtl_apply_firmware(tp); + + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); +} + +static void rtl8402_hw_phy_config(struct rtl8169_private *tp) +{ + /* 
Disable ALDPS before setting firmware */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(20); + + rtl_apply_firmware(tp); + + /* EEE setting */ + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_writephy(tp, 0x1f, 0x0004); + rtl_writephy(tp, 0x10, 0x401f); + rtl_writephy(tp, 0x19, 0x7030); + rtl_writephy(tp, 0x1f, 0x0000); +} + +static void rtl8106e_hw_phy_config(struct rtl8169_private *tp) +{ + static const struct phy_reg phy_reg_init[] = { + { 0x1f, 0x0004 }, + { 0x10, 0xc07f }, + { 0x19, 0x7030 }, + { 0x1f, 0x0000 } + }; + + /* Disable ALDPS before ram code */ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, 0x18, 0x0310); + msleep(100); + + rtl_apply_firmware(tp); + + rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init)); + + rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); +} + +static void rtl_hw_phy_config(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_print_mac_version(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + break; + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + rtl8169s_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_04: + rtl8169sb_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_05: + rtl8169scd_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_06: + rtl8169sce_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + rtl8102e_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_11: + rtl8168bb_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_12: + rtl8168bef_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_17: + rtl8168bef_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_18: + rtl8168cp_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_19: + rtl8168c_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_20: + rtl8168c_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_21: + rtl8168c_3_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_22: + rtl8168c_4_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + rtl8168cp_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_25: + rtl8168d_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_26: + rtl8168d_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_27: + rtl8168d_3_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_28: + rtl8168d_4_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + rtl8105e_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_31: + /* None. 
*/ + break; + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl8168e_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_34: + rtl8168e_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_35: + rtl8168f_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_36: + rtl8168f_2_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_37: + rtl8402_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_38: + rtl8411_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_39: + rtl8106e_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_40: + rtl8168g_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + rtl8168g_2_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_47: + rtl8168h_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_48: + rtl8168h_2_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_49: + rtl8168ep_1_hw_phy_config(tp); + break; + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl8168ep_2_hw_phy_config(tp); + break; + + case RTL_GIGA_MAC_VER_41: + default: + break; + } +} + +static void rtl_phy_work(struct rtl8169_private *tp) +{ + struct timer_list *timer = &tp->timer; + void __iomem *ioaddr = tp->mmio_addr; + unsigned long timeout = RTL8169_PHY_TIMEOUT; + + assert(tp->mac_version > RTL_GIGA_MAC_VER_01); + + if (tp->phy_reset_pending(tp)) { + /* + * A busy loop could burn quite a few cycles on today's CPUs. + * Let's delay the execution of the timer for a few ticks. + */ + timeout = HZ/10; + goto out_mod_timer; + } + + if (tp->link_ok(ioaddr)) + return; + + netif_dbg(tp, link, tp->dev, "PHY reset until link up\n"); + + tp->phy_reset_enable(tp); + +out_mod_timer: + mod_timer(timer, jiffies + timeout); +} + +static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag) +{ + if (!test_and_set_bit(flag, tp->wk.flags)) + schedule_work(&tp->wk.work); +} + +static void rtl8169_phy_timer(unsigned long __opaque) +{ + struct net_device *dev = (struct net_device *)__opaque; + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING); +} + +static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev, + void __iomem *ioaddr) +{ + iounmap(ioaddr); + pci_release_regions(pdev); + pci_clear_mwi(pdev); + pci_disable_device(pdev); + free_netdev(dev); +} + +DECLARE_RTL_COND(rtl_phy_reset_cond) +{ + return tp->phy_reset_pending(tp); +} + +static void rtl8169_phy_reset(struct net_device *dev, + struct rtl8169_private *tp) +{ + tp->phy_reset_enable(tp); + rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100); +} + +static bool rtl_tbi_enabled(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return (tp->mac_version == RTL_GIGA_MAC_VER_01) && + (RTL_R8(PHYstatus) & TBI_Enable); +} + +static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + rtl_hw_phy_config(dev); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { + dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8(0x82, 0x01); + } + + pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); + + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08); + + if (tp->mac_version == RTL_GIGA_MAC_VER_02) { + dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); + RTL_W8(0x82, 0x01); + dprintk("Set PHY Reg 0x0bh = 0x00h\n"); + rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0 + } + + 
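/* Reset the PHY and wait for the reset to complete, then restart autonegotiation advertising the full 10/100 set, plus 1000BASE-T when the PHY is GMII-capable. */ +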
rtl8169_phy_reset(dev, tp); + + rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + (tp->mii.supports_gmii ? + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0)); + + if (rtl_tbi_enabled(tp)) + netif_info(tp, link, dev, "TBI auto-negotiating\n"); +} + +static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + rtl_lock_work(tp); + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + RTL_W32(MAC4, addr[4] | addr[5] << 8); + RTL_R32(MAC4); + + RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24); + RTL_R32(MAC0); + + if (tp->mac_version == RTL_GIGA_MAC_VER_34) + rtl_rar_exgmac_set(tp, addr); + + RTL_W8(Cfg9346, Cfg9346_Lock); + + rtl_unlock_work(tp); +} + +static int rtl_set_mac_address(struct net_device *dev, void *p) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); + + rtl_rar_set(tp, dev->dev_addr); + + return 0; +} + +static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct mii_ioctl_data *data = if_mii(ifr); + + return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV; +} + +static int rtl_xmii_ioctl(struct rtl8169_private *tp, + struct mii_ioctl_data *data, int cmd) +{ + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = 32; /* Internal PHY */ + return 0; + + case SIOCGMIIREG: + data->val_out = rtl_readphy(tp, data->reg_num & 0x1f); + return 0; + + case SIOCSMIIREG: + rtl_writephy(tp, data->reg_num & 0x1f, data->val_in); + return 0; + } + return -EOPNOTSUPP; +} + +static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd) +{ + return -EOPNOTSUPP; +} + +static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp) +{ + if (tp->features & RTL_FEATURE_MSI) { + pci_disable_msi(pdev); + tp->features &= ~RTL_FEATURE_MSI; + } +} + +static void rtl_init_mdio_ops(struct rtl8169_private *tp) +{ + struct mdio_ops *ops = &tp->mdio_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_27: + ops->write = r8168dp_1_mdio_write; + ops->read = r8168dp_1_mdio_read; + break; + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + ops->write = r8168dp_2_mdio_write; + ops->read = r8168dp_2_mdio_read; + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + ops->write = r8168g_mdio_write; + ops->read = r8168g_mdio_read; + break; + default: + ops->write = r8169_mdio_write; + ops->read = r8169_mdio_read; + break; + } +} + +static void rtl_speed_down(struct rtl8169_private *tp) +{ + u32 adv; + int lpa; + + rtl_writephy(tp, 0x1f, 0x0000); + lpa = rtl_readphy(tp, MII_LPA); + + if (lpa & (LPA_10HALF | LPA_10FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; + else if (lpa & (LPA_100HALF | LPA_100FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; + else + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | 
ADVERTISED_100baseT_Full | + (tp->mii.supports_gmii ? + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0); + + rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + adv); +} + +static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + RTL_W32(RxConfig, RTL_R32(RxConfig) | + AcceptBroadcast | AcceptMulticast | AcceptMyPhys); + break; + default: + break; + } +} + +static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) +{ + if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) + return false; + + rtl_speed_down(tp); + rtl_wol_suspend_quirk(tp); + + return true; +} + +static void r810x_phy_power_down(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); +} + +static void r810x_phy_power_up(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); +} + +static void r810x_pll_power_down(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if (rtl_wol_pll_power_down(tp)) + return; + + r810x_phy_power_down(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_16: + break; + default: + RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + break; + } +} + +static void r810x_pll_power_up(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r810x_phy_power_up(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_16: + break; + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + break; + default: + RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + break; + } +} + +static void r8168_phy_power_up(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_writephy(tp, 0x0e, 0x0000); + break; + default: + break; + } + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); +} + +static void r8168_phy_power_down(struct rtl8169_private *tp) +{ + rtl_writephy(tp, 0x1f, 0x0000); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN); + break; + + case RTL_GIGA_MAC_VER_11: + case 
RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + rtl_writephy(tp, 0x0e, 0x0200); + default: + rtl_writephy(tp, MII_BMCR, BMCR_PDOWN); + break; + } +} + +static void r8168_pll_power_down(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) && + r8168_check_dash(tp)) { + return; + } + + if ((tp->mac_version == RTL_GIGA_MAC_VER_23 || + tp->mac_version == RTL_GIGA_MAC_VER_24) && + (RTL_R16(CPlusCmd) & ASF)) { + return; + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_32 || + tp->mac_version == RTL_GIGA_MAC_VER_33) + rtl_ephy_write(tp, 0x19, 0xff64); + + if (rtl_wol_pll_power_down(tp)) + return; + + r8168_phy_power_down(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000, + 0xfc000000, ERIAR_EXGMAC); + RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); + break; + } +} + +static void r8168_pll_power_up(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); + break; + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + break; + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_49: + RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0); + rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000, + 0x00000000, ERIAR_EXGMAC); + break; + } + + r8168_phy_power_up(tp); +} + +static void rtl_generic_op(struct rtl8169_private *tp, + void (*op)(struct rtl8169_private *)) +{ + if (op) + op(tp); +} + +static void rtl_pll_power_down(struct rtl8169_private *tp) +{ + rtl_generic_op(tp, tp->pll_power_ops.down); +} + +static void rtl_pll_power_up(struct rtl8169_private *tp) +{ + rtl_generic_op(tp, tp->pll_power_ops.up); + + /* give MAC/PHY some time to resume */ + msleep(20); +} + +static void rtl_init_pll_power_ops(struct rtl8169_private *tp) +{ + struct pll_power_ops *ops = &tp->pll_power_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + case RTL_GIGA_MAC_VER_08: + case RTL_GIGA_MAC_VER_09: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_29: + case RTL_GIGA_MAC_VER_30: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_39: + case RTL_GIGA_MAC_VER_43: + case 
RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + ops->down = r810x_pll_power_down; + ops->up = r810x_pll_power_up; + break; + + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + case RTL_GIGA_MAC_VER_31: + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + ops->down = r8168_pll_power_down; + ops->up = r8168_pll_power_up; + break; + + default: + ops->down = NULL; + ops->up = NULL; + break; + } +} + +static void rtl_init_rxcfg(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + case RTL_GIGA_MAC_VER_04: + case RTL_GIGA_MAC_VER_05: + case RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_14: + case RTL_GIGA_MAC_VER_15: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_17: + RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_18: + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST); + break; + case RTL_GIGA_MAC_VER_40: + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF); + break; + default: + RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); + break; + } +} + +static void rtl8169_init_ring_indexes(struct rtl8169_private *tp) +{ + tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0; +} + +static void rtl_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + rtl_generic_op(tp, tp->jumbo_ops.enable); + RTL_W8(Cfg9346, Cfg9346_Lock); +} + +static void rtl_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + rtl_generic_op(tp, tp->jumbo_ops.disable); + RTL_W8(Cfg9346, Cfg9346_Lock); +} + +static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1); + rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B); +} + +static void 
r8168c_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1); + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); +} + +static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); +} + +static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); +} + +static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(MaxTxPacketSize, 0x3f); + RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) | 0x01); + rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B); +} + +static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(MaxTxPacketSize, 0x0c); + RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0); + RTL_W8(Config4, RTL_R8(Config4) & ~0x01); + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); +} + +static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp) +{ + rtl_tx_performance_tweak(tp->pci_dev, + PCI_EXP_DEVCTL_READRQ_512B | PCI_EXP_DEVCTL_NOSNOOP_EN); +} + +static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp) +{ + rtl_tx_performance_tweak(tp->pci_dev, + (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN); +} + +static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168b_0_hw_jumbo_enable(tp); + + RTL_W8(Config4, RTL_R8(Config4) | (1 << 0)); +} + +static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + r8168b_0_hw_jumbo_disable(tp); + + RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); +} + +static void rtl_init_jumbo_ops(struct rtl8169_private *tp) +{ + struct jumbo_ops *ops = &tp->jumbo_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + ops->disable = r8168b_0_hw_jumbo_disable; + ops->enable = r8168b_0_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + ops->disable = r8168b_1_hw_jumbo_disable; + ops->enable = r8168b_1_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_19: + case RTL_GIGA_MAC_VER_20: + case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_22: + case RTL_GIGA_MAC_VER_23: + case RTL_GIGA_MAC_VER_24: + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + ops->disable = r8168c_hw_jumbo_disable; + ops->enable = r8168c_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_27: + case RTL_GIGA_MAC_VER_28: + ops->disable = r8168dp_hw_jumbo_disable; + ops->enable = r8168dp_hw_jumbo_enable; + break; + case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */ + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + case RTL_GIGA_MAC_VER_34: + ops->disable = r8168e_hw_jumbo_disable; + ops->enable = r8168e_hw_jumbo_enable; + break; + + /* + * No action needed for jumbo frames with 8169. + * No jumbo for 810x at all. 
+ */ + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + default: + ops->disable = NULL; + ops->enable = NULL; + break; + } +} + +DECLARE_RTL_COND(rtl_chipcmd_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R8(ChipCmd) & CmdReset; +} + +static void rtl_hw_reset(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(ChipCmd, CmdReset); + + rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); +} + +static void rtl_request_uncached_firmware(struct rtl8169_private *tp) +{ + struct rtl_fw *rtl_fw; + const char *name; + int rc = -ENOMEM; + + name = rtl_lookup_firmware_name(tp); + if (!name) + goto out_no_firmware; + + rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL); + if (!rtl_fw) + goto err_warn; + + rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev); + if (rc < 0) + goto err_free; + + rc = rtl_check_firmware(tp, rtl_fw); + if (rc < 0) + goto err_release_firmware; + + tp->rtl_fw = rtl_fw; +out: + return; + +err_release_firmware: + release_firmware(rtl_fw->fw); +err_free: + kfree(rtl_fw); +err_warn: + netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n", + name, rc); +out_no_firmware: + tp->rtl_fw = NULL; + goto out; +} + +static void rtl_request_firmware(struct rtl8169_private *tp) +{ + if (IS_ERR(tp->rtl_fw)) + rtl_request_uncached_firmware(tp); +} + +static void rtl_rx_close(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK); +} + +DECLARE_RTL_COND(rtl_npq_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R8(TxPoll) & NPQ; +} + +DECLARE_RTL_COND(rtl_txcfg_empty_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(TxConfig) & TXCFG_EMPTY; +} + +static void rtl8169_hw_reset(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* Disable interrupts */ + rtl8169_irq_mask_and_ack(tp); + + rtl_rx_close(tp); + + if (tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31) { + rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42); + } else if (tp->mac_version == RTL_GIGA_MAC_VER_34 || + tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36 || + tp->mac_version == RTL_GIGA_MAC_VER_37 || + tp->mac_version == RTL_GIGA_MAC_VER_38 || + tp->mac_version == RTL_GIGA_MAC_VER_40 || + tp->mac_version == RTL_GIGA_MAC_VER_41 || + tp->mac_version == RTL_GIGA_MAC_VER_42 || + tp->mac_version == RTL_GIGA_MAC_VER_43 || + tp->mac_version == RTL_GIGA_MAC_VER_44 || + tp->mac_version == RTL_GIGA_MAC_VER_45 || + tp->mac_version == RTL_GIGA_MAC_VER_46 || + tp->mac_version == RTL_GIGA_MAC_VER_47 || + tp->mac_version == RTL_GIGA_MAC_VER_48 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) { + RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666); + } else { + RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq); + udelay(100); + } + + rtl_hw_reset(tp); +} + +static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* Set DMA burst size and Interframe 
Gap Time */ + RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) | + (InterFrameGap << TxInterFrameGapShift)); +} + +static void rtl_hw_start(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + tp->hw_start(dev); + + rtl_irq_enable_all(tp); +} + +static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp, + void __iomem *ioaddr) +{ + /* + * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh + * register to be written before TxDescAddrLow to work. + * Switching from MMIO to I/O access fixes the issue as well. + */ + RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32); + RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32)); + RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32); + RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32)); +} + +static u16 rtl_rw_cpluscmd(void __iomem *ioaddr) +{ + u16 cmd; + + cmd = RTL_R16(CPlusCmd); + RTL_W16(CPlusCmd, cmd); + return cmd; +} + +static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz) +{ + /* Low hurts. Let's disable the filtering. */ + RTL_W16(RxMaxSize, rx_buf_sz + 1); +} + +static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) +{ + static const struct rtl_cfg2_info { + u32 mac_version; + u32 clk; + u32 val; + } cfg2_info [] = { + { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd + { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff }, + { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe + { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff } + }; + const struct rtl_cfg2_info *p = cfg2_info; + unsigned int i; + u32 clk; + + clk = RTL_R8(Config2) & PCI_Clock_66MHz; + for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) { + if ((p->mac_version == mac_version) && (p->clk == clk)) { + RTL_W32(0x7c, p->val); + break; + } + } +} + +static void rtl_set_rx_mode(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + u32 mc_filter[2]; /* Multicast hash filter */ + int rx_mode; + u32 tmp = 0; + + if (dev->flags & IFF_PROMISC) { + /* Unconditionally log net taps. */ + netif_notice(tp, link, dev, "Promiscuous mode enabled\n"); + rx_mode = + AcceptBroadcast | AcceptMulticast | AcceptMyPhys | + AcceptAllPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else if ((netdev_mc_count(dev) > multicast_filter_limit) || + (dev->flags & IFF_ALLMULTI)) { + /* Too many to filter perfectly -- accept all multicasts. 
*/ + rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0xffffffff; + } else { + struct netdev_hw_addr *ha; + + rx_mode = AcceptBroadcast | AcceptMyPhys; + mc_filter[1] = mc_filter[0] = 0; + netdev_for_each_mc_addr(ha, dev) { + int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; + mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); + rx_mode |= AcceptMulticast; + } + } + + if (dev->features & NETIF_F_RXALL) + rx_mode |= (AcceptErr | AcceptRunt); + + tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode; + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) { + u32 data = mc_filter[0]; + + mc_filter[0] = swab32(mc_filter[1]); + mc_filter[1] = swab32(data); + } + + if (tp->mac_version == RTL_GIGA_MAC_VER_35) + mc_filter[1] = mc_filter[0] = 0xffffffff; + + RTL_W32(MAR0 + 4, mc_filter[1]); + RTL_W32(MAR0 + 0, mc_filter[0]); + + RTL_W32(RxConfig, tmp); +} + +static void rtl_hw_start_8169(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + if (tp->mac_version == RTL_GIGA_MAC_VER_05) { + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW); + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); + } + + RTL_W8(Cfg9346, Cfg9346_Unlock); + if (tp->mac_version == RTL_GIGA_MAC_VER_01 || + tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03 || + tp->mac_version == RTL_GIGA_MAC_VER_04) + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + + rtl_init_rxcfg(tp); + + RTL_W8(EarlyTxThres, NoEarlyTx); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + if (tp->mac_version == RTL_GIGA_MAC_VER_01 || + tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03 || + tp->mac_version == RTL_GIGA_MAC_VER_04) + rtl_set_rx_tx_config_registers(tp); + + tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; + + if (tp->mac_version == RTL_GIGA_MAC_VER_02 || + tp->mac_version == RTL_GIGA_MAC_VER_03) { + dprintk("Set MAC Reg C+CR Offset 0xe0. " + "Bit-3 and bit-14 MUST be 1\n"); + tp->cp_cmd |= (1 << 14); + } + + RTL_W16(CPlusCmd, tp->cp_cmd); + + rtl8169_set_magic_reg(ioaddr, tp->mac_version); + + /* + * Undocumented corner. Supposedly: + * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets + */ + RTL_W16(IntrMitigate, 0x0000); + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + if (tp->mac_version != RTL_GIGA_MAC_VER_01 && + tp->mac_version != RTL_GIGA_MAC_VER_02 && + tp->mac_version != RTL_GIGA_MAC_VER_03 && + tp->mac_version != RTL_GIGA_MAC_VER_04) { + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + rtl_set_rx_tx_config_registers(tp); + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ + RTL_R8(IntrMask); + + RTL_W32(RxMissed, 0); + + rtl_set_rx_mode(dev); + + /* no early-rx interrupts */ + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); +} + +static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + if (tp->csi_ops.write) + tp->csi_ops.write(tp, addr, value); +} + +static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) +{ + return tp->csi_ops.read ? 
tp->csi_ops.read(tp, addr) : ~0; +} + +static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits) +{ + u32 csi; + + csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff; + rtl_csi_write(tp, 0x070c, csi | bits); +} + +static void rtl_csi_access_enable_1(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x17000000); +} + +static void rtl_csi_access_enable_2(struct rtl8169_private *tp) +{ + rtl_csi_access_enable(tp, 0x27000000); +} + +DECLARE_RTL_COND(rtl_csiar_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R32(CSIAR) & CSIAR_FLAG; +} + +static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 r8169_csi_read(struct rtl8169_private *tp, int addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(CSIDR) : ~0; +} + +static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | + CSIAR_FUNC_NIC); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 r8402_csi_read(struct rtl8169_private *tp, int addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? + RTL_R32(CSIDR) : ~0; +} + +static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIDR, value); + RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT | + CSIAR_FUNC_NIC2); + + rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100); +} + +static u32 r8411_csi_read(struct rtl8169_private *tp, int addr) +{ + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 | + CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT); + + return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ? 
+ RTL_R32(CSIDR) : ~0; +} + +static void rtl_init_csi_ops(struct rtl8169_private *tp) +{ + struct csi_ops *ops = &tp->csi_ops; + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_01: + case RTL_GIGA_MAC_VER_02: + case RTL_GIGA_MAC_VER_03: + case RTL_GIGA_MAC_VER_04: + case RTL_GIGA_MAC_VER_05: + case RTL_GIGA_MAC_VER_06: + case RTL_GIGA_MAC_VER_10: + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_13: + case RTL_GIGA_MAC_VER_14: + case RTL_GIGA_MAC_VER_15: + case RTL_GIGA_MAC_VER_16: + case RTL_GIGA_MAC_VER_17: + ops->write = NULL; + ops->read = NULL; + break; + + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + ops->write = r8402_csi_write; + ops->read = r8402_csi_read; + break; + + case RTL_GIGA_MAC_VER_44: + ops->write = r8411_csi_write; + ops->read = r8411_csi_read; + break; + + default: + ops->write = r8169_csi_write; + ops->read = r8169_csi_read; + break; + } +} + +struct ephy_info { + unsigned int offset; + u16 mask; + u16 bits; +}; + +static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e, + int len) +{ + u16 w; + + while (len-- > 0) { + w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits; + rtl_ephy_write(tp, e->offset, w); + e++; + } +} + +static void rtl_disable_clock_request(struct pci_dev *pdev) +{ + pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_enable_clock_request(struct pci_dev *pdev) +{ + pcie_capability_set_word(pdev, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_CLKREQ_EN); +} + +static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable) +{ + void __iomem *ioaddr = tp->mmio_addr; + u8 data; + + data = RTL_R8(Config3); + + if (enable) + data |= Rdy_to_L23; + else + data &= ~Rdy_to_L23; + + RTL_W8(Config3, data); +} + +#define R8168_CPCMD_QUIRK_MASK (\ + EnableBist | \ + Mac_dbgo_oe | \ + Force_half_dup | \ + Force_rxflow_en | \ + Force_txflow_en | \ + Cxpl_dbg_sel | \ + ASF | \ + PktCntrDisable | \ + Mac_dbgo_sel) + +static void rtl_hw_start_8168bb(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); + + if (tp->dev->mtu <= ETH_DATA_LEN) { + rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) | + PCI_EXP_DEVCTL_NOSNOOP_EN); + } +} + +static void rtl_hw_start_8168bef(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + rtl_hw_start_8168bb(tp); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); +} + +static void __rtl_hw_start_8168cp(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W8(Config1, RTL_R8(Config1) | Speed_down); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_disable_clock_request(pdev); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168cp[] = { + { 0x01, 0, 0x0001 }, + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0042 }, + { 0x06, 0x0080, 0x0000 }, + { 0x07, 0, 0x2000 } + }; + + rtl_csi_access_enable_2(tp); + + rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp) +{ + void 
__iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + /* Magic. */ + RTL_W8(DBG_REG, 0x20); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168c_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168c_1[] = { + { 0x02, 0x0800, 0x1000 }, + { 0x03, 0, 0x0002 }, + { 0x06, 0x0080, 0x0000 } + }; + + rtl_csi_access_enable_2(tp); + + RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); + + rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1)); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_2(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168c_2[] = { + { 0x01, 0, 0x0001 }, + { 0x03, 0x0400, 0x0220 } + }; + + rtl_csi_access_enable_2(tp); + + rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168c_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8168c_2(tp); +} + +static void rtl_hw_start_8168c_4(struct rtl8169_private *tp) +{ + rtl_csi_access_enable_2(tp); + + __rtl_hw_start_8168cp(tp); +} + +static void rtl_hw_start_8168d(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + rtl_disable_clock_request(pdev); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); +} + +static void rtl_hw_start_8168dp(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_1(tp); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_disable_clock_request(pdev); +} + +static void rtl_hw_start_8168d_4(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + static const struct ephy_info e_info_8168d_4[] = { + { 0x0b, ~0, 0x48 }, + { 0x19, 0x20, 0x50 }, + { 0x0c, ~0, 0x20 } + }; + int i; + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) { + const struct ephy_info *e = e_info_8168d_4 + i; + u16 w; + + w = rtl_ephy_read(tp, e->offset); + rtl_ephy_write(tp, 0x03, (w & e->mask) | e->bits); + } + + rtl_enable_clock_request(pdev); +} + +static void rtl_hw_start_8168e_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + static const struct ephy_info e_info_8168e_1[] = { + { 0x00, 0x0200, 0x0100 }, + { 0x00, 0x0000, 0x0004 }, + { 0x06, 0x0002, 0x0001 }, + { 0x06, 0x0000, 0x0030 }, 
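+ /* Each ephy_info entry is { offset, mask, bits }: rtl_ephy_init() above reads the EPHY register at "offset", clears the "mask" bits and ORs in "bits". */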
+ { 0x07, 0x0000, 0x2000 }, + { 0x00, 0x0000, 0x0020 }, + { 0x03, 0x5800, 0x2000 }, + { 0x03, 0x0000, 0x0001 }, + { 0x01, 0x0800, 0x1000 }, + { 0x07, 0x0000, 0x4000 }, + { 0x1e, 0x0000, 0x2000 }, + { 0x19, 0xffff, 0xfe6c }, + { 0x0a, 0x0000, 0x0040 } + }; + + rtl_csi_access_enable_2(tp); + + rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1)); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_disable_clock_request(pdev); + + /* Reset tx FIFO pointer */ + RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST); + RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST); + + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + static const struct ephy_info e_info_8168e_2[] = { + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_csi_access_enable_1(tp); + + rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2)); + + if (tp->dev->mtu <= ETH_DATA_LEN) + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_disable_clock_request(pdev); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168f(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC); + + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_disable_clock_request(pdev); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + RTL_W32(MISC, RTL_R32(MISC) | PWM_EN); + RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en); +} + +static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + 
{ 0x08, 0x0001, 0x0002 }, + { 0x09, 0x0000, 0x0080 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_hw_start_8168f(tp); + + rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); +} + +static void rtl_hw_start_8411(struct rtl8169_private *tp) +{ + static const struct ephy_info e_info_8168f_1[] = { + { 0x06, 0x00c0, 0x0020 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x1e, 0x0000, 0x4000 }, + { 0x19, 0x0000, 0x0224 } + }; + + rtl_hw_start_8168f(tp); + rtl_pcie_state_l2l3_enable(tp, false); + + rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); + + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC); +} + +static void rtl_hw_start_8168g(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC); + + RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8168g_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168g_1[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x37d0, 0x0820 }, + { 0x1e, 0x0000, 0x0001 }, + { 0x19, 0x8000, 0x0000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1)); +} + +static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168g_2[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20eb } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2)); +} + +static void rtl_hw_start_8411_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8411_2[] = { + { 0x00, 0x0000, 0x0008 }, + { 0x0c, 0x3df0, 0x0200 }, + { 0x0f, 0xffff, 0x5200 }, + { 0x19, 0x0020, 0x0000 }, + { 0x1e, 0x0000, 0x2000 } + }; + + rtl_hw_start_8168g(tp); + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + 
RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2)); +} + +static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + int rg_saw_cnt; + u32 data; + static const struct ephy_info e_info_8168h_1[] = { + { 0x1e, 0x0800, 0x0001 }, + { 0x1d, 0x0000, 0x0800 }, + { 0x05, 0xffff, 0x2089 }, + { 0x06, 0xffff, 0x5881 }, + { 0x04, 0xffff, 0x154a }, + { 0x01, 0xffff, 0x068b } + }; + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1)); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_1111, 0x0010, 0x00, ERIAR_EXGMAC); + + rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f00, 0x00, ERIAR_EXGMAC); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); + + RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN); + + rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); + + rtl_writephy(tp, 0x1f, 0x0c42); + rg_saw_cnt = rtl_readphy(tp, 0x13); + rtl_writephy(tp, 0x1f, 0x0000); + if (rg_saw_cnt > 0) { + u16 sw_cnt_1ms_ini; + + sw_cnt_1ms_ini = 16000000/rg_saw_cnt; + sw_cnt_1ms_ini &= 0x0fff; + data = r8168_mac_ocp_read(tp, 0xd412); + data &= 0x0fff; + data |= sw_cnt_1ms_ini; + r8168_mac_ocp_write(tp, 0xd412, data); + } + + data = r8168_mac_ocp_read(tp, 0xe056); + data &= 0xf0; + data |= 0x07; + r8168_mac_ocp_write(tp, 0xe056, data); + + data = r8168_mac_ocp_read(tp, 0xe052); + data &= 0x8008; + data |= 0x6000; + r8168_mac_ocp_write(tp, 0xe052, data); + + data = r8168_mac_ocp_read(tp, 0xe0d6); + data &= 0x01ff; + data |= 0x017f; + r8168_mac_ocp_write(tp, 0xe0d6, data); + + data = r8168_mac_ocp_read(tp, 0xd420); + data &= 0x0fff; + data |= 0x047f; + r8168_mac_ocp_write(tp, 0xd420, data); + + r8168_mac_ocp_write(tp, 0xe63e, 0x0001); + r8168_mac_ocp_write(tp, 0xe63e, 0x0000); + r8168_mac_ocp_write(tp, 0xc094, 0x0000); + r8168_mac_ocp_write(tp, 0xc09e, 0x0000); +} + +static void rtl_hw_start_8168ep(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl8168ep_stop_cmac(tp); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x5f, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, 
ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC); + + rtl_csi_access_enable_1(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + + rtl_w0w1_eri(tp, 0xd4, ERIAR_MASK_1111, 0x1f80, 0x00, ERIAR_EXGMAC); + + rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC); + + RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN); + RTL_W8(MaxTxPacketSize, EarlySize); + + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + + /* Adjust EEE LED frequency */ + RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07); + + rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168ep_1[] = { + { 0x00, 0xffff, 0x10ab }, + { 0x06, 0xffff, 0xf030 }, + { 0x08, 0xffff, 0x2006 }, + { 0x0d, 0xffff, 0x1666 }, + { 0x0c, 0x3ff0, 0x0000 } + }; + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1)); + + rtl_hw_start_8168ep(tp); +} + +static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8168ep_2[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0xfc00 }, + { 0x1e, 0xffff, 0x20ea } + }; + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2)); + + rtl_hw_start_8168ep(tp); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); +} + +static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u32 data; + static const struct ephy_info e_info_8168ep_3[] = { + { 0x00, 0xffff, 0x10a3 }, + { 0x19, 0xffff, 0x7c00 }, + { 0x1e, 0xffff, 0x20eb }, + { 0x0d, 0xffff, 0x1666 } + }; + + /* disable aspm and clock request before access ephy */ + RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn); + RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en); + rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3)); + + rtl_hw_start_8168ep(tp); + + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + RTL_W8(DLLPR, RTL_R8(MISC_1) & ~PFM_D3COLD_EN); + + data = r8168_mac_ocp_read(tp, 0xd3e2); + data &= 0xf000; + data |= 0x0271; + r8168_mac_ocp_write(tp, 0xd3e2, data); + + data = r8168_mac_ocp_read(tp, 0xd3e4); + data &= 0xff00; + r8168_mac_ocp_write(tp, 0xd3e4, data); + + data = r8168_mac_ocp_read(tp, 0xe860); + data |= 0x0080; + r8168_mac_ocp_write(tp, 0xe860, data); +} + +static void rtl_hw_start_8168(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1; + + RTL_W16(CPlusCmd, tp->cp_cmd); + + RTL_W16(IntrMitigate, 0x5151); + + /* Work around for RxFIFO overflow. 
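+  * RTL8168B (MAC_VER_11) is the only chip taking this path: it signals
+  * overflow through RxFIFOOver rather than RxOverflow, so that event
+  * (plus PCSTimeout) is promoted into the slow-event mask and handled
+  * by rtl_slow_event_work().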
*/ + if (tp->mac_version == RTL_GIGA_MAC_VER_11) { + tp->event_slow |= RxFIFOOver | PCSTimeout; + tp->event_slow &= ~RxOverflow; + } + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + rtl_set_rx_tx_config_registers(tp); + + RTL_R8(IntrMask); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + rtl_hw_start_8168bb(tp); + break; + + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + rtl_hw_start_8168bef(tp); + break; + + case RTL_GIGA_MAC_VER_18: + rtl_hw_start_8168cp_1(tp); + break; + + case RTL_GIGA_MAC_VER_19: + rtl_hw_start_8168c_1(tp); + break; + + case RTL_GIGA_MAC_VER_20: + rtl_hw_start_8168c_2(tp); + break; + + case RTL_GIGA_MAC_VER_21: + rtl_hw_start_8168c_3(tp); + break; + + case RTL_GIGA_MAC_VER_22: + rtl_hw_start_8168c_4(tp); + break; + + case RTL_GIGA_MAC_VER_23: + rtl_hw_start_8168cp_2(tp); + break; + + case RTL_GIGA_MAC_VER_24: + rtl_hw_start_8168cp_3(tp); + break; + + case RTL_GIGA_MAC_VER_25: + case RTL_GIGA_MAC_VER_26: + case RTL_GIGA_MAC_VER_27: + rtl_hw_start_8168d(tp); + break; + + case RTL_GIGA_MAC_VER_28: + rtl_hw_start_8168d_4(tp); + break; + + case RTL_GIGA_MAC_VER_31: + rtl_hw_start_8168dp(tp); + break; + + case RTL_GIGA_MAC_VER_32: + case RTL_GIGA_MAC_VER_33: + rtl_hw_start_8168e_1(tp); + break; + case RTL_GIGA_MAC_VER_34: + rtl_hw_start_8168e_2(tp); + break; + + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + rtl_hw_start_8168f_1(tp); + break; + + case RTL_GIGA_MAC_VER_38: + rtl_hw_start_8411(tp); + break; + + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + rtl_hw_start_8168g_1(tp); + break; + case RTL_GIGA_MAC_VER_42: + rtl_hw_start_8168g_2(tp); + break; + + case RTL_GIGA_MAC_VER_44: + rtl_hw_start_8411_2(tp); + break; + + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + rtl_hw_start_8168h_1(tp); + break; + + case RTL_GIGA_MAC_VER_49: + rtl_hw_start_8168ep_1(tp); + break; + + case RTL_GIGA_MAC_VER_50: + rtl_hw_start_8168ep_2(tp); + break; + + case RTL_GIGA_MAC_VER_51: + rtl_hw_start_8168ep_3(tp); + break; + + default: + printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", + dev->name, tp->mac_version); + break; + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + + rtl_set_rx_mode(dev); + + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); +} + +#define R810X_CPCMD_QUIRK_MASK (\ + EnableBist | \ + Mac_dbgo_oe | \ + Force_half_dup | \ + Force_rxflow_en | \ + Force_txflow_en | \ + Cxpl_dbg_sel | \ + ASF | \ + PktCntrDisable | \ + Mac_dbgo_sel) + +static void rtl_hw_start_8102e_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + static const struct ephy_info e_info_8102e_1[] = { + { 0x01, 0, 0x6e65 }, + { 0x02, 0, 0x091f }, + { 0x03, 0, 0xc2f9 }, + { 0x06, 0, 0xafb5 }, + { 0x07, 0, 0x0e00 }, + { 0x19, 0, 0xec80 }, + { 0x01, 0, 0x2e65 }, + { 0x01, 0, 0x6e65 } + }; + u8 cfg1; + + rtl_csi_access_enable_2(tp); + + RTL_W8(DBG_REG, FIX_NAK_1); + + rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); + + RTL_W8(Config1, + LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); + + cfg1 = RTL_R8(Config1); + if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) + RTL_W8(Config1, cfg1 & ~LEDS0); + + rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); +} + +static void rtl_hw_start_8102e_2(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + rtl_csi_access_enable_2(tp); + + rtl_tx_performance_tweak(pdev, 0x5 << 
MAX_READ_REQUEST_SHIFT); + + RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); + RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); +} + +static void rtl_hw_start_8102e_3(struct rtl8169_private *tp) +{ + rtl_hw_start_8102e_2(tp); + + rtl_ephy_write(tp, 0x03, 0xc2f9); +} + +static void rtl_hw_start_8105e_1(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8105e_1[] = { + { 0x07, 0, 0x4000 }, + { 0x19, 0, 0x0200 }, + { 0x19, 0, 0x0020 }, + { 0x1e, 0, 0x2000 }, + { 0x03, 0, 0x0001 }, + { 0x19, 0, 0x0100 }, + { 0x19, 0, 0x0004 }, + { 0x0a, 0, 0x0020 } + }; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + /* Disable Early Tally Counter */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000); + + RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); + + rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) +{ + rtl_hw_start_8105e_1(tp); + rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000); +} + +static void rtl_hw_start_8402(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + static const struct ephy_info e_info_8402[] = { + { 0x19, 0xffff, 0xff64 }, + { 0x1e, 0, 0x4000 } + }; + + rtl_csi_access_enable_2(tp); + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402)); + + rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT); + + rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); + rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8106(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* Force LAN exit from ASPM if Rx/Tx are not idle */ + RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800); + + RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); + RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); + RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); + + rtl_pcie_state_l2l3_enable(tp, false); +} + +static void rtl_hw_start_8101(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + + if (tp->mac_version >= RTL_GIGA_MAC_VER_30) + tp->event_slow &= ~RxFIFOOver; + + if (tp->mac_version == RTL_GIGA_MAC_VER_13 || + tp->mac_version == RTL_GIGA_MAC_VER_16) + pcie_capability_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_NOSNOOP_EN); + + RTL_W8(Cfg9346, Cfg9346_Unlock); + + RTL_W8(MaxTxPacketSize, TxPacketMax); + + rtl_set_rx_max_size(ioaddr, rx_buf_sz); + + tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; + RTL_W16(CPlusCmd, tp->cp_cmd); + + rtl_set_rx_tx_desc_registers(tp, ioaddr); + + rtl_set_rx_tx_config_registers(tp); + + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_07: + rtl_hw_start_8102e_1(tp); + break; + + 
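+ /* Note the mapping below: VER_08 goes through rtl_hw_start_8102e_3(),
+  * i.e. the 8102e_2 path plus one extra EPHY write (0x03, 0xc2f9),
+  * while VER_09 uses plain 8102e_2.
+  */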
case RTL_GIGA_MAC_VER_08: + rtl_hw_start_8102e_3(tp); + break; + + case RTL_GIGA_MAC_VER_09: + rtl_hw_start_8102e_2(tp); + break; + + case RTL_GIGA_MAC_VER_29: + rtl_hw_start_8105e_1(tp); + break; + case RTL_GIGA_MAC_VER_30: + rtl_hw_start_8105e_2(tp); + break; + + case RTL_GIGA_MAC_VER_37: + rtl_hw_start_8402(tp); + break; + + case RTL_GIGA_MAC_VER_39: + rtl_hw_start_8106(tp); + break; + case RTL_GIGA_MAC_VER_43: + rtl_hw_start_8168g_2(tp); + break; + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + rtl_hw_start_8168h_1(tp); + break; + } + + RTL_W8(Cfg9346, Cfg9346_Lock); + + RTL_W16(IntrMitigate, 0x0000); + + RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); + + rtl_set_rx_mode(dev); + + RTL_R8(IntrMask); + + RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); +} + +static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (new_mtu < ETH_ZLEN || + new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max) + return -EINVAL; + + if (new_mtu > ETH_DATA_LEN) + rtl_hw_jumbo_enable(tp); + else + rtl_hw_jumbo_disable(tp); + + dev->mtu = new_mtu; + netdev_update_features(dev); + + return 0; +} + +static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc) +{ + desc->addr = cpu_to_le64(0x0badbadbadbadbadull); + desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask); +} + +static void rtl8169_free_rx_databuff(struct rtl8169_private *tp, + void **data_buff, struct RxDesc *desc) +{ + dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz, + DMA_FROM_DEVICE); + + kfree(*data_buff); + *data_buff = NULL; + rtl8169_make_unusable_by_asic(desc); +} + +static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz) +{ + u32 eor = le32_to_cpu(desc->opts1) & RingEnd; + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz); +} + +static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, + u32 rx_buf_sz) +{ + desc->addr = cpu_to_le64(mapping); + rtl8169_mark_to_asic(desc, rx_buf_sz); +} + +static inline void *rtl8169_align(void *data) +{ + return (void *)ALIGN((long)data, 16); +} + +static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp, + struct RxDesc *desc) +{ + void *data; + dma_addr_t mapping; + struct device *d = &tp->pci_dev->dev; + struct net_device *dev = tp->dev; + int node = dev->dev.parent ? 
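+ /* allocate near the device: kmalloc_node() below uses this node,
+  * and -1 (NUMA_NO_NODE) lets the allocator choose
+  */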
dev_to_node(dev->dev.parent) : -1; + + data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node); + if (!data) + return NULL; + + if (rtl8169_align(data) != data) { + kfree(data); + data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node); + if (!data) + return NULL; + } + + mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n"); + goto err_out; + } + + rtl8169_map_to_asic(desc, mapping, rx_buf_sz); + return data; + +err_out: + kfree(data); + return NULL; +} + +static void rtl8169_rx_clear(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + if (tp->Rx_databuff[i]) { + rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i, + tp->RxDescArray + i); + } + } +} + +static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc) +{ + desc->opts1 |= cpu_to_le32(RingEnd); +} + +static int rtl8169_rx_fill(struct rtl8169_private *tp) +{ + unsigned int i; + + for (i = 0; i < NUM_RX_DESC; i++) { + void *data; + + if (tp->Rx_databuff[i]) + continue; + + data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i); + if (!data) { + rtl8169_make_unusable_by_asic(tp->RxDescArray + i); + goto err_out; + } + tp->Rx_databuff[i] = data; + } + + rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1); + return 0; + +err_out: + rtl8169_rx_clear(tp); + return -ENOMEM; +} + +static int rtl8169_init_ring(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_init_ring_indexes(tp); + + memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info)); + memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *)); + + return rtl8169_rx_fill(tp); +} + +static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb, + struct TxDesc *desc) +{ + unsigned int len = tx_skb->len; + + dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE); + + desc->opts1 = 0x00; + desc->opts2 = 0x00; + desc->addr = 0x00; + tx_skb->len = 0; +} + +static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start, + unsigned int n) +{ + unsigned int i; + + for (i = 0; i < n; i++) { + unsigned int entry = (start + i) % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + unsigned int len = tx_skb->len; + + if (len) { + struct sk_buff *skb = tx_skb->skb; + + rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, + tp->TxDescArray + entry); + if (skb) { + tp->dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + tx_skb->skb = NULL; + } + } + } +} + +static void rtl8169_tx_clear(struct rtl8169_private *tp) +{ + rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC); + tp->cur_tx = tp->dirty_tx = 0; +} + +static void rtl_reset_work(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + int i; + + napi_disable(&tp->napi); + netif_stop_queue(dev); + synchronize_sched(); + + rtl8169_hw_reset(tp); + + for (i = 0; i < NUM_RX_DESC; i++) + rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz); + + rtl8169_tx_clear(tp); + rtl8169_init_ring_indexes(tp); + + napi_enable(&tp->napi); + rtl_hw_start(dev); + netif_wake_queue(dev); + rtl8169_check_link_status(dev, tp, tp->mmio_addr); +} + +static void rtl8169_tx_timeout(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, + u32 *opts) +{ + struct skb_shared_info *info = skb_shinfo(skb); + 
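+ /* Each paged fragment gets a descriptor of its own; opts[] holds the
+  * first descriptor's flag words, and LastFrag is OR-ed into whichever
+  * descriptor the loop fills last.
+  */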
unsigned int cur_frag, entry; + struct TxDesc *uninitialized_var(txd); + struct device *d = &tp->pci_dev->dev; + + entry = tp->cur_tx; + for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { + const skb_frag_t *frag = info->frags + cur_frag; + dma_addr_t mapping; + u32 status, len; + void *addr; + + entry = (entry + 1) % NUM_TX_DESC; + + txd = tp->TxDescArray + entry; + len = skb_frag_size(frag); + addr = skb_frag_address(frag); + mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, tp->dev, + "Failed to map TX fragments DMA!\n"); + goto err_out; + } + + /* Anti gcc 2.95.3 bugware (sic) */ + status = opts[0] | len | + (RingEnd * !((entry + 1) % NUM_TX_DESC)); + + txd->opts1 = cpu_to_le32(status); + txd->opts2 = cpu_to_le32(opts[1]); + txd->addr = cpu_to_le64(mapping); + + tp->tx_skb[entry].len = len; + } + + if (cur_frag) { + tp->tx_skb[entry].skb = skb; + txd->opts1 |= cpu_to_le32(LastFrag); + } + + return cur_frag; + +err_out: + rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag); + return -EIO; +} + +static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) +{ + return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev); +/* r8169_csum_workaround() + * The hw limites the value the transport offset. When the offset is out of the + * range, calculate the checksum by sw. + */ +static void r8169_csum_workaround(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + if (skb_shinfo(skb)->gso_size) { + netdev_features_t features = tp->dev->features; + struct sk_buff *segs, *nskb; + + features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6); + segs = skb_gso_segment(skb, features); + if (IS_ERR(segs) || !segs) + goto drop; + + do { + nskb = segs; + segs = segs->next; + nskb->next = NULL; + rtl8169_start_xmit(nskb, tp->dev); + } while (segs); + + dev_consume_skb_any(skb); + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + if (skb_checksum_help(skb) < 0) + goto drop; + + rtl8169_start_xmit(skb, tp->dev); + } else { + struct net_device_stats *stats; + +drop: + stats = &tp->dev->stats; + stats->tx_dropped++; + dev_kfree_skb_any(skb); + } +} + +/* msdn_giant_send_check() + * According to the document of microsoft, the TCP Pseudo Header excludes the + * packet length for IPv6 TCP large packets. 
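+ * The checksum field is therefore reseeded with a pseudo-header sum
+ * computed over the addresses alone (tcp_v6_check() with a zero length),
+ * leaving the hardware to fold in the per-segment payload during TSO.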
+ */ +static int msdn_giant_send_check(struct sk_buff *skb) +{ + const struct ipv6hdr *ipv6h; + struct tcphdr *th; + int ret; + + ret = skb_cow_head(skb, 0); + if (ret) + return ret; + + ipv6h = ipv6_hdr(skb); + th = tcp_hdr(skb); + + th->check = 0; + th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0); + + return ret; +} + +static inline __be16 get_protocol(struct sk_buff *skb) +{ + __be16 protocol; + + if (skb->protocol == htons(ETH_P_8021Q)) + protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; + else + protocol = skb->protocol; + + return protocol; +} + +static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + u32 mss = skb_shinfo(skb)->gso_size; + + if (mss) { + opts[0] |= TD_LSO; + opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + const struct iphdr *ip = ip_hdr(skb); + + if (ip->protocol == IPPROTO_TCP) + opts[0] |= TD0_IP_CS | TD0_TCP_CS; + else if (ip->protocol == IPPROTO_UDP) + opts[0] |= TD0_IP_CS | TD0_UDP_CS; + else + WARN_ON_ONCE(1); + } + + return true; +} + +static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, + struct sk_buff *skb, u32 *opts) +{ + u32 transport_offset = (u32)skb_transport_offset(skb); + u32 mss = skb_shinfo(skb)->gso_size; + + if (mss) { + if (transport_offset > GTTCPHO_MAX) { + netif_warn(tp, tx_err, tp->dev, + "Invalid transport offset 0x%x for TSO\n", + transport_offset); + return false; + } + + switch (get_protocol(skb)) { + case htons(ETH_P_IP): + opts[0] |= TD1_GTSENV4; + break; + + case htons(ETH_P_IPV6): + if (msdn_giant_send_check(skb)) + return false; + + opts[0] |= TD1_GTSENV6; + break; + + default: + WARN_ON_ONCE(1); + break; + } + + opts[0] |= transport_offset << GTTCPHO_SHIFT; + opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT; + } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + u8 ip_protocol; + + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) + return !(skb_checksum_help(skb) || eth_skb_pad(skb)); + + if (transport_offset > TCPHO_MAX) { + netif_warn(tp, tx_err, tp->dev, + "Invalid transport offset 0x%x\n", + transport_offset); + return false; + } + + switch (get_protocol(skb)) { + case htons(ETH_P_IP): + opts[1] |= TD1_IPv4_CS; + ip_protocol = ip_hdr(skb)->protocol; + break; + + case htons(ETH_P_IPV6): + opts[1] |= TD1_IPv6_CS; + ip_protocol = ipv6_hdr(skb)->nexthdr; + break; + + default: + ip_protocol = IPPROTO_RAW; + break; + } + + if (ip_protocol == IPPROTO_TCP) + opts[1] |= TD1_TCP_CS; + else if (ip_protocol == IPPROTO_UDP) + opts[1] |= TD1_UDP_CS; + else + WARN_ON_ONCE(1); + + opts[1] |= transport_offset << TCPHO_SHIFT; + } else { + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) + return !eth_skb_pad(skb); + } + + return true; +} + +static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + unsigned int entry = tp->cur_tx % NUM_TX_DESC; + struct TxDesc *txd = tp->TxDescArray + entry; + void __iomem *ioaddr = tp->mmio_addr; + struct device *d = &tp->pci_dev->dev; + dma_addr_t mapping; + u32 status, len; + u32 opts[2]; + int frags; + + if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { + netif_err(tp, drv, dev, "BUG! 
Tx Ring full when queue awake!\n"); + goto err_stop_0; + } + + if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) + goto err_stop_0; + + opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb)); + opts[0] = DescOwn; + + if (!tp->tso_csum(tp, skb, opts)) { + r8169_csum_workaround(tp, skb); + return NETDEV_TX_OK; + } + + len = skb_headlen(skb); + mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(d, mapping))) { + if (net_ratelimit()) + netif_err(tp, drv, dev, "Failed to map TX DMA!\n"); + goto err_dma_0; + } + + tp->tx_skb[entry].len = len; + txd->addr = cpu_to_le64(mapping); + + frags = rtl8169_xmit_frags(tp, skb, opts); + if (frags < 0) + goto err_dma_1; + else if (frags) + opts[0] |= FirstFrag; + else { + opts[0] |= FirstFrag | LastFrag; + tp->tx_skb[entry].skb = skb; + } + + txd->opts2 = cpu_to_le32(opts[1]); + + skb_tx_timestamp(skb); + + /* Force memory writes to complete before releasing descriptor */ + dma_wmb(); + + /* Anti gcc 2.95.3 bugware (sic) */ + status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); + txd->opts1 = cpu_to_le32(status); + + /* Force all memory writes to complete before notifying device */ + wmb(); + + tp->cur_tx += frags + 1; + + RTL_W8(TxPoll, NPQ); + + mmiowb(); + + if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { + /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must + * not miss a ring update when it notices a stopped queue. + */ + smp_wmb(); + netif_stop_queue(dev); + /* Sync with rtl_tx: + * - publish queue status and cur_tx ring index (write barrier) + * - refresh dirty_tx ring index (read barrier). + * May the current thread have a pessimistic view of the ring + * status and forget to wake up queue, a racing rtl_tx thread + * can't. + */ + smp_mb(); + if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) + netif_wake_queue(dev); + } + + return NETDEV_TX_OK; + +err_dma_1: + rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); +err_dma_0: + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + return NETDEV_TX_OK; + +err_stop_0: + netif_stop_queue(dev); + dev->stats.tx_dropped++; + return NETDEV_TX_BUSY; +} + +static void rtl8169_pcierr_interrupt(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + u16 pci_status, pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + pci_read_config_word(pdev, PCI_STATUS, &pci_status); + + netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n", + pci_cmd, pci_status); + + /* + * The recovery sequence below admits a very elaborated explanation: + * - it seems to work; + * - I did not see what else could be done; + * - it makes iop3xx happy. + * + * Feel free to adjust to your needs. 
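+ * Concretely: parity/SERR reporting is re-armed (or parity checking
+ * dropped on bridges flagged broken_parity_status), the latched
+ * PCI_STATUS error bits are cleared, and a full reset is scheduled.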
+ */ + if (pdev->broken_parity_status) + pci_cmd &= ~PCI_COMMAND_PARITY; + else + pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY; + + pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); + + pci_write_config_word(pdev, PCI_STATUS, + pci_status & (PCI_STATUS_DETECTED_PARITY | + PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT | + PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT)); + + /* The infamous DAC f*ckup only happens at boot time */ + if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) { + void __iomem *ioaddr = tp->mmio_addr; + + netif_info(tp, intr, dev, "disabling PCI DAC\n"); + tp->cp_cmd &= ~PCIDAC; + RTL_W16(CPlusCmd, tp->cp_cmd); + dev->features &= ~NETIF_F_HIGHDMA; + } + + rtl8169_hw_reset(tp); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) +{ + unsigned int dirty_tx, tx_left; + + dirty_tx = tp->dirty_tx; + smp_rmb(); + tx_left = tp->cur_tx - dirty_tx; + + while (tx_left > 0) { + unsigned int entry = dirty_tx % NUM_TX_DESC; + struct ring_info *tx_skb = tp->tx_skb + entry; + u32 status; + + status = le32_to_cpu(tp->TxDescArray[entry].opts1); + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Tx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, + tp->TxDescArray + entry); + if (status & LastFrag) { + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets++; + tp->tx_stats.bytes += tx_skb->skb->len; + u64_stats_update_end(&tp->tx_stats.syncp); + dev_kfree_skb_any(tx_skb->skb); + tx_skb->skb = NULL; + } + dirty_tx++; + tx_left--; + } + + if (tp->dirty_tx != dirty_tx) { + tp->dirty_tx = dirty_tx; + /* Sync with rtl8169_start_xmit: + * - publish dirty_tx ring index (write barrier) + * - refresh cur_tx ring index and queue status (read barrier) + * May the current thread miss the stopped queue condition, + * a racing xmit thread can only have a right view of the + * ring status. + */ + smp_mb(); + if (netif_queue_stopped(dev) && + TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { + netif_wake_queue(dev); + } + /* + * 8168 hack: TxPoll requests are lost when the Tx packets are + * too close. Let's kick an extra TxPoll request when a burst + * of start_xmit activity is detected (if it is not detected, + * it is slow enough). 
-- FR + */ + if (tp->cur_tx != dirty_tx) { + void __iomem *ioaddr = tp->mmio_addr; + + RTL_W8(TxPoll, NPQ); + } + } +} + +static inline int rtl8169_fragmented_frame(u32 status) +{ + return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag); +} + +static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1) +{ + u32 status = opts1 & RxProtoMask; + + if (((status == RxProtoTCP) && !(opts1 & TCPFail)) || + ((status == RxProtoUDP) && !(opts1 & UDPFail))) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); +} + +static struct sk_buff *rtl8169_try_rx_copy(void *data, + struct rtl8169_private *tp, + int pkt_size, + dma_addr_t addr) +{ + struct sk_buff *skb; + struct device *d = &tp->pci_dev->dev; + + data = rtl8169_align(data); + dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE); + prefetch(data); + skb = napi_alloc_skb(&tp->napi, pkt_size); + if (skb) + memcpy(skb->data, data, pkt_size); + dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE); + + return skb; +} + +static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget) +{ + unsigned int cur_rx, rx_left; + unsigned int count; + + cur_rx = tp->cur_rx; + + for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) { + unsigned int entry = cur_rx % NUM_RX_DESC; + struct RxDesc *desc = tp->RxDescArray + entry; + u32 status; + + status = le32_to_cpu(desc->opts1) & tp->opts1_mask; + if (status & DescOwn) + break; + + /* This barrier is needed to keep us from reading + * any other fields out of the Rx descriptor until + * we know the status of DescOwn + */ + dma_rmb(); + + if (unlikely(status & RxRES)) { + netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n", + status); + dev->stats.rx_errors++; + if (status & (RxRWT | RxRUNT)) + dev->stats.rx_length_errors++; + if (status & RxCRC) + dev->stats.rx_crc_errors++; + if (status & RxFOVF) { + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); + dev->stats.rx_fifo_errors++; + } + if ((status & (RxRUNT | RxCRC)) && + !(status & (RxRWT | RxFOVF)) && + (dev->features & NETIF_F_RXALL)) + goto process_pkt; + } else { + struct sk_buff *skb; + dma_addr_t addr; + int pkt_size; + +process_pkt: + addr = le64_to_cpu(desc->addr); + if (likely(!(dev->features & NETIF_F_RXFCS))) + pkt_size = (status & 0x00003fff) - 4; + else + pkt_size = status & 0x00003fff; + + /* + * The driver does not support incoming fragmented + * frames. They are seen as a symptom of over-mtu + * sized frames. 
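+ * A complete frame carries both FirstFrag and LastFrag in its status
+ * word; anything else is dropped below and accounted as a length error.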
+ */ + if (unlikely(rtl8169_fragmented_frame(status))) { + dev->stats.rx_dropped++; + dev->stats.rx_length_errors++; + goto release_descriptor; + } + + skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], + tp, pkt_size, addr); + if (!skb) { + dev->stats.rx_dropped++; + goto release_descriptor; + } + + rtl8169_rx_csum(skb, status); + skb_put(skb, pkt_size); + skb->protocol = eth_type_trans(skb, dev); + + rtl8169_rx_vlan_tag(desc, skb); + + if (skb->pkt_type == PACKET_MULTICAST) + dev->stats.multicast++; + + napi_gro_receive(&tp->napi, skb); + + u64_stats_update_begin(&tp->rx_stats.syncp); + tp->rx_stats.packets++; + tp->rx_stats.bytes += pkt_size; + u64_stats_update_end(&tp->rx_stats.syncp); + } +release_descriptor: + desc->opts2 = 0; + rtl8169_mark_to_asic(desc, rx_buf_sz); + } + + count = cur_rx - tp->cur_rx; + tp->cur_rx = cur_rx; + + return count; +} + +static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) +{ + struct net_device *dev = dev_instance; + struct rtl8169_private *tp = netdev_priv(dev); + int handled = 0; + u16 status; + + status = rtl_get_events(tp); + if (status && status != 0xffff) { + status &= RTL_EVENT_NAPI | tp->event_slow; + if (status) { + handled = 1; + + rtl_irq_disable(tp); + napi_schedule(&tp->napi); + } + } + return IRQ_RETVAL(handled); +} + +/* + * Workqueue context. + */ +static void rtl_slow_event_work(struct rtl8169_private *tp) +{ + struct net_device *dev = tp->dev; + u16 status; + + status = rtl_get_events(tp) & tp->event_slow; + rtl_ack_events(tp, status); + + if (unlikely(status & RxFIFOOver)) { + switch (tp->mac_version) { + /* Work around for rx fifo overflow */ + case RTL_GIGA_MAC_VER_11: + netif_stop_queue(dev); + /* XXX - Hack alert. See rtl_task(). */ + set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags); + default: + break; + } + } + + if (unlikely(status & SYSErr)) + rtl8169_pcierr_interrupt(dev); + + if (status & LinkChg) + __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); + + rtl_irq_enable_all(tp); +} + +static void rtl_task(struct work_struct *work) +{ + static const struct { + int bitnr; + void (*action)(struct rtl8169_private *); + } rtl_work[] = { + /* XXX - keep rtl_slow_event_work() as first element. 
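+ * The array is walked in order, so slow events (PCI errors, link
+ * changes) get serviced before a queued reset or PHY action runs.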
*/ + { RTL_FLAG_TASK_SLOW_PENDING, rtl_slow_event_work }, + { RTL_FLAG_TASK_RESET_PENDING, rtl_reset_work }, + { RTL_FLAG_TASK_PHY_PENDING, rtl_phy_work } + }; + struct rtl8169_private *tp = + container_of(work, struct rtl8169_private, wk.work); + struct net_device *dev = tp->dev; + int i; + + rtl_lock_work(tp); + + if (!netif_running(dev) || + !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags)) + goto out_unlock; + + for (i = 0; i < ARRAY_SIZE(rtl_work); i++) { + bool pending; + + pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags); + if (pending) + rtl_work[i].action(tp); + } + +out_unlock: + rtl_unlock_work(tp); +} + +static int rtl8169_poll(struct napi_struct *napi, int budget) +{ + struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); + struct net_device *dev = tp->dev; + u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow; + int work_done; + u16 status; + + status = rtl_get_events(tp); + rtl_ack_events(tp, status & ~tp->event_slow); + + work_done = rtl_rx(dev, tp, (u32) budget); + + rtl_tx(dev, tp); + + if (status & tp->event_slow) { + enable_mask &= ~tp->event_slow; + + rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING); + } + + if (work_done < budget) { + napi_complete(napi); + + rtl_irq_enable(tp, enable_mask); + mmiowb(); + } + + return work_done; +} + +static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (tp->mac_version > RTL_GIGA_MAC_VER_06) + return; + + dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff); + RTL_W32(RxMissed, 0); +} + +static void rtl8169_down(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + + del_timer_sync(&tp->timer); + + napi_disable(&tp->napi); + netif_stop_queue(dev); + + rtl8169_hw_reset(tp); + /* + * At this point device interrupts can not be enabled in any function, + * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task) + * and napi is disabled (rtl8169_poll). + */ + rtl8169_rx_missed(dev, ioaddr); + + /* Give a racing hard_start_xmit a few cycles to complete. */ + synchronize_sched(); + + rtl8169_tx_clear(tp); + + rtl8169_rx_clear(tp); + + rtl_pll_power_down(tp); +} + +static int rtl8169_close(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + struct pci_dev *pdev = tp->pci_dev; + + pm_runtime_get_sync(&pdev->dev); + + /* Update counters before going down */ + rtl8169_update_counters(dev); + + rtl_lock_work(tp); + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); + + rtl8169_down(dev); + rtl_unlock_work(tp); + + cancel_work_sync(&tp->wk.work); + + free_irq(pdev->irq, dev); + + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; + tp->RxDescArray = NULL; + + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void rtl8169_netpoll(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_interrupt(tp->pci_dev->irq, dev); +} +#endif + +static int rtl_open(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct pci_dev *pdev = tp->pci_dev; + int retval = -ENOMEM; + + pm_runtime_get_sync(&pdev->dev); + + /* + * Rx and Tx descriptors needs 256 bytes alignment. + * dma_alloc_coherent provides more. 
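+ * (at least page alignment per the DMA-API guarantee, which easily
+ * covers the 256-byte requirement)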
+ */ + tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES, + &tp->TxPhyAddr, GFP_KERNEL); + if (!tp->TxDescArray) + goto err_pm_runtime_put; + + tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES, + &tp->RxPhyAddr, GFP_KERNEL); + if (!tp->RxDescArray) + goto err_free_tx_0; + + retval = rtl8169_init_ring(dev); + if (retval < 0) + goto err_free_rx_1; + + INIT_WORK(&tp->wk.work, rtl_task); + + smp_mb(); + + rtl_request_firmware(tp); + + retval = request_irq(pdev->irq, rtl8169_interrupt, + (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED, + dev->name, dev); + if (retval < 0) + goto err_release_fw_2; + + rtl_lock_work(tp); + + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + + napi_enable(&tp->napi); + + rtl8169_init_phy(dev, tp); + + __rtl8169_set_features(dev, dev->features); + + rtl_pll_power_up(tp); + + rtl_hw_start(dev); + + if (!rtl8169_init_counter_offsets(dev)) + netif_warn(tp, hw, dev, "counter reset/update failed\n"); + + netif_start_queue(dev); + + rtl_unlock_work(tp); + + tp->saved_wolopts = 0; + pm_runtime_put_noidle(&pdev->dev); + + rtl8169_check_link_status(dev, tp, ioaddr); +out: + return retval; + +err_release_fw_2: + rtl_release_firmware(tp); + rtl8169_rx_clear(tp); +err_free_rx_1: + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, + tp->RxPhyAddr); + tp->RxDescArray = NULL; +err_free_tx_0: + dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, + tp->TxPhyAddr); + tp->TxDescArray = NULL; +err_pm_runtime_put: + pm_runtime_put_noidle(&pdev->dev); + goto out; +} + +static struct rtnl_link_stats64 * +rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct rtl8169_private *tp = netdev_priv(dev); + void __iomem *ioaddr = tp->mmio_addr; + struct rtl8169_counters *counters = tp->counters; + unsigned int start; + + if (netif_running(dev)) + rtl8169_rx_missed(dev, ioaddr); + + do { + start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp); + stats->rx_packets = tp->rx_stats.packets; + stats->rx_bytes = tp->rx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tp->rx_stats.syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&tp->tx_stats.syncp); + stats->tx_packets = tp->tx_stats.packets; + stats->tx_bytes = tp->tx_stats.bytes; + } while (u64_stats_fetch_retry_irq(&tp->tx_stats.syncp, start)); + + stats->rx_dropped = dev->stats.rx_dropped; + stats->tx_dropped = dev->stats.tx_dropped; + stats->rx_length_errors = dev->stats.rx_length_errors; + stats->rx_errors = dev->stats.rx_errors; + stats->rx_crc_errors = dev->stats.rx_crc_errors; + stats->rx_fifo_errors = dev->stats.rx_fifo_errors; + stats->rx_missed_errors = dev->stats.rx_missed_errors; + stats->multicast = dev->stats.multicast; + + /* + * Fetch additonal counter values missing in stats collected by driver + * from tally counters. + */ + rtl8169_update_counters(dev); + + /* + * Subtract values fetched during initalization. + * See rtl8169_init_counter_offsets for a description why we do that. 
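+ * The hardware tally counters are cumulative, so each value reported
+ * here is the current count minus the snapshot taken by
+ * rtl8169_init_counter_offsets() when the interface was opened.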
+ */ + stats->tx_errors = le64_to_cpu(counters->tx_errors) - + le64_to_cpu(tp->tc_offset.tx_errors); + stats->collisions = le32_to_cpu(counters->tx_multi_collision) - + le32_to_cpu(tp->tc_offset.tx_multi_collision); + stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) - + le16_to_cpu(tp->tc_offset.tx_aborted); + + return stats; +} + +static void rtl8169_net_suspend(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + if (!netif_running(dev)) + return; + + netif_device_detach(dev); + netif_stop_queue(dev); + + rtl_lock_work(tp); + napi_disable(&tp->napi); + /* Clear all task flags */ + bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); + + rtl_unlock_work(tp); + + rtl_pll_power_down(tp); +} + +#ifdef CONFIG_PM + +static int rtl8169_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + + rtl8169_net_suspend(dev); + + return 0; +} + +static void __rtl8169_resume(struct net_device *dev) +{ + struct rtl8169_private *tp = netdev_priv(dev); + + netif_device_attach(dev); + + rtl_pll_power_up(tp); + + rtl_lock_work(tp); + napi_enable(&tp->napi); + set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); + rtl_unlock_work(tp); + + rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); +} + +static int rtl8169_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + rtl8169_init_phy(dev, tp); + + if (netif_running(dev)) + __rtl8169_resume(dev); + + return 0; +} + +static int rtl8169_runtime_suspend(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (!tp->TxDescArray) + return 0; + + rtl_lock_work(tp); + tp->saved_wolopts = __rtl8169_get_wol(tp); + __rtl8169_set_wol(tp, WAKE_ANY); + rtl_unlock_work(tp); + + rtl8169_net_suspend(dev); + + return 0; +} + +static int rtl8169_runtime_resume(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if (!tp->TxDescArray) + return 0; + + rtl_lock_work(tp); + __rtl8169_set_wol(tp, tp->saved_wolopts); + tp->saved_wolopts = 0; + rtl_unlock_work(tp); + + rtl8169_init_phy(dev, tp); + + __rtl8169_resume(dev); + + return 0; +} + +static int rtl8169_runtime_idle(struct device *device) +{ + struct pci_dev *pdev = to_pci_dev(device); + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + return tp->TxDescArray ? -EBUSY : 0; +} + +static const struct dev_pm_ops rtl8169_pm_ops = { + .suspend = rtl8169_suspend, + .resume = rtl8169_resume, + .freeze = rtl8169_suspend, + .thaw = rtl8169_resume, + .poweroff = rtl8169_suspend, + .restore = rtl8169_resume, + .runtime_suspend = rtl8169_runtime_suspend, + .runtime_resume = rtl8169_runtime_resume, + .runtime_idle = rtl8169_runtime_idle, +}; + +#define RTL8169_PM_OPS (&rtl8169_pm_ops) + +#else /* !CONFIG_PM */ + +#define RTL8169_PM_OPS NULL + +#endif /* !CONFIG_PM */ + +static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + + /* WoL fails with 8168b when the receiver is disabled. 
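+ * So for the three 8168b flavours below, bus mastering is stopped but
+ * CmdRxEnb is left set, keeping the receiver alive for wake-up packets.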
*/ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_11: + case RTL_GIGA_MAC_VER_12: + case RTL_GIGA_MAC_VER_17: + pci_clear_master(tp->pci_dev); + + RTL_W8(ChipCmd, CmdRxEnb); + /* PCI commit */ + RTL_R8(ChipCmd); + break; + default: + break; + } +} + +static void rtl_shutdown(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + struct device *d = &pdev->dev; + + pm_runtime_get_sync(d); + + rtl8169_net_suspend(dev); + + /* Restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); + + rtl8169_hw_reset(tp); + + if (system_state == SYSTEM_POWER_OFF) { + if (__rtl8169_get_wol(tp) & WAKE_ANY) { + rtl_wol_suspend_quirk(tp); + rtl_wol_shutdown_quirk(tp); + } + + pci_wake_from_d3(pdev, true); + pci_set_power_state(pdev, PCI_D3hot); + } + + pm_runtime_put_noidle(d); +} + +static void rtl_remove_one(struct pci_dev *pdev) +{ + struct net_device *dev = pci_get_drvdata(pdev); + struct rtl8169_private *tp = netdev_priv(dev); + + if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) && + r8168_check_dash(tp)) { + rtl8168_driver_stop(tp); + } + + netif_napi_del(&tp->napi); + + unregister_netdev(dev); + + dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters), + tp->counters, tp->counters_phys_addr); + + rtl_release_firmware(tp); + + if (pci_dev_run_wake(pdev)) + pm_runtime_get_noresume(&pdev->dev); + + /* restore original MAC address */ + rtl_rar_set(tp, dev->perm_addr); + + rtl_disable_msi(pdev, tp); + rtl8169_release_board(pdev, dev, tp->mmio_addr); +} + +static const struct net_device_ops rtl_netdev_ops = { + .ndo_open = rtl_open, + .ndo_stop = rtl8169_close, + .ndo_get_stats64 = rtl8169_get_stats64, + .ndo_start_xmit = rtl8169_start_xmit, + .ndo_tx_timeout = rtl8169_tx_timeout, + .ndo_validate_addr = eth_validate_addr, + .ndo_change_mtu = rtl8169_change_mtu, + .ndo_fix_features = rtl8169_fix_features, + .ndo_set_features = rtl8169_set_features, + .ndo_set_mac_address = rtl_set_mac_address, + .ndo_do_ioctl = rtl8169_ioctl, + .ndo_set_rx_mode = rtl_set_rx_mode, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = rtl8169_netpoll, +#endif + +}; + +static const struct rtl_cfg_info { + void (*hw_start)(struct net_device *); + unsigned int region; + unsigned int align; + u16 event_slow; + unsigned features; + u8 default_ver; +} rtl_cfg_infos [] = { + [RTL_CFG_0] = { + .hw_start = rtl_hw_start_8169, + .region = 1, + .align = 0, + .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver, + .features = RTL_FEATURE_GMII, + .default_ver = RTL_GIGA_MAC_VER_01, + }, + [RTL_CFG_1] = { + .hw_start = rtl_hw_start_8168, + .region = 2, + .align = 8, + .event_slow = SYSErr | LinkChg | RxOverflow, + .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_11, + }, + [RTL_CFG_2] = { + .hw_start = rtl_hw_start_8101, + .region = 2, + .align = 8, + .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver | + PCSTimeout, + .features = RTL_FEATURE_MSI, + .default_ver = RTL_GIGA_MAC_VER_13, + } +}; + +/* Cfg9346_Unlock assumed. 
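+ * rtl_try_msi() may flip MSIEnable in Config2, which is only writable
+ * while the Cfg9346 lock is open; rtl_init_one() brackets the call with
+ * the unlock/lock pair.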
*/ +static unsigned rtl_try_msi(struct rtl8169_private *tp, + const struct rtl_cfg_info *cfg) +{ + void __iomem *ioaddr = tp->mmio_addr; + unsigned msi = 0; + u8 cfg2; + + cfg2 = RTL_R8(Config2) & ~MSIEnable; + if (cfg->features & RTL_FEATURE_MSI) { + if (pci_enable_msi(tp->pci_dev)) { + netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n"); + } else { + cfg2 |= MSIEnable; + msi = RTL_FEATURE_MSI; + } + } + if (tp->mac_version <= RTL_GIGA_MAC_VER_06) + RTL_W8(Config2, cfg2); + return msi; +} + +DECLARE_RTL_COND(rtl_link_list_ready_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return RTL_R8(MCU) & LINK_LIST_RDY; +} + +DECLARE_RTL_COND(rtl_rxtx_empty_cond) +{ + void __iomem *ioaddr = tp->mmio_addr; + + return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY; +} + +static void rtl_hw_init_8168g(struct rtl8169_private *tp) +{ + void __iomem *ioaddr = tp->mmio_addr; + u32 data; + + tp->ocp_base = OCP_STD_PHY_BASE; + + RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42)) + return; + + if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42)) + return; + + RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb)); + msleep(1); + RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB); + + data = r8168_mac_ocp_read(tp, 0xe8de); + data &= ~(1 << 14); + r8168_mac_ocp_write(tp, 0xe8de, data); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) + return; + + data = r8168_mac_ocp_read(tp, 0xe8de); + data |= (1 << 15); + r8168_mac_ocp_write(tp, 0xe8de, data); + + if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42)) + return; +} + +static void rtl_hw_init_8168ep(struct rtl8169_private *tp) +{ + rtl8168ep_stop_cmac(tp); + rtl_hw_init_8168g(tp); +} + +static void rtl_hw_initialize(struct rtl8169_private *tp) +{ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + rtl_hw_init_8168g(tp); + break; + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + rtl_hw_init_8168ep(tp); + break; + default: + break; + } +} + +static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data; + const unsigned int region = cfg->region; + struct rtl8169_private *tp; + struct mii_if_info *mii; + struct net_device *dev; + void __iomem *ioaddr; + int chipset, i; + int rc; + + if (netif_msg_drv(&debug)) { + printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", + MODULENAME, RTL8169_VERSION); + } + + dev = alloc_etherdev(sizeof (*tp)); + if (!dev) { + rc = -ENOMEM; + goto out; + } + + SET_NETDEV_DEV(dev, &pdev->dev); + dev->netdev_ops = &rtl_netdev_ops; + tp = netdev_priv(dev); + tp->dev = dev; + tp->pci_dev = pdev; + tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); + + mii = &tp->mii; + mii->dev = dev; + mii->mdio_read = rtl_mdio_read; + mii->mdio_write = rtl_mdio_write; + mii->phy_id_mask = 0x1f; + mii->reg_num_mask = 0x1f; + mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); + + /* disable ASPM completely as that cause random device stop working + * problems as well as full system hangs for some PCIe devices users */ + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | + PCIE_LINK_STATE_CLKPM); + + /* enable device (incl. 
PCI PM wakeup and hotplug setup) */ + rc = pci_enable_device(pdev); + if (rc < 0) { + netif_err(tp, probe, dev, "enable failure\n"); + goto err_out_free_dev_1; + } + + if (pci_set_mwi(pdev) < 0) + netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n"); + + /* make sure PCI base addr 1 is MMIO */ + if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { + netif_err(tp, probe, dev, + "region #%d not an MMIO resource, aborting\n", + region); + rc = -ENODEV; + goto err_out_mwi_2; + } + + /* check for weird/broken PCI region reporting */ + if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) { + netif_err(tp, probe, dev, + "Invalid PCI region size(s), aborting\n"); + rc = -ENODEV; + goto err_out_mwi_2; + } + + rc = pci_request_regions(pdev, MODULENAME); + if (rc < 0) { + netif_err(tp, probe, dev, "could not request regions\n"); + goto err_out_mwi_2; + } + + tp->cp_cmd = 0; + + if ((sizeof(dma_addr_t) > 4) && + !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { + tp->cp_cmd |= PCIDAC; + dev->features |= NETIF_F_HIGHDMA; + } else { + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc < 0) { + netif_err(tp, probe, dev, "DMA configuration failed\n"); + goto err_out_free_res_3; + } + } + + /* ioremap MMIO region */ + ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE); + if (!ioaddr) { + netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n"); + rc = -EIO; + goto err_out_free_res_3; + } + tp->mmio_addr = ioaddr; + + if (!pci_is_pcie(pdev)) + netif_info(tp, probe, dev, "not PCI Express\n"); + + /* Identify chip attached to board */ + rtl8169_get_mac_version(tp, dev, cfg->default_ver); + + rtl_init_rxcfg(tp); + + rtl_irq_disable(tp); + + rtl_hw_initialize(tp); + + rtl_hw_reset(tp); + + rtl_ack_events(tp, 0xffff); + + pci_set_master(pdev); + + rtl_init_mdio_ops(tp); + rtl_init_pll_power_ops(tp); + rtl_init_jumbo_ops(tp); + rtl_init_csi_ops(tp); + + rtl8169_print_mac_version(tp); + + chipset = tp->mac_version; + tp->txd_version = rtl_chip_infos[chipset].txd_version; + + RTL_W8(Cfg9346, Cfg9346_Unlock); + RTL_W8(Config1, RTL_R8(Config1) | PMEnable); + RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus)); + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34: + case RTL_GIGA_MAC_VER_35: + case RTL_GIGA_MAC_VER_36: + case RTL_GIGA_MAC_VER_37: + case RTL_GIGA_MAC_VER_38: + case RTL_GIGA_MAC_VER_40: + case RTL_GIGA_MAC_VER_41: + case RTL_GIGA_MAC_VER_42: + case RTL_GIGA_MAC_VER_43: + case RTL_GIGA_MAC_VER_44: + case RTL_GIGA_MAC_VER_45: + case RTL_GIGA_MAC_VER_46: + case RTL_GIGA_MAC_VER_47: + case RTL_GIGA_MAC_VER_48: + case RTL_GIGA_MAC_VER_49: + case RTL_GIGA_MAC_VER_50: + case RTL_GIGA_MAC_VER_51: + if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2) + tp->features |= RTL_FEATURE_WOL; + if ((RTL_R8(Config3) & LinkUp) != 0) + tp->features |= RTL_FEATURE_WOL; + break; + default: + if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0) + tp->features |= RTL_FEATURE_WOL; + break; + } + if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0) + tp->features |= RTL_FEATURE_WOL; + tp->features |= rtl_try_msi(tp, cfg); + RTL_W8(Cfg9346, Cfg9346_Lock); + + if (rtl_tbi_enabled(tp)) { + tp->set_speed = rtl8169_set_speed_tbi; + tp->get_settings = rtl8169_gset_tbi; + tp->phy_reset_enable = rtl8169_tbi_reset_enable; + tp->phy_reset_pending = rtl8169_tbi_reset_pending; + tp->link_ok = rtl8169_tbi_link_ok; + tp->do_ioctl = rtl_tbi_ioctl; + } else { + tp->set_speed = rtl8169_set_speed_xmii; + tp->get_settings = rtl8169_gset_xmii; + tp->phy_reset_enable = 
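+ /* MII-attached PHY: the common case. The TBI branch above serves
+  * the early fiber 8169 variants.
+  */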
rtl8169_xmii_reset_enable; + tp->phy_reset_pending = rtl8169_xmii_reset_pending; + tp->link_ok = rtl8169_xmii_link_ok; + tp->do_ioctl = rtl_xmii_ioctl; + } + + mutex_init(&tp->wk.mutex); + u64_stats_init(&tp->rx_stats.syncp); + u64_stats_init(&tp->tx_stats.syncp); + + /* Get MAC address */ + if (tp->mac_version == RTL_GIGA_MAC_VER_35 || + tp->mac_version == RTL_GIGA_MAC_VER_36 || + tp->mac_version == RTL_GIGA_MAC_VER_37 || + tp->mac_version == RTL_GIGA_MAC_VER_38 || + tp->mac_version == RTL_GIGA_MAC_VER_40 || + tp->mac_version == RTL_GIGA_MAC_VER_41 || + tp->mac_version == RTL_GIGA_MAC_VER_42 || + tp->mac_version == RTL_GIGA_MAC_VER_43 || + tp->mac_version == RTL_GIGA_MAC_VER_44 || + tp->mac_version == RTL_GIGA_MAC_VER_45 || + tp->mac_version == RTL_GIGA_MAC_VER_46 || + tp->mac_version == RTL_GIGA_MAC_VER_47 || + tp->mac_version == RTL_GIGA_MAC_VER_48 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) { + u16 mac_addr[3]; + + *(u32 *)&mac_addr[0] = rtl_eri_read(tp, 0xe0, ERIAR_EXGMAC); + *(u16 *)&mac_addr[2] = rtl_eri_read(tp, 0xe4, ERIAR_EXGMAC); + + if (is_valid_ether_addr((u8 *)mac_addr)) + rtl_rar_set(tp, (u8 *)mac_addr); + } + for (i = 0; i < ETH_ALEN; i++) + dev->dev_addr[i] = RTL_R8(MAC0 + i); + + dev->ethtool_ops = &rtl8169_ethtool_ops; + dev->watchdog_timeo = RTL8169_TX_TIMEOUT; + + netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT); + + /* don't enable SG, IP_CSUM and TSO by default - it might not work + * properly for all devices */ + dev->features |= NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; + + dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX; + dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | + NETIF_F_HIGHDMA; + + tp->cp_cmd |= RxChkSum | RxVlan; + + /* + * Pretend we are using VLANs; This bypasses a nasty bug where + * Interrupts stop flowing on high load on 8110SCd controllers. + */ + if (tp->mac_version == RTL_GIGA_MAC_VER_05) + /* Disallow toggling */ + dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + + if (tp->txd_version == RTL_TD_0) + tp->tso_csum = rtl8169_tso_csum_v1; + else if (tp->txd_version == RTL_TD_1) { + tp->tso_csum = rtl8169_tso_csum_v2; + dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + } else + WARN_ON_ONCE(1); + + dev->hw_features |= NETIF_F_RXALL; + dev->hw_features |= NETIF_F_RXFCS; + + tp->hw_start = cfg->hw_start; + tp->event_slow = cfg->event_slow; + + tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ? + ~(RxBOVF | RxFOVF) : ~0; + + init_timer(&tp->timer); + tp->timer.data = (unsigned long) dev; + tp->timer.function = rtl8169_phy_timer; + + tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; + + tp->counters = dma_alloc_coherent (&pdev->dev, sizeof(*tp->counters), + &tp->counters_phys_addr, GFP_KERNEL); + if (!tp->counters) { + rc = -ENOMEM; + goto err_out_msi_4; + } + + pci_set_drvdata(pdev, dev); + + rc = register_netdev(dev); + if (rc < 0) + goto err_out_cnt_5; + + netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n", + rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr, + (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq); + if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) { + netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, " + "tx checksumming: %s]\n", + rtl_chip_infos[chipset].jumbo_max, + rtl_chip_infos[chipset].jumbo_tx_csum ? 
"ok" : "ko"); + } + + if ((tp->mac_version == RTL_GIGA_MAC_VER_27 || + tp->mac_version == RTL_GIGA_MAC_VER_28 || + tp->mac_version == RTL_GIGA_MAC_VER_31 || + tp->mac_version == RTL_GIGA_MAC_VER_49 || + tp->mac_version == RTL_GIGA_MAC_VER_50 || + tp->mac_version == RTL_GIGA_MAC_VER_51) && + r8168_check_dash(tp)) { + rtl8168_driver_start(tp); + } + + device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); + + if (pci_dev_run_wake(pdev)) + pm_runtime_put_noidle(&pdev->dev); + + netif_carrier_off(dev); + +out: + return rc; + +err_out_cnt_5: + dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters, + tp->counters_phys_addr); +err_out_msi_4: + netif_napi_del(&tp->napi); + rtl_disable_msi(pdev, tp); + iounmap(ioaddr); +err_out_free_res_3: + pci_release_regions(pdev); +err_out_mwi_2: + pci_clear_mwi(pdev); + pci_disable_device(pdev); +err_out_free_dev_1: + free_netdev(dev); + goto out; +} + +static struct pci_driver rtl8169_pci_driver = { + .name = MODULENAME, + .id_table = rtl8169_pci_tbl, + .probe = rtl_init_one, + .remove = rtl_remove_one, + .shutdown = rtl_shutdown, + .driver.pm = RTL8169_PM_OPS, +}; + +module_pci_driver(rtl8169_pci_driver); diff --git a/addons/skge/install.sh b/addons/skge/install.sh new file mode 100644 index 00000000..9a5111b6 --- /dev/null +++ b/addons/skge/install.sh @@ -0,0 +1,4 @@ +if [ "${1}" = "rd" ]; then + echo "Installing module for Marvell Yukon Gigabit Ethernet adapter" + ${INSMOD} "/modules/skge.ko" ${PARAMS} +fi diff --git a/addons/skge/manifest.yml b/addons/skge/manifest.yml new file mode 100644 index 00000000..1f11e717 --- /dev/null +++ b/addons/skge/manifest.yml @@ -0,0 +1,28 @@ +version: 1 +name: skge +description: "Driver for Marvell Yukon Gigabit Ethernet adapters" +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true diff --git a/addons/skge/src/3.10.108/Makefile b/addons/skge/src/3.10.108/Makefile new file mode 100644 index 00000000..f0ca3939 --- /dev/null +++ b/addons/skge/src/3.10.108/Makefile @@ -0,0 +1 @@ +obj-m += skge.o diff --git a/addons/skge/src/3.10.108/skge.c b/addons/skge/src/3.10.108/skge.c new file mode 100644 index 00000000..171f4b3d --- /dev/null +++ b/addons/skge/src/3.10.108/skge.c @@ -0,0 +1,4180 @@ +/* + * New driver for Marvell Yukon chipset and SysKonnect Gigabit + * Ethernet adapters. Based on earlier sk98lin, e100 and + * FreeBSD if_sk drivers. + * + * This driver intentionally does not support all the features + * of the original driver such as link fail-over and link management because + * those should be done at higher levels. + * + * Copyright (C) 2004, 2005 Stephen Hemminger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/in.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/pci.h> +#include <linux/if_vlan.h> +#include <linux/ip.h> +#include <linux/delay.h> +#include <linux/crc32.h> +#include <linux/dma-mapping.h> +#include <linux/debugfs.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/mii.h> +#include <linux/slab.h> +#include <linux/dmi.h> +#include <linux/prefetch.h> +#include <asm/irq.h> + +#include "skge.h" + +#define DRV_NAME "skge" +#define DRV_VERSION "1.14" + +#define DEFAULT_TX_RING_SIZE 128 +#define DEFAULT_RX_RING_SIZE 512 +#define MAX_TX_RING_SIZE 1024 +#define TX_LOW_WATER (MAX_SKB_FRAGS + 1) +#define MAX_RX_RING_SIZE 4096 +#define RX_COPY_THRESHOLD 128 +#define RX_BUF_SIZE 1536 +#define PHY_RETRIES 1000 +#define ETH_JUMBO_MTU 9000 +#define TX_WATCHDOG (5 * HZ) +#define NAPI_WEIGHT 64 +#define BLINK_MS 250 +#define LINK_HZ HZ + +#define SKGE_EEPROM_MAGIC 0x9933aabb + + +MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver"); +MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VERSION); + +static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_IFUP | + NETIF_MSG_IFDOWN); + +static int debug = -1; /* defaults above */ +module_param(debug, int, 0); +MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); + +static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = { + { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) }, /* 3Com 3C940 */ + { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) }, /* 3Com 3C940B */ +#ifdef CONFIG_SKGE_GENESIS + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */ +#endif + { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */ + { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */ + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ + { PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */ + { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) }, /* Linksys EG1064 v2 */ + { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */ + { 0 } +}; +MODULE_DEVICE_TABLE(pci, skge_id_table); + +static int skge_up(struct net_device *dev); +static int skge_down(struct net_device *dev); +static void skge_phy_reset(struct skge_port *skge); +static void skge_tx_clean(struct net_device *dev); +static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); +static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val); +static void genesis_get_stats(struct skge_port *skge, u64 *data); +static void yukon_get_stats(struct skge_port *skge, u64 *data); +static void yukon_init(struct skge_hw *hw, int port); +static void genesis_mac_init(struct skge_hw *hw, int port); +static void genesis_link_up(struct skge_port *skge); +static void skge_set_multicast(struct net_device *dev); +static irqreturn_t skge_intr(int irq, void *dev_id); + +/* Avoid conditionals by using array */ +static const int txqaddr[] = { Q_XA1, Q_XA2 }; +static const int rxqaddr[] = { Q_R1, Q_R2 }; +static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F }; +static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; +static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F }; +static const u32 portmask[] = {
IS_PORT_1, IS_PORT_2 }; + +static inline bool is_genesis(const struct skge_hw *hw) +{ +#ifdef CONFIG_SKGE_GENESIS + return hw->chip_id == CHIP_ID_GENESIS; +#else + return false; +#endif +} + +static int skge_get_regs_len(struct net_device *dev) +{ + return 0x4000; +} + +/* + * Returns copy of whole control register region + * Note: skip RAM address register because accessing it will + * cause bus hangs! + */ +static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + const struct skge_port *skge = netdev_priv(dev); + const void __iomem *io = skge->hw->regs; + + regs->version = 1; + memset(p, 0, regs->len); + memcpy_fromio(p, io, B3_RAM_ADDR); + + memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, + regs->len - B3_RI_WTO_R1); +} + +/* Wake on Lan only supported on Yukon chips with rev 1 or above */ +static u32 wol_supported(const struct skge_hw *hw) +{ + if (is_genesis(hw)) + return 0; + + if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) + return 0; + + return WAKE_MAGIC | WAKE_PHY; +} + +static void skge_wol_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl; + + skge_write16(hw, B0_CTST, CS_RST_CLR); + skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); + + /* Turn on Vaux */ + skge_write8(hw, B0_POWER_CTRL, + PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); + + /* WA code for COMA mode -- clear PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + u32 reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9; + reg &= ~GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + skge_write32(hw, SK_REG(port, GPHY_CTRL), + GPC_DIS_SLEEP | + GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | + GPC_ANEG_1 | GPC_RST_SET); + + skge_write32(hw, SK_REG(port, GPHY_CTRL), + GPC_DIS_SLEEP | + GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | + GPC_ANEG_1 | GPC_RST_CLR); + + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + /* Force to 10/100 skge_reset will re-enable on resume */ + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, + (PHY_AN_100FULL | PHY_AN_100HALF | + PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA)); + /* no 1000 HD/FD */ + gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_CTRL, + PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE | + PHY_CT_RE_CFG | PHY_CT_DUP_MD); + + + /* Set GMAC to no flow control and auto update for speed/duplex */ + gma_write16(hw, port, GM_GP_CTRL, + GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| + GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); + + /* Set WOL address */ + memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), + skge->netdev->dev_addr, ETH_ALEN); + + /* Turn on appropriate WOL control bits */ + skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT); + ctrl = 0; + if (skge->wol & WAKE_PHY) + ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; + + if (skge->wol & WAKE_MAGIC) + ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; + + ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; + skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl); + + /* block receiver */ + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); +} + +static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct skge_port *skge = netdev_priv(dev); + + 
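+	/* report the WOL modes the hardware supports and those currently armed */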
wol->supported = wol_supported(skge->hw); + wol->wolopts = skge->wol; +} + +static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + + if ((wol->wolopts & ~wol_supported(hw)) || + !device_can_wakeup(&hw->pdev->dev)) + return -EOPNOTSUPP; + + skge->wol = wol->wolopts; + + device_set_wakeup_enable(&hw->pdev->dev, skge->wol); + + return 0; +} + +/* Determine supported/advertised modes based on hardware. + * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx + */ +static u32 skge_supported_modes(const struct skge_hw *hw) +{ + u32 supported; + + if (hw->copper) { + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + + if (is_genesis(hw)) + supported &= ~(SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full); + + else if (hw->chip_id == CHIP_ID_YUKON) + supported &= ~SUPPORTED_1000baseT_Half; + } else + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + return supported; +} + +static int skge_get_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = skge_supported_modes(hw); + + if (hw->copper) { + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy_addr; + } else + ecmd->port = PORT_FIBRE; + + ecmd->advertising = skge->advertising; + ecmd->autoneg = skge->autoneg; + ethtool_cmd_speed_set(ecmd, skge->speed); + ecmd->duplex = skge->duplex; + return 0; +} + +static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + const struct skge_hw *hw = skge->hw; + u32 supported = skge_supported_modes(hw); + int err = 0; + + if (ecmd->autoneg == AUTONEG_ENABLE) { + ecmd->advertising = supported; + skge->duplex = -1; + skge->speed = -1; + } else { + u32 setting; + u32 speed = ethtool_cmd_speed(ecmd); + + switch (speed) { + case SPEED_1000: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_1000baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_1000baseT_Half; + else + return -EINVAL; + break; + case SPEED_100: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_100baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_100baseT_Half; + else + return -EINVAL; + break; + + case SPEED_10: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_10baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_10baseT_Half; + else + return -EINVAL; + break; + default: + return -EINVAL; + } + + if ((setting & supported) == 0) + return -EINVAL; + + skge->speed = speed; + skge->duplex = ecmd->duplex; + } + + skge->autoneg = ecmd->autoneg; + skge->advertising = ecmd->advertising; + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) { + dev_close(dev); + return err; + } + } + + return 0; +} + +static void skge_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct skge_port *skge = netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(skge->hw->pdev), + sizeof(info->bus_info)); +} + +static const 
struct skge_stat { + char name[ETH_GSTRING_LEN]; + u16 xmac_offset; + u16 gma_offset; +} skge_stats[] = { + { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI }, + { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI }, + + { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK }, + { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK }, + { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK }, + { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK }, + { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK }, + { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK }, + { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE }, + { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE }, + + { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL }, + { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL }, + { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL }, + { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL }, + { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR }, + { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV }, + + { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, + { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT }, + { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG }, + { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, + { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR }, +}; + +static int skge_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(skge_stats); + default: + return -EOPNOTSUPP; + } +} + +static void skge_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct skge_port *skge = netdev_priv(dev); + + if (is_genesis(skge->hw)) + genesis_get_stats(skge, data); + else + yukon_get_stats(skge, data); +} + +/* Use hardware MIB variables for critical path statistics and + * transmit feedback not reported at interrupt. + * Other errors are accounted for in interrupt handler. 
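+ * skge_get_stats() below relies on the fixed ordering of skge_stats[]: entries 0-1 are the octet counters and entries 2-7 the per-type packet counters it sums.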
+ */ +static struct net_device_stats *skge_get_stats(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + u64 data[ARRAY_SIZE(skge_stats)]; + + if (is_genesis(skge->hw)) + genesis_get_stats(skge, data); + else + yukon_get_stats(skge, data); + + dev->stats.tx_bytes = data[0]; + dev->stats.rx_bytes = data[1]; + dev->stats.tx_packets = data[2] + data[4] + data[6]; + dev->stats.rx_packets = data[3] + data[5] + data[7]; + dev->stats.multicast = data[3] + data[5]; + dev->stats.collisions = data[10]; + dev->stats.tx_aborted_errors = data[12]; + + return &dev->stats; +} + +static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(skge_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + skge_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static void skge_get_ring_param(struct net_device *dev, + struct ethtool_ringparam *p) +{ + struct skge_port *skge = netdev_priv(dev); + + p->rx_max_pending = MAX_RX_RING_SIZE; + p->tx_max_pending = MAX_TX_RING_SIZE; + + p->rx_pending = skge->rx_ring.count; + p->tx_pending = skge->tx_ring.count; +} + +static int skge_set_ring_param(struct net_device *dev, + struct ethtool_ringparam *p) +{ + struct skge_port *skge = netdev_priv(dev); + int err = 0; + + if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || + p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE) + return -EINVAL; + + skge->rx_ring.count = p->rx_pending; + skge->tx_ring.count = p->tx_pending; + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) + dev_close(dev); + } + + return err; +} + +static u32 skge_get_msglevel(struct net_device *netdev) +{ + struct skge_port *skge = netdev_priv(netdev); + return skge->msg_enable; +} + +static void skge_set_msglevel(struct net_device *netdev, u32 value) +{ + struct skge_port *skge = netdev_priv(netdev); + skge->msg_enable = value; +} + +static int skge_nway_reset(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev)) + return -EINVAL; + + skge_phy_reset(skge); + return 0; +} + +static void skge_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + + ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) || + (skge->flow_control == FLOW_MODE_SYM_OR_REM)); + ecmd->tx_pause = (ecmd->rx_pause || + (skge->flow_control == FLOW_MODE_LOC_SEND)); + + ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause; +} + +static int skge_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct ethtool_pauseparam old; + int err = 0; + + skge_get_pauseparam(dev, &old); + + if (ecmd->autoneg != old.autoneg) + skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC; + else { + if (ecmd->rx_pause && ecmd->tx_pause) + skge->flow_control = FLOW_MODE_SYMMETRIC; + else if (ecmd->rx_pause && !ecmd->tx_pause) + skge->flow_control = FLOW_MODE_SYM_OR_REM; + else if (!ecmd->rx_pause && ecmd->tx_pause) + skge->flow_control = FLOW_MODE_LOC_SEND; + else + skge->flow_control = FLOW_MODE_NONE; + } + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) { + dev_close(dev); + return err; + } + } + + return 0; +} + +/* Chip internal frequency for clock calculations */ +static inline u32 hwkhz(const struct skge_hw *hw) +{ + return is_genesis(hw) ? 
53125 : 78125; +} + +/* Chip HZ to microseconds */ +static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks) +{ + return (ticks * 1000) / hwkhz(hw); +} + +/* Microseconds to chip HZ */ +static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec) +{ + return hwkhz(hw) * usec / 1000; +} + +static int skge_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + + ecmd->rx_coalesce_usecs = 0; + ecmd->tx_coalesce_usecs = 0; + + if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) { + u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI)); + u32 msk = skge_read32(hw, B2_IRQM_MSK); + + if (msk & rxirqmask[port]) + ecmd->rx_coalesce_usecs = delay; + if (msk & txirqmask[port]) + ecmd->tx_coalesce_usecs = delay; + } + + return 0; +} + +/* Note: interrupt timer is per board, but can turn on/off per port */ +static int skge_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u32 msk = skge_read32(hw, B2_IRQM_MSK); + u32 delay = 25; + + if (ecmd->rx_coalesce_usecs == 0) + msk &= ~rxirqmask[port]; + else if (ecmd->rx_coalesce_usecs < 25 || + ecmd->rx_coalesce_usecs > 33333) + return -EINVAL; + else { + msk |= rxirqmask[port]; + delay = ecmd->rx_coalesce_usecs; + } + + if (ecmd->tx_coalesce_usecs == 0) + msk &= ~txirqmask[port]; + else if (ecmd->tx_coalesce_usecs < 25 || + ecmd->tx_coalesce_usecs > 33333) + return -EINVAL; + else { + msk |= txirqmask[port]; + delay = min(delay, ecmd->rx_coalesce_usecs); + } + + skge_write32(hw, B2_IRQM_MSK, msk); + if (msk == 0) + skge_write32(hw, B2_IRQM_CTRL, TIM_STOP); + else { + skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay)); + skge_write32(hw, B2_IRQM_CTRL, TIM_START); + } + return 0; +} + +enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST }; +static void skge_led(struct skge_port *skge, enum led_mode mode) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) { + switch (mode) { + case LED_MODE_OFF: + if (hw->phy_type == SK_PHY_BCOM) + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF); + else { + skge_write32(hw, SK_REG(port, TX_LED_VAL), 0); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF); + } + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); + skge_write32(hw, SK_REG(port, RX_LED_VAL), 0); + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF); + break; + + case LED_MODE_ON: + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON); + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON); + + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); + + break; + + case LED_MODE_TST: + skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON); + skge_write32(hw, SK_REG(port, RX_LED_VAL), 100); + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); + + if (hw->phy_type == SK_PHY_BCOM) + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON); + else { + skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON); + skge_write32(hw, SK_REG(port, TX_LED_VAL), 100); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); + } + + } + } else { + switch (mode) { + case LED_MODE_OFF: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_DUP(MO_LED_OFF) | + PHY_M_LED_MO_10(MO_LED_OFF) | + 
PHY_M_LED_MO_100(MO_LED_OFF) | + PHY_M_LED_MO_1000(MO_LED_OFF) | + PHY_M_LED_MO_RX(MO_LED_OFF)); + break; + case LED_MODE_ON: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, + PHY_M_LED_PULS_DUR(PULS_170MS) | + PHY_M_LED_BLINK_RT(BLINK_84MS) | + PHY_M_LEDC_TX_CTRL | + PHY_M_LEDC_DP_CTRL); + + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_RX(MO_LED_OFF) | + (skge->speed == SPEED_100 ? + PHY_M_LED_MO_100(MO_LED_ON) : 0)); + break; + case LED_MODE_TST: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_DUP(MO_LED_ON) | + PHY_M_LED_MO_10(MO_LED_ON) | + PHY_M_LED_MO_100(MO_LED_ON) | + PHY_M_LED_MO_1000(MO_LED_ON) | + PHY_M_LED_MO_RX(MO_LED_ON)); + } + } + spin_unlock_bh(&hw->phy_lock); +} + +/* blink LEDs for finding board */ +static int skge_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct skge_port *skge = netdev_priv(dev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; /* cycle on/off twice per second */ + + case ETHTOOL_ID_ON: + skge_led(skge, LED_MODE_TST); + break; + + case ETHTOOL_ID_OFF: + skge_led(skge, LED_MODE_OFF); + break; + + case ETHTOOL_ID_INACTIVE: + /* back to regular LED state */ + skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF); + } + + return 0; +} + +static int skge_get_eeprom_len(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + u32 reg2; + + pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2); + return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); +} + +static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset) +{ + u32 val; + + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); + + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (!(offset & PCI_VPD_ADDR_F)); + + pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); + return val; +} + +static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) +{ + pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val); + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, + offset | PCI_VPD_ADDR_F); + + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (offset & PCI_VPD_ADDR_F); +} + +static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct skge_port *skge = netdev_priv(dev); + struct pci_dev *pdev = skge->hw->pdev; + int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); + int length = eeprom->len; + u16 offset = eeprom->offset; + + if (!cap) + return -EINVAL; + + eeprom->magic = SKGE_EEPROM_MAGIC; + + while (length > 0) { + u32 val = skge_vpd_read(pdev, cap, offset); + int n = min_t(int, length, sizeof(val)); + + memcpy(data, &val, n); + length -= n; + data += n; + offset += n; + } + return 0; +} + +static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct skge_port *skge = netdev_priv(dev); + struct pci_dev *pdev = skge->hw->pdev; + int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); + int length = eeprom->len; + u16 offset = eeprom->offset; + + if (!cap) + return -EINVAL; + + if (eeprom->magic != SKGE_EEPROM_MAGIC) + return -EINVAL; + + while (length > 0) { + u32 val; + int n = min_t(int, length, sizeof(val)); + + if (n < sizeof(val)) + val = skge_vpd_read(pdev, cap, offset); + memcpy(&val, data, n); + + skge_vpd_write(pdev, cap, offset, val); + + length -= n; + data += n; + offset += n; + } + return 0; +} + +static const struct ethtool_ops skge_ethtool_ops = { + .get_settings = skge_get_settings, +
.set_settings = skge_set_settings, + .get_drvinfo = skge_get_drvinfo, + .get_regs_len = skge_get_regs_len, + .get_regs = skge_get_regs, + .get_wol = skge_get_wol, + .set_wol = skge_set_wol, + .get_msglevel = skge_get_msglevel, + .set_msglevel = skge_set_msglevel, + .nway_reset = skge_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = skge_get_eeprom_len, + .get_eeprom = skge_get_eeprom, + .set_eeprom = skge_set_eeprom, + .get_ringparam = skge_get_ring_param, + .set_ringparam = skge_set_ring_param, + .get_pauseparam = skge_get_pauseparam, + .set_pauseparam = skge_set_pauseparam, + .get_coalesce = skge_get_coalesce, + .set_coalesce = skge_set_coalesce, + .get_strings = skge_get_strings, + .set_phys_id = skge_set_phys_id, + .get_sset_count = skge_get_sset_count, + .get_ethtool_stats = skge_get_ethtool_stats, +}; + +/* + * Allocate ring elements and chain them together + * One-to-one association of board descriptors with ring elements + */ +static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) +{ + struct skge_tx_desc *d; + struct skge_element *e; + int i; + + ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL); + if (!ring->start) + return -ENOMEM; + + for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { + e->desc = d; + if (i == ring->count - 1) { + e->next = ring->start; + d->next_offset = base; + } else { + e->next = e + 1; + d->next_offset = base + (i+1) * sizeof(*d); + } + } + ring->to_use = ring->to_clean = ring->start; + + return 0; +} + +/* Allocate and setup a new buffer for receiving */ +static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, + struct sk_buff *skb, unsigned int bufsize) +{ + struct skge_rx_desc *rd = e->desc; + u64 map; + + map = pci_map_single(skge->hw->pdev, skb->data, bufsize, + PCI_DMA_FROMDEVICE); + + rd->dma_lo = map; + rd->dma_hi = map >> 32; + e->skb = skb; + rd->csum1_start = ETH_HLEN; + rd->csum2_start = ETH_HLEN; + rd->csum1 = 0; + rd->csum2 = 0; + + wmb(); + + rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, bufsize); +} + +/* Resume receiving using existing skb, + * Note: DMA address is not changed by chip. + * MTU not changed while receiver active. + */ +static inline void skge_rx_reuse(struct skge_element *e, unsigned int size) +{ + struct skge_rx_desc *rd = e->desc; + + rd->csum2 = 0; + rd->csum2_start = ETH_HLEN; + + wmb(); + + rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size; +} + + +/* Free all buffers in receive ring, assumes receiver stopped */ +static void skge_rx_clean(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + + e = ring->start; + do { + struct skge_rx_desc *rd = e->desc; + rd->control = 0; + if (e->skb) { + pci_unmap_single(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(e->skb); + e->skb = NULL; + } + } while ((e = e->next) != ring->start); +} + + +/* Allocate buffers for receive ring + * For receive: to_clean is next received frame. 
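+ * Each skb is allocated with NET_IP_ALIGN bytes of headroom so the IP header lands word-aligned after the 14-byte Ethernet header.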
+ */ +static int skge_rx_fill(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + + e = ring->start; + do { + struct sk_buff *skb; + + skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN, + GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, NET_IP_ALIGN); + skge_rx_setup(skge, e, skb, skge->rx_buf_size); + } while ((e = e->next) != ring->start); + + ring->to_clean = ring->start; + return 0; +} + +static const char *skge_pause(enum pause_status status) +{ + switch (status) { + case FLOW_STAT_NONE: + return "none"; + case FLOW_STAT_REM_SEND: + return "rx only"; + case FLOW_STAT_LOC_SEND: + return "tx only"; + case FLOW_STAT_SYMMETRIC: /* Both stations may send PAUSE */ + return "both"; + default: + return "indeterminate"; + } +} + + +static void skge_link_up(struct skge_port *skge) +{ + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), + LED_BLK_OFF|LED_SYNC_OFF|LED_ON); + + netif_carrier_on(skge->netdev); + netif_wake_queue(skge->netdev); + + netif_info(skge, link, skge->netdev, + "Link is up at %d Mbps, %s duplex, flow control %s\n", + skge->speed, + skge->duplex == DUPLEX_FULL ? "full" : "half", + skge_pause(skge->flow_status)); +} + +static void skge_link_down(struct skge_port *skge) +{ + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF); + netif_carrier_off(skge->netdev); + netif_stop_queue(skge->netdev); + + netif_info(skge, link, skge->netdev, "Link is down\n"); +} + +static void xm_link_down(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + + xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); + + if (netif_carrier_ok(dev)) + skge_link_down(skge); +} + +static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) +{ + int i; + + xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); + *val = xm_read16(hw, port, XM_PHY_DATA); + + if (hw->phy_type == SK_PHY_XMAC) + goto ready; + + for (i = 0; i < PHY_RETRIES; i++) { + if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) + goto ready; + udelay(1); + } + + return -ETIMEDOUT; + ready: + *val = xm_read16(hw, port, XM_PHY_DATA); + + return 0; +} + +static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) +{ + u16 v = 0; + if (__xm_phy_read(hw, port, reg, &v)) + pr_warning("%s: phy read timed out\n", hw->dev[port]->name); + return v; +} + +static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +{ + int i; + + xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); + for (i = 0; i < PHY_RETRIES; i++) { + if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) + goto ready; + udelay(1); + } + return -EIO; + + ready: + xm_write16(hw, port, XM_PHY_DATA, val); + for (i = 0; i < PHY_RETRIES; i++) { + if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void genesis_init(struct skge_hw *hw) +{ + /* set blink source counter */ + skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100); + skge_write8(hw, B2_BSC_CTRL, BSC_START); + + /* configure mac arbiter */ + skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); + + /* configure mac arbiter timeout values */ + skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53); + + skge_write8(hw, B3_MA_RCINI_RX1, 0); + skge_write8(hw, B3_MA_RCINI_RX2, 0); + skge_write8(hw,
B3_MA_RCINI_TX1, 0); + skge_write8(hw, B3_MA_RCINI_TX2, 0); + + /* configure packet arbiter timeout */ + skge_write16(hw, B3_PA_CTRL, PA_RST_CLR); + skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX); +} + +static void genesis_reset(struct skge_hw *hw, int port) +{ + static const u8 zero[8] = { 0 }; + u32 reg; + + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); + + /* reset the statistics module */ + xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); + xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); + xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ + xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ + xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ + + /* disable Broadcom PHY IRQ */ + if (hw->phy_type == SK_PHY_BCOM) + xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff); + + xm_outhash(hw, port, XM_HSM, zero); + + /* Flush TX and RX fifo */ + reg = xm_read32(hw, port, XM_MODE); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF); +} + +/* Convert mode to MII values */ +static const u16 phy_pause_map[] = { + [FLOW_MODE_NONE] = 0, + [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM, + [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP, + [FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM, +}; + +/* special defines for FIBER (88E1011S only) */ +static const u16 fiber_pause_map[] = { + [FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE, + [FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD, + [FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD, + [FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD, +}; + + +/* Check status of Broadcom phy link */ +static void bcom_check_link(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u16 status; + + /* read twice because of latch */ + xm_phy_read(hw, port, PHY_BCOM_STAT); + status = xm_phy_read(hw, port, PHY_BCOM_STAT); + + if ((status & PHY_ST_LSYNC) == 0) { + xm_link_down(hw, port); + return; + } + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 lpa, aux; + + if (!(status & PHY_ST_AN_OVER)) + return; + + lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); + if (lpa & PHY_B_AN_RF) { + netdev_notice(dev, "remote fault\n"); + return; + } + + aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT); + + /* Check Duplex mismatch */ + switch (aux & PHY_B_AS_AN_RES_MSK) { + case PHY_B_RES_1000FD: + skge->duplex = DUPLEX_FULL; + break; + case PHY_B_RES_1000HD: + skge->duplex = DUPLEX_HALF; + break; + default: + netdev_notice(dev, "duplex mismatch\n"); + return; + } + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + switch (aux & PHY_B_AS_PAUSE_MSK) { + case PHY_B_AS_PAUSE_MSK: + skge->flow_status = FLOW_STAT_SYMMETRIC; + break; + case PHY_B_AS_PRR: + skge->flow_status = FLOW_STAT_REM_SEND; + break; + case PHY_B_AS_PRT: + skge->flow_status = FLOW_STAT_LOC_SEND; + break; + default: + skge->flow_status = FLOW_STAT_NONE; + } + skge->speed = SPEED_1000; + } + + if (!netif_carrier_ok(dev)) + genesis_link_up(skge); +} + +/* Broadcom 5400 only supports gigabit!
SysKonnect did not put an additional + * Phy on for 100 or 10Mbit operation + */ +static void bcom_phy_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + u16 id1, r, ext, ctl; + + /* magic workaround patterns for Broadcom */ + static const struct { + u16 reg; + u16 val; + } A1hack[] = { + { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, + { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, + { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 }, + { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, + }, C0hack[] = { + { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, + { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, + }; + + /* read Id from external PHY (all have the same address) */ + id1 = xm_phy_read(hw, port, PHY_XMAC_ID1); + + /* Optimize MDIO transfer by suppressing preamble. */ + r = xm_read16(hw, port, XM_MMU_CMD); + r |= XM_MMU_NO_PRE; + xm_write16(hw, port, XM_MMU_CMD, r); + + switch (id1) { + case PHY_BCOM_ID1_C0: + /* + * Workaround BCOM Errata for the C0 type. + * Write magic patterns to reserved registers. + */ + for (i = 0; i < ARRAY_SIZE(C0hack); i++) + xm_phy_write(hw, port, + C0hack[i].reg, C0hack[i].val); + + break; + case PHY_BCOM_ID1_A1: + /* + * Workaround BCOM Errata for the A1 type. + * Write magic patterns to reserved registers. + */ + for (i = 0; i < ARRAY_SIZE(A1hack); i++) + xm_phy_write(hw, port, + A1hack[i].reg, A1hack[i].val); + break; + } + + /* + * Workaround BCOM Errata (#10523) for all BCom PHYs. + * Disable Power Management after reset. + */ + r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL); + r |= PHY_B_AC_DIS_PM; + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r); + + /* Dummy read */ + xm_read16(hw, port, XM_ISRC); + + ext = PHY_B_PEC_EN_LTR; /* enable tx led */ + ctl = PHY_CT_SP1000; /* always 1000mbit */ + + if (skge->autoneg == AUTONEG_ENABLE) { + /* + * Workaround BCOM Errata #1 for the C5 type. 
+ * 1000Base-T Link Acquisition Failure in Slave Mode + * Set Repeater/DTE bit 10 of the 1000Base-T Control Register + */ + u16 adv = PHY_B_1000C_RD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + adv |= PHY_B_1000C_AHD; + if (skge->advertising & ADVERTISED_1000baseT_Full) + adv |= PHY_B_1000C_AFD; + xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv); + + ctl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + if (skge->duplex == DUPLEX_FULL) + ctl |= PHY_CT_DUP_MD; + /* Force to slave */ + xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE); + } + + /* Set autonegotiation pause parameters */ + xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, + phy_pause_map[skge->flow_control] | PHY_AN_CSMA); + + /* Handle Jumbo frames */ + if (hw->dev[port]->mtu > ETH_DATA_LEN) { + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, + PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK); + + ext |= PHY_B_PEC_HIGH_LA; + + } + + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext); + xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl); + + /* Use link status change interrupt */ + xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); +} + +static void xm_phy_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl = 0; + + if (skge->autoneg == AUTONEG_ENABLE) { + if (skge->advertising & ADVERTISED_1000baseT_Half) + ctrl |= PHY_X_AN_HD; + if (skge->advertising & ADVERTISED_1000baseT_Full) + ctrl |= PHY_X_AN_FD; + + ctrl |= fiber_pause_map[skge->flow_control]; + + xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl); + + /* Restart Auto-negotiation */ + ctrl = PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + /* Set DuplexMode in Config register */ + if (skge->duplex == DUPLEX_FULL) + ctrl |= PHY_CT_DUP_MD; + /* + * Do NOT enable Auto-negotiation here. This would hold + * the link down because no IDLEs are transmitted + */ + } + + xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl); + + /* Poll PHY for status changes */ + mod_timer(&skge->link_timer, jiffies + LINK_HZ); +} + +static int xm_check_link(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 status; + + /* read twice because of latch */ + xm_phy_read(hw, port, PHY_XMAC_STAT); + status = xm_phy_read(hw, port, PHY_XMAC_STAT); + + if ((status & PHY_ST_LSYNC) == 0) { + xm_link_down(hw, port); + return 0; + } + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 lpa, res; + + if (!(status & PHY_ST_AN_OVER)) + return 0; + + lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); + if (lpa & PHY_B_AN_RF) { + netdev_notice(dev, "remote fault\n"); + return 0; + } + + res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI); + + /* Check Duplex mismatch */ + switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) { + case PHY_X_RS_FD: + skge->duplex = DUPLEX_FULL; + break; + case PHY_X_RS_HD: + skge->duplex = DUPLEX_HALF; + break; + default: + netdev_notice(dev, "duplex mismatch\n"); + return 0; + } + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + if ((skge->flow_control == FLOW_MODE_SYMMETRIC || + skge->flow_control == FLOW_MODE_SYM_OR_REM) && + (lpa & PHY_X_P_SYM_MD)) + skge->flow_status = FLOW_STAT_SYMMETRIC; + else if (skge->flow_control == FLOW_MODE_SYM_OR_REM && + (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) + /* Enable PAUSE receive, disable PAUSE transmit */ + skge->flow_status = FLOW_STAT_REM_SEND; + else if (skge->flow_control == FLOW_MODE_LOC_SEND && + (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) + /* Disable PAUSE receive, enable PAUSE transmit */ + skge->flow_status = FLOW_STAT_LOC_SEND; + else + 
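+		/* neither side advertised a PAUSE combination we can use */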
skge->flow_status = FLOW_STAT_NONE; + + skge->speed = SPEED_1000; + } + + if (!netif_carrier_ok(dev)) + genesis_link_up(skge); + return 1; +} + +/* Poll to check for link coming up. + * + * Since internal PHY is wired to a level triggered pin, can't + * get an interrupt when carrier is detected, need to poll for + * link coming up. + */ +static void xm_link_timer(unsigned long arg) +{ + struct skge_port *skge = (struct skge_port *) arg; + struct net_device *dev = skge->netdev; + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + unsigned long flags; + + if (!netif_running(dev)) + return; + + spin_lock_irqsave(&hw->phy_lock, flags); + + /* + * Verify the link by checking the GPIO register three times. + * This pin has the signal from the link_sync pin connected to it. + */ + for (i = 0; i < 3; i++) { + if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS) + goto link_down; + } + + /* Re-enable interrupt to detect link down */ + if (xm_check_link(dev)) { + u16 msk = xm_read16(hw, port, XM_IMSK); + msk &= ~XM_IS_INP_ASS; + xm_write16(hw, port, XM_IMSK, msk); + xm_read16(hw, port, XM_ISRC); + } else { +link_down: + mod_timer(&skge->link_timer, + round_jiffies(jiffies + LINK_HZ)); + } + spin_unlock_irqrestore(&hw->phy_lock, flags); +} + +static void genesis_mac_init(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN; + int i; + u32 r; + static const u8 zero[6] = { 0 }; + + for (i = 0; i < 10; i++) { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), + MFF_SET_MAC_RST); + if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST) + goto reset_ok; + udelay(1); + } + + netdev_warn(dev, "genesis reset failed\n"); + + reset_ok: + /* Unreset the XMAC. */ + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + + /* + * Perform additional initialization for external PHYs, + * namely for the 1000baseTX cards that use the XMAC's + * GMII mode. + */ + if (hw->phy_type != SK_PHY_XMAC) { + /* Take external Phy out of reset */ + r = skge_read32(hw, B2_GP_IO); + if (port == 0) + r |= GP_DIR_0|GP_IO_0; + else + r |= GP_DIR_2|GP_IO_2; + + skge_write32(hw, B2_GP_IO, r); + + /* Enable GMII interface */ + xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); + } + + + switch (hw->phy_type) { + case SK_PHY_XMAC: + xm_phy_init(skge); + break; + case SK_PHY_BCOM: + bcom_phy_init(skge); + bcom_check_link(hw, port); + } + + /* Set Station Address */ + xm_outaddr(hw, port, XM_SA, dev->dev_addr); + + /* We don't use match addresses so clear */ + for (i = 1; i < 16; i++) + xm_outaddr(hw, port, XM_EXM(i), zero); + + /* Clear MIB counters */ + xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + /* Clear two times according to Errata #3 */ + xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + + /* configure Rx High Water Mark (XM_RX_HI_WM) */ + xm_write16(hw, port, XM_RX_HI_WM, 1450); + + /* We don't need the FCS appended to the packet. */ + r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS; + if (jumbo) + r |= XM_RX_BIG_PK_OK; + + if (skge->duplex == DUPLEX_HALF) { + /* + * If in manual half duplex mode the other side might be in + * full duplex mode, so ignore if a carrier extension is not seen + * on frames received + */ + r |= XM_RX_DIS_CEXT; + } + xm_write16(hw, port, XM_RX_CMD, r); + + /* We want short frames padded to 60 bytes.
*/ + xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD); + + /* Increase threshold for jumbo frames on dual port */ + if (hw->ports > 1 && jumbo) + xm_write16(hw, port, XM_TX_THR, 1020); + else + xm_write16(hw, port, XM_TX_THR, 512); + + /* + * Enable the reception of all error frames. This is + * a necessary evil due to the design of the XMAC. The + * XMAC's receive FIFO is only 8K in size, however jumbo + * frames can be up to 9000 bytes in length. When bad + * frame filtering is enabled, the XMAC's RX FIFO operates + * in 'store and forward' mode. For this to work, the + * entire frame has to fit into the FIFO, but that means + * that jumbo frames larger than 8192 bytes will be + * truncated. Disabling all bad frame filtering causes + * the RX FIFO to operate in streaming mode, in which + * case the XMAC will start transferring frames out of the + * RX FIFO as soon as the FIFO threshold is reached. + */ + xm_write32(hw, port, XM_MODE, XM_DEF_MODE); + + + /* + * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK) + * - Enable all bits excepting 'Octets Rx OK Low CntOv' + * and 'Octets Rx OK Hi Cnt Ov'. + */ + xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK); + + /* + * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK) + * - Enable all bits excepting 'Octets Tx OK Low CntOv' + * and 'Octets Tx OK Hi Cnt Ov'. + */ + xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK); + + /* Configure MAC arbiter */ + skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); + + /* configure timeout values */ + skge_write8(hw, B3_MA_TOINI_RX1, 72); + skge_write8(hw, B3_MA_TOINI_RX2, 72); + skge_write8(hw, B3_MA_TOINI_TX1, 72); + skge_write8(hw, B3_MA_TOINI_TX2, 72); + + skge_write8(hw, B3_MA_RCINI_RX1, 0); + skge_write8(hw, B3_MA_RCINI_RX2, 0); + skge_write8(hw, B3_MA_RCINI_TX1, 0); + skge_write8(hw, B3_MA_RCINI_TX2, 0); + + /* Configure Rx MAC FIFO */ + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR); + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT); + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD); + + /* Configure Tx MAC FIFO */ + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR); + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD); + + if (jumbo) { + /* Enable frame flushing if jumbo frames used */ + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH); + } else { + /* enable timeout timers if normal frames */ + skge_write16(hw, B3_PA_CTRL, + (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2); + } +} + +static void genesis_stop(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + unsigned retries = 1000; + u16 cmd; + + /* Disable Tx and Rx */ + cmd = xm_read16(hw, port, XM_MMU_CMD); + cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); + xm_write16(hw, port, XM_MMU_CMD, cmd); + + genesis_reset(hw, port); + + /* Clear Tx packet arbiter timeout IRQ */ + skge_write16(hw, B3_PA_CTRL, + port == 0 ?
PA_CLR_TO_TX1 : PA_CLR_TO_TX2); + + /* Reset the MAC */ + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + do { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)) + break; + } while (--retries > 0); + + /* For external PHYs there must be special handling */ + if (hw->phy_type != SK_PHY_XMAC) { + u32 reg = skge_read32(hw, B2_GP_IO); + if (port == 0) { + reg |= GP_DIR_0; + reg &= ~GP_IO_0; + } else { + reg |= GP_DIR_2; + reg &= ~GP_IO_2; + } + skge_write32(hw, B2_GP_IO, reg); + skge_read32(hw, B2_GP_IO); + } + + xm_write16(hw, port, XM_MMU_CMD, + xm_read16(hw, port, XM_MMU_CMD) + & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); + + xm_read16(hw, port, XM_MMU_CMD); +} + + +static void genesis_get_stats(struct skge_port *skge, u64 *data) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + unsigned long timeout = jiffies + HZ; + + xm_write16(hw, port, + XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); + + /* wait for update to complete */ + while (xm_read16(hw, port, XM_STAT_CMD) + & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) { + if (time_after(jiffies, timeout)) + break; + udelay(10); + } + + /* special case for 64 bit octet counter */ + data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32 + | xm_read32(hw, port, XM_TXO_OK_LO); + data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32 + | xm_read32(hw, port, XM_RXO_OK_LO); + + for (i = 2; i < ARRAY_SIZE(skge_stats); i++) + data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset); +} + +static void genesis_mac_intr(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u16 status = xm_read16(hw, port, XM_ISRC); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "mac interrupt status 0x%x\n", status); + + if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) { + xm_link_down(hw, port); + mod_timer(&skge->link_timer, jiffies + 1); + } + + if (status & XM_IS_TXF_UR) { + xm_write32(hw, port, XM_MODE, XM_MD_FTF); + ++dev->stats.tx_fifo_errors; + } +} + +static void genesis_link_up(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 cmd, msk; + u32 mode; + + cmd = xm_read16(hw, port, XM_MMU_CMD); + + /* + * enabling pause frame reception is required for 1000BT + * because the XMAC is not reset if the link is going down + */ + if (skge->flow_status == FLOW_STAT_NONE || + skge->flow_status == FLOW_STAT_LOC_SEND) + /* Disable Pause Frame Reception */ + cmd |= XM_MMU_IGN_PF; + else + /* Enable Pause Frame Reception */ + cmd &= ~XM_MMU_IGN_PF; + + xm_write16(hw, port, XM_MMU_CMD, cmd); + + mode = xm_read32(hw, port, XM_MODE); + if (skge->flow_status == FLOW_STAT_SYMMETRIC || + skge->flow_status == FLOW_STAT_LOC_SEND) { + /* + * Configure Pause Frame Generation + * Use internal and external Pause Frame Generation. + * Sending pause frames is edge triggered. + * Send a Pause frame with the maximum pause time if + * internal or external FIFO full condition occurs. + * Send a zero pause time frame to re-start transmission. + */ + /* XM_PAUSE_DA = '010000C28001' (default) */ + /* XM_MAC_PTIME = 0xffff (maximum) */ + /* remember this value is defined in big endian (!)
*/ + xm_write16(hw, port, XM_MAC_PTIME, 0xffff); + + mode |= XM_PAUSE_MODE; + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); + } else { + /* + * disable pause frame generation is required for 1000BT + * because the XMAC is not reset if the link is going down + */ + /* Disable Pause Mode in Mode Register */ + mode &= ~XM_PAUSE_MODE; + + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); + } + + xm_write32(hw, port, XM_MODE, mode); + + /* Turn on detection of Tx underrun */ + msk = xm_read16(hw, port, XM_IMSK); + msk &= ~XM_IS_TXF_UR; + xm_write16(hw, port, XM_IMSK, msk); + + xm_read16(hw, port, XM_ISRC); + + /* get MMU Command Reg. */ + cmd = xm_read16(hw, port, XM_MMU_CMD); + if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL) + cmd |= XM_MMU_GMII_FD; + + /* + * Workaround BCOM Errata (#10523) for all BCom Phys + * Enable Power Management after link up + */ + if (hw->phy_type == SK_PHY_BCOM) { + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, + xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) + & ~PHY_B_AC_DIS_PM); + xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); + } + + /* enable Rx/Tx */ + xm_write16(hw, port, XM_MMU_CMD, + cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX); + skge_link_up(skge); +} + + +static inline void bcom_phy_intr(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 isrc; + + isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT); + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "phy interrupt status 0x%x\n", isrc); + + if (isrc & PHY_B_IS_PSE) + pr_err("%s: uncorrectable pair swap error\n", + hw->dev[port]->name); + + /* Workaround BCom Errata: + * enable and disable loopback mode if "NO HCD" occurs. + */ + if (isrc & PHY_B_IS_NO_HDCL) { + u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL); + xm_phy_write(hw, port, PHY_BCOM_CTRL, + ctrl | PHY_CT_LOOP); + xm_phy_write(hw, port, PHY_BCOM_CTRL, + ctrl & ~PHY_CT_LOOP); + } + + if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) + bcom_check_link(hw, port); + +} + +static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +{ + int i; + + gma_write16(hw, port, GM_SMI_DATA, val); + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); + for (i = 0; i < PHY_RETRIES; i++) { + udelay(1); + + if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) + return 0; + } + + pr_warning("%s: phy write timeout\n", hw->dev[port]->name); + return -EIO; +} + +static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) +{ + int i; + + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(hw->phy_addr) + | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); + + for (i = 0; i < PHY_RETRIES; i++) { + udelay(1); + if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) + goto ready; + } + + return -ETIMEDOUT; + ready: + *val = gma_read16(hw, port, GM_SMI_DATA); + return 0; +} + +static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) +{ + u16 v = 0; + if (__gm_phy_read(hw, port, reg, &v)) + pr_warning("%s: phy read timeout\n", hw->dev[port]->name); + return v; +} + +/* Marvell Phy Initialization */ +static void yukon_init(struct skge_hw *hw, int port) +{ + struct skge_port *skge = netdev_priv(hw->dev[port]); + u16 ctrl, ct1000, adv; + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); + + ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | + PHY_M_EC_MAC_S_MSK); + ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); + + ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); + + 
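+		/* write back the downshift counters and 25 MHz MAC clock selection */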
gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); + } + + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + if (skge->autoneg == AUTONEG_DISABLE) + ctrl &= ~PHY_CT_ANE; + + ctrl |= PHY_CT_RESET; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + ctrl = 0; + ct1000 = 0; + adv = PHY_AN_CSMA; + + if (skge->autoneg == AUTONEG_ENABLE) { + if (hw->copper) { + if (skge->advertising & ADVERTISED_1000baseT_Full) + ct1000 |= PHY_M_1000C_AFD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + ct1000 |= PHY_M_1000C_AHD; + if (skge->advertising & ADVERTISED_100baseT_Full) + adv |= PHY_M_AN_100_FD; + if (skge->advertising & ADVERTISED_100baseT_Half) + adv |= PHY_M_AN_100_HD; + if (skge->advertising & ADVERTISED_10baseT_Full) + adv |= PHY_M_AN_10_FD; + if (skge->advertising & ADVERTISED_10baseT_Half) + adv |= PHY_M_AN_10_HD; + + /* Set Flow-control capabilities */ + adv |= phy_pause_map[skge->flow_control]; + } else { + if (skge->advertising & ADVERTISED_1000baseT_Full) + adv |= PHY_M_AN_1000X_AFD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + adv |= PHY_M_AN_1000X_AHD; + + adv |= fiber_pause_map[skge->flow_control]; + } + + /* Restart Auto-negotiation */ + ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + /* forced speed/duplex settings */ + ct1000 = PHY_M_1000C_MSE; + + if (skge->duplex == DUPLEX_FULL) + ctrl |= PHY_CT_DUP_MD; + + switch (skge->speed) { + case SPEED_1000: + ctrl |= PHY_CT_SP1000; + break; + case SPEED_100: + ctrl |= PHY_CT_SP100; + break; + } + + ctrl |= PHY_CT_RESET; + } + + gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); + + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* Enable phy interrupt on autonegotiation complete (or link up) */ + if (skge->autoneg == AUTONEG_ENABLE) + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK); + else + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); +} + +static void yukon_reset(struct skge_hw *hw, int port) +{ + gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */ + gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ + gma_write16(hw, port, GM_MC_ADDR_H2, 0); + gma_write16(hw, port, GM_MC_ADDR_H3, 0); + gma_write16(hw, port, GM_MC_ADDR_H4, 0); + + gma_write16(hw, port, GM_RX_CTRL, + gma_read16(hw, port, GM_RX_CTRL) + | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); +} + +/* Apparently, early versions of Yukon-Lite had wrong chip_id? 
*/ +static int is_yukon_lite_a0(struct skge_hw *hw) +{ + u32 reg; + int ret; + + if (hw->chip_id != CHIP_ID_YUKON) + return 0; + + reg = skge_read32(hw, B2_FAR); + skge_write8(hw, B2_FAR + 3, 0xff); + ret = (skge_read8(hw, B2_FAR + 3) != 0); + skge_write32(hw, B2_FAR, reg); + return ret; +} + +static void yukon_mac_init(struct skge_hw *hw, int port) +{ + struct skge_port *skge = netdev_priv(hw->dev[port]); + int i; + u32 reg; + const u8 *addr = hw->dev[port]->dev_addr; + + /* WA code for COMA mode -- set PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9 | GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + /* hard reset */ + skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); + + /* WA code for COMA mode -- clear PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9; + reg &= ~GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + /* Set hardware config mode */ + reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | + GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE; + reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; + + /* Clear GMC reset */ + skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); + skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); + + if (skge->autoneg == AUTONEG_DISABLE) { + reg = GM_GPCR_AU_ALL_DIS; + gma_write16(hw, port, GM_GP_CTRL, + gma_read16(hw, port, GM_GP_CTRL) | reg); + + switch (skge->speed) { + case SPEED_1000: + reg &= ~GM_GPCR_SPEED_100; + reg |= GM_GPCR_SPEED_1000; + break; + case SPEED_100: + reg &= ~GM_GPCR_SPEED_1000; + reg |= GM_GPCR_SPEED_100; + break; + case SPEED_10: + reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); + break; + } + + if (skge->duplex == DUPLEX_FULL) + reg |= GM_GPCR_DUP_FULL; + } else + reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; + + switch (skge->flow_control) { + case FLOW_MODE_NONE: + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case FLOW_MODE_LOC_SEND: + /* disable Rx flow-control */ + reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case FLOW_MODE_SYMMETRIC: + case FLOW_MODE_SYM_OR_REM: + /* enable Tx & Rx flow-control */ + break; + } + + gma_write16(hw, port, GM_GP_CTRL, reg); + skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); + + yukon_init(hw, port); + + /* MIB clear */ + reg = gma_read16(hw, port, GM_PHY_ADDR); + gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); + + for (i = 0; i < GM_MIB_CNT_SIZE; i++) + gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); + gma_write16(hw, port, GM_PHY_ADDR, reg); + + /* transmit control */ + gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); + + /* receive control reg: unicast + multicast + no FCS */ + gma_write16(hw, port, GM_RX_CTRL, + GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); + + /* transmit flow control */ + gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); + + /* transmit parameter */ + gma_write16(hw, port, GM_TX_PARAM, + TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | + TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | + TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); + + /* configure the Serial Mode Register */ + reg = DATA_BLIND_VAL(DATA_BLIND_DEF) + | GM_SMOD_VLAN_ENA + | IPG_DATA_VAL(IPG_DATA_DEF); + + if (hw->dev[port]->mtu > 
ETH_DATA_LEN) + reg |= GM_SMOD_JUMBO_ENA; + + gma_write16(hw, port, GM_SERIAL_MODE, reg); + + /* physical address: used for pause frames */ + gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); + /* virtual address for data */ + gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); + + /* enable interrupt mask for counter overflows */ + gma_write16(hw, port, GM_TX_IRQ_MSK, 0); + gma_write16(hw, port, GM_RX_IRQ_MSK, 0); + gma_write16(hw, port, GM_TR_IRQ_MSK, 0); + + /* Initialize Mac Fifo */ + + /* Configure Rx MAC FIFO */ + skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); + reg = GMF_OPER_ON | GMF_RX_F_FL_ON; + + /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */ + if (is_yukon_lite_a0(hw)) + reg &= ~GMF_RX_F_FL_ON; + + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); + skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); + /* + * because Pause Packet Truncation in GMAC is not working + * we have to increase the Flush Threshold to 64 bytes + * in order to flush pause packets in Rx FIFO on Yukon-1 + */ + skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1); + + /* Configure Tx MAC FIFO */ + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); + skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); +} + +/* Go into power down mode */ +static void yukon_suspend(struct skge_hw *hw, int port) +{ + u16 ctrl; + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl |= PHY_M_PC_POL_R_DIS; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + ctrl |= PHY_CT_RESET; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* switch IEEE compatible power down mode on */ + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + ctrl |= PHY_CT_PDOWN; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); +} + +static void yukon_stop(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); + yukon_reset(hw, port); + + gma_write16(hw, port, GM_GP_CTRL, + gma_read16(hw, port, GM_GP_CTRL) + & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); + gma_read16(hw, port, GM_GP_CTRL); + + yukon_suspend(hw, port); + + /* set GPHY Control reset */ + skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); +} + +static void yukon_get_stats(struct skge_port *skge, u64 *data) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + + data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 + | gma_read32(hw, port, GM_TXO_OK_LO); + data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32 + | gma_read32(hw, port, GM_RXO_OK_LO); + + for (i = 2; i < ARRAY_SIZE(skge_stats); i++) + data[i] = gma_read32(hw, port, + skge_stats[i].gma_offset); +} + +static void yukon_mac_intr(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "mac interrupt status 0x%x\n", status); + + if (status & GM_IS_RX_FF_OR) { + ++dev->stats.rx_fifo_errors; + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); + } + + if (status & GM_IS_TX_FF_UR) { + ++dev->stats.tx_fifo_errors; + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); + } + +} + +static u16 yukon_speed(const struct skge_hw *hw, u16 aux) +{ + switch (aux & PHY_M_PS_SPEED_MSK) { + case PHY_M_PS_SPEED_1000: + return SPEED_1000; + case PHY_M_PS_SPEED_100: + return SPEED_100; + default: + 
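+ /* any other encoding of PHY_M_PS_SPEED_MSK falls back to 10 Mbit */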
return SPEED_10; + } +} + +static void yukon_link_up(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 reg; + + /* Enable Transmit FIFO Underrun */ + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); + + reg = gma_read16(hw, port, GM_GP_CTRL); + if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) + reg |= GM_GPCR_DUP_FULL; + + /* enable Rx/Tx */ + reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; + gma_write16(hw, port, GM_GP_CTRL, reg); + + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); + skge_link_up(skge); +} + +static void yukon_link_down(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl; + + ctrl = gma_read16(hw, port, GM_GP_CTRL); + ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); + gma_write16(hw, port, GM_GP_CTRL, ctrl); + + if (skge->flow_status == FLOW_STAT_REM_SEND) { + ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); + ctrl |= PHY_M_AN_ASP; + /* restore Asymmetric Pause bit */ + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl); + } + + skge_link_down(skge); + + yukon_init(hw, port); +} + +static void yukon_phy_intr(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + const char *reason = NULL; + u16 istatus, phystat; + + istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); + phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "phy interrupt status 0x%x 0x%x\n", istatus, phystat); + + if (istatus & PHY_M_IS_AN_COMPL) { + if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP) + & PHY_M_AN_RF) { + reason = "remote fault"; + goto failed; + } + + if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) { + reason = "master/slave fault"; + goto failed; + } + + if (!(phystat & PHY_M_PS_SPDUP_RES)) { + reason = "speed/duplex"; + goto failed; + } + + skge->duplex = (phystat & PHY_M_PS_FULL_DUP) + ? DUPLEX_FULL : DUPLEX_HALF; + skge->speed = yukon_speed(hw, phystat); + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + switch (phystat & PHY_M_PS_PAUSE_MSK) { + case PHY_M_PS_PAUSE_MSK: + skge->flow_status = FLOW_STAT_SYMMETRIC; + break; + case PHY_M_PS_RX_P_EN: + skge->flow_status = FLOW_STAT_REM_SEND; + break; + case PHY_M_PS_TX_P_EN: + skge->flow_status = FLOW_STAT_LOC_SEND; + break; + default: + skge->flow_status = FLOW_STAT_NONE; + } + + if (skge->flow_status == FLOW_STAT_NONE || + (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + else + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); + yukon_link_up(skge); + return; + } + + if (istatus & PHY_M_IS_LSP_CHANGE) + skge->speed = yukon_speed(hw, phystat); + + if (istatus & PHY_M_IS_DUP_CHANGE) + skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; + if (istatus & PHY_M_IS_LST_CHANGE) { + if (phystat & PHY_M_PS_LINK_UP) + yukon_link_up(skge); + else + yukon_link_down(skge); + } + return; + failed: + pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason); + + /* XXX restart autonegotiation? 
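+ * Restarting would presumably mean re-running yukon_init() so the + * advertisement registers are rewritten and PHY_CT_RE_CFG is set again; + * as written, the driver only logs the failure and leaves the link down.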
*/ +} + +static void skge_phy_reset(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct net_device *dev = hw->dev[port]; + + netif_stop_queue(skge->netdev); + netif_carrier_off(skge->netdev); + + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) { + genesis_reset(hw, port); + genesis_mac_init(hw, port); + } else { + yukon_reset(hw, port); + yukon_init(hw, port); + } + spin_unlock_bh(&hw->phy_lock); + + skge_set_multicast(dev); +} + +/* Basic MII support */ +static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int err = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -ENODEV; /* Phy still in reset */ + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + + /* fallthru */ + case SIOCGMIIREG: { + u16 val = 0; + spin_lock_bh(&hw->phy_lock); + + if (is_genesis(hw)) + err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); + else + err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); + spin_unlock_bh(&hw->phy_lock); + data->val_out = val; + break; + } + + case SIOCSMIIREG: + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) + err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f, + data->val_in); + else + err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f, + data->val_in); + spin_unlock_bh(&hw->phy_lock); + break; + } + return err; +} + +static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) +{ + u32 end; + + start /= 8; + len /= 8; + end = start + len - 1; + + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); + skge_write32(hw, RB_ADDR(q, RB_START), start); + skge_write32(hw, RB_ADDR(q, RB_WP), start); + skge_write32(hw, RB_ADDR(q, RB_RP), start); + skge_write32(hw, RB_ADDR(q, RB_END), end); + + if (q == Q_R1 || q == Q_R2) { + /* Set thresholds on receive queues */ + skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), + start + (2*len)/3); + skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), + start + (len/3)); + } else { + /* Enable store & forward on Tx queues because + * Tx FIFO is only 4K on Genesis and 1K on Yukon + */ + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); + } + + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); +} + +/* Setup Bus Memory Interface */ +static void skge_qset(struct skge_port *skge, u16 q, + const struct skge_element *e) +{ + struct skge_hw *hw = skge->hw; + u32 watermark = 0x600; + u64 base = skge->dma + (e->desc - skge->mem); + + /* optimization to reduce window on 32-bit/33 MHz */ + if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0) + watermark /= 2; + + skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET); + skge_write32(hw, Q_ADDR(q, Q_F), watermark); + skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32)); + skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base); +} + +static int skge_up(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u32 chunk, ram_addr; + size_t rx_size, tx_size; + int err; + + if (!is_valid_ether_addr(dev->dev_addr)) + return -EINVAL; + + netif_info(skge, ifup, skge->netdev, "enabling interface\n"); + + if (dev->mtu > RX_BUF_SIZE) + skge->rx_buf_size = dev->mtu + ETH_HLEN; + else + skge->rx_buf_size = RX_BUF_SIZE; + + + rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); + tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); + skge->mem_size = tx_size + rx_size; + skge->mem =
pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma); + if (!skge->mem) + return -ENOMEM; + + BUG_ON(skge->dma & 7); + + if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { + dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); + err = -EINVAL; + goto free_pci_mem; + } + + memset(skge->mem, 0, skge->mem_size); + + err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma); + if (err) + goto free_pci_mem; + + err = skge_rx_fill(dev); + if (err) + goto free_rx_ring; + + err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, + skge->dma + rx_size); + if (err) + goto free_rx_ring; + + if (hw->ports == 1) { + err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED, + dev->name, hw); + if (err) { + netdev_err(dev, "Unable to allocate interrupt %d error: %d\n", + hw->pdev->irq, err); + goto free_tx_ring; + } + } + + /* Initialize MAC */ + netif_carrier_off(dev); + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) + genesis_mac_init(hw, port); + else + yukon_mac_init(hw, port); + spin_unlock_bh(&hw->phy_lock); + + /* Configure RAMbuffers - equally between ports and tx/rx */ + chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2); + ram_addr = hw->ram_offset + 2 * chunk * port; + + skge_ramset(hw, rxqaddr[port], ram_addr, chunk); + skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); + + BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); + skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk); + skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); + + /* Start receiver BMU */ + wmb(); + skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); + skge_led(skge, LED_MODE_ON); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask |= portmask[port]; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); + + napi_enable(&skge->napi); + + skge_set_multicast(dev); + + return 0; + + free_tx_ring: + kfree(skge->tx_ring.start); + free_rx_ring: + skge_rx_clean(skge); + kfree(skge->rx_ring.start); + free_pci_mem: + pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); + skge->mem = NULL; + + return err; +} + +/* stop receiver */ +static void skge_rx_stop(struct skge_hw *hw, int port) +{ + skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); + skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL), + RB_RST_SET|RB_DIS_OP_MD); + skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); +} + +static int skge_down(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + + if (skge->mem == NULL) + return 0; + + netif_info(skge, ifdown, skge->netdev, "disabling interface\n"); + + netif_tx_disable(dev); + + if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC) + del_timer_sync(&skge->link_timer); + + napi_disable(&skge->napi); + netif_carrier_off(dev); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask &= ~portmask[port]; + skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 
0 : hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); + + if (hw->ports == 1) + free_irq(hw->pdev->irq, hw); + + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF); + if (is_genesis(hw)) + genesis_stop(skge); + else + yukon_stop(skge); + + /* Stop transmitter */ + skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); + skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), + RB_RST_SET|RB_DIS_OP_MD); + + + /* Disable Force Sync bit and Enable Alloc bit */ + skge_write8(hw, SK_REG(port, TXA_CTRL), + TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); + + /* Stop Interval Timer and Limit Counter of Tx Arbiter */ + skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); + skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); + + /* Reset PCI FIFO */ + skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); + skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); + + /* Reset the RAM Buffer async Tx queue */ + skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); + + skge_rx_stop(hw, port); + + if (is_genesis(hw)) { + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET); + } else { + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); + } + + skge_led(skge, LED_MODE_OFF); + + netif_tx_lock_bh(dev); + skge_tx_clean(dev); + netif_tx_unlock_bh(dev); + + skge_rx_clean(skge); + + kfree(skge->rx_ring.start); + kfree(skge->tx_ring.start); + pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); + skge->mem = NULL; + return 0; +} + +static inline int skge_avail(const struct skge_ring *ring) +{ + smp_mb(); + return ((ring->to_clean > ring->to_use) ? 0 : ring->count) + + (ring->to_clean - ring->to_use) - 1; +} + +static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, + struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + struct skge_element *e; + struct skge_tx_desc *td; + int i; + u32 control, len; + u64 map; + + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) + return NETDEV_TX_BUSY; + + e = skge->tx_ring.to_use; + td = e->desc; + BUG_ON(td->control & BMU_OWN); + e->skb = skb; + len = skb_headlen(skb); + map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, len); + + td->dma_lo = map; + td->dma_hi = map >> 32; + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + const int offset = skb_checksum_start_offset(skb); + + /* This seems backwards, but it is what the sk98lin + * does. Looks like hardware is wrong? + */ + if (ipip_hdr(skb)->protocol == IPPROTO_UDP && + hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) + control = BMU_TCP_CHECK; + else + control = BMU_UDP_CHECK; + + td->csum_offs = 0; + td->csum_start = offset; + td->csum_write = offset + skb->csum_offset; + } else + control = BMU_CHECK; + + if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. 
no fragments */ + control |= BMU_EOF | BMU_IRQ_EOF; + else { + struct skge_tx_desc *tf = td; + + control |= BMU_STFWD; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + + e = e->next; + e->skb = skb; + tf = e->desc; + BUG_ON(tf->control & BMU_OWN); + + tf->dma_lo = map; + tf->dma_hi = (u64) map >> 32; + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, skb_frag_size(frag)); + + tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag); + } + tf->control |= BMU_EOF | BMU_IRQ_EOF; + } + /* Make sure all the descriptors are written */ + wmb(); + td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; + wmb(); + + netdev_sent_queue(dev, skb->len); + + skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); + + netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev, + "tx queued, slot %td, len %d\n", + e - skge->tx_ring.start, skb->len); + + skge->tx_ring.to_use = e->next; + smp_wmb(); + + if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) { + netdev_dbg(dev, "transmit queue full\n"); + netif_stop_queue(dev); + } + + return NETDEV_TX_OK; +} + + +/* Free resources associated with this ring element */ +static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e, + u32 control) +{ + /* skb header vs. fragment */ + if (control & BMU_STF) + pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + else + pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); +} + +/* Free all buffers in transmit ring */ +static void skge_tx_clean(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_element *e; + + for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { + struct skge_tx_desc *td = e->desc; + + skge_tx_unmap(skge->hw->pdev, e, td->control); + + if (td->control & BMU_EOF) + dev_kfree_skb(e->skb); + td->control = 0; + } + + netdev_reset_queue(dev); + skge->tx_ring.to_clean = e; +} + +static void skge_tx_timeout(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n"); + + skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); + skge_tx_clean(dev); + netif_wake_queue(dev); +} + +static int skge_change_mtu(struct net_device *dev, int new_mtu) +{ + int err; + + if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) + return -EINVAL; + + if (!netif_running(dev)) { + dev->mtu = new_mtu; + return 0; + } + + skge_down(dev); + + dev->mtu = new_mtu; + + err = skge_up(dev); + if (err) + dev_close(dev); + + return err; +} + +static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; + +static void genesis_add_filter(u8 filter[8], const u8 *addr) +{ + u32 crc, bit; + + crc = ether_crc_le(ETH_ALEN, addr); + bit = ~crc & 0x3f; + filter[bit/8] |= 1 << (bit%8); +} + +static void genesis_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct netdev_hw_addr *ha; + u32 mode; + u8 filter[8]; + + mode = xm_read32(hw, port, XM_MODE); + mode |= XM_MD_ENA_HASH; + if (dev->flags & IFF_PROMISC) + mode |= XM_MD_ENA_PROM; + else + mode &= ~XM_MD_ENA_PROM; + + if (dev->flags & IFF_ALLMULTI) + memset(filter, 0xff, sizeof(filter)); + else { + memset(filter, 0, sizeof(filter)); + + if
(skge->flow_status == FLOW_STAT_REM_SEND || + skge->flow_status == FLOW_STAT_SYMMETRIC) + genesis_add_filter(filter, pause_mc_addr); + + netdev_for_each_mc_addr(ha, dev) + genesis_add_filter(filter, ha->addr); + } + + xm_write32(hw, port, XM_MODE, mode); + xm_outhash(hw, port, XM_HSM, filter); +} + +static void yukon_add_filter(u8 filter[8], const u8 *addr) +{ + u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f; + filter[bit/8] |= 1 << (bit%8); +} + +static void yukon_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct netdev_hw_addr *ha; + int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND || + skge->flow_status == FLOW_STAT_SYMMETRIC); + u16 reg; + u8 filter[8]; + + memset(filter, 0, sizeof(filter)); + + reg = gma_read16(hw, port, GM_RX_CTRL); + reg |= GM_RXCR_UCF_ENA; + + if (dev->flags & IFF_PROMISC) /* promiscuous */ + reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + else if (dev->flags & IFF_ALLMULTI) /* all multicast */ + memset(filter, 0xff, sizeof(filter)); + else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */ + reg &= ~GM_RXCR_MCF_ENA; + else { + reg |= GM_RXCR_MCF_ENA; + + if (rx_pause) + yukon_add_filter(filter, pause_mc_addr); + + netdev_for_each_mc_addr(ha, dev) + yukon_add_filter(filter, ha->addr); + } + + + gma_write16(hw, port, GM_MC_ADDR_H1, + (u16)filter[0] | ((u16)filter[1] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H2, + (u16)filter[2] | ((u16)filter[3] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H3, + (u16)filter[4] | ((u16)filter[5] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H4, + (u16)filter[6] | ((u16)filter[7] << 8)); + + gma_write16(hw, port, GM_RX_CTRL, reg); +} + +static inline u16 phy_length(const struct skge_hw *hw, u32 status) +{ + if (is_genesis(hw)) + return status >> XMR_FS_LEN_SHIFT; + else + return status >> GMR_FS_LEN_SHIFT; +} + +static inline int bad_phy_status(const struct skge_hw *hw, u32 status) +{ + if (is_genesis(hw)) + return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0; + else + return (status & GMR_FS_ANY_ERR) || + (status & GMR_FS_RX_OK) == 0; +} + +static void skge_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + if (is_genesis(skge->hw)) + genesis_set_multicast(dev); + else + yukon_set_multicast(dev); + +} + + +/* Get receive buffer from descriptor. 
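+ * (A copy-break scheme: frames shorter than RX_COPY_THRESHOLD are copied + * into a small freshly-allocated skb so the mapped DMA buffer can be + * resubmitted as-is, while larger frames are passed up directly and a + * replacement buffer is mapped into the descriptor.)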
+ * Handles copy of small buffers and reallocation failures + */ +static struct sk_buff *skge_rx_get(struct net_device *dev, + struct skge_element *e, + u32 control, u32 status, u16 csum) +{ + struct skge_port *skge = netdev_priv(dev); + struct sk_buff *skb; + u16 len = control & BMU_BBC; + + netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev, + "rx slot %td status 0x%x len %d\n", + e - skge->rx_ring.start, status, len); + + if (len > skge->rx_buf_size) + goto error; + + if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) + goto error; + + if (bad_phy_status(skge->hw, status)) + goto error; + + if (phy_length(skge->hw, status) != len) + goto error; + + if (len < RX_COPY_THRESHOLD) { + skb = netdev_alloc_skb_ip_align(dev, len); + if (!skb) + goto resubmit; + + pci_dma_sync_single_for_cpu(skge->hw->pdev, + dma_unmap_addr(e, mapaddr), + len, PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(e->skb, skb->data, len); + pci_dma_sync_single_for_device(skge->hw->pdev, + dma_unmap_addr(e, mapaddr), + len, PCI_DMA_FROMDEVICE); + skge_rx_reuse(e, skge->rx_buf_size); + } else { + struct sk_buff *nskb; + + nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); + if (!nskb) + goto resubmit; + + pci_unmap_single(skge->hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); + skb = e->skb; + prefetch(skb->data); + skge_rx_setup(skge, e, nskb, skge->rx_buf_size); + } + + skb_put(skb, len); + + if (dev->features & NETIF_F_RXCSUM) { + skb->csum = csum; + skb->ip_summed = CHECKSUM_COMPLETE; + } + + skb->protocol = eth_type_trans(skb, dev); + + return skb; +error: + + netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev, + "rx err, slot %td control 0x%x status 0x%x\n", + e - skge->rx_ring.start, control, status); + + if (is_genesis(skge->hw)) { + if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) + dev->stats.rx_length_errors++; + if (status & XMR_FS_FRA_ERR) + dev->stats.rx_frame_errors++; + if (status & XMR_FS_FCS_ERR) + dev->stats.rx_crc_errors++; + } else { + if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) + dev->stats.rx_length_errors++; + if (status & GMR_FS_FRAGMENT) + dev->stats.rx_frame_errors++; + if (status & GMR_FS_CRC_ERR) + dev->stats.rx_crc_errors++; + } + +resubmit: + skge_rx_reuse(e, skge->rx_buf_size); + return NULL; +} + +/* Free all buffers in Tx ring which are no longer owned by device */ +static void skge_tx_done(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_ring *ring = &skge->tx_ring; + struct skge_element *e; + unsigned int bytes_compl = 0, pkts_compl = 0; + + skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); + + for (e = ring->to_clean; e != ring->to_use; e = e->next) { + u32 control = ((const struct skge_tx_desc *) e->desc)->control; + + if (control & BMU_OWN) + break; + + skge_tx_unmap(skge->hw->pdev, e, control); + + if (control & BMU_EOF) { + netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev, + "tx done slot %td\n", + e - skge->tx_ring.start); + + pkts_compl++; + bytes_compl += e->skb->len; + + dev_kfree_skb(e->skb); + } + } + netdev_completed_queue(dev, pkts_compl, bytes_compl); + skge->tx_ring.to_clean = e; + + /* Can run lockless until we need to synchronize to restart queue. 
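+ * The smp_mb() below pairs with the smp_wmb() in skge_xmit_frame(): the + * queue-stopped test plus the skge_avail() recheck is done once locklessly + * and, if it passes, repeated under netif_tx_lock() so a concurrent + * transmitter cannot stop the queue between the check and netif_wake_queue(). + * As a worked example of skge_avail(): with count = 512, to_use at slot 500 + * and to_clean at slot 100 it returns 512 + (100 - 500) - 1 = 111 free slots + * (one slot is always kept back as a guard).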
*/ + smp_mb(); + + if (unlikely(netif_queue_stopped(dev) && + skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { + netif_tx_lock(dev); + if (unlikely(netif_queue_stopped(dev) && + skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { + netif_wake_queue(dev); + + } + netif_tx_unlock(dev); + } +} + +static int skge_poll(struct napi_struct *napi, int to_do) +{ + struct skge_port *skge = container_of(napi, struct skge_port, napi); + struct net_device *dev = skge->netdev; + struct skge_hw *hw = skge->hw; + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + int work_done = 0; + + skge_tx_done(dev); + + skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); + + for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { + struct skge_rx_desc *rd = e->desc; + struct sk_buff *skb; + u32 control; + + rmb(); + control = rd->control; + if (control & BMU_OWN) + break; + + skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); + if (likely(skb)) { + napi_gro_receive(napi, skb); + ++work_done; + } + } + ring->to_clean = e; + + /* restart receiver */ + wmb(); + skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); + + if (work_done < to_do) { + unsigned long flags; + + napi_gro_flush(napi, false); + spin_lock_irqsave(&hw->hw_lock, flags); + __napi_complete(napi); + hw->intr_mask |= napimask[skge->port]; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irqrestore(&hw->hw_lock, flags); + } + + return work_done; +} + +/* Parity errors seem to happen when Genesis is connected to a switch + * with no other ports present. Heartbeat error?? + */ +static void skge_mac_parity(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + + ++dev->stats.tx_heartbeat_errors; + + if (is_genesis(hw)) + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), + MFF_CLR_PERR); + else + /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), + (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) + ? 
GMF_CLI_TX_FC : GMF_CLI_TX_PE); +} + +static void skge_mac_intr(struct skge_hw *hw, int port) +{ + if (is_genesis(hw)) + genesis_mac_intr(hw, port); + else + yukon_mac_intr(hw, port); +} + +/* Handle device specific framing and timeout interrupts */ +static void skge_error_irq(struct skge_hw *hw) +{ + struct pci_dev *pdev = hw->pdev; + u32 hwstatus = skge_read32(hw, B0_HWE_ISRC); + + if (is_genesis(hw)) { + /* clear xmac errors */ + if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) + skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT); + if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) + skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT); + } else { + /* Timestamp (unused) overflow */ + if (hwstatus & IS_IRQ_TIST_OV) + skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ); + } + + if (hwstatus & IS_RAM_RD_PAR) { + dev_err(&pdev->dev, "Ram read data parity error\n"); + skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR); + } + + if (hwstatus & IS_RAM_WR_PAR) { + dev_err(&pdev->dev, "Ram write data parity error\n"); + skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR); + } + + if (hwstatus & IS_M1_PAR_ERR) + skge_mac_parity(hw, 0); + + if (hwstatus & IS_M2_PAR_ERR) + skge_mac_parity(hw, 1); + + if (hwstatus & IS_R1_PAR_ERR) { + dev_err(&pdev->dev, "%s: receive queue parity error\n", + hw->dev[0]->name); + skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P); + } + + if (hwstatus & IS_R2_PAR_ERR) { + dev_err(&pdev->dev, "%s: receive queue parity error\n", + hw->dev[1]->name); + skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P); + } + + if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) { + u16 pci_status, pci_cmd; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); + pci_read_config_word(pdev, PCI_STATUS, &pci_status); + + dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n", + pci_cmd, pci_status); + + /* Write the error bits back to clear them. */ + pci_status &= PCI_STATUS_ERROR_BITS; + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_write_config_word(pdev, PCI_COMMAND, + pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY); + pci_write_config_word(pdev, PCI_STATUS, pci_status); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + /* if error still set then just ignore it */ + hwstatus = skge_read32(hw, B0_HWE_ISRC); + if (hwstatus & IS_IRQ_STAT) { + dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring it)\n"); + hw->intr_mask &= ~IS_HW_ERR; + } + } +} + +/* + * Interrupts from the PHY are handled in a tasklet (softirq) + * because accessing phy registers requires a spin wait which might + * cause excess interrupt latency.
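+ * The hard IRQ handler (skge_intr) masks IS_EXT_REG and schedules phy_task; + * the tasklet below services the PHY under phy_lock and then re-enables + * IS_EXT_REG in intr_mask, so PHY interrupts stay off while one is pending.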
+ */ +static void skge_extirq(unsigned long arg) +{ + struct skge_hw *hw = (struct skge_hw *) arg; + int port; + + for (port = 0; port < hw->ports; port++) { + struct net_device *dev = hw->dev[port]; + + if (netif_running(dev)) { + struct skge_port *skge = netdev_priv(dev); + + spin_lock(&hw->phy_lock); + if (!is_genesis(hw)) + yukon_phy_intr(skge); + else if (hw->phy_type == SK_PHY_BCOM) + bcom_phy_intr(skge); + spin_unlock(&hw->phy_lock); + } + } + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask |= IS_EXT_REG; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); +} + +static irqreturn_t skge_intr(int irq, void *dev_id) +{ + struct skge_hw *hw = dev_id; + u32 status; + int handled = 0; + + spin_lock(&hw->hw_lock); + /* Reading this register masks IRQ */ + status = skge_read32(hw, B0_SP_ISRC); + if (status == 0 || status == ~0) + goto out; + + handled = 1; + status &= hw->intr_mask; + if (status & IS_EXT_REG) { + hw->intr_mask &= ~IS_EXT_REG; + tasklet_schedule(&hw->phy_task); + } + + if (status & (IS_XA1_F|IS_R1_F)) { + struct skge_port *skge = netdev_priv(hw->dev[0]); + hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); + napi_schedule(&skge->napi); + } + + if (status & IS_PA_TO_TX1) + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); + + if (status & IS_PA_TO_RX1) { + ++hw->dev[0]->stats.rx_over_errors; + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); + } + + + if (status & IS_MAC1) + skge_mac_intr(hw, 0); + + if (hw->dev[1]) { + struct skge_port *skge = netdev_priv(hw->dev[1]); + + if (status & (IS_XA2_F|IS_R2_F)) { + hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); + napi_schedule(&skge->napi); + } + + if (status & IS_PA_TO_RX2) { + ++hw->dev[1]->stats.rx_over_errors; + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); + } + + if (status & IS_PA_TO_TX2) + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2); + + if (status & IS_MAC2) + skge_mac_intr(hw, 1); + } + + if (status & IS_HW_ERR) + skge_error_irq(hw); + + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); +out: + spin_unlock(&hw->hw_lock); + + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void skge_netpoll(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + disable_irq(dev->irq); + skge_intr(dev->irq, skge->hw); + enable_irq(dev->irq); +} +#endif + +static int skge_set_mac_address(struct net_device *dev, void *p) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + unsigned port = skge->port; + const struct sockaddr *addr = p; + u16 ctrl; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + if (!netif_running(dev)) { + memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); + } else { + /* disable Rx */ + spin_lock_bh(&hw->phy_lock); + ctrl = gma_read16(hw, port, GM_GP_CTRL); + gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA); + + memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); + + if (is_genesis(hw)) + xm_outaddr(hw, port, XM_SA, dev->dev_addr); + else { + gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); + gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); + } + + gma_write16(hw, port, GM_GP_CTRL, ctrl); + spin_unlock_bh(&hw->phy_lock); + } + + return 0; +} + +static const struct { + u8 id; + const char *name; +} skge_chips[] = { + { CHIP_ID_GENESIS, 
"Genesis" }, + { CHIP_ID_YUKON, "Yukon" }, + { CHIP_ID_YUKON_LITE, "Yukon-Lite"}, + { CHIP_ID_YUKON_LP, "Yukon-LP"}, +}; + +static const char *skge_board_name(const struct skge_hw *hw) +{ + int i; + static char buf[16]; + + for (i = 0; i < ARRAY_SIZE(skge_chips); i++) + if (skge_chips[i].id == hw->chip_id) + return skge_chips[i].name; + + snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id); + return buf; +} + + +/* + * Setup the board data structure, but don't bring up + * the port(s) + */ +static int skge_reset(struct skge_hw *hw) +{ + u32 reg; + u16 ctst, pci_status; + u8 t8, mac_cfg, pmd_type; + int i; + + ctst = skge_read16(hw, B0_CTST); + + /* do a SW reset */ + skge_write8(hw, B0_CTST, CS_RST_SET); + skge_write8(hw, B0_CTST, CS_RST_CLR); + + /* clear PCI errors, if any */ + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + skge_write8(hw, B2_TST_CTRL2, 0); + + pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); + pci_write_config_word(hw->pdev, PCI_STATUS, + pci_status | PCI_STATUS_ERROR_BITS); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + skge_write8(hw, B0_CTST, CS_MRST_CLR); + + /* restore CLK_RUN bits (for Yukon-Lite) */ + skge_write16(hw, B0_CTST, + ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA)); + + hw->chip_id = skge_read8(hw, B2_CHIP_ID); + hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; + pmd_type = skge_read8(hw, B2_PMD_TYP); + hw->copper = (pmd_type == 'T' || pmd_type == '1'); + + switch (hw->chip_id) { + case CHIP_ID_GENESIS: +#ifdef CONFIG_SKGE_GENESIS + switch (hw->phy_type) { + case SK_PHY_XMAC: + hw->phy_addr = PHY_ADDR_XMAC; + break; + case SK_PHY_BCOM: + hw->phy_addr = PHY_ADDR_BCOM; + break; + default: + dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n", + hw->phy_type); + return -EOPNOTSUPP; + } + break; +#else + dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n"); + return -EOPNOTSUPP; +#endif + + case CHIP_ID_YUKON: + case CHIP_ID_YUKON_LITE: + case CHIP_ID_YUKON_LP: + if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S') + hw->copper = 1; + + hw->phy_addr = PHY_ADDR_MARV; + break; + + default: + dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", + hw->chip_id); + return -EOPNOTSUPP; + } + + mac_cfg = skge_read8(hw, B2_MAC_CFG); + hw->ports = (mac_cfg & CFG_SNG_MAC) ? 
1 : 2; + hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4; + + /* read the adapter's RAM size */ + t8 = skge_read8(hw, B2_E_0); + if (is_genesis(hw)) { + if (t8 == 3) { + /* special case: 4 x 64k x 36, offset = 0x80000 */ + hw->ram_size = 0x100000; + hw->ram_offset = 0x80000; + } else + hw->ram_size = t8 * 512; + } else if (t8 == 0) + hw->ram_size = 0x20000; + else + hw->ram_size = t8 * 4096; + + hw->intr_mask = IS_HW_ERR; + + /* Use PHY IRQ for all but fiber-based Genesis boards */ + if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)) + hw->intr_mask |= IS_EXT_REG; + + if (is_genesis(hw)) + genesis_init(hw); + else { + /* switch power to VCC (WA for VAUX problem) */ + skge_write8(hw, B0_POWER_CTRL, + PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); + + /* avoid boards with stuck Hardware error bits */ + if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) && + (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) { + dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n"); + hw->intr_mask &= ~IS_HW_ERR; + } + + /* Clear PHY COMA */ + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_read_config_dword(hw->pdev, PCI_DEV_REG1, &reg); + reg &= ~PCI_PHY_COMA; + pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + + for (i = 0; i < hw->ports; i++) { + skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); + skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); + } + } + + /* turn off hardware timer (unused) */ + skge_write8(hw, B2_TI_CTRL, TIM_STOP); + skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); + skge_write8(hw, B0_LED, LED_STAT_ON); + + /* enable the Tx Arbiters */ + for (i = 0; i < hw->ports; i++) + skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); + + /* Initialize ram interface */ + skge_write16(hw, B3_RI_CTRL, RI_RST_CLR); + + skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53); + + skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK); + + /* Set interrupt moderation for Transmit only + * Receive interrupts avoided by NAPI + */ + skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F); + skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); + skge_write32(hw, B2_IRQM_CTRL, TIM_START); + + /* Leave irq disabled until first port is brought up.
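+ * skge_up() later ORs the port's bits into intr_mask under hw_lock and + * rewrites B0_IMSK, so interrupts are only unmasked once a ring and an + * IRQ handler actually exist.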
*/ + skge_write32(hw, B0_IMSK, 0); + + for (i = 0; i < hw->ports; i++) { + if (is_genesis(hw)) + genesis_reset(hw, i); + else + yukon_reset(hw, i); + } + + return 0; +} + + +#ifdef CONFIG_SKGE_DEBUG + +static struct dentry *skge_debug; + +static int skge_debug_show(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + const struct skge_port *skge = netdev_priv(dev); + const struct skge_hw *hw = skge->hw; + const struct skge_element *e; + + if (!netif_running(dev)) + return -ENETDOWN; + + seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC), + skge_read32(hw, B0_IMSK)); + + seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring)); + for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { + const struct skge_tx_desc *t = e->desc; + seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n", + t->control, t->dma_hi, t->dma_lo, t->status, + t->csum_offs, t->csum_write, t->csum_start); + } + + seq_printf(seq, "\nRx Ring:\n"); + for (e = skge->rx_ring.to_clean; ; e = e->next) { + const struct skge_rx_desc *r = e->desc; + + if (r->control & BMU_OWN) + break; + + seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n", + r->control, r->dma_hi, r->dma_lo, r->status, + r->timestamp, r->csum1, r->csum1_start); + } + + return 0; +} + +static int skge_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, skge_debug_show, inode->i_private); +} + +static const struct file_operations skge_debug_fops = { + .owner = THIS_MODULE, + .open = skge_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * Use network device events to create/remove/rename + * debugfs file entries + */ +static int skge_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = ptr; + struct skge_port *skge; + struct dentry *d; + + if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug) + goto done; + + skge = netdev_priv(dev); + switch (event) { + case NETDEV_CHANGENAME: + if (skge->debugfs) { + d = debugfs_rename(skge_debug, skge->debugfs, + skge_debug, dev->name); + if (d) + skge->debugfs = d; + else { + netdev_info(dev, "rename failed\n"); + debugfs_remove(skge->debugfs); + } + } + break; + + case NETDEV_GOING_DOWN: + if (skge->debugfs) { + debugfs_remove(skge->debugfs); + skge->debugfs = NULL; + } + break; + + case NETDEV_UP: + d = debugfs_create_file(dev->name, S_IRUGO, + skge_debug, dev, + &skge_debug_fops); + if (!d || IS_ERR(d)) + netdev_info(dev, "debugfs create failed\n"); + else + skge->debugfs = d; + break; + } + +done: + return NOTIFY_DONE; +} + +static struct notifier_block skge_notifier = { + .notifier_call = skge_device_event, +}; + + +static __init void skge_debug_init(void) +{ + struct dentry *ent; + + ent = debugfs_create_dir("skge", NULL); + if (!ent || IS_ERR(ent)) { + pr_info("debugfs create directory failed\n"); + return; + } + + skge_debug = ent; + register_netdevice_notifier(&skge_notifier); +} + +static __exit void skge_debug_cleanup(void) +{ + if (skge_debug) { + unregister_netdevice_notifier(&skge_notifier); + debugfs_remove(skge_debug); + skge_debug = NULL; + } +} + +#else +#define skge_debug_init() +#define skge_debug_cleanup() +#endif + +static const struct net_device_ops skge_netdev_ops = { + .ndo_open = skge_up, + .ndo_stop = skge_down, + .ndo_start_xmit = skge_xmit_frame, + .ndo_do_ioctl = skge_ioctl, + .ndo_get_stats = skge_get_stats, + .ndo_tx_timeout = skge_tx_timeout, + .ndo_change_mtu = skge_change_mtu, + 
.ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = skge_set_multicast, + .ndo_set_mac_address = skge_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = skge_netpoll, +#endif +}; + + +/* Initialize network device */ +static struct net_device *skge_devinit(struct skge_hw *hw, int port, + int highmem) +{ + struct skge_port *skge; + struct net_device *dev = alloc_etherdev(sizeof(*skge)); + + if (!dev) + return NULL; + + SET_NETDEV_DEV(dev, &hw->pdev->dev); + dev->netdev_ops = &skge_netdev_ops; + dev->ethtool_ops = &skge_ethtool_ops; + dev->watchdog_timeo = TX_WATCHDOG; + dev->irq = hw->pdev->irq; + + if (highmem) + dev->features |= NETIF_F_HIGHDMA; + + skge = netdev_priv(dev); + netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT); + skge->netdev = dev; + skge->hw = hw; + skge->msg_enable = netif_msg_init(debug, default_msg); + + skge->tx_ring.count = DEFAULT_TX_RING_SIZE; + skge->rx_ring.count = DEFAULT_RX_RING_SIZE; + + /* Auto speed and flow control */ + skge->autoneg = AUTONEG_ENABLE; + skge->flow_control = FLOW_MODE_SYM_OR_REM; + skge->duplex = -1; + skge->speed = -1; + skge->advertising = skge_supported_modes(hw); + + if (device_can_wakeup(&hw->pdev->dev)) { + skge->wol = wol_supported(hw) & WAKE_MAGIC; + device_set_wakeup_enable(&hw->pdev->dev, skge->wol); + } + + hw->dev[port] = dev; + + skge->port = port; + + /* Only used for Genesis XMAC */ + if (is_genesis(hw)) + setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge); + else { + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM; + dev->features |= dev->hw_features; + } + + /* read the mac address */ + memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); + + return dev; +} + +static void skge_show_addr(struct net_device *dev) +{ + const struct skge_port *skge = netdev_priv(dev); + + netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); +} + +static int only_32bit_dma; + +static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev, *dev1; + struct skge_hw *hw; + int err, using_dac = 0; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto err_out; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + using_dac = 0; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_out_free_regions; + } + +#ifdef __BIG_ENDIAN + /* byte swap descriptors in hardware */ + { + u32 reg; + + pci_read_config_dword(pdev, PCI_DEV_REG2, &reg); + reg |= PCI_REV_DESC; + pci_write_config_dword(pdev, PCI_DEV_REG2, reg); + } +#endif + + err = -ENOMEM; + /* space for skge@pci:0000:04:00.0 */ + hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") + + strlen(pci_name(pdev)) + 1, GFP_KERNEL); + if (!hw) + goto err_out_free_regions; + + sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); + + hw->pdev = pdev; + spin_lock_init(&hw->hw_lock); + spin_lock_init(&hw->phy_lock); + tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw); + + hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); + if (!hw->regs) {
dev_err(&pdev->dev, "cannot map device registers\n"); + goto err_out_free_hw; + } + + err = skge_reset(hw); + if (err) + goto err_out_iounmap; + + pr_info("%s addr 0x%llx irq %d chip %s rev %d\n", + DRV_VERSION, + (unsigned long long)pci_resource_start(pdev, 0), pdev->irq, + skge_board_name(hw), hw->chip_rev); + + dev = skge_devinit(hw, 0, using_dac); + if (!dev) { + err = -ENOMEM; + goto err_out_led_off; + } + + /* Some motherboards are broken and have zero in ROM. */ + if (!is_valid_ether_addr(dev->dev_addr)) + dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n"); + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "cannot register net device\n"); + goto err_out_free_netdev; + } + + skge_show_addr(dev); + + if (hw->ports > 1) { + dev1 = skge_devinit(hw, 1, using_dac); + if (!dev1) { + err = -ENOMEM; + goto err_out_unregister; + } + + err = register_netdev(dev1); + if (err) { + dev_err(&pdev->dev, "cannot register second net device\n"); + goto err_out_free_dev1; + } + + err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, + hw->irq_name, hw); + if (err) { + dev_err(&pdev->dev, "cannot assign irq %d\n", + pdev->irq); + goto err_out_unregister_dev1; + } + + skge_show_addr(dev1); + } + pci_set_drvdata(pdev, hw); + + return 0; + +err_out_unregister_dev1: + unregister_netdev(dev1); +err_out_free_dev1: + free_netdev(dev1); +err_out_unregister: + unregister_netdev(dev); +err_out_free_netdev: + free_netdev(dev); +err_out_led_off: + skge_write16(hw, B0_LED, LED_STAT_OFF); +err_out_iounmap: + iounmap(hw->regs); +err_out_free_hw: + kfree(hw); +err_out_free_regions: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +err_out: + return err; +} + +static void skge_remove(struct pci_dev *pdev) +{ + struct skge_hw *hw = pci_get_drvdata(pdev); + struct net_device *dev0, *dev1; + + if (!hw) + return; + + dev1 = hw->dev[1]; + if (dev1) + unregister_netdev(dev1); + dev0 = hw->dev[0]; + unregister_netdev(dev0); + + tasklet_kill(&hw->phy_task); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask = 0; + + if (hw->ports > 1) { + skge_write32(hw, B0_IMSK, 0); + skge_read32(hw, B0_IMSK); + free_irq(pdev->irq, hw); + } + spin_unlock_irq(&hw->hw_lock); + + skge_write16(hw, B0_LED, LED_STAT_OFF); + skge_write8(hw, B0_CTST, CS_RST_SET); + + if (hw->ports > 1) + free_irq(pdev->irq, hw); + pci_release_regions(pdev); + pci_disable_device(pdev); + if (dev1) + free_netdev(dev1); + free_netdev(dev0); + + iounmap(hw->regs); + kfree(hw); + pci_set_drvdata(pdev, NULL); +} + +#ifdef CONFIG_PM_SLEEP +static int skge_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct skge_hw *hw = pci_get_drvdata(pdev); + int i; + + if (!hw) + return 0; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct skge_port *skge = netdev_priv(dev); + + if (netif_running(dev)) + skge_down(dev); + + if (skge->wol) + skge_wol_init(skge); + } + + skge_write32(hw, B0_IMSK, 0); + + return 0; +} + +static int skge_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct skge_hw *hw = pci_get_drvdata(pdev); + int i, err; + + if (!hw) + return 0; + + err = skge_reset(hw); + if (err) + goto out; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + + if (netif_running(dev)) { + err = skge_up(dev); + + if (err) { + netdev_err(dev, "could not up: %d\n", err); + dev_close(dev); + goto out; + } + } + } +out: + return err; +} + +static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend,
skge_resume); +#define SKGE_PM_OPS (&skge_pm_ops) + +#else + +#define SKGE_PM_OPS NULL +#endif /* CONFIG_PM_SLEEP */ + +static void skge_shutdown(struct pci_dev *pdev) +{ + struct skge_hw *hw = pci_get_drvdata(pdev); + int i; + + if (!hw) + return; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct skge_port *skge = netdev_priv(dev); + + if (skge->wol) + skge_wol_init(skge); + } + + pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); + pci_set_power_state(pdev, PCI_D3hot); +} + +static struct pci_driver skge_driver = { + .name = DRV_NAME, + .id_table = skge_id_table, + .probe = skge_probe, + .remove = skge_remove, + .shutdown = skge_shutdown, + .driver.pm = SKGE_PM_OPS, +}; + +static struct dmi_system_id skge_32bit_dma_boards[] = { + { + .ident = "Gigabyte nForce boards", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"), + DMI_MATCH(DMI_BOARD_NAME, "nForce"), + }, + }, + { + .ident = "ASUS P5NSLI", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P5NSLI") + }, + }, + {} +}; + +static int __init skge_init_module(void) +{ + if (dmi_check_system(skge_32bit_dma_boards)) + only_32bit_dma = 1; + skge_debug_init(); + return pci_register_driver(&skge_driver); +} + +static void __exit skge_cleanup_module(void) +{ + pci_unregister_driver(&skge_driver); + skge_debug_cleanup(); +} + +module_init(skge_init_module); +module_exit(skge_cleanup_module); diff --git a/addons/skge/src/3.10.108/skge.h b/addons/skge/src/3.10.108/skge.h new file mode 100644 index 00000000..a2eb3411 --- /dev/null +++ b/addons/skge/src/3.10.108/skge.h @@ -0,0 +1,2584 @@ +/* + * Definitions for the new Marvell Yukon / SysKonnect driver. + */ +#ifndef _SKGE_H +#define _SKGE_H +#include + +/* PCI config registers */ +#define PCI_DEV_REG1 0x40 +#define PCI_PHY_COMA 0x8000000 +#define PCI_VIO 0x2000000 + +#define PCI_DEV_REG2 0x44 +#define PCI_VPD_ROM_SZ 7L<<14 /* VPD ROM size 0=256, 1=512, ... 
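+ (apparently a 3-bit field at bits 16..14 of PCI_DEV_REG2, with the size doubling per step, i.e. 256 << n bytes)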
*/ +#define PCI_REV_DESC 1<<2 /* Reverse Descriptor bytes */ + +#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ + PCI_STATUS_SIG_SYSTEM_ERROR | \ + PCI_STATUS_REC_MASTER_ABORT | \ + PCI_STATUS_REC_TARGET_ABORT | \ + PCI_STATUS_PARITY) + +enum csr_regs { + B0_RAP = 0x0000, + B0_CTST = 0x0004, + B0_LED = 0x0006, + B0_POWER_CTRL = 0x0007, + B0_ISRC = 0x0008, + B0_IMSK = 0x000c, + B0_HWE_ISRC = 0x0010, + B0_HWE_IMSK = 0x0014, + B0_SP_ISRC = 0x0018, + B0_XM1_IMSK = 0x0020, + B0_XM1_ISRC = 0x0028, + B0_XM1_PHY_ADDR = 0x0030, + B0_XM1_PHY_DATA = 0x0034, + B0_XM2_IMSK = 0x0040, + B0_XM2_ISRC = 0x0048, + B0_XM2_PHY_ADDR = 0x0050, + B0_XM2_PHY_DATA = 0x0054, + B0_R1_CSR = 0x0060, + B0_R2_CSR = 0x0064, + B0_XS1_CSR = 0x0068, + B0_XA1_CSR = 0x006c, + B0_XS2_CSR = 0x0070, + B0_XA2_CSR = 0x0074, + + B2_MAC_1 = 0x0100, + B2_MAC_2 = 0x0108, + B2_MAC_3 = 0x0110, + B2_CONN_TYP = 0x0118, + B2_PMD_TYP = 0x0119, + B2_MAC_CFG = 0x011a, + B2_CHIP_ID = 0x011b, + B2_E_0 = 0x011c, + B2_E_1 = 0x011d, + B2_E_2 = 0x011e, + B2_E_3 = 0x011f, + B2_FAR = 0x0120, + B2_FDP = 0x0124, + B2_LD_CTRL = 0x0128, + B2_LD_TEST = 0x0129, + B2_TI_INI = 0x0130, + B2_TI_VAL = 0x0134, + B2_TI_CTRL = 0x0138, + B2_TI_TEST = 0x0139, + B2_IRQM_INI = 0x0140, + B2_IRQM_VAL = 0x0144, + B2_IRQM_CTRL = 0x0148, + B2_IRQM_TEST = 0x0149, + B2_IRQM_MSK = 0x014c, + B2_IRQM_HWE_MSK = 0x0150, + B2_TST_CTRL1 = 0x0158, + B2_TST_CTRL2 = 0x0159, + B2_GP_IO = 0x015c, + B2_I2C_CTRL = 0x0160, + B2_I2C_DATA = 0x0164, + B2_I2C_IRQ = 0x0168, + B2_I2C_SW = 0x016c, + B2_BSC_INI = 0x0170, + B2_BSC_VAL = 0x0174, + B2_BSC_CTRL = 0x0178, + B2_BSC_STAT = 0x0179, + B2_BSC_TST = 0x017a, + + B3_RAM_ADDR = 0x0180, + B3_RAM_DATA_LO = 0x0184, + B3_RAM_DATA_HI = 0x0188, + B3_RI_WTO_R1 = 0x0190, + B3_RI_WTO_XA1 = 0x0191, + B3_RI_WTO_XS1 = 0x0192, + B3_RI_RTO_R1 = 0x0193, + B3_RI_RTO_XA1 = 0x0194, + B3_RI_RTO_XS1 = 0x0195, + B3_RI_WTO_R2 = 0x0196, + B3_RI_WTO_XA2 = 0x0197, + B3_RI_WTO_XS2 = 0x0198, + B3_RI_RTO_R2 = 0x0199, + B3_RI_RTO_XA2 = 0x019a, + B3_RI_RTO_XS2 = 0x019b, + B3_RI_TO_VAL = 0x019c, + B3_RI_CTRL = 0x01a0, + B3_RI_TEST = 0x01a2, + B3_MA_TOINI_RX1 = 0x01b0, + B3_MA_TOINI_RX2 = 0x01b1, + B3_MA_TOINI_TX1 = 0x01b2, + B3_MA_TOINI_TX2 = 0x01b3, + B3_MA_TOVAL_RX1 = 0x01b4, + B3_MA_TOVAL_RX2 = 0x01b5, + B3_MA_TOVAL_TX1 = 0x01b6, + B3_MA_TOVAL_TX2 = 0x01b7, + B3_MA_TO_CTRL = 0x01b8, + B3_MA_TO_TEST = 0x01ba, + B3_MA_RCINI_RX1 = 0x01c0, + B3_MA_RCINI_RX2 = 0x01c1, + B3_MA_RCINI_TX1 = 0x01c2, + B3_MA_RCINI_TX2 = 0x01c3, + B3_MA_RCVAL_RX1 = 0x01c4, + B3_MA_RCVAL_RX2 = 0x01c5, + B3_MA_RCVAL_TX1 = 0x01c6, + B3_MA_RCVAL_TX2 = 0x01c7, + B3_MA_RC_CTRL = 0x01c8, + B3_MA_RC_TEST = 0x01ca, + B3_PA_TOINI_RX1 = 0x01d0, + B3_PA_TOINI_RX2 = 0x01d4, + B3_PA_TOINI_TX1 = 0x01d8, + B3_PA_TOINI_TX2 = 0x01dc, + B3_PA_TOVAL_RX1 = 0x01e0, + B3_PA_TOVAL_RX2 = 0x01e4, + B3_PA_TOVAL_TX1 = 0x01e8, + B3_PA_TOVAL_TX2 = 0x01ec, + B3_PA_CTRL = 0x01f0, + B3_PA_TEST = 0x01f2, +}; + +/* B0_CTST 16 bit Control/Status register */ +enum { + CS_CLK_RUN_HOT = 1<<13,/* CLK_RUN hot m. 
(YUKON-Lite only) */ + CS_CLK_RUN_RST = 1<<12,/* CLK_RUN reset (YUKON-Lite only) */ + CS_CLK_RUN_ENA = 1<<11,/* CLK_RUN enable (YUKON-Lite only) */ + CS_VAUX_AVAIL = 1<<10,/* VAUX available (YUKON only) */ + CS_BUS_CLOCK = 1<<9, /* Bus Clock 0/1 = 33/66 MHz */ + CS_BUS_SLOT_SZ = 1<<8, /* Slot Size 0/1 = 32/64 bit slot */ + CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */ + CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */ + CS_STOP_DONE = 1<<5, /* Stop Master is finished */ + CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */ + CS_MRST_CLR = 1<<3, /* Clear Master reset */ + CS_MRST_SET = 1<<2, /* Set Master reset */ + CS_RST_CLR = 1<<1, /* Clear Software reset */ + CS_RST_SET = 1, /* Set Software reset */ + +/* B0_LED 8 Bit LED register */ +/* Bit 7.. 2: reserved */ + LED_STAT_ON = 1<<1, /* Status LED on */ + LED_STAT_OFF = 1, /* Status LED off */ + +/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */ + PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */ + PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */ + PC_VCC_ENA = 1<<5, /* Switch VCC Enable */ + PC_VCC_DIS = 1<<4, /* Switch VCC Disable */ + PC_VAUX_ON = 1<<3, /* Switch VAUX On */ + PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */ + PC_VCC_ON = 1<<1, /* Switch VCC On */ + PC_VCC_OFF = 1<<0, /* Switch VCC Off */ +}; + +/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */ +enum { + IS_ALL_MSK = 0xbffffffful, /* All Interrupt bits */ + IS_HW_ERR = 1<<31, /* Interrupt HW Error */ + /* Bit 30: reserved */ + IS_PA_TO_RX1 = 1<<29, /* Packet Arb Timeout Rx1 */ + IS_PA_TO_RX2 = 1<<28, /* Packet Arb Timeout Rx2 */ + IS_PA_TO_TX1 = 1<<27, /* Packet Arb Timeout Tx1 */ + IS_PA_TO_TX2 = 1<<26, /* Packet Arb Timeout Tx2 */ + IS_I2C_READY = 1<<25, /* IRQ on end of I2C Tx */ + IS_IRQ_SW = 1<<24, /* SW forced IRQ */ + IS_EXT_REG = 1<<23, /* IRQ from LM80 or PHY (GENESIS only) */ + /* IRQ from PHY (YUKON only) */ + IS_TIMINT = 1<<22, /* IRQ from Timer */ + IS_MAC1 = 1<<21, /* IRQ from MAC 1 */ + IS_LNK_SYNC_M1 = 1<<20, /* Link Sync Cnt wrap MAC 1 */ + IS_MAC2 = 1<<19, /* IRQ from MAC 2 */ + IS_LNK_SYNC_M2 = 1<<18, /* Link Sync Cnt wrap MAC 2 */ +/* Receive Queue 1 */ + IS_R1_B = 1<<17, /* Q_R1 End of Buffer */ + IS_R1_F = 1<<16, /* Q_R1 End of Frame */ + IS_R1_C = 1<<15, /* Q_R1 Encoding Error */ +/* Receive Queue 2 */ + IS_R2_B = 1<<14, /* Q_R2 End of Buffer */ + IS_R2_F = 1<<13, /* Q_R2 End of Frame */ + IS_R2_C = 1<<12, /* Q_R2 Encoding Error */ +/* Synchronous Transmit Queue 1 */ + IS_XS1_B = 1<<11, /* Q_XS1 End of Buffer */ + IS_XS1_F = 1<<10, /* Q_XS1 End of Frame */ + IS_XS1_C = 1<<9, /* Q_XS1 Encoding Error */ +/* Asynchronous Transmit Queue 1 */ + IS_XA1_B = 1<<8, /* Q_XA1 End of Buffer */ + IS_XA1_F = 1<<7, /* Q_XA1 End of Frame */ + IS_XA1_C = 1<<6, /* Q_XA1 Encoding Error */ +/* Synchronous Transmit Queue 2 */ + IS_XS2_B = 1<<5, /* Q_XS2 End of Buffer */ + IS_XS2_F = 1<<4, /* Q_XS2 End of Frame */ + IS_XS2_C = 1<<3, /* Q_XS2 Encoding Error */ +/* Asynchronous Transmit Queue 2 */ + IS_XA2_B = 1<<2, /* Q_XA2 End of Buffer */ + IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */ + IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */ + + IS_TO_PORT1 = IS_PA_TO_RX1 | IS_PA_TO_TX1, + IS_TO_PORT2 = IS_PA_TO_RX2 | IS_PA_TO_TX2, + + IS_PORT_1 = IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1, + IS_PORT_2 = IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2, +}; + + +/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ +enum { + IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */ + IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */ + IS_IRQ_MST_ERR = 1<<11, /* IRQ master 
error detected */ + IS_IRQ_STAT = 1<<10, /* IRQ status exception */ + IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */ + IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */ + IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */ + IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */ + IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */ + IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */ + IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */ + IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */ + IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */ + IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */ + + IS_ERR_MSK = IS_IRQ_MST_ERR | IS_IRQ_STAT + | IS_RAM_RD_PAR | IS_RAM_WR_PAR + | IS_M1_PAR_ERR | IS_M2_PAR_ERR + | IS_R1_PAR_ERR | IS_R2_PAR_ERR, +}; + +/* B2_TST_CTRL1 8 bit Test Control Register 1 */ +enum { + TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */ + TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */ + TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */ + TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */ + TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */ + TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */ + TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */ + TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */ +}; + +/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */ +enum { + CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */ + /* Bit 3.. 2: reserved */ + CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */ + CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/ +}; + +/* B2_CHIP_ID 8 bit Chip Identification Number */ +enum { + CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */ + CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */ + CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */ + CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */ + CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */ + CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */ + CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */ + + CHIP_REV_YU_LITE_A1 = 3, /* Chip Rev. for YUKON-Lite A1,A2 */ + CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. 
for YUKON-Lite A3 */ +}; + +/* B2_TI_CTRL 8 bit Timer control */ +/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ +enum { + TIM_START = 1<<2, /* Start Timer */ + TIM_STOP = 1<<1, /* Stop Timer */ + TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */ +}; + +/* B2_TI_TEST 8 Bit Timer Test */ +/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */ +/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */ +enum { + TIM_T_ON = 1<<2, /* Test mode on */ + TIM_T_OFF = 1<<1, /* Test mode off */ + TIM_T_STEP = 1<<0, /* Test step */ +}; + +/* B2_GP_IO 32 bit General Purpose I/O Register */ +enum { + GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */ + GP_DIR_8 = 1<<24, /* IO_8 direct, 0=In/1=Out */ + GP_DIR_7 = 1<<23, /* IO_7 direct, 0=In/1=Out */ + GP_DIR_6 = 1<<22, /* IO_6 direct, 0=In/1=Out */ + GP_DIR_5 = 1<<21, /* IO_5 direct, 0=In/1=Out */ + GP_DIR_4 = 1<<20, /* IO_4 direct, 0=In/1=Out */ + GP_DIR_3 = 1<<19, /* IO_3 direct, 0=In/1=Out */ + GP_DIR_2 = 1<<18, /* IO_2 direct, 0=In/1=Out */ + GP_DIR_1 = 1<<17, /* IO_1 direct, 0=In/1=Out */ + GP_DIR_0 = 1<<16, /* IO_0 direct, 0=In/1=Out */ + + GP_IO_9 = 1<<9, /* IO_9 pin */ + GP_IO_8 = 1<<8, /* IO_8 pin */ + GP_IO_7 = 1<<7, /* IO_7 pin */ + GP_IO_6 = 1<<6, /* IO_6 pin */ + GP_IO_5 = 1<<5, /* IO_5 pin */ + GP_IO_4 = 1<<4, /* IO_4 pin */ + GP_IO_3 = 1<<3, /* IO_3 pin */ + GP_IO_2 = 1<<2, /* IO_2 pin */ + GP_IO_1 = 1<<1, /* IO_1 pin */ + GP_IO_0 = 1<<0, /* IO_0 pin */ +}; + +/* Descriptor Bit Definition */ +/* TxCtrl Transmit Buffer Control Field */ +/* RxCtrl Receive Buffer Control Field */ +enum { + BMU_OWN = 1<<31, /* OWN bit: 0=host/1=BMU */ + BMU_STF = 1<<30, /* Start of Frame */ + BMU_EOF = 1<<29, /* End of Frame */ + BMU_IRQ_EOB = 1<<28, /* Req "End of Buffer" IRQ */ + BMU_IRQ_EOF = 1<<27, /* Req "End of Frame" IRQ */ + /* TxCtrl specific bits */ + BMU_STFWD = 1<<26, /* (Tx) Store & Forward Frame */ + BMU_NO_FCS = 1<<25, /* (Tx) Disable MAC FCS (CRC) generation */ + BMU_SW = 1<<24, /* (Tx) 1 bit res. for SW use */ + /* RxCtrl specific bits */ + BMU_DEV_0 = 1<<26, /* (Rx) Transfer data to Dev0 */ + BMU_STAT_VAL = 1<<25, /* (Rx) Rx Status Valid */ + BMU_TIST_VAL = 1<<24, /* (Rx) Rx TimeStamp Valid */ + /* Bit 23..16: BMU Check Opcodes */ + BMU_CHECK = 0x55<<16, /* Default BMU check */ + BMU_TCP_CHECK = 0x56<<16, /* Descr with TCP ext */ + BMU_UDP_CHECK = 0x57<<16, /* Descr with UDP ext (YUKON only) */ + BMU_BBC = 0xffffL, /* Bit 15.. 0: Buffer Byte Counter */ +}; + +/* B2_BSC_CTRL 8 bit Blink Source Counter Control */ +enum { + BSC_START = 1<<1, /* Start Blink Source Counter */ + BSC_STOP = 1<<0, /* Stop Blink Source Counter */ +}; + +/* B2_BSC_STAT 8 bit Blink Source Counter Status */ +enum { + BSC_SRC = 1<<0, /* Blink Source, 0=Off / 1=On */ +}; + +/* B2_BSC_TST 16 bit Blink Source Counter Test Reg */ +enum { + BSC_T_ON = 1<<2, /* Test mode on */ + BSC_T_OFF = 1<<1, /* Test mode off */ + BSC_T_STEP = 1<<0, /* Test step */ +}; + +/* B3_RAM_ADDR 32 bit RAM Address, to read or write */ + /* Bit 31..19: reserved */ +#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 
0: RAM Address Range */ +/* RAM Interface Registers */ + +/* B3_RI_CTRL 16 bit RAM Iface Control Register */ +enum { + RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */ + RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/ + + RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */ + RI_RST_SET = 1<<0, /* Set RAM Interface Reset */ +}; + +/* MAC Arbiter Registers */ +/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */ +enum { + MA_FOE_ON = 1<<3, /* XMAC Fast Output Enable ON */ + MA_FOE_OFF = 1<<2, /* XMAC Fast Output Enable OFF */ + MA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */ + MA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */ + +}; + +/* Timeout values */ +#define SK_MAC_TO_53 72 /* MAC arbiter timeout */ +#define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */ +#define SK_PKT_TO_MAX 0xffff /* Maximum value */ +#define SK_RI_TO_53 36 /* RAM interface timeout */ + +/* Packet Arbiter Registers */ +/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */ +enum { + PA_CLR_TO_TX2 = 1<<13,/* Clear IRQ Packet Timeout TX2 */ + PA_CLR_TO_TX1 = 1<<12,/* Clear IRQ Packet Timeout TX1 */ + PA_CLR_TO_RX2 = 1<<11,/* Clear IRQ Packet Timeout RX2 */ + PA_CLR_TO_RX1 = 1<<10,/* Clear IRQ Packet Timeout RX1 */ + PA_ENA_TO_TX2 = 1<<9, /* Enable Timeout Timer TX2 */ + PA_DIS_TO_TX2 = 1<<8, /* Disable Timeout Timer TX2 */ + PA_ENA_TO_TX1 = 1<<7, /* Enable Timeout Timer TX1 */ + PA_DIS_TO_TX1 = 1<<6, /* Disable Timeout Timer TX1 */ + PA_ENA_TO_RX2 = 1<<5, /* Enable Timeout Timer RX2 */ + PA_DIS_TO_RX2 = 1<<4, /* Disable Timeout Timer RX2 */ + PA_ENA_TO_RX1 = 1<<3, /* Enable Timeout Timer RX1 */ + PA_DIS_TO_RX1 = 1<<2, /* Disable Timeout Timer RX1 */ + PA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */ + PA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */ +}; + +#define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\ + PA_ENA_TO_TX1 | PA_ENA_TO_TX2) + + +/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ +/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ +/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ +/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */ + +#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 
0: Max TXA Timer/Cnt Val */
+
+/* TXA_CTRL 8 bit Tx Arbiter Control Register */
+enum {
+ TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */
+ TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */
+ TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */
+ TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */
+ TXA_START_RC = 1<<3, /* Start sync Rate Control */
+ TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */
+ TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */
+ TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */
+};
+
+/*
+ * Bank 4 - 5
+ */
+/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
+enum {
+ TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
+ TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
+ TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */
+ TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */
+ TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */
+ TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */
+ TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */
+};
+
+
+enum {
+ B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */
+ B7_CFG_SPC = 0x0380,/* copy of the Configuration register */
+ B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */
+ B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */
+ B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */
+ B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */
+ B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */
+ B8_TA2_REGS = 0x0780,/* Transmit async queue 2 */
+ B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */
+};
+
+/* Queue Register Offsets, use Q_ADDR() to access */
+enum {
+ B8_Q_REGS = 0x0400, /* base of Queue registers */
+ Q_D = 0x00, /* 8*32 bit Current Descriptor */
+ Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */
+ Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */
+ Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
+ Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
+ Q_BC = 0x30, /* 32 bit Current Byte Counter */
+ Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
+ Q_F = 0x38, /* 32 bit Flag Register */
+ Q_T1 = 0x3c, /* 32 bit Test Register 1 */
+ Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */
+ Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */
+ Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */
+ Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */
+ Q_T2 = 0x40, /* 32 bit Test Register 2 */
+ Q_T3 = 0x44, /* 32 bit Test Register 3 */
+
+};
+#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
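+
+/*
+ * Editorial sketch, not part of the original header: Q_ADDR() folds one
+ * of the queue bases defined further below (Q_R1 = 0x0000, Q_XA1 = 0x0280,
+ * ...) together with a per-queue offset from the enum above into a single
+ * register address, e.g. Q_ADDR(Q_XA1, Q_CSR) = 0x0400 + 0x0280 + 0x34.
+ * The helper name is hypothetical; the driver normally uses the macro
+ * directly.
+ */
+static inline int q_csr_sketch(int queue)
+{
+        return Q_ADDR(queue, Q_CSR);    /* B8_Q_REGS + queue base + offset */
+}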
+
+/* RAM Buffer Register Offsets */
+enum {
+
+ RB_START= 0x00,/* 32 bit RAM Buffer Start Address */
+ RB_END = 0x04,/* 32 bit RAM Buffer End Address */
+ RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */
+ RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */
+ RB_RX_UTPP= 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */
+ RB_RX_LTPP= 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */
+ RB_RX_UTHP= 0x18,/* 32 bit Rx Upper Threshold, High Prio */
+ RB_RX_LTHP= 0x1c,/* 32 bit Rx Lower Threshold, High Prio */
+ /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */
+ RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */
+ RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */
+ RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */
+ RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */
+ RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */
+};
+
+/* Receive and Transmit Queues */
+enum {
+ Q_R1 = 0x0000, /* Receive Queue 1 */
+ Q_R2 = 0x0080, /* Receive Queue 2 */
+ Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */
+ Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */
+ Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */
+ Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */
+};
+
+/* Different MAC Types */
+enum {
+ SK_MAC_XMAC = 0, /* Xaqti XMAC II */
+ SK_MAC_GMAC = 1, /* Marvell GMAC */
+};
+
+/* Different PHY Types */
+enum {
+ SK_PHY_XMAC = 0,/* integrated in XMAC II */
+ SK_PHY_BCOM = 1,/* Broadcom BCM5400 */
+ SK_PHY_LONE = 2,/* Level One LXT1000 [not supported]*/
+ SK_PHY_NAT = 3,/* National DP83891 [not supported] */
+ SK_PHY_MARV_COPPER= 4,/* Marvell 88E1011S */
+ SK_PHY_MARV_FIBER = 5,/* Marvell 88E1011S working on fiber */
+};
+
+/* PHY addresses (bits 12..8 of PHY address reg) */
+enum {
+ PHY_ADDR_XMAC = 0<<8,
+ PHY_ADDR_BCOM = 1<<8,
+
+/* GPHY address (bits 15..11 of SMI control reg) */
+ PHY_ADDR_MARV = 0,
+};
+
+#define RB_ADDR(offs, queue) ((u16)B16_RAM_REGS + (u16)(queue) + (offs))
+
+/* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */
+enum {
+ RX_MFF_EA = 0x0c00,/* 32 bit Receive MAC FIFO End Address */
+ RX_MFF_WP = 0x0c04,/* 32 bit Receive MAC FIFO Write Pointer */
+
+ RX_MFF_RP = 0x0c0c,/* 32 bit Receive MAC FIFO Read Pointer */
+ RX_MFF_PC = 0x0c10,/* 32 bit Receive MAC FIFO Packet Cnt */
+ RX_MFF_LEV = 0x0c14,/* 32 bit Receive MAC FIFO Level */
+ RX_MFF_CTRL1 = 0x0c18,/* 16 bit Receive MAC FIFO Control Reg 1*/
+ RX_MFF_STAT_TO = 0x0c1a,/* 8 bit Receive MAC Status Timeout */
+ RX_MFF_TIST_TO = 0x0c1b,/* 8 bit Receive MAC Time Stamp Timeout */
+ RX_MFF_CTRL2 = 0x0c1c,/* 8 bit Receive MAC FIFO Control Reg 2*/
+ RX_MFF_TST1 = 0x0c1d,/* 8 bit Receive MAC FIFO Test Reg 1 */
+ RX_MFF_TST2 = 0x0c1e,/* 8 bit Receive MAC FIFO Test Reg 2 */
+
+ RX_LED_INI = 0x0c20,/* 32 bit Receive LED Cnt Init Value */
+ RX_LED_VAL = 0x0c24,/* 32 bit Receive LED Cnt Current Value */
+ RX_LED_CTRL = 0x0c28,/* 8 bit Receive LED Cnt Control Reg */
+ RX_LED_TST = 0x0c29,/* 8 bit Receive LED Cnt Test Register */
+
+ LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */
+ LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */
+ LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */
+ LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */
+ LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */
+};
+
+/* Receive and Transmit MAC FIFO Registers (GENESIS only) */
+/* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */
+enum {
+ MFF_ENA_RDY_PAT = 1<<13, /* Enable Ready Patch */
+ MFF_DIS_RDY_PAT = 1<<12, /* Disable Ready Patch */
+ MFF_ENA_TIM_PAT = 1<<11, /* Enable Timing Patch */
+ MFF_DIS_TIM_PAT = 1<<10, /* Disable Timing Patch */
+ MFF_ENA_ALM_FUL = 1<<9, /* Enable AlmostFull Sign */
+ MFF_DIS_ALM_FUL = 1<<8, /* Disable AlmostFull Sign */
+ MFF_ENA_PAUSE = 1<<7, /* Enable Pause Signaling */
+ MFF_DIS_PAUSE = 1<<6, /* Disable Pause Signaling */
+ MFF_ENA_FLUSH = 1<<5, /* Enable Frame Flushing */
+ MFF_DIS_FLUSH = 1<<4, /* Disable Frame Flushing */
+ MFF_ENA_TIST = 1<<3, /* Enable Time Stamp Gener */
+ MFF_DIS_TIST = 1<<2, /* Disable Time Stamp Gener */
+ MFF_CLR_INTIST = 1<<1, /* Clear IRQ No Time Stamp */
+ MFF_CLR_INSTAT = 1<<0, /* Clear IRQ No Status */
+ MFF_RX_CTRL_DEF = MFF_ENA_TIM_PAT,
+};
+
+/* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */
+enum {
+ MFF_CLR_PERR = 1<<15, /* Clear Parity Error IRQ */
+
+ MFF_ENA_PKT_REC = 1<<13, /* Enable Packet Recovery */
+ MFF_DIS_PKT_REC = 1<<12, /* Disable Packet Recovery */
+
+ MFF_ENA_W4E = 1<<7, /* Enable Wait for Empty */
+ MFF_DIS_W4E = 1<<6, /* 
Disable Wait for Empty */ + + MFF_ENA_LOOPB = 1<<3, /* Enable Loopback */ + MFF_DIS_LOOPB = 1<<2, /* Disable Loopback */ + MFF_CLR_MAC_RST = 1<<1, /* Clear XMAC Reset */ + MFF_SET_MAC_RST = 1<<0, /* Set XMAC Reset */ + + MFF_TX_CTRL_DEF = MFF_ENA_PKT_REC | (u16) MFF_ENA_TIM_PAT | MFF_ENA_FLUSH, +}; + + +/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */ +/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */ +enum { + MFF_WSP_T_ON = 1<<6, /* Tx: Write Shadow Ptr TestOn */ + MFF_WSP_T_OFF = 1<<5, /* Tx: Write Shadow Ptr TstOff */ + MFF_WSP_INC = 1<<4, /* Tx: Write Shadow Ptr Increment */ + MFF_PC_DEC = 1<<3, /* Packet Counter Decrement */ + MFF_PC_T_ON = 1<<2, /* Packet Counter Test On */ + MFF_PC_T_OFF = 1<<1, /* Packet Counter Test Off */ + MFF_PC_INC = 1<<0, /* Packet Counter Increment */ +}; + +/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */ +/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */ +enum { + MFF_WP_T_ON = 1<<6, /* Write Pointer Test On */ + MFF_WP_T_OFF = 1<<5, /* Write Pointer Test Off */ + MFF_WP_INC = 1<<4, /* Write Pointer Increm */ + + MFF_RP_T_ON = 1<<2, /* Read Pointer Test On */ + MFF_RP_T_OFF = 1<<1, /* Read Pointer Test Off */ + MFF_RP_DEC = 1<<0, /* Read Pointer Decrement */ +}; + +/* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */ +/* TX_MFF_CTRL2 8 bit Transmit MAC FIFO Control Reg 2 */ +enum { + MFF_ENA_OP_MD = 1<<3, /* Enable Operation Mode */ + MFF_DIS_OP_MD = 1<<2, /* Disable Operation Mode */ + MFF_RST_CLR = 1<<1, /* Clear MAC FIFO Reset */ + MFF_RST_SET = 1<<0, /* Set MAC FIFO Reset */ +}; + + +/* Link LED Counter Registers (GENESIS only) */ + +/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */ +/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */ +/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */ +enum { + LED_START = 1<<2, /* Start Timer */ + LED_STOP = 1<<1, /* Stop Timer */ + LED_STATE = 1<<0, /* Rx/Tx: LED State, 1=LED on */ +}; + +/* RX_LED_TST 8 bit Receive LED Cnt Test Register */ +/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */ +/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */ +enum { + LED_T_ON = 1<<2, /* LED Counter Test mode On */ + LED_T_OFF = 1<<1, /* LED Counter Test mode Off */ + LED_T_STEP = 1<<0, /* LED Counter Step */ +}; + +/* LNK_LED_REG 8 bit Link LED Register */ +enum { + LED_BLK_ON = 1<<5, /* Link LED Blinking On */ + LED_BLK_OFF = 1<<4, /* Link LED Blinking Off */ + LED_SYNC_ON = 1<<3, /* Use Sync Wire to switch LED */ + LED_SYNC_OFF = 1<<2, /* Disable Sync Wire Input */ + LED_ON = 1<<1, /* switch LED on */ + LED_OFF = 1<<0, /* switch LED off */ +}; + +/* Receive GMAC FIFO (YUKON) */ +enum { + RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */ + RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. 
*/
+ RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
+ RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
+ RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */
+ RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
+ RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */
+ RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */
+ RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */
+};
+
+
+/* TXA_TEST 8 bit Tx Arbiter Test Register */
+enum {
+ TXA_INT_T_ON = 1<<5, /* Tx Arb Interval Timer Test On */
+ TXA_INT_T_OFF = 1<<4, /* Tx Arb Interval Timer Test Off */
+ TXA_INT_T_STEP = 1<<3, /* Tx Arb Interval Timer Step */
+ TXA_LIM_T_ON = 1<<2, /* Tx Arb Limit Timer Test On */
+ TXA_LIM_T_OFF = 1<<1, /* Tx Arb Limit Timer Test Off */
+ TXA_LIM_T_STEP = 1<<0, /* Tx Arb Limit Timer Step */
+};
+
+/* TXA_STAT 8 bit Tx Arbiter Status Register */
+enum {
+ TXA_PRIO_XS = 1<<0, /* sync queue has prio to send */
+};
+
+
+/* Q_BC 32 bit Current Byte Counter */
+
+/* BMU Control Status Registers */
+/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */
+/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */
+/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */
+/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */
+/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */
+/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */
+/* Q_CSR 32 bit BMU Control/Status Register */
+
+enum {
+ CSR_SV_IDLE = 1<<24, /* BMU SM Idle */
+
+ CSR_DESC_CLR = 1<<21, /* Clear Reset for Descr */
+ CSR_DESC_SET = 1<<20, /* Set Reset for Descr */
+ CSR_FIFO_CLR = 1<<19, /* Clear Reset for FIFO */
+ CSR_FIFO_SET = 1<<18, /* Set Reset for FIFO */
+ CSR_HPI_RUN = 1<<17, /* Release HPI SM */
+ CSR_HPI_RST = 1<<16, /* Reset HPI SM to Idle */
+ CSR_SV_RUN = 1<<15, /* Release Supervisor SM */
+ CSR_SV_RST = 1<<14, /* Reset Supervisor SM */
+ CSR_DREAD_RUN = 1<<13, /* Release Descr Read SM */
+ CSR_DREAD_RST = 1<<12, /* Reset Descr Read SM */
+ CSR_DWRITE_RUN = 1<<11, /* Release Descr Write SM */
+ CSR_DWRITE_RST = 1<<10, /* Reset Descr Write SM */
+ CSR_TRANS_RUN = 1<<9, /* Release Transfer SM */
+ CSR_TRANS_RST = 1<<8, /* Reset Transfer SM */
+ CSR_ENA_POL = 1<<7, /* Enable Descr Polling */
+ CSR_DIS_POL = 1<<6, /* Disable Descr Polling */
+ CSR_STOP = 1<<5, /* Stop Rx/Tx Queue */
+ CSR_START = 1<<4, /* Start Rx/Tx Queue */
+ CSR_IRQ_CL_P = 1<<3, /* (Rx) Clear Parity IRQ */
+ CSR_IRQ_CL_B = 1<<2, /* Clear EOB IRQ */
+ CSR_IRQ_CL_F = 1<<1, /* Clear EOF IRQ */
+ CSR_IRQ_CL_C = 1<<0, /* Clear ERR IRQ */
+};
+
+#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\
+ CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\
+ CSR_TRANS_RST)
+#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\
+ CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\
+ CSR_TRANS_RUN)
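+
+/*
+ * Editorial sketch, not part of the original header: how the two masks
+ * above are used - writing CSR_SET_RESET to a queue's Q_CSR holds all of
+ * that queue's BMU state machines in reset, CSR_CLR_RESET releases them
+ * again.  writel() comes from <linux/io.h>; how the driver maps the
+ * register window (io) is outside this header.
+ */
+static inline void q_reset_sketch(void __iomem *io, int queue)
+{
+        writel(CSR_SET_RESET, io + Q_ADDR(queue, Q_CSR));
+        writel(CSR_CLR_RESET, io + Q_ADDR(queue, Q_CSR));
+}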
+
+/* Q_F 32 bit Flag Register */
+enum {
+ F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */
+ F_EMPTY = 1<<27, /* Tx FIFO: empty flag */
+ F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */
+ F_WM_REACHED = 1<<25, /* Watermark reached */
+
+ F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */
+ F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */
+};
+
+/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */
+/* RB_START 32 bit RAM Buffer Start Address */
+/* RB_END 32 bit RAM Buffer End Address */
+/* RB_WP 32 bit RAM Buffer Write Pointer */
+/* RB_RP 32 bit RAM Buffer Read Pointer */
+/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */
+/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */
+/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */
+/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */
+/* RB_PC 32 bit RAM Buffer Packet Counter */
+/* RB_LEV 32 bit RAM Buffer Level Register */
+
+#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */
+/* RB_TST2 8 bit RAM Buffer Test Register 2 */
+/* RB_TST1 8 bit RAM Buffer Test Register 1 */
+
+/* RB_CTRL 8 bit RAM Buffer Control Register */
+enum {
+ RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */
+ RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */
+ RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */
+ RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */
+ RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */
+ RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */
+};
+
+/* Transmit MAC FIFO and Transmit LED Registers (GENESIS only), */
+enum {
+ TX_MFF_EA = 0x0d00,/* 32 bit Transmit MAC FIFO End Address */
+ TX_MFF_WP = 0x0d04,/* 32 bit Transmit MAC FIFO WR Pointer */
+ TX_MFF_WSP = 0x0d08,/* 32 bit Transmit MAC FIFO WR Shadow Ptr */
+ TX_MFF_RP = 0x0d0c,/* 32 bit Transmit MAC FIFO RD Pointer */
+ TX_MFF_PC = 0x0d10,/* 32 bit Transmit MAC FIFO Packet Cnt */
+ TX_MFF_LEV = 0x0d14,/* 32 bit Transmit MAC FIFO Level */
+ TX_MFF_CTRL1 = 0x0d18,/* 16 bit Transmit MAC FIFO Ctrl Reg 1 */
+ TX_MFF_WAF = 0x0d1a,/* 8 bit Transmit MAC Wait after flush */
+
+ TX_MFF_CTRL2 = 0x0d1c,/* 8 bit Transmit MAC FIFO Ctrl Reg 2 */
+ TX_MFF_TST1 = 0x0d1d,/* 8 bit Transmit MAC FIFO Test Reg 1 */
+ TX_MFF_TST2 = 0x0d1e,/* 8 bit Transmit MAC FIFO Test Reg 2 */
+
+ TX_LED_INI = 0x0d20,/* 32 bit Transmit LED Cnt Init Value */
+ TX_LED_VAL = 0x0d24,/* 32 bit Transmit LED Cnt Current Val */
+ TX_LED_CTRL = 0x0d28,/* 8 bit Transmit LED Cnt Control Reg */
+ TX_LED_TST = 0x0d29,/* 8 bit Transmit LED Cnt Test Reg */
+};
+
+/* Counter and Timer constants, for a host clock of 62.5 MHz */
+#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */
+#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */
+
+#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */
+
+#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */
+ /* 215 ms at 78.12 MHz */
+
+#define SK_FACT_62 100 /* is given in percent */
+#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */
+#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */
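+
+/*
+ * Editorial sketch, not part of the original header: the SK_FACT_*
+ * percentages rescale the 62.5 MHz based tick constants above to the
+ * actual host clock - the same wall-clock delay needs 125/100 of the
+ * ticks on a 78.12 MHz YUKON, e.g.
+ * sk_scale_ticks_sketch(SK_DPOLL_DEF, SK_FACT_78).  Plain u32 math is
+ * safe here because the timer values are at most 24 bit (SK_DPOLL_MAX).
+ */
+static inline u32 sk_scale_ticks_sketch(u32 ticks_at_62_5, u32 fact)
+{
+        return ticks_at_62_5 * fact / SK_FACT_62;
+}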
+
+
+/* Transmit GMAC FIFO (YUKON only) */
+enum {
+ TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */
+ TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/
+ TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */
+
+ TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */
+ TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. */
+ TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */
+
+ TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */
+ TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */
+ TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */
+
+ /* Descriptor Poll Timer Registers */
+ B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */
+ B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */
+ B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */
+
+ B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */
+
+ /* Time Stamp Timer Registers (YUKON only) */
+ GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */
+ GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */
+ GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */
+};
+
+
+enum {
+ LINKLED_OFF = 0x01,
+ LINKLED_ON = 0x02,
+ LINKLED_LINKSYNC_OFF = 0x04,
+ LINKLED_LINKSYNC_ON = 0x08,
+ LINKLED_BLINK_OFF = 0x10,
+ LINKLED_BLINK_ON = 0x20,
+};
+
+/* GMAC and GPHY Control Registers (YUKON only) */
+enum {
+ GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
+ GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */
+ GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */
+ GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */
+ GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */
+
+/* Wake-up Frame Pattern Match Control Registers (YUKON only) */
+
+ WOL_REG_OFFS = 0x20,/* HW-Bug: Address is + 0x20 against spec. */
+
+ WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */
+ WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */
+ WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */
+ WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */
+ WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */
+
+/* WOL Pattern Length Registers (YUKON only) */
+
+ WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */
+ WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */
+
+/* WOL Pattern Counter Registers (YUKON only) */
+
+ WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */
+ WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */
+};
+#define WOL_REGS(port, x) (x + (port)*0x80)
+
+enum {
+ WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */
+ WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */
+};
+#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400)
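+
+/*
+ * Editorial sketch, not part of the original header: both WOL macros
+ * above only fold the port number into an address - the WOL register
+ * block repeats every 0x80 bytes and the pattern RAM every 0x400 bytes,
+ * so WOL_REGS(1, WOL_CTRL_STAT) = 0x0fa0 and
+ * WOL_PATT_RAM_BASE(1) = 0x1400 (= WOL_PATT_RAM_2).
+ */
+static inline int wol_ctrl_stat_sketch(int port)
+{
+        return WOL_REGS(port, WOL_CTRL_STAT);
+}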
+
+enum {
+ BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */
+ BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */
+ BASE_XMAC_2 = 0x3000,/* XMAC 2 registers */
+ BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */
+};
+
+/*
+ * Receive Frame Status Encoding
+ */
+enum {
+ XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */
+ XMR_FS_LEN_SHIFT = 18,
+ XMR_FS_2L_VLAN = 1<<17, /* Bit 17: tagged wh 2Lev VLAN ID*/
+ XMR_FS_1_VLAN = 1<<16, /* Bit 16: tagged wh 1ev VLAN ID*/
+ XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */
+ XMR_FS_MC = 1<<14, /* Bit 14: Multicast Frame */
+ XMR_FS_UC = 1<<13, /* Bit 13: Unicast Frame */
+
+ XMR_FS_BURST = 1<<11, /* Bit 11: Burst Mode */
+ XMR_FS_CEX_ERR = 1<<10, /* Bit 10: Carrier Ext. Error */
+ XMR_FS_802_3 = 1<<9, /* Bit 9: 802.3 Frame */
+ XMR_FS_COL_ERR = 1<<8, /* Bit 8: Collision Error */
+ XMR_FS_CAR_ERR = 1<<7, /* Bit 7: Carrier Event Error */
+ XMR_FS_LEN_ERR = 1<<6, /* Bit 6: In-Range Length Error */
+ XMR_FS_FRA_ERR = 1<<5, /* Bit 5: Framing Error */
+ XMR_FS_RUNT = 1<<4, /* Bit 4: Runt Frame */
+ XMR_FS_LNG_ERR = 1<<3, /* Bit 3: Giant (Jumbo) Frame */
+ XMR_FS_FCS_ERR = 1<<2, /* Bit 2: Frame Check Sequ Err */
+ XMR_FS_ERR = 1<<1, /* Bit 1: Frame Error */
+ XMR_FS_MCTRL = 1<<0, /* Bit 0: MAC Control Packet */
+
+/*
+ * XMR_FS_ERR will be set if
+ * XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT,
+ * XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR
+ * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue
+ * XMR_FS_ERR unless the corresponding bit in the Receive Command
+ * Register is set.
+ */
+};
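+
+/*
+ * Editorial sketch, not part of the original header: picking apart one
+ * receive status word as encoded above - the frame length from bits
+ * 31..18 and the summary error flag from bit 1, which covers the error
+ * conditions listed in the comment above.
+ */
+static inline int xmr_fs_len_sketch(u32 status)
+{
+        return (status & XMR_FS_LEN) >> XMR_FS_LEN_SHIFT;
+}
+static inline int xmr_fs_bad_sketch(u32 status)
+{
+        return (status & XMR_FS_ERR) != 0;
+}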
+
+/*
+ * XMAC-PHY Registers, indirect addressed over the XMAC
+ */
+enum {
+ PHY_XMAC_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
+ PHY_XMAC_STAT = 0x01,/* 16 bit r/w PHY Status Register */
+ PHY_XMAC_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
+ PHY_XMAC_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
+ PHY_XMAC_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
+ PHY_XMAC_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Abi Reg */
+ PHY_XMAC_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
+ PHY_XMAC_NEPG = 0x07,/* 16 bit r/w Next Page Register */
+ PHY_XMAC_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
+
+ PHY_XMAC_EXT_STAT = 0x0f,/* 16 bit r/o Ext Status Register */
+ PHY_XMAC_RES_ABI = 0x10,/* 16 bit r/o PHY Resolved Ability */
+};
+/*
+ * Broadcom-PHY Registers, indirect addressed over XMAC
+ */
+enum {
+ PHY_BCOM_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
+ PHY_BCOM_STAT = 0x01,/* 16 bit r/o PHY Status Register */
+ PHY_BCOM_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
+ PHY_BCOM_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
+ PHY_BCOM_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
+ PHY_BCOM_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
+ PHY_BCOM_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
+ PHY_BCOM_NEPG = 0x07,/* 16 bit r/w Next Page Register */
+ PHY_BCOM_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
+ /* Broadcom-specific registers */
+ PHY_BCOM_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
+ PHY_BCOM_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
+ PHY_BCOM_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
+ PHY_BCOM_P_EXT_CTRL = 0x10,/* 16 bit r/w PHY Extended Ctrl Reg */
+ PHY_BCOM_P_EXT_STAT = 0x11,/* 16 bit r/o PHY Extended Stat Reg */
+ PHY_BCOM_RE_CTR = 0x12,/* 16 bit r/w Receive Error Counter */
+ PHY_BCOM_FC_CTR = 0x13,/* 16 bit r/w False Carrier Sense Cnt */
+ PHY_BCOM_RNO_CTR = 0x14,/* 16 bit r/w Receiver NOT_OK Cnt */
+
+ PHY_BCOM_AUX_CTRL = 0x18,/* 16 bit r/w Auxiliary Control Reg */
+ PHY_BCOM_AUX_STAT = 0x19,/* 16 bit r/o Auxiliary Stat Summary */
+ PHY_BCOM_INT_STAT = 0x1a,/* 16 bit r/o Interrupt Status Reg */
+ PHY_BCOM_INT_MASK = 0x1b,/* 16 bit r/w Interrupt Mask Reg */
+};
+
+/*
+ * Marvel-PHY Registers, indirect addressed over GMAC
+ */
+enum {
+ PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
+ PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */
+ PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
+ PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
+ PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
+ PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
+ PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
+ PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */
+ PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
+ /* Marvel-specific registers */
+ PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
+ PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
+ PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
+ PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */
+ PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */
+ PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */
+ PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
+ PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */
+ PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */
+ PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */
+ PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */
+ PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */
+ PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */
+ PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */
+ PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */
+ PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */
+ PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */
+ PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */
+
+/* for 10/100 Fast Ethernet PHY (88E3082 only) */
+ PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */
+ PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */
+ PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */
+ PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */
+ PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
+};
+
+enum {
+ PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
+ PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
+ PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */
+ PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */
+ PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */
+ PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */
+ PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */
+ PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */
+ PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */
+ PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */
+};
+
+enum {
+ PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */
+ PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */
+ PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */
+};
+
+enum {
+ PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */
+
+ PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */
+ PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */
+ PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */
+ PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */
+ PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */
+ PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */
+ PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */
+};
+
+enum {
+ PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */
+ PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */
+ PHY_I1_REV_MSK = 0xf, /* Bit 3.. 
0: Revision Number */ +}; + +/* different Broadcom PHY Ids */ +enum { + PHY_BCOM_ID1_A1 = 0x6041, + PHY_BCOM_ID1_B2 = 0x6043, + PHY_BCOM_ID1_C0 = 0x6044, + PHY_BCOM_ID1_C5 = 0x6047, +}; + +/* different Marvell PHY Ids */ +enum { + PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */ + PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ + PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ + PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ + PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ +}; + +/* Advertisement register bits */ +enum { + PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ + PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ + PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */ + + PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */ + PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */ + PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */ + PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */ + PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */ + PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */ + PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */ + PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */ + PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/ + PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA, + PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL | + PHY_AN_100HALF | PHY_AN_100FULL, +}; + +/* Xmac Specific */ +enum { + PHY_X_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ + PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ + PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */ + + PHY_X_AN_PAUSE = 3<<7,/* Bit 8.. 7: Pause Bits */ + PHY_X_AN_HD = 1<<6, /* Bit 6: Half Duplex */ + PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */ +}; + +/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */ +enum { + PHY_X_P_NO_PAUSE= 0<<7,/* Bit 8..7: no Pause Mode */ + PHY_X_P_SYM_MD = 1<<7, /* Bit 8..7: symmetric Pause Mode */ + PHY_X_P_ASYM_MD = 2<<7,/* Bit 8..7: asymmetric Pause Mode */ + PHY_X_P_BOTH_MD = 3<<7,/* Bit 8..7: both Pause Mode */ +}; + + +/***** PHY_XMAC_EXT_STAT 16 bit r/w Extended Status Register *****/ +enum { + PHY_X_EX_FD = 1<<15, /* Bit 15: Device Supports Full Duplex */ + PHY_X_EX_HD = 1<<14, /* Bit 14: Device Supports Half Duplex */ +}; + +/***** PHY_XMAC_RES_ABI 16 bit r/o PHY Resolved Ability *****/ +enum { + PHY_X_RS_PAUSE = 3<<7, /* Bit 8..7: selected Pause Mode */ + PHY_X_RS_HD = 1<<6, /* Bit 6: Half Duplex Mode selected */ + PHY_X_RS_FD = 1<<5, /* Bit 5: Full Duplex Mode selected */ + PHY_X_RS_ABLMIS = 1<<4, /* Bit 4: duplex or pause cap mismatch */ + PHY_X_RS_PAUMIS = 1<<3, /* Bit 3: pause capability mismatch */ +}; + +/* Remote Fault Bits (PHY_X_AN_RFB) encoding */ +enum { + X_RFB_OK = 0<<12,/* Bit 13..12 No errors, Link OK */ + X_RFB_LF = 1<<12,/* Bit 13..12 Link Failure */ + X_RFB_OFF = 2<<12,/* Bit 13..12 Offline */ + X_RFB_AN_ERR = 3<<12,/* Bit 13..12 Auto-Negotiation Error */ +}; + +/* Broadcom-Specific */ +/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +enum { + PHY_B_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */ + PHY_B_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */ + PHY_B_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */ + PHY_B_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */ + PHY_B_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */ + PHY_B_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */ +}; + +/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +/***** 
PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +enum { + PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */ + PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */ + PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */ + PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */ + PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */ + PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */ + /* Bit 9..8: reserved */ + PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */ +}; + +/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/ +enum { + PHY_B_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */ + PHY_B_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */ + PHY_B_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */ + PHY_B_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */ +}; + +/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/ +enum { + PHY_B_PEC_MAC_PHY = 1<<15, /* Bit 15: 10BIT/GMI-Interface */ + PHY_B_PEC_DIS_CROSS = 1<<14, /* Bit 14: Disable MDI Crossover */ + PHY_B_PEC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */ + PHY_B_PEC_INT_DIS = 1<<12, /* Bit 12: Interrupts Disabled */ + PHY_B_PEC_F_INT = 1<<11, /* Bit 11: Force Interrupt */ + PHY_B_PEC_BY_45 = 1<<10, /* Bit 10: Bypass 4B5B-Decoder */ + PHY_B_PEC_BY_SCR = 1<<9, /* Bit 9: Bypass Scrambler */ + PHY_B_PEC_BY_MLT3 = 1<<8, /* Bit 8: Bypass MLT3 Encoder */ + PHY_B_PEC_BY_RXA = 1<<7, /* Bit 7: Bypass Rx Alignm. */ + PHY_B_PEC_RES_SCR = 1<<6, /* Bit 6: Reset Scrambler */ + PHY_B_PEC_EN_LTR = 1<<5, /* Bit 5: Ena LED Traffic Mode */ + PHY_B_PEC_LED_ON = 1<<4, /* Bit 4: Force LED's on */ + PHY_B_PEC_LED_OFF = 1<<3, /* Bit 3: Force LED's off */ + PHY_B_PEC_EX_IPG = 1<<2, /* Bit 2: Extend Tx IPG Mode */ + PHY_B_PEC_3_LED = 1<<1, /* Bit 1: Three Link LED mode */ + PHY_B_PEC_HIGH_LA = 1<<0, /* Bit 0: GMII FIFO Elasticy */ +}; + +/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/ +enum { + PHY_B_PES_CROSS_STAT = 1<<13, /* Bit 13: MDI Crossover Status */ + PHY_B_PES_INT_STAT = 1<<12, /* Bit 12: Interrupt Status */ + PHY_B_PES_RRS = 1<<11, /* Bit 11: Remote Receiver Stat. */ + PHY_B_PES_LRS = 1<<10, /* Bit 10: Local Receiver Stat. 
*/ + PHY_B_PES_LOCKED = 1<<9, /* Bit 9: Locked */ + PHY_B_PES_LS = 1<<8, /* Bit 8: Link Status */ + PHY_B_PES_RF = 1<<7, /* Bit 7: Remote Fault */ + PHY_B_PES_CE_ER = 1<<6, /* Bit 6: Carrier Ext Error */ + PHY_B_PES_BAD_SSD = 1<<5, /* Bit 5: Bad SSD */ + PHY_B_PES_BAD_ESD = 1<<4, /* Bit 4: Bad ESD */ + PHY_B_PES_RX_ER = 1<<3, /* Bit 3: Receive Error */ + PHY_B_PES_TX_ER = 1<<2, /* Bit 2: Transmit Error */ + PHY_B_PES_LOCK_ER = 1<<1, /* Bit 1: Lock Error */ + PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */ +}; + +/* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ +enum { + PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */ + + PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */ + PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */ +}; + + +/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/ +enum { + PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */ + +/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/ + PHY_B_RC_LOC_MSK = 0xff00, /* Bit 15..8: Local Rx NOT_OK cnt */ + PHY_B_RC_REM_MSK = 0x00ff, /* Bit 7..0: Remote Rx NOT_OK cnt */ + +/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/ + PHY_B_AC_L_SQE = 1<<15, /* Bit 15: Low Squelch */ + PHY_B_AC_LONG_PACK = 1<<14, /* Bit 14: Rx Long Packets */ + PHY_B_AC_ER_CTRL = 3<<12,/* Bit 13..12: Edgerate Control */ + /* Bit 11: reserved */ + PHY_B_AC_TX_TST = 1<<10, /* Bit 10: Tx test bit, always 1 */ + /* Bit 9.. 8: reserved */ + PHY_B_AC_DIS_PRF = 1<<7, /* Bit 7: dis part resp filter */ + /* Bit 6: reserved */ + PHY_B_AC_DIS_PM = 1<<5, /* Bit 5: dis power management */ + /* Bit 4: reserved */ + PHY_B_AC_DIAG = 1<<3, /* Bit 3: Diagnostic Mode */ +}; + +/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/ +enum { + PHY_B_AS_AN_C = 1<<15, /* Bit 15: AutoNeg complete */ + PHY_B_AS_AN_CA = 1<<14, /* Bit 14: AN Complete Ack */ + PHY_B_AS_ANACK_D = 1<<13, /* Bit 13: AN Ack Detect */ + PHY_B_AS_ANAB_D = 1<<12, /* Bit 12: AN Ability Detect */ + PHY_B_AS_NPW = 1<<11, /* Bit 11: AN Next Page Wait */ + PHY_B_AS_AN_RES_MSK = 7<<8,/* Bit 10..8: AN HDC */ + PHY_B_AS_PDF = 1<<7, /* Bit 7: Parallel Detect. Fault */ + PHY_B_AS_RF = 1<<6, /* Bit 6: Remote Fault */ + PHY_B_AS_ANP_R = 1<<5, /* Bit 5: AN Page Received */ + PHY_B_AS_LP_ANAB = 1<<4, /* Bit 4: LP AN Ability */ + PHY_B_AS_LP_NPAB = 1<<3, /* Bit 3: LP Next Page Ability */ + PHY_B_AS_LS = 1<<2, /* Bit 2: Link Status */ + PHY_B_AS_PRR = 1<<1, /* Bit 1: Pause Resolution-Rx */ + PHY_B_AS_PRT = 1<<0, /* Bit 0: Pause Resolution-Tx */ +}; +#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT) + +/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/ +/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/ +enum { + PHY_B_IS_PSE = 1<<14, /* Bit 14: Pair Swap Error */ + PHY_B_IS_MDXI_SC = 1<<13, /* Bit 13: MDIX Status Change */ + PHY_B_IS_HCT = 1<<12, /* Bit 12: counter above 32k */ + PHY_B_IS_LCT = 1<<11, /* Bit 11: counter above 128 */ + PHY_B_IS_AN_PR = 1<<10, /* Bit 10: Page Received */ + PHY_B_IS_NO_HDCL = 1<<9, /* Bit 9: No HCD Link */ + PHY_B_IS_NO_HDC = 1<<8, /* Bit 8: No HCD */ + PHY_B_IS_NEG_USHDC = 1<<7, /* Bit 7: Negotiated Unsup. 
HCD */
+ PHY_B_IS_SCR_S_ER = 1<<6, /* Bit 6: Scrambler Sync Error */
+ PHY_B_IS_RRS_CHANGE = 1<<5, /* Bit 5: Remote Rx Stat Change */
+ PHY_B_IS_LRS_CHANGE = 1<<4, /* Bit 4: Local Rx Stat Change */
+ PHY_B_IS_DUP_CHANGE = 1<<3, /* Bit 3: Duplex Mode Change */
+ PHY_B_IS_LSP_CHANGE = 1<<2, /* Bit 2: Link Speed Change */
+ PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */
+ PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */
+};
+#define PHY_B_DEF_MSK \
+ (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \
+ PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE))
+
+/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */
+enum {
+ PHY_B_P_NO_PAUSE = 0<<10,/* Bit 11..10: no Pause Mode */
+ PHY_B_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */
+ PHY_B_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */
+ PHY_B_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */
+};
+/*
+ * Resolved Duplex mode and Capabilities (Aux Status Summary Reg)
+ */
+enum {
+ PHY_B_RES_1000FD = 7<<8,/* Bit 10..8: 1000Base-T Full Dup. */
+ PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */
+};
+
+/* Marvell-Specific */
+enum {
+ PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
+ PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */
+ PHY_M_AN_RF = 1<<13, /* Remote Fault */
+
+ PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */
+ PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */
+ PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */
+ PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */
+ PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */
+ PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */
+ PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */
+ PHY_M_AN_SEL_MSK =0x1f<<4, /* Bit 4.. 0: Selector Field Mask */
+};
+
+/* special defines for FIBER (88E1011S only) */
+enum {
+ PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */
+ PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */
+ PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 1000Base-X Half Duplex */
+ PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 1000Base-X Full Duplex */
+};
+
+/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */
+enum {
+ PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */
+ PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */
+ PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */
+ PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */
+};
+
+/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
+enum {
+ PHY_M_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */
+ PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */
+ PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */
+ PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */
+ PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */
+ PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */
+};
+
+/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/
+enum {
+ PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */
+ PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */
+ PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */
+ PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */
+ PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */
+ PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */
+ PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. 
Mask */ + PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */ + PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */ + PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */ + PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */ + PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */ +}; + +enum { + PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */ + PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ +}; + +enum { + PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ + PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */ + PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */ +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */ + PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */ + PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Puls (NLP) Check */ + PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */ + PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Puls Generation */ + + PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */ + PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */ + + PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */ + PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */ +}; + +/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/ +enum { + PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */ + PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */ + PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */ + PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */ + PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */ + PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */ + PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */ + PHY_M_PS_LINK_UP = 1<<10, /* Link Up */ + PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */ + PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */ + PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */ + PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */ + PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */ + PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */ + PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */ + PHY_M_PS_JABBER = 1<<0, /* Jabber */ +}; + +#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN) + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */ + PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps */ +}; + +enum { + PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */ + PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */ + PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */ + PHY_M_IS_AN_PR = 1<<12, /* Page Received */ + PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */ + PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */ + PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */ + PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */ + PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */ + PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */ + PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */ + PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */ + + PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. 
Status Changed */ + PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */ + PHY_M_IS_JABBER = 1<<0, /* Jabber */ + + PHY_M_IS_DEF_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_LSP_CHANGE | + PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR, + + PHY_M_IS_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL, +}; + +/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/ +enum { + PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */ + PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */ + + PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */ + PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_S_DSC_MSK = 3<<8, /* Bit 9.. 8: Slave Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_M_DSC_MSK2 = 7<<9, /* Bit 11.. 9: Master Downshift Counter */ + /* (88E1111 only) */ + PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */ + /* !!! Errata in spec. (1 = disable) */ + PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/ + PHY_M_EC_MAC_S_MSK = 7<<4, /* Bit 6.. 4: Def. MAC interface speed */ + PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */ + PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */ + PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */ + PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */}; + +#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */ +#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */ +#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */ + +#define PHY_M_EC_M_DSC_2(x) ((u16)(x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */ + /* 100=5x; 101=6x; 110=7x; 111=8x */ +enum { + MAC_TX_CLK_0_MHZ = 2, + MAC_TX_CLK_2_5_MHZ = 6, + MAC_TX_CLK_25_MHZ = 7, +}; + +/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ +enum { + PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */ + PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */ + PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */ + PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */ + PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */ + PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */ + PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */ + /* (88E1111 only) */ +}; +#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK) +#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK) + +enum { + PHY_M_LEDC_LINK_MSK = 3<<3, /* Bit 4.. 
3: Link Control Mask */ + /* (88E1011 only) */ + PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */ + PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */ + PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */ + PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */ + PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */ +}; + +enum { + PULS_NO_STR = 0, /* no pulse stretching */ + PULS_21MS = 1, /* 21 ms to 42 ms */ + PULS_42MS = 2, /* 42 ms to 84 ms */ + PULS_84MS = 3, /* 84 ms to 170 ms */ + PULS_170MS = 4, /* 170 ms to 340 ms */ + PULS_340MS = 5, /* 340 ms to 670 ms */ + PULS_670MS = 6, /* 670 ms to 1.3 s */ + PULS_1300MS = 7, /* 1.3 s to 2.7 s */ +}; + + +enum { + BLINK_42MS = 0, /* 42 ms */ + BLINK_84MS = 1, /* 84 ms */ + BLINK_170MS = 2, /* 170 ms */ + BLINK_340MS = 3, /* 340 ms */ + BLINK_670MS = 4, /* 670 ms */ +}; + +/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ +#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */ + /* Bit 13..12: reserved */ +#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */ +#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */ +#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */ +#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */ +#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */ +#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */ + +enum { + MO_LED_NORM = 0, + MO_LED_BLINK = 1, + MO_LED_OFF = 2, + MO_LED_ON = 3, +}; + +/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/ +enum { + PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */ + PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */ + PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */ + PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */ + PHY_M_EC2_FO_AM_MSK = 7, /* Bit 2.. 0: Fiber Output Amplitude */ +}; + +/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/ +enum { + PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */ + PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */ + PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */ + PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */ + PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */ + PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */ + PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */ + /* (88E1111 only) */ + /* Bit 9.. 4: reserved (88E1011 only) */ + PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */ + PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */ + PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */ +}; + +/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/ +enum { + PHY_M_CABD_ENA_TEST = 1<<15, /* Enable Test (Page 0) */ + PHY_M_CABD_DIS_WAIT = 1<<15, /* Disable Waiting Period (Page 1) */ + /* (88E1111 only) */ + PHY_M_CABD_STAT_MSK = 3<<13, /* Bit 14..13: Status Mask */ + PHY_M_CABD_AMPL_MSK = 0x1f<<8, /* Bit 12.. 8: Amplitude Mask */ + /* (88E1111 only) */ + PHY_M_CABD_DIST_MSK = 0xff, /* Bit 7.. 0: Distance Mask */ +}; + +/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */ +enum { + CABD_STAT_NORMAL= 0, + CABD_STAT_SHORT = 1, + CABD_STAT_OPEN = 2, + CABD_STAT_FAIL = 3, +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/ + /* Bit 15..12: reserved (used internally) */ +enum { + PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */ + PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 
4: LED1 Mask (ACT) */
+ PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */
+};
+
+#define PHY_M_FELP_LED2_CTRL(x) (((x)<<8) & PHY_M_FELP_LED2_MSK)
+#define PHY_M_FELP_LED1_CTRL(x) (((x)<<4) & PHY_M_FELP_LED1_MSK)
+#define PHY_M_FELP_LED0_CTRL(x) (((x)<<0) & PHY_M_FELP_LED0_MSK)
+
+enum {
+ LED_PAR_CTRL_COLX = 0x00,
+ LED_PAR_CTRL_ERROR = 0x01,
+ LED_PAR_CTRL_DUPLEX = 0x02,
+ LED_PAR_CTRL_DP_COL = 0x03,
+ LED_PAR_CTRL_SPEED = 0x04,
+ LED_PAR_CTRL_LINK = 0x05,
+ LED_PAR_CTRL_TX = 0x06,
+ LED_PAR_CTRL_RX = 0x07,
+ LED_PAR_CTRL_ACT = 0x08,
+ LED_PAR_CTRL_LNK_RX = 0x09,
+ LED_PAR_CTRL_LNK_AC = 0x0a,
+ LED_PAR_CTRL_ACT_BL = 0x0b,
+ LED_PAR_CTRL_TX_BL = 0x0c,
+ LED_PAR_CTRL_RX_BL = 0x0d,
+ LED_PAR_CTRL_COL_BL = 0x0e,
+ LED_PAR_CTRL_INACT = 0x0f
+};
+
+/***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/
+enum {
+ PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */
+ PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */
+ PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */
+};
+
+
+/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/
+enum {
+ PHY_M_LEDC_LOS_MSK = 0xf<<12, /* Bit 15..12: LOS LED Ctrl. Mask */
+ PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */
+ PHY_M_LEDC_STA1_MSK = 0xf<<4, /* Bit 7.. 4: STAT1 LED Ctrl. Mask */
+ PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */
+};
+
+#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK)
+#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK)
+#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK)
+#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK)
+
+/* GMAC registers */
+/* Port Registers */
+enum {
+ GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */
+ GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */
+ GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */
+ GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */
+ GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */
+ GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */
+ GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */
+/* Source Address Registers */
+ GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */
+ GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */
+ GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */
+ GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */
+ GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */
+ GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */
+
+/* Multicast Address Hash Registers */
+ GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */
+ GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */
+ GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */
+ GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */
+
+/* Interrupt Source Registers */
+ GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */
+ GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */
+ GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */
+
+/* Interrupt Mask Registers */
+ GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */
+ GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */
+ GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */
IRQ Mask */ + +/* Serial Management Interface (SMI) Registers */ + GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */ + GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */ + GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */ +}; + +/* MIB Counters */ +#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */ +#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */ + +/* + * MIB Counters base address definitions (low word) - + * use offset 4 for access to high word (32 bit r/o) + */ +enum { + GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */ + GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */ + GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */ + GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */ + GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */ + /* GM_MIB_CNT_BASE + 40: reserved */ + GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */ + GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */ + GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */ + GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */ + GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */ + GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */ + GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */ + GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */ + GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */ + GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */ + GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */ + GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */ + GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */ + GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */ + GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */ + /* GM_MIB_CNT_BASE + 168: reserved */ + GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */ + /* GM_MIB_CNT_BASE + 184: reserved */ + GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */ + GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */ + GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */ + GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */ + GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */ + GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */ + GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */ + GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */ + GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */ + GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */ + GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */ + GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */ + GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */ + + GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */ + GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */ + GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. 
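(i.e. excessive collisions; the GMAC counterpart of the XMAC's XM_TXF_ABO_COL counter)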
*/ + GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */ + GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */ + GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */ +}; + +/* GMAC Bit Definitions */ +/* GM_GP_STAT 16 bit r/o General Purpose Status Register */ +enum { + GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */ + GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */ + GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */ + GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */ + GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */ + GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */ + GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */ + GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */ + + GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */ + GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */ + GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */ + GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */ + GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */ +}; + +/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ +enum { + GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */ + GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */ + GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */ + GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */ + GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */ + GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */ + GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */ + GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */ + GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */ + GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */ + GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */ + GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */ + GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */ + GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. 
*/ + GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */ +}; + +#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) +#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS) + +/* GM_TX_CTRL 16 bit r/w Transmit Control Register */ +enum { + GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ + GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ + GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ + GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */ +}; + +#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) +#define TX_COL_DEF 0x04 /* late collision after 64 byte */ + +/* GM_RX_CTRL 16 bit r/w Receive Control Register */ +enum { + GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */ + GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */ + GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */ + GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */ +}; + +/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ +enum { + GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */ + GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */ + GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */ + + TX_JAM_LEN_DEF = 0x03, + TX_JAM_IPG_DEF = 0x0b, + TX_IPG_JAM_DEF = 0x1c, +}; + +#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK) +#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK) +#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK) + + +/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ +enum { + GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */ + GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */ + GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */ + GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */ + GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ +}; + +#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) +#define DATA_BLIND_DEF 0x04 + +#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK) +#define IPG_DATA_DEF 0x1e + +/* GM_SMI_CTRL 16 bit r/w SMI Control Register */ +enum { + GM_SMI_CT_PHY_A_MSK = 0x1f<<11, /* Bit 15..11: PHY Device Address */ + GM_SMI_CT_REG_A_MSK = 0x1f<<6, /* Bit 10.. 
6: PHY Register Address */ + GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/ + GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */ + GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */ +}; + +#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK) +#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK) + +/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */ +enum { + GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */ + GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */ +}; + +/* Receive Frame Status Encoding */ +enum { + GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ + GMR_FS_LEN_SHIFT = 16, + GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */ + GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */ + GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */ + GMR_FS_MC = 1<<10, /* Bit 10: Multicast Packet */ + GMR_FS_BC = 1<<9, /* Bit 9: Broadcast Packet */ + GMR_FS_RX_OK = 1<<8, /* Bit 8: Receive OK (Good Packet) */ + GMR_FS_GOOD_FC = 1<<7, /* Bit 7: Good Flow-Control Packet */ + GMR_FS_BAD_FC = 1<<6, /* Bit 6: Bad Flow-Control Packet */ + GMR_FS_MII_ERR = 1<<5, /* Bit 5: MII Error */ + GMR_FS_LONG_ERR = 1<<4, /* Bit 4: Too Long Packet */ + GMR_FS_FRAGMENT = 1<<3, /* Bit 3: Fragment */ + + GMR_FS_CRC_ERR = 1<<1, /* Bit 1: CRC Error */ + GMR_FS_RX_FF_OV = 1<<0, /* Bit 0: Rx FIFO Overflow */ + +/* + * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR) + */ + GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR | + GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC | + GMR_FS_JABBER, +/* Rx GMAC FIFO Flush Mask (default) */ + RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR | + GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER, +}; + +/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ +enum { + GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */ + GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */ + GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */ + + GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */ + GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */ + GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */ + GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */ + GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */ + GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */ + GMF_CLI_RX_FC = 1<<4, /* Clear IRQ Rx Frame Complete */ + GMF_OPER_ON = 1<<3, /* Operational Mode On */ + GMF_OPER_OFF = 1<<2, /* Operational Mode Off */ + GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */ + GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */ + + RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */ +}; + + +/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ +enum { + GMF_WSP_TST_ON = 1<<18, /* Write Shadow Pointer Test On */ + GMF_WSP_TST_OFF = 1<<17, /* Write Shadow Pointer Test Off */ + GMF_WSP_STEP = 1<<16, /* Write Shadow Pointer Step/Increment */ + + GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */ + GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */ + GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */ +}; + +/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */ +enum { + GMT_ST_START = 1<<2, /* Start Time Stamp Timer */ + GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */ + GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */ +}; + +/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ +enum { + GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */ + GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */ + GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */ + GMC_F_LOOPB_OFF = 1<<4, /* FIFO 
Loopback Off */ + GMC_PAUSE_ON = 1<<3, /* Pause On */ + GMC_PAUSE_OFF = 1<<2, /* Pause Off */ + GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */ + GMC_RST_SET = 1<<0, /* Set GMAC Reset */ +}; + +/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ +enum { + GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */ + GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */ + GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */ + GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */ + GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */ + GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */ + GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */ + GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */ + GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */ + GPC_ANEG_0 = 1<<19, /* ANEG[0] */ + GPC_ENA_XC = 1<<18, /* Enable MDI crossover */ + GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */ + GPC_ANEG_3 = 1<<16, /* ANEG[3] */ + GPC_ANEG_2 = 1<<15, /* ANEG[2] */ + GPC_ANEG_1 = 1<<14, /* ANEG[1] */ + GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */ + GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */ + GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */ + GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */ + GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */ + GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */ + /* Bits 7..2: reserved */ + GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */ + GPC_RST_SET = 1<<0, /* Set GPHY Reset */ +}; + +#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3|GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0) +#define GPC_HWCFG_GMII_FIB (GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0) +#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | GPC_ANEG_1 | GPC_ANEG_0) + +/* forced speed and duplex mode (don't mix with other ANEG bits) */ +#define GPC_FRC10MBIT_HALF 0 +#define GPC_FRC10MBIT_FULL GPC_ANEG_0 +#define GPC_FRC100MBIT_HALF GPC_ANEG_1 +#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1) + +/* auto-negotiation with limited advertised speeds */ +/* mix only with master/slave settings (for copper) */ +#define GPC_ADV_1000_HALF GPC_ANEG_2 +#define GPC_ADV_1000_FULL GPC_ANEG_3 +#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3) + +/* master/slave settings */ +/* only for copper with 1000 Mbps */ +#define GPC_FORCE_MASTER 0 +#define GPC_FORCE_SLAVE GPC_ANEG_0 +#define GPC_PREF_MASTER GPC_ANEG_1 +#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0) + +/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */ +/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */ +enum { + GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */ + GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */ + GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */ + GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */ + GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ + GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ + +#define GMAC_DEF_MSK (GM_IS_RX_FF_OR | GM_IS_TX_FF_UR) + +/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ + /* Bits 15.. 
2: reserved */ + GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */ + GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */ + + +/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ + WOL_CTL_LINK_CHG_OCC = 1<<15, + WOL_CTL_MAGIC_PKT_OCC = 1<<14, + WOL_CTL_PATTERN_OCC = 1<<13, + WOL_CTL_CLEAR_RESULT = 1<<12, + WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11, + WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10, + WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9, + WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8, + WOL_CTL_ENA_PME_ON_PATTERN = 1<<7, + WOL_CTL_DIS_PME_ON_PATTERN = 1<<6, + WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5, + WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4, + WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3, + WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2, + WOL_CTL_ENA_PATTERN_UNIT = 1<<1, + WOL_CTL_DIS_PATTERN_UNIT = 1<<0, +}; + +#define WOL_CTL_DEFAULT \ + (WOL_CTL_DIS_PME_ON_LINK_CHG | \ + WOL_CTL_DIS_PME_ON_PATTERN | \ + WOL_CTL_DIS_PME_ON_MAGIC_PKT | \ + WOL_CTL_DIS_LINK_CHG_UNIT | \ + WOL_CTL_DIS_PATTERN_UNIT | \ + WOL_CTL_DIS_MAGIC_PKT_UNIT) + +/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */ +#define WOL_CTL_PATT_ENA(x) (1 << (x)) + + +/* XMAC II registers */ +enum { + XM_MMU_CMD = 0x0000, /* 16 bit r/w MMU Command Register */ + XM_POFF = 0x0008, /* 32 bit r/w Packet Offset Register */ + XM_BURST = 0x000c, /* 32 bit r/w Burst Register for half duplex*/ + XM_1L_VLAN_TAG = 0x0010, /* 16 bit r/w One Level VLAN Tag ID */ + XM_2L_VLAN_TAG = 0x0014, /* 16 bit r/w Two Level VLAN Tag ID */ + XM_TX_CMD = 0x0020, /* 16 bit r/w Transmit Command Register */ + XM_TX_RT_LIM = 0x0024, /* 16 bit r/w Transmit Retry Limit Register */ + XM_TX_STIME = 0x0028, /* 16 bit r/w Transmit Slottime Register */ + XM_TX_IPG = 0x002c, /* 16 bit r/w Transmit Inter Packet Gap */ + XM_RX_CMD = 0x0030, /* 16 bit r/w Receive Command Register */ + XM_PHY_ADDR = 0x0034, /* 16 bit r/w PHY Address Register */ + XM_PHY_DATA = 0x0038, /* 16 bit r/w PHY Data Register */ + XM_GP_PORT = 0x0040, /* 32 bit r/w General Purpose Port Register */ + XM_IMSK = 0x0044, /* 16 bit r/w Interrupt Mask Register */ + XM_ISRC = 0x0048, /* 16 bit r/o Interrupt Status Register */ + XM_HW_CFG = 0x004c, /* 16 bit r/w Hardware Config Register */ + XM_TX_LO_WM = 0x0060, /* 16 bit r/w Tx FIFO Low Water Mark */ + XM_TX_HI_WM = 0x0062, /* 16 bit r/w Tx FIFO High Water Mark */ + XM_TX_THR = 0x0064, /* 16 bit r/w Tx Request Threshold */ + XM_HT_THR = 0x0066, /* 16 bit r/w Host Request Threshold */ + XM_PAUSE_DA = 0x0068, /* NA reg r/w Pause Destination Address */ + XM_CTL_PARA = 0x0070, /* 32 bit r/w Control Parameter Register */ + XM_MAC_OPCODE = 0x0074, /* 16 bit r/w Opcode for MAC control frames */ + XM_MAC_PTIME = 0x0076, /* 16 bit r/w Pause time for MAC ctrl frames*/ + XM_TX_STAT = 0x0078, /* 32 bit r/o Tx Status LIFO Register */ + + XM_EXM_START = 0x0080, /* r/w Start Address of the EXM Regs */ +#define XM_EXM(reg) (XM_EXM_START + ((reg) << 3)) +}; + +enum { + XM_SRC_CHK = 0x0100, /* NA reg r/w Source Check Address Register */ + XM_SA = 0x0108, /* NA reg r/w Station Address Register */ + XM_HSM = 0x0110, /* 64 bit r/w Hash Match Address Registers */ + XM_RX_LO_WM = 0x0118, /* 16 bit r/w Receive Low Water Mark */ + XM_RX_HI_WM = 0x011a, /* 16 bit r/w Receive High Water Mark */ + XM_RX_THR = 0x011c, /* 32 bit r/w Receive Request Threshold */ + XM_DEV_ID = 0x0120, /* 32 bit r/o Device ID Register */ + XM_MODE = 0x0124, /* 32 bit r/w Mode Register */ + XM_LSA = 0x0128, /* NA reg r/o Last Source Register */ + XM_TS_READ = 0x0130, /* 32 bit r/o Time Stamp Read Register */ + XM_TS_LOAD = 0x0134, /* 32 bit r/o Time Stamp Load Value */ + 
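+ /* Example (a sketch, using the xm_read32() accessor defined near the end
+ * of this header): each 32-bit statistics counter below is read as two
+ * 16-bit halves, low word at the listed offset, high word 2 bytes above:
+ * u32 tx_ok = xm_read32(hw, port, XM_TXF_OK);
+ */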
XM_STAT_CMD = 0x0200, /* 16 bit r/w Statistics Command Register */ + XM_RX_CNT_EV = 0x0204, /* 32 bit r/o Rx Counter Event Register */ + XM_TX_CNT_EV = 0x0208, /* 32 bit r/o Tx Counter Event Register */ + XM_RX_EV_MSK = 0x020c, /* 32 bit r/w Rx Counter Event Mask */ + XM_TX_EV_MSK = 0x0210, /* 32 bit r/w Tx Counter Event Mask */ + XM_TXF_OK = 0x0280, /* 32 bit r/o Frames Transmitted OK Conuter */ + XM_TXO_OK_HI = 0x0284, /* 32 bit r/o Octets Transmitted OK High Cnt*/ + XM_TXO_OK_LO = 0x0288, /* 32 bit r/o Octets Transmitted OK Low Cnt */ + XM_TXF_BC_OK = 0x028c, /* 32 bit r/o Broadcast Frames Xmitted OK */ + XM_TXF_MC_OK = 0x0290, /* 32 bit r/o Multicast Frames Xmitted OK */ + XM_TXF_UC_OK = 0x0294, /* 32 bit r/o Unicast Frames Xmitted OK */ + XM_TXF_LONG = 0x0298, /* 32 bit r/o Tx Long Frame Counter */ + XM_TXE_BURST = 0x029c, /* 32 bit r/o Tx Burst Event Counter */ + XM_TXF_MPAUSE = 0x02a0, /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */ + XM_TXF_MCTRL = 0x02a4, /* 32 bit r/o Tx MAC Ctrl Frame Counter */ + XM_TXF_SNG_COL = 0x02a8, /* 32 bit r/o Tx Single Collision Counter */ + XM_TXF_MUL_COL = 0x02ac, /* 32 bit r/o Tx Multiple Collision Counter */ + XM_TXF_ABO_COL = 0x02b0, /* 32 bit r/o Tx aborted due to Exces. Col. */ + XM_TXF_LAT_COL = 0x02b4, /* 32 bit r/o Tx Late Collision Counter */ + XM_TXF_DEF = 0x02b8, /* 32 bit r/o Tx Deferred Frame Counter */ + XM_TXF_EX_DEF = 0x02bc, /* 32 bit r/o Tx Excessive Deferall Counter */ + XM_TXE_FIFO_UR = 0x02c0, /* 32 bit r/o Tx FIFO Underrun Event Cnt */ + XM_TXE_CS_ERR = 0x02c4, /* 32 bit r/o Tx Carrier Sense Error Cnt */ + XM_TXP_UTIL = 0x02c8, /* 32 bit r/o Tx Utilization in % */ + XM_TXF_64B = 0x02d0, /* 32 bit r/o 64 Byte Tx Frame Counter */ + XM_TXF_127B = 0x02d4, /* 32 bit r/o 65-127 Byte Tx Frame Counter */ + XM_TXF_255B = 0x02d8, /* 32 bit r/o 128-255 Byte Tx Frame Counter */ + XM_TXF_511B = 0x02dc, /* 32 bit r/o 256-511 Byte Tx Frame Counter */ + XM_TXF_1023B = 0x02e0, /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/ + XM_TXF_MAX_SZ = 0x02e4, /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/ + XM_RXF_OK = 0x0300, /* 32 bit r/o Frames Received OK */ + XM_RXO_OK_HI = 0x0304, /* 32 bit r/o Octets Received OK High Cnt */ + XM_RXO_OK_LO = 0x0308, /* 32 bit r/o Octets Received OK Low Counter*/ + XM_RXF_BC_OK = 0x030c, /* 32 bit r/o Broadcast Frames Received OK */ + XM_RXF_MC_OK = 0x0310, /* 32 bit r/o Multicast Frames Received OK */ + XM_RXF_UC_OK = 0x0314, /* 32 bit r/o Unicast Frames Received OK */ + XM_RXF_MPAUSE = 0x0318, /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */ + XM_RXF_MCTRL = 0x031c, /* 32 bit r/o Rx MAC Ctrl Frame Counter */ + XM_RXF_INV_MP = 0x0320, /* 32 bit r/o Rx invalid Pause Frame Cnt */ + XM_RXF_INV_MOC = 0x0324, /* 32 bit r/o Rx Frames with inv. 
MAC Opcode*/ + XM_RXE_BURST = 0x0328, /* 32 bit r/o Rx Burst Event Counter */ + XM_RXE_FMISS = 0x032c, /* 32 bit r/o Rx Missed Frames Event Cnt */ + XM_RXF_FRA_ERR = 0x0330, /* 32 bit r/o Rx Framing Error Counter */ + XM_RXE_FIFO_OV = 0x0334, /* 32 bit r/o Rx FIFO overflow Event Cnt */ + XM_RXF_JAB_PKT = 0x0338, /* 32 bit r/o Rx Jabber Packet Frame Cnt */ + XM_RXE_CAR_ERR = 0x033c, /* 32 bit r/o Rx Carrier Event Error Cnt */ + XM_RXF_LEN_ERR = 0x0340, /* 32 bit r/o Rx in Range Length Error */ + XM_RXE_SYM_ERR = 0x0344, /* 32 bit r/o Rx Symbol Error Counter */ + XM_RXE_SHT_ERR = 0x0348, /* 32 bit r/o Rx Short Event Error Cnt */ + XM_RXE_RUNT = 0x034c, /* 32 bit r/o Rx Runt Event Counter */ + XM_RXF_LNG_ERR = 0x0350, /* 32 bit r/o Rx Frame too Long Error Cnt */ + XM_RXF_FCS_ERR = 0x0354, /* 32 bit r/o Rx Frame Check Seq. Error Cnt */ + XM_RXF_CEX_ERR = 0x035c, /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/ + XM_RXP_UTIL = 0x0360, /* 32 bit r/o Rx Utilization in % */ + XM_RXF_64B = 0x0368, /* 32 bit r/o 64 Byte Rx Frame Counter */ + XM_RXF_127B = 0x036c, /* 32 bit r/o 65-127 Byte Rx Frame Counter */ + XM_RXF_255B = 0x0370, /* 32 bit r/o 128-255 Byte Rx Frame Counter */ + XM_RXF_511B = 0x0374, /* 32 bit r/o 256-511 Byte Rx Frame Counter */ + XM_RXF_1023B = 0x0378, /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/ + XM_RXF_MAX_SZ = 0x037c, /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/ +}; + +/* XM_MMU_CMD 16 bit r/w MMU Command Register */ +enum { + XM_MMU_PHY_RDY = 1<<12, /* Bit 12: PHY Read Ready */ + XM_MMU_PHY_BUSY = 1<<11, /* Bit 11: PHY Busy */ + XM_MMU_IGN_PF = 1<<10, /* Bit 10: Ignore Pause Frame */ + XM_MMU_MAC_LB = 1<<9, /* Bit 9: Enable MAC Loopback */ + XM_MMU_FRC_COL = 1<<7, /* Bit 7: Force Collision */ + XM_MMU_SIM_COL = 1<<6, /* Bit 6: Simulate Collision */ + XM_MMU_NO_PRE = 1<<5, /* Bit 5: No MDIO Preamble */ + XM_MMU_GMII_FD = 1<<4, /* Bit 4: GMII uses Full Duplex */ + XM_MMU_RAT_CTRL = 1<<3, /* Bit 3: Enable Rate Control */ + XM_MMU_GMII_LOOP= 1<<2, /* Bit 2: PHY is in Loopback Mode */ + XM_MMU_ENA_RX = 1<<1, /* Bit 1: Enable Receiver */ + XM_MMU_ENA_TX = 1<<0, /* Bit 0: Enable Transmitter */ +}; + + +/* XM_TX_CMD 16 bit r/w Transmit Command Register */ +enum { + XM_TX_BK2BK = 1<<6, /* Bit 6: Ignor Carrier Sense (Tx Bk2Bk)*/ + XM_TX_ENC_BYP = 1<<5, /* Bit 5: Set Encoder in Bypass Mode */ + XM_TX_SAM_LINE = 1<<4, /* Bit 4: (sc) Start utilization calculation */ + XM_TX_NO_GIG_MD = 1<<3, /* Bit 3: Disable Carrier Extension */ + XM_TX_NO_PRE = 1<<2, /* Bit 2: Disable Preamble Generation */ + XM_TX_NO_CRC = 1<<1, /* Bit 1: Disable CRC Generation */ + XM_TX_AUTO_PAD = 1<<0, /* Bit 0: Enable Automatic Padding */ +}; + +/* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */ +#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */ + + +/* XM_TX_STIME 16 bit r/w Transmit Slottime Register */ +#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */ + + +/* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */ +#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */ + + +/* XM_RX_CMD 16 bit r/w Receive Command Register */ +enum { + XM_RX_LENERR_OK = 1<<8, /* Bit 8 don't set Rx Err bit for */ + /* inrange error packets */ + XM_RX_BIG_PK_OK = 1<<7, /* Bit 7 don't set Rx Err bit for */ + /* jumbo packets */ + XM_RX_IPG_CAP = 1<<6, /* Bit 6 repl. 
type field with IPG */ + XM_RX_TP_MD = 1<<5, /* Bit 5: Enable transparent Mode */ + XM_RX_STRIP_FCS = 1<<4, /* Bit 4: Enable FCS Stripping */ + XM_RX_SELF_RX = 1<<3, /* Bit 3: Enable Rx of own packets */ + XM_RX_SAM_LINE = 1<<2, /* Bit 2: (sc) Start utilization calculation */ + XM_RX_STRIP_PAD = 1<<1, /* Bit 1: Strip pad bytes of Rx frames */ + XM_RX_DIS_CEXT = 1<<0, /* Bit 0: Disable carrier ext. check */ +}; + + +/* XM_GP_PORT 32 bit r/w General Purpose Port Register */ +enum { + XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */ + XM_GP_FRC_INT = 1<<5, /* Bit 5: (sc) Force Interrupt */ + XM_GP_RES_MAC = 1<<3, /* Bit 3: (sc) Reset MAC and FIFOs */ + XM_GP_RES_STAT = 1<<2, /* Bit 2: (sc) Reset the statistics module */ + XM_GP_INP_ASS = 1<<0, /* Bit 0: (ro) GP Input Pin asserted */ +}; + + +/* XM_IMSK 16 bit r/w Interrupt Mask Register */ +/* XM_ISRC 16 bit r/o Interrupt Status Register */ +enum { + XM_IS_LNK_AE = 1<<14, /* Bit 14: Link Asynchronous Event */ + XM_IS_TX_ABORT = 1<<13, /* Bit 13: Transmit Abort, late Col. etc */ + XM_IS_FRC_INT = 1<<12, /* Bit 12: Force INT bit set in GP */ + XM_IS_INP_ASS = 1<<11, /* Bit 11: Input Asserted, GP bit 0 set */ + XM_IS_LIPA_RC = 1<<10, /* Bit 10: Link Partner requests config */ + XM_IS_RX_PAGE = 1<<9, /* Bit 9: Page Received */ + XM_IS_TX_PAGE = 1<<8, /* Bit 8: Next Page Loaded for Transmit */ + XM_IS_AND = 1<<7, /* Bit 7: Auto-Negotiation Done */ + XM_IS_TSC_OV = 1<<6, /* Bit 6: Time Stamp Counter Overflow */ + XM_IS_RXC_OV = 1<<5, /* Bit 5: Rx Counter Event Overflow */ + XM_IS_TXC_OV = 1<<4, /* Bit 4: Tx Counter Event Overflow */ + XM_IS_RXF_OV = 1<<3, /* Bit 3: Receive FIFO Overflow */ + XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */ + XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */ + XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */ + + XM_IMSK_DISABLE = 0xffff, +}; + +/* XM_HW_CFG 16 bit r/w Hardware Config Register */ +enum { + XM_HW_GEN_EOP = 1<<3, /* Bit 3: generate End of Packet pulse */ + XM_HW_COM4SIG = 1<<2, /* Bit 2: use Comma Detect for Sig. Det.*/ + XM_HW_GMII_MD = 1<<0, /* Bit 0: GMII Interface selected */ +}; + + +/* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */ +/* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */ +#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */ + +/* XM_TX_THR 16 bit r/w Tx Request Threshold */ +/* XM_HT_THR 16 bit r/w Host Request Threshold */ +/* XM_RX_THR 16 bit r/w Rx Request Threshold */ +#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */ + + +/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */ +enum { + XM_ST_VALID = (1UL<<31), /* Bit 31: Status Valid */ + XM_ST_BYTE_CNT = (0x3fffL<<17), /* Bit 30..17: Tx frame Length */ + XM_ST_RETRY_CNT = (0x1fL<<12), /* Bit 16..12: Retry Count */ + XM_ST_EX_COL = 1<<11, /* Bit 11: Excessive Collisions */ + XM_ST_EX_DEF = 1<<10, /* Bit 10: Excessive Deferral */ + XM_ST_BURST = 1<<9, /* Bit 9: p. 
xmitted in burst md*/ + XM_ST_DEFER = 1<<8, /* Bit 8: packet was defered */ + XM_ST_BC = 1<<7, /* Bit 7: Broadcast packet */ + XM_ST_MC = 1<<6, /* Bit 6: Multicast packet */ + XM_ST_UC = 1<<5, /* Bit 5: Unicast packet */ + XM_ST_TX_UR = 1<<4, /* Bit 4: FIFO Underrun occurred */ + XM_ST_CS_ERR = 1<<3, /* Bit 3: Carrier Sense Error */ + XM_ST_LAT_COL = 1<<2, /* Bit 2: Late Collision Error */ + XM_ST_MUL_COL = 1<<1, /* Bit 1: Multiple Collisions */ + XM_ST_SGN_COL = 1<<0, /* Bit 0: Single Collision */ +}; + +/* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */ +/* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */ +#define XM_RX_WM_MSK 0x03ff /* Bit 11.. 0: Rx FIFO Watermark bits */ + + +/* XM_DEV_ID 32 bit r/o Device ID Register */ +#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */ +#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */ + + +/* XM_MODE 32 bit r/w Mode Register */ +enum { + XM_MD_ENA_REJ = 1<<26, /* Bit 26: Enable Frame Reject */ + XM_MD_SPOE_E = 1<<25, /* Bit 25: Send Pause on Edge */ + /* extern generated */ + XM_MD_TX_REP = 1<<24, /* Bit 24: Transmit Repeater Mode */ + XM_MD_SPOFF_I = 1<<23, /* Bit 23: Send Pause on FIFO full */ + /* intern generated */ + XM_MD_LE_STW = 1<<22, /* Bit 22: Rx Stat Word in Little Endian */ + XM_MD_TX_CONT = 1<<21, /* Bit 21: Send Continuous */ + XM_MD_TX_PAUSE = 1<<20, /* Bit 20: (sc) Send Pause Frame */ + XM_MD_ATS = 1<<19, /* Bit 19: Append Time Stamp */ + XM_MD_SPOL_I = 1<<18, /* Bit 18: Send Pause on Low */ + /* intern generated */ + XM_MD_SPOH_I = 1<<17, /* Bit 17: Send Pause on High */ + /* intern generated */ + XM_MD_CAP = 1<<16, /* Bit 16: Check Address Pair */ + XM_MD_ENA_HASH = 1<<15, /* Bit 15: Enable Hashing */ + XM_MD_CSA = 1<<14, /* Bit 14: Check Station Address */ + XM_MD_CAA = 1<<13, /* Bit 13: Check Address Array */ + XM_MD_RX_MCTRL = 1<<12, /* Bit 12: Rx MAC Control Frame */ + XM_MD_RX_RUNT = 1<<11, /* Bit 11: Rx Runt Frames */ + XM_MD_RX_IRLE = 1<<10, /* Bit 10: Rx in Range Len Err Frame */ + XM_MD_RX_LONG = 1<<9, /* Bit 9: Rx Long Frame */ + XM_MD_RX_CRCE = 1<<8, /* Bit 8: Rx CRC Error Frame */ + XM_MD_RX_ERR = 1<<7, /* Bit 7: Rx Error Frame */ + XM_MD_DIS_UC = 1<<6, /* Bit 6: Disable Rx Unicast */ + XM_MD_DIS_MC = 1<<5, /* Bit 5: Disable Rx Multicast */ + XM_MD_DIS_BC = 1<<4, /* Bit 4: Disable Rx Broadcast */ + XM_MD_ENA_PROM = 1<<3, /* Bit 3: Enable Promiscuous */ + XM_MD_ENA_BE = 1<<2, /* Bit 2: Enable Big Endian */ + XM_MD_FTF = 1<<1, /* Bit 1: (sc) Flush Tx FIFO */ + XM_MD_FRF = 1<<0, /* Bit 0: (sc) Flush Rx FIFO */ +}; + +#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I) +#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\ + XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA) + +/* XM_STAT_CMD 16 bit r/w Statistics Command Register */ +enum { + XM_SC_SNP_RXC = 1<<5, /* Bit 5: (sc) Snap Rx Counters */ + XM_SC_SNP_TXC = 1<<4, /* Bit 4: (sc) Snap Tx Counters */ + XM_SC_CP_RXC = 1<<3, /* Bit 3: Copy Rx Counters Continuously */ + XM_SC_CP_TXC = 1<<2, /* Bit 2: Copy Tx Counters Continuously */ + XM_SC_CLR_RXC = 1<<1, /* Bit 1: (sc) Clear Rx Counters */ + XM_SC_CLR_TXC = 1<<0, /* Bit 0: (sc) Clear Tx Counters */ +}; + + +/* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */ +/* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */ +enum { + XMR_MAX_SZ_OV = 1<<31, /* Bit 31: 1024-MaxSize Rx Cnt Ov*/ + XMR_1023B_OV = 1<<30, /* Bit 30: 512-1023Byte Rx Cnt Ov*/ + XMR_511B_OV = 1<<29, /* Bit 29: 256-511 Byte Rx Cnt Ov*/ + XMR_255B_OV = 1<<28, /* Bit 28: 128-255 Byte Rx Cnt Ov*/ + 
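+ /* Each bit below signals overflow of one Rx MIB counter; an event mask
+ * is built by OR-ing the bits of interest, as XMR_DEF_MSK does further
+ * down with the two octet-counter overflow bits. */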
XMR_127B_OV = 1<<27, /* Bit 27: 65-127 Byte Rx Cnt Ov */ + XMR_64B_OV = 1<<26, /* Bit 26: 64 Byte Rx Cnt Ov */ + XMR_UTIL_OV = 1<<25, /* Bit 25: Rx Util Cnt Overflow */ + XMR_UTIL_UR = 1<<24, /* Bit 24: Rx Util Cnt Underrun */ + XMR_CEX_ERR_OV = 1<<23, /* Bit 23: CEXT Err Cnt Ov */ + XMR_FCS_ERR_OV = 1<<21, /* Bit 21: Rx FCS Error Cnt Ov */ + XMR_LNG_ERR_OV = 1<<20, /* Bit 20: Rx too Long Err Cnt Ov*/ + XMR_RUNT_OV = 1<<19, /* Bit 19: Runt Event Cnt Ov */ + XMR_SHT_ERR_OV = 1<<18, /* Bit 18: Rx Short Ev Err Cnt Ov*/ + XMR_SYM_ERR_OV = 1<<17, /* Bit 17: Rx Sym Err Cnt Ov */ + XMR_CAR_ERR_OV = 1<<15, /* Bit 15: Rx Carr Ev Err Cnt Ov */ + XMR_JAB_PKT_OV = 1<<14, /* Bit 14: Rx Jabb Packet Cnt Ov */ + XMR_FIFO_OV = 1<<13, /* Bit 13: Rx FIFO Ov Ev Cnt Ov */ + XMR_FRA_ERR_OV = 1<<12, /* Bit 12: Rx Framing Err Cnt Ov */ + XMR_FMISS_OV = 1<<11, /* Bit 11: Rx Missed Ev Cnt Ov */ + XMR_BURST = 1<<10, /* Bit 10: Rx Burst Event Cnt Ov */ + XMR_INV_MOC = 1<<9, /* Bit 9: Rx with inv. MAC OC Ov*/ + XMR_INV_MP = 1<<8, /* Bit 8: Rx inv Pause Frame Ov */ + XMR_MCTRL_OV = 1<<7, /* Bit 7: Rx MAC Ctrl-F Cnt Ov */ + XMR_MPAUSE_OV = 1<<6, /* Bit 6: Rx Pause MAC Ctrl-F Ov*/ + XMR_UC_OK_OV = 1<<5, /* Bit 5: Rx Unicast Frame CntOv*/ + XMR_MC_OK_OV = 1<<4, /* Bit 4: Rx Multicast Cnt Ov */ + XMR_BC_OK_OV = 1<<3, /* Bit 3: Rx Broadcast Cnt Ov */ + XMR_OK_LO_OV = 1<<2, /* Bit 2: Octets Rx OK Low CntOv*/ + XMR_OK_HI_OV = 1<<1, /* Bit 1: Octets Rx OK Hi Cnt Ov*/ + XMR_OK_OV = 1<<0, /* Bit 0: Frames Received Ok Ov */ +}; + +#define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV) + +/* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */ +/* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */ +enum { + XMT_MAX_SZ_OV = 1<<25, /* Bit 25: 1024-MaxSize Tx Cnt Ov*/ + XMT_1023B_OV = 1<<24, /* Bit 24: 512-1023Byte Tx Cnt Ov*/ + XMT_511B_OV = 1<<23, /* Bit 23: 256-511 Byte Tx Cnt Ov*/ + XMT_255B_OV = 1<<22, /* Bit 22: 128-255 Byte Tx Cnt Ov*/ + XMT_127B_OV = 1<<21, /* Bit 21: 65-127 Byte Tx Cnt Ov */ + XMT_64B_OV = 1<<20, /* Bit 20: 64 Byte Tx Cnt Ov */ + XMT_UTIL_OV = 1<<19, /* Bit 19: Tx Util Cnt Overflow */ + XMT_UTIL_UR = 1<<18, /* Bit 18: Tx Util Cnt Underrun */ + XMT_CS_ERR_OV = 1<<17, /* Bit 17: Tx Carr Sen Err Cnt Ov*/ + XMT_FIFO_UR_OV = 1<<16, /* Bit 16: Tx FIFO Ur Ev Cnt Ov */ + XMT_EX_DEF_OV = 1<<15, /* Bit 15: Tx Ex Deferall Cnt Ov */ + XMT_DEF = 1<<14, /* Bit 14: Tx Deferred Cnt Ov */ + XMT_LAT_COL_OV = 1<<13, /* Bit 13: Tx Late Col Cnt Ov */ + XMT_ABO_COL_OV = 1<<12, /* Bit 12: Tx abo dueto Ex Col Ov*/ + XMT_MUL_COL_OV = 1<<11, /* Bit 11: Tx Mult Col Cnt Ov */ + XMT_SNG_COL = 1<<10, /* Bit 10: Tx Single Col Cnt Ov */ + XMT_MCTRL_OV = 1<<9, /* Bit 9: Tx MAC Ctrl Counter Ov*/ + XMT_MPAUSE = 1<<8, /* Bit 8: Tx Pause MAC Ctrl-F Ov*/ + XMT_BURST = 1<<7, /* Bit 7: Tx Burst Event Cnt Ov */ + XMT_LONG = 1<<6, /* Bit 6: Tx Long Frame Cnt Ov */ + XMT_UC_OK_OV = 1<<5, /* Bit 5: Tx Unicast Cnt Ov */ + XMT_MC_OK_OV = 1<<4, /* Bit 4: Tx Multicast Cnt Ov */ + XMT_BC_OK_OV = 1<<3, /* Bit 3: Tx Broadcast Cnt Ov */ + XMT_OK_LO_OV = 1<<2, /* Bit 2: Octets Tx OK Low CntOv*/ + XMT_OK_HI_OV = 1<<1, /* Bit 1: Octets Tx OK Hi Cnt Ov*/ + XMT_OK_OV = 1<<0, /* Bit 0: Frames Tx Ok Ov */ +}; + +#define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV) + +struct skge_rx_desc { + u32 control; + u32 next_offset; + u32 dma_lo; + u32 dma_hi; + u32 status; + u32 timestamp; + u16 csum2; + u16 csum1; + u16 csum2_start; + u16 csum1_start; +}; + +struct skge_tx_desc { + u32 control; + u32 next_offset; + u32 dma_lo; + u32 dma_hi; + u32 status; + u32 csum_offs; + u16 
csum_write; + u16 csum_start; + u32 rsvd; +}; + +struct skge_element { + struct skge_element *next; + void *desc; + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +struct skge_ring { + struct skge_element *to_clean; + struct skge_element *to_use; + struct skge_element *start; + unsigned long count; +}; + + +struct skge_hw { + void __iomem *regs; + struct pci_dev *pdev; + spinlock_t hw_lock; + u32 intr_mask; + struct net_device *dev[2]; + + u8 chip_id; + u8 chip_rev; + u8 copper; + u8 ports; + u8 phy_type; + + u32 ram_size; + u32 ram_offset; + u16 phy_addr; + spinlock_t phy_lock; + struct tasklet_struct phy_task; + + char irq_name[0]; /* skge@pci:000:04:00.0 */ +}; + +enum pause_control { + FLOW_MODE_NONE = 1, /* No Flow-Control */ + FLOW_MODE_LOC_SEND = 2, /* Local station sends PAUSE */ + FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */ + FLOW_MODE_SYM_OR_REM = 4, /* Both stations may send PAUSE or + * just the remote station may send PAUSE + */ +}; + +enum pause_status { + FLOW_STAT_INDETERMINATED=0, /* indeterminated */ + FLOW_STAT_NONE, /* No Flow Control */ + FLOW_STAT_REM_SEND, /* Remote Station sends PAUSE */ + FLOW_STAT_LOC_SEND, /* Local station sends PAUSE */ + FLOW_STAT_SYMMETRIC, /* Both station may send PAUSE */ +}; + + +struct skge_port { + struct skge_hw *hw; + struct net_device *netdev; + struct napi_struct napi; + int port; + u32 msg_enable; + + struct skge_ring tx_ring; + + struct skge_ring rx_ring ____cacheline_aligned_in_smp; + unsigned int rx_buf_size; + + struct timer_list link_timer; + enum pause_control flow_control; + enum pause_status flow_status; + u8 blink_on; + u8 wol; + u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */ + u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ + u16 speed; /* SPEED_1000, SPEED_100, ... */ + u32 advertising; + + void *mem; /* PCI memory for rings */ + dma_addr_t dma; + unsigned long mem_size; +#ifdef CONFIG_SKGE_DEBUG + struct dentry *debugfs; +#endif +}; + + +/* Register accessor for memory mapped device */ +static inline u32 skge_read32(const struct skge_hw *hw, int reg) +{ + return readl(hw->regs + reg); +} + +static inline u16 skge_read16(const struct skge_hw *hw, int reg) +{ + return readw(hw->regs + reg); +} + +static inline u8 skge_read8(const struct skge_hw *hw, int reg) +{ + return readb(hw->regs + reg); +} + +static inline void skge_write32(const struct skge_hw *hw, int reg, u32 val) +{ + writel(val, hw->regs + reg); +} + +static inline void skge_write16(const struct skge_hw *hw, int reg, u16 val) +{ + writew(val, hw->regs + reg); +} + +static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val) +{ + writeb(val, hw->regs + reg); +} + +/* MAC Related Registers inside the device. 
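Each port's MAC block occupies its own 0x80-byte window, hence the
((port)<<7) term in SK_REG() below; XMAC registers are word-addressed,
hence the (reg) << 1 shift in SK_XMAC_REG().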
*/ +#define SK_REG(port,reg) (((port)<<7)+(u16)(reg)) +#define SK_XMAC_REG(port, reg) \ + ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1) + +static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg) +{ + u32 v; + v = skge_read16(hw, SK_XMAC_REG(port, reg)); + v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16; + return v; +} + +static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg) +{ + return skge_read16(hw, SK_XMAC_REG(port,reg)); +} + +static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v) +{ + skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff); + skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16); +} + +static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v) +{ + skge_write16(hw, SK_XMAC_REG(port,r), v); +} + +static inline void xm_outhash(const struct skge_hw *hw, int port, int reg, + const u8 *hash) +{ + xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8)); + xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8)); + xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8)); + xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8)); +} + +static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg, + const u8 *addr) +{ + xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8)); + xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8)); + xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8)); +} + +#define SK_GMAC_REG(port,reg) \ + (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg)) + +static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg) +{ + return skge_read16(hw, SK_GMAC_REG(port,reg)); +} + +static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg) +{ + return (u32) skge_read16(hw, SK_GMAC_REG(port,reg)) + | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16); +} + +static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v) +{ + skge_write16(hw, SK_GMAC_REG(port,r), v); +} + +static inline void gma_set_addr(struct skge_hw *hw, int port, int reg, + const u8 *addr) +{ + gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8)); + gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); + gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); +} + +#endif diff --git a/addons/skge/src/4.4.180/Makefile b/addons/skge/src/4.4.180/Makefile new file mode 100644 index 00000000..f0ca3939 --- /dev/null +++ b/addons/skge/src/4.4.180/Makefile @@ -0,0 +1 @@ +obj-m += skge.o diff --git a/addons/skge/src/4.4.180/skge.c b/addons/skge/src/4.4.180/skge.c new file mode 100644 index 00000000..c9f4b541 --- /dev/null +++ b/addons/skge/src/4.4.180/skge.c @@ -0,0 +1,4228 @@ +/* + * New driver for Marvell Yukon chipset and SysKonnect Gigabit + * Ethernet adapters. Based on earlier sk98lin, e100 and + * FreeBSD if_sk drivers. + * + * This driver intentionally does not support all the features + * of the original driver such as link fail-over and link management because + * those should be done at higher levels. + * + * Copyright (C) 2004, 2005 Stephen Hemminger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/debugfs.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/mii.h>
+#include <linux/slab.h>
+#include <linux/dmi.h>
+#include <linux/prefetch.h>
+#include <asm/irq.h>
+
+#include "skge.h"
+
+#define DRV_NAME "skge"
+#define DRV_VERSION "1.14"
+
+#define DEFAULT_TX_RING_SIZE 128
+#define DEFAULT_RX_RING_SIZE 512
+#define MAX_TX_RING_SIZE 1024
+#define TX_LOW_WATER (MAX_SKB_FRAGS + 1)
+#define MAX_RX_RING_SIZE 4096
+#define RX_COPY_THRESHOLD 128
+#define RX_BUF_SIZE 1536
+#define PHY_RETRIES 1000
+#define ETH_JUMBO_MTU 9000
+#define TX_WATCHDOG (5 * HZ)
+#define NAPI_WEIGHT 64
+#define BLINK_MS 250
+#define LINK_HZ HZ
+
+#define SKGE_EEPROM_MAGIC 0x9933aabb
+
+
+MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
+MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN);
+
+static int debug = -1; /* defaults above */
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+static const struct pci_device_id skge_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x1700) }, /* 3Com 3C940 */
+ { PCI_DEVICE(PCI_VENDOR_ID_3COM, 0x80EB) }, /* 3Com 3C940B */
+#ifdef CONFIG_SKGE_GENESIS
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4300) }, /* SK-9xx */
+#endif
+ { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x4320) }, /* SK-98xx V2.0 */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* D-Link DGE-530T (rev.B) */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4c00) }, /* D-Link DGE-530T */
+ { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302) }, /* D-Link DGE-530T Rev C1 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, /* Marvell Yukon 88E8001/8003/8010 */
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
+ { PCI_DEVICE(PCI_VENDOR_ID_CNET, 0x434E) }, /* CNet PowerG-2000 */
+ { PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, 0x1064) }, /* Linksys EG1064 v2 */
+ { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0015 }, /* Linksys EG1032 v2 */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, skge_id_table);
+
+static int skge_up(struct net_device *dev);
+static int skge_down(struct net_device *dev);
+static void skge_phy_reset(struct skge_port *skge);
+static void skge_tx_clean(struct net_device *dev);
+static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
+static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
+static void genesis_get_stats(struct skge_port *skge, u64 *data);
+static void yukon_get_stats(struct skge_port *skge, u64 *data);
+static void yukon_init(struct skge_hw *hw, int port);
+static void genesis_mac_init(struct skge_hw *hw, int port);
+static void genesis_link_up(struct skge_port *skge);
+static void skge_set_multicast(struct net_device *dev);
+static irqreturn_t skge_intr(int irq, void *dev_id);
+
+/* Avoid conditionals by using array */
+static const int txqaddr[] = { Q_XA1, Q_XA2 };
+static const int rxqaddr[] = { Q_R1, Q_R2 };
+static const u32
rxirqmask[] = { IS_R1_F, IS_R2_F }; +static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F }; +static const u32 napimask[] = { IS_R1_F|IS_XA1_F, IS_R2_F|IS_XA2_F }; +static const u32 portmask[] = { IS_PORT_1, IS_PORT_2 }; + +static inline bool is_genesis(const struct skge_hw *hw) +{ +#ifdef CONFIG_SKGE_GENESIS + return hw->chip_id == CHIP_ID_GENESIS; +#else + return false; +#endif +} + +static int skge_get_regs_len(struct net_device *dev) +{ + return 0x4000; +} + +/* + * Returns copy of whole control register region + * Note: skip RAM address register because accessing it will + * cause bus hangs! + */ +static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, + void *p) +{ + const struct skge_port *skge = netdev_priv(dev); + const void __iomem *io = skge->hw->regs; + + regs->version = 1; + memset(p, 0, regs->len); + memcpy_fromio(p, io, B3_RAM_ADDR); + + if (regs->len > B3_RI_WTO_R1) { + memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, + regs->len - B3_RI_WTO_R1); + } +} + +/* Wake on Lan only supported on Yukon chips with rev 1 or above */ +static u32 wol_supported(const struct skge_hw *hw) +{ + if (is_genesis(hw)) + return 0; + + if (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) + return 0; + + return WAKE_MAGIC | WAKE_PHY; +} + +static void skge_wol_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl; + + skge_write16(hw, B0_CTST, CS_RST_CLR); + skge_write16(hw, SK_REG(port, GMAC_LINK_CTRL), GMLC_RST_CLR); + + /* Turn on Vaux */ + skge_write8(hw, B0_POWER_CTRL, + PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_ON | PC_VCC_OFF); + + /* WA code for COMA mode -- clear PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + u32 reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9; + reg &= ~GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + skge_write32(hw, SK_REG(port, GPHY_CTRL), + GPC_DIS_SLEEP | + GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | + GPC_ANEG_1 | GPC_RST_SET); + + skge_write32(hw, SK_REG(port, GPHY_CTRL), + GPC_DIS_SLEEP | + GPC_HWCFG_M_3 | GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0 | + GPC_ANEG_1 | GPC_RST_CLR); + + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); + + /* Force to 10/100 skge_reset will re-enable on resume */ + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, + (PHY_AN_100FULL | PHY_AN_100HALF | + PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA)); + /* no 1000 HD/FD */ + gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_CTRL, + PHY_CT_RESET | PHY_CT_SPS_LSB | PHY_CT_ANE | + PHY_CT_RE_CFG | PHY_CT_DUP_MD); + + + /* Set GMAC to no flow control and auto update for speed/duplex */ + gma_write16(hw, port, GM_GP_CTRL, + GM_GPCR_FC_TX_DIS|GM_GPCR_TX_ENA|GM_GPCR_RX_ENA| + GM_GPCR_DUP_FULL|GM_GPCR_FC_RX_DIS|GM_GPCR_AU_FCT_DIS); + + /* Set WOL address */ + memcpy_toio(hw->regs + WOL_REGS(port, WOL_MAC_ADDR), + skge->netdev->dev_addr, ETH_ALEN); + + /* Turn on appropriate WOL control bits */ + skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), WOL_CTL_CLEAR_RESULT); + ctrl = 0; + if (skge->wol & WAKE_PHY) + ctrl |= WOL_CTL_ENA_PME_ON_LINK_CHG|WOL_CTL_ENA_LINK_CHG_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_LINK_CHG|WOL_CTL_DIS_LINK_CHG_UNIT; + + if (skge->wol & WAKE_MAGIC) + ctrl |= WOL_CTL_ENA_PME_ON_MAGIC_PKT|WOL_CTL_ENA_MAGIC_PKT_UNIT; + else + ctrl |= WOL_CTL_DIS_PME_ON_MAGIC_PKT|WOL_CTL_DIS_MAGIC_PKT_UNIT; + + ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT; + skge_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), 
ctrl); + + /* block receiver */ + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); +} + +static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct skge_port *skge = netdev_priv(dev); + + wol->supported = wol_supported(skge->hw); + wol->wolopts = skge->wol; +} + +static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + + if ((wol->wolopts & ~wol_supported(hw)) || + !device_can_wakeup(&hw->pdev->dev)) + return -EOPNOTSUPP; + + skge->wol = wol->wolopts; + + device_set_wakeup_enable(&hw->pdev->dev, skge->wol); + + return 0; +} + +/* Determine supported/advertised modes based on hardware. + * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx + */ +static u32 skge_supported_modes(const struct skge_hw *hw) +{ + u32 supported; + + if (hw->copper) { + supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_TP); + + if (is_genesis(hw)) + supported &= ~(SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full); + + else if (hw->chip_id == CHIP_ID_YUKON) + supported &= ~SUPPORTED_1000baseT_Half; + } else + supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_FIBRE | + SUPPORTED_Autoneg); + + return supported; +} + +static int skge_get_settings(struct net_device *dev, + struct ethtool_cmd *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + + ecmd->transceiver = XCVR_INTERNAL; + ecmd->supported = skge_supported_modes(hw); + + if (hw->copper) { + ecmd->port = PORT_TP; + ecmd->phy_address = hw->phy_addr; + } else + ecmd->port = PORT_FIBRE; + + ecmd->advertising = skge->advertising; + ecmd->autoneg = skge->autoneg; + ethtool_cmd_speed_set(ecmd, skge->speed); + ecmd->duplex = skge->duplex; + return 0; +} + +static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + const struct skge_hw *hw = skge->hw; + u32 supported = skge_supported_modes(hw); + int err = 0; + + if (ecmd->autoneg == AUTONEG_ENABLE) { + ecmd->advertising = supported; + skge->duplex = -1; + skge->speed = -1; + } else { + u32 setting; + u32 speed = ethtool_cmd_speed(ecmd); + + switch (speed) { + case SPEED_1000: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_1000baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_1000baseT_Half; + else + return -EINVAL; + break; + case SPEED_100: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_100baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_100baseT_Half; + else + return -EINVAL; + break; + + case SPEED_10: + if (ecmd->duplex == DUPLEX_FULL) + setting = SUPPORTED_10baseT_Full; + else if (ecmd->duplex == DUPLEX_HALF) + setting = SUPPORTED_10baseT_Half; + else + return -EINVAL; + break; + default: + return -EINVAL; + } + + if ((setting & supported) == 0) + return -EINVAL; + + skge->speed = speed; + skge->duplex = ecmd->duplex; + } + + skge->autoneg = ecmd->autoneg; + skge->advertising = ecmd->advertising; + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) { + dev_close(dev); + return err; + } + } + + return 0; +} + +static void skge_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct skge_port *skge = 
netdev_priv(dev); + + strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); + strlcpy(info->version, DRV_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, pci_name(skge->hw->pdev), + sizeof(info->bus_info)); +} + +static const struct skge_stat { + char name[ETH_GSTRING_LEN]; + u16 xmac_offset; + u16 gma_offset; +} skge_stats[] = { + { "tx_bytes", XM_TXO_OK_HI, GM_TXO_OK_HI }, + { "rx_bytes", XM_RXO_OK_HI, GM_RXO_OK_HI }, + + { "tx_broadcast", XM_TXF_BC_OK, GM_TXF_BC_OK }, + { "rx_broadcast", XM_RXF_BC_OK, GM_RXF_BC_OK }, + { "tx_multicast", XM_TXF_MC_OK, GM_TXF_MC_OK }, + { "rx_multicast", XM_RXF_MC_OK, GM_RXF_MC_OK }, + { "tx_unicast", XM_TXF_UC_OK, GM_TXF_UC_OK }, + { "rx_unicast", XM_RXF_UC_OK, GM_RXF_UC_OK }, + { "tx_mac_pause", XM_TXF_MPAUSE, GM_TXF_MPAUSE }, + { "rx_mac_pause", XM_RXF_MPAUSE, GM_RXF_MPAUSE }, + + { "collisions", XM_TXF_SNG_COL, GM_TXF_SNG_COL }, + { "multi_collisions", XM_TXF_MUL_COL, GM_TXF_MUL_COL }, + { "aborted", XM_TXF_ABO_COL, GM_TXF_ABO_COL }, + { "late_collision", XM_TXF_LAT_COL, GM_TXF_LAT_COL }, + { "fifo_underrun", XM_TXE_FIFO_UR, GM_TXE_FIFO_UR }, + { "fifo_overflow", XM_RXE_FIFO_OV, GM_RXE_FIFO_OV }, + + { "rx_toolong", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, + { "rx_jabber", XM_RXF_JAB_PKT, GM_RXF_JAB_PKT }, + { "rx_runt", XM_RXE_RUNT, GM_RXE_FRAG }, + { "rx_too_long", XM_RXF_LNG_ERR, GM_RXF_LNG_ERR }, + { "rx_fcs_error", XM_RXF_FCS_ERR, GM_RXF_FCS_ERR }, +}; + +static int skge_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(skge_stats); + default: + return -EOPNOTSUPP; + } +} + +static void skge_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct skge_port *skge = netdev_priv(dev); + + if (is_genesis(skge->hw)) + genesis_get_stats(skge, data); + else + yukon_get_stats(skge, data); +} + +/* Use hardware MIB variables for critical path statistics and + * transmit feedback not reported at interrupt. + * Other errors are accounted for in interrupt handler. 
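+ * (The data[] slots used below follow the order of the skge_stats[] table
+ * above: data[0]/data[1] are the byte counters, and data[2], data[4] and
+ * data[6] are the broadcast/multicast/unicast frame counts that sum to
+ * tx_packets.)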
+ */ +static struct net_device_stats *skge_get_stats(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + u64 data[ARRAY_SIZE(skge_stats)]; + + if (is_genesis(skge->hw)) + genesis_get_stats(skge, data); + else + yukon_get_stats(skge, data); + + dev->stats.tx_bytes = data[0]; + dev->stats.rx_bytes = data[1]; + dev->stats.tx_packets = data[2] + data[4] + data[6]; + dev->stats.rx_packets = data[3] + data[5] + data[7]; + dev->stats.multicast = data[3] + data[5]; + dev->stats.collisions = data[10]; + dev->stats.tx_aborted_errors = data[12]; + + return &dev->stats; +} + +static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < ARRAY_SIZE(skge_stats); i++) + memcpy(data + i * ETH_GSTRING_LEN, + skge_stats[i].name, ETH_GSTRING_LEN); + break; + } +} + +static void skge_get_ring_param(struct net_device *dev, + struct ethtool_ringparam *p) +{ + struct skge_port *skge = netdev_priv(dev); + + p->rx_max_pending = MAX_RX_RING_SIZE; + p->tx_max_pending = MAX_TX_RING_SIZE; + + p->rx_pending = skge->rx_ring.count; + p->tx_pending = skge->tx_ring.count; +} + +static int skge_set_ring_param(struct net_device *dev, + struct ethtool_ringparam *p) +{ + struct skge_port *skge = netdev_priv(dev); + int err = 0; + + if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || + p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE) + return -EINVAL; + + skge->rx_ring.count = p->rx_pending; + skge->tx_ring.count = p->tx_pending; + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) + dev_close(dev); + } + + return err; +} + +static u32 skge_get_msglevel(struct net_device *netdev) +{ + struct skge_port *skge = netdev_priv(netdev); + return skge->msg_enable; +} + +static void skge_set_msglevel(struct net_device *netdev, u32 value) +{ + struct skge_port *skge = netdev_priv(netdev); + skge->msg_enable = value; +} + +static int skge_nway_reset(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + if (skge->autoneg != AUTONEG_ENABLE || !netif_running(dev)) + return -EINVAL; + + skge_phy_reset(skge); + return 0; +} + +static void skge_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + + ecmd->rx_pause = ((skge->flow_control == FLOW_MODE_SYMMETRIC) || + (skge->flow_control == FLOW_MODE_SYM_OR_REM)); + ecmd->tx_pause = (ecmd->rx_pause || + (skge->flow_control == FLOW_MODE_LOC_SEND)); + + ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause; +} + +static int skge_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct ethtool_pauseparam old; + int err = 0; + + skge_get_pauseparam(dev, &old); + + if (ecmd->autoneg != old.autoneg) + skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC; + else { + if (ecmd->rx_pause && ecmd->tx_pause) + skge->flow_control = FLOW_MODE_SYMMETRIC; + else if (ecmd->rx_pause && !ecmd->tx_pause) + skge->flow_control = FLOW_MODE_SYM_OR_REM; + else if (!ecmd->rx_pause && ecmd->tx_pause) + skge->flow_control = FLOW_MODE_LOC_SEND; + else + skge->flow_control = FLOW_MODE_NONE; + } + + if (netif_running(dev)) { + skge_down(dev); + err = skge_up(dev); + if (err) { + dev_close(dev); + return err; + } + } + + return 0; +} + +/* Chip internal frequency for clock calculations */ +static inline u32 hwkhz(const struct skge_hw *hw) +{ + return is_genesis(hw) ? 
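/* clock rate in kHz: 53.125 MHz on Genesis (XMAC), 78.125 MHz on Yukon (GMAC) */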
53125 : 78125; +} + +/* Chip HZ to microseconds */ +static inline u32 skge_clk2usec(const struct skge_hw *hw, u32 ticks) +{ + return (ticks * 1000) / hwkhz(hw); +} + +/* Microseconds to chip HZ */ +static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec) +{ + return hwkhz(hw) * usec / 1000; +} + +static int skge_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + + ecmd->rx_coalesce_usecs = 0; + ecmd->tx_coalesce_usecs = 0; + + if (skge_read32(hw, B2_IRQM_CTRL) & TIM_START) { + u32 delay = skge_clk2usec(hw, skge_read32(hw, B2_IRQM_INI)); + u32 msk = skge_read32(hw, B2_IRQM_MSK); + + if (msk & rxirqmask[port]) + ecmd->rx_coalesce_usecs = delay; + if (msk & txirqmask[port]) + ecmd->tx_coalesce_usecs = delay; + } + + return 0; +} + +/* Note: interrupt timer is per board, but can turn on/off per port */ +static int skge_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ecmd) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u32 msk = skge_read32(hw, B2_IRQM_MSK); + u32 delay = 25; + + if (ecmd->rx_coalesce_usecs == 0) + msk &= ~rxirqmask[port]; + else if (ecmd->rx_coalesce_usecs < 25 || + ecmd->rx_coalesce_usecs > 33333) + return -EINVAL; + else { + msk |= rxirqmask[port]; + delay = ecmd->rx_coalesce_usecs; + } + + if (ecmd->tx_coalesce_usecs == 0) + msk &= ~txirqmask[port]; + else if (ecmd->tx_coalesce_usecs < 25 || + ecmd->tx_coalesce_usecs > 33333) + return -EINVAL; + else { + msk |= txirqmask[port]; + delay = min(delay, ecmd->rx_coalesce_usecs); + } + + skge_write32(hw, B2_IRQM_MSK, msk); + if (msk == 0) + skge_write32(hw, B2_IRQM_CTRL, TIM_STOP); + else { + skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, delay)); + skge_write32(hw, B2_IRQM_CTRL, TIM_START); + } + return 0; +} + +enum led_mode { LED_MODE_OFF, LED_MODE_ON, LED_MODE_TST }; +static void skge_led(struct skge_port *skge, enum led_mode mode) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) { + switch (mode) { + case LED_MODE_OFF: + if (hw->phy_type == SK_PHY_BCOM) + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF); + else { + skge_write32(hw, SK_REG(port, TX_LED_VAL), 0); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_T_OFF); + } + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF); + skge_write32(hw, SK_REG(port, RX_LED_VAL), 0); + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF); + break; + + case LED_MODE_ON: + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON); + skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON); + + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); + + break; + + case LED_MODE_TST: + skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON); + skge_write32(hw, SK_REG(port, RX_LED_VAL), 100); + skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START); + + if (hw->phy_type == SK_PHY_BCOM) + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON); + else { + skge_write8(hw, SK_REG(port, TX_LED_TST), LED_T_ON); + skge_write32(hw, SK_REG(port, TX_LED_VAL), 100); + skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START); + } + + } + } else { + switch (mode) { + case LED_MODE_OFF: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_DUP(MO_LED_OFF) | + PHY_M_LED_MO_10(MO_LED_OFF) | + 
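/* each 2-bit PHY_M_LED_MO_*() field takes one of the MO_LED_* codes (NORM, BLINK, OFF, ON) */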
PHY_M_LED_MO_100(MO_LED_OFF) | + PHY_M_LED_MO_1000(MO_LED_OFF) | + PHY_M_LED_MO_RX(MO_LED_OFF)); + break; + case LED_MODE_ON: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, + PHY_M_LED_PULS_DUR(PULS_170MS) | + PHY_M_LED_BLINK_RT(BLINK_84MS) | + PHY_M_LEDC_TX_CTRL | + PHY_M_LEDC_DP_CTRL); + + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_RX(MO_LED_OFF) | + (skge->speed == SPEED_100 ? + PHY_M_LED_MO_100(MO_LED_ON) : 0)); + break; + case LED_MODE_TST: + gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0); + gm_phy_write(hw, port, PHY_MARV_LED_OVER, + PHY_M_LED_MO_DUP(MO_LED_ON) | + PHY_M_LED_MO_10(MO_LED_ON) | + PHY_M_LED_MO_100(MO_LED_ON) | + PHY_M_LED_MO_1000(MO_LED_ON) | + PHY_M_LED_MO_RX(MO_LED_ON)); + } + } + spin_unlock_bh(&hw->phy_lock); +} + +/* blink LED's for finding board */ +static int skge_set_phys_id(struct net_device *dev, + enum ethtool_phys_id_state state) +{ + struct skge_port *skge = netdev_priv(dev); + + switch (state) { + case ETHTOOL_ID_ACTIVE: + return 2; /* cycle on/off twice per second */ + + case ETHTOOL_ID_ON: + skge_led(skge, LED_MODE_TST); + break; + + case ETHTOOL_ID_OFF: + skge_led(skge, LED_MODE_OFF); + break; + + case ETHTOOL_ID_INACTIVE: + /* back to regular LED state */ + skge_led(skge, netif_running(dev) ? LED_MODE_ON : LED_MODE_OFF); + } + + return 0; +} + +static int skge_get_eeprom_len(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + u32 reg2; + + pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, ®2); + return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8); +} + +static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset) +{ + u32 val; + + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, offset); + + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (!(offset & PCI_VPD_ADDR_F)); + + pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &val); + return val; +} + +static void skge_vpd_write(struct pci_dev *pdev, int cap, u16 offset, u32 val) +{ + pci_write_config_dword(pdev, cap + PCI_VPD_DATA, val); + pci_write_config_word(pdev, cap + PCI_VPD_ADDR, + offset | PCI_VPD_ADDR_F); + + do { + pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &offset); + } while (offset & PCI_VPD_ADDR_F); +} + +static int skge_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct skge_port *skge = netdev_priv(dev); + struct pci_dev *pdev = skge->hw->pdev; + int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); + int length = eeprom->len; + u16 offset = eeprom->offset; + + if (!cap) + return -EINVAL; + + eeprom->magic = SKGE_EEPROM_MAGIC; + + while (length > 0) { + u32 val = skge_vpd_read(pdev, cap, offset); + int n = min_t(int, length, sizeof(val)); + + memcpy(data, &val, n); + length -= n; + data += n; + offset += n; + } + return 0; +} + +static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, + u8 *data) +{ + struct skge_port *skge = netdev_priv(dev); + struct pci_dev *pdev = skge->hw->pdev; + int cap = pci_find_capability(pdev, PCI_CAP_ID_VPD); + int length = eeprom->len; + u16 offset = eeprom->offset; + + if (!cap) + return -EINVAL; + + if (eeprom->magic != SKGE_EEPROM_MAGIC) + return -EINVAL; + + while (length > 0) { + u32 val; + int n = min_t(int, length, sizeof(val)); + + if (n < sizeof(val)) + val = skge_vpd_read(pdev, cap, offset); + memcpy(&val, data, n); + + skge_vpd_write(pdev, cap, offset, val); + + length -= n; + data += n; + offset += n; + } + return 0; +} + +static const struct ethtool_ops skge_ethtool_ops = { + .get_settings = skge_get_settings, + 
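The VPD helpers above implement the standard PCI Vital Product Data handshake, 32 bits per transaction, with bit 15 of the address register (PCI_VPD_ADDR_F) as the completion flag:

/* skge_vpd_read():  post offset with F=0, poll until hardware sets F,
 *                   then fetch 4 bytes from PCI_VPD_DATA;
 * skge_vpd_write(): store 4 bytes to PCI_VPD_DATA, post offset with F=1,
 *                   poll until hardware clears F.
 */

skge_get_eeprom()/skge_set_eeprom() then just walk the requested range in 4-byte steps; note that the setter handles a partial trailing word by pre-reading it before overlaying the caller's bytes.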
.set_settings = skge_set_settings, + .get_drvinfo = skge_get_drvinfo, + .get_regs_len = skge_get_regs_len, + .get_regs = skge_get_regs, + .get_wol = skge_get_wol, + .set_wol = skge_set_wol, + .get_msglevel = skge_get_msglevel, + .set_msglevel = skge_set_msglevel, + .nway_reset = skge_nway_reset, + .get_link = ethtool_op_get_link, + .get_eeprom_len = skge_get_eeprom_len, + .get_eeprom = skge_get_eeprom, + .set_eeprom = skge_set_eeprom, + .get_ringparam = skge_get_ring_param, + .set_ringparam = skge_set_ring_param, + .get_pauseparam = skge_get_pauseparam, + .set_pauseparam = skge_set_pauseparam, + .get_coalesce = skge_get_coalesce, + .set_coalesce = skge_set_coalesce, + .get_strings = skge_get_strings, + .set_phys_id = skge_set_phys_id, + .get_sset_count = skge_get_sset_count, + .get_ethtool_stats = skge_get_ethtool_stats, +}; + +/* + * Allocate ring elements and chain them together + * One-to-one association of board descriptors with ring elements + */ +static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) +{ + struct skge_tx_desc *d; + struct skge_element *e; + int i; + + ring->start = kcalloc(ring->count, sizeof(*e), GFP_KERNEL); + if (!ring->start) + return -ENOMEM; + + for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) { + e->desc = d; + if (i == ring->count - 1) { + e->next = ring->start; + d->next_offset = base; + } else { + e->next = e + 1; + d->next_offset = base + (i+1) * sizeof(*d); + } + } + ring->to_use = ring->to_clean = ring->start; + + return 0; +} + +/* Allocate and setup a new buffer for receiving */ +static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, + struct sk_buff *skb, unsigned int bufsize) +{ + struct skge_rx_desc *rd = e->desc; + dma_addr_t map; + + map = pci_map_single(skge->hw->pdev, skb->data, bufsize, + PCI_DMA_FROMDEVICE); + + if (pci_dma_mapping_error(skge->hw->pdev, map)) + return -1; + + rd->dma_lo = lower_32_bits(map); + rd->dma_hi = upper_32_bits(map); + e->skb = skb; + rd->csum1_start = ETH_HLEN; + rd->csum2_start = ETH_HLEN; + rd->csum1 = 0; + rd->csum2 = 0; + + wmb(); + + rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, bufsize); + return 0; +} + +/* Resume receiving using existing skb, + * Note: DMA address is not changed by chip. + * MTU not changed while receiver active. + */ +static inline void skge_rx_reuse(struct skge_element *e, unsigned int size) +{ + struct skge_rx_desc *rd = e->desc; + + rd->csum2 = 0; + rd->csum2_start = ETH_HLEN; + + wmb(); + + rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size; +} + + +/* Free all buffers in receive ring, assumes receiver stopped */ +static void skge_rx_clean(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + + e = ring->start; + do { + struct skge_rx_desc *rd = e->desc; + rd->control = 0; + if (e->skb) { + pci_unmap_single(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); + dev_kfree_skb(e->skb); + e->skb = NULL; + } + } while ((e = e->next) != ring->start); +} + + +/* Allocate buffers for receive ring + * For receive: to_clean is next received frame. 
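skge_ring_alloc() above chains the software elements into a circle, one per hardware descriptor, with the final descriptor's next_offset pointing back at the ring base. The same wiring as a standalone sketch (types cut down to the two fields that matter; illustrative only):

struct desc    { unsigned int next_offset; };
struct element { struct desc *desc; struct element *next; };

static void ring_link(struct element *e, struct desc *d,
                      unsigned int count, unsigned int base)
{
    unsigned int i, next;

    for (i = 0; i < count; i++) {
        next = (i + 1) % count;             /* last entry wraps to 0 */
        e[i].desc = &d[i];
        e[i].next = &e[next];
        d[i].next_offset = base + next * (unsigned int)sizeof(struct desc);
    }
}

The to_use and to_clean cursors then chase each other around this circle, which is what makes the free-slot computation in skge_avail() further down constant time.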
+ */ +static int skge_rx_fill(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + + e = ring->start; + do { + struct sk_buff *skb; + + skb = __netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN, + GFP_KERNEL); + if (!skb) + return -ENOMEM; + + skb_reserve(skb, NET_IP_ALIGN); + if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { + dev_kfree_skb(skb); + return -EIO; + } + } while ((e = e->next) != ring->start); + + ring->to_clean = ring->start; + return 0; +} + +static const char *skge_pause(enum pause_status status) +{ + switch (status) { + case FLOW_STAT_NONE: + return "none"; + case FLOW_STAT_REM_SEND: + return "rx only"; + case FLOW_STAT_LOC_SEND: + return "tx only"; + case FLOW_STAT_SYMMETRIC: /* Both stations may send PAUSE */ + return "both"; + default: + return "indeterminate"; + } +} + + +static void skge_link_up(struct skge_port *skge) +{ + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), + LED_BLK_OFF|LED_SYNC_OFF|LED_ON); + + netif_carrier_on(skge->netdev); + netif_wake_queue(skge->netdev); + + netif_info(skge, link, skge->netdev, + "Link is up at %d Mbps, %s duplex, flow control %s\n", + skge->speed, + skge->duplex == DUPLEX_FULL ? "full" : "half", + skge_pause(skge->flow_status)); +} + +static void skge_link_down(struct skge_port *skge) +{ + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF); + netif_carrier_off(skge->netdev); + netif_stop_queue(skge->netdev); + + netif_info(skge, link, skge->netdev, "Link is down\n"); +} + +static void xm_link_down(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + + xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); + + if (netif_carrier_ok(dev)) + skge_link_down(skge); +} + +static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) +{ + int i; + + xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); + *val = xm_read16(hw, port, XM_PHY_DATA); + + if (hw->phy_type == SK_PHY_XMAC) + goto ready; + + for (i = 0; i < PHY_RETRIES; i++) { + if (xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_RDY) + goto ready; + udelay(1); + } + + return -ETIMEDOUT; + ready: + *val = xm_read16(hw, port, XM_PHY_DATA); + + return 0; +} + +static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg) +{ + u16 v = 0; + if (__xm_phy_read(hw, port, reg, &v)) + pr_warn("%s: phy read timed out\n", hw->dev[port]->name); + return v; +} + +static int xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +{ + int i; + + xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr); + for (i = 0; i < PHY_RETRIES; i++) { + if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) + goto ready; + udelay(1); + } + return -EIO; + + ready: + xm_write16(hw, port, XM_PHY_DATA, val); + for (i = 0; i < PHY_RETRIES; i++) { + if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY)) + return 0; + udelay(1); + } + return -ETIMEDOUT; +} + +static void genesis_init(struct skge_hw *hw) +{ + /* set blink source counter */ + skge_write32(hw, B2_BSC_INI, (SK_BLK_DUR * SK_FACT_53) / 100); + skge_write8(hw, B2_BSC_CTRL, BSC_START); + + /* configure mac arbiter */ + skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); + + /* configure mac arbiter timeout values */ + skge_write8(hw, B3_MA_TOINI_RX1, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_RX2, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_TX1, SK_MAC_TO_53); + skge_write8(hw, B3_MA_TOINI_TX2, SK_MAC_TO_53); + + skge_write8(hw, B3_MA_RCINI_RX1, 0); +
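__xm_phy_read() and xm_phy_write() above share one bounded-poll idiom for the XMAC's MDIO interface: issue the operation, then spin on a ready/busy bit for at most PHY_RETRIES one-microsecond polls. The skeleton of that idiom, extracted (sketch; assumes <linux/delay.h> and <linux/errno.h> as elsewhere in this file, with the predicate as a placeholder):

static int poll_bounded(int (*ready)(void *), void *arg, unsigned int retries)
{
    while (retries-- > 0) {
        if (ready(arg))
            return 0;       /* condition met */
        udelay(1);
    }
    return -ETIMEDOUT;      /* hardware never became ready */
}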
skge_write8(hw, B3_MA_RCINI_RX2, 0); + skge_write8(hw, B3_MA_RCINI_TX1, 0); + skge_write8(hw, B3_MA_RCINI_TX2, 0); + + /* configure packet arbiter timeout */ + skge_write16(hw, B3_PA_CTRL, PA_RST_CLR); + skge_write16(hw, B3_PA_TOINI_RX1, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_TX1, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_RX2, SK_PKT_TO_MAX); + skge_write16(hw, B3_PA_TOINI_TX2, SK_PKT_TO_MAX); +} + +static void genesis_reset(struct skge_hw *hw, int port) +{ + static const u8 zero[8] = { 0 }; + u32 reg; + + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); + + /* reset the statistics module */ + xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT); + xm_write16(hw, port, XM_IMSK, XM_IMSK_DISABLE); + xm_write32(hw, port, XM_MODE, 0); /* clear Mode Reg */ + xm_write16(hw, port, XM_TX_CMD, 0); /* reset TX CMD Reg */ + xm_write16(hw, port, XM_RX_CMD, 0); /* reset RX CMD Reg */ + + /* disable Broadcom PHY IRQ */ + if (hw->phy_type == SK_PHY_BCOM) + xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff); + + xm_outhash(hw, port, XM_HSM, zero); + + /* Flush TX and RX fifo */ + reg = xm_read32(hw, port, XM_MODE); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FTF); + xm_write32(hw, port, XM_MODE, reg | XM_MD_FRF); +} + +/* Convert mode to MII values */ +static const u16 phy_pause_map[] = { + [FLOW_MODE_NONE] = 0, + [FLOW_MODE_LOC_SEND] = PHY_AN_PAUSE_ASYM, + [FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP, + [FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM, +}; + +/* special defines for FIBER (88E1011S only) */ +static const u16 fiber_pause_map[] = { + [FLOW_MODE_NONE] = PHY_X_P_NO_PAUSE, + [FLOW_MODE_LOC_SEND] = PHY_X_P_ASYM_MD, + [FLOW_MODE_SYMMETRIC] = PHY_X_P_SYM_MD, + [FLOW_MODE_SYM_OR_REM] = PHY_X_P_BOTH_MD, +}; + + +/* Check status of Broadcom phy link */ +static void bcom_check_link(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u16 status; + + /* read twice because of latch */ + xm_phy_read(hw, port, PHY_BCOM_STAT); + status = xm_phy_read(hw, port, PHY_BCOM_STAT); + + if ((status & PHY_ST_LSYNC) == 0) { + xm_link_down(hw, port); + return; + } + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 lpa, aux; + + if (!(status & PHY_ST_AN_OVER)) + return; + + lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); + if (lpa & PHY_B_AN_RF) { + netdev_notice(dev, "remote fault\n"); + return; + } + + aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT); + + /* Check Duplex mismatch */ + switch (aux & PHY_B_AS_AN_RES_MSK) { + case PHY_B_RES_1000FD: + skge->duplex = DUPLEX_FULL; + break; + case PHY_B_RES_1000HD: + skge->duplex = DUPLEX_HALF; + break; + default: + netdev_notice(dev, "duplex mismatch\n"); + return; + } + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + switch (aux & PHY_B_AS_PAUSE_MSK) { + case PHY_B_AS_PAUSE_MSK: + skge->flow_status = FLOW_STAT_SYMMETRIC; + break; + case PHY_B_AS_PRR: + skge->flow_status = FLOW_STAT_REM_SEND; + break; + case PHY_B_AS_PRT: + skge->flow_status = FLOW_STAT_LOC_SEND; + break; + default: + skge->flow_status = FLOW_STAT_NONE; + } + skge->speed = SPEED_1000; + } + + if (!netif_carrier_ok(dev)) + genesis_link_up(skge); +} + +/* Broadcom 5400 only supports gigabit!
SysKonnect did not put an additional + * Phy on for 100 or 10Mbit operation + */ +static void bcom_phy_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + u16 id1, r, ext, ctl; + + /* magic workaround patterns for Broadcom */ + static const struct { + u16 reg; + u16 val; + } A1hack[] = { + { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1104 }, + { 0x17, 0x0013 }, { 0x15, 0x0404 }, { 0x17, 0x8006 }, + { 0x15, 0x0132 }, { 0x17, 0x8006 }, { 0x15, 0x0232 }, + { 0x17, 0x800D }, { 0x15, 0x000F }, { 0x18, 0x0420 }, + }, C0hack[] = { + { 0x18, 0x0c20 }, { 0x17, 0x0012 }, { 0x15, 0x1204 }, + { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, + }; + + /* read Id from external PHY (all have the same address) */ + id1 = xm_phy_read(hw, port, PHY_XMAC_ID1); + + /* Optimize MDIO transfer by suppressing preamble. */ + r = xm_read16(hw, port, XM_MMU_CMD); + r |= XM_MMU_NO_PRE; + xm_write16(hw, port, XM_MMU_CMD, r); + + switch (id1) { + case PHY_BCOM_ID1_C0: + /* + * Workaround BCOM Errata for the C0 type. + * Write magic patterns to reserved registers. + */ + for (i = 0; i < ARRAY_SIZE(C0hack); i++) + xm_phy_write(hw, port, + C0hack[i].reg, C0hack[i].val); + + break; + case PHY_BCOM_ID1_A1: + /* + * Workaround BCOM Errata for the A1 type. + * Write magic patterns to reserved registers. + */ + for (i = 0; i < ARRAY_SIZE(A1hack); i++) + xm_phy_write(hw, port, + A1hack[i].reg, A1hack[i].val); + break; + } + + /* + * Workaround BCOM Errata (#10523) for all BCom PHYs. + * Disable Power Management after reset. + */ + r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL); + r |= PHY_B_AC_DIS_PM; + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r); + + /* Dummy read */ + xm_read16(hw, port, XM_ISRC); + + ext = PHY_B_PEC_EN_LTR; /* enable tx led */ + ctl = PHY_CT_SP1000; /* always 1000mbit */ + + if (skge->autoneg == AUTONEG_ENABLE) { + /* + * Workaround BCOM Errata #1 for the C5 type. 
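bcom_phy_init() above applies the Broadcom errata workarounds as data: each fix is a table of (register, value) pairs, selected by the PHY revision read from PHY_XMAC_ID1 and replayed in a loop. The pattern reduced to its core (sketch; reuses the xm_phy_write() defined earlier):

struct reg_seq { u16 reg; u16 val; };

static void xm_phy_write_seq(struct skge_hw *hw, int port,
                             const struct reg_seq *seq, int len)
{
    int i;

    for (i = 0; i < len; i++)       /* replay the magic writes in order */
        xm_phy_write(hw, port, seq[i].reg, seq[i].val);
}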
+ * 1000Base-T Link Acquisition Failure in Slave Mode + * Set Repeater/DTE bit 10 of the 1000Base-T Control Register + */ + u16 adv = PHY_B_1000C_RD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + adv |= PHY_B_1000C_AHD; + if (skge->advertising & ADVERTISED_1000baseT_Full) + adv |= PHY_B_1000C_AFD; + xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv); + + ctl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + if (skge->duplex == DUPLEX_FULL) + ctl |= PHY_CT_DUP_MD; + /* Force to slave */ + xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE); + } + + /* Set autonegotiation pause parameters */ + xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, + phy_pause_map[skge->flow_control] | PHY_AN_CSMA); + + /* Handle Jumbo frames */ + if (hw->dev[port]->mtu > ETH_DATA_LEN) { + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, + PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK); + + ext |= PHY_B_PEC_HIGH_LA; + + } + + xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext); + xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl); + + /* Use link status change interrupt */ + xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); +} + +static void xm_phy_init(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl = 0; + + if (skge->autoneg == AUTONEG_ENABLE) { + if (skge->advertising & ADVERTISED_1000baseT_Half) + ctrl |= PHY_X_AN_HD; + if (skge->advertising & ADVERTISED_1000baseT_Full) + ctrl |= PHY_X_AN_FD; + + ctrl |= fiber_pause_map[skge->flow_control]; + + xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl); + + /* Restart Auto-negotiation */ + ctrl = PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + /* Set DuplexMode in Config register */ + if (skge->duplex == DUPLEX_FULL) + ctrl |= PHY_CT_DUP_MD; + /* + * Do NOT enable Auto-negotiation here. This would hold + * the link down because no IDLEs are transmitted + */ + } + + xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl); + + /* Poll PHY for status changes */ + mod_timer(&skge->link_timer, jiffies + LINK_HZ); +} + +static int xm_check_link(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 status; + + /* read twice because of latch */ + xm_phy_read(hw, port, PHY_XMAC_STAT); + status = xm_phy_read(hw, port, PHY_XMAC_STAT); + + if ((status & PHY_ST_LSYNC) == 0) { + xm_link_down(hw, port); + return 0; + } + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 lpa, res; + + if (!(status & PHY_ST_AN_OVER)) + return 0; + + lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP); + if (lpa & PHY_B_AN_RF) { + netdev_notice(dev, "remote fault\n"); + return 0; + } + + res = xm_phy_read(hw, port, PHY_XMAC_RES_ABI); + + /* Check Duplex mismatch */ + switch (res & (PHY_X_RS_HD | PHY_X_RS_FD)) { + case PHY_X_RS_FD: + skge->duplex = DUPLEX_FULL; + break; + case PHY_X_RS_HD: + skge->duplex = DUPLEX_HALF; + break; + default: + netdev_notice(dev, "duplex mismatch\n"); + return 0; + } + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + if ((skge->flow_control == FLOW_MODE_SYMMETRIC || + skge->flow_control == FLOW_MODE_SYM_OR_REM) && + (lpa & PHY_X_P_SYM_MD)) + skge->flow_status = FLOW_STAT_SYMMETRIC; + else if (skge->flow_control == FLOW_MODE_SYM_OR_REM && + (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD) + /* Enable PAUSE receive, disable PAUSE transmit */ + skge->flow_status = FLOW_STAT_REM_SEND; + else if (skge->flow_control == FLOW_MODE_LOC_SEND && + (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD) + /* Disable PAUSE receive, enable PAUSE transmit */ + skge->flow_status = FLOW_STAT_LOC_SEND; + else + 
skge->flow_status = FLOW_STAT_NONE; + + skge->speed = SPEED_1000; + } + + if (!netif_carrier_ok(dev)) + genesis_link_up(skge); + return 1; +} + +/* Poll to check for link coming up. + * + * Since internal PHY is wired to a level triggered pin, can't + * get an interrupt when carrier is detected, need to poll for + * link coming up. + */ +static void xm_link_timer(unsigned long arg) +{ + struct skge_port *skge = (struct skge_port *) arg; + struct net_device *dev = skge->netdev; + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + unsigned long flags; + + if (!netif_running(dev)) + return; + + spin_lock_irqsave(&hw->phy_lock, flags); + + /* + * Verify that the link by checking GPIO register three times. + * This pin has the signal from the link_sync pin connected to it. + */ + for (i = 0; i < 3; i++) { + if (xm_read16(hw, port, XM_GP_PORT) & XM_GP_INP_ASS) + goto link_down; + } + + /* Re-enable interrupt to detect link down */ + if (xm_check_link(dev)) { + u16 msk = xm_read16(hw, port, XM_IMSK); + msk &= ~XM_IS_INP_ASS; + xm_write16(hw, port, XM_IMSK, msk); + xm_read16(hw, port, XM_ISRC); + } else { +link_down: + mod_timer(&skge->link_timer, + round_jiffies(jiffies + LINK_HZ)); + } + spin_unlock_irqrestore(&hw->phy_lock, flags); +} + +static void genesis_mac_init(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN; + int i; + u32 r; + static const u8 zero[6] = { 0 }; + + for (i = 0; i < 10; i++) { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), + MFF_SET_MAC_RST); + if (skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST) + goto reset_ok; + udelay(1); + } + + netdev_warn(dev, "genesis reset failed\n"); + + reset_ok: + /* Unreset the XMAC. */ + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + + /* + * Perform additional initialization for external PHYs, + * namely for the 1000baseTX cards that use the XMAC's + * GMII mode. + */ + if (hw->phy_type != SK_PHY_XMAC) { + /* Take external Phy out of reset */ + r = skge_read32(hw, B2_GP_IO); + if (port == 0) + r |= GP_DIR_0|GP_IO_0; + else + r |= GP_DIR_2|GP_IO_2; + + skge_write32(hw, B2_GP_IO, r); + + /* Enable GMII interface */ + xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD); + } + + + switch (hw->phy_type) { + case SK_PHY_XMAC: + xm_phy_init(skge); + break; + case SK_PHY_BCOM: + bcom_phy_init(skge); + bcom_check_link(hw, port); + } + + /* Set Station Address */ + xm_outaddr(hw, port, XM_SA, dev->dev_addr); + + /* We don't use match addresses so clear */ + for (i = 1; i < 16; i++) + xm_outaddr(hw, port, XM_EXM(i), zero); + + /* Clear MIB counters */ + xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + /* Clear two times according to Errata #3 */ + xm_write16(hw, port, XM_STAT_CMD, + XM_SC_CLR_RXC | XM_SC_CLR_TXC); + + /* configure Rx High Water Mark (XM_RX_HI_WM) */ + xm_write16(hw, port, XM_RX_HI_WM, 1450); + + /* We don't need the FCS appended to the packet. */ + r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS; + if (jumbo) + r |= XM_RX_BIG_PK_OK; + + if (skge->duplex == DUPLEX_HALF) { + /* + * If in manual half duplex mode the other side might be in + * full duplex mode, so ignore if a carrier extension is not seen + * on frames received + */ + r |= XM_RX_DIS_CEXT; + } + xm_write16(hw, port, XM_RX_CMD, r); + + /* We want short frames padded to 60 bytes. 
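Both bcom_check_link() and xm_check_link() above resolve the negotiated pause configuration per IEEE 802.3z Table 37-4, combining the locally requested mode with the partner's advertised pause bits. Restated compactly (FLOW_MODE_* is what was requested, FLOW_STAT_* the outcome):

/*  SYMMETRIC or SYM_OR_REM requested, partner symmetric -> FLOW_STAT_SYMMETRIC
 *  SYM_OR_REM requested, partner asymmetric only        -> FLOW_STAT_REM_SEND
 *  LOC_SEND requested, partner both                     -> FLOW_STAT_LOC_SEND
 *  any other combination                                -> FLOW_STAT_NONE
 */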
*/ + xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD); + + /* Increase threshold for jumbo frames on dual port */ + if (hw->ports > 1 && jumbo) + xm_write16(hw, port, XM_TX_THR, 1020); + else + xm_write16(hw, port, XM_TX_THR, 512); + + /* + * Enable the reception of all error frames. This is + * a necessary evil due to the design of the XMAC. The + * XMAC's receive FIFO is only 8K in size, however jumbo + * frames can be up to 9000 bytes in length. When bad + * frame filtering is enabled, the XMAC's RX FIFO operates + * in 'store and forward' mode. For this to work, the + * entire frame has to fit into the FIFO, but that means + * that jumbo frames larger than 8192 bytes will be + * truncated. Disabling all bad frame filtering causes + * the RX FIFO to operate in streaming mode, in which + * case the XMAC will start transferring frames out of the + * RX FIFO as soon as the FIFO threshold is reached. + */ + xm_write32(hw, port, XM_MODE, XM_DEF_MODE); + + + /* + * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK) + * - Enable all bits excepting 'Octets Rx OK Low CntOv' + * and 'Octets Rx OK Hi Cnt Ov'. + */ + xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK); + + /* + * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK) + * - Enable all bits excepting 'Octets Tx OK Low CntOv' + * and 'Octets Tx OK Hi Cnt Ov'. + */ + xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK); + + /* Configure MAC arbiter */ + skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); + + /* configure timeout values */ + skge_write8(hw, B3_MA_TOINI_RX1, 72); + skge_write8(hw, B3_MA_TOINI_RX2, 72); + skge_write8(hw, B3_MA_TOINI_TX1, 72); + skge_write8(hw, B3_MA_TOINI_TX2, 72); + + skge_write8(hw, B3_MA_RCINI_RX1, 0); + skge_write8(hw, B3_MA_RCINI_RX2, 0); + skge_write8(hw, B3_MA_RCINI_TX1, 0); + skge_write8(hw, B3_MA_RCINI_TX2, 0); + + /* Configure Rx MAC FIFO */ + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR); + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT); + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD); + + /* Configure Tx MAC FIFO */ + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR); + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD); + + if (jumbo) { + /* Enable frame flushing if jumbo frames used */ + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH); + } else { + /* enable timeout timers if normal frames */ + skge_write16(hw, B3_PA_CTRL, + (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2); + } +} + +static void genesis_stop(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + unsigned retries = 1000; + u16 cmd; + + /* Disable Tx and Rx */ + cmd = xm_read16(hw, port, XM_MMU_CMD); + cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX); + xm_write16(hw, port, XM_MMU_CMD, cmd); + + genesis_reset(hw, port); + + /* Clear Tx packet arbiter timeout IRQ */ + skge_write16(hw, B3_PA_CTRL, + port == 0 ?
PA_CLR_TO_TX1 : PA_CLR_TO_TX2); + + /* Reset the MAC */ + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); + do { + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); + if (!(skge_read16(hw, SK_REG(port, TX_MFF_CTRL1)) & MFF_SET_MAC_RST)) + break; + } while (--retries > 0); + + /* For external PHYs there must be special handling */ + if (hw->phy_type != SK_PHY_XMAC) { + u32 reg = skge_read32(hw, B2_GP_IO); + if (port == 0) { + reg |= GP_DIR_0; + reg &= ~GP_IO_0; + } else { + reg |= GP_DIR_2; + reg &= ~GP_IO_2; + } + skge_write32(hw, B2_GP_IO, reg); + skge_read32(hw, B2_GP_IO); + } + + xm_write16(hw, port, XM_MMU_CMD, + xm_read16(hw, port, XM_MMU_CMD) + & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); + + xm_read16(hw, port, XM_MMU_CMD); +} + + +static void genesis_get_stats(struct skge_port *skge, u64 *data) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + unsigned long timeout = jiffies + HZ; + + xm_write16(hw, port, + XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); + + /* wait for update to complete */ + while (xm_read16(hw, port, XM_STAT_CMD) + & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) { + if (time_after(jiffies, timeout)) + break; + udelay(10); + } + + /* special case for 64 bit octet counter */ + data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32 + | xm_read32(hw, port, XM_TXO_OK_LO); + data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32 + | xm_read32(hw, port, XM_RXO_OK_LO); + + for (i = 2; i < ARRAY_SIZE(skge_stats); i++) + data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset); +} + +static void genesis_mac_intr(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u16 status = xm_read16(hw, port, XM_ISRC); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "mac interrupt status 0x%x\n", status); + + if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) { + xm_link_down(hw, port); + mod_timer(&skge->link_timer, jiffies + 1); + } + + if (status & XM_IS_TXF_UR) { + xm_write32(hw, port, XM_MODE, XM_MD_FTF); + ++dev->stats.tx_fifo_errors; + } +} + +static void genesis_link_up(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 cmd, msk; + u32 mode; + + cmd = xm_read16(hw, port, XM_MMU_CMD); + + /* + * enabling pause frame reception is required for 1000BT + * because the XMAC is not reset if the link is going down + */ + if (skge->flow_status == FLOW_STAT_NONE || + skge->flow_status == FLOW_STAT_LOC_SEND) + /* Disable Pause Frame Reception */ + cmd |= XM_MMU_IGN_PF; + else + /* Enable Pause Frame Reception */ + cmd &= ~XM_MMU_IGN_PF; + + xm_write16(hw, port, XM_MMU_CMD, cmd); + + mode = xm_read32(hw, port, XM_MODE); + if (skge->flow_status == FLOW_STAT_SYMMETRIC || + skge->flow_status == FLOW_STAT_LOC_SEND) { + /* + * Configure Pause Frame Generation + * Use internal and external Pause Frame Generation. + * Sending pause frames is edge triggered. + * Send a Pause frame with the maximum pause time if + * internal oder external FIFO full condition occurs. + * Send a zero pause time frame to re-start transmission. + */ + /* XM_PAUSE_DA = '010000C28001' (default) */ + /* XM_MAC_PTIME = 0xffff (maximum) */ + /* remember this value is defined in big endian (!) 
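genesis_get_stats() above snapshots the MIB counters, bounds the wait for the snapshot to one second (jiffies + HZ) so a wedged MAC cannot stall the caller, and then assembles the 64-bit octet counters from two 32-bit halves. That assembly idiom, standalone:

static u64 counter64(u32 hi, u32 lo)
{
    return ((u64)hi << 32) | lo;    /* e.g. hi=1, lo=42 -> 4294967338 */
}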
*/ + xm_write16(hw, port, XM_MAC_PTIME, 0xffff); + + mode |= XM_PAUSE_MODE; + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); + } else { + /* + * disable pause frame generation is required for 1000BT + * because the XMAC is not reset if the link is going down + */ + /* Disable Pause Mode in Mode Register */ + mode &= ~XM_PAUSE_MODE; + + skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); + } + + xm_write32(hw, port, XM_MODE, mode); + + /* Turn on detection of Tx underrun */ + msk = xm_read16(hw, port, XM_IMSK); + msk &= ~XM_IS_TXF_UR; + xm_write16(hw, port, XM_IMSK, msk); + + xm_read16(hw, port, XM_ISRC); + + /* get MMU Command Reg. */ + cmd = xm_read16(hw, port, XM_MMU_CMD); + if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL) + cmd |= XM_MMU_GMII_FD; + + /* + * Workaround BCOM Errata (#10523) for all BCom Phys + * Enable Power Management after link up + */ + if (hw->phy_type == SK_PHY_BCOM) { + xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, + xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) + & ~PHY_B_AC_DIS_PM); + xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK); + } + + /* enable Rx/Tx */ + xm_write16(hw, port, XM_MMU_CMD, + cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX); + skge_link_up(skge); +} + + +static inline void bcom_phy_intr(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 isrc; + + isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT); + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "phy interrupt status 0x%x\n", isrc); + + if (isrc & PHY_B_IS_PSE) + pr_err("%s: uncorrectable pair swap error\n", + hw->dev[port]->name); + + /* Workaround BCom Errata: + * enable and disable loopback mode if "NO HCD" occurs. + */ + if (isrc & PHY_B_IS_NO_HDCL) { + u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL); + xm_phy_write(hw, port, PHY_BCOM_CTRL, + ctrl | PHY_CT_LOOP); + xm_phy_write(hw, port, PHY_BCOM_CTRL, + ctrl & ~PHY_CT_LOOP); + } + + if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) + bcom_check_link(hw, port); + +} + +static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) +{ + int i; + + gma_write16(hw, port, GM_SMI_DATA, val); + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); + for (i = 0; i < PHY_RETRIES; i++) { + udelay(1); + + if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) + return 0; + } + + pr_warn("%s: phy write timeout\n", hw->dev[port]->name); + return -EIO; +} + +static int __gm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val) +{ + int i; + + gma_write16(hw, port, GM_SMI_CTRL, + GM_SMI_CT_PHY_AD(hw->phy_addr) + | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); + + for (i = 0; i < PHY_RETRIES; i++) { + udelay(1); + if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) + goto ready; + } + + return -ETIMEDOUT; + ready: + *val = gma_read16(hw, port, GM_SMI_DATA); + return 0; +} + +static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg) +{ + u16 v = 0; + if (__gm_phy_read(hw, port, reg, &v)) + pr_warn("%s: phy read timeout\n", hw->dev[port]->name); + return v; +} + +/* Marvell Phy Initialization */ +static void yukon_init(struct skge_hw *hw, int port) +{ + struct skge_port *skge = netdev_priv(hw->dev[port]); + u16 ctrl, ct1000, adv; + + if (skge->autoneg == AUTONEG_ENABLE) { + u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); + + ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | + PHY_M_EC_MAC_S_MSK); + ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); + + ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1); + + gm_phy_write(hw, 
port, PHY_MARV_EXT_CTRL, ectrl); + } + + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + if (skge->autoneg == AUTONEG_DISABLE) + ctrl &= ~PHY_CT_ANE; + + ctrl |= PHY_CT_RESET; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + ctrl = 0; + ct1000 = 0; + adv = PHY_AN_CSMA; + + if (skge->autoneg == AUTONEG_ENABLE) { + if (hw->copper) { + if (skge->advertising & ADVERTISED_1000baseT_Full) + ct1000 |= PHY_M_1000C_AFD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + ct1000 |= PHY_M_1000C_AHD; + if (skge->advertising & ADVERTISED_100baseT_Full) + adv |= PHY_M_AN_100_FD; + if (skge->advertising & ADVERTISED_100baseT_Half) + adv |= PHY_M_AN_100_HD; + if (skge->advertising & ADVERTISED_10baseT_Full) + adv |= PHY_M_AN_10_FD; + if (skge->advertising & ADVERTISED_10baseT_Half) + adv |= PHY_M_AN_10_HD; + + /* Set Flow-control capabilities */ + adv |= phy_pause_map[skge->flow_control]; + } else { + if (skge->advertising & ADVERTISED_1000baseT_Full) + adv |= PHY_M_AN_1000X_AFD; + if (skge->advertising & ADVERTISED_1000baseT_Half) + adv |= PHY_M_AN_1000X_AHD; + + adv |= fiber_pause_map[skge->flow_control]; + } + + /* Restart Auto-negotiation */ + ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; + } else { + /* forced speed/duplex settings */ + ct1000 = PHY_M_1000C_MSE; + + if (skge->duplex == DUPLEX_FULL) + ctrl |= PHY_CT_DUP_MD; + + switch (skge->speed) { + case SPEED_1000: + ctrl |= PHY_CT_SP1000; + break; + case SPEED_100: + ctrl |= PHY_CT_SP100; + break; + } + + ctrl |= PHY_CT_RESET; + } + + gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000); + + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* Enable phy interrupt on autonegotiation complete (or link up) */ + if (skge->autoneg == AUTONEG_ENABLE) + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_MSK); + else + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); +} + +static void yukon_reset(struct skge_hw *hw, int port) +{ + gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */ + gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ + gma_write16(hw, port, GM_MC_ADDR_H2, 0); + gma_write16(hw, port, GM_MC_ADDR_H3, 0); + gma_write16(hw, port, GM_MC_ADDR_H4, 0); + + gma_write16(hw, port, GM_RX_CTRL, + gma_read16(hw, port, GM_RX_CTRL) + | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); +} + +/* Apparently, early versions of Yukon-Lite had wrong chip_id? 
*/ +static int is_yukon_lite_a0(struct skge_hw *hw) +{ + u32 reg; + int ret; + + if (hw->chip_id != CHIP_ID_YUKON) + return 0; + + reg = skge_read32(hw, B2_FAR); + skge_write8(hw, B2_FAR + 3, 0xff); + ret = (skge_read8(hw, B2_FAR + 3) != 0); + skge_write32(hw, B2_FAR, reg); + return ret; +} + +static void yukon_mac_init(struct skge_hw *hw, int port) +{ + struct skge_port *skge = netdev_priv(hw->dev[port]); + int i; + u32 reg; + const u8 *addr = hw->dev[port]->dev_addr; + + /* WA code for COMA mode -- set PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9 | GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + /* hard reset */ + skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); + + /* WA code for COMA mode -- clear PHY reset */ + if (hw->chip_id == CHIP_ID_YUKON_LITE && + hw->chip_rev >= CHIP_REV_YU_LITE_A3) { + reg = skge_read32(hw, B2_GP_IO); + reg |= GP_DIR_9; + reg &= ~GP_IO_9; + skge_write32(hw, B2_GP_IO, reg); + } + + /* Set hardware config mode */ + reg = GPC_INT_POL_HI | GPC_DIS_FC | GPC_DIS_SLEEP | + GPC_ENA_XC | GPC_ANEG_ADV_ALL_M | GPC_ENA_PAUSE; + reg |= hw->copper ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; + + /* Clear GMC reset */ + skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET); + skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); + + if (skge->autoneg == AUTONEG_DISABLE) { + reg = GM_GPCR_AU_ALL_DIS; + gma_write16(hw, port, GM_GP_CTRL, + gma_read16(hw, port, GM_GP_CTRL) | reg); + + switch (skge->speed) { + case SPEED_1000: + reg &= ~GM_GPCR_SPEED_100; + reg |= GM_GPCR_SPEED_1000; + break; + case SPEED_100: + reg &= ~GM_GPCR_SPEED_1000; + reg |= GM_GPCR_SPEED_100; + break; + case SPEED_10: + reg &= ~(GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100); + break; + } + + if (skge->duplex == DUPLEX_FULL) + reg |= GM_GPCR_DUP_FULL; + } else + reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; + + switch (skge->flow_control) { + case FLOW_MODE_NONE: + skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case FLOW_MODE_LOC_SEND: + /* disable Rx flow-control */ + reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; + break; + case FLOW_MODE_SYMMETRIC: + case FLOW_MODE_SYM_OR_REM: + /* enable Tx & Rx flow-control */ + break; + } + + gma_write16(hw, port, GM_GP_CTRL, reg); + skge_read16(hw, SK_REG(port, GMAC_IRQ_SRC)); + + yukon_init(hw, port); + + /* MIB clear */ + reg = gma_read16(hw, port, GM_PHY_ADDR); + gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); + + for (i = 0; i < GM_MIB_CNT_SIZE; i++) + gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); + gma_write16(hw, port, GM_PHY_ADDR, reg); + + /* transmit control */ + gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); + + /* receive control reg: unicast + multicast + no FCS */ + gma_write16(hw, port, GM_RX_CTRL, + GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); + + /* transmit flow control */ + gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); + + /* transmit parameter */ + gma_write16(hw, port, GM_TX_PARAM, + TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | + TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | + TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); + + /* configure the Serial Mode Register */ + reg = DATA_BLIND_VAL(DATA_BLIND_DEF) + | GM_SMOD_VLAN_ENA + | IPG_DATA_VAL(IPG_DATA_DEF); + + if (hw->dev[port]->mtu > 
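/* Note on the probe above: the A0 stepping reports the same chip_id as
 * later Yukon-Lite revisions, so is_yukon_lite_a0() distinguishes it
 * behaviourally: save B2_FAR, write 0xff into its top byte, test whether
 * the write sticks (non-zero read-back), and restore the register. Its
 * only caller, yukon_mac_init(), uses the result to disable the Rx GMAC
 * FIFO flush on Rev. A0 only, as the in-line comment there notes. */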
ETH_DATA_LEN) + reg |= GM_SMOD_JUMBO_ENA; + + gma_write16(hw, port, GM_SERIAL_MODE, reg); + + /* physical address: used for pause frames */ + gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr); + /* virtual address for data */ + gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr); + + /* enable interrupt mask for counter overflows */ + gma_write16(hw, port, GM_TX_IRQ_MSK, 0); + gma_write16(hw, port, GM_RX_IRQ_MSK, 0); + gma_write16(hw, port, GM_TR_IRQ_MSK, 0); + + /* Initialize Mac Fifo */ + + /* Configure Rx MAC FIFO */ + skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); + reg = GMF_OPER_ON | GMF_RX_F_FL_ON; + + /* disable Rx GMAC FIFO Flush for YUKON-Lite Rev. A0 only */ + if (is_yukon_lite_a0(hw)) + reg &= ~GMF_RX_F_FL_ON; + + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); + skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg); + /* + * because Pause Packet Truncation in GMAC is not working + * we have to increase the Flush Threshold to 64 bytes + * in order to flush pause packets in Rx FIFO on Yukon-1 + */ + skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF+1); + + /* Configure Tx MAC FIFO */ + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); + skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); +} + +/* Go into power down mode */ +static void yukon_suspend(struct skge_hw *hw, int port) +{ + u16 ctrl; + + ctrl = gm_phy_read(hw, port, PHY_MARV_PHY_CTRL); + ctrl |= PHY_M_PC_POL_R_DIS; + gm_phy_write(hw, port, PHY_MARV_PHY_CTRL, ctrl); + + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + ctrl |= PHY_CT_RESET; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); + + /* switch IEEE compatible power down mode on */ + ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL); + ctrl |= PHY_CT_PDOWN; + gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); +} + +static void yukon_stop(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); + yukon_reset(hw, port); + + gma_write16(hw, port, GM_GP_CTRL, + gma_read16(hw, port, GM_GP_CTRL) + & ~(GM_GPCR_TX_ENA|GM_GPCR_RX_ENA)); + gma_read16(hw, port, GM_GP_CTRL); + + yukon_suspend(hw, port); + + /* set GPHY Control reset */ + skge_write8(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET); + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET); +} + +static void yukon_get_stats(struct skge_port *skge, u64 *data) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + int i; + + data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 + | gma_read32(hw, port, GM_TXO_OK_LO); + data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32 + | gma_read32(hw, port, GM_RXO_OK_LO); + + for (i = 2; i < ARRAY_SIZE(skge_stats); i++) + data[i] = gma_read32(hw, port, + skge_stats[i].gma_offset); +} + +static void yukon_mac_intr(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + struct skge_port *skge = netdev_priv(dev); + u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC)); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "mac interrupt status 0x%x\n", status); + + if (status & GM_IS_RX_FF_OR) { + ++dev->stats.rx_fifo_errors; + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_CLI_RX_FO); + } + + if (status & GM_IS_TX_FF_UR) { + ++dev->stats.tx_fifo_errors; + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_FU); + } + +} + +static u16 yukon_speed(const struct skge_hw *hw, u16 aux) +{ + switch (aux & PHY_M_PS_SPEED_MSK) { + case PHY_M_PS_SPEED_1000: + return SPEED_1000; + case PHY_M_PS_SPEED_100: + return SPEED_100; + default: + 
return SPEED_10; + } +} + +static void yukon_link_up(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 reg; + + /* Enable Transmit FIFO Underrun */ + skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), GMAC_DEF_MSK); + + reg = gma_read16(hw, port, GM_GP_CTRL); + if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) + reg |= GM_GPCR_DUP_FULL; + + /* enable Rx/Tx */ + reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; + gma_write16(hw, port, GM_GP_CTRL, reg); + + gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_DEF_MSK); + skge_link_up(skge); +} + +static void yukon_link_down(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + u16 ctrl; + + ctrl = gma_read16(hw, port, GM_GP_CTRL); + ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA); + gma_write16(hw, port, GM_GP_CTRL, ctrl); + + if (skge->flow_status == FLOW_STAT_REM_SEND) { + ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV); + ctrl |= PHY_M_AN_ASP; + /* restore Asymmetric Pause bit */ + gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl); + } + + skge_link_down(skge); + + yukon_init(hw, port); +} + +static void yukon_phy_intr(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + const char *reason = NULL; + u16 istatus, phystat; + + istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT); + phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT); + + netif_printk(skge, intr, KERN_DEBUG, skge->netdev, + "phy interrupt status 0x%x 0x%x\n", istatus, phystat); + + if (istatus & PHY_M_IS_AN_COMPL) { + if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP) + & PHY_M_AN_RF) { + reason = "remote fault"; + goto failed; + } + + if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) { + reason = "master/slave fault"; + goto failed; + } + + if (!(phystat & PHY_M_PS_SPDUP_RES)) { + reason = "speed/duplex"; + goto failed; + } + + skge->duplex = (phystat & PHY_M_PS_FULL_DUP) + ? DUPLEX_FULL : DUPLEX_HALF; + skge->speed = yukon_speed(hw, phystat); + + /* We are using IEEE 802.3z/D5.0 Table 37-4 */ + switch (phystat & PHY_M_PS_PAUSE_MSK) { + case PHY_M_PS_PAUSE_MSK: + skge->flow_status = FLOW_STAT_SYMMETRIC; + break; + case PHY_M_PS_RX_P_EN: + skge->flow_status = FLOW_STAT_REM_SEND; + break; + case PHY_M_PS_TX_P_EN: + skge->flow_status = FLOW_STAT_LOC_SEND; + break; + default: + skge->flow_status = FLOW_STAT_NONE; + } + + if (skge->flow_status == FLOW_STAT_NONE || + (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); + else + skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON); + yukon_link_up(skge); + return; + } + + if (istatus & PHY_M_IS_LSP_CHANGE) + skge->speed = yukon_speed(hw, phystat); + + if (istatus & PHY_M_IS_DUP_CHANGE) + skge->duplex = (phystat & PHY_M_PS_FULL_DUP) ? DUPLEX_FULL : DUPLEX_HALF; + if (istatus & PHY_M_IS_LST_CHANGE) { + if (phystat & PHY_M_PS_LINK_UP) + yukon_link_up(skge); + else + yukon_link_down(skge); + } + return; + failed: + pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason); + + /* XXX restart autonegotiation? 
*/ +} + +static void skge_phy_reset(struct skge_port *skge) +{ + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct net_device *dev = hw->dev[port]; + + netif_stop_queue(skge->netdev); + netif_carrier_off(skge->netdev); + + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) { + genesis_reset(hw, port); + genesis_mac_init(hw, port); + } else { + yukon_reset(hw, port); + yukon_init(hw, port); + } + spin_unlock_bh(&hw->phy_lock); + + skge_set_multicast(dev); +} + +/* Basic MII support */ +static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mii_ioctl_data *data = if_mii(ifr); + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int err = -EOPNOTSUPP; + + if (!netif_running(dev)) + return -ENODEV; /* Phy still in reset */ + + switch (cmd) { + case SIOCGMIIPHY: + data->phy_id = hw->phy_addr; + + /* fallthru */ + case SIOCGMIIREG: { + u16 val = 0; + spin_lock_bh(&hw->phy_lock); + + if (is_genesis(hw)) + err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); + else + err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val); + spin_unlock_bh(&hw->phy_lock); + data->val_out = val; + break; + } + + case SIOCSMIIREG: + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) + err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f, + data->val_in); + else + err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f, + data->val_in); + spin_unlock_bh(&hw->phy_lock); + break; + } + return err; +} + +static void skge_ramset(struct skge_hw *hw, u16 q, u32 start, size_t len) +{ + u32 end; + + start /= 8; + len /= 8; + end = start + len - 1; + + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); + skge_write32(hw, RB_ADDR(q, RB_START), start); + skge_write32(hw, RB_ADDR(q, RB_WP), start); + skge_write32(hw, RB_ADDR(q, RB_RP), start); + skge_write32(hw, RB_ADDR(q, RB_END), end); + + if (q == Q_R1 || q == Q_R2) { + /* Set thresholds on receive queue's */ + skge_write32(hw, RB_ADDR(q, RB_RX_UTPP), + start + (2*len)/3); + skge_write32(hw, RB_ADDR(q, RB_RX_LTPP), + start + (len/3)); + } else { + /* Enable store & forward on Tx queue's because + * Tx FIFO is only 4K on Genesis and 1K on Yukon + */ + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_STFWD); + } + + skge_write8(hw, RB_ADDR(q, RB_CTRL), RB_ENA_OP_MD); +} + +/* Setup Bus Memory Interface */ +static void skge_qset(struct skge_port *skge, u16 q, + const struct skge_element *e) +{ + struct skge_hw *hw = skge->hw; + u32 watermark = 0x600; + u64 base = skge->dma + (e->desc - skge->mem); + + /* optimization to reduce window on 32bit/33mhz */ + if ((skge_read16(hw, B0_CTST) & (CS_BUS_CLOCK | CS_BUS_SLOT_SZ)) == 0) + watermark /= 2; + + skge_write32(hw, Q_ADDR(q, Q_CSR), CSR_CLR_RESET); + skge_write32(hw, Q_ADDR(q, Q_F), watermark); + skge_write32(hw, Q_ADDR(q, Q_DA_H), (u32)(base >> 32)); + skge_write32(hw, Q_ADDR(q, Q_DA_L), (u32)base); +} + +static int skge_up(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + u32 chunk, ram_addr; + size_t rx_size, tx_size; + int err; + + if (!is_valid_ether_addr(dev->dev_addr)) + return -EINVAL; + + netif_info(skge, ifup, skge->netdev, "enabling interface\n"); + + if (dev->mtu > RX_BUF_SIZE) + skge->rx_buf_size = dev->mtu + ETH_HLEN; + else + skge->rx_buf_size = RX_BUF_SIZE; + + + rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); + tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); + skge->mem_size = tx_size + rx_size; + skge->mem = 
pci_alloc_consistent(hw->pdev, skge->mem_size, &skge->dma); + if (!skge->mem) + return -ENOMEM; + + BUG_ON(skge->dma & 7); + + if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { + dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); + err = -EINVAL; + goto free_pci_mem; + } + + memset(skge->mem, 0, skge->mem_size); + + err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma); + if (err) + goto free_pci_mem; + + err = skge_rx_fill(dev); + if (err) + goto free_rx_ring; + + err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, + skge->dma + rx_size); + if (err) + goto free_rx_ring; + + if (hw->ports == 1) { + err = request_irq(hw->pdev->irq, skge_intr, IRQF_SHARED, + dev->name, hw); + if (err) { + netdev_err(dev, "Unable to allocate interrupt %d error: %d\n", + hw->pdev->irq, err); + goto free_tx_ring; + } + } + + /* Initialize MAC */ + netif_carrier_off(dev); + spin_lock_bh(&hw->phy_lock); + if (is_genesis(hw)) + genesis_mac_init(hw, port); + else + yukon_mac_init(hw, port); + spin_unlock_bh(&hw->phy_lock); + + /* Configure RAMbuffers - equally between ports and tx/rx */ + chunk = (hw->ram_size - hw->ram_offset) / (hw->ports * 2); + ram_addr = hw->ram_offset + 2 * chunk * port; + + skge_ramset(hw, rxqaddr[port], ram_addr, chunk); + skge_qset(skge, rxqaddr[port], skge->rx_ring.to_clean); + + BUG_ON(skge->tx_ring.to_use != skge->tx_ring.to_clean); + skge_ramset(hw, txqaddr[port], ram_addr+chunk, chunk); + skge_qset(skge, txqaddr[port], skge->tx_ring.to_use); + + /* Start receiver BMU */ + wmb(); + skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_START | CSR_IRQ_CL_F); + skge_led(skge, LED_MODE_ON); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask |= portmask[port]; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); + + napi_enable(&skge->napi); + + skge_set_multicast(dev); + + return 0; + + free_tx_ring: + kfree(skge->tx_ring.start); + free_rx_ring: + skge_rx_clean(skge); + kfree(skge->rx_ring.start); + free_pci_mem: + pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); + skge->mem = NULL; + + return err; +} + +/* stop receiver */ +static void skge_rx_stop(struct skge_hw *hw, int port) +{ + skge_write8(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_STOP); + skge_write32(hw, RB_ADDR(port ? Q_R2 : Q_R1, RB_CTRL), + RB_RST_SET|RB_DIS_OP_MD); + skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); +} + +static int skge_down(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + + if (skge->mem == NULL) + return 0; + + netif_info(skge, ifdown, skge->netdev, "disabling interface\n"); + + netif_tx_disable(dev); + + if (is_genesis(hw) && hw->phy_type == SK_PHY_XMAC) + del_timer_sync(&skge->link_timer); + + napi_disable(&skge->napi); + netif_carrier_off(dev); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask &= ~portmask[port]; + skge_write32(hw, B0_IMSK, (hw->ports == 1) ? 
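skge_up() above carves the adapter's on-chip packet RAM evenly, first between ports and then between each port's rx and tx queues. With illustrative numbers (not taken from any datasheet), a 2-port board with 1 MiB of RAM and ram_offset = 0 gives:

/*  chunk  = 1 MiB / (2 ports * 2) = 256 KiB per queue
 *  port 0: rx buffer at 0,       tx buffer at 256 KiB
 *  port 1: rx buffer at 512 KiB, tx buffer at 768 KiB
 */

skge_ramset() then converts byte addresses to 8-byte units and programs the start/end and read/write pointers, adding the one-third and two-thirds occupancy thresholds on receive queues only; tx queues run store-and-forward instead.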
0 : hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); + + if (hw->ports == 1) + free_irq(hw->pdev->irq, hw); + + skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG), LED_OFF); + if (is_genesis(hw)) + genesis_stop(skge); + else + yukon_stop(skge); + + /* Stop transmitter */ + skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); + skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), + RB_RST_SET|RB_DIS_OP_MD); + + + /* Disable Force Sync bit and Enable Alloc bit */ + skge_write8(hw, SK_REG(port, TXA_CTRL), + TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); + + /* Stop Interval Timer and Limit Counter of Tx Arbiter */ + skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L); + skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L); + + /* Reset PCI FIFO */ + skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); + skge_write32(hw, RB_ADDR(txqaddr[port], RB_CTRL), RB_RST_SET); + + /* Reset the RAM Buffer async Tx queue */ + skge_write8(hw, RB_ADDR(port == 0 ? Q_XA1 : Q_XA2, RB_CTRL), RB_RST_SET); + + skge_rx_stop(hw, port); + + if (is_genesis(hw)) { + skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET); + skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET); + } else { + skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); + } + + skge_led(skge, LED_MODE_OFF); + + netif_tx_lock_bh(dev); + skge_tx_clean(dev); + netif_tx_unlock_bh(dev); + + skge_rx_clean(skge); + + kfree(skge->rx_ring.start); + kfree(skge->tx_ring.start); + pci_free_consistent(hw->pdev, skge->mem_size, skge->mem, skge->dma); + skge->mem = NULL; + return 0; +} + +static inline int skge_avail(const struct skge_ring *ring) +{ + smp_mb(); + return ((ring->to_clean > ring->to_use) ? 0 : ring->count) + + (ring->to_clean - ring->to_use) - 1; +} + +static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, + struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + struct skge_element *e; + struct skge_tx_desc *td; + int i; + u32 control, len; + dma_addr_t map; + + if (skb_padto(skb, ETH_ZLEN)) + return NETDEV_TX_OK; + + if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) + return NETDEV_TX_BUSY; + + e = skge->tx_ring.to_use; + td = e->desc; + BUG_ON(td->control & BMU_OWN); + e->skb = skb; + len = skb_headlen(skb); + map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(hw->pdev, map)) + goto mapping_error; + + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, len); + + td->dma_lo = lower_32_bits(map); + td->dma_hi = upper_32_bits(map); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + const int offset = skb_checksum_start_offset(skb); + + /* This seems backwards, but it is what the sk98lin + * does. Looks like hardware is wrong? + */ + if (ipip_hdr(skb)->protocol == IPPROTO_UDP && + hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON) + control = BMU_TCP_CHECK; + else + control = BMU_UDP_CHECK; + + td->csum_offs = 0; + td->csum_start = offset; + td->csum_write = offset + skb->csum_offset; + } else + control = BMU_CHECK; + + if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. 
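skge_avail() above computes the free tx slots from the two ring cursors, always keeping one slot unused so that to_use == to_clean can only mean an empty ring, never a full one. A worked example with indices in place of pointers (illustrative):

/*  count = 512, to_clean at index 10, to_use at index 500:
 *  to_clean <= to_use, so avail = 512 + (10 - 500) - 1 = 21
 *  (490 descriptors are in flight, plus the one reserved slot)
 */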
no fragments */ + control |= BMU_EOF | BMU_IRQ_EOF; + else { + struct skge_tx_desc *tf = td; + + control |= BMU_STFWD; + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, + skb_frag_size(frag), DMA_TO_DEVICE); + if (dma_mapping_error(&hw->pdev->dev, map)) + goto mapping_unwind; + + e = e->next; + e->skb = skb; + tf = e->desc; + BUG_ON(tf->control & BMU_OWN); + + tf->dma_lo = lower_32_bits(map); + tf->dma_hi = upper_32_bits(map); + dma_unmap_addr_set(e, mapaddr, map); + dma_unmap_len_set(e, maplen, skb_frag_size(frag)); + + tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag); + } + tf->control |= BMU_EOF | BMU_IRQ_EOF; + } + /* Make sure all the descriptors are written */ + wmb(); + td->control = BMU_OWN | BMU_SW | BMU_STF | control | len; + wmb(); + + netdev_sent_queue(dev, skb->len); + + skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START); + + netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev, + "tx queued, slot %td, len %d\n", + e - skge->tx_ring.start, skb->len); + + skge->tx_ring.to_use = e->next; + smp_wmb(); + + if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) { + netdev_dbg(dev, "transmit queue full\n"); + netif_stop_queue(dev); + } + + return NETDEV_TX_OK; + +mapping_unwind: + e = skge->tx_ring.to_use; + pci_unmap_single(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + while (i-- > 0) { + e = e->next; + pci_unmap_page(hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + } + +mapping_error: + if (net_ratelimit()) + dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + + +/* Free resources associated with this ring element */ +static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e, + u32 control) +{ + /* skb header vs.
fragment */ + if (control & BMU_STF) + pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); + else + pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_TODEVICE); +} + +/* Free all buffers in transmit ring */ +static void skge_tx_clean(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_element *e; + + for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { + struct skge_tx_desc *td = e->desc; + + skge_tx_unmap(skge->hw->pdev, e, td->control); + + if (td->control & BMU_EOF) + dev_kfree_skb(e->skb); + td->control = 0; + } + + netdev_reset_queue(dev); + skge->tx_ring.to_clean = e; +} + +static void skge_tx_timeout(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n"); + + skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP); + skge_tx_clean(dev); + netif_wake_queue(dev); +} + +static int skge_change_mtu(struct net_device *dev, int new_mtu) +{ + int err; + + if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) + return -EINVAL; + + if (!netif_running(dev)) { + dev->mtu = new_mtu; + return 0; + } + + skge_down(dev); + + dev->mtu = new_mtu; + + err = skge_up(dev); + if (err) + dev_close(dev); + + return err; +} + +static const u8 pause_mc_addr[ETH_ALEN] = { 0x1, 0x80, 0xc2, 0x0, 0x0, 0x1 }; + +static void genesis_add_filter(u8 filter[8], const u8 *addr) +{ + u32 crc, bit; + + crc = ether_crc_le(ETH_ALEN, addr); + bit = ~crc & 0x3f; + filter[bit/8] |= 1 << (bit%8); +} + +static void genesis_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct netdev_hw_addr *ha; + u32 mode; + u8 filter[8]; + + mode = xm_read32(hw, port, XM_MODE); + mode |= XM_MD_ENA_HASH; + if (dev->flags & IFF_PROMISC) + mode |= XM_MD_ENA_PROM; + else + mode &= ~XM_MD_ENA_PROM; + + if (dev->flags & IFF_ALLMULTI) + memset(filter, 0xff, sizeof(filter)); + else { + memset(filter, 0, sizeof(filter)); + + if (skge->flow_status == FLOW_STAT_REM_SEND || + skge->flow_status == FLOW_STAT_SYMMETRIC) + genesis_add_filter(filter, pause_mc_addr); + + netdev_for_each_mc_addr(ha, dev) + genesis_add_filter(filter, ha->addr); + } + + xm_write32(hw, port, XM_MODE, mode); + xm_outhash(hw, port, XM_HSM, filter); +} + +static void yukon_add_filter(u8 filter[8], const u8 *addr) +{ + u32 bit = ether_crc(ETH_ALEN, addr) & 0x3f; + filter[bit/8] |= 1 << (bit%8); +} + +static void yukon_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + int port = skge->port; + struct netdev_hw_addr *ha; + int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND || + skge->flow_status == FLOW_STAT_SYMMETRIC); + u16 reg; + u8 filter[8]; + + memset(filter, 0, sizeof(filter)); + + reg = gma_read16(hw, port, GM_RX_CTRL); + reg |= GM_RXCR_UCF_ENA; + + if (dev->flags & IFF_PROMISC) /* promiscuous */ + reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); + else if (dev->flags & IFF_ALLMULTI) /* all multicast */ + memset(filter, 0xff, sizeof(filter)); + else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */ + reg &= ~GM_RXCR_MCF_ENA; + else { + reg |= GM_RXCR_MCF_ENA; + + if (rx_pause) + yukon_add_filter(filter, pause_mc_addr); + + netdev_for_each_mc_addr(ha, dev) + yukon_add_filter(filter, ha->addr); + } + + + gma_write16(hw, port, GM_MC_ADDR_H1, + (u16)filter[0] | 
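Two notes on the code above, then a sketch. In skge_xmit_frame(), the mapping_unwind path releases exactly what was mapped, pci_unmap_single() for the header at to_use and then pci_unmap_page() for each of the i fragments mapped so far, before freeing the skb; returning NETDEV_TX_OK after the drop is deliberate, since the packet has been consumed and must not be requeued by the stack. And genesis_add_filter()/yukon_add_filter() both fold a multicast address into one bit of a 64-bit hash filter; only the CRC flavour differs (inverted little-endian CRC on Genesis, plain ether_crc() on Yukon). The shared bit-set step, standalone (sketch; u8/u32 as elsewhere in this file):

static void hash_set_bit(u8 filter[8], u32 bit6)
{
    bit6 &= 0x3f;                         /* 6 bits pick one of 64 buckets */
    filter[bit6 / 8] |= 1u << (bit6 % 8);
}

Six bits cannot distinguish all addresses, so the hardware filter is a superset match: colliding addresses are accepted and weeded out in software.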
((u16)filter[1] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H2, + (u16)filter[2] | ((u16)filter[3] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H3, + (u16)filter[4] | ((u16)filter[5] << 8)); + gma_write16(hw, port, GM_MC_ADDR_H4, + (u16)filter[6] | ((u16)filter[7] << 8)); + + gma_write16(hw, port, GM_RX_CTRL, reg); +} + +static inline u16 phy_length(const struct skge_hw *hw, u32 status) +{ + if (is_genesis(hw)) + return status >> XMR_FS_LEN_SHIFT; + else + return status >> GMR_FS_LEN_SHIFT; +} + +static inline int bad_phy_status(const struct skge_hw *hw, u32 status) +{ + if (is_genesis(hw)) + return (status & (XMR_FS_ERR | XMR_FS_2L_VLAN)) != 0; + else + return (status & GMR_FS_ANY_ERR) || + (status & GMR_FS_RX_OK) == 0; +} + +static void skge_set_multicast(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + if (is_genesis(skge->hw)) + genesis_set_multicast(dev); + else + yukon_set_multicast(dev); + +} + + +/* Get receive buffer from descriptor. + * Handles copy of small buffers and reallocation failures + */ +static struct sk_buff *skge_rx_get(struct net_device *dev, + struct skge_element *e, + u32 control, u32 status, u16 csum) +{ + struct skge_port *skge = netdev_priv(dev); + struct sk_buff *skb; + u16 len = control & BMU_BBC; + + netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev, + "rx slot %td status 0x%x len %d\n", + e - skge->rx_ring.start, status, len); + + if (len > skge->rx_buf_size) + goto error; + + if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)) + goto error; + + if (bad_phy_status(skge->hw, status)) + goto error; + + if (phy_length(skge->hw, status) != len) + goto error; + + if (len < RX_COPY_THRESHOLD) { + skb = netdev_alloc_skb_ip_align(dev, len); + if (!skb) + goto resubmit; + + pci_dma_sync_single_for_cpu(skge->hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); + skb_copy_from_linear_data(e->skb, skb->data, len); + pci_dma_sync_single_for_device(skge->hw->pdev, + dma_unmap_addr(e, mapaddr), + dma_unmap_len(e, maplen), + PCI_DMA_FROMDEVICE); + skge_rx_reuse(e, skge->rx_buf_size); + } else { + struct skge_element ee; + struct sk_buff *nskb; + + nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size); + if (!nskb) + goto resubmit; + + ee = *e; + + skb = ee.skb; + prefetch(skb->data); + + if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { + dev_kfree_skb(nskb); + goto resubmit; + } + + pci_unmap_single(skge->hw->pdev, + dma_unmap_addr(&ee, mapaddr), + dma_unmap_len(&ee, maplen), + PCI_DMA_FROMDEVICE); + } + + skb_put(skb, len); + + if (dev->features & NETIF_F_RXCSUM) { + skb->csum = csum; + skb->ip_summed = CHECKSUM_COMPLETE; + } + + skb->protocol = eth_type_trans(skb, dev); + + return skb; +error: + + netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev, + "rx err, slot %td control 0x%x status 0x%x\n", + e - skge->rx_ring.start, control, status); + + if (is_genesis(skge->hw)) { + if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) + dev->stats.rx_length_errors++; + if (status & XMR_FS_FRA_ERR) + dev->stats.rx_frame_errors++; + if (status & XMR_FS_FCS_ERR) + dev->stats.rx_crc_errors++; + } else { + if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) + dev->stats.rx_length_errors++; + if (status & GMR_FS_FRAGMENT) + dev->stats.rx_frame_errors++; + if (status & GMR_FS_CRC_ERR) + dev->stats.rx_crc_errors++; + } + +resubmit: + skge_rx_reuse(e, skge->rx_buf_size); + return NULL; +} + +/* Free all buffers in Tx ring which are no longer owned by device */ +static void skge_tx_done(struct net_device *dev) +{ + struct 
skge_port *skge = netdev_priv(dev); + struct skge_ring *ring = &skge->tx_ring; + struct skge_element *e; + unsigned int bytes_compl = 0, pkts_compl = 0; + + skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); + + for (e = ring->to_clean; e != ring->to_use; e = e->next) { + u32 control = ((const struct skge_tx_desc *) e->desc)->control; + + if (control & BMU_OWN) + break; + + skge_tx_unmap(skge->hw->pdev, e, control); + + if (control & BMU_EOF) { + netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev, + "tx done slot %td\n", + e - skge->tx_ring.start); + + pkts_compl++; + bytes_compl += e->skb->len; + + dev_consume_skb_any(e->skb); + } + } + netdev_completed_queue(dev, pkts_compl, bytes_compl); + skge->tx_ring.to_clean = e; + + /* Can run lockless until we need to synchronize to restart queue. */ + smp_mb(); + + if (unlikely(netif_queue_stopped(dev) && + skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { + netif_tx_lock(dev); + if (unlikely(netif_queue_stopped(dev) && + skge_avail(&skge->tx_ring) > TX_LOW_WATER)) { + netif_wake_queue(dev); + + } + netif_tx_unlock(dev); + } +} + +static int skge_poll(struct napi_struct *napi, int to_do) +{ + struct skge_port *skge = container_of(napi, struct skge_port, napi); + struct net_device *dev = skge->netdev; + struct skge_hw *hw = skge->hw; + struct skge_ring *ring = &skge->rx_ring; + struct skge_element *e; + int work_done = 0; + + skge_tx_done(dev); + + skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F); + + for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) { + struct skge_rx_desc *rd = e->desc; + struct sk_buff *skb; + u32 control; + + rmb(); + control = rd->control; + if (control & BMU_OWN) + break; + + skb = skge_rx_get(dev, e, control, rd->status, rd->csum2); + if (likely(skb)) { + napi_gro_receive(napi, skb); + ++work_done; + } + } + ring->to_clean = e; + + /* restart receiver */ + wmb(); + skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), CSR_START); + + if (work_done < to_do) { + unsigned long flags; + + napi_gro_flush(napi, false); + spin_lock_irqsave(&hw->hw_lock, flags); + __napi_complete(napi); + hw->intr_mask |= napimask[skge->port]; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irqrestore(&hw->hw_lock, flags); + } + + return work_done; +} + +/* Parity errors seem to happen when Genesis is connected to a switch + * with no other ports present. Heartbeat error?? + */ +static void skge_mac_parity(struct skge_hw *hw, int port) +{ + struct net_device *dev = hw->dev[port]; + + ++dev->stats.tx_heartbeat_errors; + + if (is_genesis(hw)) + skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), + MFF_CLR_PERR); + else + /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ + skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), + (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0) + ? 
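/* rev 0 of the original Yukon only; every later chip clears the
   Tx parity interrupt with the dedicated GMF_CLI_TX_PE bit */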
GMF_CLI_TX_FC : GMF_CLI_TX_PE);
+}
+
+static void skge_mac_intr(struct skge_hw *hw, int port)
+{
+    if (is_genesis(hw))
+        genesis_mac_intr(hw, port);
+    else
+        yukon_mac_intr(hw, port);
+}
+
+/* Handle device specific framing and timeout interrupts */
+static void skge_error_irq(struct skge_hw *hw)
+{
+    struct pci_dev *pdev = hw->pdev;
+    u32 hwstatus = skge_read32(hw, B0_HWE_ISRC);
+
+    if (is_genesis(hw)) {
+        /* clear xmac errors */
+        if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
+            skge_write16(hw, RX_MFF_CTRL1, MFF_CLR_INSTAT);
+        if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
+            skge_write16(hw, RX_MFF_CTRL2, MFF_CLR_INSTAT);
+    } else {
+        /* Timestamp (unused) overflow */
+        if (hwstatus & IS_IRQ_TIST_OV)
+            skge_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_CLR_IRQ);
+    }
+
+    if (hwstatus & IS_RAM_RD_PAR) {
+        dev_err(&pdev->dev, "Ram read data parity error\n");
+        skge_write16(hw, B3_RI_CTRL, RI_CLR_RD_PERR);
+    }
+
+    if (hwstatus & IS_RAM_WR_PAR) {
+        dev_err(&pdev->dev, "Ram write data parity error\n");
+        skge_write16(hw, B3_RI_CTRL, RI_CLR_WR_PERR);
+    }
+
+    if (hwstatus & IS_M1_PAR_ERR)
+        skge_mac_parity(hw, 0);
+
+    if (hwstatus & IS_M2_PAR_ERR)
+        skge_mac_parity(hw, 1);
+
+    if (hwstatus & IS_R1_PAR_ERR) {
+        dev_err(&pdev->dev, "%s: receive queue parity error\n",
+                hw->dev[0]->name);
+        skge_write32(hw, B0_R1_CSR, CSR_IRQ_CL_P);
+    }
+
+    if (hwstatus & IS_R2_PAR_ERR) {
+        dev_err(&pdev->dev, "%s: receive queue parity error\n",
+                hw->dev[1]->name);
+        skge_write32(hw, B0_R2_CSR, CSR_IRQ_CL_P);
+    }
+
+    if (hwstatus & (IS_IRQ_MST_ERR|IS_IRQ_STAT)) {
+        u16 pci_status, pci_cmd;
+
+        pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+        pci_read_config_word(pdev, PCI_STATUS, &pci_status);
+
+        dev_err(&pdev->dev, "PCI error cmd=%#x status=%#x\n",
+                pci_cmd, pci_status);
+
+        /* Write the error bits back to clear them. */
+        pci_status &= PCI_STATUS_ERROR_BITS;
+        skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
+        pci_write_config_word(pdev, PCI_COMMAND,
+                              pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+        pci_write_config_word(pdev, PCI_STATUS, pci_status);
+        skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
+
+        /* if the error is still set then just ignore it */
+        hwstatus = skge_read32(hw, B0_HWE_ISRC);
+        if (hwstatus & IS_IRQ_STAT) {
+            dev_warn(&hw->pdev->dev, "unable to clear error (so ignoring it)\n");
+            hw->intr_mask &= ~IS_HW_ERR;
+        }
+    }
+}
+
+/*
+ * Interrupts from the PHY are handled in a tasklet (softirq)
+ * because accessing PHY registers requires a spin wait, which might
+ * cause excess interrupt latency.
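+ *
+ * The frame interrupts below use the same defer-and-re-enable
+ * pattern: skge_intr() masks a source (IS_EXT_REG here, the per-port
+ * Rx/Tx bits for NAPI) and the deferred handler restores it once the
+ * work is done, as skge_extirq() and skge_poll() do.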
+ */ +static void skge_extirq(unsigned long arg) +{ + struct skge_hw *hw = (struct skge_hw *) arg; + int port; + + for (port = 0; port < hw->ports; port++) { + struct net_device *dev = hw->dev[port]; + + if (netif_running(dev)) { + struct skge_port *skge = netdev_priv(dev); + + spin_lock(&hw->phy_lock); + if (!is_genesis(hw)) + yukon_phy_intr(skge); + else if (hw->phy_type == SK_PHY_BCOM) + bcom_phy_intr(skge); + spin_unlock(&hw->phy_lock); + } + } + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask |= IS_EXT_REG; + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock_irq(&hw->hw_lock); +} + +static irqreturn_t skge_intr(int irq, void *dev_id) +{ + struct skge_hw *hw = dev_id; + u32 status; + int handled = 0; + + spin_lock(&hw->hw_lock); + /* Reading this register masks IRQ */ + status = skge_read32(hw, B0_SP_ISRC); + if (status == 0 || status == ~0) + goto out; + + handled = 1; + status &= hw->intr_mask; + if (status & IS_EXT_REG) { + hw->intr_mask &= ~IS_EXT_REG; + tasklet_schedule(&hw->phy_task); + } + + if (status & (IS_XA1_F|IS_R1_F)) { + struct skge_port *skge = netdev_priv(hw->dev[0]); + hw->intr_mask &= ~(IS_XA1_F|IS_R1_F); + napi_schedule(&skge->napi); + } + + if (status & IS_PA_TO_TX1) + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1); + + if (status & IS_PA_TO_RX1) { + ++hw->dev[0]->stats.rx_over_errors; + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1); + } + + + if (status & IS_MAC1) + skge_mac_intr(hw, 0); + + if (hw->dev[1]) { + struct skge_port *skge = netdev_priv(hw->dev[1]); + + if (status & (IS_XA2_F|IS_R2_F)) { + hw->intr_mask &= ~(IS_XA2_F|IS_R2_F); + napi_schedule(&skge->napi); + } + + if (status & IS_PA_TO_RX2) { + ++hw->dev[1]->stats.rx_over_errors; + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2); + } + + if (status & IS_PA_TO_TX2) + skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2); + + if (status & IS_MAC2) + skge_mac_intr(hw, 1); + } + + if (status & IS_HW_ERR) + skge_error_irq(hw); +out: + skge_write32(hw, B0_IMSK, hw->intr_mask); + skge_read32(hw, B0_IMSK); + spin_unlock(&hw->hw_lock); + + return IRQ_RETVAL(handled); +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void skge_netpoll(struct net_device *dev) +{ + struct skge_port *skge = netdev_priv(dev); + + disable_irq(dev->irq); + skge_intr(dev->irq, skge->hw); + enable_irq(dev->irq); +} +#endif + +static int skge_set_mac_address(struct net_device *dev, void *p) +{ + struct skge_port *skge = netdev_priv(dev); + struct skge_hw *hw = skge->hw; + unsigned port = skge->port; + const struct sockaddr *addr = p; + u16 ctrl; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + if (!netif_running(dev)) { + memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); + } else { + /* disable Rx */ + spin_lock_bh(&hw->phy_lock); + ctrl = gma_read16(hw, port, GM_GP_CTRL); + gma_write16(hw, port, GM_GP_CTRL, ctrl & ~GM_GPCR_RX_ENA); + + memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN); + memcpy_toio(hw->regs + B2_MAC_2 + port*8, dev->dev_addr, ETH_ALEN); + + if (is_genesis(hw)) + xm_outaddr(hw, port, XM_SA, dev->dev_addr); + else { + gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr); + gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr); + } + + gma_write16(hw, port, GM_GP_CTRL, ctrl); + spin_unlock_bh(&hw->phy_lock); + } + + return 0; +} + +static const struct { + u8 id; + const char *name; +} skge_chips[] = { + { CHIP_ID_GENESIS, "Genesis" 
}, + { CHIP_ID_YUKON, "Yukon" }, + { CHIP_ID_YUKON_LITE, "Yukon-Lite"}, + { CHIP_ID_YUKON_LP, "Yukon-LP"}, +}; + +static const char *skge_board_name(const struct skge_hw *hw) +{ + int i; + static char buf[16]; + + for (i = 0; i < ARRAY_SIZE(skge_chips); i++) + if (skge_chips[i].id == hw->chip_id) + return skge_chips[i].name; + + snprintf(buf, sizeof buf, "chipid 0x%x", hw->chip_id); + return buf; +} + + +/* + * Setup the board data structure, but don't bring up + * the port(s) + */ +static int skge_reset(struct skge_hw *hw) +{ + u32 reg; + u16 ctst, pci_status; + u8 t8, mac_cfg, pmd_type; + int i; + + ctst = skge_read16(hw, B0_CTST); + + /* do a SW reset */ + skge_write8(hw, B0_CTST, CS_RST_SET); + skge_write8(hw, B0_CTST, CS_RST_CLR); + + /* clear PCI errors, if any */ + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + skge_write8(hw, B2_TST_CTRL2, 0); + + pci_read_config_word(hw->pdev, PCI_STATUS, &pci_status); + pci_write_config_word(hw->pdev, PCI_STATUS, + pci_status | PCI_STATUS_ERROR_BITS); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + skge_write8(hw, B0_CTST, CS_MRST_CLR); + + /* restore CLK_RUN bits (for Yukon-Lite) */ + skge_write16(hw, B0_CTST, + ctst & (CS_CLK_RUN_HOT|CS_CLK_RUN_RST|CS_CLK_RUN_ENA)); + + hw->chip_id = skge_read8(hw, B2_CHIP_ID); + hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; + pmd_type = skge_read8(hw, B2_PMD_TYP); + hw->copper = (pmd_type == 'T' || pmd_type == '1'); + + switch (hw->chip_id) { + case CHIP_ID_GENESIS: +#ifdef CONFIG_SKGE_GENESIS + switch (hw->phy_type) { + case SK_PHY_XMAC: + hw->phy_addr = PHY_ADDR_XMAC; + break; + case SK_PHY_BCOM: + hw->phy_addr = PHY_ADDR_BCOM; + break; + default: + dev_err(&hw->pdev->dev, "unsupported phy type 0x%x\n", + hw->phy_type); + return -EOPNOTSUPP; + } + break; +#else + dev_err(&hw->pdev->dev, "Genesis chip detected but not configured\n"); + return -EOPNOTSUPP; +#endif + + case CHIP_ID_YUKON: + case CHIP_ID_YUKON_LITE: + case CHIP_ID_YUKON_LP: + if (hw->phy_type < SK_PHY_MARV_COPPER && pmd_type != 'S') + hw->copper = 1; + + hw->phy_addr = PHY_ADDR_MARV; + break; + + default: + dev_err(&hw->pdev->dev, "unsupported chip type 0x%x\n", + hw->chip_id); + return -EOPNOTSUPP; + } + + mac_cfg = skge_read8(hw, B2_MAC_CFG); + hw->ports = (mac_cfg & CFG_SNG_MAC) ? 
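/* CFG_SNG_MAC flags single-MAC boards (see the B2_MAC_CFG bits in skge.h) */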
1 : 2; + hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4; + + /* read the adapters RAM size */ + t8 = skge_read8(hw, B2_E_0); + if (is_genesis(hw)) { + if (t8 == 3) { + /* special case: 4 x 64k x 36, offset = 0x80000 */ + hw->ram_size = 0x100000; + hw->ram_offset = 0x80000; + } else + hw->ram_size = t8 * 512; + } else if (t8 == 0) + hw->ram_size = 0x20000; + else + hw->ram_size = t8 * 4096; + + hw->intr_mask = IS_HW_ERR; + + /* Use PHY IRQ for all but fiber based Genesis board */ + if (!(is_genesis(hw) && hw->phy_type == SK_PHY_XMAC)) + hw->intr_mask |= IS_EXT_REG; + + if (is_genesis(hw)) + genesis_init(hw); + else { + /* switch power to VCC (WA for VAUX problem) */ + skge_write8(hw, B0_POWER_CTRL, + PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); + + /* avoid boards with stuck Hardware error bits */ + if ((skge_read32(hw, B0_ISRC) & IS_HW_ERR) && + (skge_read32(hw, B0_HWE_ISRC) & IS_IRQ_SENSOR)) { + dev_warn(&hw->pdev->dev, "stuck hardware sensor bit\n"); + hw->intr_mask &= ~IS_HW_ERR; + } + + /* Clear PHY COMA */ + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); + pci_read_config_dword(hw->pdev, PCI_DEV_REG1, ®); + reg &= ~PCI_PHY_COMA; + pci_write_config_dword(hw->pdev, PCI_DEV_REG1, reg); + skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); + + + for (i = 0; i < hw->ports; i++) { + skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); + skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); + } + } + + /* turn off hardware timer (unused) */ + skge_write8(hw, B2_TI_CTRL, TIM_STOP); + skge_write8(hw, B2_TI_CTRL, TIM_CLR_IRQ); + skge_write8(hw, B0_LED, LED_STAT_ON); + + /* enable the Tx Arbiters */ + for (i = 0; i < hw->ports; i++) + skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB); + + /* Initialize ram interface */ + skge_write16(hw, B3_RI_CTRL, RI_RST_CLR); + + skge_write8(hw, B3_RI_WTO_R1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XA1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XS1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_R1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XA1, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XS1, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_R2, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XA2, SK_RI_TO_53); + skge_write8(hw, B3_RI_WTO_XS2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_R2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XA2, SK_RI_TO_53); + skge_write8(hw, B3_RI_RTO_XS2, SK_RI_TO_53); + + skge_write32(hw, B0_HWE_IMSK, IS_ERR_MSK); + + /* Set interrupt moderation for Transmit only + * Receive interrupts avoided by NAPI + */ + skge_write32(hw, B2_IRQM_MSK, IS_XA1_F|IS_XA2_F); + skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); + skge_write32(hw, B2_IRQM_CTRL, TIM_START); + + /* Leave irq disabled until first port is brought up. 
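On a shared interrupt line the handler could otherwise run before any ring exists; the per-port bits are added to hw->intr_mask only once a port is opened via skge_up().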
*/ + skge_write32(hw, B0_IMSK, 0); + + for (i = 0; i < hw->ports; i++) { + if (is_genesis(hw)) + genesis_reset(hw, i); + else + yukon_reset(hw, i); + } + + return 0; +} + + +#ifdef CONFIG_SKGE_DEBUG + +static struct dentry *skge_debug; + +static int skge_debug_show(struct seq_file *seq, void *v) +{ + struct net_device *dev = seq->private; + const struct skge_port *skge = netdev_priv(dev); + const struct skge_hw *hw = skge->hw; + const struct skge_element *e; + + if (!netif_running(dev)) + return -ENETDOWN; + + seq_printf(seq, "IRQ src=%x mask=%x\n", skge_read32(hw, B0_ISRC), + skge_read32(hw, B0_IMSK)); + + seq_printf(seq, "Tx Ring: (%d)\n", skge_avail(&skge->tx_ring)); + for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) { + const struct skge_tx_desc *t = e->desc; + seq_printf(seq, "%#x dma=%#x%08x %#x csum=%#x/%x/%x\n", + t->control, t->dma_hi, t->dma_lo, t->status, + t->csum_offs, t->csum_write, t->csum_start); + } + + seq_printf(seq, "\nRx Ring:\n"); + for (e = skge->rx_ring.to_clean; ; e = e->next) { + const struct skge_rx_desc *r = e->desc; + + if (r->control & BMU_OWN) + break; + + seq_printf(seq, "%#x dma=%#x%08x %#x %#x csum=%#x/%x\n", + r->control, r->dma_hi, r->dma_lo, r->status, + r->timestamp, r->csum1, r->csum1_start); + } + + return 0; +} + +static int skge_debug_open(struct inode *inode, struct file *file) +{ + return single_open(file, skge_debug_show, inode->i_private); +} + +static const struct file_operations skge_debug_fops = { + .owner = THIS_MODULE, + .open = skge_debug_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/* + * Use network device events to create/remove/rename + * debugfs file entries + */ +static int skge_device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct skge_port *skge; + struct dentry *d; + + if (dev->netdev_ops->ndo_open != &skge_up || !skge_debug) + goto done; + + skge = netdev_priv(dev); + switch (event) { + case NETDEV_CHANGENAME: + if (skge->debugfs) { + d = debugfs_rename(skge_debug, skge->debugfs, + skge_debug, dev->name); + if (d) + skge->debugfs = d; + else { + netdev_info(dev, "rename failed\n"); + debugfs_remove(skge->debugfs); + } + } + break; + + case NETDEV_GOING_DOWN: + if (skge->debugfs) { + debugfs_remove(skge->debugfs); + skge->debugfs = NULL; + } + break; + + case NETDEV_UP: + d = debugfs_create_file(dev->name, S_IRUGO, + skge_debug, dev, + &skge_debug_fops); + if (!d || IS_ERR(d)) + netdev_info(dev, "debugfs create failed\n"); + else + skge->debugfs = d; + break; + } + +done: + return NOTIFY_DONE; +} + +static struct notifier_block skge_notifier = { + .notifier_call = skge_device_event, +}; + + +static __init void skge_debug_init(void) +{ + struct dentry *ent; + + ent = debugfs_create_dir("skge", NULL); + if (!ent || IS_ERR(ent)) { + pr_info("debugfs create directory failed\n"); + return; + } + + skge_debug = ent; + register_netdevice_notifier(&skge_notifier); +} + +static __exit void skge_debug_cleanup(void) +{ + if (skge_debug) { + unregister_netdevice_notifier(&skge_notifier); + debugfs_remove(skge_debug); + skge_debug = NULL; + } +} + +#else +#define skge_debug_init() +#define skge_debug_cleanup() +#endif + +static const struct net_device_ops skge_netdev_ops = { + .ndo_open = skge_up, + .ndo_stop = skge_down, + .ndo_start_xmit = skge_xmit_frame, + .ndo_do_ioctl = skge_ioctl, + .ndo_get_stats = skge_get_stats, + .ndo_tx_timeout = skge_tx_timeout, + .ndo_change_mtu = 
skge_change_mtu, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = skge_set_multicast, + .ndo_set_mac_address = skge_set_mac_address, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = skge_netpoll, +#endif +}; + + +/* Initialize network device */ +static struct net_device *skge_devinit(struct skge_hw *hw, int port, + int highmem) +{ + struct skge_port *skge; + struct net_device *dev = alloc_etherdev(sizeof(*skge)); + + if (!dev) + return NULL; + + SET_NETDEV_DEV(dev, &hw->pdev->dev); + dev->netdev_ops = &skge_netdev_ops; + dev->ethtool_ops = &skge_ethtool_ops; + dev->watchdog_timeo = TX_WATCHDOG; + dev->irq = hw->pdev->irq; + + if (highmem) + dev->features |= NETIF_F_HIGHDMA; + + skge = netdev_priv(dev); + netif_napi_add(dev, &skge->napi, skge_poll, NAPI_WEIGHT); + skge->netdev = dev; + skge->hw = hw; + skge->msg_enable = netif_msg_init(debug, default_msg); + + skge->tx_ring.count = DEFAULT_TX_RING_SIZE; + skge->rx_ring.count = DEFAULT_RX_RING_SIZE; + + /* Auto speed and flow control */ + skge->autoneg = AUTONEG_ENABLE; + skge->flow_control = FLOW_MODE_SYM_OR_REM; + skge->duplex = -1; + skge->speed = -1; + skge->advertising = skge_supported_modes(hw); + + if (device_can_wakeup(&hw->pdev->dev)) { + skge->wol = wol_supported(hw) & WAKE_MAGIC; + device_set_wakeup_enable(&hw->pdev->dev, skge->wol); + } + + hw->dev[port] = dev; + + skge->port = port; + + /* Only used for Genesis XMAC */ + if (is_genesis(hw)) + setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge); + else { + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_RXCSUM; + dev->features |= dev->hw_features; + } + + /* read the mac address */ + memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); + + return dev; +} + +static void skge_show_addr(struct net_device *dev) +{ + const struct skge_port *skge = netdev_priv(dev); + + netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr); +} + +static int only_32bit_dma; + +static int skge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *dev, *dev1; + struct skge_hw *hw; + int err, using_dac = 0; + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "cannot enable PCI device\n"); + goto err_out; + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "cannot obtain PCI resources\n"); + goto err_out_disable_pdev; + } + + pci_set_master(pdev); + + if (!only_32bit_dma && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + using_dac = 1; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + } else if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { + using_dac = 0; + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + } + + if (err) { + dev_err(&pdev->dev, "no usable DMA configuration\n"); + goto err_out_free_regions; + } + +#ifdef __BIG_ENDIAN + /* byte swap descriptors in hardware */ + { + u32 reg; + + pci_read_config_dword(pdev, PCI_DEV_REG2, ®); + reg |= PCI_REV_DESC; + pci_write_config_dword(pdev, PCI_DEV_REG2, reg); + } +#endif + + err = -ENOMEM; + /* space for skge@pci:0000:04:00.0 */ + hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:") + + strlen(pci_name(pdev)) + 1, GFP_KERNEL); + if (!hw) + goto err_out_free_regions; + + sprintf(hw->irq_name, DRV_NAME "@pci:%s", pci_name(pdev)); + + hw->pdev = pdev; + spin_lock_init(&hw->hw_lock); + spin_lock_init(&hw->phy_lock); + tasklet_init(&hw->phy_task, skge_extirq, (unsigned long) hw); + + hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); + if (!hw->regs) { + 
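/* note: the 0x4000 window above appears sized to reach the highest
   register banks declared in skge.h (BASE_GMAC_2 = 0x3800) */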
dev_err(&pdev->dev, "cannot map device registers\n"); + goto err_out_free_hw; + } + + err = skge_reset(hw); + if (err) + goto err_out_iounmap; + + pr_info("%s addr 0x%llx irq %d chip %s rev %d\n", + DRV_VERSION, + (unsigned long long)pci_resource_start(pdev, 0), pdev->irq, + skge_board_name(hw), hw->chip_rev); + + dev = skge_devinit(hw, 0, using_dac); + if (!dev) { + err = -ENOMEM; + goto err_out_led_off; + } + + /* Some motherboards are broken and has zero in ROM. */ + if (!is_valid_ether_addr(dev->dev_addr)) + dev_warn(&pdev->dev, "bad (zero?) ethernet address in rom\n"); + + err = register_netdev(dev); + if (err) { + dev_err(&pdev->dev, "cannot register net device\n"); + goto err_out_free_netdev; + } + + skge_show_addr(dev); + + if (hw->ports > 1) { + dev1 = skge_devinit(hw, 1, using_dac); + if (!dev1) { + err = -ENOMEM; + goto err_out_unregister; + } + + err = register_netdev(dev1); + if (err) { + dev_err(&pdev->dev, "cannot register second net device\n"); + goto err_out_free_dev1; + } + + err = request_irq(pdev->irq, skge_intr, IRQF_SHARED, + hw->irq_name, hw); + if (err) { + dev_err(&pdev->dev, "cannot assign irq %d\n", + pdev->irq); + goto err_out_unregister_dev1; + } + + skge_show_addr(dev1); + } + pci_set_drvdata(pdev, hw); + + return 0; + +err_out_unregister_dev1: + unregister_netdev(dev1); +err_out_free_dev1: + free_netdev(dev1); +err_out_unregister: + unregister_netdev(dev); +err_out_free_netdev: + free_netdev(dev); +err_out_led_off: + skge_write16(hw, B0_LED, LED_STAT_OFF); +err_out_iounmap: + iounmap(hw->regs); +err_out_free_hw: + kfree(hw); +err_out_free_regions: + pci_release_regions(pdev); +err_out_disable_pdev: + pci_disable_device(pdev); +err_out: + return err; +} + +static void skge_remove(struct pci_dev *pdev) +{ + struct skge_hw *hw = pci_get_drvdata(pdev); + struct net_device *dev0, *dev1; + + if (!hw) + return; + + dev1 = hw->dev[1]; + if (dev1) + unregister_netdev(dev1); + dev0 = hw->dev[0]; + unregister_netdev(dev0); + + tasklet_kill(&hw->phy_task); + + spin_lock_irq(&hw->hw_lock); + hw->intr_mask = 0; + + if (hw->ports > 1) { + skge_write32(hw, B0_IMSK, 0); + skge_read32(hw, B0_IMSK); + free_irq(pdev->irq, hw); + } + spin_unlock_irq(&hw->hw_lock); + + skge_write16(hw, B0_LED, LED_STAT_OFF); + skge_write8(hw, B0_CTST, CS_RST_SET); + + if (hw->ports > 1) + free_irq(pdev->irq, hw); + pci_release_regions(pdev); + pci_disable_device(pdev); + if (dev1) + free_netdev(dev1); + free_netdev(dev0); + + iounmap(hw->regs); + kfree(hw); +} + +#ifdef CONFIG_PM_SLEEP +static int skge_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct skge_hw *hw = pci_get_drvdata(pdev); + int i; + + if (!hw) + return 0; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + struct skge_port *skge = netdev_priv(dev); + + if (netif_running(dev)) + skge_down(dev); + + if (skge->wol) + skge_wol_init(skge); + } + + skge_write32(hw, B0_IMSK, 0); + + return 0; +} + +static int skge_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct skge_hw *hw = pci_get_drvdata(pdev); + int i, err; + + if (!hw) + return 0; + + err = skge_reset(hw); + if (err) + goto out; + + for (i = 0; i < hw->ports; i++) { + struct net_device *dev = hw->dev[i]; + + if (netif_running(dev)) { + err = skge_up(dev); + + if (err) { + netdev_err(dev, "could not up: %d\n", err); + dev_close(dev); + goto out; + } + } + } +out: + return err; +} + +static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume); +#define SKGE_PM_OPS (&skge_pm_ops) + +#else + 
+#define SKGE_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static void skge_shutdown(struct pci_dev *pdev)
+{
+    struct skge_hw *hw = pci_get_drvdata(pdev);
+    int i;
+
+    if (!hw)
+        return;
+
+    for (i = 0; i < hw->ports; i++) {
+        struct net_device *dev = hw->dev[i];
+        struct skge_port *skge = netdev_priv(dev);
+
+        if (skge->wol)
+            skge_wol_init(skge);
+    }
+
+    pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
+    pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static struct pci_driver skge_driver = {
+    .name = DRV_NAME,
+    .id_table = skge_id_table,
+    .probe = skge_probe,
+    .remove = skge_remove,
+    .shutdown = skge_shutdown,
+    .driver.pm = SKGE_PM_OPS,
+};
+
+static struct dmi_system_id skge_32bit_dma_boards[] = {
+    {
+        .ident = "Gigabyte nForce boards",
+        .matches = {
+            DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co"),
+            DMI_MATCH(DMI_BOARD_NAME, "nForce"),
+        },
+    },
+    {
+        .ident = "ASUS P5NSLI",
+        .matches = {
+            DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+            DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
+        },
+    },
+    {
+        .ident = "FUJITSU SIEMENS A8NE-FM",
+        .matches = {
+            DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
+            DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM")
+        },
+    },
+    {}
+};
+
+static int __init skge_init_module(void)
+{
+    if (dmi_check_system(skge_32bit_dma_boards))
+        only_32bit_dma = 1;
+    skge_debug_init();
+    return pci_register_driver(&skge_driver);
+}
+
+static void __exit skge_cleanup_module(void)
+{
+    pci_unregister_driver(&skge_driver);
+    skge_debug_cleanup();
+}
+
+module_init(skge_init_module);
+module_exit(skge_cleanup_module);
diff --git a/addons/skge/src/4.4.180/skge.h b/addons/skge/src/4.4.180/skge.h
new file mode 100644
index 00000000..a2eb3411
--- /dev/null
+++ b/addons/skge/src/4.4.180/skge.h
@@ -0,0 +1,2584 @@
+/*
+ * Definitions for the new Marvell Yukon / SysKonnect driver.
+ */
+#ifndef _SKGE_H
+#define _SKGE_H
+#include <linux/interrupt.h>
+
+/* PCI config registers */
+#define PCI_DEV_REG1 0x40
+#define PCI_PHY_COMA 0x8000000
+#define PCI_VIO 0x2000000
+
+#define PCI_DEV_REG2 0x44
+#define PCI_VPD_ROM_SZ 7L<<14 /* VPD ROM size 0=256, 1=512, ...
*/ +#define PCI_REV_DESC 1<<2 /* Reverse Descriptor bytes */ + +#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ + PCI_STATUS_SIG_SYSTEM_ERROR | \ + PCI_STATUS_REC_MASTER_ABORT | \ + PCI_STATUS_REC_TARGET_ABORT | \ + PCI_STATUS_PARITY) + +enum csr_regs { + B0_RAP = 0x0000, + B0_CTST = 0x0004, + B0_LED = 0x0006, + B0_POWER_CTRL = 0x0007, + B0_ISRC = 0x0008, + B0_IMSK = 0x000c, + B0_HWE_ISRC = 0x0010, + B0_HWE_IMSK = 0x0014, + B0_SP_ISRC = 0x0018, + B0_XM1_IMSK = 0x0020, + B0_XM1_ISRC = 0x0028, + B0_XM1_PHY_ADDR = 0x0030, + B0_XM1_PHY_DATA = 0x0034, + B0_XM2_IMSK = 0x0040, + B0_XM2_ISRC = 0x0048, + B0_XM2_PHY_ADDR = 0x0050, + B0_XM2_PHY_DATA = 0x0054, + B0_R1_CSR = 0x0060, + B0_R2_CSR = 0x0064, + B0_XS1_CSR = 0x0068, + B0_XA1_CSR = 0x006c, + B0_XS2_CSR = 0x0070, + B0_XA2_CSR = 0x0074, + + B2_MAC_1 = 0x0100, + B2_MAC_2 = 0x0108, + B2_MAC_3 = 0x0110, + B2_CONN_TYP = 0x0118, + B2_PMD_TYP = 0x0119, + B2_MAC_CFG = 0x011a, + B2_CHIP_ID = 0x011b, + B2_E_0 = 0x011c, + B2_E_1 = 0x011d, + B2_E_2 = 0x011e, + B2_E_3 = 0x011f, + B2_FAR = 0x0120, + B2_FDP = 0x0124, + B2_LD_CTRL = 0x0128, + B2_LD_TEST = 0x0129, + B2_TI_INI = 0x0130, + B2_TI_VAL = 0x0134, + B2_TI_CTRL = 0x0138, + B2_TI_TEST = 0x0139, + B2_IRQM_INI = 0x0140, + B2_IRQM_VAL = 0x0144, + B2_IRQM_CTRL = 0x0148, + B2_IRQM_TEST = 0x0149, + B2_IRQM_MSK = 0x014c, + B2_IRQM_HWE_MSK = 0x0150, + B2_TST_CTRL1 = 0x0158, + B2_TST_CTRL2 = 0x0159, + B2_GP_IO = 0x015c, + B2_I2C_CTRL = 0x0160, + B2_I2C_DATA = 0x0164, + B2_I2C_IRQ = 0x0168, + B2_I2C_SW = 0x016c, + B2_BSC_INI = 0x0170, + B2_BSC_VAL = 0x0174, + B2_BSC_CTRL = 0x0178, + B2_BSC_STAT = 0x0179, + B2_BSC_TST = 0x017a, + + B3_RAM_ADDR = 0x0180, + B3_RAM_DATA_LO = 0x0184, + B3_RAM_DATA_HI = 0x0188, + B3_RI_WTO_R1 = 0x0190, + B3_RI_WTO_XA1 = 0x0191, + B3_RI_WTO_XS1 = 0x0192, + B3_RI_RTO_R1 = 0x0193, + B3_RI_RTO_XA1 = 0x0194, + B3_RI_RTO_XS1 = 0x0195, + B3_RI_WTO_R2 = 0x0196, + B3_RI_WTO_XA2 = 0x0197, + B3_RI_WTO_XS2 = 0x0198, + B3_RI_RTO_R2 = 0x0199, + B3_RI_RTO_XA2 = 0x019a, + B3_RI_RTO_XS2 = 0x019b, + B3_RI_TO_VAL = 0x019c, + B3_RI_CTRL = 0x01a0, + B3_RI_TEST = 0x01a2, + B3_MA_TOINI_RX1 = 0x01b0, + B3_MA_TOINI_RX2 = 0x01b1, + B3_MA_TOINI_TX1 = 0x01b2, + B3_MA_TOINI_TX2 = 0x01b3, + B3_MA_TOVAL_RX1 = 0x01b4, + B3_MA_TOVAL_RX2 = 0x01b5, + B3_MA_TOVAL_TX1 = 0x01b6, + B3_MA_TOVAL_TX2 = 0x01b7, + B3_MA_TO_CTRL = 0x01b8, + B3_MA_TO_TEST = 0x01ba, + B3_MA_RCINI_RX1 = 0x01c0, + B3_MA_RCINI_RX2 = 0x01c1, + B3_MA_RCINI_TX1 = 0x01c2, + B3_MA_RCINI_TX2 = 0x01c3, + B3_MA_RCVAL_RX1 = 0x01c4, + B3_MA_RCVAL_RX2 = 0x01c5, + B3_MA_RCVAL_TX1 = 0x01c6, + B3_MA_RCVAL_TX2 = 0x01c7, + B3_MA_RC_CTRL = 0x01c8, + B3_MA_RC_TEST = 0x01ca, + B3_PA_TOINI_RX1 = 0x01d0, + B3_PA_TOINI_RX2 = 0x01d4, + B3_PA_TOINI_TX1 = 0x01d8, + B3_PA_TOINI_TX2 = 0x01dc, + B3_PA_TOVAL_RX1 = 0x01e0, + B3_PA_TOVAL_RX2 = 0x01e4, + B3_PA_TOVAL_TX1 = 0x01e8, + B3_PA_TOVAL_TX2 = 0x01ec, + B3_PA_CTRL = 0x01f0, + B3_PA_TEST = 0x01f2, +}; + +/* B0_CTST 16 bit Control/Status register */ +enum { + CS_CLK_RUN_HOT = 1<<13,/* CLK_RUN hot m. 
(YUKON-Lite only) */ + CS_CLK_RUN_RST = 1<<12,/* CLK_RUN reset (YUKON-Lite only) */ + CS_CLK_RUN_ENA = 1<<11,/* CLK_RUN enable (YUKON-Lite only) */ + CS_VAUX_AVAIL = 1<<10,/* VAUX available (YUKON only) */ + CS_BUS_CLOCK = 1<<9, /* Bus Clock 0/1 = 33/66 MHz */ + CS_BUS_SLOT_SZ = 1<<8, /* Slot Size 0/1 = 32/64 bit slot */ + CS_ST_SW_IRQ = 1<<7, /* Set IRQ SW Request */ + CS_CL_SW_IRQ = 1<<6, /* Clear IRQ SW Request */ + CS_STOP_DONE = 1<<5, /* Stop Master is finished */ + CS_STOP_MAST = 1<<4, /* Command Bit to stop the master */ + CS_MRST_CLR = 1<<3, /* Clear Master reset */ + CS_MRST_SET = 1<<2, /* Set Master reset */ + CS_RST_CLR = 1<<1, /* Clear Software reset */ + CS_RST_SET = 1, /* Set Software reset */ + +/* B0_LED 8 Bit LED register */ +/* Bit 7.. 2: reserved */ + LED_STAT_ON = 1<<1, /* Status LED on */ + LED_STAT_OFF = 1, /* Status LED off */ + +/* B0_POWER_CTRL 8 Bit Power Control reg (YUKON only) */ + PC_VAUX_ENA = 1<<7, /* Switch VAUX Enable */ + PC_VAUX_DIS = 1<<6, /* Switch VAUX Disable */ + PC_VCC_ENA = 1<<5, /* Switch VCC Enable */ + PC_VCC_DIS = 1<<4, /* Switch VCC Disable */ + PC_VAUX_ON = 1<<3, /* Switch VAUX On */ + PC_VAUX_OFF = 1<<2, /* Switch VAUX Off */ + PC_VCC_ON = 1<<1, /* Switch VCC On */ + PC_VCC_OFF = 1<<0, /* Switch VCC Off */ +}; + +/* B2_IRQM_MSK 32 bit IRQ Moderation Mask */ +enum { + IS_ALL_MSK = 0xbffffffful, /* All Interrupt bits */ + IS_HW_ERR = 1<<31, /* Interrupt HW Error */ + /* Bit 30: reserved */ + IS_PA_TO_RX1 = 1<<29, /* Packet Arb Timeout Rx1 */ + IS_PA_TO_RX2 = 1<<28, /* Packet Arb Timeout Rx2 */ + IS_PA_TO_TX1 = 1<<27, /* Packet Arb Timeout Tx1 */ + IS_PA_TO_TX2 = 1<<26, /* Packet Arb Timeout Tx2 */ + IS_I2C_READY = 1<<25, /* IRQ on end of I2C Tx */ + IS_IRQ_SW = 1<<24, /* SW forced IRQ */ + IS_EXT_REG = 1<<23, /* IRQ from LM80 or PHY (GENESIS only) */ + /* IRQ from PHY (YUKON only) */ + IS_TIMINT = 1<<22, /* IRQ from Timer */ + IS_MAC1 = 1<<21, /* IRQ from MAC 1 */ + IS_LNK_SYNC_M1 = 1<<20, /* Link Sync Cnt wrap MAC 1 */ + IS_MAC2 = 1<<19, /* IRQ from MAC 2 */ + IS_LNK_SYNC_M2 = 1<<18, /* Link Sync Cnt wrap MAC 2 */ +/* Receive Queue 1 */ + IS_R1_B = 1<<17, /* Q_R1 End of Buffer */ + IS_R1_F = 1<<16, /* Q_R1 End of Frame */ + IS_R1_C = 1<<15, /* Q_R1 Encoding Error */ +/* Receive Queue 2 */ + IS_R2_B = 1<<14, /* Q_R2 End of Buffer */ + IS_R2_F = 1<<13, /* Q_R2 End of Frame */ + IS_R2_C = 1<<12, /* Q_R2 Encoding Error */ +/* Synchronous Transmit Queue 1 */ + IS_XS1_B = 1<<11, /* Q_XS1 End of Buffer */ + IS_XS1_F = 1<<10, /* Q_XS1 End of Frame */ + IS_XS1_C = 1<<9, /* Q_XS1 Encoding Error */ +/* Asynchronous Transmit Queue 1 */ + IS_XA1_B = 1<<8, /* Q_XA1 End of Buffer */ + IS_XA1_F = 1<<7, /* Q_XA1 End of Frame */ + IS_XA1_C = 1<<6, /* Q_XA1 Encoding Error */ +/* Synchronous Transmit Queue 2 */ + IS_XS2_B = 1<<5, /* Q_XS2 End of Buffer */ + IS_XS2_F = 1<<4, /* Q_XS2 End of Frame */ + IS_XS2_C = 1<<3, /* Q_XS2 Encoding Error */ +/* Asynchronous Transmit Queue 2 */ + IS_XA2_B = 1<<2, /* Q_XA2 End of Buffer */ + IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */ + IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */ + + IS_TO_PORT1 = IS_PA_TO_RX1 | IS_PA_TO_TX1, + IS_TO_PORT2 = IS_PA_TO_RX2 | IS_PA_TO_TX2, + + IS_PORT_1 = IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1, + IS_PORT_2 = IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2, +}; + + +/* B2_IRQM_HWE_MSK 32 bit IRQ Moderation HW Error Mask */ +enum { + IS_IRQ_TIST_OV = 1<<13, /* Time Stamp Timer Overflow (YUKON only) */ + IS_IRQ_SENSOR = 1<<12, /* IRQ from Sensor (YUKON only) */ + IS_IRQ_MST_ERR = 1<<11, /* IRQ master 
error detected */ + IS_IRQ_STAT = 1<<10, /* IRQ status exception */ + IS_NO_STAT_M1 = 1<<9, /* No Rx Status from MAC 1 */ + IS_NO_STAT_M2 = 1<<8, /* No Rx Status from MAC 2 */ + IS_NO_TIST_M1 = 1<<7, /* No Time Stamp from MAC 1 */ + IS_NO_TIST_M2 = 1<<6, /* No Time Stamp from MAC 2 */ + IS_RAM_RD_PAR = 1<<5, /* RAM Read Parity Error */ + IS_RAM_WR_PAR = 1<<4, /* RAM Write Parity Error */ + IS_M1_PAR_ERR = 1<<3, /* MAC 1 Parity Error */ + IS_M2_PAR_ERR = 1<<2, /* MAC 2 Parity Error */ + IS_R1_PAR_ERR = 1<<1, /* Queue R1 Parity Error */ + IS_R2_PAR_ERR = 1<<0, /* Queue R2 Parity Error */ + + IS_ERR_MSK = IS_IRQ_MST_ERR | IS_IRQ_STAT + | IS_RAM_RD_PAR | IS_RAM_WR_PAR + | IS_M1_PAR_ERR | IS_M2_PAR_ERR + | IS_R1_PAR_ERR | IS_R2_PAR_ERR, +}; + +/* B2_TST_CTRL1 8 bit Test Control Register 1 */ +enum { + TST_FRC_DPERR_MR = 1<<7, /* force DATAPERR on MST RD */ + TST_FRC_DPERR_MW = 1<<6, /* force DATAPERR on MST WR */ + TST_FRC_DPERR_TR = 1<<5, /* force DATAPERR on TRG RD */ + TST_FRC_DPERR_TW = 1<<4, /* force DATAPERR on TRG WR */ + TST_FRC_APERR_M = 1<<3, /* force ADDRPERR on MST */ + TST_FRC_APERR_T = 1<<2, /* force ADDRPERR on TRG */ + TST_CFG_WRITE_ON = 1<<1, /* Enable Config Reg WR */ + TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */ +}; + +/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */ +enum { + CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */ + /* Bit 3.. 2: reserved */ + CFG_DIS_M2_CLK = 1<<1, /* Disable Clock for 2nd MAC */ + CFG_SNG_MAC = 1<<0, /* MAC Config: 0=2 MACs / 1=1 MAC*/ +}; + +/* B2_CHIP_ID 8 bit Chip Identification Number */ +enum { + CHIP_ID_GENESIS = 0x0a, /* Chip ID for GENESIS */ + CHIP_ID_YUKON = 0xb0, /* Chip ID for YUKON */ + CHIP_ID_YUKON_LITE = 0xb1, /* Chip ID for YUKON-Lite (Rev. A1-A3) */ + CHIP_ID_YUKON_LP = 0xb2, /* Chip ID for YUKON-LP */ + CHIP_ID_YUKON_XL = 0xb3, /* Chip ID for YUKON-2 XL */ + CHIP_ID_YUKON_EC = 0xb6, /* Chip ID for YUKON-2 EC */ + CHIP_ID_YUKON_FE = 0xb7, /* Chip ID for YUKON-2 FE */ + + CHIP_REV_YU_LITE_A1 = 3, /* Chip Rev. for YUKON-Lite A1,A2 */ + CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. 
for YUKON-Lite A3 */ +}; + +/* B2_TI_CTRL 8 bit Timer control */ +/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ +enum { + TIM_START = 1<<2, /* Start Timer */ + TIM_STOP = 1<<1, /* Stop Timer */ + TIM_CLR_IRQ = 1<<0, /* Clear Timer IRQ (!IRQM) */ +}; + +/* B2_TI_TEST 8 Bit Timer Test */ +/* B2_IRQM_TEST 8 bit IRQ Moderation Timer Test */ +/* B28_DPT_TST 8 bit Descriptor Poll Timer Test Reg */ +enum { + TIM_T_ON = 1<<2, /* Test mode on */ + TIM_T_OFF = 1<<1, /* Test mode off */ + TIM_T_STEP = 1<<0, /* Test step */ +}; + +/* B2_GP_IO 32 bit General Purpose I/O Register */ +enum { + GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */ + GP_DIR_8 = 1<<24, /* IO_8 direct, 0=In/1=Out */ + GP_DIR_7 = 1<<23, /* IO_7 direct, 0=In/1=Out */ + GP_DIR_6 = 1<<22, /* IO_6 direct, 0=In/1=Out */ + GP_DIR_5 = 1<<21, /* IO_5 direct, 0=In/1=Out */ + GP_DIR_4 = 1<<20, /* IO_4 direct, 0=In/1=Out */ + GP_DIR_3 = 1<<19, /* IO_3 direct, 0=In/1=Out */ + GP_DIR_2 = 1<<18, /* IO_2 direct, 0=In/1=Out */ + GP_DIR_1 = 1<<17, /* IO_1 direct, 0=In/1=Out */ + GP_DIR_0 = 1<<16, /* IO_0 direct, 0=In/1=Out */ + + GP_IO_9 = 1<<9, /* IO_9 pin */ + GP_IO_8 = 1<<8, /* IO_8 pin */ + GP_IO_7 = 1<<7, /* IO_7 pin */ + GP_IO_6 = 1<<6, /* IO_6 pin */ + GP_IO_5 = 1<<5, /* IO_5 pin */ + GP_IO_4 = 1<<4, /* IO_4 pin */ + GP_IO_3 = 1<<3, /* IO_3 pin */ + GP_IO_2 = 1<<2, /* IO_2 pin */ + GP_IO_1 = 1<<1, /* IO_1 pin */ + GP_IO_0 = 1<<0, /* IO_0 pin */ +}; + +/* Descriptor Bit Definition */ +/* TxCtrl Transmit Buffer Control Field */ +/* RxCtrl Receive Buffer Control Field */ +enum { + BMU_OWN = 1<<31, /* OWN bit: 0=host/1=BMU */ + BMU_STF = 1<<30, /* Start of Frame */ + BMU_EOF = 1<<29, /* End of Frame */ + BMU_IRQ_EOB = 1<<28, /* Req "End of Buffer" IRQ */ + BMU_IRQ_EOF = 1<<27, /* Req "End of Frame" IRQ */ + /* TxCtrl specific bits */ + BMU_STFWD = 1<<26, /* (Tx) Store & Forward Frame */ + BMU_NO_FCS = 1<<25, /* (Tx) Disable MAC FCS (CRC) generation */ + BMU_SW = 1<<24, /* (Tx) 1 bit res. for SW use */ + /* RxCtrl specific bits */ + BMU_DEV_0 = 1<<26, /* (Rx) Transfer data to Dev0 */ + BMU_STAT_VAL = 1<<25, /* (Rx) Rx Status Valid */ + BMU_TIST_VAL = 1<<24, /* (Rx) Rx TimeStamp Valid */ + /* Bit 23..16: BMU Check Opcodes */ + BMU_CHECK = 0x55<<16, /* Default BMU check */ + BMU_TCP_CHECK = 0x56<<16, /* Descr with TCP ext */ + BMU_UDP_CHECK = 0x57<<16, /* Descr with UDP ext (YUKON only) */ + BMU_BBC = 0xffffL, /* Bit 15.. 0: Buffer Byte Counter */ +}; + +/* B2_BSC_CTRL 8 bit Blink Source Counter Control */ +enum { + BSC_START = 1<<1, /* Start Blink Source Counter */ + BSC_STOP = 1<<0, /* Stop Blink Source Counter */ +}; + +/* B2_BSC_STAT 8 bit Blink Source Counter Status */ +enum { + BSC_SRC = 1<<0, /* Blink Source, 0=Off / 1=On */ +}; + +/* B2_BSC_TST 16 bit Blink Source Counter Test Reg */ +enum { + BSC_T_ON = 1<<2, /* Test mode on */ + BSC_T_OFF = 1<<1, /* Test mode off */ + BSC_T_STEP = 1<<0, /* Test step */ +}; + +/* B3_RAM_ADDR 32 bit RAM Address, to read or write */ + /* Bit 31..19: reserved */ +#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 
0: RAM Address Range */ +/* RAM Interface Registers */ + +/* B3_RI_CTRL 16 bit RAM Iface Control Register */ +enum { + RI_CLR_RD_PERR = 1<<9, /* Clear IRQ RAM Read Parity Err */ + RI_CLR_WR_PERR = 1<<8, /* Clear IRQ RAM Write Parity Err*/ + + RI_RST_CLR = 1<<1, /* Clear RAM Interface Reset */ + RI_RST_SET = 1<<0, /* Set RAM Interface Reset */ +}; + +/* MAC Arbiter Registers */ +/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */ +enum { + MA_FOE_ON = 1<<3, /* XMAC Fast Output Enable ON */ + MA_FOE_OFF = 1<<2, /* XMAC Fast Output Enable OFF */ + MA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */ + MA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */ + +}; + +/* Timeout values */ +#define SK_MAC_TO_53 72 /* MAC arbiter timeout */ +#define SK_PKT_TO_53 0x2000 /* Packet arbiter timeout */ +#define SK_PKT_TO_MAX 0xffff /* Maximum value */ +#define SK_RI_TO_53 36 /* RAM interface timeout */ + +/* Packet Arbiter Registers */ +/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */ +enum { + PA_CLR_TO_TX2 = 1<<13,/* Clear IRQ Packet Timeout TX2 */ + PA_CLR_TO_TX1 = 1<<12,/* Clear IRQ Packet Timeout TX1 */ + PA_CLR_TO_RX2 = 1<<11,/* Clear IRQ Packet Timeout RX2 */ + PA_CLR_TO_RX1 = 1<<10,/* Clear IRQ Packet Timeout RX1 */ + PA_ENA_TO_TX2 = 1<<9, /* Enable Timeout Timer TX2 */ + PA_DIS_TO_TX2 = 1<<8, /* Disable Timeout Timer TX2 */ + PA_ENA_TO_TX1 = 1<<7, /* Enable Timeout Timer TX1 */ + PA_DIS_TO_TX1 = 1<<6, /* Disable Timeout Timer TX1 */ + PA_ENA_TO_RX2 = 1<<5, /* Enable Timeout Timer RX2 */ + PA_DIS_TO_RX2 = 1<<4, /* Disable Timeout Timer RX2 */ + PA_ENA_TO_RX1 = 1<<3, /* Enable Timeout Timer RX1 */ + PA_DIS_TO_RX1 = 1<<2, /* Disable Timeout Timer RX1 */ + PA_RST_CLR = 1<<1, /* Clear MAC Arbiter Reset */ + PA_RST_SET = 1<<0, /* Set MAC Arbiter Reset */ +}; + +#define PA_ENA_TO_ALL (PA_ENA_TO_RX1 | PA_ENA_TO_RX2 |\ + PA_ENA_TO_TX1 | PA_ENA_TO_TX2) + + +/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ +/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ +/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ +/* TXA_LIM_VAL 32 bit Tx Arb Limit Counter Value */ + +#define TXA_MAX_VAL 0x00ffffffUL /* Bit 23.. 
0: Max TXA Timer/Cnt Val */ + +/* TXA_CTRL 8 bit Tx Arbiter Control Register */ +enum { + TXA_ENA_FSYNC = 1<<7, /* Enable force of sync Tx queue */ + TXA_DIS_FSYNC = 1<<6, /* Disable force of sync Tx queue */ + TXA_ENA_ALLOC = 1<<5, /* Enable alloc of free bandwidth */ + TXA_DIS_ALLOC = 1<<4, /* Disable alloc of free bandwidth */ + TXA_START_RC = 1<<3, /* Start sync Rate Control */ + TXA_STOP_RC = 1<<2, /* Stop sync Rate Control */ + TXA_ENA_ARB = 1<<1, /* Enable Tx Arbiter */ + TXA_DIS_ARB = 1<<0, /* Disable Tx Arbiter */ +}; + +/* + * Bank 4 - 5 + */ +/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */ +enum { + TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/ + TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */ + TXA_LIM_INI = 0x0208,/* 32 bit Tx Arb Limit Counter Init Val */ + TXA_LIM_VAL = 0x020c,/* 32 bit Tx Arb Limit Counter Value */ + TXA_CTRL = 0x0210,/* 8 bit Tx Arbiter Control Register */ + TXA_TEST = 0x0211,/* 8 bit Tx Arbiter Test Register */ + TXA_STAT = 0x0212,/* 8 bit Tx Arbiter Status Register */ +}; + + +enum { + B6_EXT_REG = 0x0300,/* External registers (GENESIS only) */ + B7_CFG_SPC = 0x0380,/* copy of the Configuration register */ + B8_RQ1_REGS = 0x0400,/* Receive Queue 1 */ + B8_RQ2_REGS = 0x0480,/* Receive Queue 2 */ + B8_TS1_REGS = 0x0600,/* Transmit sync queue 1 */ + B8_TA1_REGS = 0x0680,/* Transmit async queue 1 */ + B8_TS2_REGS = 0x0700,/* Transmit sync queue 2 */ + B8_TA2_REGS = 0x0780,/* Transmit sync queue 2 */ + B16_RAM_REGS = 0x0800,/* RAM Buffer Registers */ +}; + +/* Queue Register Offsets, use Q_ADDR() to access */ +enum { + B8_Q_REGS = 0x0400, /* base of Queue registers */ + Q_D = 0x00, /* 8*32 bit Current Descriptor */ + Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */ + Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */ + Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */ + Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */ + Q_BC = 0x30, /* 32 bit Current Byte Counter */ + Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */ + Q_F = 0x38, /* 32 bit Flag Register */ + Q_T1 = 0x3c, /* 32 bit Test Register 1 */ + Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */ + Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */ + Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */ + Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */ + Q_T2 = 0x40, /* 32 bit Test Register 2 */ + Q_T3 = 0x44, /* 32 bit Test Register 3 */ + +}; +#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) + +/* RAM Buffer Register Offsets */ +enum { + + RB_START= 0x00,/* 32 bit RAM Buffer Start Address */ + RB_END = 0x04,/* 32 bit RAM Buffer End Address */ + RB_WP = 0x08,/* 32 bit RAM Buffer Write Pointer */ + RB_RP = 0x0c,/* 32 bit RAM Buffer Read Pointer */ + RB_RX_UTPP= 0x10,/* 32 bit Rx Upper Threshold, Pause Packet */ + RB_RX_LTPP= 0x14,/* 32 bit Rx Lower Threshold, Pause Packet */ + RB_RX_UTHP= 0x18,/* 32 bit Rx Upper Threshold, High Prio */ + RB_RX_LTHP= 0x1c,/* 32 bit Rx Lower Threshold, High Prio */ + /* 0x10 - 0x1f: reserved at Tx RAM Buffer Registers */ + RB_PC = 0x20,/* 32 bit RAM Buffer Packet Counter */ + RB_LEV = 0x24,/* 32 bit RAM Buffer Level Register */ + RB_CTRL = 0x28,/* 32 bit RAM Buffer Control Register */ + RB_TST1 = 0x29,/* 8 bit RAM Buffer Test Register 1 */ + RB_TST2 = 0x2a,/* 8 bit RAM Buffer Test Register 2 */ +}; + +/* Receive and Transmit Queues */ +enum { + Q_R1 = 0x0000, /* Receive Queue 1 */ + Q_R2 = 0x0080, /* Receive Queue 2 
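(reached via Q_ADDR(), e.g. Q_ADDR(Q_R2, Q_CSR) = 0x0400 + 0x0080 + 0x34 = 0x04b4)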
*/ + Q_XS1 = 0x0200, /* Synchronous Transmit Queue 1 */ + Q_XA1 = 0x0280, /* Asynchronous Transmit Queue 1 */ + Q_XS2 = 0x0300, /* Synchronous Transmit Queue 2 */ + Q_XA2 = 0x0380, /* Asynchronous Transmit Queue 2 */ +}; + +/* Different MAC Types */ +enum { + SK_MAC_XMAC = 0, /* Xaqti XMAC II */ + SK_MAC_GMAC = 1, /* Marvell GMAC */ +}; + +/* Different PHY Types */ +enum { + SK_PHY_XMAC = 0,/* integrated in XMAC II */ + SK_PHY_BCOM = 1,/* Broadcom BCM5400 */ + SK_PHY_LONE = 2,/* Level One LXT1000 [not supported]*/ + SK_PHY_NAT = 3,/* National DP83891 [not supported] */ + SK_PHY_MARV_COPPER= 4,/* Marvell 88E1011S */ + SK_PHY_MARV_FIBER = 5,/* Marvell 88E1011S working on fiber */ +}; + +/* PHY addresses (bits 12..8 of PHY address reg) */ +enum { + PHY_ADDR_XMAC = 0<<8, + PHY_ADDR_BCOM = 1<<8, + +/* GPHY address (bits 15..11 of SMI control reg) */ + PHY_ADDR_MARV = 0, +}; + +#define RB_ADDR(offs, queue) ((u16)B16_RAM_REGS + (u16)(queue) + (offs)) + +/* Receive MAC FIFO, Receive LED, and Link_Sync regs (GENESIS only) */ +enum { + RX_MFF_EA = 0x0c00,/* 32 bit Receive MAC FIFO End Address */ + RX_MFF_WP = 0x0c04,/* 32 bit Receive MAC FIFO Write Pointer */ + + RX_MFF_RP = 0x0c0c,/* 32 bit Receive MAC FIFO Read Pointer */ + RX_MFF_PC = 0x0c10,/* 32 bit Receive MAC FIFO Packet Cnt */ + RX_MFF_LEV = 0x0c14,/* 32 bit Receive MAC FIFO Level */ + RX_MFF_CTRL1 = 0x0c18,/* 16 bit Receive MAC FIFO Control Reg 1*/ + RX_MFF_STAT_TO = 0x0c1a,/* 8 bit Receive MAC Status Timeout */ + RX_MFF_TIST_TO = 0x0c1b,/* 8 bit Receive MAC Time Stamp Timeout */ + RX_MFF_CTRL2 = 0x0c1c,/* 8 bit Receive MAC FIFO Control Reg 2*/ + RX_MFF_TST1 = 0x0c1d,/* 8 bit Receive MAC FIFO Test Reg 1 */ + RX_MFF_TST2 = 0x0c1e,/* 8 bit Receive MAC FIFO Test Reg 2 */ + + RX_LED_INI = 0x0c20,/* 32 bit Receive LED Cnt Init Value */ + RX_LED_VAL = 0x0c24,/* 32 bit Receive LED Cnt Current Value */ + RX_LED_CTRL = 0x0c28,/* 8 bit Receive LED Cnt Control Reg */ + RX_LED_TST = 0x0c29,/* 8 bit Receive LED Cnt Test Register */ + + LNK_SYNC_INI = 0x0c30,/* 32 bit Link Sync Cnt Init Value */ + LNK_SYNC_VAL = 0x0c34,/* 32 bit Link Sync Cnt Current Value */ + LNK_SYNC_CTRL = 0x0c38,/* 8 bit Link Sync Cnt Control Register */ + LNK_SYNC_TST = 0x0c39,/* 8 bit Link Sync Cnt Test Register */ + LNK_LED_REG = 0x0c3c,/* 8 bit Link LED Register */ +}; + +/* Receive and Transmit MAC FIFO Registers (GENESIS only) */ +/* RX_MFF_CTRL1 16 bit Receive MAC FIFO Control Reg 1 */ +enum { + MFF_ENA_RDY_PAT = 1<<13, /* Enable Ready Patch */ + MFF_DIS_RDY_PAT = 1<<12, /* Disable Ready Patch */ + MFF_ENA_TIM_PAT = 1<<11, /* Enable Timing Patch */ + MFF_DIS_TIM_PAT = 1<<10, /* Disable Timing Patch */ + MFF_ENA_ALM_FUL = 1<<9, /* Enable AlmostFull Sign */ + MFF_DIS_ALM_FUL = 1<<8, /* Disable AlmostFull Sign */ + MFF_ENA_PAUSE = 1<<7, /* Enable Pause Signaling */ + MFF_DIS_PAUSE = 1<<6, /* Disable Pause Signaling */ + MFF_ENA_FLUSH = 1<<5, /* Enable Frame Flushing */ + MFF_DIS_FLUSH = 1<<4, /* Disable Frame Flushing */ + MFF_ENA_TIST = 1<<3, /* Enable Time Stamp Gener */ + MFF_DIS_TIST = 1<<2, /* Disable Time Stamp Gener */ + MFF_CLR_INTIST = 1<<1, /* Clear IRQ No Time Stamp */ + MFF_CLR_INSTAT = 1<<0, /* Clear IRQ No Status */ + MFF_RX_CTRL_DEF = MFF_ENA_TIM_PAT, +}; + +/* TX_MFF_CTRL1 16 bit Transmit MAC FIFO Control Reg 1 */ +enum { + MFF_CLR_PERR = 1<<15, /* Clear Parity Error IRQ */ + + MFF_ENA_PKT_REC = 1<<13, /* Enable Packet Recovery */ + MFF_DIS_PKT_REC = 1<<12, /* Disable Packet Recovery */ + + MFF_ENA_W4E = 1<<7, /* Enable Wait for Empty */ + MFF_DIS_W4E = 1<<6, /* 
Disable Wait for Empty */ + + MFF_ENA_LOOPB = 1<<3, /* Enable Loopback */ + MFF_DIS_LOOPB = 1<<2, /* Disable Loopback */ + MFF_CLR_MAC_RST = 1<<1, /* Clear XMAC Reset */ + MFF_SET_MAC_RST = 1<<0, /* Set XMAC Reset */ + + MFF_TX_CTRL_DEF = MFF_ENA_PKT_REC | (u16) MFF_ENA_TIM_PAT | MFF_ENA_FLUSH, +}; + + +/* RX_MFF_TST2 8 bit Receive MAC FIFO Test Register 2 */ +/* TX_MFF_TST2 8 bit Transmit MAC FIFO Test Register 2 */ +enum { + MFF_WSP_T_ON = 1<<6, /* Tx: Write Shadow Ptr TestOn */ + MFF_WSP_T_OFF = 1<<5, /* Tx: Write Shadow Ptr TstOff */ + MFF_WSP_INC = 1<<4, /* Tx: Write Shadow Ptr Increment */ + MFF_PC_DEC = 1<<3, /* Packet Counter Decrement */ + MFF_PC_T_ON = 1<<2, /* Packet Counter Test On */ + MFF_PC_T_OFF = 1<<1, /* Packet Counter Test Off */ + MFF_PC_INC = 1<<0, /* Packet Counter Increment */ +}; + +/* RX_MFF_TST1 8 bit Receive MAC FIFO Test Register 1 */ +/* TX_MFF_TST1 8 bit Transmit MAC FIFO Test Register 1 */ +enum { + MFF_WP_T_ON = 1<<6, /* Write Pointer Test On */ + MFF_WP_T_OFF = 1<<5, /* Write Pointer Test Off */ + MFF_WP_INC = 1<<4, /* Write Pointer Increm */ + + MFF_RP_T_ON = 1<<2, /* Read Pointer Test On */ + MFF_RP_T_OFF = 1<<1, /* Read Pointer Test Off */ + MFF_RP_DEC = 1<<0, /* Read Pointer Decrement */ +}; + +/* RX_MFF_CTRL2 8 bit Receive MAC FIFO Control Reg 2 */ +/* TX_MFF_CTRL2 8 bit Transmit MAC FIFO Control Reg 2 */ +enum { + MFF_ENA_OP_MD = 1<<3, /* Enable Operation Mode */ + MFF_DIS_OP_MD = 1<<2, /* Disable Operation Mode */ + MFF_RST_CLR = 1<<1, /* Clear MAC FIFO Reset */ + MFF_RST_SET = 1<<0, /* Set MAC FIFO Reset */ +}; + + +/* Link LED Counter Registers (GENESIS only) */ + +/* RX_LED_CTRL 8 bit Receive LED Cnt Control Reg */ +/* TX_LED_CTRL 8 bit Transmit LED Cnt Control Reg */ +/* LNK_SYNC_CTRL 8 bit Link Sync Cnt Control Register */ +enum { + LED_START = 1<<2, /* Start Timer */ + LED_STOP = 1<<1, /* Stop Timer */ + LED_STATE = 1<<0, /* Rx/Tx: LED State, 1=LED on */ +}; + +/* RX_LED_TST 8 bit Receive LED Cnt Test Register */ +/* TX_LED_TST 8 bit Transmit LED Cnt Test Register */ +/* LNK_SYNC_TST 8 bit Link Sync Cnt Test Register */ +enum { + LED_T_ON = 1<<2, /* LED Counter Test mode On */ + LED_T_OFF = 1<<1, /* LED Counter Test mode Off */ + LED_T_STEP = 1<<0, /* LED Counter Step */ +}; + +/* LNK_LED_REG 8 bit Link LED Register */ +enum { + LED_BLK_ON = 1<<5, /* Link LED Blinking On */ + LED_BLK_OFF = 1<<4, /* Link LED Blinking Off */ + LED_SYNC_ON = 1<<3, /* Use Sync Wire to switch LED */ + LED_SYNC_OFF = 1<<2, /* Disable Sync Wire Input */ + LED_ON = 1<<1, /* switch LED on */ + LED_OFF = 1<<0, /* switch LED off */ +}; + +/* Receive GMAC FIFO (YUKON) */ +enum { + RX_GMF_EA = 0x0c40,/* 32 bit Rx GMAC FIFO End Address */ + RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. 
*/ + RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */ + RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */ + RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */ + RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */ + RX_GMF_WLEV = 0x0c68,/* 32 bit Rx GMAC FIFO Write Level */ + RX_GMF_RP = 0x0c70,/* 32 bit Rx GMAC FIFO Read Pointer */ + RX_GMF_RLEV = 0x0c78,/* 32 bit Rx GMAC FIFO Read Level */ +}; + + +/* TXA_TEST 8 bit Tx Arbiter Test Register */ +enum { + TXA_INT_T_ON = 1<<5, /* Tx Arb Interval Timer Test On */ + TXA_INT_T_OFF = 1<<4, /* Tx Arb Interval Timer Test Off */ + TXA_INT_T_STEP = 1<<3, /* Tx Arb Interval Timer Step */ + TXA_LIM_T_ON = 1<<2, /* Tx Arb Limit Timer Test On */ + TXA_LIM_T_OFF = 1<<1, /* Tx Arb Limit Timer Test Off */ + TXA_LIM_T_STEP = 1<<0, /* Tx Arb Limit Timer Step */ +}; + +/* TXA_STAT 8 bit Tx Arbiter Status Register */ +enum { + TXA_PRIO_XS = 1<<0, /* sync queue has prio to send */ +}; + + +/* Q_BC 32 bit Current Byte Counter */ + +/* BMU Control Status Registers */ +/* B0_R1_CSR 32 bit BMU Ctrl/Stat Rx Queue 1 */ +/* B0_R2_CSR 32 bit BMU Ctrl/Stat Rx Queue 2 */ +/* B0_XA1_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 1 */ +/* B0_XS1_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 1 */ +/* B0_XA2_CSR 32 bit BMU Ctrl/Stat Sync Tx Queue 2 */ +/* B0_XS2_CSR 32 bit BMU Ctrl/Stat Async Tx Queue 2 */ +/* Q_CSR 32 bit BMU Control/Status Register */ + +enum { + CSR_SV_IDLE = 1<<24, /* BMU SM Idle */ + + CSR_DESC_CLR = 1<<21, /* Clear Reset for Descr */ + CSR_DESC_SET = 1<<20, /* Set Reset for Descr */ + CSR_FIFO_CLR = 1<<19, /* Clear Reset for FIFO */ + CSR_FIFO_SET = 1<<18, /* Set Reset for FIFO */ + CSR_HPI_RUN = 1<<17, /* Release HPI SM */ + CSR_HPI_RST = 1<<16, /* Reset HPI SM to Idle */ + CSR_SV_RUN = 1<<15, /* Release Supervisor SM */ + CSR_SV_RST = 1<<14, /* Reset Supervisor SM */ + CSR_DREAD_RUN = 1<<13, /* Release Descr Read SM */ + CSR_DREAD_RST = 1<<12, /* Reset Descr Read SM */ + CSR_DWRITE_RUN = 1<<11, /* Release Descr Write SM */ + CSR_DWRITE_RST = 1<<10, /* Reset Descr Write SM */ + CSR_TRANS_RUN = 1<<9, /* Release Transfer SM */ + CSR_TRANS_RST = 1<<8, /* Reset Transfer SM */ + CSR_ENA_POL = 1<<7, /* Enable Descr Polling */ + CSR_DIS_POL = 1<<6, /* Disable Descr Polling */ + CSR_STOP = 1<<5, /* Stop Rx/Tx Queue */ + CSR_START = 1<<4, /* Start Rx/Tx Queue */ + CSR_IRQ_CL_P = 1<<3, /* (Rx) Clear Parity IRQ */ + CSR_IRQ_CL_B = 1<<2, /* Clear EOB IRQ */ + CSR_IRQ_CL_F = 1<<1, /* Clear EOF IRQ */ + CSR_IRQ_CL_C = 1<<0, /* Clear ERR IRQ */ +}; + +#define CSR_SET_RESET (CSR_DESC_SET | CSR_FIFO_SET | CSR_HPI_RST |\ + CSR_SV_RST | CSR_DREAD_RST | CSR_DWRITE_RST |\ + CSR_TRANS_RST) +#define CSR_CLR_RESET (CSR_DESC_CLR | CSR_FIFO_CLR | CSR_HPI_RUN |\ + CSR_SV_RUN | CSR_DREAD_RUN | CSR_DWRITE_RUN |\ + CSR_TRANS_RUN) + +/* Q_F 32 bit Flag Register */ +enum { + F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */ + F_EMPTY = 1<<27, /* Tx FIFO: empty flag */ + F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */ + F_WM_REACHED = 1<<25, /* Watermark reached */ + + F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */ + F_WATER_MARK = 0x0007ffL, /* Bit 10.. 
0: Watermark */ +}; + +/* RAM Buffer Register Offsets, use RB_ADDR(Queue, Offs) to access */ +/* RB_START 32 bit RAM Buffer Start Address */ +/* RB_END 32 bit RAM Buffer End Address */ +/* RB_WP 32 bit RAM Buffer Write Pointer */ +/* RB_RP 32 bit RAM Buffer Read Pointer */ +/* RB_RX_UTPP 32 bit Rx Upper Threshold, Pause Pack */ +/* RB_RX_LTPP 32 bit Rx Lower Threshold, Pause Pack */ +/* RB_RX_UTHP 32 bit Rx Upper Threshold, High Prio */ +/* RB_RX_LTHP 32 bit Rx Lower Threshold, High Prio */ +/* RB_PC 32 bit RAM Buffer Packet Counter */ +/* RB_LEV 32 bit RAM Buffer Level Register */ + +#define RB_MSK 0x0007ffff /* Bit 18.. 0: RAM Buffer Pointer Bits */ +/* RB_TST2 8 bit RAM Buffer Test Register 2 */ +/* RB_TST1 8 bit RAM Buffer Test Register 1 */ + +/* RB_CTRL 8 bit RAM Buffer Control Register */ +enum { + RB_ENA_STFWD = 1<<5, /* Enable Store & Forward */ + RB_DIS_STFWD = 1<<4, /* Disable Store & Forward */ + RB_ENA_OP_MD = 1<<3, /* Enable Operation Mode */ + RB_DIS_OP_MD = 1<<2, /* Disable Operation Mode */ + RB_RST_CLR = 1<<1, /* Clear RAM Buf STM Reset */ + RB_RST_SET = 1<<0, /* Set RAM Buf STM Reset */ +}; + +/* Transmit MAC FIFO and Transmit LED Registers (GENESIS only), */ +enum { + TX_MFF_EA = 0x0d00,/* 32 bit Transmit MAC FIFO End Address */ + TX_MFF_WP = 0x0d04,/* 32 bit Transmit MAC FIFO WR Pointer */ + TX_MFF_WSP = 0x0d08,/* 32 bit Transmit MAC FIFO WR Shadow Ptr */ + TX_MFF_RP = 0x0d0c,/* 32 bit Transmit MAC FIFO RD Pointer */ + TX_MFF_PC = 0x0d10,/* 32 bit Transmit MAC FIFO Packet Cnt */ + TX_MFF_LEV = 0x0d14,/* 32 bit Transmit MAC FIFO Level */ + TX_MFF_CTRL1 = 0x0d18,/* 16 bit Transmit MAC FIFO Ctrl Reg 1 */ + TX_MFF_WAF = 0x0d1a,/* 8 bit Transmit MAC Wait after flush */ + + TX_MFF_CTRL2 = 0x0d1c,/* 8 bit Transmit MAC FIFO Ctrl Reg 2 */ + TX_MFF_TST1 = 0x0d1d,/* 8 bit Transmit MAC FIFO Test Reg 1 */ + TX_MFF_TST2 = 0x0d1e,/* 8 bit Transmit MAC FIFO Test Reg 2 */ + + TX_LED_INI = 0x0d20,/* 32 bit Transmit LED Cnt Init Value */ + TX_LED_VAL = 0x0d24,/* 32 bit Transmit LED Cnt Current Val */ + TX_LED_CTRL = 0x0d28,/* 8 bit Transmit LED Cnt Control Reg */ + TX_LED_TST = 0x0d29,/* 8 bit Transmit LED Cnt Test Reg */ +}; + +/* Counter and Timer constants, for a host clock of 62.5 MHz */ +#define SK_XMIT_DUR 0x002faf08UL /* 50 ms */ +#define SK_BLK_DUR 0x01dcd650UL /* 500 ms */ + +#define SK_DPOLL_DEF 0x00ee6b28UL /* 250 ms at 62.5 MHz */ + +#define SK_DPOLL_MAX 0x00ffffffUL /* 268 ms at 62.5 MHz */ + /* 215 ms at 78.12 MHz */ + +#define SK_FACT_62 100 /* is given in percent */ +#define SK_FACT_53 85 /* on GENESIS: 53.12 MHz */ +#define SK_FACT_78 125 /* on YUKON: 78.12 MHz */ + + +/* Transmit GMAC FIFO (YUKON only) */ +enum { + TX_GMF_EA = 0x0d40,/* 32 bit Tx GMAC FIFO End Address */ + TX_GMF_AE_THR = 0x0d44,/* 32 bit Tx GMAC FIFO Almost Empty Thresh.*/ + TX_GMF_CTRL_T = 0x0d48,/* 32 bit Tx GMAC FIFO Control/Test */ + + TX_GMF_WP = 0x0d60,/* 32 bit Tx GMAC FIFO Write Pointer */ + TX_GMF_WSP = 0x0d64,/* 32 bit Tx GMAC FIFO Write Shadow Ptr. 
*/ + TX_GMF_WLEV = 0x0d68,/* 32 bit Tx GMAC FIFO Write Level */ + + TX_GMF_RP = 0x0d70,/* 32 bit Tx GMAC FIFO Read Pointer */ + TX_GMF_RSTP = 0x0d74,/* 32 bit Tx GMAC FIFO Restart Pointer */ + TX_GMF_RLEV = 0x0d78,/* 32 bit Tx GMAC FIFO Read Level */ + + /* Descriptor Poll Timer Registers */ + B28_DPT_INI = 0x0e00,/* 24 bit Descriptor Poll Timer Init Val */ + B28_DPT_VAL = 0x0e04,/* 24 bit Descriptor Poll Timer Curr Val */ + B28_DPT_CTRL = 0x0e08,/* 8 bit Descriptor Poll Timer Ctrl Reg */ + + B28_DPT_TST = 0x0e0a,/* 8 bit Descriptor Poll Timer Test Reg */ + + /* Time Stamp Timer Registers (YUKON only) */ + GMAC_TI_ST_VAL = 0x0e14,/* 32 bit Time Stamp Timer Curr Val */ + GMAC_TI_ST_CTRL = 0x0e18,/* 8 bit Time Stamp Timer Ctrl Reg */ + GMAC_TI_ST_TST = 0x0e1a,/* 8 bit Time Stamp Timer Test Reg */ +}; + + +enum { + LINKLED_OFF = 0x01, + LINKLED_ON = 0x02, + LINKLED_LINKSYNC_OFF = 0x04, + LINKLED_LINKSYNC_ON = 0x08, + LINKLED_BLINK_OFF = 0x10, + LINKLED_BLINK_ON = 0x20, +}; + +/* GMAC and GPHY Control Registers (YUKON only) */ +enum { + GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */ + GPHY_CTRL = 0x0f04,/* 32 bit GPHY Control Reg */ + GMAC_IRQ_SRC = 0x0f08,/* 8 bit GMAC Interrupt Source Reg */ + GMAC_IRQ_MSK = 0x0f0c,/* 8 bit GMAC Interrupt Mask Reg */ + GMAC_LINK_CTRL = 0x0f10,/* 16 bit Link Control Reg */ + +/* Wake-up Frame Pattern Match Control Registers (YUKON only) */ + + WOL_REG_OFFS = 0x20,/* HW-Bug: Address is + 0x20 against spec. */ + + WOL_CTRL_STAT = 0x0f20,/* 16 bit WOL Control/Status Reg */ + WOL_MATCH_CTL = 0x0f22,/* 8 bit WOL Match Control Reg */ + WOL_MATCH_RES = 0x0f23,/* 8 bit WOL Match Result Reg */ + WOL_MAC_ADDR = 0x0f24,/* 32 bit WOL MAC Address */ + WOL_PATT_RPTR = 0x0f2c,/* 8 bit WOL Pattern Read Pointer */ + +/* WOL Pattern Length Registers (YUKON only) */ + + WOL_PATT_LEN_LO = 0x0f30,/* 32 bit WOL Pattern Length 3..0 */ + WOL_PATT_LEN_HI = 0x0f34,/* 24 bit WOL Pattern Length 6..4 */ + +/* WOL Pattern Counter Registers (YUKON only) */ + + WOL_PATT_CNT_0 = 0x0f38,/* 32 bit WOL Pattern Counter 3..0 */ + WOL_PATT_CNT_4 = 0x0f3c,/* 24 bit WOL Pattern Counter 6..4 */ +}; +#define WOL_REGS(port, x) (x + (port)*0x80) + +enum { + WOL_PATT_RAM_1 = 0x1000,/* WOL Pattern RAM Link 1 */ + WOL_PATT_RAM_2 = 0x1400,/* WOL Pattern RAM Link 2 */ +}; +#define WOL_PATT_RAM_BASE(port) (WOL_PATT_RAM_1 + (port)*0x400) + +enum { + BASE_XMAC_1 = 0x2000,/* XMAC 1 registers */ + BASE_GMAC_1 = 0x2800,/* GMAC 1 registers */ + BASE_XMAC_2 = 0x3000,/* XMAC 2 registers */ + BASE_GMAC_2 = 0x3800,/* GMAC 2 registers */ +}; + +/* + * Receive Frame Status Encoding + */ +enum { + XMR_FS_LEN = 0x3fff<<18, /* Bit 31..18: Rx Frame Length */ + XMR_FS_LEN_SHIFT = 18, + XMR_FS_2L_VLAN = 1<<17, /* Bit 17: tagged wh 2Lev VLAN ID*/ + XMR_FS_1_VLAN = 1<<16, /* Bit 16: tagged wh 1ev VLAN ID*/ + XMR_FS_BC = 1<<15, /* Bit 15: Broadcast Frame */ + XMR_FS_MC = 1<<14, /* Bit 14: Multicast Frame */ + XMR_FS_UC = 1<<13, /* Bit 13: Unicast Frame */ + + XMR_FS_BURST = 1<<11, /* Bit 11: Burst Mode */ + XMR_FS_CEX_ERR = 1<<10, /* Bit 10: Carrier Ext. 
Error */ + XMR_FS_802_3 = 1<<9, /* Bit 9: 802.3 Frame */ + XMR_FS_COL_ERR = 1<<8, /* Bit 8: Collision Error */ + XMR_FS_CAR_ERR = 1<<7, /* Bit 7: Carrier Event Error */ + XMR_FS_LEN_ERR = 1<<6, /* Bit 6: In-Range Length Error */ + XMR_FS_FRA_ERR = 1<<5, /* Bit 5: Framing Error */ + XMR_FS_RUNT = 1<<4, /* Bit 4: Runt Frame */ + XMR_FS_LNG_ERR = 1<<3, /* Bit 3: Giant (Jumbo) Frame */ + XMR_FS_FCS_ERR = 1<<2, /* Bit 2: Frame Check Sequ Err */ + XMR_FS_ERR = 1<<1, /* Bit 1: Frame Error */ + XMR_FS_MCTRL = 1<<0, /* Bit 0: MAC Control Packet */ + +/* + * XMR_FS_ERR will be set if + * XMR_FS_FCS_ERR, XMR_FS_LNG_ERR, XMR_FS_RUNT, + * XMR_FS_FRA_ERR, XMR_FS_LEN_ERR, or XMR_FS_CEX_ERR + * is set. XMR_FS_LNG_ERR and XMR_FS_LEN_ERR will issue + * XMR_FS_ERR unless the corresponding bit in the Receive Command + * Register is set. + */ +};
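Taken together, the XMR_FS bits give the receive path a one-bit error summary: per the comment above, XMR_FS_ERR folds the individual FCS, length, runt, framing and carrier-extension errors into a single test. A minimal sketch of how a caller might vet a GENESIS receive status word — not part of the commit itself; the function name and the dma_len cross-check are illustrative, only the XMR_FS_* constants come from this header:

static int xmr_frame_ok(u32 status, unsigned int dma_len)
{
	/* one summary test instead of six individual error bits */
	if (status & XMR_FS_ERR)
		return 0;

	/* Bit 31..18 carry the frame length reported by the MAC;
	 * cross-check it against what the DMA engine delivered */
	if (((status & XMR_FS_LEN) >> XMR_FS_LEN_SHIFT) != dma_len)
		return 0;

	return 1;
}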
/* + * XMAC-PHY Registers, indirect addressed over the XMAC + */ +enum { + PHY_XMAC_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_XMAC_STAT = 0x01,/* 16 bit r/w PHY Status Register */ + PHY_XMAC_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_XMAC_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_XMAC_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ + PHY_XMAC_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Ability Reg */ + PHY_XMAC_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_XMAC_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_XMAC_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + + PHY_XMAC_EXT_STAT = 0x0f,/* 16 bit r/o Ext Status Register */ + PHY_XMAC_RES_ABI = 0x10,/* 16 bit r/o PHY Resolved Ability */ +}; +/* + * Broadcom-PHY Registers, indirect addressed over XMAC + */ +enum { + PHY_BCOM_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_BCOM_STAT = 0x01,/* 16 bit r/o PHY Status Register */ + PHY_BCOM_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_BCOM_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_BCOM_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ + PHY_BCOM_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */ + PHY_BCOM_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_BCOM_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_BCOM_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + /* Broadcom-specific registers */ + PHY_BCOM_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */ + PHY_BCOM_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */ + PHY_BCOM_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */ + PHY_BCOM_P_EXT_CTRL = 0x10,/* 16 bit r/w PHY Extended Ctrl Reg */ + PHY_BCOM_P_EXT_STAT = 0x11,/* 16 bit r/o PHY Extended Stat Reg */ + PHY_BCOM_RE_CTR = 0x12,/* 16 bit r/w Receive Error Counter */ + PHY_BCOM_FC_CTR = 0x13,/* 16 bit r/w False Carrier Sense Cnt */ + PHY_BCOM_RNO_CTR = 0x14,/* 16 bit r/w Receiver NOT_OK Cnt */ + + PHY_BCOM_AUX_CTRL = 0x18,/* 16 bit r/w Auxiliary Control Reg */ + PHY_BCOM_AUX_STAT = 0x19,/* 16 bit r/o Auxiliary Stat Summary */ + PHY_BCOM_INT_STAT = 0x1a,/* 16 bit r/o Interrupt Status Reg */ + PHY_BCOM_INT_MASK = 0x1b,/* 16 bit r/w Interrupt Mask Reg */ +}; + +/* + * Marvell-PHY Registers, indirect addressed over GMAC + */ +enum { + PHY_MARV_CTRL = 0x00,/* 16 bit r/w PHY Control Register */ + PHY_MARV_STAT = 0x01,/* 16 bit r/o PHY Status Register */ + PHY_MARV_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */ + PHY_MARV_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */ + PHY_MARV_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */ + PHY_MARV_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */ + PHY_MARV_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */ + PHY_MARV_NEPG = 0x07,/* 16 bit r/w Next Page Register */ + PHY_MARV_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */ + /* Marvell-specific registers */ + PHY_MARV_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */ + PHY_MARV_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */ + PHY_MARV_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */ + PHY_MARV_PHY_CTRL = 0x10,/* 16 bit r/w PHY Specific Ctrl Reg */ + PHY_MARV_PHY_STAT = 0x11,/* 16 bit r/o PHY Specific Stat Reg */ + PHY_MARV_INT_MASK = 0x12,/* 16 bit r/w Interrupt Mask Reg */ + PHY_MARV_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */ + PHY_MARV_EXT_CTRL = 0x14,/* 16 bit r/w Ext. PHY Specific Ctrl */ + PHY_MARV_RXE_CNT = 0x15,/* 16 bit r/w Receive Error Counter */ + PHY_MARV_EXT_ADR = 0x16,/* 16 bit r/w Ext. Ad. for Cable Diag. */ + PHY_MARV_PORT_IRQ = 0x17,/* 16 bit r/o Port 0 IRQ (88E1111 only) */ + PHY_MARV_LED_CTRL = 0x18,/* 16 bit r/w LED Control Reg */ + PHY_MARV_LED_OVER = 0x19,/* 16 bit r/w Manual LED Override Reg */ + PHY_MARV_EXT_CTRL_2 = 0x1a,/* 16 bit r/w Ext. PHY Specific Ctrl 2 */ + PHY_MARV_EXT_P_STAT = 0x1b,/* 16 bit r/w Ext. PHY Spec. Stat Reg */ + PHY_MARV_CABLE_DIAG = 0x1c,/* 16 bit r/o Cable Diagnostic Reg */ + PHY_MARV_PAGE_ADDR = 0x1d,/* 16 bit r/w Extended Page Address Reg */ + PHY_MARV_PAGE_DATA = 0x1e,/* 16 bit r/w Extended Page Data Reg */ + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ + PHY_MARV_FE_LED_PAR = 0x16,/* 16 bit r/w LED Parallel Select Reg. */ + PHY_MARV_FE_LED_SER = 0x17,/* 16 bit r/w LED Stream Select S. LED */ + PHY_MARV_FE_VCT_TX = 0x1a,/* 16 bit r/w VCT Reg. for TXP/N Pins */ + PHY_MARV_FE_VCT_RX = 0x1b,/* 16 bit r/o VCT Reg. for RXP/N Pins */ + PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */ +}; + +enum { + PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */ + PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */ + PHY_CT_SPS_LSB = 1<<13, /* Bit 13: Speed select, lower bit */ + PHY_CT_ANE = 1<<12, /* Bit 12: Auto-Negotiation Enabled */ + PHY_CT_PDOWN = 1<<11, /* Bit 11: Power Down Mode */ + PHY_CT_ISOL = 1<<10, /* Bit 10: Isolate Mode */ + PHY_CT_RE_CFG = 1<<9, /* Bit 9: (sc) Restart Auto-Negotiation */ + PHY_CT_DUP_MD = 1<<8, /* Bit 8: Duplex Mode */ + PHY_CT_COL_TST = 1<<7, /* Bit 7: Collision Test enabled */ + PHY_CT_SPS_MSB = 1<<6, /* Bit 6: Speed select, upper bit */ +}; + +enum { + PHY_CT_SP1000 = PHY_CT_SPS_MSB, /* enable speed of 1000 Mbps */ + PHY_CT_SP100 = PHY_CT_SPS_LSB, /* enable speed of 100 Mbps */ + PHY_CT_SP10 = 0, /* enable speed of 10 Mbps */ +}; + +enum { + PHY_ST_EXT_ST = 1<<8, /* Bit 8: Extended Status Present */ + + PHY_ST_PRE_SUP = 1<<6, /* Bit 6: Preamble Suppression */ + PHY_ST_AN_OVER = 1<<5, /* Bit 5: Auto-Negotiation Over */ + PHY_ST_REM_FLT = 1<<4, /* Bit 4: Remote Fault Condition Occurred */ + PHY_ST_AN_CAP = 1<<3, /* Bit 3: Auto-Negotiation Capability */ + PHY_ST_LSYNC = 1<<2, /* Bit 2: Link Synchronized */ + PHY_ST_JAB_DET = 1<<1, /* Bit 1: Jabber Detected */ + PHY_ST_EXT_REG = 1<<0, /* Bit 0: Extended Register available */ +}; + +enum { + PHY_I1_OUI_MSK = 0x3f<<10, /* Bit 15..10: Organization Unique ID */ + PHY_I1_MOD_NUM = 0x3f<<4, /* Bit 9.. 4: Model Number */ + PHY_I1_REV_MSK = 0xf, /* Bit 3.. 
0: Revision Number */ +}; + +/* different Broadcom PHY Ids */ +enum { + PHY_BCOM_ID1_A1 = 0x6041, + PHY_BCOM_ID1_B2 = 0x6043, + PHY_BCOM_ID1_C0 = 0x6044, + PHY_BCOM_ID1_C5 = 0x6047, +}; + +/* different Marvell PHY Ids */ +enum { + PHY_MARV_ID0_VAL= 0x0141, /* Marvell Unique Identifier */ + PHY_MARV_ID1_B0 = 0x0C23, /* Yukon (PHY 88E1011) */ + PHY_MARV_ID1_B2 = 0x0C25, /* Yukon-Plus (PHY 88E1011) */ + PHY_MARV_ID1_C2 = 0x0CC2, /* Yukon-EC (PHY 88E1111) */ + PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ +}; + +/* Advertisement register bits */ +enum { + PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ + PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ + PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */ + + PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */ + PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */ + PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */ + PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */ + PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */ + PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */ + PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */ + PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */ + PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/ + PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA, + PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL | + PHY_AN_100HALF | PHY_AN_100FULL, +}; + +/* Xmac Specific */ +enum { + PHY_X_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ + PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ + PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */ + + PHY_X_AN_PAUSE = 3<<7,/* Bit 8.. 7: Pause Bits */ + PHY_X_AN_HD = 1<<6, /* Bit 6: Half Duplex */ + PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */ +}; + +/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */ +enum { + PHY_X_P_NO_PAUSE= 0<<7,/* Bit 8..7: no Pause Mode */ + PHY_X_P_SYM_MD = 1<<7, /* Bit 8..7: symmetric Pause Mode */ + PHY_X_P_ASYM_MD = 2<<7,/* Bit 8..7: asymmetric Pause Mode */ + PHY_X_P_BOTH_MD = 3<<7,/* Bit 8..7: both Pause Mode */ +}; + + +/***** PHY_XMAC_EXT_STAT 16 bit r/w Extended Status Register *****/ +enum { + PHY_X_EX_FD = 1<<15, /* Bit 15: Device Supports Full Duplex */ + PHY_X_EX_HD = 1<<14, /* Bit 14: Device Supports Half Duplex */ +}; + +/***** PHY_XMAC_RES_ABI 16 bit r/o PHY Resolved Ability *****/ +enum { + PHY_X_RS_PAUSE = 3<<7, /* Bit 8..7: selected Pause Mode */ + PHY_X_RS_HD = 1<<6, /* Bit 6: Half Duplex Mode selected */ + PHY_X_RS_FD = 1<<5, /* Bit 5: Full Duplex Mode selected */ + PHY_X_RS_ABLMIS = 1<<4, /* Bit 4: duplex or pause cap mismatch */ + PHY_X_RS_PAUMIS = 1<<3, /* Bit 3: pause capability mismatch */ +}; + +/* Remote Fault Bits (PHY_X_AN_RFB) encoding */ +enum { + X_RFB_OK = 0<<12,/* Bit 13..12 No errors, Link OK */ + X_RFB_LF = 1<<12,/* Bit 13..12 Link Failure */ + X_RFB_OFF = 2<<12,/* Bit 13..12 Offline */ + X_RFB_AN_ERR = 3<<12,/* Bit 13..12 Auto-Negotiation Error */ +}; + +/* Broadcom-Specific */ +/***** PHY_BCOM_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +enum { + PHY_B_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */ + PHY_B_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */ + PHY_B_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */ + PHY_B_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */ + PHY_B_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */ + PHY_B_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */ +}; + +/***** PHY_BCOM_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +/***** 
PHY_MARV_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/ +enum { + PHY_B_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */ + PHY_B_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */ + PHY_B_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */ + PHY_B_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */ + PHY_B_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */ + PHY_B_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */ + /* Bit 9..8: reserved */ + PHY_B_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */ +}; + +/***** PHY_BCOM_EXT_STAT 16 bit r/o Extended Status Register *****/ +enum { + PHY_B_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */ + PHY_B_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */ + PHY_B_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */ + PHY_B_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */ +}; + +/***** PHY_BCOM_P_EXT_CTRL 16 bit r/w PHY Extended Control Reg *****/ +enum { + PHY_B_PEC_MAC_PHY = 1<<15, /* Bit 15: 10BIT/GMI-Interface */ + PHY_B_PEC_DIS_CROSS = 1<<14, /* Bit 14: Disable MDI Crossover */ + PHY_B_PEC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */ + PHY_B_PEC_INT_DIS = 1<<12, /* Bit 12: Interrupts Disabled */ + PHY_B_PEC_F_INT = 1<<11, /* Bit 11: Force Interrupt */ + PHY_B_PEC_BY_45 = 1<<10, /* Bit 10: Bypass 4B5B-Decoder */ + PHY_B_PEC_BY_SCR = 1<<9, /* Bit 9: Bypass Scrambler */ + PHY_B_PEC_BY_MLT3 = 1<<8, /* Bit 8: Bypass MLT3 Encoder */ + PHY_B_PEC_BY_RXA = 1<<7, /* Bit 7: Bypass Rx Alignm. */ + PHY_B_PEC_RES_SCR = 1<<6, /* Bit 6: Reset Scrambler */ + PHY_B_PEC_EN_LTR = 1<<5, /* Bit 5: Ena LED Traffic Mode */ + PHY_B_PEC_LED_ON = 1<<4, /* Bit 4: Force LED's on */ + PHY_B_PEC_LED_OFF = 1<<3, /* Bit 3: Force LED's off */ + PHY_B_PEC_EX_IPG = 1<<2, /* Bit 2: Extend Tx IPG Mode */ + PHY_B_PEC_3_LED = 1<<1, /* Bit 1: Three Link LED mode */ + PHY_B_PEC_HIGH_LA = 1<<0, /* Bit 0: GMII FIFO Elasticy */ +}; + +/***** PHY_BCOM_P_EXT_STAT 16 bit r/o PHY Extended Status Reg *****/ +enum { + PHY_B_PES_CROSS_STAT = 1<<13, /* Bit 13: MDI Crossover Status */ + PHY_B_PES_INT_STAT = 1<<12, /* Bit 12: Interrupt Status */ + PHY_B_PES_RRS = 1<<11, /* Bit 11: Remote Receiver Stat. */ + PHY_B_PES_LRS = 1<<10, /* Bit 10: Local Receiver Stat. 
*/ + PHY_B_PES_LOCKED = 1<<9, /* Bit 9: Locked */ + PHY_B_PES_LS = 1<<8, /* Bit 8: Link Status */ + PHY_B_PES_RF = 1<<7, /* Bit 7: Remote Fault */ + PHY_B_PES_CE_ER = 1<<6, /* Bit 6: Carrier Ext Error */ + PHY_B_PES_BAD_SSD = 1<<5, /* Bit 5: Bad SSD */ + PHY_B_PES_BAD_ESD = 1<<4, /* Bit 4: Bad ESD */ + PHY_B_PES_RX_ER = 1<<3, /* Bit 3: Receive Error */ + PHY_B_PES_TX_ER = 1<<2, /* Bit 2: Transmit Error */ + PHY_B_PES_LOCK_ER = 1<<1, /* Bit 1: Lock Error */ + PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */ +}; + +/* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/ +/* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/ +enum { + PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */ + + PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */ + PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */ +}; + + +/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/ +enum { + PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */ + +/***** PHY_BCOM_RNO_CTR 16 bit r/w Receive NOT_OK Counter *****/ + PHY_B_RC_LOC_MSK = 0xff00, /* Bit 15..8: Local Rx NOT_OK cnt */ + PHY_B_RC_REM_MSK = 0x00ff, /* Bit 7..0: Remote Rx NOT_OK cnt */ + +/***** PHY_BCOM_AUX_CTRL 16 bit r/w Auxiliary Control Reg *****/ + PHY_B_AC_L_SQE = 1<<15, /* Bit 15: Low Squelch */ + PHY_B_AC_LONG_PACK = 1<<14, /* Bit 14: Rx Long Packets */ + PHY_B_AC_ER_CTRL = 3<<12,/* Bit 13..12: Edgerate Control */ + /* Bit 11: reserved */ + PHY_B_AC_TX_TST = 1<<10, /* Bit 10: Tx test bit, always 1 */ + /* Bit 9.. 8: reserved */ + PHY_B_AC_DIS_PRF = 1<<7, /* Bit 7: dis part resp filter */ + /* Bit 6: reserved */ + PHY_B_AC_DIS_PM = 1<<5, /* Bit 5: dis power management */ + /* Bit 4: reserved */ + PHY_B_AC_DIAG = 1<<3, /* Bit 3: Diagnostic Mode */ +}; + +/***** PHY_BCOM_AUX_STAT 16 bit r/o Auxiliary Status Reg *****/ +enum { + PHY_B_AS_AN_C = 1<<15, /* Bit 15: AutoNeg complete */ + PHY_B_AS_AN_CA = 1<<14, /* Bit 14: AN Complete Ack */ + PHY_B_AS_ANACK_D = 1<<13, /* Bit 13: AN Ack Detect */ + PHY_B_AS_ANAB_D = 1<<12, /* Bit 12: AN Ability Detect */ + PHY_B_AS_NPW = 1<<11, /* Bit 11: AN Next Page Wait */ + PHY_B_AS_AN_RES_MSK = 7<<8,/* Bit 10..8: AN HDC */ + PHY_B_AS_PDF = 1<<7, /* Bit 7: Parallel Detect. Fault */ + PHY_B_AS_RF = 1<<6, /* Bit 6: Remote Fault */ + PHY_B_AS_ANP_R = 1<<5, /* Bit 5: AN Page Received */ + PHY_B_AS_LP_ANAB = 1<<4, /* Bit 4: LP AN Ability */ + PHY_B_AS_LP_NPAB = 1<<3, /* Bit 3: LP Next Page Ability */ + PHY_B_AS_LS = 1<<2, /* Bit 2: Link Status */ + PHY_B_AS_PRR = 1<<1, /* Bit 1: Pause Resolution-Rx */ + PHY_B_AS_PRT = 1<<0, /* Bit 0: Pause Resolution-Tx */ +}; +#define PHY_B_AS_PAUSE_MSK (PHY_B_AS_PRR | PHY_B_AS_PRT) + +/***** PHY_BCOM_INT_STAT 16 bit r/o Interrupt Status Reg *****/ +/***** PHY_BCOM_INT_MASK 16 bit r/w Interrupt Mask Reg *****/ +enum { + PHY_B_IS_PSE = 1<<14, /* Bit 14: Pair Swap Error */ + PHY_B_IS_MDXI_SC = 1<<13, /* Bit 13: MDIX Status Change */ + PHY_B_IS_HCT = 1<<12, /* Bit 12: counter above 32k */ + PHY_B_IS_LCT = 1<<11, /* Bit 11: counter above 128 */ + PHY_B_IS_AN_PR = 1<<10, /* Bit 10: Page Received */ + PHY_B_IS_NO_HDCL = 1<<9, /* Bit 9: No HCD Link */ + PHY_B_IS_NO_HDC = 1<<8, /* Bit 8: No HCD */ + PHY_B_IS_NEG_USHDC = 1<<7, /* Bit 7: Negotiated Unsup. 
HCD */ + PHY_B_IS_SCR_S_ER = 1<<6, /* Bit 6: Scrambler Sync Error */ + PHY_B_IS_RRS_CHANGE = 1<<5, /* Bit 5: Remote Rx Stat Change */ + PHY_B_IS_LRS_CHANGE = 1<<4, /* Bit 4: Local Rx Stat Change */ + PHY_B_IS_DUP_CHANGE = 1<<3, /* Bit 3: Duplex Mode Change */ + PHY_B_IS_LSP_CHANGE = 1<<2, /* Bit 2: Link Speed Change */ + PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */ + PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */ +}; +#define PHY_B_DEF_MSK \ + (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \ + PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE)) + +/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */ +enum { + PHY_B_P_NO_PAUSE = 0<<10,/* Bit 11..10: no Pause Mode */ + PHY_B_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */ + PHY_B_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */ + PHY_B_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */ +}; +/* + * Resolved Duplex mode and Capabilities (Aux Status Summary Reg) + */ +enum { + PHY_B_RES_1000FD = 7<<8,/* Bit 10..8: 1000Base-T Full Dup. */ + PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */ +}; + +/* Marvell-Specific */ +enum { + PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */ + PHY_M_AN_ACK = 1<<14, /* (ro) Acknowledge Received */ + PHY_M_AN_RF = 1<<13, /* Remote Fault */ + + PHY_M_AN_ASP = 1<<11, /* Asymmetric Pause */ + PHY_M_AN_PC = 1<<10, /* MAC Pause implemented */ + PHY_M_AN_100_T4 = 1<<9, /* Not cap. 100Base-T4 (always 0) */ + PHY_M_AN_100_FD = 1<<8, /* Advertise 100Base-TX Full Duplex */ + PHY_M_AN_100_HD = 1<<7, /* Advertise 100Base-TX Half Duplex */ + PHY_M_AN_10_FD = 1<<6, /* Advertise 10Base-TX Full Duplex */ + PHY_M_AN_10_HD = 1<<5, /* Advertise 10Base-TX Half Duplex */ + PHY_M_AN_SEL_MSK = 0x1f<<4, /* Bit 4.. 0: Selector Field Mask */ +}; + +/* special defines for FIBER (88E1011S only) */ +enum { + PHY_M_AN_ASP_X = 1<<8, /* Asymmetric Pause */ + PHY_M_AN_PC_X = 1<<7, /* MAC Pause implemented */ + PHY_M_AN_1000X_AHD = 1<<6, /* Advertise 1000Base-X Half Duplex */ + PHY_M_AN_1000X_AFD = 1<<5, /* Advertise 1000Base-X Full Duplex */ +}; + +/* Pause Bits (PHY_M_AN_ASP_X and PHY_M_AN_PC_X) encoding */ +enum { + PHY_M_P_NO_PAUSE_X = 0<<7,/* Bit 8.. 7: no Pause Mode */ + PHY_M_P_SYM_MD_X = 1<<7, /* Bit 8.. 7: symmetric Pause Mode */ + PHY_M_P_ASYM_MD_X = 2<<7,/* Bit 8.. 7: asymmetric Pause Mode */ + PHY_M_P_BOTH_MD_X = 3<<7,/* Bit 8.. 7: both Pause Mode */ +}; + +/***** PHY_MARV_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/ +enum { + PHY_M_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */ + PHY_M_1000C_MSE = 1<<12, /* Manual Master/Slave Enable */ + PHY_M_1000C_MSC = 1<<11, /* M/S Configuration (1=Master) */ + PHY_M_1000C_MPD = 1<<10, /* Multi-Port Device */ + PHY_M_1000C_AFD = 1<<9, /* Advertise Full Duplex */ + PHY_M_1000C_AHD = 1<<8, /* Advertise Half Duplex */ +};
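The PHY_M_1000C_AFD/AHD bits are what a driver sets to advertise gigabit modes during auto-negotiation. A sketch of composing the PHY_MARV_1000T_CTRL value — not part of the commit; gm_phy_write() and struct skge_hw are assumed to exist elsewhere in the driver sources:

static void marv_adv_1000(struct skge_hw *hw, int port, int fd, int hd)
{
	u16 ct1000 = 0;

	if (fd)
		ct1000 |= PHY_M_1000C_AFD;	/* advertise 1000Base-T FD */
	if (hd)
		ct1000 |= PHY_M_1000C_AHD;	/* advertise 1000Base-T HD */

	gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
}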
/***** PHY_MARV_PHY_CTRL 16 bit r/w PHY Specific Ctrl Reg *****/ +enum { + PHY_M_PC_TX_FFD_MSK = 3<<14,/* Bit 15..14: Tx FIFO Depth Mask */ + PHY_M_PC_RX_FFD_MSK = 3<<12,/* Bit 13..12: Rx FIFO Depth Mask */ + PHY_M_PC_ASS_CRS_TX = 1<<11, /* Assert CRS on Transmit */ + PHY_M_PC_FL_GOOD = 1<<10, /* Force Link Good */ + PHY_M_PC_EN_DET_MSK = 3<<8,/* Bit 9.. 8: Energy Detect Mask */ + PHY_M_PC_ENA_EXT_D = 1<<7, /* Enable Ext. Distance (10BT) */ + PHY_M_PC_MDIX_MSK = 3<<5,/* Bit 6.. 5: MDI/MDIX Config. Mask */ + PHY_M_PC_DIS_125CLK = 1<<4, /* Disable 125 CLK */ + PHY_M_PC_MAC_POW_UP = 1<<3, /* MAC Power up */ + PHY_M_PC_SQE_T_ENA = 1<<2, /* SQE Test Enabled */ + PHY_M_PC_POL_R_DIS = 1<<1, /* Polarity Reversal Disabled */ + PHY_M_PC_DIS_JABBER = 1<<0, /* Disable Jabber */ +}; + +enum { + PHY_M_PC_EN_DET = 2<<8, /* Energy Detect (Mode 1) */ + PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ +}; + +enum { + PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ + PHY_M_PC_MAN_MDIX = 1, /* 01 = Manual MDIX configuration */ + PHY_M_PC_ENA_AUTO = 3, /* 11 = Enable Automatic Crossover */ +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PC_ENA_DTE_DT = 1<<15, /* Enable Data Terminal Equ. (DTE) Detect */ + PHY_M_PC_ENA_ENE_DT = 1<<14, /* Enable Energy Detect (sense & pulse) */ + PHY_M_PC_DIS_NLP_CK = 1<<13, /* Disable Normal Link Pulse (NLP) Check */ + PHY_M_PC_ENA_LIP_NP = 1<<12, /* Enable Link Partner Next Page Reg. */ + PHY_M_PC_DIS_NLP_GN = 1<<11, /* Disable Normal Link Pulse Generation */ + + PHY_M_PC_DIS_SCRAMB = 1<<9, /* Disable Scrambler */ + PHY_M_PC_DIS_FEFI = 1<<8, /* Disable Far End Fault Indic. (FEFI) */ + + PHY_M_PC_SH_TP_SEL = 1<<6, /* Shielded Twisted Pair Select */ + PHY_M_PC_RX_FD_MSK = 3<<2,/* Bit 3.. 2: Rx FIFO Depth Mask */ +}; + +/***** PHY_MARV_PHY_STAT 16 bit r/o PHY Specific Status Reg *****/ +enum { + PHY_M_PS_SPEED_MSK = 3<<14, /* Bit 15..14: Speed Mask */ + PHY_M_PS_SPEED_1000 = 1<<15, /* 10 = 1000 Mbps */ + PHY_M_PS_SPEED_100 = 1<<14, /* 01 = 100 Mbps */ + PHY_M_PS_SPEED_10 = 0, /* 00 = 10 Mbps */ + PHY_M_PS_FULL_DUP = 1<<13, /* Full Duplex */ + PHY_M_PS_PAGE_REC = 1<<12, /* Page Received */ + PHY_M_PS_SPDUP_RES = 1<<11, /* Speed & Duplex Resolved */ + PHY_M_PS_LINK_UP = 1<<10, /* Link Up */ + PHY_M_PS_CABLE_MSK = 7<<7, /* Bit 9.. 7: Cable Length Mask */ + PHY_M_PS_MDI_X_STAT = 1<<6, /* MDI Crossover Stat (1=MDIX) */ + PHY_M_PS_DOWNS_STAT = 1<<5, /* Downshift Status (1=downsh.) */ + PHY_M_PS_ENDET_STAT = 1<<4, /* Energy Detect Status (1=act) */ + PHY_M_PS_TX_P_EN = 1<<3, /* Tx Pause Enabled */ + PHY_M_PS_RX_P_EN = 1<<2, /* Rx Pause Enabled */ + PHY_M_PS_POL_REV = 1<<1, /* Polarity Reversed */ + PHY_M_PS_JABBER = 1<<0, /* Jabber */ +}; + +#define PHY_M_PS_PAUSE_MSK (PHY_M_PS_TX_P_EN | PHY_M_PS_RX_P_EN) + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +enum { + PHY_M_PS_DTE_DETECT = 1<<15, /* Data Terminal Equipment (DTE) Detected */ + PHY_M_PS_RES_SPEED = 1<<14, /* Resolved Speed (1=100 Mbps, 0=10 Mbps) */ +}; + +enum { + PHY_M_IS_AN_ERROR = 1<<15, /* Auto-Negotiation Error */ + PHY_M_IS_LSP_CHANGE = 1<<14, /* Link Speed Changed */ + PHY_M_IS_DUP_CHANGE = 1<<13, /* Duplex Mode Changed */ + PHY_M_IS_AN_PR = 1<<12, /* Page Received */ + PHY_M_IS_AN_COMPL = 1<<11, /* Auto-Negotiation Completed */ + PHY_M_IS_LST_CHANGE = 1<<10, /* Link Status Changed */ + PHY_M_IS_SYMB_ERROR = 1<<9, /* Symbol Error */ + PHY_M_IS_FALSE_CARR = 1<<8, /* False Carrier */ + PHY_M_IS_FIFO_ERROR = 1<<7, /* FIFO Overflow/Underrun Error */ + PHY_M_IS_MDI_CHANGE = 1<<6, /* MDI Crossover Changed */ + PHY_M_IS_DOWNSH_DET = 1<<5, /* Downshift Detected */ + PHY_M_IS_END_CHANGE = 1<<4, /* Energy Detect Changed */ + + PHY_M_IS_DTE_CHANGE = 1<<2, /* DTE Power Det. 
Status Changed */ + PHY_M_IS_POL_CHANGE = 1<<1, /* Polarity Changed */ + PHY_M_IS_JABBER = 1<<0, /* Jabber */ + + PHY_M_IS_DEF_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_LSP_CHANGE | + PHY_M_IS_LST_CHANGE | PHY_M_IS_FIFO_ERROR, + + PHY_M_IS_AN_MSK = PHY_M_IS_AN_ERROR | PHY_M_IS_AN_COMPL, +}; + +/***** PHY_MARV_EXT_CTRL 16 bit r/w Ext. PHY Specific Ctrl *****/ +enum { + PHY_M_EC_ENA_BC_EXT = 1<<15, /* Enable Block Carr. Ext. (88E1111 only) */ + PHY_M_EC_ENA_LIN_LB = 1<<14, /* Enable Line Loopback (88E1111 only) */ + + PHY_M_EC_DIS_LINK_P = 1<<12, /* Disable Link Pulses (88E1111 only) */ + PHY_M_EC_M_DSC_MSK = 3<<10, /* Bit 11..10: Master Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_S_DSC_MSK = 3<<8, /* Bit 9.. 8: Slave Downshift Counter */ + /* (88E1011 only) */ + PHY_M_EC_M_DSC_MSK2 = 7<<9, /* Bit 11.. 9: Master Downshift Counter */ + /* (88E1111 only) */ + PHY_M_EC_DOWN_S_ENA = 1<<8, /* Downshift Enable (88E1111 only) */ + /* !!! Errata in spec. (1 = disable) */ + PHY_M_EC_RX_TIM_CT = 1<<7, /* RGMII Rx Timing Control*/ + PHY_M_EC_MAC_S_MSK = 7<<4, /* Bit 6.. 4: Def. MAC interface speed */ + PHY_M_EC_FIB_AN_ENA = 1<<3, /* Fiber Auto-Neg. Enable (88E1011S only) */ + PHY_M_EC_DTE_D_ENA = 1<<2, /* DTE Detect Enable (88E1111 only) */ + PHY_M_EC_TX_TIM_CT = 1<<1, /* RGMII Tx Timing Control */ + PHY_M_EC_TRANS_DIS = 1<<0, /* Transmitter Disable (88E1111 only) */}; + +#define PHY_M_EC_M_DSC(x) ((u16)(x)<<10) /* 00=1x; 01=2x; 10=3x; 11=4x */ +#define PHY_M_EC_S_DSC(x) ((u16)(x)<<8) /* 00=dis; 01=1x; 10=2x; 11=3x */ +#define PHY_M_EC_MAC_S(x) ((u16)(x)<<4) /* 01X=0; 110=2.5; 111=25 (MHz) */ + +#define PHY_M_EC_M_DSC_2(x) ((u16)(x)<<9) /* 000=1x; 001=2x; 010=3x; 011=4x */ + /* 100=5x; 101=6x; 110=7x; 111=8x */ +enum { + MAC_TX_CLK_0_MHZ = 2, + MAC_TX_CLK_2_5_MHZ = 6, + MAC_TX_CLK_25_MHZ = 7, +}; + +/***** PHY_MARV_LED_CTRL 16 bit r/w LED Control Reg *****/ +enum { + PHY_M_LEDC_DIS_LED = 1<<15, /* Disable LED */ + PHY_M_LEDC_PULS_MSK = 7<<12,/* Bit 14..12: Pulse Stretch Mask */ + PHY_M_LEDC_F_INT = 1<<11, /* Force Interrupt */ + PHY_M_LEDC_BL_R_MSK = 7<<8,/* Bit 10.. 8: Blink Rate Mask */ + PHY_M_LEDC_DP_C_LSB = 1<<7, /* Duplex Control (LSB, 88E1111 only) */ + PHY_M_LEDC_TX_C_LSB = 1<<6, /* Tx Control (LSB, 88E1111 only) */ + PHY_M_LEDC_LK_C_MSK = 7<<3,/* Bit 5.. 3: Link Control Mask */ + /* (88E1111 only) */ +}; +#define PHY_M_LED_PULS_DUR(x) (((u16)(x)<<12) & PHY_M_LEDC_PULS_MSK) +#define PHY_M_LED_BLINK_RT(x) (((u16)(x)<<8) & PHY_M_LEDC_BL_R_MSK) + +enum { + PHY_M_LEDC_LINK_MSK = 3<<3, /* Bit 4.. 
3: Link Control Mask */ + /* (88E1011 only) */ + PHY_M_LEDC_DP_CTRL = 1<<2, /* Duplex Control */ + PHY_M_LEDC_DP_C_MSB = 1<<2, /* Duplex Control (MSB, 88E1111 only) */ + PHY_M_LEDC_RX_CTRL = 1<<1, /* Rx Activity / Link */ + PHY_M_LEDC_TX_CTRL = 1<<0, /* Tx Activity / Link */ + PHY_M_LEDC_TX_C_MSB = 1<<0, /* Tx Control (MSB, 88E1111 only) */ +}; + +enum { + PULS_NO_STR = 0, /* no pulse stretching */ + PULS_21MS = 1, /* 21 ms to 42 ms */ + PULS_42MS = 2, /* 42 ms to 84 ms */ + PULS_84MS = 3, /* 84 ms to 170 ms */ + PULS_170MS = 4, /* 170 ms to 340 ms */ + PULS_340MS = 5, /* 340 ms to 670 ms */ + PULS_670MS = 6, /* 670 ms to 1.3 s */ + PULS_1300MS = 7, /* 1.3 s to 2.7 s */ +}; + + +enum { + BLINK_42MS = 0, /* 42 ms */ + BLINK_84MS = 1, /* 84 ms */ + BLINK_170MS = 2, /* 170 ms */ + BLINK_340MS = 3, /* 340 ms */ + BLINK_670MS = 4, /* 670 ms */ +}; + +/***** PHY_MARV_LED_OVER 16 bit r/w Manual LED Override Reg *****/ +#define PHY_M_LED_MO_SGMII(x) ((x)<<14) /* Bit 15..14: SGMII AN Timer */ + /* Bit 13..12: reserved */ +#define PHY_M_LED_MO_DUP(x) ((x)<<10) /* Bit 11..10: Duplex */ +#define PHY_M_LED_MO_10(x) ((x)<<8) /* Bit 9.. 8: Link 10 */ +#define PHY_M_LED_MO_100(x) ((x)<<6) /* Bit 7.. 6: Link 100 */ +#define PHY_M_LED_MO_1000(x) ((x)<<4) /* Bit 5.. 4: Link 1000 */ +#define PHY_M_LED_MO_RX(x) ((x)<<2) /* Bit 3.. 2: Rx */ +#define PHY_M_LED_MO_TX(x) ((x)<<0) /* Bit 1.. 0: Tx */ + +enum { + MO_LED_NORM = 0, + MO_LED_BLINK = 1, + MO_LED_OFF = 2, + MO_LED_ON = 3, +}; + +/***** PHY_MARV_EXT_CTRL_2 16 bit r/w Ext. PHY Specific Ctrl 2 *****/ +enum { + PHY_M_EC2_FI_IMPED = 1<<6, /* Fiber Input Impedance */ + PHY_M_EC2_FO_IMPED = 1<<5, /* Fiber Output Impedance */ + PHY_M_EC2_FO_M_CLK = 1<<4, /* Fiber Mode Clock Enable */ + PHY_M_EC2_FO_BOOST = 1<<3, /* Fiber Output Boost */ + PHY_M_EC2_FO_AM_MSK = 7, /* Bit 2.. 0: Fiber Output Amplitude */ +}; + +/***** PHY_MARV_EXT_P_STAT 16 bit r/w Ext. PHY Specific Status *****/ +enum { + PHY_M_FC_AUTO_SEL = 1<<15, /* Fiber/Copper Auto Sel. Dis. */ + PHY_M_FC_AN_REG_ACC = 1<<14, /* Fiber/Copper AN Reg. Access */ + PHY_M_FC_RESOLUTION = 1<<13, /* Fiber/Copper Resolution */ + PHY_M_SER_IF_AN_BP = 1<<12, /* Ser. IF AN Bypass Enable */ + PHY_M_SER_IF_BP_ST = 1<<11, /* Ser. IF AN Bypass Status */ + PHY_M_IRQ_POLARITY = 1<<10, /* IRQ polarity */ + PHY_M_DIS_AUT_MED = 1<<9, /* Disable Aut. Medium Reg. Selection */ + /* (88E1111 only) */ + /* Bit 9.. 4: reserved (88E1011 only) */ + PHY_M_UNDOC1 = 1<<7, /* undocumented bit !! */ + PHY_M_DTE_POW_STAT = 1<<4, /* DTE Power Status (88E1111 only) */ + PHY_M_MODE_MASK = 0xf, /* Bit 3.. 0: copy of HWCFG MODE[3:0] */ +}; + +/***** PHY_MARV_CABLE_DIAG 16 bit r/o Cable Diagnostic Reg *****/ +enum { + PHY_M_CABD_ENA_TEST = 1<<15, /* Enable Test (Page 0) */ + PHY_M_CABD_DIS_WAIT = 1<<15, /* Disable Waiting Period (Page 1) */ + /* (88E1111 only) */ + PHY_M_CABD_STAT_MSK = 3<<13, /* Bit 14..13: Status Mask */ + PHY_M_CABD_AMPL_MSK = 0x1f<<8, /* Bit 12.. 8: Amplitude Mask */ + /* (88E1111 only) */ + PHY_M_CABD_DIST_MSK = 0xff, /* Bit 7.. 0: Distance Mask */ +}; + +/* values for Cable Diagnostic Status (11=fail; 00=OK; 10=open; 01=short) */ +enum { + CABD_STAT_NORMAL= 0, + CABD_STAT_SHORT = 1, + CABD_STAT_OPEN = 2, + CABD_STAT_FAIL = 3, +}; + +/* for 10/100 Fast Ethernet PHY (88E3082 only) */ +/***** PHY_MARV_FE_LED_PAR 16 bit r/w LED Parallel Select Reg. *****/ + /* Bit 15..12: reserved (used internally) */ +enum { + PHY_M_FELP_LED2_MSK = 0xf<<8, /* Bit 11.. 8: LED2 Mask (LINK) */ + PHY_M_FELP_LED1_MSK = 0xf<<4, /* Bit 7.. 
4: LED1 Mask (ACT) */ + PHY_M_FELP_LED0_MSK = 0xf, /* Bit 3.. 0: LED0 Mask (SPEED) */ +}; + +#define PHY_M_FELP_LED2_CTRL(x) (((x)<<8) & PHY_M_FELP_LED2_MSK) +#define PHY_M_FELP_LED1_CTRL(x) (((x)<<4) & PHY_M_FELP_LED1_MSK) +#define PHY_M_FELP_LED0_CTRL(x) (((x)<<0) & PHY_M_FELP_LED0_MSK) + +enum { + LED_PAR_CTRL_COLX = 0x00, + LED_PAR_CTRL_ERROR = 0x01, + LED_PAR_CTRL_DUPLEX = 0x02, + LED_PAR_CTRL_DP_COL = 0x03, + LED_PAR_CTRL_SPEED = 0x04, + LED_PAR_CTRL_LINK = 0x05, + LED_PAR_CTRL_TX = 0x06, + LED_PAR_CTRL_RX = 0x07, + LED_PAR_CTRL_ACT = 0x08, + LED_PAR_CTRL_LNK_RX = 0x09, + LED_PAR_CTRL_LNK_AC = 0x0a, + LED_PAR_CTRL_ACT_BL = 0x0b, + LED_PAR_CTRL_TX_BL = 0x0c, + LED_PAR_CTRL_RX_BL = 0x0d, + LED_PAR_CTRL_COL_BL = 0x0e, + LED_PAR_CTRL_INACT = 0x0f +}; + +/***** PHY_MARV_FE_SPEC_2 16 bit r/w Specific Control Reg. 2 *****/ +enum { + PHY_M_FESC_DIS_WAIT = 1<<2, /* Disable TDR Waiting Period */ + PHY_M_FESC_ENA_MCLK = 1<<1, /* Enable MAC Rx Clock in sleep mode */ + PHY_M_FESC_SEL_CL_A = 1<<0, /* Select Class A driver (100B-TX) */ +}; + + +/***** PHY_MARV_PHY_CTRL (page 3) 16 bit r/w LED Control Reg. *****/ +enum { + PHY_M_LEDC_LOS_MSK = 0xf<<12, /* Bit 15..12: LOS LED Ctrl. Mask */ + PHY_M_LEDC_INIT_MSK = 0xf<<8, /* Bit 11.. 8: INIT LED Ctrl. Mask */ + PHY_M_LEDC_STA1_MSK = 0xf<<4, /* Bit 7.. 4: STAT1 LED Ctrl. Mask */ + PHY_M_LEDC_STA0_MSK = 0xf, /* Bit 3.. 0: STAT0 LED Ctrl. Mask */ +}; + +#define PHY_M_LEDC_LOS_CTRL(x) (((x)<<12) & PHY_M_LEDC_LOS_MSK) +#define PHY_M_LEDC_INIT_CTRL(x) (((x)<<8) & PHY_M_LEDC_INIT_MSK) +#define PHY_M_LEDC_STA1_CTRL(x) (((x)<<4) & PHY_M_LEDC_STA1_MSK) +#define PHY_M_LEDC_STA0_CTRL(x) (((x)<<0) & PHY_M_LEDC_STA0_MSK)
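The four PHY_M_LEDC_*_CTRL() helpers each mask one nibble of the page-3 LED control word, so a complete register value is just the OR of the four per-LED selections. An illustrative composition — not part of the commit; the function and its u8 arguments are hypothetical, only the macros are the ones defined above:

static u16 marv_page3_led_ctrl(u8 los, u8 init, u8 sta1, u8 sta0)
{
	/* each argument selects the behaviour of one LED pin */
	return PHY_M_LEDC_LOS_CTRL(los) |
	       PHY_M_LEDC_INIT_CTRL(init) |
	       PHY_M_LEDC_STA1_CTRL(sta1) |
	       PHY_M_LEDC_STA0_CTRL(sta0);
}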
/* GMAC registers */ +/* Port Registers */ +enum { + GM_GP_STAT = 0x0000, /* 16 bit r/o General Purpose Status */ + GM_GP_CTRL = 0x0004, /* 16 bit r/w General Purpose Control */ + GM_TX_CTRL = 0x0008, /* 16 bit r/w Transmit Control Reg. */ + GM_RX_CTRL = 0x000c, /* 16 bit r/w Receive Control Reg. */ + GM_TX_FLOW_CTRL = 0x0010, /* 16 bit r/w Transmit Flow-Control */ + GM_TX_PARAM = 0x0014, /* 16 bit r/w Transmit Parameter Reg. */ + GM_SERIAL_MODE = 0x0018, /* 16 bit r/w Serial Mode Register */ +/* Source Address Registers */ + GM_SRC_ADDR_1L = 0x001c, /* 16 bit r/w Source Address 1 (low) */ + GM_SRC_ADDR_1M = 0x0020, /* 16 bit r/w Source Address 1 (middle) */ + GM_SRC_ADDR_1H = 0x0024, /* 16 bit r/w Source Address 1 (high) */ + GM_SRC_ADDR_2L = 0x0028, /* 16 bit r/w Source Address 2 (low) */ + GM_SRC_ADDR_2M = 0x002c, /* 16 bit r/w Source Address 2 (middle) */ + GM_SRC_ADDR_2H = 0x0030, /* 16 bit r/w Source Address 2 (high) */ + +/* Multicast Address Hash Registers */ + GM_MC_ADDR_H1 = 0x0034, /* 16 bit r/w Multicast Address Hash 1 */ + GM_MC_ADDR_H2 = 0x0038, /* 16 bit r/w Multicast Address Hash 2 */ + GM_MC_ADDR_H3 = 0x003c, /* 16 bit r/w Multicast Address Hash 3 */ + GM_MC_ADDR_H4 = 0x0040, /* 16 bit r/w Multicast Address Hash 4 */ + +/* Interrupt Source Registers */ + GM_TX_IRQ_SRC = 0x0044, /* 16 bit r/o Tx Overflow IRQ Source */ + GM_RX_IRQ_SRC = 0x0048, /* 16 bit r/o Rx Overflow IRQ Source */ + GM_TR_IRQ_SRC = 0x004c, /* 16 bit r/o Tx/Rx Over. IRQ Source */ + +/* Interrupt Mask Registers */ + GM_TX_IRQ_MSK = 0x0050, /* 16 bit r/w Tx Overflow IRQ Mask */ + GM_RX_IRQ_MSK = 0x0054, /* 16 bit r/w Rx Overflow IRQ Mask */ + GM_TR_IRQ_MSK = 0x0058, /* 16 bit r/w Tx/Rx Over. IRQ Mask */ + +/* Serial Management Interface (SMI) Registers */ + GM_SMI_CTRL = 0x0080, /* 16 bit r/w SMI Control Register */ + GM_SMI_DATA = 0x0084, /* 16 bit r/w SMI Data Register */ + GM_PHY_ADDR = 0x0088, /* 16 bit r/w GPHY Address Register */ +}; + +/* MIB Counters */ +#define GM_MIB_CNT_BASE 0x0100 /* Base Address of MIB Counters */ +#define GM_MIB_CNT_SIZE 44 /* Number of MIB Counters */ + +/* + * MIB Counters base address definitions (low word) - + * use offset 4 for access to high word (32 bit r/o) + */ +enum { + GM_RXF_UC_OK = GM_MIB_CNT_BASE + 0, /* Unicast Frames Received OK */ + GM_RXF_BC_OK = GM_MIB_CNT_BASE + 8, /* Broadcast Frames Received OK */ + GM_RXF_MPAUSE = GM_MIB_CNT_BASE + 16, /* Pause MAC Ctrl Frames Received */ + GM_RXF_MC_OK = GM_MIB_CNT_BASE + 24, /* Multicast Frames Received OK */ + GM_RXF_FCS_ERR = GM_MIB_CNT_BASE + 32, /* Rx Frame Check Seq. Error */ + /* GM_MIB_CNT_BASE + 40: reserved */ + GM_RXO_OK_LO = GM_MIB_CNT_BASE + 48, /* Octets Received OK Low */ + GM_RXO_OK_HI = GM_MIB_CNT_BASE + 56, /* Octets Received OK High */ + GM_RXO_ERR_LO = GM_MIB_CNT_BASE + 64, /* Octets Received Invalid Low */ + GM_RXO_ERR_HI = GM_MIB_CNT_BASE + 72, /* Octets Received Invalid High */ + GM_RXF_SHT = GM_MIB_CNT_BASE + 80, /* Frames <64 Byte Received OK */ + GM_RXE_FRAG = GM_MIB_CNT_BASE + 88, /* Frames <64 Byte Received with FCS Err */ + GM_RXF_64B = GM_MIB_CNT_BASE + 96, /* 64 Byte Rx Frame */ + GM_RXF_127B = GM_MIB_CNT_BASE + 104, /* 65-127 Byte Rx Frame */ + GM_RXF_255B = GM_MIB_CNT_BASE + 112, /* 128-255 Byte Rx Frame */ + GM_RXF_511B = GM_MIB_CNT_BASE + 120, /* 256-511 Byte Rx Frame */ + GM_RXF_1023B = GM_MIB_CNT_BASE + 128, /* 512-1023 Byte Rx Frame */ + GM_RXF_1518B = GM_MIB_CNT_BASE + 136, /* 1024-1518 Byte Rx Frame */ + GM_RXF_MAX_SZ = GM_MIB_CNT_BASE + 144, /* 1519-MaxSize Byte Rx Frame */ + GM_RXF_LNG_ERR = GM_MIB_CNT_BASE + 152, /* Rx Frame too Long Error */ + GM_RXF_JAB_PKT = GM_MIB_CNT_BASE + 160, /* Rx Jabber Packet Frame */ + /* GM_MIB_CNT_BASE + 168: reserved */ + GM_RXE_FIFO_OV = GM_MIB_CNT_BASE + 176, /* Rx FIFO overflow Event */ + /* GM_MIB_CNT_BASE + 184: reserved */ + GM_TXF_UC_OK = GM_MIB_CNT_BASE + 192, /* Unicast Frames Xmitted OK */ + GM_TXF_BC_OK = GM_MIB_CNT_BASE + 200, /* Broadcast Frames Xmitted OK */ + GM_TXF_MPAUSE = GM_MIB_CNT_BASE + 208, /* Pause MAC Ctrl Frames Xmitted */ + GM_TXF_MC_OK = GM_MIB_CNT_BASE + 216, /* Multicast Frames Xmitted OK */ + GM_TXO_OK_LO = GM_MIB_CNT_BASE + 224, /* Octets Transmitted OK Low */ + GM_TXO_OK_HI = GM_MIB_CNT_BASE + 232, /* Octets Transmitted OK High */ + GM_TXF_64B = GM_MIB_CNT_BASE + 240, /* 64 Byte Tx Frame */ + GM_TXF_127B = GM_MIB_CNT_BASE + 248, /* 65-127 Byte Tx Frame */ + GM_TXF_255B = GM_MIB_CNT_BASE + 256, /* 128-255 Byte Tx Frame */ + GM_TXF_511B = GM_MIB_CNT_BASE + 264, /* 256-511 Byte Tx Frame */ + GM_TXF_1023B = GM_MIB_CNT_BASE + 272, /* 512-1023 Byte Tx Frame */ + GM_TXF_1518B = GM_MIB_CNT_BASE + 280, /* 1024-1518 Byte Tx Frame */ + GM_TXF_MAX_SZ = GM_MIB_CNT_BASE + 288, /* 1519-MaxSize Byte Tx Frame */ + + GM_TXF_COL = GM_MIB_CNT_BASE + 304, /* Tx Collision */ + GM_TXF_LAT_COL = GM_MIB_CNT_BASE + 312, /* Tx Late Collision */ + GM_TXF_ABO_COL = GM_MIB_CNT_BASE + 320, /* Tx aborted due to Exces. Col. */ + GM_TXF_MUL_COL = GM_MIB_CNT_BASE + 328, /* Tx Multiple Collision */ + GM_TXF_SNG_COL = GM_MIB_CNT_BASE + 336, /* Tx Single Collision */ + GM_TXE_FIFO_UR = GM_MIB_CNT_BASE + 344, /* Tx FIFO Underrun Event */ +};
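As the comment above notes, every MIB counter is exposed as two 32 bit read-only words, with the high word 4 bytes above the listed offset. A sketch of a combined 64 bit read — not part of the commit; gma_read32() is assumed to be the driver's per-port 32 bit GMAC read helper:

static u64 gm_mib_read64(const struct skge_hw *hw, int port, unsigned int reg)
{
	u32 lo = gma_read32(hw, port, reg);	/* e.g. GM_RXO_OK_LO */
	u32 hi = gma_read32(hw, port, reg + 4);	/* high word at offset +4 */

	return ((u64) hi << 32) | lo;
}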
/* GMAC Bit Definitions */ +/* GM_GP_STAT 16 bit r/o General Purpose Status Register */ +enum { + GM_GPSR_SPEED = 1<<15, /* Bit 15: Port Speed (1 = 100 Mbps) */ + GM_GPSR_DUPLEX = 1<<14, /* Bit 14: Duplex Mode (1 = Full) */ + GM_GPSR_FC_TX_DIS = 1<<13, /* Bit 13: Tx Flow-Control Mode Disabled */ + GM_GPSR_LINK_UP = 1<<12, /* Bit 12: Link Up Status */ + GM_GPSR_PAUSE = 1<<11, /* Bit 11: Pause State */ + GM_GPSR_TX_ACTIVE = 1<<10, /* Bit 10: Tx in Progress */ + GM_GPSR_EXC_COL = 1<<9, /* Bit 9: Excessive Collisions Occurred */ + GM_GPSR_LAT_COL = 1<<8, /* Bit 8: Late Collisions Occurred */ + + GM_GPSR_PHY_ST_CH = 1<<5, /* Bit 5: PHY Status Change */ + GM_GPSR_GIG_SPEED = 1<<4, /* Bit 4: Gigabit Speed (1 = 1000 Mbps) */ + GM_GPSR_PART_MODE = 1<<3, /* Bit 3: Partition mode */ + GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */ + GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */ +}; + +/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ +enum { + GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */ + GM_GPCR_FC_TX_DIS = 1<<13, /* Bit 13: Disable Tx Flow-Control Mode */ + GM_GPCR_TX_ENA = 1<<12, /* Bit 12: Enable Transmit */ + GM_GPCR_RX_ENA = 1<<11, /* Bit 11: Enable Receive */ + GM_GPCR_BURST_ENA = 1<<10, /* Bit 10: Enable Burst Mode */ + GM_GPCR_LOOP_ENA = 1<<9, /* Bit 9: Enable MAC Loopback Mode */ + GM_GPCR_PART_ENA = 1<<8, /* Bit 8: Enable Partition Mode */ + GM_GPCR_GIGS_ENA = 1<<7, /* Bit 7: Gigabit Speed (1000 Mbps) */ + GM_GPCR_FL_PASS = 1<<6, /* Bit 6: Force Link Pass */ + GM_GPCR_DUP_FULL = 1<<5, /* Bit 5: Full Duplex Mode */ + GM_GPCR_FC_RX_DIS = 1<<4, /* Bit 4: Disable Rx Flow-Control Mode */ + GM_GPCR_SPEED_100 = 1<<3, /* Bit 3: Port Speed 100 Mbps */ + GM_GPCR_AU_DUP_DIS = 1<<2, /* Bit 2: Disable Auto-Update Duplex */ + GM_GPCR_AU_FCT_DIS = 1<<1, /* Bit 1: Disable Auto-Update Flow-C. */ + GM_GPCR_AU_SPD_DIS = 1<<0, /* Bit 0: Disable Auto-Update Speed */ +}; + +#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) +#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS)
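GM_GPCR_SPEED_1000 and GM_GPCR_AU_ALL_DIS exist because forcing a link mode requires both selecting the speed bits and switching off the auto-update functions that would otherwise overwrite them. A sketch of forcing 1000 Mbps full duplex — not part of the commit; gma_write16() is assumed to be the driver's per-port 16 bit GMAC write helper:

static void gm_force_1000fd(struct skge_hw *hw, int port)
{
	u16 ctrl = GM_GPCR_SPEED_1000 | GM_GPCR_DUP_FULL |
		   GM_GPCR_AU_ALL_DIS |		/* keep the forced mode */
		   GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;

	gma_write16(hw, port, GM_GP_CTRL, ctrl);
}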
/* GM_TX_CTRL 16 bit r/w Transmit Control Register */ +enum { + GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ + GM_TXCR_CRC_DIS = 1<<14, /* Bit 14: Disable insertion of CRC */ + GM_TXCR_PAD_DIS = 1<<13, /* Bit 13: Disable padding of packets */ + GM_TXCR_COL_THR_MSK = 7<<10, /* Bit 12..10: Collision Threshold */ +}; + +#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) +#define TX_COL_DEF 0x04 /* late collision after 64 byte */ + +/* GM_RX_CTRL 16 bit r/w Receive Control Register */ +enum { + GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */ + GM_RXCR_MCF_ENA = 1<<14, /* Bit 14: Enable Multicast filtering */ + GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */ + GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */ +}; + +/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ +enum { + GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */ + GM_TXPA_JAMIPG_MSK = 0x1f<<9, /* Bit 13..9: Jam IPG */ + GM_TXPA_JAMDAT_MSK = 0x1f<<4, /* Bit 8..4: IPG Jam to Data */ + + TX_JAM_LEN_DEF = 0x03, + TX_JAM_IPG_DEF = 0x0b, + TX_IPG_JAM_DEF = 0x1c, +}; + +#define TX_JAM_LEN_VAL(x) (((x)<<14) & GM_TXPA_JAMLEN_MSK) +#define TX_JAM_IPG_VAL(x) (((x)<<9) & GM_TXPA_JAMIPG_MSK) +#define TX_IPG_JAM_DATA(x) (((x)<<4) & GM_TXPA_JAMDAT_MSK) + + +/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ +enum { + GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */ + GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */ + GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */ + GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */ + GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ +}; + +#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) +#define DATA_BLIND_DEF 0x04 + +#define IPG_DATA_VAL(x) (x & GM_SMOD_IPG_MSK) +#define IPG_DATA_DEF 0x1e + +/* GM_SMI_CTRL 16 bit r/w SMI Control Register */ +enum { + GM_SMI_CT_PHY_A_MSK = 0x1f<<11, /* Bit 15..11: PHY Device Address */ + GM_SMI_CT_REG_A_MSK = 0x1f<<6, /* Bit 10.. 6: PHY Register Address */ + GM_SMI_CT_OP_RD = 1<<5, /* Bit 5: OpCode Read (0=Write)*/ + GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */ + GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */ +}; + +#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK) +#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK)
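A PHY register access over the SMI follows directly from this layout: write the PHY and register addresses plus the opcode into GM_SMI_CTRL, then poll GM_SMI_CT_RD_VAL before fetching GM_SMI_DATA. A sketch of a read — not part of the commit, with timeout handling omitted; gma_read16()/gma_write16() and the hw->phy_addr field are assumed from the rest of the driver:

static u16 smi_phy_read(struct skge_hw *hw, int port, int reg)
{
	gma_write16(hw, port, GM_SMI_CTRL,
		    GM_SMI_CT_PHY_AD(hw->phy_addr) |
		    GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);

	/* GM_SMI_CT_RD_VAL is set once the shifted-in data is valid */
	while (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL))
		cpu_relax();

	return gma_read16(hw, port, GM_SMI_DATA);
}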
/* GM_PHY_ADDR 16 bit r/w GPHY Address Register */ +enum { + GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */ + GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */ +}; + +/* Receive Frame Status Encoding */ +enum { + GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ + GMR_FS_LEN_SHIFT = 16, + GMR_FS_VLAN = 1<<13, /* Bit 13: VLAN Packet */ + GMR_FS_JABBER = 1<<12, /* Bit 12: Jabber Packet */ + GMR_FS_UN_SIZE = 1<<11, /* Bit 11: Undersize Packet */ + GMR_FS_MC = 1<<10, /* Bit 10: Multicast Packet */ + GMR_FS_BC = 1<<9, /* Bit 9: Broadcast Packet */ + GMR_FS_RX_OK = 1<<8, /* Bit 8: Receive OK (Good Packet) */ + GMR_FS_GOOD_FC = 1<<7, /* Bit 7: Good Flow-Control Packet */ + GMR_FS_BAD_FC = 1<<6, /* Bit 6: Bad Flow-Control Packet */ + GMR_FS_MII_ERR = 1<<5, /* Bit 5: MII Error */ + GMR_FS_LONG_ERR = 1<<4, /* Bit 4: Too Long Packet */ + GMR_FS_FRAGMENT = 1<<3, /* Bit 3: Fragment */ + + GMR_FS_CRC_ERR = 1<<1, /* Bit 1: CRC Error */ + GMR_FS_RX_FF_OV = 1<<0, /* Bit 0: Rx FIFO Overflow */ + +/* + * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR) + */ + GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR | + GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC | + GMR_FS_JABBER, +/* Rx GMAC FIFO Flush Mask (default) */ + RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR | + GMR_FS_BAD_FC | GMR_FS_UN_SIZE | GMR_FS_JABBER, +}; + +/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ +enum { + GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */ + GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */ + GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */ + + GMF_RP_TST_ON = 1<<10, /* Read Pointer Test On */ + GMF_RP_TST_OFF = 1<<9, /* Read Pointer Test Off */ + GMF_RP_STEP = 1<<8, /* Read Pointer Step/Increment */ + GMF_RX_F_FL_ON = 1<<7, /* Rx FIFO Flush Mode On */ + GMF_RX_F_FL_OFF = 1<<6, /* Rx FIFO Flush Mode Off */ + GMF_CLI_RX_FO = 1<<5, /* Clear IRQ Rx FIFO Overrun */ + GMF_CLI_RX_FC = 1<<4, /* Clear IRQ Rx Frame Complete */ + GMF_OPER_ON = 1<<3, /* Operational Mode On */ + GMF_OPER_OFF = 1<<2, /* Operational Mode Off */ + GMF_RST_CLR = 1<<1, /* Clear GMAC FIFO Reset */ + GMF_RST_SET = 1<<0, /* Set GMAC FIFO Reset */ + + RX_GMF_FL_THR_DEF = 0xa, /* flush threshold (default) */ +}; + + +/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */ +enum { + GMF_WSP_TST_ON = 1<<18, /* Write Shadow Pointer Test On */ + GMF_WSP_TST_OFF = 1<<17, /* Write Shadow Pointer Test Off */ + GMF_WSP_STEP = 1<<16, /* Write Shadow Pointer Step/Increment */ + + GMF_CLI_TX_FU = 1<<6, /* Clear IRQ Tx FIFO Underrun */ + GMF_CLI_TX_FC = 1<<5, /* Clear IRQ Tx Frame Complete */ + GMF_CLI_TX_PE = 1<<4, /* Clear IRQ Tx Parity Error */ +}; + +/* GMAC_TI_ST_CTRL 8 bit Time Stamp Timer Ctrl Reg (YUKON only) */ +enum { + GMT_ST_START = 1<<2, /* Start Time Stamp Timer */ + GMT_ST_STOP = 1<<1, /* Stop Time Stamp Timer */ + GMT_ST_CLR_IRQ = 1<<0, /* Clear Time Stamp Timer IRQ */ +}; + +/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ +enum { + GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */ + GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */ + GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */ + GMC_F_LOOPB_OFF = 1<<4, /* FIFO 
Loopback Off */ + GMC_PAUSE_ON = 1<<3, /* Pause On */ + GMC_PAUSE_OFF = 1<<2, /* Pause Off */ + GMC_RST_CLR = 1<<1, /* Clear GMAC Reset */ + GMC_RST_SET = 1<<0, /* Set GMAC Reset */ +}; + +/* GPHY_CTRL 32 bit GPHY Control Reg (YUKON only) */ +enum { + GPC_SEL_BDT = 1<<28, /* Select Bi-Dir. Transfer for MDC/MDIO */ + GPC_INT_POL_HI = 1<<27, /* IRQ Polarity is Active HIGH */ + GPC_75_OHM = 1<<26, /* Use 75 Ohm Termination instead of 50 */ + GPC_DIS_FC = 1<<25, /* Disable Automatic Fiber/Copper Detection */ + GPC_DIS_SLEEP = 1<<24, /* Disable Energy Detect */ + GPC_HWCFG_M_3 = 1<<23, /* HWCFG_MODE[3] */ + GPC_HWCFG_M_2 = 1<<22, /* HWCFG_MODE[2] */ + GPC_HWCFG_M_1 = 1<<21, /* HWCFG_MODE[1] */ + GPC_HWCFG_M_0 = 1<<20, /* HWCFG_MODE[0] */ + GPC_ANEG_0 = 1<<19, /* ANEG[0] */ + GPC_ENA_XC = 1<<18, /* Enable MDI crossover */ + GPC_DIS_125 = 1<<17, /* Disable 125 MHz clock */ + GPC_ANEG_3 = 1<<16, /* ANEG[3] */ + GPC_ANEG_2 = 1<<15, /* ANEG[2] */ + GPC_ANEG_1 = 1<<14, /* ANEG[1] */ + GPC_ENA_PAUSE = 1<<13, /* Enable Pause (SYM_OR_REM) */ + GPC_PHYADDR_4 = 1<<12, /* Bit 4 of Phy Addr */ + GPC_PHYADDR_3 = 1<<11, /* Bit 3 of Phy Addr */ + GPC_PHYADDR_2 = 1<<10, /* Bit 2 of Phy Addr */ + GPC_PHYADDR_1 = 1<<9, /* Bit 1 of Phy Addr */ + GPC_PHYADDR_0 = 1<<8, /* Bit 0 of Phy Addr */ + /* Bits 7..2: reserved */ + GPC_RST_CLR = 1<<1, /* Clear GPHY Reset */ + GPC_RST_SET = 1<<0, /* Set GPHY Reset */ +}; + +#define GPC_HWCFG_GMII_COP (GPC_HWCFG_M_3|GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0) +#define GPC_HWCFG_GMII_FIB (GPC_HWCFG_M_2 | GPC_HWCFG_M_1 | GPC_HWCFG_M_0) +#define GPC_ANEG_ADV_ALL_M (GPC_ANEG_3 | GPC_ANEG_2 | GPC_ANEG_1 | GPC_ANEG_0) + +/* forced speed and duplex mode (don't mix with other ANEG bits) */ +#define GPC_FRC10MBIT_HALF 0 +#define GPC_FRC10MBIT_FULL GPC_ANEG_0 +#define GPC_FRC100MBIT_HALF GPC_ANEG_1 +#define GPC_FRC100MBIT_FULL (GPC_ANEG_0 | GPC_ANEG_1) + +/* auto-negotiation with limited advertised speeds */ +/* mix only with master/slave settings (for copper) */ +#define GPC_ADV_1000_HALF GPC_ANEG_2 +#define GPC_ADV_1000_FULL GPC_ANEG_3 +#define GPC_ADV_ALL (GPC_ANEG_2 | GPC_ANEG_3) + +/* master/slave settings */ +/* only for copper with 1000 Mbps */ +#define GPC_FORCE_MASTER 0 +#define GPC_FORCE_SLAVE GPC_ANEG_0 +#define GPC_PREF_MASTER GPC_ANEG_1 +#define GPC_PREF_SLAVE (GPC_ANEG_1 | GPC_ANEG_0) + +/* GMAC_IRQ_SRC 8 bit GMAC Interrupt Source Reg (YUKON only) */ +/* GMAC_IRQ_MSK 8 bit GMAC Interrupt Mask Reg (YUKON only) */ +enum { + GM_IS_TX_CO_OV = 1<<5, /* Transmit Counter Overflow IRQ */ + GM_IS_RX_CO_OV = 1<<4, /* Receive Counter Overflow IRQ */ + GM_IS_TX_FF_UR = 1<<3, /* Transmit FIFO Underrun */ + GM_IS_TX_COMPL = 1<<2, /* Frame Transmission Complete */ + GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */ + GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */ + +#define GMAC_DEF_MSK (GM_IS_RX_FF_OR | GM_IS_TX_FF_UR) + +/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */ + /* Bits 15.. 
2: reserved */ + GMLC_RST_CLR = 1<<1, /* Clear GMAC Link Reset */ + GMLC_RST_SET = 1<<0, /* Set GMAC Link Reset */ + + +/* WOL_CTRL_STAT 16 bit WOL Control/Status Reg */ + WOL_CTL_LINK_CHG_OCC = 1<<15, + WOL_CTL_MAGIC_PKT_OCC = 1<<14, + WOL_CTL_PATTERN_OCC = 1<<13, + WOL_CTL_CLEAR_RESULT = 1<<12, + WOL_CTL_ENA_PME_ON_LINK_CHG = 1<<11, + WOL_CTL_DIS_PME_ON_LINK_CHG = 1<<10, + WOL_CTL_ENA_PME_ON_MAGIC_PKT = 1<<9, + WOL_CTL_DIS_PME_ON_MAGIC_PKT = 1<<8, + WOL_CTL_ENA_PME_ON_PATTERN = 1<<7, + WOL_CTL_DIS_PME_ON_PATTERN = 1<<6, + WOL_CTL_ENA_LINK_CHG_UNIT = 1<<5, + WOL_CTL_DIS_LINK_CHG_UNIT = 1<<4, + WOL_CTL_ENA_MAGIC_PKT_UNIT = 1<<3, + WOL_CTL_DIS_MAGIC_PKT_UNIT = 1<<2, + WOL_CTL_ENA_PATTERN_UNIT = 1<<1, + WOL_CTL_DIS_PATTERN_UNIT = 1<<0, +}; + +#define WOL_CTL_DEFAULT \ + (WOL_CTL_DIS_PME_ON_LINK_CHG | \ + WOL_CTL_DIS_PME_ON_PATTERN | \ + WOL_CTL_DIS_PME_ON_MAGIC_PKT | \ + WOL_CTL_DIS_LINK_CHG_UNIT | \ + WOL_CTL_DIS_PATTERN_UNIT | \ + WOL_CTL_DIS_MAGIC_PKT_UNIT) + +/* WOL_MATCH_CTL 8 bit WOL Match Control Reg */ +#define WOL_CTL_PATT_ENA(x) (1 << (x)) + + +/* XMAC II registers */ +enum { + XM_MMU_CMD = 0x0000, /* 16 bit r/w MMU Command Register */ + XM_POFF = 0x0008, /* 32 bit r/w Packet Offset Register */ + XM_BURST = 0x000c, /* 32 bit r/w Burst Register for half duplex*/ + XM_1L_VLAN_TAG = 0x0010, /* 16 bit r/w One Level VLAN Tag ID */ + XM_2L_VLAN_TAG = 0x0014, /* 16 bit r/w Two Level VLAN Tag ID */ + XM_TX_CMD = 0x0020, /* 16 bit r/w Transmit Command Register */ + XM_TX_RT_LIM = 0x0024, /* 16 bit r/w Transmit Retry Limit Register */ + XM_TX_STIME = 0x0028, /* 16 bit r/w Transmit Slottime Register */ + XM_TX_IPG = 0x002c, /* 16 bit r/w Transmit Inter Packet Gap */ + XM_RX_CMD = 0x0030, /* 16 bit r/w Receive Command Register */ + XM_PHY_ADDR = 0x0034, /* 16 bit r/w PHY Address Register */ + XM_PHY_DATA = 0x0038, /* 16 bit r/w PHY Data Register */ + XM_GP_PORT = 0x0040, /* 32 bit r/w General Purpose Port Register */ + XM_IMSK = 0x0044, /* 16 bit r/w Interrupt Mask Register */ + XM_ISRC = 0x0048, /* 16 bit r/o Interrupt Status Register */ + XM_HW_CFG = 0x004c, /* 16 bit r/w Hardware Config Register */ + XM_TX_LO_WM = 0x0060, /* 16 bit r/w Tx FIFO Low Water Mark */ + XM_TX_HI_WM = 0x0062, /* 16 bit r/w Tx FIFO High Water Mark */ + XM_TX_THR = 0x0064, /* 16 bit r/w Tx Request Threshold */ + XM_HT_THR = 0x0066, /* 16 bit r/w Host Request Threshold */ + XM_PAUSE_DA = 0x0068, /* NA reg r/w Pause Destination Address */ + XM_CTL_PARA = 0x0070, /* 32 bit r/w Control Parameter Register */ + XM_MAC_OPCODE = 0x0074, /* 16 bit r/w Opcode for MAC control frames */ + XM_MAC_PTIME = 0x0076, /* 16 bit r/w Pause time for MAC ctrl frames*/ + XM_TX_STAT = 0x0078, /* 32 bit r/o Tx Status LIFO Register */ + + XM_EXM_START = 0x0080, /* r/w Start Address of the EXM Regs */ +#define XM_EXM(reg) (XM_EXM_START + ((reg) << 3)) +}; + +enum { + XM_SRC_CHK = 0x0100, /* NA reg r/w Source Check Address Register */ + XM_SA = 0x0108, /* NA reg r/w Station Address Register */ + XM_HSM = 0x0110, /* 64 bit r/w Hash Match Address Registers */ + XM_RX_LO_WM = 0x0118, /* 16 bit r/w Receive Low Water Mark */ + XM_RX_HI_WM = 0x011a, /* 16 bit r/w Receive High Water Mark */ + XM_RX_THR = 0x011c, /* 32 bit r/w Receive Request Threshold */ + XM_DEV_ID = 0x0120, /* 32 bit r/o Device ID Register */ + XM_MODE = 0x0124, /* 32 bit r/w Mode Register */ + XM_LSA = 0x0128, /* NA reg r/o Last Source Register */ + XM_TS_READ = 0x0130, /* 32 bit r/o Time Stamp Read Register */ + XM_TS_LOAD = 0x0134, /* 32 bit r/o Time Stamp Load Value */ + 
XM_STAT_CMD = 0x0200, /* 16 bit r/w Statistics Command Register */ + XM_RX_CNT_EV = 0x0204, /* 32 bit r/o Rx Counter Event Register */ + XM_TX_CNT_EV = 0x0208, /* 32 bit r/o Tx Counter Event Register */ + XM_RX_EV_MSK = 0x020c, /* 32 bit r/w Rx Counter Event Mask */ + XM_TX_EV_MSK = 0x0210, /* 32 bit r/w Tx Counter Event Mask */ + XM_TXF_OK = 0x0280, /* 32 bit r/o Frames Transmitted OK Conuter */ + XM_TXO_OK_HI = 0x0284, /* 32 bit r/o Octets Transmitted OK High Cnt*/ + XM_TXO_OK_LO = 0x0288, /* 32 bit r/o Octets Transmitted OK Low Cnt */ + XM_TXF_BC_OK = 0x028c, /* 32 bit r/o Broadcast Frames Xmitted OK */ + XM_TXF_MC_OK = 0x0290, /* 32 bit r/o Multicast Frames Xmitted OK */ + XM_TXF_UC_OK = 0x0294, /* 32 bit r/o Unicast Frames Xmitted OK */ + XM_TXF_LONG = 0x0298, /* 32 bit r/o Tx Long Frame Counter */ + XM_TXE_BURST = 0x029c, /* 32 bit r/o Tx Burst Event Counter */ + XM_TXF_MPAUSE = 0x02a0, /* 32 bit r/o Tx Pause MAC Ctrl Frame Cnt */ + XM_TXF_MCTRL = 0x02a4, /* 32 bit r/o Tx MAC Ctrl Frame Counter */ + XM_TXF_SNG_COL = 0x02a8, /* 32 bit r/o Tx Single Collision Counter */ + XM_TXF_MUL_COL = 0x02ac, /* 32 bit r/o Tx Multiple Collision Counter */ + XM_TXF_ABO_COL = 0x02b0, /* 32 bit r/o Tx aborted due to Exces. Col. */ + XM_TXF_LAT_COL = 0x02b4, /* 32 bit r/o Tx Late Collision Counter */ + XM_TXF_DEF = 0x02b8, /* 32 bit r/o Tx Deferred Frame Counter */ + XM_TXF_EX_DEF = 0x02bc, /* 32 bit r/o Tx Excessive Deferall Counter */ + XM_TXE_FIFO_UR = 0x02c0, /* 32 bit r/o Tx FIFO Underrun Event Cnt */ + XM_TXE_CS_ERR = 0x02c4, /* 32 bit r/o Tx Carrier Sense Error Cnt */ + XM_TXP_UTIL = 0x02c8, /* 32 bit r/o Tx Utilization in % */ + XM_TXF_64B = 0x02d0, /* 32 bit r/o 64 Byte Tx Frame Counter */ + XM_TXF_127B = 0x02d4, /* 32 bit r/o 65-127 Byte Tx Frame Counter */ + XM_TXF_255B = 0x02d8, /* 32 bit r/o 128-255 Byte Tx Frame Counter */ + XM_TXF_511B = 0x02dc, /* 32 bit r/o 256-511 Byte Tx Frame Counter */ + XM_TXF_1023B = 0x02e0, /* 32 bit r/o 512-1023 Byte Tx Frame Counter*/ + XM_TXF_MAX_SZ = 0x02e4, /* 32 bit r/o 1024-MaxSize Byte Tx Frame Cnt*/ + XM_RXF_OK = 0x0300, /* 32 bit r/o Frames Received OK */ + XM_RXO_OK_HI = 0x0304, /* 32 bit r/o Octets Received OK High Cnt */ + XM_RXO_OK_LO = 0x0308, /* 32 bit r/o Octets Received OK Low Counter*/ + XM_RXF_BC_OK = 0x030c, /* 32 bit r/o Broadcast Frames Received OK */ + XM_RXF_MC_OK = 0x0310, /* 32 bit r/o Multicast Frames Received OK */ + XM_RXF_UC_OK = 0x0314, /* 32 bit r/o Unicast Frames Received OK */ + XM_RXF_MPAUSE = 0x0318, /* 32 bit r/o Rx Pause MAC Ctrl Frame Cnt */ + XM_RXF_MCTRL = 0x031c, /* 32 bit r/o Rx MAC Ctrl Frame Counter */ + XM_RXF_INV_MP = 0x0320, /* 32 bit r/o Rx invalid Pause Frame Cnt */ + XM_RXF_INV_MOC = 0x0324, /* 32 bit r/o Rx Frames with inv. 
MAC Opcode*/ + XM_RXE_BURST = 0x0328, /* 32 bit r/o Rx Burst Event Counter */ + XM_RXE_FMISS = 0x032c, /* 32 bit r/o Rx Missed Frames Event Cnt */ + XM_RXF_FRA_ERR = 0x0330, /* 32 bit r/o Rx Framing Error Counter */ + XM_RXE_FIFO_OV = 0x0334, /* 32 bit r/o Rx FIFO overflow Event Cnt */ + XM_RXF_JAB_PKT = 0x0338, /* 32 bit r/o Rx Jabber Packet Frame Cnt */ + XM_RXE_CAR_ERR = 0x033c, /* 32 bit r/o Rx Carrier Event Error Cnt */ + XM_RXF_LEN_ERR = 0x0340, /* 32 bit r/o Rx in Range Length Error */ + XM_RXE_SYM_ERR = 0x0344, /* 32 bit r/o Rx Symbol Error Counter */ + XM_RXE_SHT_ERR = 0x0348, /* 32 bit r/o Rx Short Event Error Cnt */ + XM_RXE_RUNT = 0x034c, /* 32 bit r/o Rx Runt Event Counter */ + XM_RXF_LNG_ERR = 0x0350, /* 32 bit r/o Rx Frame too Long Error Cnt */ + XM_RXF_FCS_ERR = 0x0354, /* 32 bit r/o Rx Frame Check Seq. Error Cnt */ + XM_RXF_CEX_ERR = 0x035c, /* 32 bit r/o Rx Carrier Ext Error Frame Cnt*/ + XM_RXP_UTIL = 0x0360, /* 32 bit r/o Rx Utilization in % */ + XM_RXF_64B = 0x0368, /* 32 bit r/o 64 Byte Rx Frame Counter */ + XM_RXF_127B = 0x036c, /* 32 bit r/o 65-127 Byte Rx Frame Counter */ + XM_RXF_255B = 0x0370, /* 32 bit r/o 128-255 Byte Rx Frame Counter */ + XM_RXF_511B = 0x0374, /* 32 bit r/o 256-511 Byte Rx Frame Counter */ + XM_RXF_1023B = 0x0378, /* 32 bit r/o 512-1023 Byte Rx Frame Counter*/ + XM_RXF_MAX_SZ = 0x037c, /* 32 bit r/o 1024-MaxSize Byte Rx Frame Cnt*/ +}; + +/* XM_MMU_CMD 16 bit r/w MMU Command Register */ +enum { + XM_MMU_PHY_RDY = 1<<12, /* Bit 12: PHY Read Ready */ + XM_MMU_PHY_BUSY = 1<<11, /* Bit 11: PHY Busy */ + XM_MMU_IGN_PF = 1<<10, /* Bit 10: Ignore Pause Frame */ + XM_MMU_MAC_LB = 1<<9, /* Bit 9: Enable MAC Loopback */ + XM_MMU_FRC_COL = 1<<7, /* Bit 7: Force Collision */ + XM_MMU_SIM_COL = 1<<6, /* Bit 6: Simulate Collision */ + XM_MMU_NO_PRE = 1<<5, /* Bit 5: No MDIO Preamble */ + XM_MMU_GMII_FD = 1<<4, /* Bit 4: GMII uses Full Duplex */ + XM_MMU_RAT_CTRL = 1<<3, /* Bit 3: Enable Rate Control */ + XM_MMU_GMII_LOOP= 1<<2, /* Bit 2: PHY is in Loopback Mode */ + XM_MMU_ENA_RX = 1<<1, /* Bit 1: Enable Receiver */ + XM_MMU_ENA_TX = 1<<0, /* Bit 0: Enable Transmitter */ +}; + + +/* XM_TX_CMD 16 bit r/w Transmit Command Register */ +enum { + XM_TX_BK2BK = 1<<6, /* Bit 6: Ignor Carrier Sense (Tx Bk2Bk)*/ + XM_TX_ENC_BYP = 1<<5, /* Bit 5: Set Encoder in Bypass Mode */ + XM_TX_SAM_LINE = 1<<4, /* Bit 4: (sc) Start utilization calculation */ + XM_TX_NO_GIG_MD = 1<<3, /* Bit 3: Disable Carrier Extension */ + XM_TX_NO_PRE = 1<<2, /* Bit 2: Disable Preamble Generation */ + XM_TX_NO_CRC = 1<<1, /* Bit 1: Disable CRC Generation */ + XM_TX_AUTO_PAD = 1<<0, /* Bit 0: Enable Automatic Padding */ +}; + +/* XM_TX_RT_LIM 16 bit r/w Transmit Retry Limit Register */ +#define XM_RT_LIM_MSK 0x1f /* Bit 4..0: Tx Retry Limit */ + + +/* XM_TX_STIME 16 bit r/w Transmit Slottime Register */ +#define XM_STIME_MSK 0x7f /* Bit 6..0: Tx Slottime bits */ + + +/* XM_TX_IPG 16 bit r/w Transmit Inter Packet Gap */ +#define XM_IPG_MSK 0xff /* Bit 7..0: IPG value bits */ + + +/* XM_RX_CMD 16 bit r/w Receive Command Register */ +enum { + XM_RX_LENERR_OK = 1<<8, /* Bit 8 don't set Rx Err bit for */ + /* inrange error packets */ + XM_RX_BIG_PK_OK = 1<<7, /* Bit 7 don't set Rx Err bit for */ + /* jumbo packets */ + XM_RX_IPG_CAP = 1<<6, /* Bit 6 repl. 
type field with IPG */ + XM_RX_TP_MD = 1<<5, /* Bit 5: Enable transparent Mode */ + XM_RX_STRIP_FCS = 1<<4, /* Bit 4: Enable FCS Stripping */ + XM_RX_SELF_RX = 1<<3, /* Bit 3: Enable Rx of own packets */ + XM_RX_SAM_LINE = 1<<2, /* Bit 2: (sc) Start utilization calculation */ + XM_RX_STRIP_PAD = 1<<1, /* Bit 1: Strip pad bytes of Rx frames */ + XM_RX_DIS_CEXT = 1<<0, /* Bit 0: Disable carrier ext. check */ +}; + + +/* XM_GP_PORT 32 bit r/w General Purpose Port Register */ +enum { + XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */ + XM_GP_FRC_INT = 1<<5, /* Bit 5: (sc) Force Interrupt */ + XM_GP_RES_MAC = 1<<3, /* Bit 3: (sc) Reset MAC and FIFOs */ + XM_GP_RES_STAT = 1<<2, /* Bit 2: (sc) Reset the statistics module */ + XM_GP_INP_ASS = 1<<0, /* Bit 0: (ro) GP Input Pin asserted */ +}; + + +/* XM_IMSK 16 bit r/w Interrupt Mask Register */ +/* XM_ISRC 16 bit r/o Interrupt Status Register */ +enum { + XM_IS_LNK_AE = 1<<14, /* Bit 14: Link Asynchronous Event */ + XM_IS_TX_ABORT = 1<<13, /* Bit 13: Transmit Abort, late Col. etc */ + XM_IS_FRC_INT = 1<<12, /* Bit 12: Force INT bit set in GP */ + XM_IS_INP_ASS = 1<<11, /* Bit 11: Input Asserted, GP bit 0 set */ + XM_IS_LIPA_RC = 1<<10, /* Bit 10: Link Partner requests config */ + XM_IS_RX_PAGE = 1<<9, /* Bit 9: Page Received */ + XM_IS_TX_PAGE = 1<<8, /* Bit 8: Next Page Loaded for Transmit */ + XM_IS_AND = 1<<7, /* Bit 7: Auto-Negotiation Done */ + XM_IS_TSC_OV = 1<<6, /* Bit 6: Time Stamp Counter Overflow */ + XM_IS_RXC_OV = 1<<5, /* Bit 5: Rx Counter Event Overflow */ + XM_IS_TXC_OV = 1<<4, /* Bit 4: Tx Counter Event Overflow */ + XM_IS_RXF_OV = 1<<3, /* Bit 3: Receive FIFO Overflow */ + XM_IS_TXF_UR = 1<<2, /* Bit 2: Transmit FIFO Underrun */ + XM_IS_TX_COMP = 1<<1, /* Bit 1: Frame Tx Complete */ + XM_IS_RX_COMP = 1<<0, /* Bit 0: Frame Rx Complete */ + + XM_IMSK_DISABLE = 0xffff, +}; + +/* XM_HW_CFG 16 bit r/w Hardware Config Register */ +enum { + XM_HW_GEN_EOP = 1<<3, /* Bit 3: generate End of Packet pulse */ + XM_HW_COM4SIG = 1<<2, /* Bit 2: use Comma Detect for Sig. Det.*/ + XM_HW_GMII_MD = 1<<0, /* Bit 0: GMII Interface selected */ +}; + + +/* XM_TX_LO_WM 16 bit r/w Tx FIFO Low Water Mark */ +/* XM_TX_HI_WM 16 bit r/w Tx FIFO High Water Mark */ +#define XM_TX_WM_MSK 0x01ff /* Bit 9.. 0 Tx FIFO Watermark bits */ + +/* XM_TX_THR 16 bit r/w Tx Request Threshold */ +/* XM_HT_THR 16 bit r/w Host Request Threshold */ +/* XM_RX_THR 16 bit r/w Rx Request Threshold */ +#define XM_THR_MSK 0x03ff /* Bit 10.. 0 Rx/Tx Request Threshold bits */ + + +/* XM_TX_STAT 32 bit r/o Tx Status LIFO Register */ +enum { + XM_ST_VALID = (1UL<<31), /* Bit 31: Status Valid */ + XM_ST_BYTE_CNT = (0x3fffL<<17), /* Bit 30..17: Tx frame Length */ + XM_ST_RETRY_CNT = (0x1fL<<12), /* Bit 16..12: Retry Count */ + XM_ST_EX_COL = 1<<11, /* Bit 11: Excessive Collisions */ + XM_ST_EX_DEF = 1<<10, /* Bit 10: Excessive Deferral */ + XM_ST_BURST = 1<<9, /* Bit 9: p. 
xmitted in burst md*/ + XM_ST_DEFER = 1<<8, /* Bit 8: packet was defered */ + XM_ST_BC = 1<<7, /* Bit 7: Broadcast packet */ + XM_ST_MC = 1<<6, /* Bit 6: Multicast packet */ + XM_ST_UC = 1<<5, /* Bit 5: Unicast packet */ + XM_ST_TX_UR = 1<<4, /* Bit 4: FIFO Underrun occurred */ + XM_ST_CS_ERR = 1<<3, /* Bit 3: Carrier Sense Error */ + XM_ST_LAT_COL = 1<<2, /* Bit 2: Late Collision Error */ + XM_ST_MUL_COL = 1<<1, /* Bit 1: Multiple Collisions */ + XM_ST_SGN_COL = 1<<0, /* Bit 0: Single Collision */ +}; + +/* XM_RX_LO_WM 16 bit r/w Receive Low Water Mark */ +/* XM_RX_HI_WM 16 bit r/w Receive High Water Mark */ +#define XM_RX_WM_MSK 0x03ff /* Bit 11.. 0: Rx FIFO Watermark bits */ + + +/* XM_DEV_ID 32 bit r/o Device ID Register */ +#define XM_DEV_OUI (0x00ffffffUL<<8) /* Bit 31..8: Device OUI */ +#define XM_DEV_REV (0x07L << 5) /* Bit 7..5: Chip Rev Num */ + + +/* XM_MODE 32 bit r/w Mode Register */ +enum { + XM_MD_ENA_REJ = 1<<26, /* Bit 26: Enable Frame Reject */ + XM_MD_SPOE_E = 1<<25, /* Bit 25: Send Pause on Edge */ + /* extern generated */ + XM_MD_TX_REP = 1<<24, /* Bit 24: Transmit Repeater Mode */ + XM_MD_SPOFF_I = 1<<23, /* Bit 23: Send Pause on FIFO full */ + /* intern generated */ + XM_MD_LE_STW = 1<<22, /* Bit 22: Rx Stat Word in Little Endian */ + XM_MD_TX_CONT = 1<<21, /* Bit 21: Send Continuous */ + XM_MD_TX_PAUSE = 1<<20, /* Bit 20: (sc) Send Pause Frame */ + XM_MD_ATS = 1<<19, /* Bit 19: Append Time Stamp */ + XM_MD_SPOL_I = 1<<18, /* Bit 18: Send Pause on Low */ + /* intern generated */ + XM_MD_SPOH_I = 1<<17, /* Bit 17: Send Pause on High */ + /* intern generated */ + XM_MD_CAP = 1<<16, /* Bit 16: Check Address Pair */ + XM_MD_ENA_HASH = 1<<15, /* Bit 15: Enable Hashing */ + XM_MD_CSA = 1<<14, /* Bit 14: Check Station Address */ + XM_MD_CAA = 1<<13, /* Bit 13: Check Address Array */ + XM_MD_RX_MCTRL = 1<<12, /* Bit 12: Rx MAC Control Frame */ + XM_MD_RX_RUNT = 1<<11, /* Bit 11: Rx Runt Frames */ + XM_MD_RX_IRLE = 1<<10, /* Bit 10: Rx in Range Len Err Frame */ + XM_MD_RX_LONG = 1<<9, /* Bit 9: Rx Long Frame */ + XM_MD_RX_CRCE = 1<<8, /* Bit 8: Rx CRC Error Frame */ + XM_MD_RX_ERR = 1<<7, /* Bit 7: Rx Error Frame */ + XM_MD_DIS_UC = 1<<6, /* Bit 6: Disable Rx Unicast */ + XM_MD_DIS_MC = 1<<5, /* Bit 5: Disable Rx Multicast */ + XM_MD_DIS_BC = 1<<4, /* Bit 4: Disable Rx Broadcast */ + XM_MD_ENA_PROM = 1<<3, /* Bit 3: Enable Promiscuous */ + XM_MD_ENA_BE = 1<<2, /* Bit 2: Enable Big Endian */ + XM_MD_FTF = 1<<1, /* Bit 1: (sc) Flush Tx FIFO */ + XM_MD_FRF = 1<<0, /* Bit 0: (sc) Flush Rx FIFO */ +}; + +#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I) +#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\ + XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA) + +/* XM_STAT_CMD 16 bit r/w Statistics Command Register */ +enum { + XM_SC_SNP_RXC = 1<<5, /* Bit 5: (sc) Snap Rx Counters */ + XM_SC_SNP_TXC = 1<<4, /* Bit 4: (sc) Snap Tx Counters */ + XM_SC_CP_RXC = 1<<3, /* Bit 3: Copy Rx Counters Continuously */ + XM_SC_CP_TXC = 1<<2, /* Bit 2: Copy Tx Counters Continuously */ + XM_SC_CLR_RXC = 1<<1, /* Bit 1: (sc) Clear Rx Counters */ + XM_SC_CLR_TXC = 1<<0, /* Bit 0: (sc) Clear Tx Counters */ +}; + + +/* XM_RX_CNT_EV 32 bit r/o Rx Counter Event Register */ +/* XM_RX_EV_MSK 32 bit r/w Rx Counter Event Mask */ +enum { + XMR_MAX_SZ_OV = 1<<31, /* Bit 31: 1024-MaxSize Rx Cnt Ov*/ + XMR_1023B_OV = 1<<30, /* Bit 30: 512-1023Byte Rx Cnt Ov*/ + XMR_511B_OV = 1<<29, /* Bit 29: 256-511 Byte Rx Cnt Ov*/ + XMR_255B_OV = 1<<28, /* Bit 28: 128-255 Byte Rx Cnt Ov*/ + 
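
XM_TX_STAT packs several fields into one 32-bit LIFO word: a valid flag in bit 31, the frame length in bits 30..17, the retry count in bits 16..12, then per-event flags below. Extraction is the usual mask-then-shift; a hypothetical decode helper (not part of the driver) using the XM_TX_STAT field layout defined above:

#include <stdio.h>
#include <stdint.h>

#define XM_ST_VALID     (1UL << 31)        /* Bit 31: status valid   */
#define XM_ST_BYTE_CNT  (0x3fffUL << 17)   /* Bit 30..17: frame len  */
#define XM_ST_RETRY_CNT (0x1fUL << 12)     /* Bit 16..12: retries    */

static void decode_tx_stat(uint32_t st)
{
    if (!(st & XM_ST_VALID))
        return;                            /* LIFO entry not valid */
    printf("len=%lu retries=%lu\n",
           (st & XM_ST_BYTE_CNT) >> 17,
           (st & XM_ST_RETRY_CNT) >> 12);
}

int main(void)
{
    /* a made-up status word: valid, 64-byte frame, 2 retries */
    decode_tx_stat(0x80000000UL | (64UL << 17) | (2UL << 12));
    return 0;
}
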
XMR_127B_OV = 1<<27, /* Bit 27: 65-127 Byte Rx Cnt Ov */ + XMR_64B_OV = 1<<26, /* Bit 26: 64 Byte Rx Cnt Ov */ + XMR_UTIL_OV = 1<<25, /* Bit 25: Rx Util Cnt Overflow */ + XMR_UTIL_UR = 1<<24, /* Bit 24: Rx Util Cnt Underrun */ + XMR_CEX_ERR_OV = 1<<23, /* Bit 23: CEXT Err Cnt Ov */ + XMR_FCS_ERR_OV = 1<<21, /* Bit 21: Rx FCS Error Cnt Ov */ + XMR_LNG_ERR_OV = 1<<20, /* Bit 20: Rx too Long Err Cnt Ov*/ + XMR_RUNT_OV = 1<<19, /* Bit 19: Runt Event Cnt Ov */ + XMR_SHT_ERR_OV = 1<<18, /* Bit 18: Rx Short Ev Err Cnt Ov*/ + XMR_SYM_ERR_OV = 1<<17, /* Bit 17: Rx Sym Err Cnt Ov */ + XMR_CAR_ERR_OV = 1<<15, /* Bit 15: Rx Carr Ev Err Cnt Ov */ + XMR_JAB_PKT_OV = 1<<14, /* Bit 14: Rx Jabb Packet Cnt Ov */ + XMR_FIFO_OV = 1<<13, /* Bit 13: Rx FIFO Ov Ev Cnt Ov */ + XMR_FRA_ERR_OV = 1<<12, /* Bit 12: Rx Framing Err Cnt Ov */ + XMR_FMISS_OV = 1<<11, /* Bit 11: Rx Missed Ev Cnt Ov */ + XMR_BURST = 1<<10, /* Bit 10: Rx Burst Event Cnt Ov */ + XMR_INV_MOC = 1<<9, /* Bit 9: Rx with inv. MAC OC Ov*/ + XMR_INV_MP = 1<<8, /* Bit 8: Rx inv Pause Frame Ov */ + XMR_MCTRL_OV = 1<<7, /* Bit 7: Rx MAC Ctrl-F Cnt Ov */ + XMR_MPAUSE_OV = 1<<6, /* Bit 6: Rx Pause MAC Ctrl-F Ov*/ + XMR_UC_OK_OV = 1<<5, /* Bit 5: Rx Unicast Frame CntOv*/ + XMR_MC_OK_OV = 1<<4, /* Bit 4: Rx Multicast Cnt Ov */ + XMR_BC_OK_OV = 1<<3, /* Bit 3: Rx Broadcast Cnt Ov */ + XMR_OK_LO_OV = 1<<2, /* Bit 2: Octets Rx OK Low CntOv*/ + XMR_OK_HI_OV = 1<<1, /* Bit 1: Octets Rx OK Hi Cnt Ov*/ + XMR_OK_OV = 1<<0, /* Bit 0: Frames Received Ok Ov */ +}; + +#define XMR_DEF_MSK (XMR_OK_LO_OV | XMR_OK_HI_OV) + +/* XM_TX_CNT_EV 32 bit r/o Tx Counter Event Register */ +/* XM_TX_EV_MSK 32 bit r/w Tx Counter Event Mask */ +enum { + XMT_MAX_SZ_OV = 1<<25, /* Bit 25: 1024-MaxSize Tx Cnt Ov*/ + XMT_1023B_OV = 1<<24, /* Bit 24: 512-1023Byte Tx Cnt Ov*/ + XMT_511B_OV = 1<<23, /* Bit 23: 256-511 Byte Tx Cnt Ov*/ + XMT_255B_OV = 1<<22, /* Bit 22: 128-255 Byte Tx Cnt Ov*/ + XMT_127B_OV = 1<<21, /* Bit 21: 65-127 Byte Tx Cnt Ov */ + XMT_64B_OV = 1<<20, /* Bit 20: 64 Byte Tx Cnt Ov */ + XMT_UTIL_OV = 1<<19, /* Bit 19: Tx Util Cnt Overflow */ + XMT_UTIL_UR = 1<<18, /* Bit 18: Tx Util Cnt Underrun */ + XMT_CS_ERR_OV = 1<<17, /* Bit 17: Tx Carr Sen Err Cnt Ov*/ + XMT_FIFO_UR_OV = 1<<16, /* Bit 16: Tx FIFO Ur Ev Cnt Ov */ + XMT_EX_DEF_OV = 1<<15, /* Bit 15: Tx Ex Deferall Cnt Ov */ + XMT_DEF = 1<<14, /* Bit 14: Tx Deferred Cnt Ov */ + XMT_LAT_COL_OV = 1<<13, /* Bit 13: Tx Late Col Cnt Ov */ + XMT_ABO_COL_OV = 1<<12, /* Bit 12: Tx abo dueto Ex Col Ov*/ + XMT_MUL_COL_OV = 1<<11, /* Bit 11: Tx Mult Col Cnt Ov */ + XMT_SNG_COL = 1<<10, /* Bit 10: Tx Single Col Cnt Ov */ + XMT_MCTRL_OV = 1<<9, /* Bit 9: Tx MAC Ctrl Counter Ov*/ + XMT_MPAUSE = 1<<8, /* Bit 8: Tx Pause MAC Ctrl-F Ov*/ + XMT_BURST = 1<<7, /* Bit 7: Tx Burst Event Cnt Ov */ + XMT_LONG = 1<<6, /* Bit 6: Tx Long Frame Cnt Ov */ + XMT_UC_OK_OV = 1<<5, /* Bit 5: Tx Unicast Cnt Ov */ + XMT_MC_OK_OV = 1<<4, /* Bit 4: Tx Multicast Cnt Ov */ + XMT_BC_OK_OV = 1<<3, /* Bit 3: Tx Broadcast Cnt Ov */ + XMT_OK_LO_OV = 1<<2, /* Bit 2: Octets Tx OK Low CntOv*/ + XMT_OK_HI_OV = 1<<1, /* Bit 1: Octets Tx OK Hi Cnt Ov*/ + XMT_OK_OV = 1<<0, /* Bit 0: Frames Tx Ok Ov */ +}; + +#define XMT_DEF_MSK (XMT_OK_LO_OV | XMT_OK_HI_OV) + +struct skge_rx_desc { + u32 control; + u32 next_offset; + u32 dma_lo; + u32 dma_hi; + u32 status; + u32 timestamp; + u16 csum2; + u16 csum1; + u16 csum2_start; + u16 csum1_start; +}; + +struct skge_tx_desc { + u32 control; + u32 next_offset; + u32 dma_lo; + u32 dma_hi; + u32 status; + u32 csum_offs; + u16 
csum_write; + u16 csum_start; + u32 rsvd; +}; + +struct skge_element { + struct skge_element *next; + void *desc; + struct sk_buff *skb; + DEFINE_DMA_UNMAP_ADDR(mapaddr); + DEFINE_DMA_UNMAP_LEN(maplen); +}; + +struct skge_ring { + struct skge_element *to_clean; + struct skge_element *to_use; + struct skge_element *start; + unsigned long count; +}; + + +struct skge_hw { + void __iomem *regs; + struct pci_dev *pdev; + spinlock_t hw_lock; + u32 intr_mask; + struct net_device *dev[2]; + + u8 chip_id; + u8 chip_rev; + u8 copper; + u8 ports; + u8 phy_type; + + u32 ram_size; + u32 ram_offset; + u16 phy_addr; + spinlock_t phy_lock; + struct tasklet_struct phy_task; + + char irq_name[0]; /* skge@pci:000:04:00.0 */ +}; + +enum pause_control { + FLOW_MODE_NONE = 1, /* No Flow-Control */ + FLOW_MODE_LOC_SEND = 2, /* Local station sends PAUSE */ + FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */ + FLOW_MODE_SYM_OR_REM = 4, /* Both stations may send PAUSE or + * just the remote station may send PAUSE + */ +}; + +enum pause_status { + FLOW_STAT_INDETERMINATED=0, /* indeterminated */ + FLOW_STAT_NONE, /* No Flow Control */ + FLOW_STAT_REM_SEND, /* Remote Station sends PAUSE */ + FLOW_STAT_LOC_SEND, /* Local station sends PAUSE */ + FLOW_STAT_SYMMETRIC, /* Both station may send PAUSE */ +}; + + +struct skge_port { + struct skge_hw *hw; + struct net_device *netdev; + struct napi_struct napi; + int port; + u32 msg_enable; + + struct skge_ring tx_ring; + + struct skge_ring rx_ring ____cacheline_aligned_in_smp; + unsigned int rx_buf_size; + + struct timer_list link_timer; + enum pause_control flow_control; + enum pause_status flow_status; + u8 blink_on; + u8 wol; + u8 autoneg; /* AUTONEG_ENABLE, AUTONEG_DISABLE */ + u8 duplex; /* DUPLEX_HALF, DUPLEX_FULL */ + u16 speed; /* SPEED_1000, SPEED_100, ... */ + u32 advertising; + + void *mem; /* PCI memory for rings */ + dma_addr_t dma; + unsigned long mem_size; +#ifdef CONFIG_SKGE_DEBUG + struct dentry *debugfs; +#endif +}; + + +/* Register accessor for memory mapped device */ +static inline u32 skge_read32(const struct skge_hw *hw, int reg) +{ + return readl(hw->regs + reg); +} + +static inline u16 skge_read16(const struct skge_hw *hw, int reg) +{ + return readw(hw->regs + reg); +} + +static inline u8 skge_read8(const struct skge_hw *hw, int reg) +{ + return readb(hw->regs + reg); +} + +static inline void skge_write32(const struct skge_hw *hw, int reg, u32 val) +{ + writel(val, hw->regs + reg); +} + +static inline void skge_write16(const struct skge_hw *hw, int reg, u16 val) +{ + writew(val, hw->regs + reg); +} + +static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val) +{ + writeb(val, hw->regs + reg); +} + +/* MAC Related Registers inside the device. 
*/ +#define SK_REG(port,reg) (((port)<<7)+(u16)(reg)) +#define SK_XMAC_REG(port, reg) \ + ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1) + +static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg) +{ + u32 v; + v = skge_read16(hw, SK_XMAC_REG(port, reg)); + v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16; + return v; +} + +static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg) +{ + return skge_read16(hw, SK_XMAC_REG(port,reg)); +} + +static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v) +{ + skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff); + skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16); +} + +static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v) +{ + skge_write16(hw, SK_XMAC_REG(port,r), v); +} + +static inline void xm_outhash(const struct skge_hw *hw, int port, int reg, + const u8 *hash) +{ + xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8)); + xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8)); + xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8)); + xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8)); +} + +static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg, + const u8 *addr) +{ + xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8)); + xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8)); + xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8)); +} + +#define SK_GMAC_REG(port,reg) \ + (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg)) + +static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg) +{ + return skge_read16(hw, SK_GMAC_REG(port,reg)); +} + +static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg) +{ + return (u32) skge_read16(hw, SK_GMAC_REG(port,reg)) + | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16); +} + +static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v) +{ + skge_write16(hw, SK_GMAC_REG(port,r), v); +} + +static inline void gma_set_addr(struct skge_hw *hw, int port, int reg, + const u8 *addr) +{ + gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8)); + gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8)); + gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8)); +} + +#endif diff --git a/addons/virtio/install.sh b/addons/virtio/install.sh new file mode 100644 index 00000000..ffe448be --- /dev/null +++ b/addons/virtio/install.sh @@ -0,0 +1,14 @@ +if [ "${1}" = "rd" ]; then + echo "Checking for VirtIO..." + if (grep -r -q -E "(QEMU|VirtualBox)" /sys/devices/virtual/dmi/id/); then + echo "VirtIO hypervisor detected!" + ${INSMOD} "/modules/virtio.ko" ${PARAMS} + ${INSMOD} "/modules/virtio_ring.ko" ${PARAMS} + ${INSMOD} "/modules/virtio_mmio.ko" ${PARAMS} + ${INSMOD} "/modules/virtio_pci.ko" ${PARAMS} + ${INSMOD} "/modules/virtio_net.ko" ${PARAMS} + ${INSMOD} "/modules/virtio_scsi.ko" ${PARAMS} + else + echo "No VirtIO hypervisor detected!" 
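
The install script gates VirtIO module loading on the DMI identity the hypervisor exposes under /sys/devices/virtual/dmi/id/. A rough userspace C equivalent of that grep — reading only sys_vendor instead of recursing over every id file, with minimal error handling — might look like:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char buf[128] = "";
    FILE *f = fopen("/sys/devices/virtual/dmi/id/sys_vendor", "r");

    if (f) {
        if (!fgets(buf, sizeof(buf), f))
            buf[0] = '\0';
        fclose(f);
    }
    /* the script matches QEMU or VirtualBox anywhere in the id files */
    if (strstr(buf, "QEMU") || strstr(buf, "VirtualBox"))
        puts("VirtIO hypervisor detected!");
    else
        puts("No VirtIO hypervisor detected!");
    return 0;
}
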
+ fi +fi diff --git a/addons/virtio/manifest.yml b/addons/virtio/manifest.yml new file mode 100644 index 00000000..f67a8123 --- /dev/null +++ b/addons/virtio/manifest.yml @@ -0,0 +1,28 @@ +version: 1 +name: virtio +description: "Drivers for QEMU/Virtualbox hypervisor" +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true diff --git a/addons/virtio/src/3.10.108/Makefile b/addons/virtio/src/3.10.108/Makefile new file mode 100644 index 00000000..bde7770d --- /dev/null +++ b/addons/virtio/src/3.10.108/Makefile @@ -0,0 +1,5 @@ +obj-m += virtio.o virtio_ring.o +obj-m += virtio_mmio.o +obj-m += virtio_pci.o +obj-m += virtio_net.o +obj-m += virtio_scsi.o diff --git a/addons/virtio/src/3.10.108/config.c b/addons/virtio/src/3.10.108/config.c new file mode 100644 index 00000000..f70bcd2f --- /dev/null +++ b/addons/virtio/src/3.10.108/config.c @@ -0,0 +1,12 @@ +/* Configuration space parsing helpers for virtio. + * + * The configuration is [type][len][... len bytes ...] fields. + * + * Copyright 2007 Rusty Russell, IBM Corporation. + * GPL v2 or later. + */ +#include +#include +#include +#include + diff --git a/addons/virtio/src/3.10.108/virtio.c b/addons/virtio/src/3.10.108/virtio.c new file mode 100644 index 00000000..beaa7cc4 --- /dev/null +++ b/addons/virtio/src/3.10.108/virtio.c @@ -0,0 +1,246 @@ +#include +#include +#include +#include +#include + +/* Unique numbering for virtio devices. */ +static DEFINE_IDA(virtio_index_ida); + +static ssize_t device_show(struct device *_d, + struct device_attribute *attr, char *buf) +{ + struct virtio_device *dev = dev_to_virtio(_d); + return sprintf(buf, "0x%04x\n", dev->id.device); +} +static ssize_t vendor_show(struct device *_d, + struct device_attribute *attr, char *buf) +{ + struct virtio_device *dev = dev_to_virtio(_d); + return sprintf(buf, "0x%04x\n", dev->id.vendor); +} +static ssize_t status_show(struct device *_d, + struct device_attribute *attr, char *buf) +{ + struct virtio_device *dev = dev_to_virtio(_d); + return sprintf(buf, "0x%08x\n", dev->config->get_status(dev)); +} +static ssize_t modalias_show(struct device *_d, + struct device_attribute *attr, char *buf) +{ + struct virtio_device *dev = dev_to_virtio(_d); + return sprintf(buf, "virtio:d%08Xv%08X\n", + dev->id.device, dev->id.vendor); +} +static ssize_t features_show(struct device *_d, + struct device_attribute *attr, char *buf) +{ + struct virtio_device *dev = dev_to_virtio(_d); + unsigned int i; + ssize_t len = 0; + + /* We actually represent this as a bitstring, as it could be + * arbitrary length in future. */ + for (i = 0; i < ARRAY_SIZE(dev->features)*BITS_PER_LONG; i++) + len += sprintf(buf+len, "%c", + test_bit(i, dev->features) ? 
'1' : '0'); + len += sprintf(buf+len, "\n"); + return len; +} +static struct device_attribute virtio_dev_attrs[] = { + __ATTR_RO(device), + __ATTR_RO(vendor), + __ATTR_RO(status), + __ATTR_RO(modalias), + __ATTR_RO(features), + __ATTR_NULL +}; + +static inline int virtio_id_match(const struct virtio_device *dev, + const struct virtio_device_id *id) +{ + if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID) + return 0; + + return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor; +} + +/* This looks through all the IDs a driver claims to support. If any of them + * match, we return 1 and the kernel will call virtio_dev_probe(). */ +static int virtio_dev_match(struct device *_dv, struct device_driver *_dr) +{ + unsigned int i; + struct virtio_device *dev = dev_to_virtio(_dv); + const struct virtio_device_id *ids; + + ids = drv_to_virtio(_dr)->id_table; + for (i = 0; ids[i].device; i++) + if (virtio_id_match(dev, &ids[i])) + return 1; + return 0; +} + +static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env) +{ + struct virtio_device *dev = dev_to_virtio(_dv); + + return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X", + dev->id.device, dev->id.vendor); +} + +static void add_status(struct virtio_device *dev, unsigned status) +{ + dev->config->set_status(dev, dev->config->get_status(dev) | status); +} + +void virtio_check_driver_offered_feature(const struct virtio_device *vdev, + unsigned int fbit) +{ + unsigned int i; + struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); + + for (i = 0; i < drv->feature_table_size; i++) + if (drv->feature_table[i] == fbit) + return; + BUG(); +} +EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature); + +static int virtio_dev_probe(struct device *_d) +{ + int err, i; + struct virtio_device *dev = dev_to_virtio(_d); + struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); + u32 device_features; + + /* We have a driver! */ + add_status(dev, VIRTIO_CONFIG_S_DRIVER); + + /* Figure out what features the device supports. */ + device_features = dev->config->get_features(dev); + + /* Features supported by both device and driver into dev->features. */ + memset(dev->features, 0, sizeof(dev->features)); + for (i = 0; i < drv->feature_table_size; i++) { + unsigned int f = drv->feature_table[i]; + BUG_ON(f >= 32); + if (device_features & (1 << f)) + set_bit(f, dev->features); + } + + /* Transport features always preserved to pass to finalize_features. */ + for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) + if (device_features & (1 << i)) + set_bit(i, dev->features); + + dev->config->finalize_features(dev); + + err = drv->probe(dev); + if (err) + add_status(dev, VIRTIO_CONFIG_S_FAILED); + else { + add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); + if (drv->scan) + drv->scan(dev); + } + + return err; +} + +static int virtio_dev_remove(struct device *_d) +{ + struct virtio_device *dev = dev_to_virtio(_d); + struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); + + drv->remove(dev); + + /* Driver should have reset device. */ + WARN_ON_ONCE(dev->config->get_status(dev)); + + /* Acknowledge the device's existence again. */ + add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + return 0; +} + +static struct bus_type virtio_bus = { + .name = "virtio", + .match = virtio_dev_match, + .dev_attrs = virtio_dev_attrs, + .uevent = virtio_uevent, + .probe = virtio_dev_probe, + .remove = virtio_dev_remove, +}; + +int register_virtio_driver(struct virtio_driver *driver) +{ + /* Catch this early. 
*/ + BUG_ON(driver->feature_table_size && !driver->feature_table); + driver->driver.bus = &virtio_bus; + return driver_register(&driver->driver); +} +EXPORT_SYMBOL_GPL(register_virtio_driver); + +void unregister_virtio_driver(struct virtio_driver *driver) +{ + driver_unregister(&driver->driver); +} +EXPORT_SYMBOL_GPL(unregister_virtio_driver); + +int register_virtio_device(struct virtio_device *dev) +{ + int err; + + dev->dev.bus = &virtio_bus; + + /* Assign a unique device index and hence name. */ + err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL); + if (err < 0) + goto out; + + dev->index = err; + dev_set_name(&dev->dev, "virtio%u", dev->index); + + /* We always start by resetting the device, in case a previous + * driver messed it up. This also tests that code path a little. */ + dev->config->reset(dev); + + /* Acknowledge that we've seen the device. */ + add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + + INIT_LIST_HEAD(&dev->vqs); + + /* device_register() causes the bus infrastructure to look for a + * matching driver. */ + err = device_register(&dev->dev); +out: + if (err) + add_status(dev, VIRTIO_CONFIG_S_FAILED); + return err; +} +EXPORT_SYMBOL_GPL(register_virtio_device); + +void unregister_virtio_device(struct virtio_device *dev) +{ + int index = dev->index; /* save for after device release */ + + device_unregister(&dev->dev); + ida_simple_remove(&virtio_index_ida, index); +} +EXPORT_SYMBOL_GPL(unregister_virtio_device); + +static int virtio_init(void) +{ + if (bus_register(&virtio_bus) != 0) + panic("virtio bus registration failed"); + return 0; +} + +static void __exit virtio_exit(void) +{ + bus_unregister(&virtio_bus); + ida_destroy(&virtio_index_ida); +} +core_initcall(virtio_init); +module_exit(virtio_exit); + +MODULE_LICENSE("GPL"); diff --git a/addons/virtio/src/3.10.108/virtio_mmio.c b/addons/virtio/src/3.10.108/virtio_mmio.c new file mode 100644 index 00000000..1ba0d683 --- /dev/null +++ b/addons/virtio/src/3.10.108/virtio_mmio.c @@ -0,0 +1,665 @@ +/* + * Virtio memory mapped device driver + * + * Copyright 2011, ARM Ltd. + * + * This module allows virtio devices to be used over a virtual, memory mapped + * platform device. + * + * The guest device(s) may be instantiated in one of three equivalent ways: + * + * 1. Static platform device in board's code, eg.: + * + * static struct platform_device v2m_virtio_device = { + * .name = "virtio-mmio", + * .id = -1, + * .num_resources = 2, + * .resource = (struct resource []) { + * { + * .start = 0x1001e000, + * .end = 0x1001e0ff, + * .flags = IORESOURCE_MEM, + * }, { + * .start = 42 + 32, + * .end = 42 + 32, + * .flags = IORESOURCE_IRQ, + * }, + * } + * }; + * + * 2. Device Tree node, eg.: + * + * virtio_block@1e000 { + * compatible = "virtio,mmio"; + * reg = <0x1e000 0x100>; + * interrupts = <42>; + * } + * + * 3. Kernel module (or command line) parameter. Can be used more than once - + * one device will be created for each one. Syntax: + * + * [virtio_mmio.]device=@:[:] + * where: + * := size (can use standard suffixes like K, M or G) + * := physical base address + * := interrupt number (as passed to request_irq()) + * := (optional) platform device id + * eg.: + * virtio_mmio.device=0x100@0x100b0000:48 \ + * virtio_mmio.device=1K@0x1001e000:74 + * + * + * + * Registers layout (all 32-bit wide): + * + * offset d. name description + * ------ -- ---------------- ----------------- + * + * 0x000 R MagicValue Magic value "virt" + * 0x004 R Version Device version (current max. 
1) + * 0x008 R DeviceID Virtio device ID + * 0x00c R VendorID Virtio vendor ID + * + * 0x010 R HostFeatures Features supported by the host + * 0x014 W HostFeaturesSel Set of host features to access via HostFeatures + * + * 0x020 W GuestFeatures Features activated by the guest + * 0x024 W GuestFeaturesSel Set of activated features to set via GuestFeatures + * 0x028 W GuestPageSize Size of guest's memory page in bytes + * + * 0x030 W QueueSel Queue selector + * 0x034 R QueueNumMax Maximum size of the currently selected queue + * 0x038 W QueueNum Queue size for the currently selected queue + * 0x03c W QueueAlign Used Ring alignment for the current queue + * 0x040 RW QueuePFN PFN for the currently selected queue + * + * 0x050 W QueueNotify Queue notifier + * 0x060 R InterruptStatus Interrupt status register + * 0x064 W InterruptACK Interrupt acknowledge register + * 0x070 RW Status Device status register + * + * 0x100+ RW Device-specific configuration space + * + * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007 + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#define pr_fmt(fmt) "virtio-mmio: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +/* The alignment to use between consumer and producer parts of vring. + * Currently hardcoded to the page size. */ +#define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE + + + +#define to_virtio_mmio_device(_plat_dev) \ + container_of(_plat_dev, struct virtio_mmio_device, vdev) + +struct virtio_mmio_device { + struct virtio_device vdev; + struct platform_device *pdev; + + void __iomem *base; + unsigned long version; + + /* a list of queues so we can dispatch IRQs */ + spinlock_t lock; + struct list_head virtqueues; +}; + +struct virtio_mmio_vq_info { + /* the actual virtqueue */ + struct virtqueue *vq; + + /* the number of entries in the queue */ + unsigned int num; + + /* the virtual address of the ring queue */ + void *queue; + + /* the list node for the virtqueues list */ + struct list_head node; +}; + + + +/* Configuration interface */ + +static u32 vm_get_features(struct virtio_device *vdev) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + + /* TODO: Features > 32 bits */ + writel(0, vm_dev->base + VIRTIO_MMIO_HOST_FEATURES_SEL); + + return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES); +} + +static void vm_finalize_features(struct virtio_device *vdev) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + int i; + + /* Give virtio_ring a chance to accept features. 
*/ + vring_transport_features(vdev); + + for (i = 0; i < ARRAY_SIZE(vdev->features); i++) { + writel(i, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL); + writel(vdev->features[i], + vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); + } +} + +static void vm_get(struct virtio_device *vdev, unsigned offset, + void *buf, unsigned len) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + ptr[i] = readb(vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); +} + +static void vm_set(struct virtio_device *vdev, unsigned offset, + const void *buf, unsigned len) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + const u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + writeb(ptr[i], vm_dev->base + VIRTIO_MMIO_CONFIG + offset + i); +} + +static u8 vm_get_status(struct virtio_device *vdev) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + + return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff; +} + +static void vm_set_status(struct virtio_device *vdev, u8 status) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + + /* We should never be setting status to 0. */ + BUG_ON(status == 0); + + writel(status, vm_dev->base + VIRTIO_MMIO_STATUS); +} + +static void vm_reset(struct virtio_device *vdev) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + + /* 0 status means a reset. */ + writel(0, vm_dev->base + VIRTIO_MMIO_STATUS); +} + + + +/* Transport interface */ + +/* the notify function used when creating a virt queue */ +static void vm_notify(struct virtqueue *vq) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); + + /* We write the queue's selector into the notification register to + * signal the other end */ + writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); +} + +/* Notify all virtqueues on an interrupt. 
*/ +static irqreturn_t vm_interrupt(int irq, void *opaque) +{ + struct virtio_mmio_device *vm_dev = opaque; + struct virtio_mmio_vq_info *info; + struct virtio_driver *vdrv = container_of(vm_dev->vdev.dev.driver, + struct virtio_driver, driver); + unsigned long status; + unsigned long flags; + irqreturn_t ret = IRQ_NONE; + + /* Read and acknowledge interrupts */ + status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS); + writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK); + + if (unlikely(status & VIRTIO_MMIO_INT_CONFIG) + && vdrv && vdrv->config_changed) { + vdrv->config_changed(&vm_dev->vdev); + ret = IRQ_HANDLED; + } + + if (likely(status & VIRTIO_MMIO_INT_VRING)) { + spin_lock_irqsave(&vm_dev->lock, flags); + list_for_each_entry(info, &vm_dev->virtqueues, node) + ret |= vring_interrupt(irq, info->vq); + spin_unlock_irqrestore(&vm_dev->lock, flags); + } + + return ret; +} + + + +static void vm_del_vq(struct virtqueue *vq) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); + struct virtio_mmio_vq_info *info = vq->priv; + unsigned long flags, size; + unsigned int index = vq->index; + + spin_lock_irqsave(&vm_dev->lock, flags); + list_del(&info->node); + spin_unlock_irqrestore(&vm_dev->lock, flags); + + vring_del_virtqueue(vq); + + /* Select and deactivate the queue */ + writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + + size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN)); + free_pages_exact(info->queue, size); + kfree(info); +} + +static void vm_del_vqs(struct virtio_device *vdev) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + struct virtqueue *vq, *n; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) + vm_del_vq(vq); + + free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev); +} + + + +static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + struct virtio_mmio_vq_info *info; + struct virtqueue *vq; + unsigned long flags, size; + int err; + + if (!name) + return NULL; + + /* Select the queue we're interested in */ + writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); + + /* Queue shouldn't already be set up. */ + if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) { + err = -ENOENT; + goto error_available; + } + + /* Allocate and fill out our active queue description */ + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + err = -ENOMEM; + goto error_kmalloc; + } + + /* Allocate pages for the queue - start with a queue as big as + * possible (limited by maximum size allowed by device), drop down + * to a minimal size, just big enough to fit descriptor table + * and two rings (which makes it "alignment_size * 2") + */ + info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX); + + /* If the device reports a 0 entry queue, we won't be able to + * use it to perform I/O, and vring_new_virtqueue() can't create + * empty queues anyway, so don't bother to set up the device. + */ + if (info->num == 0) { + err = -ENOENT; + goto error_alloc_pages; + } + + while (1) { + size = PAGE_ALIGN(vring_size(info->num, + VIRTIO_MMIO_VRING_ALIGN)); + /* Did the last iter shrink the queue below minimum size? 
*/ + if (size < VIRTIO_MMIO_VRING_ALIGN * 2) { + err = -ENOMEM; + goto error_alloc_pages; + } + + info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); + if (info->queue) + break; + + info->num /= 2; + } + + /* Activate the queue */ + writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM); + writel(VIRTIO_MMIO_VRING_ALIGN, + vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN); + writel(virt_to_phys(info->queue) >> PAGE_SHIFT, + vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + + /* Create the vring */ + vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev, + true, info->queue, vm_notify, callback, name); + if (!vq) { + err = -ENOMEM; + goto error_new_virtqueue; + } + + vq->priv = info; + info->vq = vq; + + spin_lock_irqsave(&vm_dev->lock, flags); + list_add(&info->node, &vm_dev->virtqueues); + spin_unlock_irqrestore(&vm_dev->lock, flags); + + return vq; + +error_new_virtqueue: + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + free_pages_exact(info->queue, size); +error_alloc_pages: + kfree(info); +error_kmalloc: +error_available: + return ERR_PTR(err); +} + +static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + unsigned int irq = platform_get_irq(vm_dev->pdev, 0); + int i, err; + + err = request_irq(irq, vm_interrupt, IRQF_SHARED, + dev_name(&vdev->dev), vm_dev); + if (err) + return err; + + for (i = 0; i < nvqs; ++i) { + vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]); + if (IS_ERR(vqs[i])) { + vm_del_vqs(vdev); + return PTR_ERR(vqs[i]); + } + } + + return 0; +} + +static const char *vm_bus_name(struct virtio_device *vdev) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + + return vm_dev->pdev->name; +} + +static const struct virtio_config_ops virtio_mmio_config_ops = { + .get = vm_get, + .set = vm_set, + .get_status = vm_get_status, + .set_status = vm_set_status, + .reset = vm_reset, + .find_vqs = vm_find_vqs, + .del_vqs = vm_del_vqs, + .get_features = vm_get_features, + .finalize_features = vm_finalize_features, + .bus_name = vm_bus_name, +}; + + + +/* Platform device */ + +static int virtio_mmio_probe(struct platform_device *pdev) +{ + struct virtio_mmio_device *vm_dev; + struct resource *mem; + unsigned long magic; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -EINVAL; + + if (!devm_request_mem_region(&pdev->dev, mem->start, + resource_size(mem), pdev->name)) + return -EBUSY; + + vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); + if (!vm_dev) + return -ENOMEM; + + vm_dev->vdev.dev.parent = &pdev->dev; + vm_dev->vdev.config = &virtio_mmio_config_ops; + vm_dev->pdev = pdev; + INIT_LIST_HEAD(&vm_dev->virtqueues); + spin_lock_init(&vm_dev->lock); + + vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); + if (vm_dev->base == NULL) + return -EFAULT; + + /* Check magic value */ + magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE); + if (memcmp(&magic, "virt", 4) != 0) { + dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic); + return -ENODEV; + } + + /* Check device version */ + vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION); + if (vm_dev->version != 1) { + dev_err(&pdev->dev, "Version %ld not supported!\n", + vm_dev->version); + return -ENXIO; + } + + vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID); + vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); + + 
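
virtio_mmio_probe() accepts a device only after two sanity checks: MagicValue must read back as the ASCII bytes "virt", and Version must be 1 (the legacy interface). On a little-endian CPU the readl() returns 0x74726976, which is exactly the in-memory byte layout memcmp() compares against. A standalone sketch of that check, assuming little-endian:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
    /* the value a little-endian readl() of MagicValue would return */
    uint32_t magic = 0x74726976;

    if (memcmp(&magic, "virt", 4) == 0)
        puts("virtio-mmio magic OK");
    else
        printf("Wrong magic value 0x%08x!\n", magic);
    return 0;
}
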
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); + + platform_set_drvdata(pdev, vm_dev); + + return register_virtio_device(&vm_dev->vdev); +} + +static int virtio_mmio_remove(struct platform_device *pdev) +{ + struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev); + + unregister_virtio_device(&vm_dev->vdev); + + return 0; +} + + + +/* Devices list parameter */ + +#if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES) + +static struct device vm_cmdline_parent = { + .init_name = "virtio-mmio-cmdline", +}; + +static int vm_cmdline_parent_registered; +static int vm_cmdline_id; + +static int vm_cmdline_set(const char *device, + const struct kernel_param *kp) +{ + int err; + struct resource resources[2] = {}; + char *str; + long long int base, size; + unsigned int irq; + int processed, consumed = 0; + struct platform_device *pdev; + + /* Consume "size" part of the command line parameter */ + size = memparse(device, &str); + + /* Get "@:[:]" chunks */ + processed = sscanf(str, "@%lli:%u%n:%d%n", + &base, &irq, &consumed, + &vm_cmdline_id, &consumed); + + /* + * sscanf() must processes at least 2 chunks; also there + * must be no extra characters after the last chunk, so + * str[consumed] must be '\0' + */ + if (processed < 2 || str[consumed]) + return -EINVAL; + + resources[0].flags = IORESOURCE_MEM; + resources[0].start = base; + resources[0].end = base + size - 1; + + resources[1].flags = IORESOURCE_IRQ; + resources[1].start = resources[1].end = irq; + + if (!vm_cmdline_parent_registered) { + err = device_register(&vm_cmdline_parent); + if (err) { + pr_err("Failed to register parent device!\n"); + return err; + } + vm_cmdline_parent_registered = 1; + } + + pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n", + vm_cmdline_id, + (unsigned long long)resources[0].start, + (unsigned long long)resources[0].end, + (int)resources[1].start); + + pdev = platform_device_register_resndata(&vm_cmdline_parent, + "virtio-mmio", vm_cmdline_id++, + resources, ARRAY_SIZE(resources), NULL, 0); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + + return 0; +} + +static int vm_cmdline_get_device(struct device *dev, void *data) +{ + char *buffer = data; + unsigned int len = strlen(buffer); + struct platform_device *pdev = to_platform_device(dev); + + snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n", + pdev->resource[0].end - pdev->resource[0].start + 1ULL, + (unsigned long long)pdev->resource[0].start, + (unsigned long long)pdev->resource[1].start, + pdev->id); + return 0; +} + +static int vm_cmdline_get(char *buffer, const struct kernel_param *kp) +{ + buffer[0] = '\0'; + device_for_each_child(&vm_cmdline_parent, buffer, + vm_cmdline_get_device); + return strlen(buffer) + 1; +} + +static struct kernel_param_ops vm_cmdline_param_ops = { + .set = vm_cmdline_set, + .get = vm_cmdline_get, +}; + +device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR); + +static int vm_unregister_cmdline_device(struct device *dev, + void *data) +{ + platform_device_unregister(to_platform_device(dev)); + + return 0; +} + +static void vm_unregister_cmdline_devices(void) +{ + if (vm_cmdline_parent_registered) { + device_for_each_child(&vm_cmdline_parent, NULL, + vm_unregister_cmdline_device); + device_unregister(&vm_cmdline_parent); + vm_cmdline_parent_registered = 0; + } +} + +#else + +static void vm_unregister_cmdline_devices(void) +{ +} + +#endif + +/* Platform driver */ + +static struct of_device_id virtio_mmio_match[] = { + { .compatible = "virtio,mmio", }, + {}, +}; 
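
vm_cmdline_set() above parses <size>@<base>:<irq>[:<id>] and rejects trailing characters by checking str[consumed] after the sscanf(). A userspace sketch of the same parse, with strtoull() plus a crude K/M/G suffix switch standing in for the kernel-only memparse():

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const char *device = "1K@0x1001e000:74"; /* example from the header comment */
    char *str;
    unsigned long long size;
    long long base;
    unsigned int irq;
    int id = -1, processed, consumed = 0;

    size = strtoull(device, &str, 0);
    switch (*str) {                 /* crude stand-in for memparse() */
    case 'K': size <<= 10; str++; break;
    case 'M': size <<= 20; str++; break;
    case 'G': size <<= 30; str++; break;
    }

    processed = sscanf(str, "@%lli:%u%n:%d%n",
                       &base, &irq, &consumed, &id, &consumed);
    if (processed < 2 || str[consumed])
        return 1;                   /* same validity rule as the driver */

    printf("mem 0x%llx-0x%llx irq %u id %d\n",
           (unsigned long long)base,
           (unsigned long long)base + size - 1, irq, id);
    return 0;
}

The driver then turns the parsed values into one IORESOURCE_MEM and one IORESOURCE_IRQ resource and registers a "virtio-mmio" platform device with them.
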
+MODULE_DEVICE_TABLE(of, virtio_mmio_match); + +static struct platform_driver virtio_mmio_driver = { + .probe = virtio_mmio_probe, + .remove = virtio_mmio_remove, + .driver = { + .name = "virtio-mmio", + .owner = THIS_MODULE, + .of_match_table = virtio_mmio_match, + }, +}; + +static int __init virtio_mmio_init(void) +{ + return platform_driver_register(&virtio_mmio_driver); +} + +static void __exit virtio_mmio_exit(void) +{ + platform_driver_unregister(&virtio_mmio_driver); + vm_unregister_cmdline_devices(); +} + +module_init(virtio_mmio_init); +module_exit(virtio_mmio_exit); + +MODULE_AUTHOR("Pawel Moll "); +MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices"); +MODULE_LICENSE("GPL"); diff --git a/addons/virtio/src/3.10.108/virtio_net.c b/addons/virtio/src/3.10.108/virtio_net.c new file mode 100644 index 00000000..b5d11529 --- /dev/null +++ b/addons/virtio/src/3.10.108/virtio_net.c @@ -0,0 +1,1812 @@ +/* A network driver using virtio. + * + * Copyright 2007 Rusty Russell IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ +//#define DEBUG +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int napi_weight = NAPI_POLL_WEIGHT; +module_param(napi_weight, int, 0444); + +static bool csum = true, gso = true; +module_param(csum, bool, 0444); +module_param(gso, bool, 0444); + +/* FIXME: MTU in config. */ +#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN) +#define GOOD_COPY_LEN 128 + +#define VIRTNET_DRIVER_VERSION "1.0.0" + +struct virtnet_stats { + struct u64_stats_sync tx_syncp; + struct u64_stats_sync rx_syncp; + u64 tx_bytes; + u64 tx_packets; + + u64 rx_bytes; + u64 rx_packets; +}; + +/* Internal representation of a send virtqueue */ +struct send_queue { + /* Virtqueue associated with this send _queue */ + struct virtqueue *vq; + + /* TX: fragments + linear part + virtio header */ + struct scatterlist sg[MAX_SKB_FRAGS + 2]; + + /* Name of the send queue: output.$index */ + char name[40]; +}; + +/* Internal representation of a receive virtqueue */ +struct receive_queue { + /* Virtqueue associated with this receive_queue */ + struct virtqueue *vq; + + struct napi_struct napi; + + /* Number of input buffers, and max we've ever had. */ + unsigned int num, max; + + /* Chain pages by the private ptr. 
*/ + struct page *pages; + + /* RX: fragments + linear part + virtio header */ + struct scatterlist sg[MAX_SKB_FRAGS + 2]; + + /* Name of this receive queue: input.$index */ + char name[40]; +}; + +struct virtnet_info { + struct virtio_device *vdev; + struct virtqueue *cvq; + struct net_device *dev; + struct send_queue *sq; + struct receive_queue *rq; + unsigned int status; + + /* Max # of queue pairs supported by the device */ + u16 max_queue_pairs; + + /* # of queue pairs currently used by the driver */ + u16 curr_queue_pairs; + + /* I like... big packets and I cannot lie! */ + bool big_packets; + + /* Host will merge rx buffers for big packets (shake it! shake it!) */ + bool mergeable_rx_bufs; + + /* Has control virtqueue */ + bool has_cvq; + + /* enable config space updates */ + bool config_enable; + + /* Active statistics */ + struct virtnet_stats __percpu *stats; + + /* Work struct for refilling if we run low on memory. */ + struct delayed_work refill; + + /* Work struct for config space updates */ + struct work_struct config_work; + + /* Lock for config space updates */ + struct mutex config_lock; + + /* Does the affinity hint is set for virtqueues? */ + bool affinity_hint_set; + + /* Per-cpu variable to show the mapping from CPU to virtqueue */ + int __percpu *vq_index; + + /* CPU hot plug notifier */ + struct notifier_block nb; +}; + +struct skb_vnet_hdr { + union { + struct virtio_net_hdr hdr; + struct virtio_net_hdr_mrg_rxbuf mhdr; + }; +}; + +struct padded_vnet_hdr { + struct virtio_net_hdr hdr; + /* + * virtio_net_hdr should be in a separated sg buffer because of a + * QEMU bug, and data sg buffer shares same page with this header sg. + * This padding makes next sg 16 byte aligned after virtio_net_hdr. + */ + char padding[6]; +}; + +/* Converting between virtqueue no. and kernel tx/rx queue no. + * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq + */ +static int vq2txq(struct virtqueue *vq) +{ + return (vq->index - 1) / 2; +} + +static int txq2vq(int txq) +{ + return txq * 2 + 1; +} + +static int vq2rxq(struct virtqueue *vq) +{ + return vq->index / 2; +} + +static int rxq2vq(int rxq) +{ + return rxq * 2; +} + +static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb) +{ + return (struct skb_vnet_hdr *)skb->cb; +} + +/* + * private is used to chain pages for big packets, put the whole + * most recent used list in the beginning for reuse + */ +static void give_pages(struct receive_queue *rq, struct page *page) +{ + struct page *end; + + /* Find end of list, sew whole thing into vi->rq.pages. */ + for (end = page; end->private; end = (struct page *)end->private); + end->private = (unsigned long)rq->pages; + rq->pages = page; +} + +static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) +{ + struct page *p = rq->pages; + + if (p) { + rq->pages = (struct page *)p->private; + /* clear private here, it is used to chain pages */ + p->private = 0; + } else + p = alloc_page(gfp_mask); + return p; +} + +static void skb_xmit_done(struct virtqueue *vq) +{ + struct virtnet_info *vi = vq->vdev->priv; + + /* Suppress further interrupts. */ + virtqueue_disable_cb(vq); + + /* We were probably waiting for more output buffers. 
*/ + netif_wake_subqueue(vi->dev, vq2txq(vq)); +} + +static void set_skb_frag(struct sk_buff *skb, struct page *page, + unsigned int offset, unsigned int *len) +{ + int size = min((unsigned)PAGE_SIZE - offset, *len); + int i = skb_shinfo(skb)->nr_frags; + + __skb_fill_page_desc(skb, i, page, offset, size); + + skb->data_len += size; + skb->len += size; + skb->truesize += PAGE_SIZE; + skb_shinfo(skb)->nr_frags++; + skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; + *len -= size; +} + +/* Called from bottom half context */ +static struct sk_buff *page_to_skb(struct receive_queue *rq, + struct page *page, unsigned int len) +{ + struct virtnet_info *vi = rq->vq->vdev->priv; + struct sk_buff *skb; + struct skb_vnet_hdr *hdr; + unsigned int copy, hdr_len, offset; + char *p; + + p = page_address(page); + + /* copy small packet so we can reuse these pages for small data */ + skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); + if (unlikely(!skb)) + return NULL; + + hdr = skb_vnet_hdr(skb); + + if (vi->mergeable_rx_bufs) { + hdr_len = sizeof hdr->mhdr; + offset = hdr_len; + } else { + hdr_len = sizeof hdr->hdr; + offset = sizeof(struct padded_vnet_hdr); + } + + memcpy(hdr, p, hdr_len); + + len -= hdr_len; + p += offset; + + copy = len; + if (copy > skb_tailroom(skb)) + copy = skb_tailroom(skb); + memcpy(skb_put(skb, copy), p, copy); + + len -= copy; + offset += copy; + + /* + * Verify that we can indeed put this data into a skb. + * This is here to handle cases when the device erroneously + * tries to receive more than is possible. This is usually + * the case of a broken device. + */ + if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { + net_dbg_ratelimited("%s: too much data\n", skb->dev->name); + dev_kfree_skb(skb); + return NULL; + } + + while (len) { + set_skb_frag(skb, page, offset, &len); + page = (struct page *)page->private; + offset = 0; + } + + if (page) + give_pages(rq, page); + + return skb; +} + +static struct sk_buff *receive_small(void *buf, unsigned int len) +{ + struct sk_buff * skb = buf; + + len -= sizeof(struct virtio_net_hdr); + skb_trim(skb, len); + + return skb; +} + +static struct sk_buff *receive_big(struct net_device *dev, + struct receive_queue *rq, + void *buf) +{ + struct page *page = buf; + struct sk_buff *skb = page_to_skb(rq, page, 0); + + if (unlikely(!skb)) + goto err; + + return skb; + +err: + dev->stats.rx_dropped++; + give_pages(rq, page); + return NULL; +} + +static struct sk_buff *receive_mergeable(struct net_device *dev, + struct receive_queue *rq, + void *buf, + unsigned int len) +{ + struct skb_vnet_hdr *hdr = page_address(buf); + int num_buf = hdr->mhdr.num_buffers; + struct page *page = buf; + struct sk_buff *skb = page_to_skb(rq, page, len); + int i; + + if (unlikely(!skb)) + goto err_skb; + + while (--num_buf) { + i = skb_shinfo(skb)->nr_frags; + if (i >= MAX_SKB_FRAGS) { + pr_debug("%s: packet too long\n", skb->dev->name); + skb->dev->stats.rx_length_errors++; + goto err_frags; + } + page = virtqueue_get_buf(rq->vq, &len); + if (!page) { + pr_debug("%s: rx error: %d buffers %d missing\n", + dev->name, hdr->mhdr.num_buffers, num_buf); + dev->stats.rx_length_errors++; + goto err_buf; + } + + if (len > PAGE_SIZE) + len = PAGE_SIZE; + + set_skb_frag(skb, page, 0, &len); + + --rq->num; + } + return skb; +err_skb: + give_pages(rq, page); + while (--num_buf) { +err_frags: + buf = virtqueue_get_buf(rq->vq, &len); + if (unlikely(!buf)) { + pr_debug("%s: rx error: %d buffers missing\n", + dev->name, num_buf); + dev->stats.rx_length_errors++; + break; + } + 
page = buf; + give_pages(rq, page); + --rq->num; + } +err_buf: + dev->stats.rx_dropped++; + dev_kfree_skb(skb); + return NULL; +} + +static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len) +{ + struct virtnet_info *vi = rq->vq->vdev->priv; + struct net_device *dev = vi->dev; + struct virtnet_stats *stats = this_cpu_ptr(vi->stats); + struct sk_buff *skb; + struct skb_vnet_hdr *hdr; + + if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) { + pr_debug("%s: short packet %i\n", dev->name, len); + dev->stats.rx_length_errors++; + if (vi->mergeable_rx_bufs || vi->big_packets) + give_pages(rq, buf); + else + dev_kfree_skb(buf); + return; + } + if (vi->mergeable_rx_bufs) + skb = receive_mergeable(dev, rq, buf, len); + else if (vi->big_packets) + skb = receive_big(dev, rq, buf); + else + skb = receive_small(buf, len); + + if (unlikely(!skb)) + return; + + hdr = skb_vnet_hdr(skb); + + u64_stats_update_begin(&stats->rx_syncp); + stats->rx_bytes += skb->len; + stats->rx_packets++; + u64_stats_update_end(&stats->rx_syncp); + + if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { + pr_debug("Needs csum!\n"); + if (!skb_partial_csum_set(skb, + hdr->hdr.csum_start, + hdr->hdr.csum_offset)) + goto frame_err; + } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + skb->protocol = eth_type_trans(skb, dev); + pr_debug("Receiving skb proto 0x%04x len %i type %i\n", + ntohs(skb->protocol), skb->len, skb->pkt_type); + + if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { + pr_debug("GSO!\n"); + switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { + case VIRTIO_NET_HDR_GSO_TCPV4: + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + break; + case VIRTIO_NET_HDR_GSO_UDP: + skb_shinfo(skb)->gso_type = SKB_GSO_UDP; + break; + case VIRTIO_NET_HDR_GSO_TCPV6: + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + break; + default: + net_warn_ratelimited("%s: bad gso type %u.\n", + dev->name, hdr->hdr.gso_type); + goto frame_err; + } + + if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; + + skb_shinfo(skb)->gso_size = hdr->hdr.gso_size; + if (skb_shinfo(skb)->gso_size == 0) { + net_warn_ratelimited("%s: zero gso size.\n", dev->name); + goto frame_err; + } + + /* Header must be checked, and gso_segs computed. 
*/ + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; + skb_shinfo(skb)->gso_segs = 0; + } + + netif_receive_skb(skb); + return; + +frame_err: + dev->stats.rx_frame_errors++; + dev_kfree_skb(skb); +} + +static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp) +{ + struct virtnet_info *vi = rq->vq->vdev->priv; + struct sk_buff *skb; + struct skb_vnet_hdr *hdr; + int err; + + skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp); + if (unlikely(!skb)) + return -ENOMEM; + + skb_put(skb, MAX_PACKET_LEN); + + hdr = skb_vnet_hdr(skb); + sg_set_buf(rq->sg, &hdr->hdr, sizeof hdr->hdr); + + skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); + + err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); + if (err < 0) + dev_kfree_skb(skb); + + return err; +} + +static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp) +{ + struct page *first, *list = NULL; + char *p; + int i, err, offset; + + /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ + for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { + first = get_a_page(rq, gfp); + if (!first) { + if (list) + give_pages(rq, list); + return -ENOMEM; + } + sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); + + /* chain new page in list head to match sg */ + first->private = (unsigned long)list; + list = first; + } + + first = get_a_page(rq, gfp); + if (!first) { + give_pages(rq, list); + return -ENOMEM; + } + p = page_address(first); + + /* rq->sg[0], rq->sg[1] share the same page */ + /* a separated rq->sg[0] for virtio_net_hdr only due to QEMU bug */ + sg_set_buf(&rq->sg[0], p, sizeof(struct virtio_net_hdr)); + + /* rq->sg[1] for data packet, from offset */ + offset = sizeof(struct padded_vnet_hdr); + sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); + + /* chain first in list head */ + first->private = (unsigned long)list; + err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, + first, gfp); + if (err < 0) + give_pages(rq, first); + + return err; +} + +static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) +{ + struct page *page; + int err; + + page = get_a_page(rq, gfp); + if (!page) + return -ENOMEM; + + sg_init_one(rq->sg, page_address(page), PAGE_SIZE); + + err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, page, gfp); + if (err < 0) + give_pages(rq, page); + + return err; +} + +/* + * Returns false if we couldn't fill entirely (OOM). + * + * Normally run in the receive path, but can also be run from ndo_open + * before we're receiving packets, or from refill_work which is + * careful to disable receiving (using napi_disable). + */ +static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp) +{ + struct virtnet_info *vi = rq->vq->vdev->priv; + int err; + bool oom; + + do { + if (vi->mergeable_rx_bufs) + err = add_recvbuf_mergeable(rq, gfp); + else if (vi->big_packets) + err = add_recvbuf_big(rq, gfp); + else + err = add_recvbuf_small(rq, gfp); + + oom = err == -ENOMEM; + if (err) + break; + ++rq->num; + } while (rq->vq->num_free); + if (unlikely(rq->num > rq->max)) + rq->max = rq->num; + virtqueue_kick(rq->vq); + return !oom; +} + +static void skb_recv_done(struct virtqueue *rvq) +{ + struct virtnet_info *vi = rvq->vdev->priv; + struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; + + /* Schedule NAPI, Suppress further interrupts if successful. 
*/
+	if (napi_schedule_prep(&rq->napi)) {
+		virtqueue_disable_cb(rvq);
+		__napi_schedule(&rq->napi);
+	}
+}
+
+static void virtnet_napi_enable(struct receive_queue *rq)
+{
+	napi_enable(&rq->napi);
+
+	/* If all buffers were filled by other side before we napi_enabled, we
+	 * won't get another interrupt, so process any outstanding packets
+	 * now. virtnet_poll wants to re-enable the queue, so we disable here.
+	 * We synchronize against interrupts via NAPI_STATE_SCHED */
+	if (napi_schedule_prep(&rq->napi)) {
+		virtqueue_disable_cb(rq->vq);
+		local_bh_disable();
+		__napi_schedule(&rq->napi);
+		local_bh_enable();
+	}
+}
+
+static void refill_work(struct work_struct *work)
+{
+	struct virtnet_info *vi =
+		container_of(work, struct virtnet_info, refill.work);
+	bool still_empty;
+	int i;
+
+	for (i = 0; i < vi->curr_queue_pairs; i++) {
+		struct receive_queue *rq = &vi->rq[i];
+
+		napi_disable(&rq->napi);
+		still_empty = !try_fill_recv(rq, GFP_KERNEL);
+		virtnet_napi_enable(rq);
+
+		/* In theory, this can happen: if we don't get any buffers in
+		 * we will *never* try to fill again.
+		 */
+		if (still_empty)
+			schedule_delayed_work(&vi->refill, HZ/2);
+	}
+}
+
+static int virtnet_poll(struct napi_struct *napi, int budget)
+{
+	struct receive_queue *rq =
+		container_of(napi, struct receive_queue, napi);
+	struct virtnet_info *vi = rq->vq->vdev->priv;
+	void *buf;
+	unsigned int r, len, received = 0;
+
+again:
+	while (received < budget &&
+	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+		receive_buf(rq, buf, len);
+		--rq->num;
+		received++;
+	}
+
+	if (rq->num < rq->max / 2) {
+		if (!try_fill_recv(rq, GFP_ATOMIC))
+			schedule_delayed_work(&vi->refill, 0);
+	}
+
+	/* Out of packets? */
+	if (received < budget) {
+		r = virtqueue_enable_cb_prepare(rq->vq);
+		napi_complete(napi);
+		if (unlikely(virtqueue_poll(rq->vq, r)) &&
+		    napi_schedule_prep(napi)) {
+			virtqueue_disable_cb(rq->vq);
+			__napi_schedule(napi);
+			goto again;
+		}
+	}
+
+	return received;
+}
+
+static int virtnet_open(struct net_device *dev)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < vi->max_queue_pairs; i++) {
+		if (i < vi->curr_queue_pairs)
+			/* Make sure we have some buffers: if oom use wq.
*/ + if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) + schedule_delayed_work(&vi->refill, 0); + virtnet_napi_enable(&vi->rq[i]); + } + + return 0; +} + +static void free_old_xmit_skbs(struct send_queue *sq) +{ + struct sk_buff *skb; + unsigned int len; + struct virtnet_info *vi = sq->vq->vdev->priv; + struct virtnet_stats *stats = this_cpu_ptr(vi->stats); + + while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { + pr_debug("Sent skb %p\n", skb); + + u64_stats_update_begin(&stats->tx_syncp); + stats->tx_bytes += skb->len; + stats->tx_packets++; + u64_stats_update_end(&stats->tx_syncp); + + dev_kfree_skb_any(skb); + } +} + +static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) +{ + struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); + const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; + struct virtnet_info *vi = sq->vq->vdev->priv; + unsigned num_sg; + + pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; + hdr->hdr.csum_start = skb_checksum_start_offset(skb); + hdr->hdr.csum_offset = skb->csum_offset; + } else { + hdr->hdr.flags = 0; + hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; + } + + if (skb_is_gso(skb)) { + hdr->hdr.hdr_len = skb_headlen(skb); + hdr->hdr.gso_size = skb_shinfo(skb)->gso_size; + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; + else + BUG(); + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) + hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; + } else { + hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; + hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; + } + + hdr->mhdr.num_buffers = 0; + + /* Encode metadata header at front. */ + if (vi->mergeable_rx_bufs) + sg_set_buf(sq->sg, &hdr->mhdr, sizeof hdr->mhdr); + else + sg_set_buf(sq->sg, &hdr->hdr, sizeof hdr->hdr); + + num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1; + return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); +} + +static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct virtnet_info *vi = netdev_priv(dev); + int qnum = skb_get_queue_mapping(skb); + struct send_queue *sq = &vi->sq[qnum]; + int err; + + /* Free up any pending old buffers before queueing new ones. */ + free_old_xmit_skbs(sq); + + /* Try to transmit */ + err = xmit_skb(sq, skb); + + /* This should not happen! */ + if (unlikely(err)) { + dev->stats.tx_fifo_errors++; + if (net_ratelimit()) + dev_warn(&dev->dev, + "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); + dev->stats.tx_dropped++; + kfree_skb(skb); + return NETDEV_TX_OK; + } + virtqueue_kick(sq->vq); + + /* Don't wait up for transmitted skbs to be freed. */ + skb_orphan(skb); + nf_reset(skb); + + /* Apparently nice girls don't return TX_BUSY; stop the queue + * before it gets out of hand. Naturally, this wastes entries. */ + if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { + netif_stop_subqueue(dev, qnum); + if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { + /* More just got used, free them then recheck. */ + free_old_xmit_skbs(sq); + if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { + netif_start_subqueue(dev, qnum); + virtqueue_disable_cb(sq->vq); + } + } + } + + return NETDEV_TX_OK; +} + +/* + * Send command via the control virtqueue and check status. 
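 *
 * [Editor's note, not in the original source] The buffer layout this helper
 * places on the control ring is, in order:
 *
 *   sgs[0] -> struct virtio_net_ctrl_hdr { u8 class; u8 cmd; }  (read-only)
 *   sgs[1] -> optional command-specific "out" data              (read-only)
 *   sgs[2] -> optional command-specific "in" data    (written by the host)
 *   last   -> virtio_net_ctrl_ack status byte        (written by the host)
 *
 * so a caller only ever supplies the middle, command-specific parts; the
 * header and the ack byte are added here.
 *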
 * Commands supported by the hypervisor, as indicated by feature bits,
 * should never fail unless improperly formatted.
 */
+static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
+				 struct scatterlist *out,
+				 struct scatterlist *in)
+{
+	struct scatterlist *sgs[4], hdr, stat;
+	struct virtio_net_ctrl_hdr ctrl;
+	virtio_net_ctrl_ack status = ~0;
+	unsigned out_num = 0, in_num = 0, tmp;
+
+	/* Caller should know better */
+	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
+
+	ctrl.class = class;
+	ctrl.cmd = cmd;
+	/* Add header */
+	sg_init_one(&hdr, &ctrl, sizeof(ctrl));
+	sgs[out_num++] = &hdr;
+
+	if (out)
+		sgs[out_num++] = out;
+	if (in)
+		sgs[out_num + in_num++] = in;
+
+	/* Add return status. */
+	sg_init_one(&stat, &status, sizeof(status));
+	sgs[out_num + in_num++] = &stat;
+
+	BUG_ON(out_num + in_num > ARRAY_SIZE(sgs));
+	BUG_ON(virtqueue_add_sgs(vi->cvq, sgs, out_num, in_num, vi, GFP_ATOMIC)
+	       < 0);
+
+	virtqueue_kick(vi->cvq);
+
+	/* Spin for a response; the kick causes an ioport write, trapping
+	 * into the hypervisor, so the request should be handled immediately.
+	 */
+	while (!virtqueue_get_buf(vi->cvq, &tmp))
+		cpu_relax();
+
+	return status == VIRTIO_NET_OK;
+}
+
+static int virtnet_set_mac_address(struct net_device *dev, void *p)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct virtio_device *vdev = vi->vdev;
+	int ret;
+	struct sockaddr *addr = p;
+	struct scatterlist sg;
+
+	ret = eth_prepare_mac_addr_change(dev, p);
+	if (ret)
+		return ret;
+
+	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
+		sg_init_one(&sg, addr->sa_data, dev->addr_len);
+		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
+					  VIRTIO_NET_CTRL_MAC_ADDR_SET,
+					  &sg, NULL)) {
+			dev_warn(&vdev->dev,
+				 "Failed to set mac address by vq command.\n");
+			return -EINVAL;
+		}
+	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
+		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
+				  addr->sa_data, dev->addr_len);
+	}
+
+	eth_commit_mac_addr_change(dev, p);
+
+	return 0;
+}
+
+static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
+					       struct rtnl_link_stats64 *tot)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	int cpu;
+	unsigned int start;
+
+	for_each_possible_cpu(cpu) {
+		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
+		u64 tpackets, tbytes, rpackets, rbytes;
+
+		do {
+			start = u64_stats_fetch_begin_bh(&stats->tx_syncp);
+			tpackets = stats->tx_packets;
+			tbytes = stats->tx_bytes;
+		} while (u64_stats_fetch_retry_bh(&stats->tx_syncp, start));
+
+		do {
+			start = u64_stats_fetch_begin_bh(&stats->rx_syncp);
+			rpackets = stats->rx_packets;
+			rbytes = stats->rx_bytes;
+		} while (u64_stats_fetch_retry_bh(&stats->rx_syncp, start));
+
+		tot->rx_packets += rpackets;
+		tot->tx_packets += tpackets;
+		tot->rx_bytes += rbytes;
+		tot->tx_bytes += tbytes;
+	}
+
+	tot->tx_dropped = dev->stats.tx_dropped;
+	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+	tot->rx_dropped = dev->stats.rx_dropped;
+	tot->rx_length_errors = dev->stats.rx_length_errors;
+	tot->rx_frame_errors = dev->stats.rx_frame_errors;
+
+	return tot;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void virtnet_netpoll(struct net_device *dev)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < vi->curr_queue_pairs; i++)
+		napi_schedule(&vi->rq[i].napi);
+}
+#endif
+
+static void virtnet_ack_link_announce(struct virtnet_info *vi)
+{
+	rtnl_lock();
+	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
+				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL, NULL))
+		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
+	rtnl_unlock();
+}
+
+static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+{
+	struct scatterlist sg;
+	struct virtio_net_ctrl_mq s;
+	struct net_device *dev = vi->dev;
+
+	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
+		return 0;
+
+	s.virtqueue_pairs = queue_pairs;
+	sg_init_one(&sg, &s, sizeof(s));
+
+	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
+				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg, NULL)) {
+		dev_warn(&dev->dev, "Failed to set num of queue pairs to %d\n",
+			 queue_pairs);
+		return -EINVAL;
+	} else {
+		vi->curr_queue_pairs = queue_pairs;
+		/* virtnet_open() will refill when the device goes up. */
+		if (dev->flags & IFF_UP)
+			schedule_delayed_work(&vi->refill, 0);
+	}
+
+	return 0;
+}
+
+static int virtnet_close(struct net_device *dev)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	int i;
+
+	/* Make sure refill_work doesn't re-enable napi! */
+	cancel_delayed_work_sync(&vi->refill);
+
+	for (i = 0; i < vi->max_queue_pairs; i++)
+		napi_disable(&vi->rq[i].napi);
+
+	return 0;
+}
+
+static void virtnet_set_rx_mode(struct net_device *dev)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct scatterlist sg[2];
+	u8 promisc, allmulti;
+	struct virtio_net_ctrl_mac *mac_data;
+	struct netdev_hw_addr *ha;
+	int uc_count;
+	int mc_count;
+	void *buf;
+	int i;
+
+	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
+	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
+		return;
+
+	promisc = ((dev->flags & IFF_PROMISC) != 0);
+	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
+
+	sg_init_one(sg, &promisc, sizeof(promisc));
+
+	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
+				  VIRTIO_NET_CTRL_RX_PROMISC,
+				  sg, NULL))
+		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
+			 promisc ? "en" : "dis");
+
+	sg_init_one(sg, &allmulti, sizeof(allmulti));
+
+	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
+				  VIRTIO_NET_CTRL_RX_ALLMULTI,
+				  sg, NULL))
+		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
+			 allmulti ? "en" : "dis");
"en" : "dis"); + + uc_count = netdev_uc_count(dev); + mc_count = netdev_mc_count(dev); + /* MAC filter - use one buffer for both lists */ + buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + + (2 * sizeof(mac_data->entries)), GFP_ATOMIC); + mac_data = buf; + if (!buf) + return; + + sg_init_table(sg, 2); + + /* Store the unicast list and count in the front of the buffer */ + mac_data->entries = uc_count; + i = 0; + netdev_for_each_uc_addr(ha, dev) + memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); + + sg_set_buf(&sg[0], mac_data, + sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); + + /* multicast list and count fill the end */ + mac_data = (void *)&mac_data->macs[uc_count][0]; + + mac_data->entries = mc_count; + i = 0; + netdev_for_each_mc_addr(ha, dev) + memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); + + sg_set_buf(&sg[1], mac_data, + sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); + + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, + VIRTIO_NET_CTRL_MAC_TABLE_SET, + sg, NULL)) + dev_warn(&dev->dev, "Failed to set MAC fitler table.\n"); + + kfree(buf); +} + +static int virtnet_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct scatterlist sg; + + sg_init_one(&sg, &vid, sizeof(vid)); + + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, + VIRTIO_NET_CTRL_VLAN_ADD, &sg, NULL)) + dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); + return 0; +} + +static int virtnet_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct scatterlist sg; + + sg_init_one(&sg, &vid, sizeof(vid)); + + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, + VIRTIO_NET_CTRL_VLAN_DEL, &sg, NULL)) + dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); + return 0; +} + +static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) +{ + int i; + int cpu; + + if (vi->affinity_hint_set) { + for (i = 0; i < vi->max_queue_pairs; i++) { + virtqueue_set_affinity(vi->rq[i].vq, -1); + virtqueue_set_affinity(vi->sq[i].vq, -1); + } + + vi->affinity_hint_set = false; + } + + i = 0; + for_each_online_cpu(cpu) { + if (cpu == hcpu) { + *per_cpu_ptr(vi->vq_index, cpu) = -1; + } else { + *per_cpu_ptr(vi->vq_index, cpu) = + ++i % vi->curr_queue_pairs; + } + } +} + +static void virtnet_set_affinity(struct virtnet_info *vi) +{ + int i; + int cpu; + + /* In multiqueue mode, when the number of cpu is equal to the number of + * queue pairs, we let the queue pairs to be private to one cpu by + * setting the affinity hint to eliminate the contention. 
+	 */
+	if (vi->curr_queue_pairs == 1 ||
+	    vi->max_queue_pairs != num_online_cpus()) {
+		virtnet_clean_affinity(vi, -1);
+		return;
+	}
+
+	i = 0;
+	for_each_online_cpu(cpu) {
+		virtqueue_set_affinity(vi->rq[i].vq, cpu);
+		virtqueue_set_affinity(vi->sq[i].vq, cpu);
+		*per_cpu_ptr(vi->vq_index, cpu) = i;
+		i++;
+	}
+
+	vi->affinity_hint_set = true;
+}
+
+static int virtnet_cpu_callback(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
+	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_ONLINE:
+	case CPU_DOWN_FAILED:
+	case CPU_DEAD:
+		virtnet_set_affinity(vi);
+		break;
+	case CPU_DOWN_PREPARE:
+		virtnet_clean_affinity(vi, (long)hcpu);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static void virtnet_get_ringparam(struct net_device *dev,
+				  struct ethtool_ringparam *ring)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+
+	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
+	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
+	ring->rx_pending = ring->rx_max_pending;
+	ring->tx_pending = ring->tx_max_pending;
+}
+
+static void virtnet_get_drvinfo(struct net_device *dev,
+				struct ethtool_drvinfo *info)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	struct virtio_device *vdev = vi->vdev;
+
+	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
+}
+
+/* TODO: Eliminate OOO packets during switching */
+static int virtnet_set_channels(struct net_device *dev,
+				struct ethtool_channels *channels)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+	u16 queue_pairs = channels->combined_count;
+	int err;
+
+	/* We don't support separate rx/tx channels.
+	 * We don't allow setting 'other' channels.
+	 */
+	if (channels->rx_count || channels->tx_count || channels->other_count)
+		return -EINVAL;
+
+	if (queue_pairs > vi->max_queue_pairs)
+		return -EINVAL;
+
+	get_online_cpus();
+	err = virtnet_set_queues(vi, queue_pairs);
+	if (!err) {
+		netif_set_real_num_tx_queues(dev, queue_pairs);
+		netif_set_real_num_rx_queues(dev, queue_pairs);
+
+		virtnet_set_affinity(vi);
+	}
+	put_online_cpus();
+
+	return err;
+}
+
+static void virtnet_get_channels(struct net_device *dev,
+				 struct ethtool_channels *channels)
+{
+	struct virtnet_info *vi = netdev_priv(dev);
+
+	channels->combined_count = vi->curr_queue_pairs;
+	channels->max_combined = vi->max_queue_pairs;
+	channels->max_other = 0;
+	channels->rx_count = 0;
+	channels->tx_count = 0;
+	channels->other_count = 0;
+}
+
+static const struct ethtool_ops virtnet_ethtool_ops = {
+	.get_drvinfo = virtnet_get_drvinfo,
+	.get_link = ethtool_op_get_link,
+	.get_ringparam = virtnet_get_ringparam,
+	.set_channels = virtnet_set_channels,
+	.get_channels = virtnet_get_channels,
+};
+
+#define MIN_MTU 68
+#define MAX_MTU 65535
+
+static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/* To avoid contending a lock held by a vcpu that would exit to the host,
+ * select the txq based on the processor id.
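+ *
+ * [Editor's note, not in the original source] Concretely: a packet whose
+ * flow was last received on rx queue 2 is sent on txq 2; otherwise the
+ * per-cpu hint chosen by virtnet_set_affinity() is used (falling back to
+ * 0 when unset). The final wrap below keeps the result valid after the
+ * admin shrinks the active set, e.g. txq 5 with real_num_tx_queues == 4
+ * becomes txq 1.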
+ */ +static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb) +{ + int txq; + struct virtnet_info *vi = netdev_priv(dev); + + if (skb_rx_queue_recorded(skb)) { + txq = skb_get_rx_queue(skb); + } else { + txq = *__this_cpu_ptr(vi->vq_index); + if (txq == -1) + txq = 0; + } + + while (unlikely(txq >= dev->real_num_tx_queues)) + txq -= dev->real_num_tx_queues; + + return txq; +} + +static const struct net_device_ops virtnet_netdev = { + .ndo_open = virtnet_open, + .ndo_stop = virtnet_close, + .ndo_start_xmit = start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = virtnet_set_mac_address, + .ndo_set_rx_mode = virtnet_set_rx_mode, + .ndo_change_mtu = virtnet_change_mtu, + .ndo_get_stats64 = virtnet_stats, + .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, + .ndo_select_queue = virtnet_select_queue, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = virtnet_netpoll, +#endif +}; + +static void virtnet_config_changed_work(struct work_struct *work) +{ + struct virtnet_info *vi = + container_of(work, struct virtnet_info, config_work); + u16 v; + + mutex_lock(&vi->config_lock); + if (!vi->config_enable) + goto done; + + if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS, + offsetof(struct virtio_net_config, status), + &v) < 0) + goto done; + + if (v & VIRTIO_NET_S_ANNOUNCE) { + netdev_notify_peers(vi->dev); + virtnet_ack_link_announce(vi); + } + + /* Ignore unknown (future) status bits */ + v &= VIRTIO_NET_S_LINK_UP; + + if (vi->status == v) + goto done; + + vi->status = v; + + if (vi->status & VIRTIO_NET_S_LINK_UP) { + netif_carrier_on(vi->dev); + netif_tx_wake_all_queues(vi->dev); + } else { + netif_carrier_off(vi->dev); + netif_tx_stop_all_queues(vi->dev); + } +done: + mutex_unlock(&vi->config_lock); +} + +static void virtnet_config_changed(struct virtio_device *vdev) +{ + struct virtnet_info *vi = vdev->priv; + + schedule_work(&vi->config_work); +} + +static void virtnet_free_queues(struct virtnet_info *vi) +{ + int i; + + for (i = 0; i < vi->max_queue_pairs; i++) + netif_napi_del(&vi->rq[i].napi); + + kfree(vi->rq); + kfree(vi->sq); +} + +static void free_receive_bufs(struct virtnet_info *vi) +{ + int i; + + for (i = 0; i < vi->max_queue_pairs; i++) { + while (vi->rq[i].pages) + __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); + } +} + +static void free_unused_bufs(struct virtnet_info *vi) +{ + void *buf; + int i; + + for (i = 0; i < vi->max_queue_pairs; i++) { + struct virtqueue *vq = vi->sq[i].vq; + while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) + dev_kfree_skb(buf); + } + + for (i = 0; i < vi->max_queue_pairs; i++) { + struct virtqueue *vq = vi->rq[i].vq; + + while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { + if (vi->mergeable_rx_bufs || vi->big_packets) + give_pages(&vi->rq[i], buf); + else + dev_kfree_skb(buf); + --vi->rq[i].num; + } + BUG_ON(vi->rq[i].num != 0); + } +} + +static void virtnet_del_vqs(struct virtnet_info *vi) +{ + struct virtio_device *vdev = vi->vdev; + + virtnet_clean_affinity(vi, -1); + + vdev->config->del_vqs(vdev); + + virtnet_free_queues(vi); +} + +static int virtnet_find_vqs(struct virtnet_info *vi) +{ + vq_callback_t **callbacks; + struct virtqueue **vqs; + int ret = -ENOMEM; + int i, total_vqs; + const char **names; + + /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by + * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by + * possible control vq. 
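+	 *
+	 * [Editor's note, not in the original source] For example, with
+	 * max_queue_pairs == 2 and a control vq, total_vqs == 5 and the
+	 * index layout is:
+	 *
+	 *   0: rx0   1: tx0   2: rx1   3: tx1   4: control
+	 *
+	 * which is the mapping the rxq2vq()/txq2vq() helpers used below are
+	 * assumed to implement (rxq2vq(i) == 2*i, txq2vq(i) == 2*i + 1).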
+ */ + total_vqs = vi->max_queue_pairs * 2 + + virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); + + /* Allocate space for find_vqs parameters */ + vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); + if (!vqs) + goto err_vq; + callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); + if (!callbacks) + goto err_callback; + names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); + if (!names) + goto err_names; + + /* Parameters for control virtqueue, if any */ + if (vi->has_cvq) { + callbacks[total_vqs - 1] = NULL; + names[total_vqs - 1] = "control"; + } + + /* Allocate/initialize parameters for send/receive virtqueues */ + for (i = 0; i < vi->max_queue_pairs; i++) { + callbacks[rxq2vq(i)] = skb_recv_done; + callbacks[txq2vq(i)] = skb_xmit_done; + sprintf(vi->rq[i].name, "input.%d", i); + sprintf(vi->sq[i].name, "output.%d", i); + names[rxq2vq(i)] = vi->rq[i].name; + names[txq2vq(i)] = vi->sq[i].name; + } + + ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, + names); + if (ret) + goto err_find; + + if (vi->has_cvq) { + vi->cvq = vqs[total_vqs - 1]; + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) + vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + } + + for (i = 0; i < vi->max_queue_pairs; i++) { + vi->rq[i].vq = vqs[rxq2vq(i)]; + vi->sq[i].vq = vqs[txq2vq(i)]; + } + + kfree(names); + kfree(callbacks); + kfree(vqs); + + return 0; + +err_find: + kfree(names); +err_names: + kfree(callbacks); +err_callback: + kfree(vqs); +err_vq: + return ret; +} + +static int virtnet_alloc_queues(struct virtnet_info *vi) +{ + int i; + + vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); + if (!vi->sq) + goto err_sq; + vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); + if (!vi->rq) + goto err_rq; + + INIT_DELAYED_WORK(&vi->refill, refill_work); + for (i = 0; i < vi->max_queue_pairs; i++) { + vi->rq[i].pages = NULL; + netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, + napi_weight); + + sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); + sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); + } + + return 0; + +err_rq: + kfree(vi->sq); +err_sq: + return -ENOMEM; +} + +static int init_vqs(struct virtnet_info *vi) +{ + int ret; + + /* Allocate send & receive queues */ + ret = virtnet_alloc_queues(vi); + if (ret) + goto err; + + ret = virtnet_find_vqs(vi); + if (ret) + goto err_free; + + get_online_cpus(); + virtnet_set_affinity(vi); + put_online_cpus(); + + return 0; + +err_free: + virtnet_free_queues(vi); +err: + return ret; +} + +static int virtnet_probe(struct virtio_device *vdev) +{ + int i, err; + struct net_device *dev; + struct virtnet_info *vi; + u16 max_queue_pairs; + + /* Find if host supports multiqueue virtio_net device */ + err = virtio_config_val(vdev, VIRTIO_NET_F_MQ, + offsetof(struct virtio_net_config, + max_virtqueue_pairs), &max_queue_pairs); + + /* We need at least 2 queue's */ + if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || + max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || + !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) + max_queue_pairs = 1; + + /* Allocate ourselves a network device with room for our info */ + dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); + if (!dev) + return -ENOMEM; + + /* Set up network device as normal. 
*/ + dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; + dev->netdev_ops = &virtnet_netdev; + dev->features = NETIF_F_HIGHDMA; + + SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops); + SET_NETDEV_DEV(dev, &vdev->dev); + + /* Do we support "hardware" checksums? */ + if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { + /* This opens up the world of extra features. */ + dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; + if (csum) + dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; + + if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { + dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO + | NETIF_F_TSO_ECN | NETIF_F_TSO6; + } + /* Individual feature bits: what can host handle? */ + if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) + dev->hw_features |= NETIF_F_TSO; + if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) + dev->hw_features |= NETIF_F_TSO6; + if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) + dev->hw_features |= NETIF_F_TSO_ECN; + if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) + dev->hw_features |= NETIF_F_UFO; + + if (gso) + dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); + /* (!csum && gso) case will be fixed by register_netdev() */ + } + + dev->vlan_features = dev->features; + + /* Configuration may specify what MAC to use. Otherwise random. */ + if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC, + offsetof(struct virtio_net_config, mac), + dev->dev_addr, dev->addr_len) < 0) + eth_hw_addr_random(dev); + + /* Set up our device-specific information */ + vi = netdev_priv(dev); + vi->dev = dev; + vi->vdev = vdev; + vdev->priv = vi; + vi->stats = alloc_percpu(struct virtnet_stats); + err = -ENOMEM; + if (vi->stats == NULL) + goto free; + + vi->vq_index = alloc_percpu(int); + if (vi->vq_index == NULL) + goto free_stats; + + mutex_init(&vi->config_lock); + vi->config_enable = true; + INIT_WORK(&vi->config_work, virtnet_config_changed_work); + + /* If we can receive ANY GSO packets, we must allocate large ones. */ + if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) + vi->big_packets = true; + + if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) + vi->mergeable_rx_bufs = true; + + if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) + vi->has_cvq = true; + + /* Use single tx/rx queue pair as default */ + vi->curr_queue_pairs = 1; + vi->max_queue_pairs = max_queue_pairs; + + /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ + err = init_vqs(vi); + if (err) + goto free_index; + + netif_set_real_num_tx_queues(dev, 1); + netif_set_real_num_rx_queues(dev, 1); + + err = register_netdev(dev); + if (err) { + pr_debug("virtio_net: registering device failed\n"); + goto free_vqs; + } + + /* Last of all, set up some receive buffers. */ + for (i = 0; i < vi->curr_queue_pairs; i++) { + try_fill_recv(&vi->rq[i], GFP_KERNEL); + + /* If we didn't even get one input buffer, we're useless. */ + if (vi->rq[i].num == 0) { + free_unused_bufs(vi); + err = -ENOMEM; + goto free_recv_bufs; + } + } + + vi->nb.notifier_call = &virtnet_cpu_callback; + err = register_hotcpu_notifier(&vi->nb); + if (err) { + pr_debug("virtio_net: registering cpu notifier failed\n"); + goto free_recv_bufs; + } + + /* Assume link up if device can't report link status, + otherwise get link status from config. 
*/ + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { + netif_carrier_off(dev); + schedule_work(&vi->config_work); + } else { + vi->status = VIRTIO_NET_S_LINK_UP; + netif_carrier_on(dev); + } + + pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", + dev->name, max_queue_pairs); + + return 0; + +free_recv_bufs: + free_receive_bufs(vi); + unregister_netdev(dev); +free_vqs: + cancel_delayed_work_sync(&vi->refill); + virtnet_del_vqs(vi); +free_index: + free_percpu(vi->vq_index); +free_stats: + free_percpu(vi->stats); +free: + free_netdev(dev); + return err; +} + +static void remove_vq_common(struct virtnet_info *vi) +{ + vi->vdev->config->reset(vi->vdev); + + /* Free unused buffers in both send and recv, if any. */ + free_unused_bufs(vi); + + free_receive_bufs(vi); + + virtnet_del_vqs(vi); +} + +static void virtnet_remove(struct virtio_device *vdev) +{ + struct virtnet_info *vi = vdev->priv; + + unregister_hotcpu_notifier(&vi->nb); + + /* Prevent config work handler from accessing the device. */ + mutex_lock(&vi->config_lock); + vi->config_enable = false; + mutex_unlock(&vi->config_lock); + + unregister_netdev(vi->dev); + + remove_vq_common(vi); + + flush_work(&vi->config_work); + + free_percpu(vi->vq_index); + free_percpu(vi->stats); + free_netdev(vi->dev); +} + +#ifdef CONFIG_PM +static int virtnet_freeze(struct virtio_device *vdev) +{ + struct virtnet_info *vi = vdev->priv; + int i; + + unregister_hotcpu_notifier(&vi->nb); + + /* Prevent config work handler from accessing the device */ + mutex_lock(&vi->config_lock); + vi->config_enable = false; + mutex_unlock(&vi->config_lock); + + netif_device_detach(vi->dev); + cancel_delayed_work_sync(&vi->refill); + + if (netif_running(vi->dev)) + for (i = 0; i < vi->max_queue_pairs; i++) { + napi_disable(&vi->rq[i].napi); + netif_napi_del(&vi->rq[i].napi); + } + + remove_vq_common(vi); + + flush_work(&vi->config_work); + + return 0; +} + +static int virtnet_restore(struct virtio_device *vdev) +{ + struct virtnet_info *vi = vdev->priv; + int err, i; + + err = init_vqs(vi); + if (err) + return err; + + if (netif_running(vi->dev)) { + for (i = 0; i < vi->curr_queue_pairs; i++) + if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) + schedule_delayed_work(&vi->refill, 0); + + for (i = 0; i < vi->max_queue_pairs; i++) + virtnet_napi_enable(&vi->rq[i]); + } + + netif_device_attach(vi->dev); + + mutex_lock(&vi->config_lock); + vi->config_enable = true; + mutex_unlock(&vi->config_lock); + + rtnl_lock(); + virtnet_set_queues(vi, vi->curr_queue_pairs); + rtnl_unlock(); + + err = register_hotcpu_notifier(&vi->nb); + if (err) + return err; + + return 0; +} +#endif + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { + VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, + VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, + VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, + VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, + VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, + VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, + VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, + VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, + VIRTIO_NET_F_CTRL_MAC_ADDR, +}; + +static struct virtio_driver virtio_net_driver = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtnet_probe, + .remove = virtnet_remove, + 
.config_changed = virtnet_config_changed,
+#ifdef CONFIG_PM
+	.freeze =	virtnet_freeze,
+	.restore =	virtnet_restore,
+#endif
+};
+
+module_virtio_driver(virtio_net_driver);
+
+MODULE_DEVICE_TABLE(virtio, id_table);
+MODULE_DESCRIPTION("Virtio network driver");
+MODULE_LICENSE("GPL");
diff --git a/addons/virtio/src/3.10.108/virtio_pci.c b/addons/virtio/src/3.10.108/virtio_pci.c new file mode 100644 index 00000000..933241a6 --- /dev/null +++ b/addons/virtio/src/3.10.108/virtio_pci.c @@ -0,0 +1,858 @@
+/*
+ * Virtio PCI driver
+ *
+ * This module allows virtio devices to be used over a virtual PCI device.
+ * This can be used with QEMU based VMMs like KVM or Xen.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors:
+ *  Anthony Liguori  <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_pci.h>
+#include <linux/highmem.h>
+#include <linux/spinlock.h>
+
+MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
+MODULE_DESCRIPTION("virtio-pci");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1");
+
+/* Our device structure */
+struct virtio_pci_device
+{
+	struct virtio_device vdev;
+	struct pci_dev *pci_dev;
+
+	/* the IO mapping for the PCI config space */
+	void __iomem *ioaddr;
+
+	/* a list of queues so we can dispatch IRQs */
+	spinlock_t lock;
+	struct list_head virtqueues;
+
+	/* MSI-X support */
+	int msix_enabled;
+	int intx_enabled;
+	struct msix_entry *msix_entries;
+	cpumask_var_t *msix_affinity_masks;
+	/* Name strings for interrupts. This size should be enough,
+	 * and I'm too lazy to allocate each name separately. */
+	char (*msix_names)[256];
+	/* Number of available vectors */
+	unsigned msix_vectors;
+	/* Vectors allocated, excluding per-vq vectors if any */
+	unsigned msix_used_vectors;
+
+	/* Status saved during hibernate/restore */
+	u8 saved_status;
+
+	/* Whether we have vector per vq */
+	bool per_vq_vectors;
+};
+
+/* Constants for MSI-X */
+/* Use first vector for configuration changes, second and the rest for
+ * virtqueues. Thus, we need at least 2 vectors for MSI. */
+enum {
+	VP_MSIX_CONFIG_VECTOR = 0,
+	VP_MSIX_VQ_VECTOR = 1,
+};
+
+struct virtio_pci_vq_info
+{
+	/* the actual virtqueue */
+	struct virtqueue *vq;
+
+	/* the number of entries in the queue */
+	int num;
+
+	/* the virtual address of the ring queue */
+	void *queue;
+
+	/* the list node for the virtqueues list */
+	struct list_head node;
+
+	/* MSI-X vector (or none) */
+	unsigned msix_vector;
+};
+
+/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
+static DEFINE_PCI_DEVICE_TABLE(virtio_pci_id_table) = {
+	{ PCI_DEVICE(0x1af4, PCI_ANY_ID) },
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);
+
+/* Convert a generic virtio device to our structure */
+static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
+{
+	return container_of(vdev, struct virtio_pci_device, vdev);
+}
+
+/* virtio config->get_features() implementation */
+static u32 vp_get_features(struct virtio_device *vdev)
+{
+	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+	/* When someone needs more than 32 feature bits, we'll need to
+	 * steal a bit to indicate that the rest are somewhere else.
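+	 *
+	 * [Editor's note, not in the original source] A sketch of how a
+	 * feature test looks against this legacy 32-bit word, assuming the
+	 * usual virtio-net bit numbering (e.g. VIRTIO_NET_F_MQ is bit 22):
+	 *
+	 *   u32 f = ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
+	 *   bool has_mq = f & (1u << VIRTIO_NET_F_MQ);
+	 *
+	 * Devices needing more than 32 feature bits require the modern
+	 * (non-legacy) interface.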
*/ + return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); +} + +/* virtio config->finalize_features() implementation */ +static void vp_finalize_features(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* Give virtio_ring a chance to accept features. */ + vring_transport_features(vdev); + + /* We only support 32 feature bits. */ + BUILD_BUG_ON(ARRAY_SIZE(vdev->features) != 1); + iowrite32(vdev->features[0], vp_dev->ioaddr+VIRTIO_PCI_GUEST_FEATURES); +} + +/* virtio config->get() implementation */ +static void vp_get(struct virtio_device *vdev, unsigned offset, + void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + void __iomem *ioaddr = vp_dev->ioaddr + + VIRTIO_PCI_CONFIG(vp_dev) + offset; + u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + ptr[i] = ioread8(ioaddr + i); +} + +/* the config->set() implementation. it's symmetric to the config->get() + * implementation */ +static void vp_set(struct virtio_device *vdev, unsigned offset, + const void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + void __iomem *ioaddr = vp_dev->ioaddr + + VIRTIO_PCI_CONFIG(vp_dev) + offset; + const u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + iowrite8(ptr[i], ioaddr + i); +} + +/* config->{get,set}_status() implementations */ +static u8 vp_get_status(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); +} + +static void vp_set_status(struct virtio_device *vdev, u8 status) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* We should never be setting status to 0. */ + BUG_ON(status == 0); + iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); +} + +/* wait for pending irq handlers */ +static void vp_synchronize_vectors(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + int i; + + if (vp_dev->intx_enabled) + synchronize_irq(vp_dev->pci_dev->irq); + + for (i = 0; i < vp_dev->msix_vectors; ++i) + synchronize_irq(vp_dev->msix_entries[i].vector); +} + +static void vp_reset(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* 0 status means a reset. */ + iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); + /* Flush out the status write, and flush in device writes, + * including MSi-X interrupts, if any. */ + ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); + /* Flush pending VQ/configuration callbacks. */ + vp_synchronize_vectors(vdev); +} + +/* the notify function used when creating a virt queue */ +static void vp_notify(struct virtqueue *vq) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); + + /* we write the queue's selector into the notification register to + * signal the other end */ + iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY); +} + +/* Handle a configuration change: Tell driver if it wants to know. */ +static irqreturn_t vp_config_changed(int irq, void *opaque) +{ + struct virtio_pci_device *vp_dev = opaque; + struct virtio_driver *drv; + drv = container_of(vp_dev->vdev.dev.driver, + struct virtio_driver, driver); + + if (drv && drv->config_changed) + drv->config_changed(&vp_dev->vdev); + return IRQ_HANDLED; +} + +/* Notify all virtqueues on an interrupt. 
*/ +static irqreturn_t vp_vring_interrupt(int irq, void *opaque) +{ + struct virtio_pci_device *vp_dev = opaque; + struct virtio_pci_vq_info *info; + irqreturn_t ret = IRQ_NONE; + unsigned long flags; + + spin_lock_irqsave(&vp_dev->lock, flags); + list_for_each_entry(info, &vp_dev->virtqueues, node) { + if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + spin_unlock_irqrestore(&vp_dev->lock, flags); + + return ret; +} + +/* A small wrapper to also acknowledge the interrupt when it's handled. + * I really need an EIO hook for the vring so I can ack the interrupt once we + * know that we'll be handling the IRQ but before we invoke the callback since + * the callback may notify the host which results in the host attempting to + * raise an interrupt that we would then mask once we acknowledged the + * interrupt. */ +static irqreturn_t vp_interrupt(int irq, void *opaque) +{ + struct virtio_pci_device *vp_dev = opaque; + u8 isr; + + /* reading the ISR has the effect of also clearing it so it's very + * important to save off the value. */ + isr = ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); + + /* It's definitely not us if the ISR was not high */ + if (!isr) + return IRQ_NONE; + + /* Configuration change? Tell driver if it wants to know. */ + if (isr & VIRTIO_PCI_ISR_CONFIG) + vp_config_changed(irq, opaque); + + return vp_vring_interrupt(irq, opaque); +} + +static void vp_free_vectors(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + int i; + + if (vp_dev->intx_enabled) { + free_irq(vp_dev->pci_dev->irq, vp_dev); + vp_dev->intx_enabled = 0; + } + + for (i = 0; i < vp_dev->msix_used_vectors; ++i) + free_irq(vp_dev->msix_entries[i].vector, vp_dev); + + for (i = 0; i < vp_dev->msix_vectors; i++) + if (vp_dev->msix_affinity_masks[i]) + free_cpumask_var(vp_dev->msix_affinity_masks[i]); + + if (vp_dev->msix_enabled) { + /* Disable the vector used for configuration */ + iowrite16(VIRTIO_MSI_NO_VECTOR, + vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + /* Flush the write out to device */ + ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + + pci_disable_msix(vp_dev->pci_dev); + vp_dev->msix_enabled = 0; + vp_dev->msix_vectors = 0; + } + + vp_dev->msix_used_vectors = 0; + kfree(vp_dev->msix_names); + vp_dev->msix_names = NULL; + kfree(vp_dev->msix_entries); + vp_dev->msix_entries = NULL; + kfree(vp_dev->msix_affinity_masks); + vp_dev->msix_affinity_masks = NULL; +} + +static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, + bool per_vq_vectors) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + const char *name = dev_name(&vp_dev->vdev.dev); + unsigned i, v; + int err = -ENOMEM; + + vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, + GFP_KERNEL); + if (!vp_dev->msix_entries) + goto error; + vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, + GFP_KERNEL); + if (!vp_dev->msix_names) + goto error; + vp_dev->msix_affinity_masks + = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, + GFP_KERNEL); + if (!vp_dev->msix_affinity_masks) + goto error; + for (i = 0; i < nvectors; ++i) + if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], + GFP_KERNEL)) + goto error; + + for (i = 0; i < nvectors; ++i) + vp_dev->msix_entries[i].entry = i; + + /* pci_enable_msix returns positive if we can't get this many. 
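+	 *
+	 * [Editor's note, not in the original source] With this legacy API a
+	 * positive return value is the number of vectors that could have
+	 * been allocated; some drivers retried with that count, e.g.
+	 *
+	 *   rc = pci_enable_msix(pdev, entries, nvec);
+	 *   if (rc > 0)
+	 *           rc = pci_enable_msix(pdev, entries, rc);
+	 *
+	 * This driver instead treats any shortfall as -ENOSPC and lets
+	 * vp_try_to_find_vqs() fall back to a less vector-hungry mode.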
*/ + err = pci_enable_msix(vp_dev->pci_dev, vp_dev->msix_entries, nvectors); + if (err > 0) + err = -ENOSPC; + if (err) + goto error; + vp_dev->msix_vectors = nvectors; + vp_dev->msix_enabled = 1; + + /* Set the vector used for configuration */ + v = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, + "%s-config", name); + err = request_irq(vp_dev->msix_entries[v].vector, + vp_config_changed, 0, vp_dev->msix_names[v], + vp_dev); + if (err) + goto error; + ++vp_dev->msix_used_vectors; + + iowrite16(v, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + /* Verify we had enough resources to assign the vector */ + v = ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + if (v == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto error; + } + + if (!per_vq_vectors) { + /* Shared vector for all VQs */ + v = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, + "%s-virtqueues", name); + err = request_irq(vp_dev->msix_entries[v].vector, + vp_vring_interrupt, 0, vp_dev->msix_names[v], + vp_dev); + if (err) + goto error; + ++vp_dev->msix_used_vectors; + } + return 0; +error: + vp_free_vectors(vdev); + return err; +} + +static int vp_request_intx(struct virtio_device *vdev) +{ + int err; + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, + IRQF_SHARED, dev_name(&vdev->dev), vp_dev); + if (!err) + vp_dev->intx_enabled = 1; + return err; +} + +static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_vq_info *info; + struct virtqueue *vq; + unsigned long flags, size; + u16 num; + int err; + + /* Select the queue we're interested in */ + iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); + + /* Check if queue is either not available or already active. 
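+	 *
+	 * [Editor's note, not in the original source] In the legacy PCI
+	 * layout this probe works as follows: QUEUE_NUM reads 0 when the
+	 * selected queue does not exist, and QUEUE_PFN reads nonzero when a
+	 * ring has already been registered. Activation below mirrors this;
+	 * for a ring allocated at physical address 0x3f000000,
+	 *
+	 *   0x3f000000 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT (= 12) == 0x3f000
+	 *
+	 * is the page frame number written to QUEUE_PFN.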
*/ + num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); + if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) + return ERR_PTR(-ENOENT); + + /* allocate and fill out our structure the represents an active + * queue */ + info = kmalloc(sizeof(struct virtio_pci_vq_info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + + info->num = num; + info->msix_vector = msix_vec; + + size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); + info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); + if (info->queue == NULL) { + err = -ENOMEM; + goto out_info; + } + + /* activate the queue */ + iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, + vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); + + /* create the vring */ + vq = vring_new_virtqueue(index, info->num, VIRTIO_PCI_VRING_ALIGN, vdev, + true, info->queue, vp_notify, callback, name); + if (!vq) { + err = -ENOMEM; + goto out_activate_queue; + } + + vq->priv = info; + info->vq = vq; + + if (msix_vec != VIRTIO_MSI_NO_VECTOR) { + iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + if (msix_vec == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto out_assign; + } + } + + if (callback) { + spin_lock_irqsave(&vp_dev->lock, flags); + list_add(&info->node, &vp_dev->virtqueues); + spin_unlock_irqrestore(&vp_dev->lock, flags); + } else { + INIT_LIST_HEAD(&info->node); + } + + return vq; + +out_assign: + vring_del_virtqueue(vq); +out_activate_queue: + iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); + free_pages_exact(info->queue, size); +out_info: + kfree(info); + return ERR_PTR(err); +} + +static void vp_del_vq(struct virtqueue *vq) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); + struct virtio_pci_vq_info *info = vq->priv; + unsigned long flags, size; + + spin_lock_irqsave(&vp_dev->lock, flags); + list_del(&info->node); + spin_unlock_irqrestore(&vp_dev->lock, flags); + + iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); + + if (vp_dev->msix_enabled) { + iowrite16(VIRTIO_MSI_NO_VECTOR, + vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + /* Flush the write out to device */ + ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); + } + + vring_del_virtqueue(vq); + + /* Select and deactivate the queue */ + iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); + + size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); + free_pages_exact(info->queue, size); + kfree(info); +} + +/* the config->del_vqs() implementation */ +static void vp_del_vqs(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtqueue *vq, *n; + struct virtio_pci_vq_info *info; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) { + info = vq->priv; + if (vp_dev->per_vq_vectors && + info->msix_vector != VIRTIO_MSI_NO_VECTOR) + free_irq(vp_dev->msix_entries[info->msix_vector].vector, + vq); + vp_del_vq(vq); + } + vp_dev->per_vq_vectors = false; + + vp_free_vectors(vdev); +} + +static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[], + bool use_msix, + bool per_vq_vectors) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + u16 msix_vec; + int i, err, nvectors, allocated_vectors; + + if (!use_msix) { + /* Old style: one normal interrupt for change and all vqs. 
*/ + err = vp_request_intx(vdev); + if (err) + goto error_request; + } else { + if (per_vq_vectors) { + /* Best option: one for change interrupt, one per vq. */ + nvectors = 1; + for (i = 0; i < nvqs; ++i) + if (callbacks[i]) + ++nvectors; + } else { + /* Second best: one for change, shared for all vqs. */ + nvectors = 2; + } + + err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors); + if (err) + goto error_request; + } + + vp_dev->per_vq_vectors = per_vq_vectors; + allocated_vectors = vp_dev->msix_used_vectors; + for (i = 0; i < nvqs; ++i) { + if (!names[i]) { + vqs[i] = NULL; + continue; + } else if (!callbacks[i] || !vp_dev->msix_enabled) + msix_vec = VIRTIO_MSI_NO_VECTOR; + else if (vp_dev->per_vq_vectors) + msix_vec = allocated_vectors++; + else + msix_vec = VP_MSIX_VQ_VECTOR; + vqs[i] = setup_vq(vdev, i, callbacks[i], names[i], msix_vec); + if (IS_ERR(vqs[i])) { + err = PTR_ERR(vqs[i]); + goto error_find; + } + + if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) + continue; + + /* allocate per-vq irq if available and necessary */ + snprintf(vp_dev->msix_names[msix_vec], + sizeof *vp_dev->msix_names, + "%s-%s", + dev_name(&vp_dev->vdev.dev), names[i]); + err = request_irq(vp_dev->msix_entries[msix_vec].vector, + vring_interrupt, 0, + vp_dev->msix_names[msix_vec], + vqs[i]); + if (err) { + vp_del_vq(vqs[i]); + goto error_find; + } + } + return 0; + +error_find: + vp_del_vqs(vdev); + +error_request: + return err; +} + +/* the config->find_vqs() implementation */ +static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + int err; + + /* Try MSI-X with one vector per queue. */ + err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true); + if (!err) + return 0; + /* Fallback: MSI-X with one vector for config, one shared for queues. */ + err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, + true, false); + if (!err) + return 0; + /* Finally fall back to regular interrupts. 
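+	 *
+	 * [Editor's note, not in the original source] Summarizing the ladder
+	 * for, say, nvqs == 3 where 2 vqs have callbacks: the first attempt
+	 * requests 3 MSI-X vectors (1 for config plus 1 per callback vq),
+	 * the second requests 2 (1 for config plus 1 shared by all vqs),
+	 * and this final step uses the single shared INTx line for
+	 * everything.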
*/ + return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, + false, false); +} + +static const char *vp_bus_name(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + return pci_name(vp_dev->pci_dev); +} + +/* Setup the affinity for a virtqueue: + * - force the affinity for per vq vector + * - OR over all affinities for shared MSI + * - ignore the affinity request if we're using INTX + */ +static int vp_set_vq_affinity(struct virtqueue *vq, int cpu) +{ + struct virtio_device *vdev = vq->vdev; + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_vq_info *info = vq->priv; + struct cpumask *mask; + unsigned int irq; + + if (!vq->callback) + return -EINVAL; + + if (vp_dev->msix_enabled) { + mask = vp_dev->msix_affinity_masks[info->msix_vector]; + irq = vp_dev->msix_entries[info->msix_vector].vector; + if (cpu == -1) + irq_set_affinity_hint(irq, NULL); + else { + cpumask_set_cpu(cpu, mask); + irq_set_affinity_hint(irq, mask); + } + } + return 0; +} + +static const struct virtio_config_ops virtio_pci_config_ops = { + .get = vp_get, + .set = vp_set, + .get_status = vp_get_status, + .set_status = vp_set_status, + .reset = vp_reset, + .find_vqs = vp_find_vqs, + .del_vqs = vp_del_vqs, + .get_features = vp_get_features, + .finalize_features = vp_finalize_features, + .bus_name = vp_bus_name, + .set_vq_affinity = vp_set_vq_affinity, +}; + +static void virtio_pci_release_dev(struct device *_d) +{ + /* + * No need for a release method as we allocate/free + * all devices together with the pci devices. + * Provide an empty one to avoid getting a warning from core. + */ +} + +/* the PCI probing function */ +static int virtio_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + struct virtio_pci_device *vp_dev; + int err; + + /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ + if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) + return -ENODEV; + + if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { + printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", + VIRTIO_PCI_ABI_VERSION, pci_dev->revision); + return -ENODEV; + } + + /* allocate our structure and fill it out */ + vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); + if (vp_dev == NULL) + return -ENOMEM; + + vp_dev->vdev.dev.parent = &pci_dev->dev; + vp_dev->vdev.dev.release = virtio_pci_release_dev; + vp_dev->vdev.config = &virtio_pci_config_ops; + vp_dev->pci_dev = pci_dev; + INIT_LIST_HEAD(&vp_dev->virtqueues); + spin_lock_init(&vp_dev->lock); + + /* Disable MSI/MSIX to bring device to a known good state. */ + pci_msi_off(pci_dev); + + /* enable the device */ + err = pci_enable_device(pci_dev); + if (err) + goto out; + + err = pci_request_regions(pci_dev, "virtio-pci"); + if (err) + goto out_enable_device; + + vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); + if (vp_dev->ioaddr == NULL) { + err = -ENOMEM; + goto out_req_regions; + } + + pci_set_drvdata(pci_dev, vp_dev); + pci_set_master(pci_dev); + + /* we use the subsystem vendor/device id as the virtio vendor/device + * id. 
this allows us to use the same PCI vendor/device id for all + * virtio devices and to identify the particular virtio driver by + * the subsystem ids */ + vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; + vp_dev->vdev.id.device = pci_dev->subsystem_device; + + /* finally register the virtio device */ + err = register_virtio_device(&vp_dev->vdev); + if (err) + goto out_set_drvdata; + + return 0; + +out_set_drvdata: + pci_set_drvdata(pci_dev, NULL); + pci_iounmap(pci_dev, vp_dev->ioaddr); +out_req_regions: + pci_release_regions(pci_dev); +out_enable_device: + pci_disable_device(pci_dev); +out: + kfree(vp_dev); + return err; +} + +static void virtio_pci_remove(struct pci_dev *pci_dev) +{ + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + + unregister_virtio_device(&vp_dev->vdev); + + vp_del_vqs(&vp_dev->vdev); + pci_set_drvdata(pci_dev, NULL); + pci_iounmap(pci_dev, vp_dev->ioaddr); + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); + kfree(vp_dev); +} + +#ifdef CONFIG_PM +static int virtio_pci_freeze(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + struct virtio_driver *drv; + int ret; + + drv = container_of(vp_dev->vdev.dev.driver, + struct virtio_driver, driver); + + ret = 0; + vp_dev->saved_status = vp_get_status(&vp_dev->vdev); + if (drv && drv->freeze) + ret = drv->freeze(&vp_dev->vdev); + + if (!ret) + pci_disable_device(pci_dev); + return ret; +} + +static int virtio_pci_restore(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + struct virtio_driver *drv; + unsigned status = 0; + int ret; + + drv = container_of(vp_dev->vdev.dev.driver, + struct virtio_driver, driver); + + ret = pci_enable_device(pci_dev); + if (ret) + return ret; + + pci_set_master(pci_dev); + /* We always start by resetting the device, in case a previous + * driver messed it up. */ + vp_reset(&vp_dev->vdev); + + /* Acknowledge that we've seen the device. */ + status |= VIRTIO_CONFIG_S_ACKNOWLEDGE; + vp_set_status(&vp_dev->vdev, status); + + /* Maybe driver failed before freeze. + * Restore the failed status, for debugging. */ + status |= vp_dev->saved_status & VIRTIO_CONFIG_S_FAILED; + vp_set_status(&vp_dev->vdev, status); + + if (!drv) + return 0; + + /* We have a driver! */ + status |= VIRTIO_CONFIG_S_DRIVER; + vp_set_status(&vp_dev->vdev, status); + + vp_finalize_features(&vp_dev->vdev); + + if (drv->restore) { + ret = drv->restore(&vp_dev->vdev); + if (ret) { + status |= VIRTIO_CONFIG_S_FAILED; + vp_set_status(&vp_dev->vdev, status); + return ret; + } + } + + /* Finally, tell the device we're all set */ + status |= VIRTIO_CONFIG_S_DRIVER_OK; + vp_set_status(&vp_dev->vdev, status); + + return ret; +} + +static const struct dev_pm_ops virtio_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore) +}; +#endif + +static struct pci_driver virtio_pci_driver = { + .name = "virtio-pci", + .id_table = virtio_pci_id_table, + .probe = virtio_pci_probe, + .remove = virtio_pci_remove, +#ifdef CONFIG_PM + .driver.pm = &virtio_pci_pm_ops, +#endif +}; + +module_pci_driver(virtio_pci_driver); diff --git a/addons/virtio/src/3.10.108/virtio_ring.c b/addons/virtio/src/3.10.108/virtio_ring.c new file mode 100644 index 00000000..37d58f84 --- /dev/null +++ b/addons/virtio/src/3.10.108/virtio_ring.c @@ -0,0 +1,871 @@ +/* Virtio ring implementation. 
+ *
+ * Copyright 2007 Rusty Russell IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
+ */
+#include <linux/virtio.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_config.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+
+#ifdef DEBUG
+/* For development, we want to crash whenever the ring is screwed. */
+#define BAD_RING(_vq, fmt, args...)				\
+	do {							\
+		dev_err(&(_vq)->vq.vdev->dev,			\
+			"%s:"fmt, (_vq)->vq.name, ##args);	\
+		BUG();						\
+	} while (0)
+/* Caller is supposed to guarantee no reentry. */
+#define START_USE(_vq)						\
+	do {							\
+		if ((_vq)->in_use)				\
+			panic("%s:in_use = %i\n",		\
+			      (_vq)->vq.name, (_vq)->in_use);	\
+		(_vq)->in_use = __LINE__;			\
+	} while (0)
+#define END_USE(_vq) \
+	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
+#else
+#define BAD_RING(_vq, fmt, args...)				\
+	do {							\
+		dev_err(&_vq->vq.vdev->dev,			\
+			"%s:"fmt, (_vq)->vq.name, ##args);	\
+		(_vq)->broken = true;				\
+	} while (0)
+#define START_USE(vq)
+#define END_USE(vq)
+#endif
+
+struct vring_virtqueue
+{
+	struct virtqueue vq;
+
+	/* Actual memory layout for this queue */
+	struct vring vring;
+
+	/* Can we use weak barriers? */
+	bool weak_barriers;
+
+	/* Other side has made a mess, don't try any more. */
+	bool broken;
+
+	/* Host supports indirect buffers */
+	bool indirect;
+
+	/* Host publishes avail event idx */
+	bool event;
+
+	/* Head of free buffer list. */
+	unsigned int free_head;
+	/* Number we've added since last sync. */
+	unsigned int num_added;
+
+	/* Last used index we've seen. */
+	u16 last_used_idx;
+
+	/* How to notify other side. FIXME: commonalize hcalls! */
+	void (*notify)(struct virtqueue *vq);
+
+#ifdef DEBUG
+	/* They're supposed to lock for us. */
+	unsigned int in_use;
+
+	/* Figure out if their kicks are too delayed. */
+	bool last_add_time_valid;
+	ktime_t last_add_time;
+#endif
+
+	/* Tokens for callbacks. */
+	void *data[];
+};
+
+#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)
+
+static inline struct scatterlist *sg_next_chained(struct scatterlist *sg,
+						  unsigned int *count)
+{
+	return sg_next(sg);
+}
+
+static inline struct scatterlist *sg_next_arr(struct scatterlist *sg,
+					      unsigned int *count)
+{
+	if (--(*count) == 0)
+		return NULL;
+	return sg + 1;
+}
+
+/* Set up an indirect table of descriptors and add it to the queue. */
+static inline int vring_add_indirect(struct vring_virtqueue *vq,
+				     struct scatterlist *sgs[],
+				     struct scatterlist *(*next)
+				       (struct scatterlist *, unsigned int *),
+				     unsigned int total_sg,
+				     unsigned int total_out,
+				     unsigned int total_in,
+				     unsigned int out_sgs,
+				     unsigned int in_sgs,
+				     gfp_t gfp)
+{
+	struct vring_desc *desc;
+	unsigned head;
+	struct scatterlist *sg;
+	int i, n;
+
+	/*
+	 * We require lowmem mappings for the descriptors because
+	 * otherwise virt_to_phys will give us bogus addresses in the
+	 * virtqueue.
+ */ + gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH); + + desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp); + if (!desc) + return -ENOMEM; + + /* Transfer entries from the sg lists into the indirect page */ + i = 0; + for (n = 0; n < out_sgs; n++) { + for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { + desc[i].flags = VRING_DESC_F_NEXT; + desc[i].addr = sg_phys(sg); + desc[i].len = sg->length; + desc[i].next = i+1; + i++; + } + } + for (; n < (out_sgs + in_sgs); n++) { + for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { + desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; + desc[i].addr = sg_phys(sg); + desc[i].len = sg->length; + desc[i].next = i+1; + i++; + } + } + BUG_ON(i != total_sg); + + /* Last one doesn't continue. */ + desc[i-1].flags &= ~VRING_DESC_F_NEXT; + desc[i-1].next = 0; + + /* We're about to use a buffer */ + vq->vq.num_free--; + + /* Use a single buffer which doesn't continue */ + head = vq->free_head; + vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT; + vq->vring.desc[head].addr = virt_to_phys(desc); + vq->vring.desc[head].len = i * sizeof(struct vring_desc); + + /* Update free pointer */ + vq->free_head = vq->vring.desc[head].next; + + return head; +} + +static inline int virtqueue_add(struct virtqueue *_vq, + struct scatterlist *sgs[], + struct scatterlist *(*next) + (struct scatterlist *, unsigned int *), + unsigned int total_out, + unsigned int total_in, + unsigned int out_sgs, + unsigned int in_sgs, + void *data, + gfp_t gfp) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + struct scatterlist *sg; + unsigned int i, n, avail, uninitialized_var(prev), total_sg; + int head; + + START_USE(vq); + + BUG_ON(data == NULL); + +#ifdef DEBUG + { + ktime_t now = ktime_get(); + + /* No kick or get, with .1 second between? Warn. */ + if (vq->last_add_time_valid) + WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time)) + > 100); + vq->last_add_time = now; + vq->last_add_time_valid = true; + } +#endif + + total_sg = total_in + total_out; + + /* If the host supports indirect descriptor tables, and we have multiple + * buffers, then go indirect. FIXME: tune this threshold */ + if (vq->indirect && total_sg > 1 && vq->vq.num_free) { + head = vring_add_indirect(vq, sgs, next, total_sg, total_out, + total_in, + out_sgs, in_sgs, gfp); + if (likely(head >= 0)) + goto add_head; + } + + BUG_ON(total_sg > vq->vring.num); + BUG_ON(total_sg == 0); + + if (vq->vq.num_free < total_sg) { + pr_debug("Can't add buf len %i - avail = %i\n", + total_sg, vq->vq.num_free); + /* FIXME: for historical reasons, we force a notify here if + * there are outgoing parts to the buffer. Presumably the + * host should service the ring ASAP. */ + if (out_sgs) + vq->notify(&vq->vq); + END_USE(vq); + return -ENOSPC; + } + + /* We're about to use some buffers from the free list. */ + vq->vq.num_free -= total_sg; + + head = i = vq->free_head; + for (n = 0; n < out_sgs; n++) { + for (sg = sgs[n]; sg; sg = next(sg, &total_out)) { + vq->vring.desc[i].flags = VRING_DESC_F_NEXT; + vq->vring.desc[i].addr = sg_phys(sg); + vq->vring.desc[i].len = sg->length; + prev = i; + i = vq->vring.desc[i].next; + } + } + for (; n < (out_sgs + in_sgs); n++) { + for (sg = sgs[n]; sg; sg = next(sg, &total_in)) { + vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE; + vq->vring.desc[i].addr = sg_phys(sg); + vq->vring.desc[i].len = sg->length; + prev = i; + i = vq->vring.desc[i].next; + } + } + /* Last one doesn't continue. 
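+	 * (Editor's note: vring_new_virtqueue() pre-links every
+	 * descriptor's ->next field to the following slot, so the
+	 * chain written above is already linked; clearing the NEXT
+	 * flag on the final descriptor is what terminates the chain
+	 * that the device will walk.)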
*/ + vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT; + + /* Update free pointer */ + vq->free_head = i; + +add_head: + /* Set token. */ + vq->data[head] = data; + + /* Put entry in available array (but don't update avail->idx until they + * do sync). */ + avail = (vq->vring.avail->idx & (vq->vring.num-1)); + vq->vring.avail->ring[avail] = head; + + /* Descriptors and available array need to be set before we expose the + * new available array entries. */ + virtio_wmb(vq->weak_barriers); + vq->vring.avail->idx++; + vq->num_added++; + + /* This is very unlikely, but theoretically possible. Kick + * just in case. */ + if (unlikely(vq->num_added == (1 << 16) - 1)) + virtqueue_kick(_vq); + + pr_debug("Added buffer head %i to %p\n", head, vq); + END_USE(vq); + + return 0; +} + +/** + * virtqueue_add_buf - expose buffer to other end + * @vq: the struct virtqueue we're talking about. + * @sg: the description of the buffer(s). + * @out_num: the number of sg readable by other side + * @in_num: the number of sg which are writable (after readable ones) + * @data: the token identifying the buffer. + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM). + */ +int virtqueue_add_buf(struct virtqueue *_vq, + struct scatterlist sg[], + unsigned int out, + unsigned int in, + void *data, + gfp_t gfp) +{ + struct scatterlist *sgs[2]; + + sgs[0] = sg; + sgs[1] = sg + out; + + return virtqueue_add(_vq, sgs, sg_next_arr, + out, in, out ? 1 : 0, in ? 1 : 0, data, gfp); +} +EXPORT_SYMBOL_GPL(virtqueue_add_buf); + +/** + * virtqueue_add_sgs - expose buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sgs: array of terminated scatterlists. + * @out_num: the number of scatterlists readable by other side + * @in_num: the number of scatterlists which are writable (after readable ones) + * @data: the token identifying the buffer. + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM). + */ +int virtqueue_add_sgs(struct virtqueue *_vq, + struct scatterlist *sgs[], + unsigned int out_sgs, + unsigned int in_sgs, + void *data, + gfp_t gfp) +{ + unsigned int i, total_out, total_in; + + /* Count them first. */ + for (i = total_out = total_in = 0; i < out_sgs; i++) { + struct scatterlist *sg; + for (sg = sgs[i]; sg; sg = sg_next(sg)) + total_out++; + } + for (; i < out_sgs + in_sgs; i++) { + struct scatterlist *sg; + for (sg = sgs[i]; sg; sg = sg_next(sg)) + total_in++; + } + return virtqueue_add(_vq, sgs, sg_next_chained, + total_out, total_in, out_sgs, in_sgs, data, gfp); +} +EXPORT_SYMBOL_GPL(virtqueue_add_sgs); + +/** + * virtqueue_add_outbuf - expose output buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sgs: array of scatterlists (need not be terminated!) + * @num: the number of scatterlists readable by other side + * @data: the token identifying the buffer. + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM). 
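+ *
+ * Example (editor's sketch, not part of the upstream file; buf and len
+ * are placeholders):
+ *
+ *	struct scatterlist sg;
+ *
+ *	sg_init_one(&sg, buf, len);
+ *	if (virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC) == 0)
+ *		virtqueue_kick(vq);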
+ */ +int virtqueue_add_outbuf(struct virtqueue *vq, + struct scatterlist sg[], unsigned int num, + void *data, + gfp_t gfp) +{ + return virtqueue_add(vq, &sg, sg_next_arr, num, 0, 1, 0, data, gfp); +} +EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); + +/** + * virtqueue_add_inbuf - expose input buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sgs: array of scatterlists (need not be terminated!) + * @num: the number of scatterlists writable by other side + * @data: the token identifying the buffer. + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM). + */ +int virtqueue_add_inbuf(struct virtqueue *vq, + struct scatterlist sg[], unsigned int num, + void *data, + gfp_t gfp) +{ + return virtqueue_add(vq, &sg, sg_next_arr, 0, num, 0, 1, data, gfp); +} +EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); + +/** + * virtqueue_kick_prepare - first half of split virtqueue_kick call. + * @vq: the struct virtqueue + * + * Instead of virtqueue_kick(), you can do: + * if (virtqueue_kick_prepare(vq)) + * virtqueue_notify(vq); + * + * This is sometimes useful because the virtqueue_kick_prepare() needs + * to be serialized, but the actual virtqueue_notify() call does not. + */ +bool virtqueue_kick_prepare(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + u16 new, old; + bool needs_kick; + + START_USE(vq); + /* We need to expose available array entries before checking avail + * event. */ + virtio_mb(vq->weak_barriers); + + old = vq->vring.avail->idx - vq->num_added; + new = vq->vring.avail->idx; + vq->num_added = 0; + +#ifdef DEBUG + if (vq->last_add_time_valid) { + WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), + vq->last_add_time)) > 100); + } + vq->last_add_time_valid = false; +#endif + + if (vq->event) { + needs_kick = vring_need_event(vring_avail_event(&vq->vring), + new, old); + } else { + needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY); + } + END_USE(vq); + return needs_kick; +} +EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); + +/** + * virtqueue_notify - second half of split virtqueue_kick call. + * @vq: the struct virtqueue + * + * This does not need to be serialized. + */ +void virtqueue_notify(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + /* Prod other side to tell it about changes. */ + vq->notify(_vq); +} +EXPORT_SYMBOL_GPL(virtqueue_notify); + +/** + * virtqueue_kick - update after add_buf + * @vq: the struct virtqueue + * + * After one or more virtqueue_add_buf calls, invoke this to kick + * the other side. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + */ +void virtqueue_kick(struct virtqueue *vq) +{ + if (virtqueue_kick_prepare(vq)) + virtqueue_notify(vq); +} +EXPORT_SYMBOL_GPL(virtqueue_kick); + +static void detach_buf(struct vring_virtqueue *vq, unsigned int head) +{ + unsigned int i; + + /* Clear data ptr. 
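+	 * (Editor's note: the token is cleared before the descriptors
+	 * are recycled so that the "id %u is not a head!" check in
+	 * virtqueue_get_buf() can catch a device that hands back the
+	 * same head twice.)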
*/ + vq->data[head] = NULL; + + /* Put back on free list: find end */ + i = head; + + /* Free the indirect table */ + if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT) + kfree(phys_to_virt(vq->vring.desc[i].addr)); + + while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) { + i = vq->vring.desc[i].next; + vq->vq.num_free++; + } + + vq->vring.desc[i].next = vq->free_head; + vq->free_head = head; + /* Plus final descriptor */ + vq->vq.num_free++; +} + +static inline bool more_used(const struct vring_virtqueue *vq) +{ + return vq->last_used_idx != vq->vring.used->idx; +} + +/** + * virtqueue_get_buf - get the next used buffer + * @vq: the struct virtqueue we're talking about. + * @len: the length written into the buffer + * + * If the driver wrote data into the buffer, @len will be set to the + * amount written. This means you don't need to clear the buffer + * beforehand to ensure there's no data leakage in the case of short + * writes. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + * + * Returns NULL if there are no used buffers, or the "data" token + * handed to virtqueue_add_buf(). + */ +void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + void *ret; + unsigned int i; + u16 last_used; + + START_USE(vq); + + if (unlikely(vq->broken)) { + END_USE(vq); + return NULL; + } + + if (!more_used(vq)) { + pr_debug("No more buffers in queue\n"); + END_USE(vq); + return NULL; + } + + /* Only get used array entries after they have been exposed by host. */ + virtio_rmb(vq->weak_barriers); + + last_used = (vq->last_used_idx & (vq->vring.num - 1)); + i = vq->vring.used->ring[last_used].id; + *len = vq->vring.used->ring[last_used].len; + + if (unlikely(i >= vq->vring.num)) { + BAD_RING(vq, "id %u out of range\n", i); + return NULL; + } + if (unlikely(!vq->data[i])) { + BAD_RING(vq, "id %u is not a head!\n", i); + return NULL; + } + + /* detach_buf clears data, so grab it now. */ + ret = vq->data[i]; + detach_buf(vq, i); + vq->last_used_idx++; + /* If we expect an interrupt for the next entry, tell host + * by writing event index and flush out the write before + * the read in the next get_buf call. */ + if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) { + vring_used_event(&vq->vring) = vq->last_used_idx; + virtio_mb(vq->weak_barriers); + } + +#ifdef DEBUG + vq->last_add_time_valid = false; +#endif + + END_USE(vq); + return ret; +} +EXPORT_SYMBOL_GPL(virtqueue_get_buf); + +/** + * virtqueue_disable_cb - disable callbacks + * @vq: the struct virtqueue we're talking about. + * + * Note that this is not necessarily synchronous, hence unreliable and only + * useful as an optimization. + * + * Unlike other operations, this need not be serialized. + */ +void virtqueue_disable_cb(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; +} +EXPORT_SYMBOL_GPL(virtqueue_disable_cb); + +/** + * virtqueue_enable_cb_prepare - restart callbacks after disable_cb + * @vq: the struct virtqueue we're talking about. + * + * This re-enables callbacks; it returns current queue state + * in an opaque unsigned value. This value should be later tested by + * virtqueue_poll, to detect a possible race between the driver checking for + * more work, and enabling callbacks. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). 
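+ *
+ * Example (editor's sketch; process() and the again label are
+ * illustrative only):
+ *
+ * again:
+ *	virtqueue_disable_cb(vq);
+ *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
+ *		process(buf);
+ *	opaque = virtqueue_enable_cb_prepare(vq);
+ *	if (virtqueue_poll(vq, opaque))
+ *		goto again;	/* raced with a late buffer */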
+ */ +unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + u16 last_used_idx; + + START_USE(vq); + + /* We optimistically turn back on interrupts, then check if there was + * more to do. */ + /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to + * either clear the flags bit or point the event index at the next + * entry. Always do both to keep code simple. */ + vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; + vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx; + END_USE(vq); + return last_used_idx; +} +EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); + +/** + * virtqueue_poll - query pending used buffers + * @vq: the struct virtqueue we're talking about. + * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). + * + * Returns "true" if there are pending used buffers in the queue. + * + * This does not need to be serialized. + */ +bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + virtio_mb(vq->weak_barriers); + return (u16)last_used_idx != vq->vring.used->idx; +} +EXPORT_SYMBOL_GPL(virtqueue_poll); + +/** + * virtqueue_enable_cb - restart callbacks after disable_cb. + * @vq: the struct virtqueue we're talking about. + * + * This re-enables callbacks; it returns "false" if there are pending + * buffers in the queue, to detect a possible race between the driver + * checking for more work, and enabling callbacks. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + */ +bool virtqueue_enable_cb(struct virtqueue *_vq) +{ + unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq); + return !virtqueue_poll(_vq, last_used_idx); +} +EXPORT_SYMBOL_GPL(virtqueue_enable_cb); + +/** + * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. + * @vq: the struct virtqueue we're talking about. + * + * This re-enables callbacks but hints to the other side to delay + * interrupts until most of the available buffers have been processed; + * it returns "false" if there are many pending buffers in the queue, + * to detect a possible race between the driver checking for more work, + * and enabling callbacks. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + */ +bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + u16 bufs; + + START_USE(vq); + + /* We optimistically turn back on interrupts, then check if there was + * more to do. */ + /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to + * either clear the flags bit or point the event index at the next + * entry. Always do both to keep code simple. */ + vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; + /* TODO: tune this threshold */ + bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4; + vring_used_event(&vq->vring) = vq->last_used_idx + bufs; + virtio_mb(vq->weak_barriers); + if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) { + END_USE(vq); + return false; + } + + END_USE(vq); + return true; +} +EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); + +/** + * virtqueue_detach_unused_buf - detach first unused buffer + * @vq: the struct virtqueue we're talking about. + * + * Returns NULL or the "data" token handed to virtqueue_add_buf(). + * This is not valid on an active queue; it is useful only for device + * shutdown. 
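+ *
+ * Example (editor's sketch of a remove() path, assuming the tokens
+ * were kmalloc'ed):
+ *
+ *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
+ *		kfree(buf);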
+ */ +void *virtqueue_detach_unused_buf(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + unsigned int i; + void *buf; + + START_USE(vq); + + for (i = 0; i < vq->vring.num; i++) { + if (!vq->data[i]) + continue; + /* detach_buf clears data, so grab it now. */ + buf = vq->data[i]; + detach_buf(vq, i); + vq->vring.avail->idx--; + END_USE(vq); + return buf; + } + /* That should have freed everything. */ + BUG_ON(vq->vq.num_free != vq->vring.num); + + END_USE(vq); + return NULL; +} +EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); + +irqreturn_t vring_interrupt(int irq, void *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + if (!more_used(vq)) { + pr_debug("virtqueue interrupt with no work for %p\n", vq); + return IRQ_NONE; + } + + if (unlikely(vq->broken)) + return IRQ_HANDLED; + + pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); + if (vq->vq.callback) + vq->vq.callback(&vq->vq); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL_GPL(vring_interrupt); + +struct virtqueue *vring_new_virtqueue(unsigned int index, + unsigned int num, + unsigned int vring_align, + struct virtio_device *vdev, + bool weak_barriers, + void *pages, + void (*notify)(struct virtqueue *), + void (*callback)(struct virtqueue *), + const char *name) +{ + struct vring_virtqueue *vq; + unsigned int i; + + /* We assume num is a power of 2. */ + if (num & (num - 1)) { + dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); + return NULL; + } + + vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL); + if (!vq) + return NULL; + + vring_init(&vq->vring, num, pages, vring_align); + vq->vq.callback = callback; + vq->vq.vdev = vdev; + vq->vq.name = name; + vq->vq.num_free = num; + vq->vq.index = index; + vq->notify = notify; + vq->weak_barriers = weak_barriers; + vq->broken = false; + vq->last_used_idx = 0; + vq->num_added = 0; + list_add_tail(&vq->vq.list, &vdev->vqs); +#ifdef DEBUG + vq->in_use = false; + vq->last_add_time_valid = false; +#endif + + vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); + vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); + + /* No callback? Tell other side not to bother us. */ + if (!callback) + vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT; + + /* Put everything in free lists. */ + vq->free_head = 0; + for (i = 0; i < num-1; i++) { + vq->vring.desc[i].next = i+1; + vq->data[i] = NULL; + } + vq->data[i] = NULL; + + return &vq->vq; +} +EXPORT_SYMBOL_GPL(vring_new_virtqueue); + +void vring_del_virtqueue(struct virtqueue *vq) +{ + list_del(&vq->list); + kfree(to_vvq(vq)); +} +EXPORT_SYMBOL_GPL(vring_del_virtqueue); + +/* Manipulates transport-specific feature bits. */ +void vring_transport_features(struct virtio_device *vdev) +{ + unsigned int i; + + for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { + switch (i) { + case VIRTIO_RING_F_INDIRECT_DESC: + break; + case VIRTIO_RING_F_EVENT_IDX: + break; + default: + /* We don't understand this bit. */ + clear_bit(i, vdev->features); + } + } +} +EXPORT_SYMBOL_GPL(vring_transport_features); + +/** + * virtqueue_get_vring_size - return the size of the virtqueue's vring + * @vq: the struct virtqueue containing the vring of interest. + * + * Returns the size of the vring. This is mainly used for boasting to + * userspace. Unlike other operations, this need not be serialized. 
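+ *
+ * Example (editor's sketch; add_one_inbuf() is a hypothetical helper):
+ * pre-filling a receive ring to capacity:
+ *
+ *	for (i = 0; i < virtqueue_get_vring_size(vq); i++)
+ *		if (add_one_inbuf(vq) < 0)
+ *			break;
+ *	virtqueue_kick(vq);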
+ */ +unsigned int virtqueue_get_vring_size(struct virtqueue *_vq) +{ + + struct vring_virtqueue *vq = to_vvq(_vq); + + return vq->vring.num; +} +EXPORT_SYMBOL_GPL(virtqueue_get_vring_size); + +MODULE_LICENSE("GPL"); diff --git a/addons/virtio/src/3.10.108/virtio_scsi.c b/addons/virtio/src/3.10.108/virtio_scsi.c new file mode 100644 index 00000000..11f5326f --- /dev/null +++ b/addons/virtio/src/3.10.108/virtio_scsi.c @@ -0,0 +1,1086 @@ +/* + * Virtio SCSI HBA driver + * + * Copyright IBM Corp. 2010 + * Copyright Red Hat, Inc. 2011 + * + * Authors: + * Stefan Hajnoczi + * Paolo Bonzini + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define VIRTIO_SCSI_MEMPOOL_SZ 64 +#define VIRTIO_SCSI_EVENT_LEN 8 +#define VIRTIO_SCSI_VQ_BASE 2 + +/* Command queue element */ +struct virtio_scsi_cmd { + struct scsi_cmnd *sc; + struct completion *comp; + union { + struct virtio_scsi_cmd_req cmd; + struct virtio_scsi_ctrl_tmf_req tmf; + struct virtio_scsi_ctrl_an_req an; + } req; + union { + struct virtio_scsi_cmd_resp cmd; + struct virtio_scsi_ctrl_tmf_resp tmf; + struct virtio_scsi_ctrl_an_resp an; + struct virtio_scsi_event evt; + } resp; +} ____cacheline_aligned_in_smp; + +struct virtio_scsi_event_node { + struct virtio_scsi *vscsi; + struct virtio_scsi_event event; + struct work_struct work; +}; + +struct virtio_scsi_vq { + /* Protects vq */ + spinlock_t vq_lock; + + struct virtqueue *vq; +}; + +/* + * Per-target queue state. + * + * This struct holds the data needed by the queue steering policy. When a + * target is sent multiple requests, we need to drive them to the same queue so + * that FIFO processing order is kept. However, if a target was idle, we can + * choose a queue arbitrarily. In this case the queue is chosen according to + * the current VCPU, so the driver expects the number of request queues to be + * equal to the number of VCPUs. This makes it easy and fast to select the + * queue, and also lets the driver optimize the IRQ affinity for the virtqueues + * (each virtqueue's affinity is set to the CPU that "owns" the queue). + * + * An interesting effect of this policy is that only writes to req_vq need to + * take the tgt_lock. Read can be done outside the lock because: + * + * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1. + * In that case, no other CPU is reading req_vq: even if they were in + * virtscsi_queuecommand_multi, they would be spinning on tgt_lock. + * + * - reads of req_vq only occur when the target is not idle (reqs != 0). + * A CPU that enters virtscsi_queuecommand_multi will not modify req_vq. + * + * Similarly, decrements of reqs are never concurrent with writes of req_vq. + * Thus they can happen outside the tgt_lock, provided of course we make reqs + * an atomic_t. + */ +struct virtio_scsi_target_state { + /* This spinlock never held at the same time as vq_lock. */ + spinlock_t tgt_lock; + + /* Count of outstanding requests. */ + atomic_t reqs; + + /* Currently active virtqueue for requests sent to this target. 
*/ + struct virtio_scsi_vq *req_vq; +}; + +/* Driver instance state */ +struct virtio_scsi { + struct virtio_device *vdev; + + /* Get some buffers ready for event vq */ + struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN]; + + u32 num_queues; + + /* If the affinity hint is set for virtqueues */ + bool affinity_hint_set; + + /* CPU hotplug notifier */ + struct notifier_block nb; + + struct virtio_scsi_vq ctrl_vq; + struct virtio_scsi_vq event_vq; + struct virtio_scsi_vq req_vqs[]; +}; + +static struct kmem_cache *virtscsi_cmd_cache; +static mempool_t *virtscsi_cmd_pool; + +static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev) +{ + return vdev->priv; +} + +static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid) +{ + if (!resid) + return; + + if (!scsi_bidi_cmnd(sc)) { + scsi_set_resid(sc, resid); + return; + } + + scsi_in(sc)->resid = min(resid, scsi_in(sc)->length); + scsi_out(sc)->resid = resid - scsi_in(sc)->resid; +} + +/** + * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done + * + * Called with vq_lock held. + */ +static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) +{ + struct virtio_scsi_cmd *cmd = buf; + struct scsi_cmnd *sc = cmd->sc; + struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; + struct virtio_scsi_target_state *tgt = + scsi_target(sc->device)->hostdata; + + dev_dbg(&sc->device->sdev_gendev, + "cmd %p response %u status %#02x sense_len %u\n", + sc, resp->response, resp->status, resp->sense_len); + + sc->result = resp->status; + virtscsi_compute_resid(sc, resp->resid); + switch (resp->response) { + case VIRTIO_SCSI_S_OK: + set_host_byte(sc, DID_OK); + break; + case VIRTIO_SCSI_S_OVERRUN: + set_host_byte(sc, DID_ERROR); + break; + case VIRTIO_SCSI_S_ABORTED: + set_host_byte(sc, DID_ABORT); + break; + case VIRTIO_SCSI_S_BAD_TARGET: + set_host_byte(sc, DID_BAD_TARGET); + break; + case VIRTIO_SCSI_S_RESET: + set_host_byte(sc, DID_RESET); + break; + case VIRTIO_SCSI_S_BUSY: + set_host_byte(sc, DID_BUS_BUSY); + break; + case VIRTIO_SCSI_S_TRANSPORT_FAILURE: + set_host_byte(sc, DID_TRANSPORT_DISRUPTED); + break; + case VIRTIO_SCSI_S_TARGET_FAILURE: + set_host_byte(sc, DID_TARGET_FAILURE); + break; + case VIRTIO_SCSI_S_NEXUS_FAILURE: + set_host_byte(sc, DID_NEXUS_FAILURE); + break; + default: + scmd_printk(KERN_WARNING, sc, "Unknown response %d", + resp->response); + /* fall through */ + case VIRTIO_SCSI_S_FAILURE: + set_host_byte(sc, DID_ERROR); + break; + } + + WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE); + if (sc->sense_buffer) { + memcpy(sc->sense_buffer, resp->sense, + min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE)); + if (resp->sense_len) + set_driver_byte(sc, DRIVER_SENSE); + } + + mempool_free(cmd, virtscsi_cmd_pool); + sc->scsi_done(sc); + + atomic_dec(&tgt->reqs); +} + +static void virtscsi_vq_done(struct virtio_scsi *vscsi, + struct virtio_scsi_vq *virtscsi_vq, + void (*fn)(struct virtio_scsi *vscsi, void *buf)) +{ + void *buf; + unsigned int len; + unsigned long flags; + struct virtqueue *vq = virtscsi_vq->vq; + + spin_lock_irqsave(&virtscsi_vq->vq_lock, flags); + do { + virtqueue_disable_cb(vq); + while ((buf = virtqueue_get_buf(vq, &len)) != NULL) + fn(vscsi, buf); + } while (!virtqueue_enable_cb(vq)); + spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags); +} + +static void virtscsi_req_done(struct virtqueue *vq) +{ + struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + int index = vq->index - VIRTIO_SCSI_VQ_BASE; + struct 
virtio_scsi_vq *req_vq = &vscsi->req_vqs[index]; + + /* + * Read req_vq before decrementing the reqs field in + * virtscsi_complete_cmd. + * + * With barriers: + * + * CPU #0 virtscsi_queuecommand_multi (CPU #1) + * ------------------------------------------------------------ + * lock vq_lock + * read req_vq + * read reqs (reqs = 1) + * write reqs (reqs = 0) + * increment reqs (reqs = 1) + * write req_vq + * + * Possible reordering without barriers: + * + * CPU #0 virtscsi_queuecommand_multi (CPU #1) + * ------------------------------------------------------------ + * lock vq_lock + * read reqs (reqs = 1) + * write reqs (reqs = 0) + * increment reqs (reqs = 1) + * write req_vq + * read (wrong) req_vq + * + * We do not need a full smp_rmb, because req_vq is required to get + * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored + * in the virtqueue as the user token. + */ + smp_read_barrier_depends(); + + virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); +}; + +static void virtscsi_poll_requests(struct virtio_scsi *vscsi) +{ + int i, num_vqs; + + num_vqs = vscsi->num_queues; + for (i = 0; i < num_vqs; i++) + virtscsi_vq_done(vscsi, &vscsi->req_vqs[i], + virtscsi_complete_cmd); +} + +static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf) +{ + struct virtio_scsi_cmd *cmd = buf; + + if (cmd->comp) + complete_all(cmd->comp); + else + mempool_free(cmd, virtscsi_cmd_pool); +} + +static void virtscsi_ctrl_done(struct virtqueue *vq) +{ + struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + + virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free); +}; + +static void virtscsi_handle_event(struct work_struct *work); + +static int virtscsi_kick_event(struct virtio_scsi *vscsi, + struct virtio_scsi_event_node *event_node) +{ + int err; + struct scatterlist sg; + unsigned long flags; + + INIT_WORK(&event_node->work, virtscsi_handle_event); + sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); + + spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); + + err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node, + GFP_ATOMIC); + if (!err) + virtqueue_kick(vscsi->event_vq.vq); + + spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags); + + return err; +} + +static int virtscsi_kick_event_all(struct virtio_scsi *vscsi) +{ + int i; + + for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) { + vscsi->event_list[i].vscsi = vscsi; + virtscsi_kick_event(vscsi, &vscsi->event_list[i]); + } + + return 0; +} + +static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi) +{ + int i; + + for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) + cancel_work_sync(&vscsi->event_list[i].work); +} + +static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, + struct virtio_scsi_event *event) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + unsigned int target = event->lun[1]; + unsigned int lun = (event->lun[2] << 8) | event->lun[3]; + + switch (event->reason) { + case VIRTIO_SCSI_EVT_RESET_RESCAN: + scsi_add_device(shost, 0, target, lun); + break; + case VIRTIO_SCSI_EVT_RESET_REMOVED: + sdev = scsi_device_lookup(shost, 0, target, lun); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } else { + pr_err("SCSI device %d 0 %d %d not found\n", + shost->host_no, target, lun); + } + break; + default: + pr_info("Unsupport virtio scsi event reason %x\n", event->reason); + } +} + +static void virtscsi_handle_param_change(struct virtio_scsi *vscsi, + 
struct virtio_scsi_event *event) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + unsigned int target = event->lun[1]; + unsigned int lun = (event->lun[2] << 8) | event->lun[3]; + u8 asc = event->reason & 255; + u8 ascq = event->reason >> 8; + + sdev = scsi_device_lookup(shost, 0, target, lun); + if (!sdev) { + pr_err("SCSI device %d 0 %d %d not found\n", + shost->host_no, target, lun); + return; + } + + /* Handle "Parameters changed", "Mode parameters changed", and + "Capacity data has changed". */ + if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09)) + scsi_rescan_device(&sdev->sdev_gendev); + + scsi_device_put(sdev); +} + +static void virtscsi_handle_event(struct work_struct *work) +{ + struct virtio_scsi_event_node *event_node = + container_of(work, struct virtio_scsi_event_node, work); + struct virtio_scsi *vscsi = event_node->vscsi; + struct virtio_scsi_event *event = &event_node->event; + + if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) { + event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED; + scsi_scan_host(virtio_scsi_host(vscsi->vdev)); + } + + switch (event->event) { + case VIRTIO_SCSI_T_NO_EVENT: + break; + case VIRTIO_SCSI_T_TRANSPORT_RESET: + virtscsi_handle_transport_reset(vscsi, event); + break; + case VIRTIO_SCSI_T_PARAM_CHANGE: + virtscsi_handle_param_change(vscsi, event); + break; + default: + pr_err("Unsupport virtio scsi event %x\n", event->event); + } + virtscsi_kick_event(vscsi, event_node); +} + +static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf) +{ + struct virtio_scsi_event_node *event_node = buf; + + schedule_work(&event_node->work); +} + +static void virtscsi_event_done(struct virtqueue *vq) +{ + struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + + virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event); +}; + +/** + * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue + * @vq : the struct virtqueue we're talking about + * @cmd : command structure + * @req_size : size of the request buffer + * @resp_size : size of the response buffer + * @gfp : flags to use for memory allocations + */ +static int virtscsi_add_cmd(struct virtqueue *vq, + struct virtio_scsi_cmd *cmd, + size_t req_size, size_t resp_size, gfp_t gfp) +{ + struct scsi_cmnd *sc = cmd->sc; + struct scatterlist *sgs[4], req, resp; + struct sg_table *out, *in; + unsigned out_num = 0, in_num = 0; + + out = in = NULL; + + if (sc && sc->sc_data_direction != DMA_NONE) { + if (sc->sc_data_direction != DMA_FROM_DEVICE) + out = &scsi_out(sc)->table; + if (sc->sc_data_direction != DMA_TO_DEVICE) + in = &scsi_in(sc)->table; + } + + /* Request header. */ + sg_init_one(&req, &cmd->req, req_size); + sgs[out_num++] = &req; + + /* Data-out buffer. */ + if (out) + sgs[out_num++] = out->sgl; + + /* Response header. 
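+	 * (Editor's note) The sgs[] array handed to virtqueue_add_sgs()
+	 * at the end of this function therefore holds, in order:
+	 * request header, optional data-out table, response header,
+	 * optional data-in table -- out_num readable entries followed
+	 * by in_num writable ones.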
*/ + sg_init_one(&resp, &cmd->resp, resp_size); + sgs[out_num + in_num++] = &resp; + + /* Data-in buffer */ + if (in) + sgs[out_num + in_num++] = in->sgl; + + return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp); +} + +static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq, + struct virtio_scsi_cmd *cmd, + size_t req_size, size_t resp_size, gfp_t gfp) +{ + unsigned long flags; + int err; + bool needs_kick = false; + + spin_lock_irqsave(&vq->vq_lock, flags); + err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp); + if (!err) + needs_kick = virtqueue_kick_prepare(vq->vq); + + spin_unlock_irqrestore(&vq->vq_lock, flags); + + if (needs_kick) + virtqueue_notify(vq->vq); + return err; +} + +static int virtscsi_queuecommand(struct virtio_scsi *vscsi, + struct virtio_scsi_vq *req_vq, + struct scsi_cmnd *sc) +{ + struct virtio_scsi_cmd *cmd; + int ret; + + struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); + + /* TODO: check feature bit and fail if unsupported? */ + BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL); + + dev_dbg(&sc->device->sdev_gendev, + "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); + + ret = SCSI_MLQUEUE_HOST_BUSY; + cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC); + if (!cmd) + goto out; + + memset(cmd, 0, sizeof(*cmd)); + cmd->sc = sc; + cmd->req.cmd = (struct virtio_scsi_cmd_req){ + .lun[0] = 1, + .lun[1] = sc->device->id, + .lun[2] = (sc->device->lun >> 8) | 0x40, + .lun[3] = sc->device->lun & 0xff, + .tag = (unsigned long)sc, + .task_attr = VIRTIO_SCSI_S_SIMPLE, + .prio = 0, + .crn = 0, + }; + + BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); + memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); + + if (virtscsi_kick_cmd(req_vq, cmd, + sizeof cmd->req.cmd, sizeof cmd->resp.cmd, + GFP_ATOMIC) == 0) + ret = 0; + else + mempool_free(cmd, virtscsi_cmd_pool); + +out: + return ret; +} + +static int virtscsi_queuecommand_single(struct Scsi_Host *sh, + struct scsi_cmnd *sc) +{ + struct virtio_scsi *vscsi = shost_priv(sh); + struct virtio_scsi_target_state *tgt = + scsi_target(sc->device)->hostdata; + + atomic_inc(&tgt->reqs); + return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc); +} + +static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, + struct virtio_scsi_target_state *tgt) +{ + struct virtio_scsi_vq *vq; + unsigned long flags; + u32 queue_num; + + spin_lock_irqsave(&tgt->tgt_lock, flags); + + /* + * The memory barrier after atomic_inc_return matches + * the smp_read_barrier_depends() in virtscsi_req_done. 
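+	 * (Editor's note: atomic_inc_return(), like all value-returning
+	 * atomics, implies a full memory barrier, which is what makes
+	 * this pairing work.)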
+ */ + if (atomic_inc_return(&tgt->reqs) > 1) + vq = ACCESS_ONCE(tgt->req_vq); + else { + queue_num = smp_processor_id(); + while (unlikely(queue_num >= vscsi->num_queues)) + queue_num -= vscsi->num_queues; + + tgt->req_vq = vq = &vscsi->req_vqs[queue_num]; + } + + spin_unlock_irqrestore(&tgt->tgt_lock, flags); + return vq; +} + +static int virtscsi_queuecommand_multi(struct Scsi_Host *sh, + struct scsi_cmnd *sc) +{ + struct virtio_scsi *vscsi = shost_priv(sh); + struct virtio_scsi_target_state *tgt = + scsi_target(sc->device)->hostdata; + struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt); + + return virtscsi_queuecommand(vscsi, req_vq, sc); +} + +static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) +{ + DECLARE_COMPLETION_ONSTACK(comp); + int ret = FAILED; + + cmd->comp = &comp; + if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd, + sizeof cmd->req.tmf, sizeof cmd->resp.tmf, + GFP_NOIO) < 0) + goto out; + + wait_for_completion(&comp); + if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK || + cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) + ret = SUCCESS; + + /* + * The spec guarantees that all requests related to the TMF have + * been completed, but the callback might not have run yet if + * we're using independent interrupts (e.g. MSI). Poll the + * virtqueues once. + * + * In the abort case, sc->scsi_done will do nothing, because + * the block layer must have detected a timeout and as a result + * REQ_ATOM_COMPLETE has been set. + */ + virtscsi_poll_requests(vscsi); + +out: + mempool_free(cmd, virtscsi_cmd_pool); + return ret; +} + +static int virtscsi_device_reset(struct scsi_cmnd *sc) +{ + struct virtio_scsi *vscsi = shost_priv(sc->device->host); + struct virtio_scsi_cmd *cmd; + + sdev_printk(KERN_INFO, sc->device, "device reset\n"); + cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); + if (!cmd) + return FAILED; + + memset(cmd, 0, sizeof(*cmd)); + cmd->sc = sc; + cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ + .type = VIRTIO_SCSI_T_TMF, + .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET, + .lun[0] = 1, + .lun[1] = sc->device->id, + .lun[2] = (sc->device->lun >> 8) | 0x40, + .lun[3] = sc->device->lun & 0xff, + }; + return virtscsi_tmf(vscsi, cmd); +} + +static int virtscsi_abort(struct scsi_cmnd *sc) +{ + struct virtio_scsi *vscsi = shost_priv(sc->device->host); + struct virtio_scsi_cmd *cmd; + + scmd_printk(KERN_INFO, sc, "abort\n"); + cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); + if (!cmd) + return FAILED; + + memset(cmd, 0, sizeof(*cmd)); + cmd->sc = sc; + cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ + .type = VIRTIO_SCSI_T_TMF, + .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK, + .lun[0] = 1, + .lun[1] = sc->device->id, + .lun[2] = (sc->device->lun >> 8) | 0x40, + .lun[3] = sc->device->lun & 0xff, + .tag = (unsigned long)sc, + }; + return virtscsi_tmf(vscsi, cmd); +} + +static int virtscsi_target_alloc(struct scsi_target *starget) +{ + struct virtio_scsi_target_state *tgt = + kmalloc(sizeof(*tgt), GFP_KERNEL); + if (!tgt) + return -ENOMEM; + + spin_lock_init(&tgt->tgt_lock); + atomic_set(&tgt->reqs, 0); + tgt->req_vq = NULL; + + starget->hostdata = tgt; + return 0; +} + +static void virtscsi_target_destroy(struct scsi_target *starget) +{ + struct virtio_scsi_target_state *tgt = starget->hostdata; + kfree(tgt); +} + +static struct scsi_host_template virtscsi_host_template_single = { + .module = THIS_MODULE, + .name = "Virtio SCSI HBA", + .proc_name = "virtio_scsi", + .this_id = -1, + .queuecommand = virtscsi_queuecommand_single, +
.eh_abort_handler = virtscsi_abort, + .eh_device_reset_handler = virtscsi_device_reset, + + .can_queue = 1024, + .dma_boundary = UINT_MAX, + .use_clustering = ENABLE_CLUSTERING, + .target_alloc = virtscsi_target_alloc, + .target_destroy = virtscsi_target_destroy, +}; + +static struct scsi_host_template virtscsi_host_template_multi = { + .module = THIS_MODULE, + .name = "Virtio SCSI HBA", + .proc_name = "virtio_scsi", + .this_id = -1, + .queuecommand = virtscsi_queuecommand_multi, + .eh_abort_handler = virtscsi_abort, + .eh_device_reset_handler = virtscsi_device_reset, + + .can_queue = 1024, + .dma_boundary = UINT_MAX, + .use_clustering = ENABLE_CLUSTERING, + .target_alloc = virtscsi_target_alloc, + .target_destroy = virtscsi_target_destroy, +}; + +#define virtscsi_config_get(vdev, fld) \ + ({ \ + typeof(((struct virtio_scsi_config *)0)->fld) __val; \ + vdev->config->get(vdev, \ + offsetof(struct virtio_scsi_config, fld), \ + &__val, sizeof(__val)); \ + __val; \ + }) + +#define virtscsi_config_set(vdev, fld, val) \ + (void)({ \ + typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \ + vdev->config->set(vdev, \ + offsetof(struct virtio_scsi_config, fld), \ + &__val, sizeof(__val)); \ + }) + +static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) +{ + int i; + int cpu; + + /* In multiqueue mode, when the number of cpu is equal + * to the number of request queues, we let the qeueues + * to be private to one cpu by setting the affinity hint + * to eliminate the contention. + */ + if ((vscsi->num_queues == 1 || + vscsi->num_queues != num_online_cpus()) && affinity) { + if (vscsi->affinity_hint_set) + affinity = false; + else + return; + } + + if (affinity) { + i = 0; + for_each_online_cpu(cpu) { + virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu); + i++; + } + + vscsi->affinity_hint_set = true; + } else { + for (i = 0; i < vscsi->num_queues; i++) { + if (!vscsi->req_vqs[i].vq) + continue; + + virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); + } + + vscsi->affinity_hint_set = false; + } +} + +static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) +{ + get_online_cpus(); + __virtscsi_set_affinity(vscsi, affinity); + put_online_cpus(); +} + +static int virtscsi_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb); + switch(action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + case CPU_DEAD: + case CPU_DEAD_FROZEN: + __virtscsi_set_affinity(vscsi, true); + break; + default: + break; + } + return NOTIFY_OK; +} + +static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, + struct virtqueue *vq) +{ + spin_lock_init(&virtscsi_vq->vq_lock); + virtscsi_vq->vq = vq; +} + +static void virtscsi_scan(struct virtio_device *vdev) +{ + struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv; + + scsi_scan_host(shost); +} + +static void virtscsi_remove_vqs(struct virtio_device *vdev) +{ + struct Scsi_Host *sh = virtio_scsi_host(vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + + virtscsi_set_affinity(vscsi, false); + + /* Stop all the virtqueues. 
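+	 * (Editor's note: the reset guarantees the device stops
+	 * writing used entries and raising interrupts before
+	 * del_vqs() tears the rings down.)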
*/ + vdev->config->reset(vdev); + + vdev->config->del_vqs(vdev); +} + +static int virtscsi_init(struct virtio_device *vdev, + struct virtio_scsi *vscsi) +{ + int err; + u32 i; + u32 num_vqs; + vq_callback_t **callbacks; + const char **names; + struct virtqueue **vqs; + + num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE; + vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL); + callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL); + names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL); + + if (!callbacks || !vqs || !names) { + err = -ENOMEM; + goto out; + } + + callbacks[0] = virtscsi_ctrl_done; + callbacks[1] = virtscsi_event_done; + names[0] = "control"; + names[1] = "event"; + for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) { + callbacks[i] = virtscsi_req_done; + names[i] = "request"; + } + + /* Discover virtqueues and write information to configuration. */ + err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); + if (err) + goto out; + + virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]); + virtscsi_init_vq(&vscsi->event_vq, vqs[1]); + for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) + virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE], + vqs[i]); + + virtscsi_set_affinity(vscsi, true); + + virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE); + virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE); + + if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) + virtscsi_kick_event_all(vscsi); + + err = 0; + +out: + kfree(names); + kfree(callbacks); + kfree(vqs); + if (err) + virtscsi_remove_vqs(vdev); + return err; +} + +static int virtscsi_probe(struct virtio_device *vdev) +{ + struct Scsi_Host *shost; + struct virtio_scsi *vscsi; + int err; + u32 sg_elems, num_targets; + u32 cmd_per_lun; + u32 num_queues; + struct scsi_host_template *hostt; + + /* We need to know how many queues before we allocate. */ + num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; + + num_targets = virtscsi_config_get(vdev, max_target) + 1; + + if (num_queues == 1) + hostt = &virtscsi_host_template_single; + else + hostt = &virtscsi_host_template_multi; + + shost = scsi_host_alloc(hostt, + sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues); + if (!shost) + return -ENOMEM; + + sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1; + shost->sg_tablesize = sg_elems; + vscsi = shost_priv(shost); + vscsi->vdev = vdev; + vscsi->num_queues = num_queues; + vdev->priv = shost; + + err = virtscsi_init(vdev, vscsi); + if (err) + goto virtscsi_init_failed; + + vscsi->nb.notifier_call = &virtscsi_cpu_callback; + err = register_hotcpu_notifier(&vscsi->nb); + if (err) { + pr_err("registering cpu notifier failed\n"); + goto scsi_add_host_failed; + } + + cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; + shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); + shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; + + /* LUNs > 256 are reported with format 1, so they go in the range + * 16640-32767. + */ + shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000; + shost->max_id = num_targets; + shost->max_channel = 0; + shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; + err = scsi_add_host(shost, &vdev->dev); + if (err) + goto scsi_add_host_failed; + /* + * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan() + * after VIRTIO_CONFIG_S_DRIVER_OK has been set.. 
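+	 * (Editor's note: the virtio spec forbids driving the device
+	 * before DRIVER_OK, so the host scan must wait for the core
+	 * to set that status bit.)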
+ */ + return 0; + +scsi_add_host_failed: + vdev->config->del_vqs(vdev); +virtscsi_init_failed: + scsi_host_put(shost); + return err; +} + +static void virtscsi_remove(struct virtio_device *vdev) +{ + struct Scsi_Host *shost = virtio_scsi_host(vdev); + struct virtio_scsi *vscsi = shost_priv(shost); + + if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) + virtscsi_cancel_event_work(vscsi); + + scsi_remove_host(shost); + + unregister_hotcpu_notifier(&vscsi->nb); + + virtscsi_remove_vqs(vdev); + scsi_host_put(shost); +} + +#ifdef CONFIG_PM +static int virtscsi_freeze(struct virtio_device *vdev) +{ + struct Scsi_Host *sh = virtio_scsi_host(vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + + unregister_hotcpu_notifier(&vscsi->nb); + virtscsi_remove_vqs(vdev); + return 0; +} + +static int virtscsi_restore(struct virtio_device *vdev) +{ + struct Scsi_Host *sh = virtio_scsi_host(vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + int err; + + err = virtscsi_init(vdev, vscsi); + if (err) + return err; + + err = register_hotcpu_notifier(&vscsi->nb); + if (err) + vdev->config->del_vqs(vdev); + + return err; +} +#endif + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { + VIRTIO_SCSI_F_HOTPLUG, + VIRTIO_SCSI_F_CHANGE, +}; + +static struct virtio_driver virtio_scsi_driver = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtscsi_probe, + .scan = virtscsi_scan, +#ifdef CONFIG_PM + .freeze = virtscsi_freeze, + .restore = virtscsi_restore, +#endif + .remove = virtscsi_remove, +}; + +static int __init init(void) +{ + int ret = -ENOMEM; + + virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0); + if (!virtscsi_cmd_cache) { + pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n"); + goto error; + } + + + virtscsi_cmd_pool = + mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ, + virtscsi_cmd_cache); + if (!virtscsi_cmd_pool) { + pr_err("mempool_create() for virtscsi_cmd_pool failed\n"); + goto error; + } + ret = register_virtio_driver(&virtio_scsi_driver); + if (ret < 0) + goto error; + + return 0; + +error: + if (virtscsi_cmd_pool) { + mempool_destroy(virtscsi_cmd_pool); + virtscsi_cmd_pool = NULL; + } + if (virtscsi_cmd_cache) { + kmem_cache_destroy(virtscsi_cmd_cache); + virtscsi_cmd_cache = NULL; + } + return ret; +} + +static void __exit fini(void) +{ + unregister_virtio_driver(&virtio_scsi_driver); + mempool_destroy(virtscsi_cmd_pool); + kmem_cache_destroy(virtscsi_cmd_cache); +} +module_init(init); +module_exit(fini); + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Virtio SCSI HBA driver"); +MODULE_LICENSE("GPL"); diff --git a/addons/virtio/src/4.4.180/Makefile b/addons/virtio/src/4.4.180/Makefile new file mode 100644 index 00000000..c07407df --- /dev/null +++ b/addons/virtio/src/4.4.180/Makefile @@ -0,0 +1,7 @@ +obj-m += virtio.o virtio_ring.o +obj-m += virtio_mmio.o +obj-m += virtio_pci.o +virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o +virtio_pci-m += virtio_pci_legacy.o +obj-m += virtio_net.o +obj-m += virtio_scsi.o diff --git a/addons/virtio/src/4.4.180/config.c b/addons/virtio/src/4.4.180/config.c new file mode 100644 index 00000000..f70bcd2f --- /dev/null +++ b/addons/virtio/src/4.4.180/config.c @@ -0,0 +1,12 @@ +/* Configuration space parsing helpers for virtio. + * + * The configuration is [type][len][... len bytes ...] fields. 
+ * + * Copyright 2007 Rusty Russell, IBM Corporation. + * GPL v2 or later. + */ +#include +#include +#include +#include + diff --git a/addons/virtio/src/4.4.180/virtio.c b/addons/virtio/src/4.4.180/virtio.c new file mode 100644 index 00000000..462e1836 --- /dev/null +++ b/addons/virtio/src/4.4.180/virtio.c @@ -0,0 +1,422 @@ +#include +#include +#include +#include +#include +#include + +/* Unique numbering for virtio devices. */ +static DEFINE_IDA(virtio_index_ida); + +static ssize_t device_show(struct device *_d, + struct device_attribute *attr, char *buf) +{ + struct virtio_device *dev = dev_to_virtio(_d); + return sprintf(buf, "0x%04x\n", dev->id.device); +} +static DEVICE_ATTR_RO(device); + +static ssize_t vendor_show(struct device *_d, + struct device_attribute *attr, char *buf) +{ + struct virtio_device *dev = dev_to_virtio(_d); + return sprintf(buf, "0x%04x\n", dev->id.vendor); +} +static DEVICE_ATTR_RO(vendor); + +static ssize_t status_show(struct device *_d, + struct device_attribute *attr, char *buf) +{ + struct virtio_device *dev = dev_to_virtio(_d); + return sprintf(buf, "0x%08x\n", dev->config->get_status(dev)); +} +static DEVICE_ATTR_RO(status); + +static ssize_t modalias_show(struct device *_d, + struct device_attribute *attr, char *buf) +{ + struct virtio_device *dev = dev_to_virtio(_d); + return sprintf(buf, "virtio:d%08Xv%08X\n", + dev->id.device, dev->id.vendor); +} +static DEVICE_ATTR_RO(modalias); + +static ssize_t features_show(struct device *_d, + struct device_attribute *attr, char *buf) +{ + struct virtio_device *dev = dev_to_virtio(_d); + unsigned int i; + ssize_t len = 0; + + /* We actually represent this as a bitstring, as it could be + * arbitrary length in future. */ + for (i = 0; i < sizeof(dev->features)*8; i++) + len += sprintf(buf+len, "%c", + __virtio_test_bit(dev, i) ? '1' : '0'); + len += sprintf(buf+len, "\n"); + return len; +} +static DEVICE_ATTR_RO(features); + +static struct attribute *virtio_dev_attrs[] = { + &dev_attr_device.attr, + &dev_attr_vendor.attr, + &dev_attr_status.attr, + &dev_attr_modalias.attr, + &dev_attr_features.attr, + NULL, +}; +ATTRIBUTE_GROUPS(virtio_dev); + +static inline int virtio_id_match(const struct virtio_device *dev, + const struct virtio_device_id *id) +{ + if (id->device != dev->id.device && id->device != VIRTIO_DEV_ANY_ID) + return 0; + + return id->vendor == VIRTIO_DEV_ANY_ID || id->vendor == dev->id.vendor; +} + +/* This looks through all the IDs a driver claims to support. If any of them + * match, we return 1 and the kernel will call virtio_dev_probe(). 
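+ *
+ * (Editor's illustration) A driver declares the devices it claims with
+ * such a table; virtio_scsi.c in this same tree does exactly this:
+ *
+ *	static struct virtio_device_id id_table[] = {
+ *		{ VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
+ *		{ 0 },
+ *	};
+ *	MODULE_DEVICE_TABLE(virtio, id_table);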
*/ +static int virtio_dev_match(struct device *_dv, struct device_driver *_dr) +{ + unsigned int i; + struct virtio_device *dev = dev_to_virtio(_dv); + const struct virtio_device_id *ids; + + ids = drv_to_virtio(_dr)->id_table; + for (i = 0; ids[i].device; i++) + if (virtio_id_match(dev, &ids[i])) + return 1; + return 0; +} + +static int virtio_uevent(struct device *_dv, struct kobj_uevent_env *env) +{ + struct virtio_device *dev = dev_to_virtio(_dv); + + return add_uevent_var(env, "MODALIAS=virtio:d%08Xv%08X", + dev->id.device, dev->id.vendor); +} + +static void add_status(struct virtio_device *dev, unsigned status) +{ + dev->config->set_status(dev, dev->config->get_status(dev) | status); +} + +void virtio_check_driver_offered_feature(const struct virtio_device *vdev, + unsigned int fbit) +{ + unsigned int i; + struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver); + + for (i = 0; i < drv->feature_table_size; i++) + if (drv->feature_table[i] == fbit) + return; + + if (drv->feature_table_legacy) { + for (i = 0; i < drv->feature_table_size_legacy; i++) + if (drv->feature_table_legacy[i] == fbit) + return; + } + + BUG(); +} +EXPORT_SYMBOL_GPL(virtio_check_driver_offered_feature); + +static void __virtio_config_changed(struct virtio_device *dev) +{ + struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); + + if (!dev->config_enabled) + dev->config_change_pending = true; + else if (drv && drv->config_changed) + drv->config_changed(dev); +} + +void virtio_config_changed(struct virtio_device *dev) +{ + unsigned long flags; + + spin_lock_irqsave(&dev->config_lock, flags); + __virtio_config_changed(dev); + spin_unlock_irqrestore(&dev->config_lock, flags); +} +EXPORT_SYMBOL_GPL(virtio_config_changed); + +static void virtio_config_disable(struct virtio_device *dev) +{ + spin_lock_irq(&dev->config_lock); + dev->config_enabled = false; + spin_unlock_irq(&dev->config_lock); +} + +static void virtio_config_enable(struct virtio_device *dev) +{ + spin_lock_irq(&dev->config_lock); + dev->config_enabled = true; + if (dev->config_change_pending) + __virtio_config_changed(dev); + dev->config_change_pending = false; + spin_unlock_irq(&dev->config_lock); +} + +static int virtio_finalize_features(struct virtio_device *dev) +{ + int ret = dev->config->finalize_features(dev); + unsigned status; + + if (ret) + return ret; + + if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) + return 0; + + add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); + status = dev->config->get_status(dev); + if (!(status & VIRTIO_CONFIG_S_FEATURES_OK)) { + dev_err(&dev->dev, "virtio: device refuses features: %x\n", + status); + return -ENODEV; + } + return 0; +} + +static int virtio_dev_probe(struct device *_d) +{ + int err, i; + struct virtio_device *dev = dev_to_virtio(_d); + struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); + u64 device_features; + u64 driver_features; + u64 driver_features_legacy; + + /* We have a driver! */ + add_status(dev, VIRTIO_CONFIG_S_DRIVER); + + /* Figure out what features the device supports. */ + device_features = dev->config->get_features(dev); + + /* Figure out what features the driver supports. 
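+	 * (Editor's note) Each table entry f contributes bit
+	 * (1ULL << f) to the mask; dev->features below ends up as the
+	 * intersection of the driver's and the device's feature sets.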
*/ + driver_features = 0; + for (i = 0; i < drv->feature_table_size; i++) { + unsigned int f = drv->feature_table[i]; + BUG_ON(f >= 64); + driver_features |= (1ULL << f); + } + + /* Some drivers have a separate feature table for virtio v1.0 */ + if (drv->feature_table_legacy) { + driver_features_legacy = 0; + for (i = 0; i < drv->feature_table_size_legacy; i++) { + unsigned int f = drv->feature_table_legacy[i]; + BUG_ON(f >= 64); + driver_features_legacy |= (1ULL << f); + } + } else { + driver_features_legacy = driver_features; + } + + if (device_features & (1ULL << VIRTIO_F_VERSION_1)) + dev->features = driver_features & device_features; + else + dev->features = driver_features_legacy & device_features; + + /* Transport features always preserved to pass to finalize_features. */ + for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) + if (device_features & (1ULL << i)) + __virtio_set_bit(dev, i); + + err = virtio_finalize_features(dev); + if (err) + goto err; + + err = drv->probe(dev); + if (err) + goto err; + + /* If probe didn't do it, mark device DRIVER_OK ourselves. */ + if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK)) + virtio_device_ready(dev); + + if (drv->scan) + drv->scan(dev); + + virtio_config_enable(dev); + + return 0; +err: + add_status(dev, VIRTIO_CONFIG_S_FAILED); + return err; + +} + +static int virtio_dev_remove(struct device *_d) +{ + struct virtio_device *dev = dev_to_virtio(_d); + struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); + + virtio_config_disable(dev); + + drv->remove(dev); + + /* Driver should have reset device. */ + WARN_ON_ONCE(dev->config->get_status(dev)); + + /* Acknowledge the device's existence again. */ + add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + return 0; +} + +static struct bus_type virtio_bus = { + .name = "virtio", + .match = virtio_dev_match, + .dev_groups = virtio_dev_groups, + .uevent = virtio_uevent, + .probe = virtio_dev_probe, + .remove = virtio_dev_remove, +}; + +int register_virtio_driver(struct virtio_driver *driver) +{ + /* Catch this early. */ + BUG_ON(driver->feature_table_size && !driver->feature_table); + driver->driver.bus = &virtio_bus; + return driver_register(&driver->driver); +} +EXPORT_SYMBOL_GPL(register_virtio_driver); + +void unregister_virtio_driver(struct virtio_driver *driver) +{ + driver_unregister(&driver->driver); +} +EXPORT_SYMBOL_GPL(unregister_virtio_driver); + +int register_virtio_device(struct virtio_device *dev) +{ + int err; + + dev->dev.bus = &virtio_bus; + + /* Assign a unique device index and hence name. */ + err = ida_simple_get(&virtio_index_ida, 0, 0, GFP_KERNEL); + if (err < 0) + goto out; + + dev->index = err; + dev_set_name(&dev->dev, "virtio%u", dev->index); + + spin_lock_init(&dev->config_lock); + dev->config_enabled = false; + dev->config_change_pending = false; + + /* We always start by resetting the device, in case a previous + * driver messed it up. This also tests that code path a little. */ + dev->config->reset(dev); + + /* Acknowledge that we've seen the device. */ + add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + + INIT_LIST_HEAD(&dev->vqs); + + /* device_register() causes the bus infrastructure to look for a + * matching driver. 
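+	 * (Editor's note: when a matching driver is already
+	 * registered, the bus normally probes the device synchronously
+	 * from within this call.)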
*/ + err = device_register(&dev->dev); + if (err) + ida_simple_remove(&virtio_index_ida, dev->index); +out: + if (err) + add_status(dev, VIRTIO_CONFIG_S_FAILED); + return err; +} +EXPORT_SYMBOL_GPL(register_virtio_device); + +void unregister_virtio_device(struct virtio_device *dev) +{ + int index = dev->index; /* save for after device release */ + + device_unregister(&dev->dev); + ida_simple_remove(&virtio_index_ida, index); +} +EXPORT_SYMBOL_GPL(unregister_virtio_device); + +#ifdef CONFIG_PM_SLEEP +int virtio_device_freeze(struct virtio_device *dev) +{ + struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); + + virtio_config_disable(dev); + + dev->failed = dev->config->get_status(dev) & VIRTIO_CONFIG_S_FAILED; + + if (drv && drv->freeze) + return drv->freeze(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(virtio_device_freeze); + +int virtio_device_restore(struct virtio_device *dev) +{ + struct virtio_driver *drv = drv_to_virtio(dev->dev.driver); + int ret; + + /* We always start by resetting the device, in case a previous + * driver messed it up. */ + dev->config->reset(dev); + + /* Acknowledge that we've seen the device. */ + add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE); + + /* Maybe driver failed before freeze. + * Restore the failed status, for debugging. */ + if (dev->failed) + add_status(dev, VIRTIO_CONFIG_S_FAILED); + + if (!drv) + return 0; + + /* We have a driver! */ + add_status(dev, VIRTIO_CONFIG_S_DRIVER); + + ret = virtio_finalize_features(dev); + if (ret) + goto err; + + if (drv->restore) { + ret = drv->restore(dev); + if (ret) + goto err; + } + + /* Finally, tell the device we're all set */ + add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); + + virtio_config_enable(dev); + + return 0; + +err: + add_status(dev, VIRTIO_CONFIG_S_FAILED); + return ret; +} +EXPORT_SYMBOL_GPL(virtio_device_restore); +#endif + +static int virtio_init(void) +{ + if (bus_register(&virtio_bus) != 0) + panic("virtio bus registration failed"); + return 0; +} + +static void __exit virtio_exit(void) +{ + bus_unregister(&virtio_bus); + ida_destroy(&virtio_index_ida); +} +core_initcall(virtio_init); +module_exit(virtio_exit); + +MODULE_LICENSE("GPL"); diff --git a/addons/virtio/src/4.4.180/virtio_mmio.c b/addons/virtio/src/4.4.180/virtio_mmio.c new file mode 100644 index 00000000..f499d9da --- /dev/null +++ b/addons/virtio/src/4.4.180/virtio_mmio.c @@ -0,0 +1,770 @@ +/* + * Virtio memory mapped device driver + * + * Copyright 2011-2014, ARM Ltd. + * + * This module allows virtio devices to be used over a virtual, memory mapped + * platform device. + * + * The guest device(s) may be instantiated in one of three equivalent ways: + * + * 1. Static platform device in board's code, eg.: + * + * static struct platform_device v2m_virtio_device = { + * .name = "virtio-mmio", + * .id = -1, + * .num_resources = 2, + * .resource = (struct resource []) { + * { + * .start = 0x1001e000, + * .end = 0x1001e0ff, + * .flags = IORESOURCE_MEM, + * }, { + * .start = 42 + 32, + * .end = 42 + 32, + * .flags = IORESOURCE_IRQ, + * }, + * } + * }; + * + * 2. Device Tree node, eg.: + * + * virtio_block@1e000 { + * compatible = "virtio,mmio"; + * reg = <0x1e000 0x100>; + * interrupts = <42>; + * } + * + * 3. Kernel module (or command line) parameter. Can be used more than once - + * one device will be created for each one. 
Syntax:
+ *
+ *    [virtio_mmio.]device=<size>@<baseaddr>:<irq>[:<id>]
+ * where:
+ *    <size>     := size (can use standard suffixes like K, M or G)
+ *    <baseaddr> := physical base address
+ *    <irq>      := interrupt number (as passed to request_irq())
+ *    <id>       := (optional) platform device id
+ * eg.:
+ *    virtio_mmio.device=0x100@0x100b0000:48 \
+ *        virtio_mmio.device=1K@0x1001e000:74
+ *
+ *
+ *
+ * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#define pr_fmt(fmt) "virtio-mmio: " fmt
+
+#include <linux/acpi.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <uapi/linux/virtio_mmio.h>
+#include <linux/virtio_ring.h>
+
+
+
+/* The alignment to use between consumer and producer parts of vring.
+ * Currently hardcoded to the page size. */
+#define VIRTIO_MMIO_VRING_ALIGN PAGE_SIZE
+
+
+
+#define to_virtio_mmio_device(_plat_dev) \
+    container_of(_plat_dev, struct virtio_mmio_device, vdev)
+
+struct virtio_mmio_device {
+    struct virtio_device vdev;
+    struct platform_device *pdev;
+
+    void __iomem *base;
+    unsigned long version;
+
+    /* a list of queues so we can dispatch IRQs */
+    spinlock_t lock;
+    struct list_head virtqueues;
+};
+
+struct virtio_mmio_vq_info {
+    /* the actual virtqueue */
+    struct virtqueue *vq;
+
+    /* the number of entries in the queue */
+    unsigned int num;
+
+    /* the virtual address of the ring queue */
+    void *queue;
+
+    /* the list node for the virtqueues list */
+    struct list_head node;
+};
+
+
+
+/* Configuration interface */
+
+static u64 vm_get_features(struct virtio_device *vdev)
+{
+    struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+    u64 features;
+
+    writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
+    features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
+    features <<= 32;
+
+    writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
+    features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
+
+    return features;
+}
+
+static int vm_finalize_features(struct virtio_device *vdev)
+{
+    struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+    /* Give virtio_ring a chance to accept features.
*/
+    vring_transport_features(vdev);
+
+    /* Make sure there are no mixed devices */
+    if (vm_dev->version == 2 &&
+            !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
+        dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
+        return -EINVAL;
+    }
+
+    writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
+    writel((u32)(vdev->features >> 32),
+            vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);
+
+    writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
+    writel((u32)vdev->features,
+            vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);
+
+    return 0;
+}
+
+static void vm_get(struct virtio_device *vdev, unsigned offset,
+           void *buf, unsigned len)
+{
+    struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+    void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
+    u8 b;
+    __le16 w;
+    __le32 l;
+
+    if (vm_dev->version == 1) {
+        u8 *ptr = buf;
+        int i;
+
+        for (i = 0; i < len; i++)
+            ptr[i] = readb(base + offset + i);
+        return;
+    }
+
+    switch (len) {
+    case 1:
+        b = readb(base + offset);
+        memcpy(buf, &b, sizeof b);
+        break;
+    case 2:
+        w = cpu_to_le16(readw(base + offset));
+        memcpy(buf, &w, sizeof w);
+        break;
+    case 4:
+        l = cpu_to_le32(readl(base + offset));
+        memcpy(buf, &l, sizeof l);
+        break;
+    case 8:
+        l = cpu_to_le32(readl(base + offset));
+        memcpy(buf, &l, sizeof l);
+        l = cpu_to_le32(ioread32(base + offset + sizeof l));
+        memcpy(buf + sizeof l, &l, sizeof l);
+        break;
+    default:
+        BUG();
+    }
+}
+
+static void vm_set(struct virtio_device *vdev, unsigned offset,
+           const void *buf, unsigned len)
+{
+    struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+    void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
+    u8 b;
+    __le16 w;
+    __le32 l;
+
+    if (vm_dev->version == 1) {
+        const u8 *ptr = buf;
+        int i;
+
+        for (i = 0; i < len; i++)
+            writeb(ptr[i], base + offset + i);
+
+        return;
+    }
+
+    switch (len) {
+    case 1:
+        memcpy(&b, buf, sizeof b);
+        writeb(b, base + offset);
+        break;
+    case 2:
+        memcpy(&w, buf, sizeof w);
+        writew(le16_to_cpu(w), base + offset);
+        break;
+    case 4:
+        memcpy(&l, buf, sizeof l);
+        writel(le32_to_cpu(l), base + offset);
+        break;
+    case 8:
+        memcpy(&l, buf, sizeof l);
+        writel(le32_to_cpu(l), base + offset);
+        memcpy(&l, buf + sizeof l, sizeof l);
+        writel(le32_to_cpu(l), base + offset + sizeof l);
+        break;
+    default:
+        BUG();
+    }
+}
+
+static u32 vm_generation(struct virtio_device *vdev)
+{
+    struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+    if (vm_dev->version == 1)
+        return 0;
+    else
+        return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
+}
+
+static u8 vm_get_status(struct virtio_device *vdev)
+{
+    struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+    return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
+}
+
+static void vm_set_status(struct virtio_device *vdev, u8 status)
+{
+    struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+    /* We should never be setting status to 0. */
+    BUG_ON(status == 0);
+
+    writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
+}
+
+static void vm_reset(struct virtio_device *vdev)
+{
+    struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+
+    /* 0 status means a reset.
*/ + writel(0, vm_dev->base + VIRTIO_MMIO_STATUS); +} + + + +/* Transport interface */ + +/* the notify function used when creating a virt queue */ +static bool vm_notify(struct virtqueue *vq) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); + + /* We write the queue's selector into the notification register to + * signal the other end */ + writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY); + return true; +} + +/* Notify all virtqueues on an interrupt. */ +static irqreturn_t vm_interrupt(int irq, void *opaque) +{ + struct virtio_mmio_device *vm_dev = opaque; + struct virtio_mmio_vq_info *info; + unsigned long status; + unsigned long flags; + irqreturn_t ret = IRQ_NONE; + + /* Read and acknowledge interrupts */ + status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS); + writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK); + + if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) { + virtio_config_changed(&vm_dev->vdev); + ret = IRQ_HANDLED; + } + + if (likely(status & VIRTIO_MMIO_INT_VRING)) { + spin_lock_irqsave(&vm_dev->lock, flags); + list_for_each_entry(info, &vm_dev->virtqueues, node) + ret |= vring_interrupt(irq, info->vq); + spin_unlock_irqrestore(&vm_dev->lock, flags); + } + + return ret; +} + + + +static void vm_del_vq(struct virtqueue *vq) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev); + struct virtio_mmio_vq_info *info = vq->priv; + unsigned long flags, size; + unsigned int index = vq->index; + + spin_lock_irqsave(&vm_dev->lock, flags); + list_del(&info->node); + spin_unlock_irqrestore(&vm_dev->lock, flags); + + vring_del_virtqueue(vq); + + /* Select and deactivate the queue */ + writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); + if (vm_dev->version == 1) { + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + } else { + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); + WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY)); + } + + size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN)); + free_pages_exact(info->queue, size); + kfree(info); +} + +static void vm_del_vqs(struct virtio_device *vdev) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + struct virtqueue *vq, *n; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) + vm_del_vq(vq); + + free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev); +} + + + +static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + struct virtio_mmio_vq_info *info; + struct virtqueue *vq; + unsigned long flags, size; + int err; + + if (!name) + return NULL; + + /* Select the queue we're interested in */ + writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); + + /* Queue shouldn't already be set up. */ + if (readl(vm_dev->base + (vm_dev->version == 1 ? 
+ VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) { + err = -ENOENT; + goto error_available; + } + + /* Allocate and fill out our active queue description */ + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + err = -ENOMEM; + goto error_kmalloc; + } + + /* Allocate pages for the queue - start with a queue as big as + * possible (limited by maximum size allowed by device), drop down + * to a minimal size, just big enough to fit descriptor table + * and two rings (which makes it "alignment_size * 2") + */ + info->num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX); + + /* If the device reports a 0 entry queue, we won't be able to + * use it to perform I/O, and vring_new_virtqueue() can't create + * empty queues anyway, so don't bother to set up the device. + */ + if (info->num == 0) { + err = -ENOENT; + goto error_alloc_pages; + } + + while (1) { + size = PAGE_ALIGN(vring_size(info->num, + VIRTIO_MMIO_VRING_ALIGN)); + /* Did the last iter shrink the queue below minimum size? */ + if (size < VIRTIO_MMIO_VRING_ALIGN * 2) { + err = -ENOMEM; + goto error_alloc_pages; + } + + info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO); + if (info->queue) + break; + + info->num /= 2; + } + + /* Create the vring */ + vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev, + true, info->queue, vm_notify, callback, name); + if (!vq) { + err = -ENOMEM; + goto error_new_virtqueue; + } + + /* Activate the queue */ + writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM); + if (vm_dev->version == 1) { + writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN); + writel(virt_to_phys(info->queue) >> PAGE_SHIFT, + vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + } else { + u64 addr; + + addr = virt_to_phys(info->queue); + writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW); + writel((u32)(addr >> 32), + vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH); + + addr = virt_to_phys(virtqueue_get_avail(vq)); + writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW); + writel((u32)(addr >> 32), + vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH); + + addr = virt_to_phys(virtqueue_get_used(vq)); + writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW); + writel((u32)(addr >> 32), + vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH); + + writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); + } + + vq->priv = info; + info->vq = vq; + + spin_lock_irqsave(&vm_dev->lock, flags); + list_add(&info->node, &vm_dev->virtqueues); + spin_unlock_irqrestore(&vm_dev->lock, flags); + + return vq; + +error_new_virtqueue: + if (vm_dev->version == 1) { + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + } else { + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); + WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY)); + } + free_pages_exact(info->queue, size); +error_alloc_pages: + kfree(info); +error_kmalloc: +error_available: + return ERR_PTR(err); +} + +static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + unsigned int irq = platform_get_irq(vm_dev->pdev, 0); + int i, err; + + err = request_irq(irq, vm_interrupt, IRQF_SHARED, + dev_name(&vdev->dev), vm_dev); + if (err) + return err; + + for (i = 0; i < nvqs; ++i) { + vqs[i] = vm_setup_vq(vdev, i, callbacks[i], names[i]); + if (IS_ERR(vqs[i])) { + vm_del_vqs(vdev); + return PTR_ERR(vqs[i]); + } + } + + return 0; +} + +static const char *vm_bus_name(struct virtio_device *vdev) +{ + 
struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + + return vm_dev->pdev->name; +} + +static const struct virtio_config_ops virtio_mmio_config_ops = { + .get = vm_get, + .set = vm_set, + .generation = vm_generation, + .get_status = vm_get_status, + .set_status = vm_set_status, + .reset = vm_reset, + .find_vqs = vm_find_vqs, + .del_vqs = vm_del_vqs, + .get_features = vm_get_features, + .finalize_features = vm_finalize_features, + .bus_name = vm_bus_name, +}; + + + +/* Platform device */ + +static int virtio_mmio_probe(struct platform_device *pdev) +{ + struct virtio_mmio_device *vm_dev; + struct resource *mem; + unsigned long magic; + + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -EINVAL; + + if (!devm_request_mem_region(&pdev->dev, mem->start, + resource_size(mem), pdev->name)) + return -EBUSY; + + vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); + if (!vm_dev) + return -ENOMEM; + + vm_dev->vdev.dev.parent = &pdev->dev; + vm_dev->vdev.config = &virtio_mmio_config_ops; + vm_dev->pdev = pdev; + INIT_LIST_HEAD(&vm_dev->virtqueues); + spin_lock_init(&vm_dev->lock); + + vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); + if (vm_dev->base == NULL) + return -EFAULT; + + /* Check magic value */ + magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE); + if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) { + dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic); + return -ENODEV; + } + + /* Check device version */ + vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION); + if (vm_dev->version < 1 || vm_dev->version > 2) { + dev_err(&pdev->dev, "Version %ld not supported!\n", + vm_dev->version); + return -ENXIO; + } + + vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID); + if (vm_dev->vdev.id.device == 0) { + /* + * virtio-mmio device with an ID 0 is a (dummy) placeholder + * with no function. End probing now with no error reported. 
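The magic check in the probe above expects the ASCII bytes of "virt" packed little-endian into one 32-bit register. A tiny sketch of the constant it compares against:

    #include <stdio.h>

    int main(void)
    {
        /* Same expression virtio_mmio_probe() uses above. */
        unsigned long magic = 'v' | 'i' << 8 | 'r' << 16 | 't' << 24;

        /* 0x74726976: 'v' in the low byte, 't' in the high byte */
        printf("expected magic: 0x%08lx\n", magic);
        return 0;
    }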
+         */
+        return -ENODEV;
+    }
+    vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
+
+    if (vm_dev->version == 1)
+        writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+
+    platform_set_drvdata(pdev, vm_dev);
+
+    return register_virtio_device(&vm_dev->vdev);
+}
+
+static int virtio_mmio_remove(struct platform_device *pdev)
+{
+    struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
+
+    unregister_virtio_device(&vm_dev->vdev);
+
+    return 0;
+}
+
+
+
+/* Devices list parameter */
+
+#if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES)
+
+static struct device vm_cmdline_parent = {
+    .init_name = "virtio-mmio-cmdline",
+};
+
+static int vm_cmdline_parent_registered;
+static int vm_cmdline_id;
+
+static int vm_cmdline_set(const char *device,
+        const struct kernel_param *kp)
+{
+    int err;
+    struct resource resources[2] = {};
+    char *str;
+    long long int base, size;
+    unsigned int irq;
+    int processed, consumed = 0;
+    struct platform_device *pdev;
+
+    /* Consume "size" part of the command line parameter */
+    size = memparse(device, &str);
+
+    /* Get "@<base>:<irq>[:<id>]" chunks */
+    processed = sscanf(str, "@%lli:%u%n:%d%n",
+            &base, &irq, &consumed,
+            &vm_cmdline_id, &consumed);
+
+    /*
+     * sscanf() must process at least 2 chunks; also there
+     * must be no extra characters after the last chunk, so
+     * str[consumed] must be '\0'
+     */
+    if (processed < 2 || str[consumed])
+        return -EINVAL;
+
+    resources[0].flags = IORESOURCE_MEM;
+    resources[0].start = base;
+    resources[0].end = base + size - 1;
+
+    resources[1].flags = IORESOURCE_IRQ;
+    resources[1].start = resources[1].end = irq;
+
+    if (!vm_cmdline_parent_registered) {
+        err = device_register(&vm_cmdline_parent);
+        if (err) {
+            pr_err("Failed to register parent device!\n");
+            return err;
+        }
+        vm_cmdline_parent_registered = 1;
+    }
+
+    pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n",
+           vm_cmdline_id,
+           (unsigned long long)resources[0].start,
+           (unsigned long long)resources[0].end,
+           (int)resources[1].start);
+
+    pdev = platform_device_register_resndata(&vm_cmdline_parent,
+            "virtio-mmio", vm_cmdline_id++,
+            resources, ARRAY_SIZE(resources), NULL, 0);
+    if (IS_ERR(pdev))
+        return PTR_ERR(pdev);
+
+    return 0;
+}
+
+static int vm_cmdline_get_device(struct device *dev, void *data)
+{
+    char *buffer = data;
+    unsigned int len = strlen(buffer);
+    struct platform_device *pdev = to_platform_device(dev);
+
+    snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
+            pdev->resource[0].end - pdev->resource[0].start + 1ULL,
+            (unsigned long long)pdev->resource[0].start,
+            (unsigned long long)pdev->resource[1].start,
+            pdev->id);
+    return 0;
+}
+
+static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
+{
+    buffer[0] = '\0';
+    device_for_each_child(&vm_cmdline_parent, buffer,
+            vm_cmdline_get_device);
+    return strlen(buffer) + 1;
+}
+
+static const struct kernel_param_ops vm_cmdline_param_ops = {
+    .set = vm_cmdline_set,
+    .get = vm_cmdline_get,
+};
+
+device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR);
+
+static int vm_unregister_cmdline_device(struct device *dev,
+        void *data)
+{
+    platform_device_unregister(to_platform_device(dev));
+
+    return 0;
+}
+
+static void vm_unregister_cmdline_devices(void)
+{
+    if (vm_cmdline_parent_registered) {
+        device_for_each_child(&vm_cmdline_parent, NULL,
+                vm_unregister_cmdline_device);
+        device_unregister(&vm_cmdline_parent);
+        vm_cmdline_parent_registered = 0;
+    }
+}
+
+#else
+
+static void vm_unregister_cmdline_devices(void)
+{
+}
+
+#endif
+
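To make the parameter format above concrete, here is a userspace approximation of vm_cmdline_set()'s parsing. strtoull() plus a hand-rolled K/M/G suffix handler stands in for the kernel's memparse(), and error handling is minimal:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const char *device = "1K@0x1001e000:74"; /* example from the header */
        char *str;
        long long base;
        unsigned long long size;
        unsigned int irq;
        int id = -1, processed, consumed = 0;

        /* Consume the "size" part; memparse() does this in-kernel. */
        size = strtoull(device, &str, 0);
        switch (*str) {
        case 'G': size <<= 10; /* fall through */
        case 'M': size <<= 10; /* fall through */
        case 'K': size <<= 10; str++; break;
        }

        /* Same scan pattern as vm_cmdline_set() above. */
        processed = sscanf(str, "@%lli:%u%n:%d%n",
                           &base, &irq, &consumed, &id, &consumed);
        if (processed < 2 || str[consumed]) {
            fprintf(stderr, "invalid device description\n");
            return 1;
        }

        printf("size=%#llx base=%#llx irq=%u id=%d\n",
               size, (unsigned long long)base, irq, id);
        return 0;
    }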
+/* Platform driver */
+
+static struct of_device_id virtio_mmio_match[] = {
+    { .compatible = "virtio,mmio", },
+    {},
+};
+MODULE_DEVICE_TABLE(of, virtio_mmio_match);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id virtio_mmio_acpi_match[] = {
+    { "LNRO0005", },
+    { }
+};
+MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
+#endif
+
+static struct platform_driver virtio_mmio_driver = {
+    .probe  = virtio_mmio_probe,
+    .remove = virtio_mmio_remove,
+    .driver = {
+        .name = "virtio-mmio",
+        .of_match_table = virtio_mmio_match,
+        .acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
+    },
+};
+
+static int __init virtio_mmio_init(void)
+{
+    return platform_driver_register(&virtio_mmio_driver);
+}
+
+static void __exit virtio_mmio_exit(void)
+{
+    platform_driver_unregister(&virtio_mmio_driver);
+    vm_unregister_cmdline_devices();
+}
+
+module_init(virtio_mmio_init);
+module_exit(virtio_mmio_exit);
+
+MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
+MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
+MODULE_LICENSE("GPL");
diff --git a/addons/virtio/src/4.4.180/virtio_net.c b/addons/virtio/src/4.4.180/virtio_net.c
new file mode 100644
index 00000000..2759d386
--- /dev/null
+++ b/addons/virtio/src/4.4.180/virtio_net.c
@@ -0,0 +1,2070 @@
+/* A network driver using virtio.
+ *
+ * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+//#define DEBUG
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/virtio.h>
+#include <linux/virtio_net.h>
+#include <linux/scatterlist.h>
+#include <linux/if_vlan.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/average.h>
+#include <net/busy_poll.h>
+
+static int napi_weight = NAPI_POLL_WEIGHT;
+module_param(napi_weight, int, 0444);
+
+static bool csum = true, gso = true;
+module_param(csum, bool, 0444);
+module_param(gso, bool, 0444);
+
+/* FIXME: MTU in config. */
+#define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
+#define GOOD_COPY_LEN 128
+
+/* RX packet size EWMA. The average packet size is used to determine the packet
+ * buffer size when refilling RX rings. As the entire RX ring may be refilled
+ * at once, the weight is chosen so that the EWMA will be insensitive to short-
+ * term, transient changes in packet size.
+ */
+DECLARE_EWMA(pkt_len, 1, 64)
+
+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
+/* Minimum alignment for mergeable packet buffers.
*/
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+        1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
+
+#define VIRTNET_DRIVER_VERSION "1.0.0"
+
+struct virtnet_stats {
+    struct u64_stats_sync tx_syncp;
+    struct u64_stats_sync rx_syncp;
+    u64 tx_bytes;
+    u64 tx_packets;
+
+    u64 rx_bytes;
+    u64 rx_packets;
+};
+
+/* Internal representation of a send virtqueue */
+struct send_queue {
+    /* Virtqueue associated with this send queue */
+    struct virtqueue *vq;
+
+    /* TX: fragments + linear part + virtio header */
+    struct scatterlist sg[MAX_SKB_FRAGS + 2];
+
+    /* Name of the send queue: output.$index */
+    char name[40];
+};
+
+/* Internal representation of a receive virtqueue */
+struct receive_queue {
+    /* Virtqueue associated with this receive_queue */
+    struct virtqueue *vq;
+
+    struct napi_struct napi;
+
+    /* Chain pages by the private ptr. */
+    struct page *pages;
+
+    /* Average packet length for mergeable receive buffers. */
+    struct ewma_pkt_len mrg_avg_pkt_len;
+
+    /* Page frag for packet buffer allocation. */
+    struct page_frag alloc_frag;
+
+    /* RX: fragments + linear part + virtio header */
+    struct scatterlist sg[MAX_SKB_FRAGS + 2];
+
+    /* Name of this receive queue: input.$index */
+    char name[40];
+};
+
+struct virtnet_info {
+    struct virtio_device *vdev;
+    struct virtqueue *cvq;
+    struct net_device *dev;
+    struct send_queue *sq;
+    struct receive_queue *rq;
+    unsigned int status;
+
+    /* Max # of queue pairs supported by the device */
+    u16 max_queue_pairs;
+
+    /* # of queue pairs currently used by the driver */
+    u16 curr_queue_pairs;
+
+    /* I like... big packets and I cannot lie! */
+    bool big_packets;
+
+    /* Host will merge rx buffers for big packets (shake it! shake it!) */
+    bool mergeable_rx_bufs;
+
+    /* Has control virtqueue */
+    bool has_cvq;
+
+    /* Host can handle any s/g split between our header and packet data */
+    bool any_header_sg;
+
+    /* Packet virtio header size */
+    u8 hdr_len;
+
+    /* Active statistics */
+    struct virtnet_stats __percpu *stats;
+
+    /* Work struct for refilling if we run low on memory. */
+    struct delayed_work refill;
+
+    /* Work struct for config space updates */
+    struct work_struct config_work;
+
+    /* Is the affinity hint set for the virtqueues? */
+    bool affinity_hint_set;
+
+    /* CPU hot plug notifier */
+    struct notifier_block nb;
+
+    /* Control VQ buffers: protected by the rtnl lock */
+    struct virtio_net_ctrl_hdr ctrl_hdr;
+    virtio_net_ctrl_ack ctrl_status;
+    u8 ctrl_promisc;
+    u8 ctrl_allmulti;
+};
+
+struct padded_vnet_hdr {
+    struct virtio_net_hdr_mrg_rxbuf hdr;
+    /*
+     * hdr is in a separate sg buffer, and data sg buffer shares same page
+     * with this header sg. This padding makes next sg 16 byte aligned
+     * after the header.
+     */
+    char padding[4];
+};
+
+/* Converting between virtqueue no. and kernel tx/rx queue no.
+ * 0:rx0 1:tx0 2:rx1 3:tx1 ...
2N:rxN 2N+1:txN 2N+2:cvq + */ +static int vq2txq(struct virtqueue *vq) +{ + return (vq->index - 1) / 2; +} + +static int txq2vq(int txq) +{ + return txq * 2 + 1; +} + +static int vq2rxq(struct virtqueue *vq) +{ + return vq->index / 2; +} + +static int rxq2vq(int rxq) +{ + return rxq * 2; +} + +static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb) +{ + return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb; +} + +/* + * private is used to chain pages for big packets, put the whole + * most recent used list in the beginning for reuse + */ +static void give_pages(struct receive_queue *rq, struct page *page) +{ + struct page *end; + + /* Find end of list, sew whole thing into vi->rq.pages. */ + for (end = page; end->private; end = (struct page *)end->private); + end->private = (unsigned long)rq->pages; + rq->pages = page; +} + +static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) +{ + struct page *p = rq->pages; + + if (p) { + rq->pages = (struct page *)p->private; + /* clear private here, it is used to chain pages */ + p->private = 0; + } else + p = alloc_page(gfp_mask); + return p; +} + +static void skb_xmit_done(struct virtqueue *vq) +{ + struct virtnet_info *vi = vq->vdev->priv; + + /* Suppress further interrupts. */ + virtqueue_disable_cb(vq); + + /* We were probably waiting for more output buffers. */ + netif_wake_subqueue(vi->dev, vq2txq(vq)); +} + +static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx) +{ + unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1); + return (truesize + 1) * MERGEABLE_BUFFER_ALIGN; +} + +static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx) +{ + return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN); + +} + +static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize) +{ + unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN; + return (unsigned long)buf | (size - 1); +} + +/* Called from bottom half context */ +static struct sk_buff *page_to_skb(struct virtnet_info *vi, + struct receive_queue *rq, + struct page *page, unsigned int offset, + unsigned int len, unsigned int truesize) +{ + struct sk_buff *skb; + struct virtio_net_hdr_mrg_rxbuf *hdr; + unsigned int copy, hdr_len, hdr_padded_len; + char *p; + + p = page_address(page) + offset; + + /* copy small packet so we can reuse these pages for small data */ + skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN); + if (unlikely(!skb)) + return NULL; + + hdr = skb_vnet_hdr(skb); + + hdr_len = vi->hdr_len; + if (vi->mergeable_rx_bufs) + hdr_padded_len = sizeof *hdr; + else + hdr_padded_len = sizeof(struct padded_vnet_hdr); + + memcpy(hdr, p, hdr_len); + + len -= hdr_len; + offset += hdr_padded_len; + p += hdr_padded_len; + + copy = len; + if (copy > skb_tailroom(skb)) + copy = skb_tailroom(skb); + memcpy(skb_put(skb, copy), p, copy); + + len -= copy; + offset += copy; + + if (vi->mergeable_rx_bufs) { + if (len) + skb_add_rx_frag(skb, 0, page, offset, len, truesize); + else + put_page(page); + return skb; + } + + /* + * Verify that we can indeed put this data into a skb. + * This is here to handle cases when the device erroneously + * tries to receive more than is possible. This is usually + * the case of a broken device. 
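The mergeable_* helpers above rely on buffers being aligned to MERGEABLE_BUFFER_ALIGN, so the buffer's truesize can ride in the low bits of its address. A standalone round-trip sketch of that packing, with an illustrative 256-byte alignment (the GCC-style alignment attribute is just to make the demo self-contained):

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UNITS 256 /* stand-in for MERGEABLE_BUFFER_ALIGN */

    /* Mirrors mergeable_buf_to_ctx(): size in units rides in the low bits. */
    static unsigned long pack(void *buf, unsigned int truesize)
    {
        return (unsigned long)buf | (truesize / ALIGN_UNITS - 1);
    }

    /* Mirrors mergeable_ctx_to_buf_address(). */
    static void *unpack_addr(unsigned long ctx)
    {
        return (void *)(ctx & -ALIGN_UNITS);
    }

    /* Mirrors mergeable_ctx_to_buf_truesize(). */
    static unsigned int unpack_truesize(unsigned long ctx)
    {
        return ((ctx & (ALIGN_UNITS - 1)) + 1) * ALIGN_UNITS;
    }

    int main(void)
    {
        /* Aligned dummy buffer; truesize must be a multiple of the
         * alignment, which add_recvbuf_mergeable() guarantees. */
        static char buf[4 * ALIGN_UNITS] __attribute__((aligned(ALIGN_UNITS)));
        unsigned long ctx = pack(buf, 3 * ALIGN_UNITS);

        printf("addr ok: %d, truesize: %u\n",
               unpack_addr(ctx) == (void *)buf, unpack_truesize(ctx));
        return 0;
    }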
+ */ + if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) { + net_dbg_ratelimited("%s: too much data\n", skb->dev->name); + dev_kfree_skb(skb); + return NULL; + } + BUG_ON(offset >= PAGE_SIZE); + while (len) { + unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, + frag_size, truesize); + len -= frag_size; + page = (struct page *)page->private; + offset = 0; + } + + if (page) + give_pages(rq, page); + + return skb; +} + +static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len) +{ + struct sk_buff * skb = buf; + + len -= vi->hdr_len; + skb_trim(skb, len); + + return skb; +} + +static struct sk_buff *receive_big(struct net_device *dev, + struct virtnet_info *vi, + struct receive_queue *rq, + void *buf, + unsigned int len) +{ + struct page *page = buf; + struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); + + if (unlikely(!skb)) + goto err; + + return skb; + +err: + dev->stats.rx_dropped++; + give_pages(rq, page); + return NULL; +} + +static struct sk_buff *receive_mergeable(struct net_device *dev, + struct virtnet_info *vi, + struct receive_queue *rq, + unsigned long ctx, + unsigned int len) +{ + void *buf = mergeable_ctx_to_buf_address(ctx); + struct virtio_net_hdr_mrg_rxbuf *hdr = buf; + u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); + struct page *page = virt_to_head_page(buf); + int offset = buf - page_address(page); + unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx)); + + struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len, + truesize); + struct sk_buff *curr_skb = head_skb; + + if (unlikely(!curr_skb)) + goto err_skb; + while (--num_buf) { + int num_skb_frags; + + ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); + if (unlikely(!ctx)) { + pr_debug("%s: rx error: %d buffers out of %d missing\n", + dev->name, num_buf, + virtio16_to_cpu(vi->vdev, + hdr->num_buffers)); + dev->stats.rx_length_errors++; + goto err_buf; + } + + buf = mergeable_ctx_to_buf_address(ctx); + page = virt_to_head_page(buf); + + num_skb_frags = skb_shinfo(curr_skb)->nr_frags; + if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { + struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); + + if (unlikely(!nskb)) + goto err_skb; + if (curr_skb == head_skb) + skb_shinfo(curr_skb)->frag_list = nskb; + else + curr_skb->next = nskb; + curr_skb = nskb; + head_skb->truesize += nskb->truesize; + num_skb_frags = 0; + } + truesize = max(len, mergeable_ctx_to_buf_truesize(ctx)); + if (curr_skb != head_skb) { + head_skb->data_len += len; + head_skb->len += len; + head_skb->truesize += truesize; + } + offset = buf - page_address(page); + if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { + put_page(page); + skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, + len, truesize); + } else { + skb_add_rx_frag(curr_skb, num_skb_frags, page, + offset, len, truesize); + } + } + + ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); + return head_skb; + +err_skb: + put_page(page); + while (--num_buf) { + ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len); + if (unlikely(!ctx)) { + pr_debug("%s: rx error: %d buffers missing\n", + dev->name, num_buf); + dev->stats.rx_length_errors++; + break; + } + page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx)); + put_page(page); + } +err_buf: + dev->stats.rx_dropped++; + dev_kfree_skb(head_skb); + return NULL; +} + +static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, + void *buf, unsigned int len) +{ + 
struct net_device *dev = vi->dev; + struct virtnet_stats *stats = this_cpu_ptr(vi->stats); + struct sk_buff *skb; + struct virtio_net_hdr_mrg_rxbuf *hdr; + + if (unlikely(len < vi->hdr_len + ETH_HLEN)) { + pr_debug("%s: short packet %i\n", dev->name, len); + dev->stats.rx_length_errors++; + if (vi->mergeable_rx_bufs) { + unsigned long ctx = (unsigned long)buf; + void *base = mergeable_ctx_to_buf_address(ctx); + put_page(virt_to_head_page(base)); + } else if (vi->big_packets) { + give_pages(rq, buf); + } else { + dev_kfree_skb(buf); + } + return; + } + + if (vi->mergeable_rx_bufs) + skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len); + else if (vi->big_packets) + skb = receive_big(dev, vi, rq, buf, len); + else + skb = receive_small(vi, buf, len); + + if (unlikely(!skb)) + return; + + hdr = skb_vnet_hdr(skb); + + u64_stats_update_begin(&stats->rx_syncp); + stats->rx_bytes += skb->len; + stats->rx_packets++; + u64_stats_update_end(&stats->rx_syncp); + + if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { + pr_debug("Needs csum!\n"); + if (!skb_partial_csum_set(skb, + virtio16_to_cpu(vi->vdev, hdr->hdr.csum_start), + virtio16_to_cpu(vi->vdev, hdr->hdr.csum_offset))) + goto frame_err; + } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } + + skb->protocol = eth_type_trans(skb, dev); + pr_debug("Receiving skb proto 0x%04x len %i type %i\n", + ntohs(skb->protocol), skb->len, skb->pkt_type); + + if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { + pr_debug("GSO!\n"); + switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { + case VIRTIO_NET_HDR_GSO_TCPV4: + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + break; + case VIRTIO_NET_HDR_GSO_UDP: + skb_shinfo(skb)->gso_type = SKB_GSO_UDP; + break; + case VIRTIO_NET_HDR_GSO_TCPV6: + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + break; + default: + net_warn_ratelimited("%s: bad gso type %u.\n", + dev->name, hdr->hdr.gso_type); + goto frame_err; + } + + if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN) + skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; + + skb_shinfo(skb)->gso_size = virtio16_to_cpu(vi->vdev, + hdr->hdr.gso_size); + if (skb_shinfo(skb)->gso_size == 0) { + net_warn_ratelimited("%s: zero gso size.\n", dev->name); + goto frame_err; + } + + /* Header must be checked, and gso_segs computed. 
*/ + skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; + skb_shinfo(skb)->gso_segs = 0; + } + + skb_mark_napi_id(skb, &rq->napi); + + napi_gro_receive(&rq->napi, skb); + return; + +frame_err: + dev->stats.rx_frame_errors++; + dev_kfree_skb(skb); +} + +static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq, + gfp_t gfp) +{ + struct sk_buff *skb; + struct virtio_net_hdr_mrg_rxbuf *hdr; + int err; + + skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp); + if (unlikely(!skb)) + return -ENOMEM; + + skb_put(skb, GOOD_PACKET_LEN); + + hdr = skb_vnet_hdr(skb); + sg_init_table(rq->sg, 2); + sg_set_buf(rq->sg, hdr, vi->hdr_len); + + err = skb_to_sgvec(skb, rq->sg + 1, 0, skb->len); + if (unlikely(err < 0)) { + dev_kfree_skb(skb); + return err; + } + + err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp); + if (err < 0) + dev_kfree_skb(skb); + + return err; +} + +static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq, + gfp_t gfp) +{ + struct page *first, *list = NULL; + char *p; + int i, err, offset; + + sg_init_table(rq->sg, MAX_SKB_FRAGS + 2); + + /* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */ + for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { + first = get_a_page(rq, gfp); + if (!first) { + if (list) + give_pages(rq, list); + return -ENOMEM; + } + sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE); + + /* chain new page in list head to match sg */ + first->private = (unsigned long)list; + list = first; + } + + first = get_a_page(rq, gfp); + if (!first) { + give_pages(rq, list); + return -ENOMEM; + } + p = page_address(first); + + /* rq->sg[0], rq->sg[1] share the same page */ + /* a separated rq->sg[0] for header - required in case !any_header_sg */ + sg_set_buf(&rq->sg[0], p, vi->hdr_len); + + /* rq->sg[1] for data packet, from offset */ + offset = sizeof(struct padded_vnet_hdr); + sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset); + + /* chain first in list head */ + first->private = (unsigned long)list; + err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2, + first, gfp); + if (err < 0) + give_pages(rq, first); + + return err; +} + +static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len) +{ + const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); + unsigned int len; + + len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), + GOOD_PACKET_LEN, PAGE_SIZE - hdr_len); + return ALIGN(len, MERGEABLE_BUFFER_ALIGN); +} + +static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp) +{ + struct page_frag *alloc_frag = &rq->alloc_frag; + char *buf; + unsigned long ctx; + int err; + unsigned int len, hole; + + len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len); + if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp))) + return -ENOMEM; + + buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset; + ctx = mergeable_buf_to_ctx(buf, len); + get_page(alloc_frag->page); + alloc_frag->offset += len; + hole = alloc_frag->size - alloc_frag->offset; + if (hole < len) { + /* To avoid internal fragmentation, if there is very likely not + * enough space for another buffer, add the remaining space to + * the current buffer. This extra space is not included in + * the truesize stored in ctx. + */ + len += hole; + alloc_frag->offset += hole; + } + + sg_init_one(rq->sg, buf, len); + err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp); + if (err < 0) + put_page(virt_to_head_page(buf)); + + return err; +} + +/* + * Returns false if we couldn't fill entirely (OOM). 
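get_mergeable_buf_len() above turns the per-queue packet-size average into a buffer length: clamp the EWMA between GOOD_PACKET_LEN and a page minus the header, then align up. A simplified model of that calculation (floating-point in place of the kernel's fixed-point DECLARE_EWMA; the constants are rough stand-ins):

    #include <stdio.h>

    #define HDR_LEN   12   /* approx. sizeof(struct virtio_net_hdr_mrg_rxbuf) */
    #define MIN_LEN   1518 /* stand-in for GOOD_PACKET_LEN */
    #define PAGE_SZ   4096
    #define ALIGNMENT 256  /* stand-in for MERGEABLE_BUFFER_ALIGN */

    static double avg = MIN_LEN; /* the EWMA state */

    static void ewma_add(unsigned int sample)
    {
        avg = (avg * 63 + sample) / 64; /* weight comparable to the 1/64 above */
    }

    static unsigned int buf_len(void)
    {
        unsigned int len = (unsigned int)avg;

        if (len < MIN_LEN)
            len = MIN_LEN;
        if (len > PAGE_SZ - HDR_LEN)
            len = PAGE_SZ - HDR_LEN;
        len += HDR_LEN;
        return (len + ALIGNMENT - 1) & ~(ALIGNMENT - 1); /* ALIGN() */
    }

    int main(void)
    {
        ewma_add(9000); /* a jumbo-ish packet nudges the average up */
        printf("posted buffer length: %u\n", buf_len());
        return 0;
    }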
+ *
+ * Normally run in the receive path, but can also be run from ndo_open
+ * before we're receiving packets, or from refill_work which is
+ * careful to disable receiving (using napi_disable).
+ */
+static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
+              gfp_t gfp)
+{
+    int err;
+    bool oom;
+
+    gfp |= __GFP_COLD;
+    do {
+        if (vi->mergeable_rx_bufs)
+            err = add_recvbuf_mergeable(rq, gfp);
+        else if (vi->big_packets)
+            err = add_recvbuf_big(vi, rq, gfp);
+        else
+            err = add_recvbuf_small(vi, rq, gfp);
+
+        oom = err == -ENOMEM;
+        if (err)
+            break;
+    } while (rq->vq->num_free);
+    virtqueue_kick(rq->vq);
+    return !oom;
+}
+
+static void skb_recv_done(struct virtqueue *rvq)
+{
+    struct virtnet_info *vi = rvq->vdev->priv;
+    struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
+
+    /* Schedule NAPI; suppress further interrupts if successful. */
+    if (napi_schedule_prep(&rq->napi)) {
+        virtqueue_disable_cb(rvq);
+        __napi_schedule(&rq->napi);
+    }
+}
+
+static void virtnet_napi_enable(struct receive_queue *rq)
+{
+    napi_enable(&rq->napi);
+
+    /* If all buffers were filled by other side before we napi_enabled, we
+     * won't get another interrupt, so process any outstanding packets
+     * now. virtnet_poll wants to re-enable the queue, so we disable here.
+     * We synchronize against interrupts via NAPI_STATE_SCHED */
+    if (napi_schedule_prep(&rq->napi)) {
+        virtqueue_disable_cb(rq->vq);
+        local_bh_disable();
+        __napi_schedule(&rq->napi);
+        local_bh_enable();
+    }
+}
+
+static void refill_work(struct work_struct *work)
+{
+    struct virtnet_info *vi =
+        container_of(work, struct virtnet_info, refill.work);
+    bool still_empty;
+    int i;
+
+    for (i = 0; i < vi->curr_queue_pairs; i++) {
+        struct receive_queue *rq = &vi->rq[i];
+
+        napi_disable(&rq->napi);
+        still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
+        virtnet_napi_enable(rq);
+
+        /* In theory, this can happen: if we don't get any buffers in
+         * we will *never* try to fill again.
+         */
+        if (still_empty)
+            schedule_delayed_work(&vi->refill, HZ/2);
+    }
+}
+
+static int virtnet_receive(struct receive_queue *rq, int budget)
+{
+    struct virtnet_info *vi = rq->vq->vdev->priv;
+    unsigned int len, received = 0;
+    void *buf;
+
+    while (received < budget &&
+           (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+        receive_buf(vi, rq, buf, len);
+        received++;
+    }
+
+    if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
+        if (!try_fill_recv(vi, rq, GFP_ATOMIC))
+            schedule_delayed_work(&vi->refill, 0);
+    }
+
+    return received;
+}
+
+static int virtnet_poll(struct napi_struct *napi, int budget)
+{
+    struct receive_queue *rq =
+        container_of(napi, struct receive_queue, napi);
+    unsigned int r, received;
+
+    received = virtnet_receive(rq, budget);
+
+    /* Out of packets?
*/ + if (received < budget) { + r = virtqueue_enable_cb_prepare(rq->vq); + napi_complete_done(napi, received); + if (unlikely(virtqueue_poll(rq->vq, r)) && + napi_schedule_prep(napi)) { + virtqueue_disable_cb(rq->vq); + __napi_schedule(napi); + } + } + + return received; +} + +#ifdef CONFIG_NET_RX_BUSY_POLL +/* must be called with local_bh_disable()d */ +static int virtnet_busy_poll(struct napi_struct *napi) +{ + struct receive_queue *rq = + container_of(napi, struct receive_queue, napi); + struct virtnet_info *vi = rq->vq->vdev->priv; + int r, received = 0, budget = 4; + + if (!(vi->status & VIRTIO_NET_S_LINK_UP)) + return LL_FLUSH_FAILED; + + if (!napi_schedule_prep(napi)) + return LL_FLUSH_BUSY; + + virtqueue_disable_cb(rq->vq); + +again: + received += virtnet_receive(rq, budget); + + r = virtqueue_enable_cb_prepare(rq->vq); + clear_bit(NAPI_STATE_SCHED, &napi->state); + if (unlikely(virtqueue_poll(rq->vq, r)) && + napi_schedule_prep(napi)) { + virtqueue_disable_cb(rq->vq); + if (received < budget) { + budget -= received; + goto again; + } else { + __napi_schedule(napi); + } + } + + return received; +} +#endif /* CONFIG_NET_RX_BUSY_POLL */ + +static int virtnet_open(struct net_device *dev) +{ + struct virtnet_info *vi = netdev_priv(dev); + int i; + + for (i = 0; i < vi->max_queue_pairs; i++) { + if (i < vi->curr_queue_pairs) + /* Make sure we have some buffers: if oom use wq. */ + if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) + schedule_delayed_work(&vi->refill, 0); + virtnet_napi_enable(&vi->rq[i]); + } + + return 0; +} + +static void free_old_xmit_skbs(struct send_queue *sq) +{ + struct sk_buff *skb; + unsigned int len; + struct virtnet_info *vi = sq->vq->vdev->priv; + struct virtnet_stats *stats = this_cpu_ptr(vi->stats); + + while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { + pr_debug("Sent skb %p\n", skb); + + u64_stats_update_begin(&stats->tx_syncp); + stats->tx_bytes += skb->len; + stats->tx_packets++; + u64_stats_update_end(&stats->tx_syncp); + + dev_kfree_skb_any(skb); + } +} + +static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) +{ + struct virtio_net_hdr_mrg_rxbuf *hdr; + const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; + struct virtnet_info *vi = sq->vq->vdev->priv; + int num_sg; + unsigned hdr_len = vi->hdr_len; + bool can_push; + + pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); + + can_push = vi->any_header_sg && + !((unsigned long)skb->data & (__alignof__(*hdr) - 1)) && + !skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len; + /* Even if we can, don't push here yet as this would skew + * csum_start offset below. 
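The can_push test at the end of xmit_skb()'s setup above decides whether the virtio-net header can be prepended in the skb's own headroom, which saves a scatterlist entry. A standalone restatement of that predicate with plain integers (all names and values here are illustrative, not skb internals):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the can_push test: the device must accept any header/data
     * layout, the data pointer must be suitably aligned, the header area
     * must not be shared (cloned), and there must be headroom for it. */
    static bool can_push_hdr(bool any_header_sg, uintptr_t data,
                             bool header_cloned, unsigned int headroom,
                             unsigned int hdr_len, unsigned int hdr_align)
    {
        return any_header_sg &&
               !(data & (hdr_align - 1)) &&
               !header_cloned &&
               headroom >= hdr_len;
    }

    int main(void)
    {
        /* Illustrative: 2-byte alignment, 12-byte header, 32 bytes of
         * headroom, header not cloned. */
        printf("can push: %d\n",
               can_push_hdr(true, 0x1000, false, 32, 12, 2));
        return 0;
    }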
*/ + if (can_push) + hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len); + else + hdr = skb_vnet_hdr(skb); + + if (skb->ip_summed == CHECKSUM_PARTIAL) { + hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; + hdr->hdr.csum_start = cpu_to_virtio16(vi->vdev, + skb_checksum_start_offset(skb)); + hdr->hdr.csum_offset = cpu_to_virtio16(vi->vdev, + skb->csum_offset); + } else { + hdr->hdr.flags = 0; + hdr->hdr.csum_offset = hdr->hdr.csum_start = 0; + } + + if (skb_is_gso(skb)) { + hdr->hdr.hdr_len = cpu_to_virtio16(vi->vdev, skb_headlen(skb)); + hdr->hdr.gso_size = cpu_to_virtio16(vi->vdev, + skb_shinfo(skb)->gso_size); + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) + hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP; + else + BUG(); + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) + hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN; + } else { + hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE; + hdr->hdr.gso_size = hdr->hdr.hdr_len = 0; + } + + if (vi->mergeable_rx_bufs) + hdr->num_buffers = 0; + + sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2)); + if (can_push) { + __skb_push(skb, hdr_len); + num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len); + if (unlikely(num_sg < 0)) + return num_sg; + /* Pull header back to avoid skew in tx bytes calculations. */ + __skb_pull(skb, hdr_len); + } else { + sg_set_buf(sq->sg, hdr, hdr_len); + num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len); + if (unlikely(num_sg < 0)) + return num_sg; + num_sg++; + } + return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); +} + +static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct virtnet_info *vi = netdev_priv(dev); + int qnum = skb_get_queue_mapping(skb); + struct send_queue *sq = &vi->sq[qnum]; + int err; + struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); + bool kick = !skb->xmit_more; + + /* Free up any pending old buffers before queueing new ones. */ + free_old_xmit_skbs(sq); + + /* timestamp packet in software */ + skb_tx_timestamp(skb); + + /* Try to transmit */ + err = xmit_skb(sq, skb); + + /* This should not happen! */ + if (unlikely(err)) { + dev->stats.tx_fifo_errors++; + if (net_ratelimit()) + dev_warn(&dev->dev, + "Unexpected TXQ (%d) queue failure: %d\n", qnum, err); + dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + + /* Don't wait up for transmitted skbs to be freed. */ + skb_orphan(skb); + nf_reset(skb); + + /* If running out of space, stop queue to avoid getting packets that we + * are then unable to transmit. + * An alternative would be to force queuing layer to requeue the skb by + * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be + * returned in a normal path of operation: it means that driver is not + * maintaining the TX queue stop/start state properly, and causes + * the stack to do a non-trivial amount of useless work. + * Since most packets only take 1 or 2 ring slots, stopping the queue + * early means 16 slots are typically wasted. + */ + if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { + netif_stop_subqueue(dev, qnum); + if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { + /* More just got used, free them then recheck. 
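The stop/start logic above is a hysteresis: stop the queue while a worst-case packet (2 + MAX_SKB_FRAGS descriptors) might not fit, and wake it once completions free enough slots, rather than ever returning NETDEV_TX_BUSY. A toy simulation of that policy (ring size and per-packet descriptor cost are made up):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_SKB_FRAGS 17 /* the usual value with 4K pages */

    int main(void)
    {
        int num_free = 40;      /* illustrative ring slots remaining */
        bool stopped = false;
        int pkt;

        for (pkt = 0; pkt < 16 && !stopped; pkt++) {
            num_free -= 2;      /* assume each packet eats ~2 descriptors */
            if (num_free < 2 + MAX_SKB_FRAGS) {
                stopped = true; /* netif_stop_subqueue() */
                printf("stopped after packet %d, %d slots left\n",
                       pkt, num_free);
            }
        }

        /* Completions return slots; wake once a worst-case packet fits. */
        num_free += 20;
        if (stopped && num_free >= 2 + MAX_SKB_FRAGS)
            printf("queue restarted with %d slots\n", num_free);
        return 0;
    }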
*/ + free_old_xmit_skbs(sq); + if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { + netif_start_subqueue(dev, qnum); + virtqueue_disable_cb(sq->vq); + } + } + } + + if (kick || netif_xmit_stopped(txq)) + virtqueue_kick(sq->vq); + + return NETDEV_TX_OK; +} + +/* + * Send command via the control virtqueue and check status. Commands + * supported by the hypervisor, as indicated by feature bits, should + * never fail unless improperly formatted. + */ +static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, + struct scatterlist *out) +{ + struct scatterlist *sgs[4], hdr, stat; + unsigned out_num = 0, tmp; + + /* Caller should know better */ + BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); + + vi->ctrl_status = ~0; + vi->ctrl_hdr.class = class; + vi->ctrl_hdr.cmd = cmd; + /* Add header */ + sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); + sgs[out_num++] = &hdr; + + if (out) + sgs[out_num++] = out; + + /* Add return status. */ + sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); + sgs[out_num] = &stat; + + BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); + virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); + + if (unlikely(!virtqueue_kick(vi->cvq))) + return vi->ctrl_status == VIRTIO_NET_OK; + + /* Spin for a response, the kick causes an ioport write, trapping + * into the hypervisor, so the request should be handled immediately. + */ + while (!virtqueue_get_buf(vi->cvq, &tmp) && + !virtqueue_is_broken(vi->cvq)) + cpu_relax(); + + return vi->ctrl_status == VIRTIO_NET_OK; +} + +static int virtnet_set_mac_address(struct net_device *dev, void *p) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct virtio_device *vdev = vi->vdev; + int ret; + struct sockaddr *addr = p; + struct scatterlist sg; + + ret = eth_prepare_mac_addr_change(dev, p); + if (ret) + return ret; + + if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) { + sg_init_one(&sg, addr->sa_data, dev->addr_len); + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, + VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) { + dev_warn(&vdev->dev, + "Failed to set mac address by vq command.\n"); + return -EINVAL; + } + } else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) && + !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) { + unsigned int i; + + /* Naturally, this has an atomicity problem. 
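virtnet_send_command() above always builds the same three-part message: a class/command header the device reads, an optional payload the device reads, and a one-byte status the device writes back. A sketch of that framing with plain buffers (the class/command values mirror VIRTIO_NET_CTRL_RX and VIRTIO_NET_CTRL_RX_PROMISC, both 0 in the virtio spec):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct ctrl_hdr { uint8_t class; uint8_t cmd; } __attribute__((packed));

    int main(void)
    {
        struct ctrl_hdr hdr = { .class = 0, .cmd = 0 }; /* RX / PROMISC */
        uint8_t promisc = 1;   /* command payload: enable promiscuous mode */
        uint8_t status = 0xff; /* ~0: device overwrites with VIRTIO_NET_OK */

        /* Driver-readable entries first, then the device-writable status,
         * the same out/in split virtqueue_add_sgs() is given above. */
        struct { const void *addr; size_t len; int device_writes; } sg[3] = {
            { &hdr,     sizeof(hdr),     0 },
            { &promisc, sizeof(promisc), 0 },
            { &status,  sizeof(status),  1 },
        };

        for (int i = 0; i < 3; i++)
            printf("sg[%d]: %zu byte(s), %s\n", i, sg[i].len,
                   sg[i].device_writes ? "device writes" : "device reads");
        return 0;
    }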
*/
+        for (i = 0; i < dev->addr_len; i++)
+            virtio_cwrite8(vdev,
+                       offsetof(struct virtio_net_config, mac) +
+                       i, addr->sa_data[i]);
+    }
+
+    eth_commit_mac_addr_change(dev, p);
+
+    return 0;
+}
+
+static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
+                           struct rtnl_link_stats64 *tot)
+{
+    struct virtnet_info *vi = netdev_priv(dev);
+    int cpu;
+    unsigned int start;
+
+    for_each_possible_cpu(cpu) {
+        struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
+        u64 tpackets, tbytes, rpackets, rbytes;
+
+        do {
+            start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
+            tpackets = stats->tx_packets;
+            tbytes = stats->tx_bytes;
+        } while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
+
+        do {
+            start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
+            rpackets = stats->rx_packets;
+            rbytes = stats->rx_bytes;
+        } while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
+
+        tot->rx_packets += rpackets;
+        tot->tx_packets += tpackets;
+        tot->rx_bytes += rbytes;
+        tot->tx_bytes += tbytes;
+    }
+
+    tot->tx_dropped = dev->stats.tx_dropped;
+    tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+    tot->rx_dropped = dev->stats.rx_dropped;
+    tot->rx_length_errors = dev->stats.rx_length_errors;
+    tot->rx_frame_errors = dev->stats.rx_frame_errors;
+
+    return tot;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void virtnet_netpoll(struct net_device *dev)
+{
+    struct virtnet_info *vi = netdev_priv(dev);
+    int i;
+
+    for (i = 0; i < vi->curr_queue_pairs; i++)
+        napi_schedule(&vi->rq[i].napi);
+}
+#endif
+
+static void virtnet_ack_link_announce(struct virtnet_info *vi)
+{
+    rtnl_lock();
+    if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
+                  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
+        dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
+    rtnl_unlock();
+}
+
+static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
+{
+    struct scatterlist sg;
+    struct virtio_net_ctrl_mq s;
+    struct net_device *dev = vi->dev;
+
+    if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
+        return 0;
+
+    s.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
+    sg_init_one(&sg, &s, sizeof(s));
+
+    if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
+                  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
+        dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
+             queue_pairs);
+        return -EINVAL;
+    } else {
+        vi->curr_queue_pairs = queue_pairs;
+        /* virtnet_open() will refill when the device is brought up. */
+        if (dev->flags & IFF_UP)
+            schedule_delayed_work(&vi->refill, 0);
+    }
+
+    return 0;
+}
+
+static int virtnet_close(struct net_device *dev)
+{
+    struct virtnet_info *vi = netdev_priv(dev);
+    int i;
+
+    /* Make sure refill_work doesn't re-enable napi!
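The retry loops in virtnet_stats() above are the u64_stats seqcount pattern: a reader snapshots the counter pair and retries if the sequence number was odd or changed underneath it. A minimal single-threaded model of that control flow (a real version needs memory barriers and per-cpu storage; this only shows the protocol):

    #include <stdint.h>
    #include <stdio.h>

    struct stats {
        unsigned int seq;   /* odd while an update is in flight */
        uint64_t packets, bytes;
    };

    static void write_stats(struct stats *s, uint64_t pkts, uint64_t bytes)
    {
        s->seq++;           /* begin: seq becomes odd */
        s->packets += pkts;
        s->bytes += bytes;
        s->seq++;           /* end: seq even again */
    }

    static void read_stats(const struct stats *s, uint64_t *pkts, uint64_t *bytes)
    {
        unsigned int start;

        do {
            start = s->seq; /* u64_stats_fetch_begin_irq() */
            *pkts = s->packets;
            *bytes = s->bytes;
        } while (start & 1 || start != s->seq); /* retry on a torn read */
    }

    int main(void)
    {
        struct stats s = { 0, 0, 0 };
        uint64_t p, b;

        write_stats(&s, 3, 1500);
        read_stats(&s, &p, &b);
        printf("%llu packets, %llu bytes\n",
               (unsigned long long)p, (unsigned long long)b);
        return 0;
    }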
*/ + cancel_delayed_work_sync(&vi->refill); + + for (i = 0; i < vi->max_queue_pairs; i++) + napi_disable(&vi->rq[i].napi); + + return 0; +} + +static void virtnet_set_rx_mode(struct net_device *dev) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct scatterlist sg[2]; + struct virtio_net_ctrl_mac *mac_data; + struct netdev_hw_addr *ha; + int uc_count; + int mc_count; + void *buf; + int i; + + /* We can't dynamically set ndo_set_rx_mode, so return gracefully */ + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) + return; + + vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); + vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); + + sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); + + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, + VIRTIO_NET_CTRL_RX_PROMISC, sg)) + dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", + vi->ctrl_promisc ? "en" : "dis"); + + sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); + + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, + VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) + dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", + vi->ctrl_allmulti ? "en" : "dis"); + + uc_count = netdev_uc_count(dev); + mc_count = netdev_mc_count(dev); + /* MAC filter - use one buffer for both lists */ + buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) + + (2 * sizeof(mac_data->entries)), GFP_ATOMIC); + mac_data = buf; + if (!buf) + return; + + sg_init_table(sg, 2); + + /* Store the unicast list and count in the front of the buffer */ + mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count); + i = 0; + netdev_for_each_uc_addr(ha, dev) + memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); + + sg_set_buf(&sg[0], mac_data, + sizeof(mac_data->entries) + (uc_count * ETH_ALEN)); + + /* multicast list and count fill the end */ + mac_data = (void *)&mac_data->macs[uc_count][0]; + + mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count); + i = 0; + netdev_for_each_mc_addr(ha, dev) + memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN); + + sg_set_buf(&sg[1], mac_data, + sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); + + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC, + VIRTIO_NET_CTRL_MAC_TABLE_SET, sg)) + dev_warn(&dev->dev, "Failed to set MAC filter table.\n"); + + kfree(buf); +} + +static int virtnet_vlan_rx_add_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct scatterlist sg; + + sg_init_one(&sg, &vid, sizeof(vid)); + + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, + VIRTIO_NET_CTRL_VLAN_ADD, &sg)) + dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid); + return 0; +} + +static int virtnet_vlan_rx_kill_vid(struct net_device *dev, + __be16 proto, u16 vid) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct scatterlist sg; + + sg_init_one(&sg, &vid, sizeof(vid)); + + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, + VIRTIO_NET_CTRL_VLAN_DEL, &sg)) + dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid); + return 0; +} + +static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu) +{ + int i; + + if (vi->affinity_hint_set) { + for (i = 0; i < vi->max_queue_pairs; i++) { + virtqueue_set_affinity(vi->rq[i].vq, -1); + virtqueue_set_affinity(vi->sq[i].vq, -1); + } + + vi->affinity_hint_set = false; + } +} + +static void virtnet_set_affinity(struct virtnet_info *vi) +{ + int i; + int cpu; + + /* In multiqueue mode, when the number of cpu is equal to the number of + * queue pairs, we let the queue pairs to be private to one cpu by + * 
setting the affinity hint to eliminate the contention. + */ + if (vi->curr_queue_pairs == 1 || + vi->max_queue_pairs != num_online_cpus()) { + virtnet_clean_affinity(vi, -1); + return; + } + + i = 0; + for_each_online_cpu(cpu) { + virtqueue_set_affinity(vi->rq[i].vq, cpu); + virtqueue_set_affinity(vi->sq[i].vq, cpu); + netif_set_xps_queue(vi->dev, cpumask_of(cpu), i); + i++; + } + + vi->affinity_hint_set = true; +} + +static int virtnet_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb); + + switch(action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: + case CPU_DOWN_FAILED: + case CPU_DEAD: + virtnet_set_affinity(vi); + break; + case CPU_DOWN_PREPARE: + virtnet_clean_affinity(vi, (long)hcpu); + break; + default: + break; + } + + return NOTIFY_OK; +} + +static void virtnet_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ring) +{ + struct virtnet_info *vi = netdev_priv(dev); + + ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq); + ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq); + ring->rx_pending = ring->rx_max_pending; + ring->tx_pending = ring->tx_max_pending; +} + + +static void virtnet_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct virtio_device *vdev = vi->vdev; + + strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver)); + strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version)); + strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info)); + +} + +/* TODO: Eliminate OOO packets during switching */ +static int virtnet_set_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct virtnet_info *vi = netdev_priv(dev); + u16 queue_pairs = channels->combined_count; + int err; + + /* We don't support separate rx/tx channels. + * We don't allow setting 'other' channels. 
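virtnet_set_affinity() above only pins queue pair i to the i-th online CPU when the pair count exactly matches the CPU count; otherwise it clears every hint. A standalone sketch of that policy (the counts are illustrative):

    #include <stdio.h>

    int main(void)
    {
        int num_cpus = 4, max_queue_pairs = 4, curr_queue_pairs = 4;

        /* Same bail-out conditions as virtnet_set_affinity(): a single
         * pair, or a pair/CPU mismatch, means no pinning at all. */
        if (curr_queue_pairs == 1 || max_queue_pairs != num_cpus) {
            printf("no affinity hints set\n");
            return 0;
        }

        for (int cpu = 0, i = 0; cpu < num_cpus; cpu++, i++)
            printf("rq[%d]/sq[%d] -> cpu %d\n", i, i, cpu);
        return 0;
    }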
+ */ + if (channels->rx_count || channels->tx_count || channels->other_count) + return -EINVAL; + + if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0) + return -EINVAL; + + get_online_cpus(); + err = virtnet_set_queues(vi, queue_pairs); + if (!err) { + netif_set_real_num_tx_queues(dev, queue_pairs); + netif_set_real_num_rx_queues(dev, queue_pairs); + + virtnet_set_affinity(vi); + } + put_online_cpus(); + + return err; +} + +static void virtnet_get_channels(struct net_device *dev, + struct ethtool_channels *channels) +{ + struct virtnet_info *vi = netdev_priv(dev); + + channels->combined_count = vi->curr_queue_pairs; + channels->max_combined = vi->max_queue_pairs; + channels->max_other = 0; + channels->rx_count = 0; + channels->tx_count = 0; + channels->other_count = 0; +} + +static const struct ethtool_ops virtnet_ethtool_ops = { + .get_drvinfo = virtnet_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = virtnet_get_ringparam, + .set_channels = virtnet_set_channels, + .get_channels = virtnet_get_channels, + .get_ts_info = ethtool_op_get_ts_info, +}; + +#define MIN_MTU 68 +#define MAX_MTU 65535 + +static int virtnet_change_mtu(struct net_device *dev, int new_mtu) +{ + if (new_mtu < MIN_MTU || new_mtu > MAX_MTU) + return -EINVAL; + dev->mtu = new_mtu; + return 0; +} + +static const struct net_device_ops virtnet_netdev = { + .ndo_open = virtnet_open, + .ndo_stop = virtnet_close, + .ndo_start_xmit = start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = virtnet_set_mac_address, + .ndo_set_rx_mode = virtnet_set_rx_mode, + .ndo_change_mtu = virtnet_change_mtu, + .ndo_get_stats64 = virtnet_stats, + .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = virtnet_netpoll, +#endif +#ifdef CONFIG_NET_RX_BUSY_POLL + .ndo_busy_poll = virtnet_busy_poll, +#endif + .ndo_features_check = passthru_features_check, +}; + +static void virtnet_config_changed_work(struct work_struct *work) +{ + struct virtnet_info *vi = + container_of(work, struct virtnet_info, config_work); + u16 v; + + if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS, + struct virtio_net_config, status, &v) < 0) + return; + + if (v & VIRTIO_NET_S_ANNOUNCE) { + netdev_notify_peers(vi->dev); + virtnet_ack_link_announce(vi); + } + + /* Ignore unknown (future) status bits */ + v &= VIRTIO_NET_S_LINK_UP; + + if (vi->status == v) + return; + + vi->status = v; + + if (vi->status & VIRTIO_NET_S_LINK_UP) { + netif_carrier_on(vi->dev); + netif_tx_wake_all_queues(vi->dev); + } else { + netif_carrier_off(vi->dev); + netif_tx_stop_all_queues(vi->dev); + } +} + +static void virtnet_config_changed(struct virtio_device *vdev) +{ + struct virtnet_info *vi = vdev->priv; + + schedule_work(&vi->config_work); +} + +static void virtnet_free_queues(struct virtnet_info *vi) +{ + int i; + + for (i = 0; i < vi->max_queue_pairs; i++) { + napi_hash_del(&vi->rq[i].napi); + netif_napi_del(&vi->rq[i].napi); + } + + /* We called napi_hash_del() before netif_napi_del(), + * we need to respect an RCU grace period before freeing vi->rq + */ + synchronize_net(); + + kfree(vi->rq); + kfree(vi->sq); +} + +static void free_receive_bufs(struct virtnet_info *vi) +{ + int i; + + for (i = 0; i < vi->max_queue_pairs; i++) { + while (vi->rq[i].pages) + __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0); + } +} + +static void free_receive_page_frags(struct virtnet_info *vi) +{ + int i; + for (i = 0; i < vi->max_queue_pairs; i++) + 
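+		/* each receive queue keeps one page-fragment allocator for refills */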
if (vi->rq[i].alloc_frag.page) + put_page(vi->rq[i].alloc_frag.page); +} + +static void free_unused_bufs(struct virtnet_info *vi) +{ + void *buf; + int i; + + for (i = 0; i < vi->max_queue_pairs; i++) { + struct virtqueue *vq = vi->sq[i].vq; + while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) + dev_kfree_skb(buf); + } + + for (i = 0; i < vi->max_queue_pairs; i++) { + struct virtqueue *vq = vi->rq[i].vq; + + while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { + if (vi->mergeable_rx_bufs) { + unsigned long ctx = (unsigned long)buf; + void *base = mergeable_ctx_to_buf_address(ctx); + put_page(virt_to_head_page(base)); + } else if (vi->big_packets) { + give_pages(&vi->rq[i], buf); + } else { + dev_kfree_skb(buf); + } + } + } +} + +static void virtnet_del_vqs(struct virtnet_info *vi) +{ + struct virtio_device *vdev = vi->vdev; + + virtnet_clean_affinity(vi, -1); + + vdev->config->del_vqs(vdev); + + virtnet_free_queues(vi); +} + +static int virtnet_find_vqs(struct virtnet_info *vi) +{ + vq_callback_t **callbacks; + struct virtqueue **vqs; + int ret = -ENOMEM; + int i, total_vqs; + const char **names; + + /* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by + * possible N-1 RX/TX queue pairs used in multiqueue mode, followed by + * possible control vq. + */ + total_vqs = vi->max_queue_pairs * 2 + + virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ); + + /* Allocate space for find_vqs parameters */ + vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL); + if (!vqs) + goto err_vq; + callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL); + if (!callbacks) + goto err_callback; + names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL); + if (!names) + goto err_names; + + /* Parameters for control virtqueue, if any */ + if (vi->has_cvq) { + callbacks[total_vqs - 1] = NULL; + names[total_vqs - 1] = "control"; + } + + /* Allocate/initialize parameters for send/receive virtqueues */ + for (i = 0; i < vi->max_queue_pairs; i++) { + callbacks[rxq2vq(i)] = skb_recv_done; + callbacks[txq2vq(i)] = skb_xmit_done; + sprintf(vi->rq[i].name, "input.%d", i); + sprintf(vi->sq[i].name, "output.%d", i); + names[rxq2vq(i)] = vi->rq[i].name; + names[txq2vq(i)] = vi->sq[i].name; + } + + ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks, + names); + if (ret) + goto err_find; + + if (vi->has_cvq) { + vi->cvq = vqs[total_vqs - 1]; + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN)) + vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; + } + + for (i = 0; i < vi->max_queue_pairs; i++) { + vi->rq[i].vq = vqs[rxq2vq(i)]; + vi->sq[i].vq = vqs[txq2vq(i)]; + } + + kfree(names); + kfree(callbacks); + kfree(vqs); + + return 0; + +err_find: + kfree(names); +err_names: + kfree(callbacks); +err_callback: + kfree(vqs); +err_vq: + return ret; +} + +static int virtnet_alloc_queues(struct virtnet_info *vi) +{ + int i; + + vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); + if (!vi->sq) + goto err_sq; + vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL); + if (!vi->rq) + goto err_rq; + + INIT_DELAYED_WORK(&vi->refill, refill_work); + for (i = 0; i < vi->max_queue_pairs; i++) { + vi->rq[i].pages = NULL; + netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll, + napi_weight); + napi_hash_add(&vi->rq[i].napi); + + sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); + ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); + sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); + } + + return 0; + +err_rq: + kfree(vi->sq); +err_sq: + return -ENOMEM; +} + 
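+/* Bring-up order below matters: allocate the rq/sq bookkeeping first, then
+ * ask the transport for the matching virtqueues, and only then spread the
+ * interrupt affinity across online CPUs under the hotplug lock. */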
+static int init_vqs(struct virtnet_info *vi) +{ + int ret; + + /* Allocate send & receive queues */ + ret = virtnet_alloc_queues(vi); + if (ret) + goto err; + + ret = virtnet_find_vqs(vi); + if (ret) + goto err_free; + + get_online_cpus(); + virtnet_set_affinity(vi); + put_online_cpus(); + + return 0; + +err_free: + virtnet_free_queues(vi); +err: + return ret; +} + +#ifdef CONFIG_SYSFS +static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue, + struct rx_queue_attribute *attribute, char *buf) +{ + struct virtnet_info *vi = netdev_priv(queue->dev); + unsigned int queue_index = get_netdev_rx_queue_index(queue); + struct ewma_pkt_len *avg; + + BUG_ON(queue_index >= vi->max_queue_pairs); + avg = &vi->rq[queue_index].mrg_avg_pkt_len; + return sprintf(buf, "%u\n", get_mergeable_buf_len(avg)); +} + +static struct rx_queue_attribute mergeable_rx_buffer_size_attribute = + __ATTR_RO(mergeable_rx_buffer_size); + +static struct attribute *virtio_net_mrg_rx_attrs[] = { + &mergeable_rx_buffer_size_attribute.attr, + NULL +}; + +static const struct attribute_group virtio_net_mrg_rx_group = { + .name = "virtio_net", + .attrs = virtio_net_mrg_rx_attrs +}; +#endif + +static bool virtnet_fail_on_feature(struct virtio_device *vdev, + unsigned int fbit, + const char *fname, const char *dname) +{ + if (!virtio_has_feature(vdev, fbit)) + return false; + + dev_err(&vdev->dev, "device advertises feature %s but not %s", + fname, dname); + + return true; +} + +#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \ + virtnet_fail_on_feature(vdev, fbit, #fbit, dbit) + +static bool virtnet_validate_features(struct virtio_device *vdev) +{ + if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) && + (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE, + "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") || + VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR, + "VIRTIO_NET_F_CTRL_VQ"))) { + return false; + } + + return true; +} + +static int virtnet_probe(struct virtio_device *vdev) +{ + int i, err; + struct net_device *dev; + struct virtnet_info *vi; + u16 max_queue_pairs; + + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + + if (!virtnet_validate_features(vdev)) + return -EINVAL; + + /* Find if host supports multiqueue virtio_net device */ + err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, + struct virtio_net_config, + max_virtqueue_pairs, &max_queue_pairs); + + /* We need at least 2 queue's */ + if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN || + max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX || + !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) + max_queue_pairs = 1; + + /* Allocate ourselves a network device with room for our info */ + dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs); + if (!dev) + return -ENOMEM; + + /* Set up network device as normal. */ + dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; + dev->netdev_ops = &virtnet_netdev; + dev->features = NETIF_F_HIGHDMA; + + dev->ethtool_ops = &virtnet_ethtool_ops; + SET_NETDEV_DEV(dev, &vdev->dev); + + /* Do we support "hardware" checksums? */ + if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { + /* This opens up the world of extra features. 
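+	 * The TSO/UFO offloads below only apply when the host handles checksums.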
*/ + dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG; + if (csum) + dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; + + if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { + dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO + | NETIF_F_TSO_ECN | NETIF_F_TSO6; + } + /* Individual feature bits: what can host handle? */ + if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4)) + dev->hw_features |= NETIF_F_TSO; + if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6)) + dev->hw_features |= NETIF_F_TSO6; + if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) + dev->hw_features |= NETIF_F_TSO_ECN; + if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO)) + dev->hw_features |= NETIF_F_UFO; + + dev->features |= NETIF_F_GSO_ROBUST; + + if (gso) + dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); + /* (!csum && gso) case will be fixed by register_netdev() */ + } + if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) + dev->features |= NETIF_F_RXCSUM; + + dev->vlan_features = dev->features; + + /* Configuration may specify what MAC to use. Otherwise random. */ + if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) + virtio_cread_bytes(vdev, + offsetof(struct virtio_net_config, mac), + dev->dev_addr, dev->addr_len); + else + eth_hw_addr_random(dev); + + /* Set up our device-specific information */ + vi = netdev_priv(dev); + vi->dev = dev; + vi->vdev = vdev; + vdev->priv = vi; + vi->stats = alloc_percpu(struct virtnet_stats); + err = -ENOMEM; + if (vi->stats == NULL) + goto free; + + for_each_possible_cpu(i) { + struct virtnet_stats *virtnet_stats; + virtnet_stats = per_cpu_ptr(vi->stats, i); + u64_stats_init(&virtnet_stats->tx_syncp); + u64_stats_init(&virtnet_stats->rx_syncp); + } + + INIT_WORK(&vi->config_work, virtnet_config_changed_work); + + /* If we can receive ANY GSO packets, we must allocate large ones. */ + if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO)) + vi->big_packets = true; + + if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) + vi->mergeable_rx_bufs = true; + + if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) || + virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) + vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf); + else + vi->hdr_len = sizeof(struct virtio_net_hdr); + + if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || + virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) + vi->any_header_sg = true; + + if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) + vi->has_cvq = true; + + if (vi->any_header_sg) + dev->needed_headroom = vi->hdr_len; + + /* Use single tx/rx queue pair as default */ + vi->curr_queue_pairs = 1; + vi->max_queue_pairs = max_queue_pairs; + + /* Allocate/initialize the rx/tx queues, and invoke find_vqs */ + err = init_vqs(vi); + if (err) + goto free_stats; + +#ifdef CONFIG_SYSFS + if (vi->mergeable_rx_bufs) + dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group; +#endif + netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs); + netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs); + + err = register_netdev(dev); + if (err) { + pr_debug("virtio_net: registering device failed\n"); + goto free_vqs; + } + + virtio_device_ready(vdev); + + /* Last of all, set up some receive buffers. */ + for (i = 0; i < vi->curr_queue_pairs; i++) { + try_fill_recv(vi, &vi->rq[i], GFP_KERNEL); + + /* If we didn't even get one input buffer, we're useless. 
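+	 * A still-full vring here means try_fill_recv() could not post a single buffer.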
*/ + if (vi->rq[i].vq->num_free == + virtqueue_get_vring_size(vi->rq[i].vq)) { + free_unused_bufs(vi); + err = -ENOMEM; + goto free_recv_bufs; + } + } + + vi->nb.notifier_call = &virtnet_cpu_callback; + err = register_hotcpu_notifier(&vi->nb); + if (err) { + pr_debug("virtio_net: registering cpu notifier failed\n"); + goto free_recv_bufs; + } + + /* Assume link up if device can't report link status, + otherwise get link status from config. */ + netif_carrier_off(dev); + if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { + schedule_work(&vi->config_work); + } else { + vi->status = VIRTIO_NET_S_LINK_UP; + netif_carrier_on(dev); + } + + pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", + dev->name, max_queue_pairs); + + return 0; + +free_recv_bufs: + vi->vdev->config->reset(vdev); + + free_receive_bufs(vi); + unregister_netdev(dev); +free_vqs: + cancel_delayed_work_sync(&vi->refill); + free_receive_page_frags(vi); + virtnet_del_vqs(vi); +free_stats: + free_percpu(vi->stats); +free: + free_netdev(dev); + return err; +} + +static void remove_vq_common(struct virtnet_info *vi) +{ + vi->vdev->config->reset(vi->vdev); + + /* Free unused buffers in both send and recv, if any. */ + free_unused_bufs(vi); + + free_receive_bufs(vi); + + free_receive_page_frags(vi); + + virtnet_del_vqs(vi); +} + +static void virtnet_remove(struct virtio_device *vdev) +{ + struct virtnet_info *vi = vdev->priv; + + unregister_hotcpu_notifier(&vi->nb); + + /* Make sure no work handler is accessing the device. */ + flush_work(&vi->config_work); + + unregister_netdev(vi->dev); + + remove_vq_common(vi); + + free_percpu(vi->stats); + free_netdev(vi->dev); +} + +#ifdef CONFIG_PM_SLEEP +static int virtnet_freeze(struct virtio_device *vdev) +{ + struct virtnet_info *vi = vdev->priv; + int i; + + unregister_hotcpu_notifier(&vi->nb); + + /* Make sure no work handler is accessing the device */ + flush_work(&vi->config_work); + + netif_device_detach(vi->dev); + cancel_delayed_work_sync(&vi->refill); + + if (netif_running(vi->dev)) { + for (i = 0; i < vi->max_queue_pairs; i++) + napi_disable(&vi->rq[i].napi); + } + + remove_vq_common(vi); + + return 0; +} + +static int virtnet_restore(struct virtio_device *vdev) +{ + struct virtnet_info *vi = vdev->priv; + int err, i; + + err = init_vqs(vi); + if (err) + return err; + + virtio_device_ready(vdev); + + if (netif_running(vi->dev)) { + for (i = 0; i < vi->curr_queue_pairs; i++) + if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) + schedule_delayed_work(&vi->refill, 0); + + for (i = 0; i < vi->max_queue_pairs; i++) + virtnet_napi_enable(&vi->rq[i]); + } + + netif_device_attach(vi->dev); + + rtnl_lock(); + virtnet_set_queues(vi, vi->curr_queue_pairs); + rtnl_unlock(); + + err = register_hotcpu_notifier(&vi->nb); + if (err) + return err; + + return 0; +} +#endif + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { + VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, + VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, + VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, + VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, + VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, + VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, + VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, + VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, + VIRTIO_NET_F_CTRL_MAC_ADDR, + VIRTIO_F_ANY_LAYOUT, +}; + +static struct virtio_driver virtio_net_driver = { + .feature_table = 
features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtnet_probe, + .remove = virtnet_remove, + .config_changed = virtnet_config_changed, +#ifdef CONFIG_PM_SLEEP + .freeze = virtnet_freeze, + .restore = virtnet_restore, +#endif +}; + +module_virtio_driver(virtio_net_driver); + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Virtio network driver"); +MODULE_LICENSE("GPL"); diff --git a/addons/virtio/src/4.4.180/virtio_pci_common.c b/addons/virtio/src/4.4.180/virtio_pci_common.c new file mode 100644 index 00000000..2046a68a --- /dev/null +++ b/addons/virtio/src/4.4.180/virtio_pci_common.c @@ -0,0 +1,576 @@ +/* + * Virtio PCI driver - common functionality for all device versions + * + * This module allows virtio devices to be used over a virtual PCI device. + * This can be used with QEMU based VMMs like KVM or Xen. + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori + * Rusty Russell + * Michael S. Tsirkin + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "virtio_pci_common.h" + +static bool force_legacy = false; + +#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY) +module_param(force_legacy, bool, 0444); +MODULE_PARM_DESC(force_legacy, + "Force legacy mode for transitional virtio 1 devices"); +#endif + +/* wait for pending irq handlers */ +void vp_synchronize_vectors(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + int i; + + if (vp_dev->intx_enabled) + synchronize_irq(vp_dev->pci_dev->irq); + + for (i = 0; i < vp_dev->msix_vectors; ++i) + synchronize_irq(vp_dev->msix_entries[i].vector); +} + +/* the notify function used when creating a virt queue */ +bool vp_notify(struct virtqueue *vq) +{ + /* we write the queue's selector into the notification register to + * signal the other end */ + iowrite16(vq->index, (void __iomem *)vq->priv); + return true; +} + +/* Handle a configuration change: Tell driver if it wants to know. */ +static irqreturn_t vp_config_changed(int irq, void *opaque) +{ + struct virtio_pci_device *vp_dev = opaque; + + virtio_config_changed(&vp_dev->vdev); + return IRQ_HANDLED; +} + +/* Notify all virtqueues on an interrupt. */ +static irqreturn_t vp_vring_interrupt(int irq, void *opaque) +{ + struct virtio_pci_device *vp_dev = opaque; + struct virtio_pci_vq_info *info; + irqreturn_t ret = IRQ_NONE; + unsigned long flags; + + spin_lock_irqsave(&vp_dev->lock, flags); + list_for_each_entry(info, &vp_dev->virtqueues, node) { + if (vring_interrupt(irq, info->vq) == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + spin_unlock_irqrestore(&vp_dev->lock, flags); + + return ret; +} + +/* A small wrapper to also acknowledge the interrupt when it's handled. + * I really need an EIO hook for the vring so I can ack the interrupt once we + * know that we'll be handling the IRQ but before we invoke the callback since + * the callback may notify the host which results in the host attempting to + * raise an interrupt that we would then mask once we acknowledged the + * interrupt. */ +static irqreturn_t vp_interrupt(int irq, void *opaque) +{ + struct virtio_pci_device *vp_dev = opaque; + u8 isr; + + /* reading the ISR has the effect of also clearing it so it's very + * important to save off the value. 
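+ * A second read would see zero, so the event would be lost.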
*/ + isr = ioread8(vp_dev->isr); + + /* It's definitely not us if the ISR was not high */ + if (!isr) + return IRQ_NONE; + + /* Configuration change? Tell driver if it wants to know. */ + if (isr & VIRTIO_PCI_ISR_CONFIG) + vp_config_changed(irq, opaque); + + return vp_vring_interrupt(irq, opaque); +} + +static void vp_free_vectors(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + int i; + + if (vp_dev->intx_enabled) { + free_irq(vp_dev->pci_dev->irq, vp_dev); + vp_dev->intx_enabled = 0; + } + + for (i = 0; i < vp_dev->msix_used_vectors; ++i) + free_irq(vp_dev->msix_entries[i].vector, vp_dev); + + for (i = 0; i < vp_dev->msix_vectors; i++) + if (vp_dev->msix_affinity_masks[i]) + free_cpumask_var(vp_dev->msix_affinity_masks[i]); + + if (vp_dev->msix_enabled) { + /* Disable the vector used for configuration */ + vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR); + + pci_disable_msix(vp_dev->pci_dev); + vp_dev->msix_enabled = 0; + } + + vp_dev->msix_vectors = 0; + vp_dev->msix_used_vectors = 0; + kfree(vp_dev->msix_names); + vp_dev->msix_names = NULL; + kfree(vp_dev->msix_entries); + vp_dev->msix_entries = NULL; + kfree(vp_dev->msix_affinity_masks); + vp_dev->msix_affinity_masks = NULL; +} + +static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors, + bool per_vq_vectors) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + const char *name = dev_name(&vp_dev->vdev.dev); + unsigned i, v; + int err = -ENOMEM; + + vp_dev->msix_vectors = nvectors; + + vp_dev->msix_entries = kmalloc(nvectors * sizeof *vp_dev->msix_entries, + GFP_KERNEL); + if (!vp_dev->msix_entries) + goto error; + vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names, + GFP_KERNEL); + if (!vp_dev->msix_names) + goto error; + vp_dev->msix_affinity_masks + = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks, + GFP_KERNEL); + if (!vp_dev->msix_affinity_masks) + goto error; + for (i = 0; i < nvectors; ++i) + if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i], + GFP_KERNEL)) + goto error; + + for (i = 0; i < nvectors; ++i) + vp_dev->msix_entries[i].entry = i; + + err = pci_enable_msix_exact(vp_dev->pci_dev, + vp_dev->msix_entries, nvectors); + if (err) + goto error; + vp_dev->msix_enabled = 1; + + /* Set the vector used for configuration */ + v = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, + "%s-config", name); + err = request_irq(vp_dev->msix_entries[v].vector, + vp_config_changed, 0, vp_dev->msix_names[v], + vp_dev); + if (err) + goto error; + ++vp_dev->msix_used_vectors; + + v = vp_dev->config_vector(vp_dev, v); + /* Verify we had enough resources to assign the vector */ + if (v == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto error; + } + + if (!per_vq_vectors) { + /* Shared vector for all VQs */ + v = vp_dev->msix_used_vectors; + snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names, + "%s-virtqueues", name); + err = request_irq(vp_dev->msix_entries[v].vector, + vp_vring_interrupt, 0, vp_dev->msix_names[v], + vp_dev); + if (err) + goto error; + ++vp_dev->msix_used_vectors; + } + return 0; +error: + vp_free_vectors(vdev); + return err; +} + +static int vp_request_intx(struct virtio_device *vdev) +{ + int err; + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, + IRQF_SHARED, dev_name(&vdev->dev), vp_dev); + if (!err) + vp_dev->intx_enabled = 1; + return err; +} + +static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, 
unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL); + struct virtqueue *vq; + unsigned long flags; + + /* fill out our structure that represents an active queue */ + if (!info) + return ERR_PTR(-ENOMEM); + + vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, msix_vec); + if (IS_ERR(vq)) + goto out_info; + + info->vq = vq; + if (callback) { + spin_lock_irqsave(&vp_dev->lock, flags); + list_add(&info->node, &vp_dev->virtqueues); + spin_unlock_irqrestore(&vp_dev->lock, flags); + } else { + INIT_LIST_HEAD(&info->node); + } + + vp_dev->vqs[index] = info; + return vq; + +out_info: + kfree(info); + return vq; +} + +static void vp_del_vq(struct virtqueue *vq) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); + struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; + unsigned long flags; + + spin_lock_irqsave(&vp_dev->lock, flags); + list_del(&info->node); + spin_unlock_irqrestore(&vp_dev->lock, flags); + + vp_dev->del_vq(info); + kfree(info); +} + +/* the config->del_vqs() implementation */ +void vp_del_vqs(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtqueue *vq, *n; + struct virtio_pci_vq_info *info; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) { + info = vp_dev->vqs[vq->index]; + if (vp_dev->per_vq_vectors && + info->msix_vector != VIRTIO_MSI_NO_VECTOR) + free_irq(vp_dev->msix_entries[info->msix_vector].vector, + vq); + vp_del_vq(vq); + } + vp_dev->per_vq_vectors = false; + + vp_free_vectors(vdev); + kfree(vp_dev->vqs); + vp_dev->vqs = NULL; +} + +static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[], + bool use_msix, + bool per_vq_vectors) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + u16 msix_vec; + int i, err, nvectors, allocated_vectors; + + vp_dev->vqs = kmalloc(nvqs * sizeof *vp_dev->vqs, GFP_KERNEL); + if (!vp_dev->vqs) + return -ENOMEM; + + if (!use_msix) { + /* Old style: one normal interrupt for change and all vqs. */ + err = vp_request_intx(vdev); + if (err) + goto error_find; + } else { + if (per_vq_vectors) { + /* Best option: one for change interrupt, one per vq. */ + nvectors = 1; + for (i = 0; i < nvqs; ++i) + if (callbacks[i]) + ++nvectors; + } else { + /* Second best: one for change, shared for all vqs. 
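+			 * (vector 0 for config changes, vector 1 shared by every vq)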
*/ + nvectors = 2; + } + + err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors); + if (err) + goto error_find; + } + + vp_dev->per_vq_vectors = per_vq_vectors; + allocated_vectors = vp_dev->msix_used_vectors; + for (i = 0; i < nvqs; ++i) { + if (!names[i]) { + vqs[i] = NULL; + continue; + } else if (!callbacks[i] || !vp_dev->msix_enabled) + msix_vec = VIRTIO_MSI_NO_VECTOR; + else if (vp_dev->per_vq_vectors) + msix_vec = allocated_vectors++; + else + msix_vec = VP_MSIX_VQ_VECTOR; + vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i], msix_vec); + if (IS_ERR(vqs[i])) { + err = PTR_ERR(vqs[i]); + goto error_find; + } + + if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR) + continue; + + /* allocate per-vq irq if available and necessary */ + snprintf(vp_dev->msix_names[msix_vec], + sizeof *vp_dev->msix_names, + "%s-%s", + dev_name(&vp_dev->vdev.dev), names[i]); + err = request_irq(vp_dev->msix_entries[msix_vec].vector, + vring_interrupt, 0, + vp_dev->msix_names[msix_vec], + vqs[i]); + if (err) { + vp_del_vq(vqs[i]); + goto error_find; + } + } + return 0; + +error_find: + vp_del_vqs(vdev); + return err; +} + +/* the config->find_vqs() implementation */ +int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + int err; + + /* Try MSI-X with one vector per queue. */ + err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, true, true); + if (!err) + return 0; + /* Fallback: MSI-X with one vector for config, one shared for queues. */ + err = vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, + true, false); + if (!err) + return 0; + /* Finally fall back to regular interrupts. */ + return vp_try_to_find_vqs(vdev, nvqs, vqs, callbacks, names, + false, false); +} + +const char *vp_bus_name(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + return pci_name(vp_dev->pci_dev); +} + +/* Setup the affinity for a virtqueue: + * - force the affinity for per vq vector + * - OR over all affinities for shared MSI + * - ignore the affinity request if we're using INTX + */ +int vp_set_vq_affinity(struct virtqueue *vq, int cpu) +{ + struct virtio_device *vdev = vq->vdev; + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index]; + struct cpumask *mask; + unsigned int irq; + + if (!vq->callback) + return -EINVAL; + + if (vp_dev->msix_enabled) { + mask = vp_dev->msix_affinity_masks[info->msix_vector]; + irq = vp_dev->msix_entries[info->msix_vector].vector; + if (cpu == -1) + irq_set_affinity_hint(irq, NULL); + else { + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); + irq_set_affinity_hint(irq, mask); + } + } + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int virtio_pci_freeze(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + int ret; + + ret = virtio_device_freeze(&vp_dev->vdev); + + if (!ret) + pci_disable_device(pci_dev); + return ret; +} + +static int virtio_pci_restore(struct device *dev) +{ + struct pci_dev *pci_dev = to_pci_dev(dev); + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + int ret; + + ret = pci_enable_device(pci_dev); + if (ret) + return ret; + + pci_set_master(pci_dev); + return virtio_device_restore(&vp_dev->vdev); +} + +static const struct dev_pm_ops virtio_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore) +}; +#endif + + +/* Qumranet donated their 
vendor ID for devices 0x1000 thru 0x10FF. */ +static const struct pci_device_id virtio_pci_id_table[] = { + { PCI_DEVICE(0x1af4, PCI_ANY_ID) }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); + +static void virtio_pci_release_dev(struct device *_d) +{ + struct virtio_device *vdev = dev_to_virtio(_d); + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* As struct device is a kobject, it's not safe to + * free the memory (including the reference counter itself) + * until it's release callback. */ + kfree(vp_dev); +} + +static int virtio_pci_probe(struct pci_dev *pci_dev, + const struct pci_device_id *id) +{ + struct virtio_pci_device *vp_dev; + int rc; + + /* allocate our structure and fill it out */ + vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); + if (!vp_dev) + return -ENOMEM; + + pci_set_drvdata(pci_dev, vp_dev); + vp_dev->vdev.dev.parent = &pci_dev->dev; + vp_dev->vdev.dev.release = virtio_pci_release_dev; + vp_dev->pci_dev = pci_dev; + INIT_LIST_HEAD(&vp_dev->virtqueues); + spin_lock_init(&vp_dev->lock); + + /* enable the device */ + rc = pci_enable_device(pci_dev); + if (rc) + goto err_enable_device; + + if (force_legacy) { + rc = virtio_pci_legacy_probe(vp_dev); + /* Also try modern mode if we can't map BAR0 (no IO space). */ + if (rc == -ENODEV || rc == -ENOMEM) + rc = virtio_pci_modern_probe(vp_dev); + if (rc) + goto err_probe; + } else { + rc = virtio_pci_modern_probe(vp_dev); + if (rc == -ENODEV) + rc = virtio_pci_legacy_probe(vp_dev); + if (rc) + goto err_probe; + } + + pci_set_master(pci_dev); + + rc = register_virtio_device(&vp_dev->vdev); + if (rc) + goto err_register; + + return 0; + +err_register: + if (vp_dev->ioaddr) + virtio_pci_legacy_remove(vp_dev); + else + virtio_pci_modern_remove(vp_dev); +err_probe: + pci_disable_device(pci_dev); +err_enable_device: + kfree(vp_dev); + return rc; +} + +static void virtio_pci_remove(struct pci_dev *pci_dev) +{ + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + struct device *dev = get_device(&vp_dev->vdev.dev); + + unregister_virtio_device(&vp_dev->vdev); + + if (vp_dev->ioaddr) + virtio_pci_legacy_remove(vp_dev); + else + virtio_pci_modern_remove(vp_dev); + + pci_disable_device(pci_dev); + put_device(dev); +} + +static struct pci_driver virtio_pci_driver = { + .name = "virtio-pci", + .id_table = virtio_pci_id_table, + .probe = virtio_pci_probe, + .remove = virtio_pci_remove, +#ifdef CONFIG_PM_SLEEP + .driver.pm = &virtio_pci_pm_ops, +#endif +}; + +module_pci_driver(virtio_pci_driver); + +MODULE_AUTHOR("Anthony Liguori "); +MODULE_DESCRIPTION("virtio-pci"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("1"); diff --git a/addons/virtio/src/4.4.180/virtio_pci_common.h b/addons/virtio/src/4.4.180/virtio_pci_common.h new file mode 100644 index 00000000..b976d968 --- /dev/null +++ b/addons/virtio/src/4.4.180/virtio_pci_common.h @@ -0,0 +1,167 @@ +#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H +#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H +/* + * Virtio PCI driver - APIs for common functionality for all device versions + * + * This module allows virtio devices to be used over a virtual PCI device. + * This can be used with QEMU based VMMs like KVM or Xen. + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori + * Rusty Russell + * Michael S. Tsirkin + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct virtio_pci_vq_info { + /* the actual virtqueue */ + struct virtqueue *vq; + + /* the number of entries in the queue */ + int num; + + /* the virtual address of the ring queue */ + void *queue; + + /* the list node for the virtqueues list */ + struct list_head node; + + /* MSI-X vector (or none) */ + unsigned msix_vector; +}; + +/* Our device structure */ +struct virtio_pci_device { + struct virtio_device vdev; + struct pci_dev *pci_dev; + + /* In legacy mode, these two point to within ->legacy. */ + /* Where to read and clear interrupt */ + u8 __iomem *isr; + + /* Modern only fields */ + /* The IO mapping for the PCI config space (non-legacy mode) */ + struct virtio_pci_common_cfg __iomem *common; + /* Device-specific data (non-legacy mode) */ + void __iomem *device; + /* Base of vq notifications (non-legacy mode). */ + void __iomem *notify_base; + + /* So we can sanity-check accesses. */ + size_t notify_len; + size_t device_len; + + /* Capability for when we need to map notifications per-vq. */ + int notify_map_cap; + + /* Multiply queue_notify_off by this value. (non-legacy mode). */ + u32 notify_offset_multiplier; + + int modern_bars; + + /* Legacy only field */ + /* the IO mapping for the PCI config space */ + void __iomem *ioaddr; + + /* a list of queues so we can dispatch IRQs */ + spinlock_t lock; + struct list_head virtqueues; + + /* array of all queues for house-keeping */ + struct virtio_pci_vq_info **vqs; + + /* MSI-X support */ + int msix_enabled; + int intx_enabled; + struct msix_entry *msix_entries; + cpumask_var_t *msix_affinity_masks; + /* Name strings for interrupts. This size should be enough, + * and I'm too lazy to allocate each name separately. */ + char (*msix_names)[256]; + /* Number of available vectors */ + unsigned msix_vectors; + /* Vectors allocated, excluding per-vq vectors if any */ + unsigned msix_used_vectors; + + /* Whether we have vector per vq */ + bool per_vq_vectors; + + struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, + unsigned idx, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec); + void (*del_vq)(struct virtio_pci_vq_info *info); + + u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector); +}; + +/* Constants for MSI-X */ +/* Use first vector for configuration changes, second and the rest for + * virtqueues Thus, we need at least 2 vectors for MSI. 
*/ +enum { + VP_MSIX_CONFIG_VECTOR = 0, + VP_MSIX_VQ_VECTOR = 1, +}; + +/* Convert a generic virtio device to our structure */ +static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev) +{ + return container_of(vdev, struct virtio_pci_device, vdev); +} + +/* wait for pending irq handlers */ +void vp_synchronize_vectors(struct virtio_device *vdev); +/* the notify function used when creating a virt queue */ +bool vp_notify(struct virtqueue *vq); +/* the config->del_vqs() implementation */ +void vp_del_vqs(struct virtio_device *vdev); +/* the config->find_vqs() implementation */ +int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]); +const char *vp_bus_name(struct virtio_device *vdev); + +/* Setup the affinity for a virtqueue: + * - force the affinity for per vq vector + * - OR over all affinities for shared MSI + * - ignore the affinity request if we're using INTX + */ +int vp_set_vq_affinity(struct virtqueue *vq, int cpu); + +#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY) +int virtio_pci_legacy_probe(struct virtio_pci_device *); +void virtio_pci_legacy_remove(struct virtio_pci_device *); +#else +static inline int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev) +{ + return -ENODEV; +} +static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev) +{ +} +#endif +int virtio_pci_modern_probe(struct virtio_pci_device *); +void virtio_pci_modern_remove(struct virtio_pci_device *); + +#endif diff --git a/addons/virtio/src/4.4.180/virtio_pci_legacy.c b/addons/virtio/src/4.4.180/virtio_pci_legacy.c new file mode 100644 index 00000000..48bc9797 --- /dev/null +++ b/addons/virtio/src/4.4.180/virtio_pci_legacy.c @@ -0,0 +1,267 @@ +/* + * Virtio PCI driver - legacy device support + * + * This module allows virtio devices to be used over a virtual PCI device. + * This can be used with QEMU based VMMs like KVM or Xen. + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori + * Rusty Russell + * Michael S. Tsirkin + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "virtio_pci_common.h" + +/* virtio config->get_features() implementation */ +static u64 vp_get_features(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* When someone needs more than 32 feature bits, we'll need to + * steal a bit to indicate that the rest are somewhere else. */ + return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES); +} + +/* virtio config->finalize_features() implementation */ +static int vp_finalize_features(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* Give virtio_ring a chance to accept features. */ + vring_transport_features(vdev); + + /* Make sure we don't have any features > 32 bits! */ + BUG_ON((u32)vdev->features != vdev->features); + + /* We only support 32 feature bits. 
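+	 * The BUG_ON above guarantees the 32-bit write below drops nothing.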
*/ + iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES); + + return 0; +} + +/* virtio config->get() implementation */ +static void vp_get(struct virtio_device *vdev, unsigned offset, + void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + void __iomem *ioaddr = vp_dev->ioaddr + + VIRTIO_PCI_CONFIG(vp_dev) + offset; + u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + ptr[i] = ioread8(ioaddr + i); +} + +/* the config->set() implementation. it's symmetric to the config->get() + * implementation */ +static void vp_set(struct virtio_device *vdev, unsigned offset, + const void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + void __iomem *ioaddr = vp_dev->ioaddr + + VIRTIO_PCI_CONFIG(vp_dev) + offset; + const u8 *ptr = buf; + int i; + + for (i = 0; i < len; i++) + iowrite8(ptr[i], ioaddr + i); +} + +/* config->{get,set}_status() implementations */ +static u8 vp_get_status(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); +} + +static void vp_set_status(struct virtio_device *vdev, u8 status) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* We should never be setting status to 0. */ + BUG_ON(status == 0); + iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS); +} + +static void vp_reset(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* 0 status means a reset. */ + iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS); + /* Flush out the status write, and flush in device writes, + * including MSi-X interrupts, if any. */ + ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS); + /* Flush pending VQ/configuration callbacks. */ + vp_synchronize_vectors(vdev); +} + +static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) +{ + /* Setup the vector used for configuration events */ + iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); + /* Verify we had enough resources to assign the vector */ + /* Will also flush the write out to device */ + return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR); +} + +static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, + unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec) +{ + struct virtqueue *vq; + unsigned long size; + u16 num; + int err; + + /* Select the queue we're interested in */ + iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); + + /* Check if queue is either not available or already active. 
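+	 * A zero size means the queue does not exist; a non-zero PFN means it is already in use.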
*/ + num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM); + if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN)) + return ERR_PTR(-ENOENT); + + info->num = num; + info->msix_vector = msix_vec; + + size = PAGE_ALIGN(vring_size(num, VIRTIO_PCI_VRING_ALIGN)); + info->queue = alloc_pages_exact(size, GFP_KERNEL|__GFP_ZERO); + if (info->queue == NULL) + return ERR_PTR(-ENOMEM); + + /* activate the queue */ + iowrite32(virt_to_phys(info->queue) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT, + vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); + + /* create the vring */ + vq = vring_new_virtqueue(index, info->num, + VIRTIO_PCI_VRING_ALIGN, &vp_dev->vdev, + true, info->queue, vp_notify, callback, name); + if (!vq) { + err = -ENOMEM; + goto out_activate_queue; + } + + vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY; + + if (msix_vec != VIRTIO_MSI_NO_VECTOR) { + iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + if (msix_vec == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto out_assign; + } + } + + return vq; + +out_assign: + vring_del_virtqueue(vq); +out_activate_queue: + iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); + free_pages_exact(info->queue, size); + return ERR_PTR(err); +} + +static void del_vq(struct virtio_pci_vq_info *info) +{ + struct virtqueue *vq = info->vq; + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); + unsigned long size; + + iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL); + + if (vp_dev->msix_enabled) { + iowrite16(VIRTIO_MSI_NO_VECTOR, + vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR); + /* Flush the write out to device */ + ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR); + } + + vring_del_virtqueue(vq); + + /* Select and deactivate the queue */ + iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN); + + size = PAGE_ALIGN(vring_size(info->num, VIRTIO_PCI_VRING_ALIGN)); + free_pages_exact(info->queue, size); +} + +static const struct virtio_config_ops virtio_pci_config_ops = { + .get = vp_get, + .set = vp_set, + .get_status = vp_get_status, + .set_status = vp_set_status, + .reset = vp_reset, + .find_vqs = vp_find_vqs, + .del_vqs = vp_del_vqs, + .get_features = vp_get_features, + .finalize_features = vp_finalize_features, + .bus_name = vp_bus_name, + .set_vq_affinity = vp_set_vq_affinity, +}; + +/* the PCI probing function */ +int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev) +{ + struct pci_dev *pci_dev = vp_dev->pci_dev; + int rc; + + /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ + if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) + return -ENODEV; + + if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) { + printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n", + VIRTIO_PCI_ABI_VERSION, pci_dev->revision); + return -ENODEV; + } + + rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy"); + if (rc) + return rc; + + rc = -ENOMEM; + vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); + if (!vp_dev->ioaddr) + goto err_iomap; + + vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR; + + /* we use the subsystem vendor/device id as the virtio vendor/device + * id. 
this allows us to use the same PCI vendor/device id for all + * virtio devices and to identify the particular virtio driver by + * the subsystem ids */ + vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; + vp_dev->vdev.id.device = pci_dev->subsystem_device; + + vp_dev->vdev.config = &virtio_pci_config_ops; + + vp_dev->config_vector = vp_config_vector; + vp_dev->setup_vq = setup_vq; + vp_dev->del_vq = del_vq; + + return 0; + +err_iomap: + pci_release_region(pci_dev, 0); + return rc; +} + +void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev) +{ + struct pci_dev *pci_dev = vp_dev->pci_dev; + + pci_iounmap(pci_dev, vp_dev->ioaddr); + pci_release_region(pci_dev, 0); +} diff --git a/addons/virtio/src/4.4.180/virtio_pci_modern.c b/addons/virtio/src/4.4.180/virtio_pci_modern.c new file mode 100644 index 00000000..4469202e --- /dev/null +++ b/addons/virtio/src/4.4.180/virtio_pci_modern.c @@ -0,0 +1,747 @@ +/* + * Virtio PCI driver - modern (virtio 1.0) device support + * + * This module allows virtio devices to be used over a virtual PCI device. + * This can be used with QEMU based VMMs like KVM or Xen. + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori + * Rusty Russell + * Michael S. Tsirkin + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include +#define VIRTIO_PCI_NO_LEGACY +#include "virtio_pci_common.h" + +/* + * Type-safe wrappers for io accesses. + * Use these to enforce at compile time the following spec requirement: + * + * The driver MUST access each field using the “natural” access + * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses + * for 16-bit fields and 8-bit accesses for 8-bit fields. 
+ */ +static inline u8 vp_ioread8(u8 __iomem *addr) +{ + return ioread8(addr); +} +static inline u16 vp_ioread16 (u16 __iomem *addr) +{ + return ioread16(addr); +} + +static inline u32 vp_ioread32(u32 __iomem *addr) +{ + return ioread32(addr); +} + +static inline void vp_iowrite8(u8 value, u8 __iomem *addr) +{ + iowrite8(value, addr); +} + +static inline void vp_iowrite16(u16 value, u16 __iomem *addr) +{ + iowrite16(value, addr); +} + +static inline void vp_iowrite32(u32 value, u32 __iomem *addr) +{ + iowrite32(value, addr); +} + +static void vp_iowrite64_twopart(u64 val, + __le32 __iomem *lo, __le32 __iomem *hi) +{ + vp_iowrite32((u32)val, lo); + vp_iowrite32(val >> 32, hi); +} + +static void __iomem *map_capability(struct pci_dev *dev, int off, + size_t minlen, + u32 align, + u32 start, u32 size, + size_t *len) +{ + u8 bar; + u32 offset, length; + void __iomem *p; + + pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap, + bar), + &bar); + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset), + &offset); + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length), + &length); + + if (length <= start) { + dev_err(&dev->dev, + "virtio_pci: bad capability len %u (>%u expected)\n", + length, start); + return NULL; + } + + if (length - start < minlen) { + dev_err(&dev->dev, + "virtio_pci: bad capability len %u (>=%zu expected)\n", + length, minlen); + return NULL; + } + + length -= start; + + if (start + offset < offset) { + dev_err(&dev->dev, + "virtio_pci: map wrap-around %u+%u\n", + start, offset); + return NULL; + } + + offset += start; + + if (offset & (align - 1)) { + dev_err(&dev->dev, + "virtio_pci: offset %u not aligned to %u\n", + offset, align); + return NULL; + } + + if (length > size) + length = size; + + if (len) + *len = length; + + if (minlen + offset < minlen || + minlen + offset > pci_resource_len(dev, bar)) { + dev_err(&dev->dev, + "virtio_pci: map virtio %zu@%u " + "out of range on bar %i length %lu\n", + minlen, offset, + bar, (unsigned long)pci_resource_len(dev, bar)); + return NULL; + } + + p = pci_iomap_range(dev, bar, offset, length); + if (!p) + dev_err(&dev->dev, + "virtio_pci: unable to map virtio %u@%u on bar %i\n", + length, offset, bar); + return p; +} + +/* virtio config->get_features() implementation */ +static u64 vp_get_features(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + u64 features; + + vp_iowrite32(0, &vp_dev->common->device_feature_select); + features = vp_ioread32(&vp_dev->common->device_feature); + vp_iowrite32(1, &vp_dev->common->device_feature_select); + features |= ((u64)vp_ioread32(&vp_dev->common->device_feature) << 32); + + return features; +} + +/* virtio config->finalize_features() implementation */ +static int vp_finalize_features(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* Give virtio_ring a chance to accept features. 
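+	 * The 64 feature bits are then written as two selected 32-bit halves.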
*/ + vring_transport_features(vdev); + + if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { + dev_err(&vdev->dev, "virtio: device uses modern interface " + "but does not have VIRTIO_F_VERSION_1\n"); + return -EINVAL; + } + + vp_iowrite32(0, &vp_dev->common->guest_feature_select); + vp_iowrite32((u32)vdev->features, &vp_dev->common->guest_feature); + vp_iowrite32(1, &vp_dev->common->guest_feature_select); + vp_iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature); + + return 0; +} + +/* virtio config->get() implementation */ +static void vp_get(struct virtio_device *vdev, unsigned offset, + void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + u8 b; + __le16 w; + __le32 l; + + BUG_ON(offset + len > vp_dev->device_len); + + switch (len) { + case 1: + b = ioread8(vp_dev->device + offset); + memcpy(buf, &b, sizeof b); + break; + case 2: + w = cpu_to_le16(ioread16(vp_dev->device + offset)); + memcpy(buf, &w, sizeof w); + break; + case 4: + l = cpu_to_le32(ioread32(vp_dev->device + offset)); + memcpy(buf, &l, sizeof l); + break; + case 8: + l = cpu_to_le32(ioread32(vp_dev->device + offset)); + memcpy(buf, &l, sizeof l); + l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l)); + memcpy(buf + sizeof l, &l, sizeof l); + break; + default: + BUG(); + } +} + +/* the config->set() implementation. it's symmetric to the config->get() + * implementation */ +static void vp_set(struct virtio_device *vdev, unsigned offset, + const void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + u8 b; + __le16 w; + __le32 l; + + BUG_ON(offset + len > vp_dev->device_len); + + switch (len) { + case 1: + memcpy(&b, buf, sizeof b); + iowrite8(b, vp_dev->device + offset); + break; + case 2: + memcpy(&w, buf, sizeof w); + iowrite16(le16_to_cpu(w), vp_dev->device + offset); + break; + case 4: + memcpy(&l, buf, sizeof l); + iowrite32(le32_to_cpu(l), vp_dev->device + offset); + break; + case 8: + memcpy(&l, buf, sizeof l); + iowrite32(le32_to_cpu(l), vp_dev->device + offset); + memcpy(&l, buf + sizeof l, sizeof l); + iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l); + break; + default: + BUG(); + } +} + +static u32 vp_generation(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + return vp_ioread8(&vp_dev->common->config_generation); +} + +/* config->{get,set}_status() implementations */ +static u8 vp_get_status(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + return vp_ioread8(&vp_dev->common->device_status); +} + +static void vp_set_status(struct virtio_device *vdev, u8 status) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* We should never be setting status to 0. */ + BUG_ON(status == 0); + vp_iowrite8(status, &vp_dev->common->device_status); +} + +static void vp_reset(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* 0 status means a reset. */ + vp_iowrite8(0, &vp_dev->common->device_status); + /* After writing 0 to device_status, the driver MUST wait for a read of + * device_status to return 0 before reinitializing the device. + * This will flush out the status write, and flush in device writes, + * including MSI-X interrupts, if any. + */ + while (vp_ioread8(&vp_dev->common->device_status)) + msleep(1); + /* Flush pending VQ/configuration callbacks. 
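+	 * vp_synchronize_vectors() waits out any interrupt handler still running elsewhere.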
*/ + vp_synchronize_vectors(vdev); +} + +static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) +{ + /* Setup the vector used for configuration events */ + vp_iowrite16(vector, &vp_dev->common->msix_config); + /* Verify we had enough resources to assign the vector */ + /* Will also flush the write out to device */ + return vp_ioread16(&vp_dev->common->msix_config); +} + +static size_t vring_pci_size(u16 num) +{ + /* We only need a cacheline separation. */ + return PAGE_ALIGN(vring_size(num, SMP_CACHE_BYTES)); +} + +static void *alloc_virtqueue_pages(int *num) +{ + void *pages; + + /* TODO: allocate each queue chunk individually */ + for (; *num && vring_pci_size(*num) > PAGE_SIZE; *num /= 2) { + pages = alloc_pages_exact(vring_pci_size(*num), + GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN); + if (pages) + return pages; + } + + if (!*num) + return NULL; + + /* Try to get a single page. You are my only hope! */ + return alloc_pages_exact(vring_pci_size(*num), GFP_KERNEL|__GFP_ZERO); +} + +static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, + unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec) +{ + struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common; + struct virtqueue *vq; + u16 num, off; + int err; + + if (index >= vp_ioread16(&cfg->num_queues)) + return ERR_PTR(-ENOENT); + + /* Select the queue we're interested in */ + vp_iowrite16(index, &cfg->queue_select); + + /* Check if queue is either not available or already active. */ + num = vp_ioread16(&cfg->queue_size); + if (!num || vp_ioread16(&cfg->queue_enable)) + return ERR_PTR(-ENOENT); + + if (num & (num - 1)) { + dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num); + return ERR_PTR(-EINVAL); + } + + /* get offset of notification word for this vq */ + off = vp_ioread16(&cfg->queue_notify_off); + + info->num = num; + info->msix_vector = msix_vec; + + info->queue = alloc_virtqueue_pages(&info->num); + if (info->queue == NULL) + return ERR_PTR(-ENOMEM); + + /* create the vring */ + vq = vring_new_virtqueue(index, info->num, + SMP_CACHE_BYTES, &vp_dev->vdev, + true, info->queue, vp_notify, callback, name); + if (!vq) { + err = -ENOMEM; + goto err_new_queue; + } + + /* activate the queue */ + vp_iowrite16(num, &cfg->queue_size); + vp_iowrite64_twopart(virt_to_phys(info->queue), + &cfg->queue_desc_lo, &cfg->queue_desc_hi); + vp_iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)), + &cfg->queue_avail_lo, &cfg->queue_avail_hi); + vp_iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)), + &cfg->queue_used_lo, &cfg->queue_used_hi); + + if (vp_dev->notify_base) { + /* offset should not wrap */ + if ((u64)off * vp_dev->notify_offset_multiplier + 2 + > vp_dev->notify_len) { + dev_warn(&vp_dev->pci_dev->dev, + "bad notification offset %u (x %u) " + "for queue %u > %zd", + off, vp_dev->notify_offset_multiplier, + index, vp_dev->notify_len); + err = -EINVAL; + goto err_map_notify; + } + vq->priv = (void __force *)vp_dev->notify_base + + off * vp_dev->notify_offset_multiplier; + } else { + vq->priv = (void __force *)map_capability(vp_dev->pci_dev, + vp_dev->notify_map_cap, 2, 2, + off * vp_dev->notify_offset_multiplier, 2, + NULL); + } + + if (!vq->priv) { + err = -ENOMEM; + goto err_map_notify; + } + + if (msix_vec != VIRTIO_MSI_NO_VECTOR) { + vp_iowrite16(msix_vec, &cfg->queue_msix_vector); + msix_vec = vp_ioread16(&cfg->queue_msix_vector); + if (msix_vec == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto err_assign_vector; + } + } + + 
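+	/* Queue is programmed and its MSI-X vector (if any) was accepted by the device. */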
return vq; + +err_assign_vector: + if (!vp_dev->notify_base) + pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv); +err_map_notify: + vring_del_virtqueue(vq); +err_new_queue: + free_pages_exact(info->queue, vring_pci_size(info->num)); + return ERR_PTR(err); +} + +static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtqueue *vq; + int rc = vp_find_vqs(vdev, nvqs, vqs, callbacks, names); + + if (rc) + return rc; + + /* Select and activate all queues. Has to be done last: once we do + * this, there's no way to go back except reset. + */ + list_for_each_entry(vq, &vdev->vqs, list) { + vp_iowrite16(vq->index, &vp_dev->common->queue_select); + vp_iowrite16(1, &vp_dev->common->queue_enable); + } + + return 0; +} + +static void del_vq(struct virtio_pci_vq_info *info) +{ + struct virtqueue *vq = info->vq; + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); + + vp_iowrite16(vq->index, &vp_dev->common->queue_select); + + if (vp_dev->msix_enabled) { + vp_iowrite16(VIRTIO_MSI_NO_VECTOR, + &vp_dev->common->queue_msix_vector); + /* Flush the write out to device */ + vp_ioread16(&vp_dev->common->queue_msix_vector); + } + + if (!vp_dev->notify_base) + pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv); + + vring_del_virtqueue(vq); + + free_pages_exact(info->queue, vring_pci_size(info->num)); +} + +static const struct virtio_config_ops virtio_pci_config_nodev_ops = { + .get = NULL, + .set = NULL, + .generation = vp_generation, + .get_status = vp_get_status, + .set_status = vp_set_status, + .reset = vp_reset, + .find_vqs = vp_modern_find_vqs, + .del_vqs = vp_del_vqs, + .get_features = vp_get_features, + .finalize_features = vp_finalize_features, + .bus_name = vp_bus_name, + .set_vq_affinity = vp_set_vq_affinity, +}; + +static const struct virtio_config_ops virtio_pci_config_ops = { + .get = vp_get, + .set = vp_set, + .generation = vp_generation, + .get_status = vp_get_status, + .set_status = vp_set_status, + .reset = vp_reset, + .find_vqs = vp_modern_find_vqs, + .del_vqs = vp_del_vqs, + .get_features = vp_get_features, + .finalize_features = vp_finalize_features, + .bus_name = vp_bus_name, + .set_vq_affinity = vp_set_vq_affinity, +}; + +/** + * virtio_pci_find_capability - walk capabilities to find device info. + * @dev: the pci device + * @cfg_type: the VIRTIO_PCI_CAP_* value we seek + * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO. + * + * Returns offset of the capability, or 0. + */ +static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type, + u32 ioresource_types, int *bars) +{ + int pos; + + for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); + pos > 0; + pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) { + u8 type, bar; + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, + cfg_type), + &type); + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, + bar), + &bar); + + /* Ignore structures with reserved BAR values */ + if (bar > 0x5) + continue; + + if (type == cfg_type) { + if (pci_resource_len(dev, bar) && + pci_resource_flags(dev, bar) & ioresource_types) { + *bars |= (1 << bar); + return pos; + } + } + } + return 0; +} + +/* This is part of the ABI. Don't screw with it. */ +static inline void check_offsets(void) +{ + /* Note: disk space was harmed in compilation of this function. 
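The capability walk below is plain config-space pointer chasing; a userspace mock of the same loop may make it easier to follow. The layout constants match struct virtio_pci_cap (cfg_type at byte 3, bar at byte 4), but the config-space contents here are fabricated, and the resource-length check the driver also performs is omitted:

#include <stdint.h>
#include <stdio.h>

#define CAP_ID_VNDR 0x09 /* PCI_CAP_ID_VNDR */

static int find_vndr_cap(const uint8_t cfg[256], uint8_t want_type)
{
    uint8_t pos = cfg[0x34];             /* capabilities pointer */

    while (pos) {
        if (cfg[pos] == CAP_ID_VNDR &&   /* cap_vndr */
            cfg[pos + 3] == want_type && /* cfg_type */
            cfg[pos + 4] <= 0x5)         /* skip reserved BAR numbers */
            return pos;
        pos = cfg[pos + 1];              /* cap_next */
    }
    return 0;
}

int main(void)
{
    uint8_t cfg[256] = {0};

    cfg[0x34] = 0x40;                          /* list head */
    cfg[0x40] = CAP_ID_VNDR; cfg[0x41] = 0x50; cfg[0x43] = 3; /* ISR cfg */
    cfg[0x50] = CAP_ID_VNDR; cfg[0x51] = 0x00; cfg[0x53] = 1; /* common cfg */
    cfg[0x54] = 4;                             /* lives in BAR 4 */

    printf("common cfg capability at 0x%x\n", find_vndr_cap(cfg, 1));
    return 0;
}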
*/ + BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR != + offsetof(struct virtio_pci_cap, cap_vndr)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT != + offsetof(struct virtio_pci_cap, cap_next)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN != + offsetof(struct virtio_pci_cap, cap_len)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE != + offsetof(struct virtio_pci_cap, cfg_type)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR != + offsetof(struct virtio_pci_cap, bar)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET != + offsetof(struct virtio_pci_cap, offset)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH != + offsetof(struct virtio_pci_cap, length)); + BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT != + offsetof(struct virtio_pci_notify_cap, + notify_off_multiplier)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT != + offsetof(struct virtio_pci_common_cfg, + device_feature_select)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF != + offsetof(struct virtio_pci_common_cfg, device_feature)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT != + offsetof(struct virtio_pci_common_cfg, + guest_feature_select)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF != + offsetof(struct virtio_pci_common_cfg, guest_feature)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX != + offsetof(struct virtio_pci_common_cfg, msix_config)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ != + offsetof(struct virtio_pci_common_cfg, num_queues)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS != + offsetof(struct virtio_pci_common_cfg, device_status)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION != + offsetof(struct virtio_pci_common_cfg, config_generation)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT != + offsetof(struct virtio_pci_common_cfg, queue_select)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE != + offsetof(struct virtio_pci_common_cfg, queue_size)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX != + offsetof(struct virtio_pci_common_cfg, queue_msix_vector)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE != + offsetof(struct virtio_pci_common_cfg, queue_enable)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF != + offsetof(struct virtio_pci_common_cfg, queue_notify_off)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO != + offsetof(struct virtio_pci_common_cfg, queue_desc_lo)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI != + offsetof(struct virtio_pci_common_cfg, queue_desc_hi)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO != + offsetof(struct virtio_pci_common_cfg, queue_avail_lo)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI != + offsetof(struct virtio_pci_common_cfg, queue_avail_hi)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO != + offsetof(struct virtio_pci_common_cfg, queue_used_lo)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI != + offsetof(struct virtio_pci_common_cfg, queue_used_hi)); +} + +/* the PCI probing function */ +int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev) +{ + struct pci_dev *pci_dev = vp_dev->pci_dev; + int err, common, isr, notify, device; + u32 notify_length; + u32 notify_offset; + + check_offsets(); + + /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */ + if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f) + return -ENODEV; + + if (pci_dev->device < 0x1040) { + /* Transitional devices: use the PCI subsystem device id as + * virtio device id, same as legacy driver always did. + */ + vp_dev->vdev.id.device = pci_dev->subsystem_device; + } else { + /* Modern devices: simply use PCI device id, but start from 0x1040. */ + vp_dev->vdev.id.device = pci_dev->device - 0x1040; + } + vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; + + /* check for a common config: if not, use legacy mode (bar 0). 
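BUILD_BUG_ON is the kernel's compile-time assert; in standalone C11 the same layout-pinning trick reads as below. sketch_pci_cap is a stand-in with the field order the spec mandates, not the driver's struct:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Pin a wire-format struct to the offsets the spec mandates, so a stray
 * field or padding change fails the build rather than corrupting device
 * registers at run time. */
struct sketch_pci_cap {
    uint8_t  cap_vndr;
    uint8_t  cap_next;
    uint8_t  cap_len;
    uint8_t  cfg_type;
    uint8_t  bar;
    uint8_t  padding[3];
    uint32_t offset;
    uint32_t length;
};

_Static_assert(offsetof(struct sketch_pci_cap, cfg_type) == 3,
               "cfg_type must sit at byte 3 of the capability");
_Static_assert(offsetof(struct sketch_pci_cap, offset) == 8,
               "offset must sit at byte 8");
_Static_assert(sizeof(struct sketch_pci_cap) == 16,
               "capability header is 16 bytes on the wire");

int main(void)
{
    printf("%zu\n", sizeof(struct sketch_pci_cap));
    return 0;
}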
*/ + common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG, + IORESOURCE_IO | IORESOURCE_MEM, + &vp_dev->modern_bars); + if (!common) { + dev_info(&pci_dev->dev, + "virtio_pci: leaving for legacy driver\n"); + return -ENODEV; + } + + /* If common is there, these should be too... */ + isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG, + IORESOURCE_IO | IORESOURCE_MEM, + &vp_dev->modern_bars); + notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG, + IORESOURCE_IO | IORESOURCE_MEM, + &vp_dev->modern_bars); + if (!isr || !notify) { + dev_err(&pci_dev->dev, + "virtio_pci: missing capabilities %i/%i/%i\n", + common, isr, notify); + return -EINVAL; + } + + /* Device capability is only mandatory for devices that have + * device-specific configuration. + */ + device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG, + IORESOURCE_IO | IORESOURCE_MEM, + &vp_dev->modern_bars); + + err = pci_request_selected_regions(pci_dev, vp_dev->modern_bars, + "virtio-pci-modern"); + if (err) + return err; + + err = -EINVAL; + vp_dev->common = map_capability(pci_dev, common, + sizeof(struct virtio_pci_common_cfg), 4, + 0, sizeof(struct virtio_pci_common_cfg), + NULL); + if (!vp_dev->common) + goto err_map_common; + vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1, + 0, 1, + NULL); + if (!vp_dev->isr) + goto err_map_isr; + + /* Read notify_off_multiplier from config space. */ + pci_read_config_dword(pci_dev, + notify + offsetof(struct virtio_pci_notify_cap, + notify_off_multiplier), + &vp_dev->notify_offset_multiplier); + /* Read notify length and offset from config space. */ + pci_read_config_dword(pci_dev, + notify + offsetof(struct virtio_pci_notify_cap, + cap.length), + &notify_length); + + pci_read_config_dword(pci_dev, + notify + offsetof(struct virtio_pci_notify_cap, + cap.offset), + &notify_offset); + + /* We don't know how many VQs we'll map, ahead of the time. + * If notify length is small, map it all now. + * Otherwise, map each VQ individually later. + */ + if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) { + vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2, + 0, notify_length, + &vp_dev->notify_len); + if (!vp_dev->notify_base) + goto err_map_notify; + } else { + vp_dev->notify_map_cap = notify; + } + + /* Again, we don't know how much we should map, but PAGE_SIZE + * is more than enough for all existing devices.
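Worth spelling out: the probe never reads a doorbell address from the device. Each queue's notify location is computed as cap.offset + queue_notify_off * notify_off_multiplier, and the whole window is mapped up front only when it fits in a page. A sketch of the address arithmetic with made-up numbers (notify_addr and all values here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Where does queue N kick the device? cap.offset plus the per-queue
 * notify offset scaled by the advertised multiplier; a split ring then
 * writes the 16-bit queue index there. */
static uint64_t notify_addr(uint64_t bar_base, uint32_t cap_offset,
                            uint16_t queue_notify_off, uint32_t multiplier)
{
    return bar_base + cap_offset
         + (uint64_t)queue_notify_off * multiplier;
}

int main(void)
{
    /* e.g. a device advertising multiplier 4 packs doorbells 4 bytes apart */
    printf("vq3 doorbell at %#llx\n",
           (unsigned long long)notify_addr(0xfe000000, 0x3000, 3, 4));
    return 0;
}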
+ */ + if (device) { + vp_dev->device = map_capability(pci_dev, device, 0, 4, + 0, PAGE_SIZE, + &vp_dev->device_len); + if (!vp_dev->device) + goto err_map_device; + + vp_dev->vdev.config = &virtio_pci_config_ops; + } else { + vp_dev->vdev.config = &virtio_pci_config_nodev_ops; + } + + vp_dev->config_vector = vp_config_vector; + vp_dev->setup_vq = setup_vq; + vp_dev->del_vq = del_vq; + + return 0; + +err_map_device: + if (vp_dev->notify_base) + pci_iounmap(pci_dev, vp_dev->notify_base); +err_map_notify: + pci_iounmap(pci_dev, vp_dev->isr); +err_map_isr: + pci_iounmap(pci_dev, vp_dev->common); +err_map_common: + return err; +} + +void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev) +{ + struct pci_dev *pci_dev = vp_dev->pci_dev; + + if (vp_dev->device) + pci_iounmap(pci_dev, vp_dev->device); + if (vp_dev->notify_base) + pci_iounmap(pci_dev, vp_dev->notify_base); + pci_iounmap(pci_dev, vp_dev->isr); + pci_iounmap(pci_dev, vp_dev->common); + pci_release_selected_regions(pci_dev, vp_dev->modern_bars); +} diff --git a/addons/virtio/src/4.4.180/virtio_ring.c b/addons/virtio/src/4.4.180/virtio_ring.c new file mode 100644 index 00000000..a01a41a4 --- /dev/null +++ b/addons/virtio/src/4.4.180/virtio_ring.c @@ -0,0 +1,875 @@ +/* Virtio ring implementation. + * + * Copyright 2007 Rusty Russell IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ +#include <linux/virtio.h> +#include <linux/virtio_ring.h> +#include <linux/virtio_config.h> +#include <linux/device.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/hrtimer.h> +#include <linux/kmemleak.h> + +#ifdef DEBUG +/* For development, we want to crash whenever the ring is screwed. */ +#define BAD_RING(_vq, fmt, args...) \ + do { \ + dev_err(&(_vq)->vq.vdev->dev, \ + "%s:"fmt, (_vq)->vq.name, ##args); \ + BUG(); \ + } while (0) +/* Caller is supposed to guarantee no reentry. */ +#define START_USE(_vq) \ + do { \ + if ((_vq)->in_use) \ + panic("%s:in_use = %i\n", \ + (_vq)->vq.name, (_vq)->in_use); \ + (_vq)->in_use = __LINE__; \ + } while (0) +#define END_USE(_vq) \ + do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0) +#else +#define BAD_RING(_vq, fmt, args...) \ + do { \ + dev_err(&_vq->vq.vdev->dev, \ + "%s:"fmt, (_vq)->vq.name, ##args); \ + (_vq)->broken = true; \ + } while (0) +#define START_USE(vq) +#define END_USE(vq) +#endif + +struct vring_virtqueue { + struct virtqueue vq; + + /* Actual memory layout for this queue */ + struct vring vring; + + /* Can we use weak barriers? */ + bool weak_barriers; + + /* Other side has made a mess, don't try any more. */ + bool broken; + + /* Host supports indirect buffers */ + bool indirect; + + /* Host publishes avail event idx */ + bool event; + + /* Head of free buffer list. */ + unsigned int free_head; + /* Number we've added since last sync. */ + unsigned int num_added; + + /* Last used index we've seen.
*/ + u16 last_used_idx; + + /* Last written value to avail->flags */ + u16 avail_flags_shadow; + + /* Last written value to avail->idx in guest byte order */ + u16 avail_idx_shadow; + + /* How to notify other side. FIXME: commonalize hcalls! */ + bool (*notify)(struct virtqueue *vq); + +#ifdef DEBUG + /* They're supposed to lock for us. */ + unsigned int in_use; + + /* Figure out if their kicks are too delayed. */ + bool last_add_time_valid; + ktime_t last_add_time; +#endif + + /* Tokens for callbacks. */ + void *data[]; +}; + +#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq) + +static struct vring_desc *alloc_indirect(struct virtqueue *_vq, + unsigned int total_sg, gfp_t gfp) +{ + struct vring_desc *desc; + unsigned int i; + + /* + * We require lowmem mappings for the descriptors because + * otherwise virt_to_phys will give us bogus addresses in the + * virtqueue. + */ + gfp &= ~__GFP_HIGHMEM; + + desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp); + if (!desc) + return NULL; + + for (i = 0; i < total_sg; i++) + desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1); + return desc; +} + +static inline int virtqueue_add(struct virtqueue *_vq, + struct scatterlist *sgs[], + unsigned int total_sg, + unsigned int out_sgs, + unsigned int in_sgs, + void *data, + gfp_t gfp) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + struct scatterlist *sg; + struct vring_desc *desc; + unsigned int i, n, avail, descs_used, uninitialized_var(prev); + int head; + bool indirect; + + START_USE(vq); + + BUG_ON(data == NULL); + + if (unlikely(vq->broken)) { + END_USE(vq); + return -EIO; + } + +#ifdef DEBUG + { + ktime_t now = ktime_get(); + + /* No kick or get, with .1 second between? Warn. */ + if (vq->last_add_time_valid) + WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time)) + > 100); + vq->last_add_time = now; + vq->last_add_time_valid = true; + } +#endif + + BUG_ON(total_sg > vq->vring.num); + BUG_ON(total_sg == 0); + + head = vq->free_head; + + /* If the host supports indirect descriptor tables, and we have multiple + * buffers, then go indirect. FIXME: tune this threshold */ + if (vq->indirect && total_sg > 1 && vq->vq.num_free) + desc = alloc_indirect(_vq, total_sg, gfp); + else + desc = NULL; + + if (desc) { + /* Use a single buffer which doesn't continue */ + vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT); + vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc)); + /* avoid kmemleak false positive (hidden by virt_to_phys) */ + kmemleak_ignore(desc); + vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc)); + + /* Set up rest to use this indirect table. */ + i = 0; + descs_used = 1; + indirect = true; + } else { + desc = vq->vring.desc; + i = head; + descs_used = total_sg; + indirect = false; + } + + if (vq->vq.num_free < descs_used) { + pr_debug("Can't add buf len %i - avail = %i\n", + descs_used, vq->vq.num_free); + /* FIXME: for historical reasons, we force a notify here if + * there are outgoing parts to the buffer. Presumably the + * host should service the ring ASAP. */ + if (out_sgs) + vq->notify(&vq->vq); + if (indirect) + kfree(desc); + END_USE(vq); + return -ENOSPC; + } + + /* We're about to use some buffers from the free list. 
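The "go indirect" branch above is purely an economy measure: a request with many segments costs one main-ring slot instead of one per segment, at the price of one extra allocation. In numbers (a trivial illustrative sketch, not driver code):

#include <stdio.h>

/* Main-ring descriptors consumed per request: with indirect
 * descriptors, any multi-buffer request costs a single slot;
 * without, it costs one slot per buffer. */
static unsigned descs_used(unsigned total_sg, int indirect_ok)
{
    return (indirect_ok && total_sg > 1) ? 1 : total_sg;
}

int main(void)
{
    printf("8-segment request: %u slot(s) indirect, %u direct\n",
           descs_used(8, 1), descs_used(8, 0));
    return 0;
}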
*/ + vq->vq.num_free -= descs_used; + + for (n = 0; n < out_sgs; n++) { + for (sg = sgs[n]; sg; sg = sg_next(sg)) { + desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT); + desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); + desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); + prev = i; + i = virtio16_to_cpu(_vq->vdev, desc[i].next); + } + } + for (; n < (out_sgs + in_sgs); n++) { + for (sg = sgs[n]; sg; sg = sg_next(sg)) { + desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE); + desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg)); + desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length); + prev = i; + i = virtio16_to_cpu(_vq->vdev, desc[i].next); + } + } + /* Last one doesn't continue. */ + desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT); + + /* Update free pointer */ + if (indirect) + vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next); + else + vq->free_head = i; + + /* Set token. */ + vq->data[head] = data; + + /* Put entry in available array (but don't update avail->idx until they + * do sync). */ + avail = vq->avail_idx_shadow & (vq->vring.num - 1); + vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head); + + /* Descriptors and available array need to be set before we expose the + * new available array entries. */ + virtio_wmb(vq->weak_barriers); + vq->avail_idx_shadow++; + vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow); + vq->num_added++; + + pr_debug("Added buffer head %i to %p\n", head, vq); + END_USE(vq); + + /* This is very unlikely, but theoretically possible. Kick + * just in case. */ + if (unlikely(vq->num_added == (1 << 16) - 1)) + virtqueue_kick(_vq); + + return 0; +} + +/** + * virtqueue_add_sgs - expose buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sgs: array of terminated scatterlists. + * @out_num: the number of scatterlists readable by other side + * @in_num: the number of scatterlists which are writable (after readable ones) + * @data: the token identifying the buffer. + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). + */ +int virtqueue_add_sgs(struct virtqueue *_vq, + struct scatterlist *sgs[], + unsigned int out_sgs, + unsigned int in_sgs, + void *data, + gfp_t gfp) +{ + unsigned int i, total_sg = 0; + + /* Count them first. */ + for (i = 0; i < out_sgs + in_sgs; i++) { + struct scatterlist *sg; + for (sg = sgs[i]; sg; sg = sg_next(sg)) + total_sg++; + } + return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp); +} +EXPORT_SYMBOL_GPL(virtqueue_add_sgs); + +/** + * virtqueue_add_outbuf - expose output buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sg: scatterlist (must be well-formed and terminated!) + * @num: the number of entries in @sg readable by other side + * @data: the token identifying the buffer. + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). 
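The ordering in this stretch is the heart of the producer protocol: descriptors and the avail entry are written first, then virtio_wmb() fences, then avail->idx moves. The same discipline in portable C11 atomics, on a toy ring (the toy_* names are illustrative, not driver structures):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SZ 256 /* power of two, so idx & (RING_SZ - 1) wraps cleanly */

struct toy_ring {
    uint16_t entries[RING_SZ];
    _Atomic uint16_t idx; /* free-running; consumers mask it */
};

static void toy_publish(struct toy_ring *r, uint16_t head)
{
    uint16_t idx = atomic_load_explicit(&r->idx, memory_order_relaxed);

    r->entries[idx & (RING_SZ - 1)] = head;
    /* the entry must be visible before the index that advertises it */
    atomic_store_explicit(&r->idx, (uint16_t)(idx + 1), memory_order_release);
}

int main(void)
{
    static struct toy_ring r;

    toy_publish(&r, 7);
    toy_publish(&r, 9);
    printf("idx=%u, entries[0]=%u\n",
           (unsigned)atomic_load(&r.idx), r.entries[0]);
    return 0;
}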
+ */ +int virtqueue_add_outbuf(struct virtqueue *vq, + struct scatterlist *sg, unsigned int num, + void *data, + gfp_t gfp) +{ + return virtqueue_add(vq, &sg, num, 1, 0, data, gfp); +} +EXPORT_SYMBOL_GPL(virtqueue_add_outbuf); + +/** + * virtqueue_add_inbuf - expose input buffers to other end + * @vq: the struct virtqueue we're talking about. + * @sg: scatterlist (must be well-formed and terminated!) + * @num: the number of entries in @sg writable by other side + * @data: the token identifying the buffer. + * @gfp: how to do memory allocations (if necessary). + * + * Caller must ensure we don't call this with other virtqueue operations + * at the same time (except where noted). + * + * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO). + */ +int virtqueue_add_inbuf(struct virtqueue *vq, + struct scatterlist *sg, unsigned int num, + void *data, + gfp_t gfp) +{ + return virtqueue_add(vq, &sg, num, 0, 1, data, gfp); +} +EXPORT_SYMBOL_GPL(virtqueue_add_inbuf); + +/** + * virtqueue_kick_prepare - first half of split virtqueue_kick call. + * @vq: the struct virtqueue + * + * Instead of virtqueue_kick(), you can do: + * if (virtqueue_kick_prepare(vq)) + * virtqueue_notify(vq); + * + * This is sometimes useful because the virtqueue_kick_prepare() needs + * to be serialized, but the actual virtqueue_notify() call does not. + */ +bool virtqueue_kick_prepare(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + u16 new, old; + bool needs_kick; + + START_USE(vq); + /* We need to expose available array entries before checking avail + * event. */ + virtio_mb(vq->weak_barriers); + + old = vq->avail_idx_shadow - vq->num_added; + new = vq->avail_idx_shadow; + vq->num_added = 0; + +#ifdef DEBUG + if (vq->last_add_time_valid) { + WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), + vq->last_add_time)) > 100); + } + vq->last_add_time_valid = false; +#endif + + if (vq->event) { + needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)), + new, old); + } else { + needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY)); + } + END_USE(vq); + return needs_kick; +} +EXPORT_SYMBOL_GPL(virtqueue_kick_prepare); + +/** + * virtqueue_notify - second half of split virtqueue_kick call. + * @vq: the struct virtqueue + * + * This does not need to be serialized. + * + * Returns false if host notify failed or queue is broken, otherwise true. + */ +bool virtqueue_notify(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + if (unlikely(vq->broken)) + return false; + + /* Prod other side to tell it about changes. */ + if (!vq->notify(_vq)) { + vq->broken = true; + return false; + } + return true; +} +EXPORT_SYMBOL_GPL(virtqueue_notify); + +/** + * virtqueue_kick - update after add_buf + * @vq: the struct virtqueue + * + * After one or more virtqueue_add_* calls, invoke this to kick + * the other side. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + * + * Returns false if kick failed, otherwise true. + */ +bool virtqueue_kick(struct virtqueue *vq) +{ + if (virtqueue_kick_prepare(vq)) + return virtqueue_notify(vq); + return true; +} +EXPORT_SYMBOL_GPL(virtqueue_kick); + +static void detach_buf(struct vring_virtqueue *vq, unsigned int head) +{ + unsigned int i; + + /* Clear data ptr. 
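virtqueue_kick_prepare() leans on the spec's event-index test, usually written as vring_need_event(). The whole trick is unsigned 16-bit window arithmetic, which the sketch below exercises with concrete indices:

#include <stdint.h>
#include <stdio.h>

/* Kick only if the device's chosen wake-up point (event_idx) falls
 * inside the window of entries published since the last kick. All
 * arithmetic is mod 2^16, so wrap-around is handled for free. */
static int need_event(uint16_t event_idx, uint16_t new, uint16_t old)
{
    return (uint16_t)(new - event_idx - 1) < (uint16_t)(new - old);
}

int main(void)
{
    /* device asked to be woken at idx 5; we moved avail from 4 to 8 */
    printf("%d\n", need_event(5, 8, 4));  /* 1: kick */
    /* device asked for idx 20; nothing it wants is published yet */
    printf("%d\n", need_event(20, 8, 4)); /* 0: skip the notification */
    return 0;
}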
*/ + vq->data[head] = NULL; + + /* Put back on free list: find end */ + i = head; + + /* Free the indirect table */ + if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)) + kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr))); + + while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) { + i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next); + vq->vq.num_free++; + } + + vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head); + vq->free_head = head; + /* Plus final descriptor */ + vq->vq.num_free++; +} + +static inline bool more_used(const struct vring_virtqueue *vq) +{ + return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx); +} + +/** + * virtqueue_get_buf - get the next used buffer + * @vq: the struct virtqueue we're talking about. + * @len: the length written into the buffer + * + * If the driver wrote data into the buffer, @len will be set to the + * amount written. This means you don't need to clear the buffer + * beforehand to ensure there's no data leakage in the case of short + * writes. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + * + * Returns NULL if there are no used buffers, or the "data" token + * handed to virtqueue_add_*(). + */ +void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + void *ret; + unsigned int i; + u16 last_used; + + START_USE(vq); + + if (unlikely(vq->broken)) { + END_USE(vq); + return NULL; + } + + if (!more_used(vq)) { + pr_debug("No more buffers in queue\n"); + END_USE(vq); + return NULL; + } + + /* Only get used array entries after they have been exposed by host. */ + virtio_rmb(vq->weak_barriers); + + last_used = (vq->last_used_idx & (vq->vring.num - 1)); + i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id); + *len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len); + + if (unlikely(i >= vq->vring.num)) { + BAD_RING(vq, "id %u out of range\n", i); + return NULL; + } + if (unlikely(!vq->data[i])) { + BAD_RING(vq, "id %u is not a head!\n", i); + return NULL; + } + + /* detach_buf clears data, so grab it now. */ + ret = vq->data[i]; + detach_buf(vq, i); + vq->last_used_idx++; + /* If we expect an interrupt for the next entry, tell host + * by writing event index and flush out the write before + * the read in the next get_buf call. */ + if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { + vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx); + virtio_mb(vq->weak_barriers); + } + +#ifdef DEBUG + vq->last_add_time_valid = false; +#endif + + END_USE(vq); + return ret; +} +EXPORT_SYMBOL_GPL(virtqueue_get_buf); + +/** + * virtqueue_disable_cb - disable callbacks + * @vq: the struct virtqueue we're talking about. + * + * Note that this is not necessarily synchronous, hence unreliable and only + * useful as an optimization. + * + * Unlike other operations, this need not be serialized. 
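detach_buf() never clears descriptors; it just splices the spent chain back onto the free list by walking the NEXT flags to the tail. A toy version of that recycling (toy_* names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define F_NEXT 1 /* stand-in for VRING_DESC_F_NEXT */

struct toy_desc { uint16_t flags, next; };

/* Walk the chain to its tail, point the tail at the old free head,
 * and make the chain's head the new free head. */
static unsigned toy_detach(struct toy_desc *d, uint16_t head,
                           uint16_t *free_head)
{
    unsigned freed = 1; /* the final descriptor */
    uint16_t i = head;

    while (d[i].flags & F_NEXT) {
        i = d[i].next;
        freed++;
    }
    d[i].next = *free_head;
    *free_head = head;
    return freed;
}

int main(void)
{
    struct toy_desc d[4] = {
        {F_NEXT, 1}, {F_NEXT, 2}, {0, 0}, {0, 0} /* chain 0->1->2 */
    };
    uint16_t free_head = 3;

    printf("freed %u descriptors\n", toy_detach(d, 0, &free_head));
    return 0;
}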
+ */ +void virtqueue_disable_cb(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) { + vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; + if (!vq->event) + vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); + } + +} +EXPORT_SYMBOL_GPL(virtqueue_disable_cb); + +/** + * virtqueue_enable_cb_prepare - restart callbacks after disable_cb + * @vq: the struct virtqueue we're talking about. + * + * This re-enables callbacks; it returns current queue state + * in an opaque unsigned value. This value should be later tested by + * virtqueue_poll, to detect a possible race between the driver checking for + * more work, and enabling callbacks. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + */ +unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + u16 last_used_idx; + + START_USE(vq); + + /* We optimistically turn back on interrupts, then check if there was + * more to do. */ + /* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to + * either clear the flags bit or point the event index at the next + * entry. Always do both to keep code simple. */ + if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { + vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; + if (!vq->event) + vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); + } + vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx); + END_USE(vq); + return last_used_idx; +} +EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); + +/** + * virtqueue_poll - query pending used buffers + * @vq: the struct virtqueue we're talking about. + * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). + * + * Returns "true" if there are pending used buffers in the queue. + * + * This does not need to be serialized. + */ +bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + virtio_mb(vq->weak_barriers); + return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx); +} +EXPORT_SYMBOL_GPL(virtqueue_poll); + +/** + * virtqueue_enable_cb - restart callbacks after disable_cb. + * @vq: the struct virtqueue we're talking about. + * + * This re-enables callbacks; it returns "false" if there are pending + * buffers in the queue, to detect a possible race between the driver + * checking for more work, and enabling callbacks. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + */ +bool virtqueue_enable_cb(struct virtqueue *_vq) +{ + unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq); + return !virtqueue_poll(_vq, last_used_idx); +} +EXPORT_SYMBOL_GPL(virtqueue_enable_cb); + +/** + * virtqueue_enable_cb_delayed - restart callbacks after disable_cb. + * @vq: the struct virtqueue we're talking about. + * + * This re-enables callbacks but hints to the other side to delay + * interrupts until most of the available buffers have been processed; + * it returns "false" if there are many pending buffers in the queue, + * to detect a possible race between the driver checking for more work, + * and enabling callbacks. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). 
+ */ +bool virtqueue_enable_cb_delayed(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + u16 bufs; + + START_USE(vq); + + /* We optimistically turn back on interrupts, then check if there was + * more to do. */ + /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to + * either clear the flags bit or point the event index at the next + * entry. Always update the event index to keep code simple. */ + if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) { + vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT; + if (!vq->event) + vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow); + } + /* TODO: tune this threshold */ + bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4; + vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs); + virtio_mb(vq->weak_barriers); + if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) { + END_USE(vq); + return false; + } + + END_USE(vq); + return true; +} +EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed); + +/** + * virtqueue_detach_unused_buf - detach first unused buffer + * @vq: the struct virtqueue we're talking about. + * + * Returns NULL or the "data" token handed to virtqueue_add_*(). + * This is not valid on an active queue; it is useful only for device + * shutdown. + */ +void *virtqueue_detach_unused_buf(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + unsigned int i; + void *buf; + + START_USE(vq); + + for (i = 0; i < vq->vring.num; i++) { + if (!vq->data[i]) + continue; + /* detach_buf clears data, so grab it now. */ + buf = vq->data[i]; + detach_buf(vq, i); + vq->avail_idx_shadow--; + vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow); + END_USE(vq); + return buf; + } + /* That should have freed everything. */ + BUG_ON(vq->vq.num_free != vq->vring.num); + + END_USE(vq); + return NULL; +} +EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf); + +irqreturn_t vring_interrupt(int irq, void *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + if (!more_used(vq)) { + pr_debug("virtqueue interrupt with no work for %p\n", vq); + return IRQ_NONE; + } + + if (unlikely(vq->broken)) + return IRQ_HANDLED; + + pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback); + if (vq->vq.callback) + vq->vq.callback(&vq->vq); + + return IRQ_HANDLED; +} +EXPORT_SYMBOL_GPL(vring_interrupt); + +struct virtqueue *vring_new_virtqueue(unsigned int index, + unsigned int num, + unsigned int vring_align, + struct virtio_device *vdev, + bool weak_barriers, + void *pages, + bool (*notify)(struct virtqueue *), + void (*callback)(struct virtqueue *), + const char *name) +{ + struct vring_virtqueue *vq; + unsigned int i; + + /* We assume num is a power of 2. 
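The re-check at the end of virtqueue_enable_cb_delayed() has to survive 16-bit index wrap-around, which is why the difference is cast to u16 before comparing against the 3/4 threshold. Two concrete cases near the wrap point:

#include <stdint.h>
#include <stdio.h>

/* After asking to be interrupted once ~3/4 of the backlog is consumed,
 * the driver re-checks how far used->idx already advanced; the u16
 * cast keeps the distance correct across wrap-around. */
static int raced(uint16_t used_idx, uint16_t last_used, uint16_t bufs)
{
    return (uint16_t)(used_idx - last_used) > bufs;
}

int main(void)
{
    /* indices straddling the 16-bit wrap: 0xfffe -> 0x0003 is 5 entries */
    printf("%d\n", raced(0x0003, 0xfffe, 4)); /* 1: already past the hint */
    printf("%d\n", raced(0x0003, 0xfffe, 8)); /* 0: the hint still holds */
    return 0;
}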
*/ + if (num & (num - 1)) { + dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num); + return NULL; + } + + vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL); + if (!vq) + return NULL; + + vring_init(&vq->vring, num, pages, vring_align); + vq->vq.callback = callback; + vq->vq.vdev = vdev; + vq->vq.name = name; + vq->vq.num_free = num; + vq->vq.index = index; + vq->notify = notify; + vq->weak_barriers = weak_barriers; + vq->broken = false; + vq->last_used_idx = 0; + vq->avail_flags_shadow = 0; + vq->avail_idx_shadow = 0; + vq->num_added = 0; + list_add_tail(&vq->vq.list, &vdev->vqs); +#ifdef DEBUG + vq->in_use = false; + vq->last_add_time_valid = false; +#endif + + vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC); + vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); + + /* No callback? Tell other side not to bother us. */ + if (!callback) { + vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT; + if (!vq->event) + vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow); + } + + /* Put everything in free lists. */ + vq->free_head = 0; + for (i = 0; i < num-1; i++) { + vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1); + vq->data[i] = NULL; + } + vq->data[i] = NULL; + + return &vq->vq; +} +EXPORT_SYMBOL_GPL(vring_new_virtqueue); + +void vring_del_virtqueue(struct virtqueue *vq) +{ + list_del(&vq->list); + kfree(to_vvq(vq)); +} +EXPORT_SYMBOL_GPL(vring_del_virtqueue); + +/* Manipulates transport-specific feature bits. */ +void vring_transport_features(struct virtio_device *vdev) +{ + unsigned int i; + + for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) { + switch (i) { + case VIRTIO_RING_F_INDIRECT_DESC: + break; + case VIRTIO_RING_F_EVENT_IDX: + break; + case VIRTIO_F_VERSION_1: + break; + default: + /* We don't understand this bit. */ + __virtio_clear_bit(vdev, i); + } + } +} +EXPORT_SYMBOL_GPL(vring_transport_features); + +/** + * virtqueue_get_vring_size - return the size of the virtqueue's vring + * @vq: the struct virtqueue containing the vring of interest. + * + * Returns the size of the vring. This is mainly used for boasting to + * userspace. Unlike other operations, this need not be serialized. + */ +unsigned int virtqueue_get_vring_size(struct virtqueue *_vq) +{ + + struct vring_virtqueue *vq = to_vvq(_vq); + + return vq->vring.num; +} +EXPORT_SYMBOL_GPL(virtqueue_get_vring_size); + +bool virtqueue_is_broken(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + return vq->broken; +} +EXPORT_SYMBOL_GPL(virtqueue_is_broken); + +/* + * This should prevent the device from being used, allowing drivers to + * recover. You may need to grab appropriate locks to flush. 
+ */ +void virtio_break_device(struct virtio_device *dev) +{ + struct virtqueue *_vq; + + list_for_each_entry(_vq, &dev->vqs, list) { + struct vring_virtqueue *vq = to_vvq(_vq); + vq->broken = true; + } +} +EXPORT_SYMBOL_GPL(virtio_break_device); + +void *virtqueue_get_avail(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + return vq->vring.avail; +} +EXPORT_SYMBOL_GPL(virtqueue_get_avail); + +void *virtqueue_get_used(struct virtqueue *_vq) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + + return vq->vring.used; +} +EXPORT_SYMBOL_GPL(virtqueue_get_used); + +MODULE_LICENSE("GPL"); diff --git a/addons/virtio/src/4.4.180/virtio_scsi.c b/addons/virtio/src/4.4.180/virtio_scsi.c new file mode 100644 index 00000000..92374277 --- /dev/null +++ b/addons/virtio/src/4.4.180/virtio_scsi.c @@ -0,0 +1,1200 @@ +/* + * Virtio SCSI HBA driver + * + * Copyright IBM Corp. 2010 + * Copyright Red Hat, Inc. 2011 + * + * Authors: + * Stefan Hajnoczi + * Paolo Bonzini + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/mempool.h> +#include <linux/virtio.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_config.h> +#include <linux/virtio_scsi.h> +#include <linux/cpu.h> +#include <linux/blkdev.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_tcq.h> +#include <scsi/scsi_devinfo.h> +#include <linux/seqlock.h> + +#define VIRTIO_SCSI_MEMPOOL_SZ 64 +#define VIRTIO_SCSI_EVENT_LEN 8 +#define VIRTIO_SCSI_VQ_BASE 2 + +/* Command queue element */ +struct virtio_scsi_cmd { + struct scsi_cmnd *sc; + struct completion *comp; + union { + struct virtio_scsi_cmd_req cmd; + struct virtio_scsi_cmd_req_pi cmd_pi; + struct virtio_scsi_ctrl_tmf_req tmf; + struct virtio_scsi_ctrl_an_req an; + } req; + union { + struct virtio_scsi_cmd_resp cmd; + struct virtio_scsi_ctrl_tmf_resp tmf; + struct virtio_scsi_ctrl_an_resp an; + struct virtio_scsi_event evt; + } resp; +} ____cacheline_aligned_in_smp; + +struct virtio_scsi_event_node { + struct virtio_scsi *vscsi; + struct virtio_scsi_event event; + struct work_struct work; +}; + +struct virtio_scsi_vq { + /* Protects vq */ + spinlock_t vq_lock; + + struct virtqueue *vq; +}; + +/* + * Per-target queue state. + * + * This struct holds the data needed by the queue steering policy. When a + * target is sent multiple requests, we need to drive them to the same queue so + * that FIFO processing order is kept. However, if a target was idle, we can + * choose a queue arbitrarily. In this case the queue is chosen according to + * the current VCPU, so the driver expects the number of request queues to be + * equal to the number of VCPUs. This makes it easy and fast to select the + * queue, and also lets the driver optimize the IRQ affinity for the virtqueues + * (each virtqueue's affinity is set to the CPU that "owns" the queue). + * + * tgt_seq is held to serialize reading and writing req_vq. + * + * Decrements of reqs are never concurrent with writes of req_vq: before the + * decrement reqs will be != 0; after the decrement the virtqueue completion + * routine will not use the req_vq so it can be changed by a new request. + * Thus they can happen outside the tgt_seq, provided of course we make reqs + * an atomic_t. + */ +struct virtio_scsi_target_state { + seqcount_t tgt_seq; + + /* Count of outstanding requests. */ + atomic_t reqs; + + /* Currently active virtqueue for requests sent to this target.
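The tgt_seq discipline described in this comment is the classic seqcount: writers hold the counter odd while they swap req_vq, readers retry on any change. A self-contained C11 rendition (toy_* names are illustrative, and the reqs counter plus the IRQ disabling that the real virtscsi_pick_vq() adds are omitted):

#include <stdatomic.h>
#include <stdio.h>

struct toy_tgt {
    _Atomic unsigned seq;   /* odd while a writer is inside */
    _Atomic(void *) req_vq; /* the protected choice of queue */
};

static void *toy_read(struct toy_tgt *t)
{
    unsigned start;
    void *v;

    do {
        while ((start = atomic_load(&t->seq)) & 1)
            ; /* writer in progress: spin */
        v = atomic_load(&t->req_vq);
    } while (atomic_load(&t->seq) != start); /* changed underneath: retry */
    return v;
}

static void toy_write(struct toy_tgt *t, void *vq)
{
    atomic_fetch_add(&t->seq, 1); /* -> odd: readers back off */
    atomic_store(&t->req_vq, vq);
    atomic_fetch_add(&t->seq, 1); /* -> even: settled */
}

int main(void)
{
    struct toy_tgt t = {0};
    int q0;

    toy_write(&t, &q0);
    printf("reader sees %p\n", toy_read(&t));
    return 0;
}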
*/ + struct virtio_scsi_vq *req_vq; +}; + +/* Driver instance state */ +struct virtio_scsi { + struct virtio_device *vdev; + + /* Get some buffers ready for event vq */ + struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN]; + + u32 num_queues; + + /* If the affinity hint is set for virtqueues */ + bool affinity_hint_set; + + /* CPU hotplug notifier */ + struct notifier_block nb; + + /* Protected by event_vq lock */ + bool stop_events; + + struct virtio_scsi_vq ctrl_vq; + struct virtio_scsi_vq event_vq; + struct virtio_scsi_vq req_vqs[]; +}; + +static struct kmem_cache *virtscsi_cmd_cache; +static mempool_t *virtscsi_cmd_pool; + +static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev) +{ + return vdev->priv; +} + +static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid) +{ + if (!resid) + return; + + if (!scsi_bidi_cmnd(sc)) { + scsi_set_resid(sc, resid); + return; + } + + scsi_in(sc)->resid = min(resid, scsi_in(sc)->length); + scsi_out(sc)->resid = resid - scsi_in(sc)->resid; +} + +/** + * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done + * + * Called with vq_lock held. + */ +static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf) +{ + struct virtio_scsi_cmd *cmd = buf; + struct scsi_cmnd *sc = cmd->sc; + struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd; + struct virtio_scsi_target_state *tgt = + scsi_target(sc->device)->hostdata; + + dev_dbg(&sc->device->sdev_gendev, + "cmd %p response %u status %#02x sense_len %u\n", + sc, resp->response, resp->status, resp->sense_len); + + sc->result = resp->status; + virtscsi_compute_resid(sc, virtio32_to_cpu(vscsi->vdev, resp->resid)); + switch (resp->response) { + case VIRTIO_SCSI_S_OK: + set_host_byte(sc, DID_OK); + break; + case VIRTIO_SCSI_S_OVERRUN: + set_host_byte(sc, DID_ERROR); + break; + case VIRTIO_SCSI_S_ABORTED: + set_host_byte(sc, DID_ABORT); + break; + case VIRTIO_SCSI_S_BAD_TARGET: + set_host_byte(sc, DID_BAD_TARGET); + break; + case VIRTIO_SCSI_S_RESET: + set_host_byte(sc, DID_RESET); + break; + case VIRTIO_SCSI_S_BUSY: + set_host_byte(sc, DID_BUS_BUSY); + break; + case VIRTIO_SCSI_S_TRANSPORT_FAILURE: + set_host_byte(sc, DID_TRANSPORT_DISRUPTED); + break; + case VIRTIO_SCSI_S_TARGET_FAILURE: + set_host_byte(sc, DID_TARGET_FAILURE); + break; + case VIRTIO_SCSI_S_NEXUS_FAILURE: + set_host_byte(sc, DID_NEXUS_FAILURE); + break; + default: + scmd_printk(KERN_WARNING, sc, "Unknown response %d", + resp->response); + /* fall through */ + case VIRTIO_SCSI_S_FAILURE: + set_host_byte(sc, DID_ERROR); + break; + } + + WARN_ON(virtio32_to_cpu(vscsi->vdev, resp->sense_len) > + VIRTIO_SCSI_SENSE_SIZE); + if (sc->sense_buffer) { + memcpy(sc->sense_buffer, resp->sense, + min_t(u32, + virtio32_to_cpu(vscsi->vdev, resp->sense_len), + VIRTIO_SCSI_SENSE_SIZE)); + if (resp->sense_len) + set_driver_byte(sc, DRIVER_SENSE); + } + + sc->scsi_done(sc); + + atomic_dec(&tgt->reqs); +} + +static void virtscsi_vq_done(struct virtio_scsi *vscsi, + struct virtio_scsi_vq *virtscsi_vq, + void (*fn)(struct virtio_scsi *vscsi, void *buf)) +{ + void *buf; + unsigned int len; + unsigned long flags; + struct virtqueue *vq = virtscsi_vq->vq; + + spin_lock_irqsave(&virtscsi_vq->vq_lock, flags); + do { + virtqueue_disable_cb(vq); + while ((buf = virtqueue_get_buf(vq, &len)) != NULL) + fn(vscsi, buf); + + if (unlikely(virtqueue_is_broken(vq))) + break; + } while (!virtqueue_enable_cb(vq)); + spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags); +} + +static void virtscsi_req_done(struct virtqueue *vq) 
+{ + struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + int index = vq->index - VIRTIO_SCSI_VQ_BASE; + struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index]; + + virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); +}; + +static void virtscsi_poll_requests(struct virtio_scsi *vscsi) +{ + int i, num_vqs; + + num_vqs = vscsi->num_queues; + for (i = 0; i < num_vqs; i++) + virtscsi_vq_done(vscsi, &vscsi->req_vqs[i], + virtscsi_complete_cmd); +} + +static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf) +{ + struct virtio_scsi_cmd *cmd = buf; + + if (cmd->comp) + complete_all(cmd->comp); +} + +static void virtscsi_ctrl_done(struct virtqueue *vq) +{ + struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + + virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free); +}; + +static void virtscsi_handle_event(struct work_struct *work); + +static int virtscsi_kick_event(struct virtio_scsi *vscsi, + struct virtio_scsi_event_node *event_node) +{ + int err; + struct scatterlist sg; + unsigned long flags; + + INIT_WORK(&event_node->work, virtscsi_handle_event); + sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); + + spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); + + err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node, + GFP_ATOMIC); + if (!err) + virtqueue_kick(vscsi->event_vq.vq); + + spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags); + + return err; +} + +static int virtscsi_kick_event_all(struct virtio_scsi *vscsi) +{ + int i; + + for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) { + vscsi->event_list[i].vscsi = vscsi; + virtscsi_kick_event(vscsi, &vscsi->event_list[i]); + } + + return 0; +} + +static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi) +{ + int i; + + /* Stop scheduling work before calling cancel_work_sync. 
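virtscsi_vq_done() above uses the standard race-free drain shape: disable callbacks, reap until empty, re-enable, and loop again if something slipped in between. The same shape against a toy queue (everything toy_* here is a stand-in, not the virtqueue API):

#include <stdbool.h>
#include <stdio.h>

struct toy_vq {
    int pending; /* completions waiting to be reaped */
    bool cb_enabled;
};

static void *toy_get_buf(struct toy_vq *vq)
{
    static int token;
    return vq->pending ? (vq->pending--, (void *)&token) : NULL;
}

static void toy_disable_cb(struct toy_vq *vq) { vq->cb_enabled = false; }

static bool toy_enable_cb(struct toy_vq *vq)
{
    vq->cb_enabled = true;
    return vq->pending == 0; /* false: something arrived meanwhile */
}

static void reap(void *buf) { (void)buf; puts("completed one request"); }

int main(void)
{
    struct toy_vq vq = { .pending = 3, .cb_enabled = true };
    void *buf;

    do {
        toy_disable_cb(&vq);
        while ((buf = toy_get_buf(&vq)) != NULL)
            reap(buf);
    } while (!toy_enable_cb(&vq)); /* raced: go around again */
    return 0;
}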
*/ + spin_lock_irq(&vscsi->event_vq.vq_lock); + vscsi->stop_events = true; + spin_unlock_irq(&vscsi->event_vq.vq_lock); + + for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) + cancel_work_sync(&vscsi->event_list[i].work); +} + +static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, + struct virtio_scsi_event *event) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + unsigned int target = event->lun[1]; + unsigned int lun = (event->lun[2] << 8) | event->lun[3]; + + switch (virtio32_to_cpu(vscsi->vdev, event->reason)) { + case VIRTIO_SCSI_EVT_RESET_RESCAN: + scsi_add_device(shost, 0, target, lun); + break; + case VIRTIO_SCSI_EVT_RESET_REMOVED: + sdev = scsi_device_lookup(shost, 0, target, lun); + if (sdev) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } else { + pr_err("SCSI device %d 0 %d %d not found\n", + shost->host_no, target, lun); + } + break; + default: + pr_info("Unsupport virtio scsi event reason %x\n", event->reason); + } +} + +static void virtscsi_handle_param_change(struct virtio_scsi *vscsi, + struct virtio_scsi_event *event) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + unsigned int target = event->lun[1]; + unsigned int lun = (event->lun[2] << 8) | event->lun[3]; + u8 asc = virtio32_to_cpu(vscsi->vdev, event->reason) & 255; + u8 ascq = virtio32_to_cpu(vscsi->vdev, event->reason) >> 8; + + sdev = scsi_device_lookup(shost, 0, target, lun); + if (!sdev) { + pr_err("SCSI device %d 0 %d %d not found\n", + shost->host_no, target, lun); + return; + } + + /* Handle "Parameters changed", "Mode parameters changed", and + "Capacity data has changed". */ + if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09)) + scsi_rescan_device(&sdev->sdev_gendev); + + scsi_device_put(sdev); +} + +static void virtscsi_handle_event(struct work_struct *work) +{ + struct virtio_scsi_event_node *event_node = + container_of(work, struct virtio_scsi_event_node, work); + struct virtio_scsi *vscsi = event_node->vscsi; + struct virtio_scsi_event *event = &event_node->event; + + if (event->event & + cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) { + event->event &= ~cpu_to_virtio32(vscsi->vdev, + VIRTIO_SCSI_T_EVENTS_MISSED); + scsi_scan_host(virtio_scsi_host(vscsi->vdev)); + } + + switch (virtio32_to_cpu(vscsi->vdev, event->event)) { + case VIRTIO_SCSI_T_NO_EVENT: + break; + case VIRTIO_SCSI_T_TRANSPORT_RESET: + virtscsi_handle_transport_reset(vscsi, event); + break; + case VIRTIO_SCSI_T_PARAM_CHANGE: + virtscsi_handle_param_change(vscsi, event); + break; + default: + pr_err("Unsupport virtio scsi event %x\n", event->event); + } + virtscsi_kick_event(vscsi, event_node); +} + +static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf) +{ + struct virtio_scsi_event_node *event_node = buf; + + if (!vscsi->stop_events) + queue_work(system_freezable_wq, &event_node->work); +} + +static void virtscsi_event_done(struct virtqueue *vq) +{ + struct Scsi_Host *sh = virtio_scsi_host(vq->vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + + virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event); +}; + +/** + * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue + * @vq : the struct virtqueue we're talking about + * @cmd : command structure + * @req_size : size of the request buffer + * @resp_size : size of the response buffer + */ +static int virtscsi_add_cmd(struct virtqueue *vq, + struct virtio_scsi_cmd *cmd, + size_t req_size, size_t resp_size) +{ + struct 
scsi_cmnd *sc = cmd->sc; + struct scatterlist *sgs[6], req, resp; + struct sg_table *out, *in; + unsigned out_num = 0, in_num = 0; + + out = in = NULL; + + if (sc && sc->sc_data_direction != DMA_NONE) { + if (sc->sc_data_direction != DMA_FROM_DEVICE) + out = &scsi_out(sc)->table; + if (sc->sc_data_direction != DMA_TO_DEVICE) + in = &scsi_in(sc)->table; + } + + /* Request header. */ + sg_init_one(&req, &cmd->req, req_size); + sgs[out_num++] = &req; + + /* Data-out buffer. */ + if (out) { + /* Place WRITE protection SGLs before Data OUT payload */ + if (scsi_prot_sg_count(sc)) + sgs[out_num++] = scsi_prot_sglist(sc); + sgs[out_num++] = out->sgl; + } + + /* Response header. */ + sg_init_one(&resp, &cmd->resp, resp_size); + sgs[out_num + in_num++] = &resp; + + /* Data-in buffer */ + if (in) { + /* Place READ protection SGLs before Data IN payload */ + if (scsi_prot_sg_count(sc)) + sgs[out_num + in_num++] = scsi_prot_sglist(sc); + sgs[out_num + in_num++] = in->sgl; + } + + return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC); +} + +static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq, + struct virtio_scsi_cmd *cmd, + size_t req_size, size_t resp_size) +{ + unsigned long flags; + int err; + bool needs_kick = false; + + spin_lock_irqsave(&vq->vq_lock, flags); + err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size); + if (!err) + needs_kick = virtqueue_kick_prepare(vq->vq); + + spin_unlock_irqrestore(&vq->vq_lock, flags); + + if (needs_kick) + virtqueue_notify(vq->vq); + return err; +} + +static void virtio_scsi_init_hdr(struct virtio_device *vdev, + struct virtio_scsi_cmd_req *cmd, + struct scsi_cmnd *sc) +{ + cmd->lun[0] = 1; + cmd->lun[1] = sc->device->id; + cmd->lun[2] = (sc->device->lun >> 8) | 0x40; + cmd->lun[3] = sc->device->lun & 0xff; + cmd->tag = cpu_to_virtio64(vdev, (unsigned long)sc); + cmd->task_attr = VIRTIO_SCSI_S_SIMPLE; + cmd->prio = 0; + cmd->crn = 0; +} + +#ifdef CONFIG_BLK_DEV_INTEGRITY +static void virtio_scsi_init_hdr_pi(struct virtio_device *vdev, + struct virtio_scsi_cmd_req_pi *cmd_pi, + struct scsi_cmnd *sc) +{ + struct request *rq = sc->request; + struct blk_integrity *bi; + + virtio_scsi_init_hdr(vdev, (struct virtio_scsi_cmd_req *)cmd_pi, sc); + + if (!rq || !scsi_prot_sg_count(sc)) + return; + + bi = blk_get_integrity(rq->rq_disk); + + if (sc->sc_data_direction == DMA_TO_DEVICE) + cmd_pi->pi_bytesout = cpu_to_virtio32(vdev, + blk_rq_sectors(rq) * + bi->tuple_size); + else if (sc->sc_data_direction == DMA_FROM_DEVICE) + cmd_pi->pi_bytesin = cpu_to_virtio32(vdev, + blk_rq_sectors(rq) * + bi->tuple_size); +} +#endif + +static int virtscsi_queuecommand(struct virtio_scsi *vscsi, + struct virtio_scsi_vq *req_vq, + struct scsi_cmnd *sc) +{ + struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); + unsigned long flags; + int req_size; + int ret; + + BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize); + + /* TODO: check feature bit and fail if unsupported? 
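virtscsi_add_cmd() above leans on virtqueue_add_sgs()'s one convention: a single sgs[] array in which the first out_num entries are device-readable and the remaining in_num are device-writable, so the request header and data-out must precede the response header and data-in. A toy version of that bookkeeping, with labelled buffers standing in for scatterlists:

#include <stdio.h>

struct buf { const char *label; };

int main(void)
{
    struct buf *sgs[6];
    unsigned out_num = 0, in_num = 0;
    struct buf req  = {"request header"},  dout = {"data-out payload"};
    struct buf resp = {"response header"}, din  = {"data-in payload"};

    sgs[out_num++] = &req;           /* device-readable */
    sgs[out_num++] = &dout;          /* device-readable */
    sgs[out_num + in_num++] = &resp; /* device-writable */
    sgs[out_num + in_num++] = &din;  /* device-writable */

    for (unsigned i = 0; i < out_num + in_num; i++)
        printf("%s: %s\n", i < out_num ? "out" : "in", sgs[i]->label);
    return 0;
}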
*/ + BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL); + + dev_dbg(&sc->device->sdev_gendev, + "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]); + + memset(cmd, 0, sizeof(*cmd)); + cmd->sc = sc; + + BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE); + +#ifdef CONFIG_BLK_DEV_INTEGRITY + if (virtio_has_feature(vscsi->vdev, VIRTIO_SCSI_F_T10_PI)) { + virtio_scsi_init_hdr_pi(vscsi->vdev, &cmd->req.cmd_pi, sc); + memcpy(cmd->req.cmd_pi.cdb, sc->cmnd, sc->cmd_len); + req_size = sizeof(cmd->req.cmd_pi); + } else +#endif + { + virtio_scsi_init_hdr(vscsi->vdev, &cmd->req.cmd, sc); + memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len); + req_size = sizeof(cmd->req.cmd); + } + + ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)); + if (ret == -EIO) { + cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; + spin_lock_irqsave(&req_vq->vq_lock, flags); + virtscsi_complete_cmd(vscsi, cmd); + spin_unlock_irqrestore(&req_vq->vq_lock, flags); + } else if (ret != 0) { + return SCSI_MLQUEUE_HOST_BUSY; + } + return 0; +} + +static int virtscsi_queuecommand_single(struct Scsi_Host *sh, + struct scsi_cmnd *sc) +{ + struct virtio_scsi *vscsi = shost_priv(sh); + struct virtio_scsi_target_state *tgt = + scsi_target(sc->device)->hostdata; + + atomic_inc(&tgt->reqs); + return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc); +} + +static struct virtio_scsi_vq *virtscsi_pick_vq_mq(struct virtio_scsi *vscsi, + struct scsi_cmnd *sc) +{ + u32 tag = blk_mq_unique_tag(sc->request); + u16 hwq = blk_mq_unique_tag_to_hwq(tag); + + return &vscsi->req_vqs[hwq]; +} + +static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi, + struct virtio_scsi_target_state *tgt) +{ + struct virtio_scsi_vq *vq; + unsigned long flags; + u32 queue_num; + + local_irq_save(flags); + if (atomic_inc_return(&tgt->reqs) > 1) { + unsigned long seq; + + do { + seq = read_seqcount_begin(&tgt->tgt_seq); + vq = tgt->req_vq; + } while (read_seqcount_retry(&tgt->tgt_seq, seq)); + } else { + /* no writes can be concurrent because of atomic_t */ + write_seqcount_begin(&tgt->tgt_seq); + + /* keep previous req_vq if a reader just arrived */ + if (unlikely(atomic_read(&tgt->reqs) > 1)) { + vq = tgt->req_vq; + goto unlock; + } + + queue_num = smp_processor_id(); + while (unlikely(queue_num >= vscsi->num_queues)) + queue_num -= vscsi->num_queues; + tgt->req_vq = vq = &vscsi->req_vqs[queue_num]; + unlock: + write_seqcount_end(&tgt->tgt_seq); + } + local_irq_restore(flags); + + return vq; +} + +static int virtscsi_queuecommand_multi(struct Scsi_Host *sh, + struct scsi_cmnd *sc) +{ + struct virtio_scsi *vscsi = shost_priv(sh); + struct virtio_scsi_target_state *tgt = + scsi_target(sc->device)->hostdata; + struct virtio_scsi_vq *req_vq; + + if (shost_use_blk_mq(sh)) + req_vq = virtscsi_pick_vq_mq(vscsi, sc); + else + req_vq = virtscsi_pick_vq(vscsi, tgt); + + return virtscsi_queuecommand(vscsi, req_vq, sc); +} + +static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) +{ + DECLARE_COMPLETION_ONSTACK(comp); + int ret = FAILED; + + cmd->comp = &comp; + if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd, + sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0) + goto out; + + wait_for_completion(&comp); + if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK || + cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) + ret = SUCCESS; + + /* + * The spec guarantees that all requests related to the TMF have + * been completed, but the callback might not have run yet if + * we're using independent interrupts (e.g. MSI). Poll the + * virtqueues once.
+ * + * In the abort case, sc->scsi_done will do nothing, because + * the block layer must have detected a timeout and as a result + * REQ_ATOM_COMPLETE has been set. + */ + virtscsi_poll_requests(vscsi); + +out: + mempool_free(cmd, virtscsi_cmd_pool); + return ret; +} + +static int virtscsi_device_reset(struct scsi_cmnd *sc) +{ + struct virtio_scsi *vscsi = shost_priv(sc->device->host); + struct virtio_scsi_cmd *cmd; + + sdev_printk(KERN_INFO, sc->device, "device reset\n"); + cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); + if (!cmd) + return FAILED; + + memset(cmd, 0, sizeof(*cmd)); + cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ + .type = VIRTIO_SCSI_T_TMF, + .subtype = cpu_to_virtio32(vscsi->vdev, + VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET), + .lun[0] = 1, + .lun[1] = sc->device->id, + .lun[2] = (sc->device->lun >> 8) | 0x40, + .lun[3] = sc->device->lun & 0xff, + }; + return virtscsi_tmf(vscsi, cmd); +} + +static int virtscsi_device_alloc(struct scsi_device *sdevice) +{ + /* + * Passed through SCSI targets (e.g. with qemu's 'scsi-block') + * may have transfer limits which come from the host SCSI + * controller or something on the host side other than the + * target itself. + * + * To make this work properly, the hypervisor can adjust the + * target's VPD information to advertise these limits. But + * for that to work, the guest has to look at the VPD pages, + * which we won't do by default if it is an SPC-2 device, even + * if it does actually support it. + * + * So, set the blist to always try to read the VPD pages. + */ + sdevice->sdev_bflags = BLIST_TRY_VPD_PAGES; + + return 0; +} + + +/** + * virtscsi_change_queue_depth() - Change a virtscsi target's queue depth + * @sdev: Virtscsi target whose queue depth to change + * @qdepth: New queue depth + */ +static int virtscsi_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + int max_depth = shost->cmd_per_lun; + + return scsi_change_queue_depth(sdev, min(max_depth, qdepth)); +} + +static int virtscsi_abort(struct scsi_cmnd *sc) +{ + struct virtio_scsi *vscsi = shost_priv(sc->device->host); + struct virtio_scsi_cmd *cmd; + + scmd_printk(KERN_INFO, sc, "abort\n"); + cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO); + if (!cmd) + return FAILED; + + memset(cmd, 0, sizeof(*cmd)); + cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){ + .type = VIRTIO_SCSI_T_TMF, + .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK, + .lun[0] = 1, + .lun[1] = sc->device->id, + .lun[2] = (sc->device->lun >> 8) | 0x40, + .lun[3] = sc->device->lun & 0xff, + .tag = cpu_to_virtio64(vscsi->vdev, (unsigned long)sc), + }; + return virtscsi_tmf(vscsi, cmd); +} + +static int virtscsi_target_alloc(struct scsi_target *starget) +{ + struct Scsi_Host *sh = dev_to_shost(starget->dev.parent); + struct virtio_scsi *vscsi = shost_priv(sh); + + struct virtio_scsi_target_state *tgt = + kmalloc(sizeof(*tgt), GFP_KERNEL); + if (!tgt) + return -ENOMEM; + + seqcount_init(&tgt->tgt_seq); + atomic_set(&tgt->reqs, 0); + tgt->req_vq = &vscsi->req_vqs[0]; + + starget->hostdata = tgt; + return 0; +} + +static void virtscsi_target_destroy(struct scsi_target *starget) +{ + struct virtio_scsi_target_state *tgt = starget->hostdata; + kfree(tgt); +} + +static struct scsi_host_template virtscsi_host_template_single = { + .module = THIS_MODULE, + .name = "Virtio SCSI HBA", + .proc_name = "virtio_scsi", + .this_id = -1, + .cmd_size = sizeof(struct virtio_scsi_cmd), + .queuecommand = virtscsi_queuecommand_single, + .change_queue_depth = 
virtscsi_change_queue_depth, + .eh_abort_handler = virtscsi_abort, + .eh_device_reset_handler = virtscsi_device_reset, + .slave_alloc = virtscsi_device_alloc, + + .can_queue = 1024, + .dma_boundary = UINT_MAX, + .use_clustering = ENABLE_CLUSTERING, + .target_alloc = virtscsi_target_alloc, + .target_destroy = virtscsi_target_destroy, + .track_queue_depth = 1, +}; + +static struct scsi_host_template virtscsi_host_template_multi = { + .module = THIS_MODULE, + .name = "Virtio SCSI HBA", + .proc_name = "virtio_scsi", + .this_id = -1, + .cmd_size = sizeof(struct virtio_scsi_cmd), + .queuecommand = virtscsi_queuecommand_multi, + .change_queue_depth = virtscsi_change_queue_depth, + .eh_abort_handler = virtscsi_abort, + .eh_device_reset_handler = virtscsi_device_reset, + + .slave_alloc = virtscsi_device_alloc, + .can_queue = 1024, + .dma_boundary = UINT_MAX, + .use_clustering = ENABLE_CLUSTERING, + .target_alloc = virtscsi_target_alloc, + .target_destroy = virtscsi_target_destroy, + .track_queue_depth = 1, +}; + +#define virtscsi_config_get(vdev, fld) \ + ({ \ + typeof(((struct virtio_scsi_config *)0)->fld) __val; \ + virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \ + __val; \ + }) + +#define virtscsi_config_set(vdev, fld, val) \ + do { \ + typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \ + virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \ + } while(0) + +static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) +{ + int i; + int cpu; + + /* In multiqueue mode, when the number of cpus is equal + * to the number of request queues, we let the queues + * be private to one cpu by setting the affinity hint + * to eliminate the contention. + */ + if ((vscsi->num_queues == 1 || + vscsi->num_queues != num_online_cpus()) && affinity) { + if (vscsi->affinity_hint_set) + affinity = false; + else + return; + } + + if (affinity) { + i = 0; + for_each_online_cpu(cpu) { + virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu); + i++; + } + + vscsi->affinity_hint_set = true; + } else { + for (i = 0; i < vscsi->num_queues; i++) { + if (!vscsi->req_vqs[i].vq) + continue; + + virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); + } + + vscsi->affinity_hint_set = false; + } +} + +static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) +{ + get_online_cpus(); + __virtscsi_set_affinity(vscsi, affinity); + put_online_cpus(); +} + +static int virtscsi_cpu_callback(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb); + switch(action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: + case CPU_DEAD: + case CPU_DEAD_FROZEN: + __virtscsi_set_affinity(vscsi, true); + break; + default: + break; + } + return NOTIFY_OK; +} + +static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq, + struct virtqueue *vq) +{ + spin_lock_init(&virtscsi_vq->vq_lock); + virtscsi_vq->vq = vq; +} + +static void virtscsi_remove_vqs(struct virtio_device *vdev) +{ + struct Scsi_Host *sh = virtio_scsi_host(vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + + virtscsi_set_affinity(vscsi, false); + + /* Stop all the virtqueues. 
*/ + vdev->config->reset(vdev); + + vdev->config->del_vqs(vdev); +} + +static int virtscsi_init(struct virtio_device *vdev, + struct virtio_scsi *vscsi) +{ + int err; + u32 i; + u32 num_vqs; + vq_callback_t **callbacks; + const char **names; + struct virtqueue **vqs; + + num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE; + vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL); + callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL); + names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL); + + if (!callbacks || !vqs || !names) { + err = -ENOMEM; + goto out; + } + + callbacks[0] = virtscsi_ctrl_done; + callbacks[1] = virtscsi_event_done; + names[0] = "control"; + names[1] = "event"; + for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) { + callbacks[i] = virtscsi_req_done; + names[i] = "request"; + } + + /* Discover virtqueues and write information to configuration. */ + err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names); + if (err) + goto out; + + virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]); + virtscsi_init_vq(&vscsi->event_vq, vqs[1]); + for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) + virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE], + vqs[i]); + + virtscsi_set_affinity(vscsi, true); + + virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE); + virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE); + + err = 0; + +out: + kfree(names); + kfree(callbacks); + kfree(vqs); + if (err) + virtscsi_remove_vqs(vdev); + return err; +} + +static int virtscsi_probe(struct virtio_device *vdev) +{ + struct Scsi_Host *shost; + struct virtio_scsi *vscsi; + int err; + u32 sg_elems, num_targets; + u32 cmd_per_lun; + u32 num_queues; + struct scsi_host_template *hostt; + + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + + /* We need to know how many queues before we allocate. */ + num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; + + num_targets = virtscsi_config_get(vdev, max_target) + 1; + + if (num_queues == 1) + hostt = &virtscsi_host_template_single; + else + hostt = &virtscsi_host_template_multi; + + shost = scsi_host_alloc(hostt, + sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues); + if (!shost) + return -ENOMEM; + + sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1; + shost->sg_tablesize = sg_elems; + vscsi = shost_priv(shost); + vscsi->vdev = vdev; + vscsi->num_queues = num_queues; + vdev->priv = shost; + + err = virtscsi_init(vdev, vscsi); + if (err) + goto virtscsi_init_failed; + + vscsi->nb.notifier_call = &virtscsi_cpu_callback; + err = register_hotcpu_notifier(&vscsi->nb); + if (err) { + pr_err("registering cpu notifier failed\n"); + goto scsi_add_host_failed; + } + + cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1; + shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue); + shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF; + + /* LUNs > 256 are reported with format 1, so they go in the range + * 16640-32767. 
+ */ + shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000; + shost->max_id = num_targets; + shost->max_channel = 0; + shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE; + shost->nr_hw_queues = num_queues; + +#ifdef CONFIG_BLK_DEV_INTEGRITY + if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { + int host_prot; + + host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | + SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | + SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; + + scsi_host_set_prot(shost, host_prot); + scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC); + } +#endif + + err = scsi_add_host(shost, &vdev->dev); + if (err) + goto scsi_add_host_failed; + + virtio_device_ready(vdev); + + if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) + virtscsi_kick_event_all(vscsi); + + scsi_scan_host(shost); + return 0; + +scsi_add_host_failed: + vdev->config->del_vqs(vdev); +virtscsi_init_failed: + scsi_host_put(shost); + return err; +} + +static void virtscsi_remove(struct virtio_device *vdev) +{ + struct Scsi_Host *shost = virtio_scsi_host(vdev); + struct virtio_scsi *vscsi = shost_priv(shost); + + if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) + virtscsi_cancel_event_work(vscsi); + + scsi_remove_host(shost); + + unregister_hotcpu_notifier(&vscsi->nb); + + virtscsi_remove_vqs(vdev); + scsi_host_put(shost); +} + +#ifdef CONFIG_PM_SLEEP +static int virtscsi_freeze(struct virtio_device *vdev) +{ + struct Scsi_Host *sh = virtio_scsi_host(vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + + unregister_hotcpu_notifier(&vscsi->nb); + virtscsi_remove_vqs(vdev); + return 0; +} + +static int virtscsi_restore(struct virtio_device *vdev) +{ + struct Scsi_Host *sh = virtio_scsi_host(vdev); + struct virtio_scsi *vscsi = shost_priv(sh); + int err; + + err = virtscsi_init(vdev, vscsi); + if (err) + return err; + + err = register_hotcpu_notifier(&vscsi->nb); + if (err) { + vdev->config->del_vqs(vdev); + return err; + } + + virtio_device_ready(vdev); + + if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) + virtscsi_kick_event_all(vscsi); + + return err; +} +#endif + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { + VIRTIO_SCSI_F_HOTPLUG, + VIRTIO_SCSI_F_CHANGE, +#ifdef CONFIG_BLK_DEV_INTEGRITY + VIRTIO_SCSI_F_T10_PI, +#endif +}; + +static struct virtio_driver virtio_scsi_driver = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtscsi_probe, +#ifdef CONFIG_PM_SLEEP + .freeze = virtscsi_freeze, + .restore = virtscsi_restore, +#endif + .remove = virtscsi_remove, +}; + +static int __init init(void) +{ + int ret = -ENOMEM; + + virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0); + if (!virtscsi_cmd_cache) { + pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n"); + goto error; + } + + + virtscsi_cmd_pool = + mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ, + virtscsi_cmd_cache); + if (!virtscsi_cmd_pool) { + pr_err("mempool_create() for virtscsi_cmd_pool failed\n"); + goto error; + } + ret = register_virtio_driver(&virtio_scsi_driver); + if (ret < 0) + goto error; + + return 0; + +error: + if (virtscsi_cmd_pool) { + mempool_destroy(virtscsi_cmd_pool); + virtscsi_cmd_pool = NULL; + } + if (virtscsi_cmd_cache) { + kmem_cache_destroy(virtscsi_cmd_cache); + virtscsi_cmd_cache = NULL; + } + return ret; +} + +static void __exit fini(void) +{ + 
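+	/*
+	 * Teardown mirrors init() in reverse order: unregister the driver
+	 * first, so no new commands can be allocated, then destroy the
+	 * mempool and finally the slab cache backing it.
+	 */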
unregister_virtio_driver(&virtio_scsi_driver); + mempool_destroy(virtscsi_cmd_pool); + kmem_cache_destroy(virtscsi_cmd_cache); +} +module_init(init); +module_exit(fini); + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Virtio SCSI HBA driver"); +MODULE_LICENSE("GPL"); diff --git a/compile-lkm.sh b/compile-lkm.sh new file mode 100755 index 00000000..016eac62 --- /dev/null +++ b/compile-lkm.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +set -e + +TMP_PATH="/tmp" +DEST_PATH="files/board/arpl/p3/lkms" + +############################################################################### +function trap_cancel() { + echo "Press Control+C once more to terminate the process (or wait 2s for it to restart)" + sleep 2 || exit 1 +} +trap trap_cancel SIGINT SIGTERM + +############################################################################### +function die() { + echo -e "\033[1;31m$@\033[0m" + exit 1 +} + +# Main +while read PLATFORM KVER; do +# Compile using docker + docker run --rm -t --user `id -u` -v "${TMP_PATH}":/output \ + -v "${PWD}/redpill-lkm":/input syno-compiler compile-lkm ${PLATFORM} + mv "${TMP_PATH}/redpill-dev.ko" "${DEST_PATH}/rp-${PLATFORM}-${KVER}-dev.ko" + mv "${TMP_PATH}/redpill-prod.ko" "${DEST_PATH}/rp-${PLATFORM}-${KVER}-prod.ko" +done < PLATFORMS
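+# Example: the same build the loop above performs, run by hand for a single
+# platform (a sketch; <platform> stands for any first-column entry of the
+# PLATFORMS file):
+#   docker run --rm -t --user `id -u` -v /tmp:/output \
+#     -v "${PWD}/redpill-lkm":/input syno-compiler compile-lkm <platform>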
-f "cache/ds.${PLATFORM}-${TOOLKIT_VER}.dev.txz" ]; then + URL="https://global.download.synology.com/download/ToolChain/toolkit/${TOOLKIT_VER}/${PLATFORM}/ds.${PLATFORM}-${TOOLKIT_VER}.dev.txz" + echo "Downloading ${URL}" + curl -L "${URL}" -o "cache/ds.${PLATFORM}-${TOOLKIT_VER}.dev.txz" + else + echo "OK" + fi +done + +# Generate Dockerfile +echo "Generating Dockerfile" +cp Dockerfile.template Dockerfile +VALUE="" +for PLATFORM in ${!PLATFORMS[@]}; do + VALUE+="${PLATFORM}:${PLATFORMS[${PLATFORM}]} " +done +sed -i "s|@@@PLATFORMS@@@|${VALUE::-1}|g" Dockerfile +sed -i "s|@@@TOOLKIT_VER@@@|${TOOLKIT_VER}|g" Dockerfile + +# Build +echo "Building... Drink a coffee and wait!" +docker image rm syno-compiler >/dev/null 2>&1 +docker buildx build . --load --tag syno-compiler diff --git a/docker/files/etc/profile.d/login.sh b/docker/files/etc/profile.d/login.sh new file mode 100644 index 00000000..76437f46 --- /dev/null +++ b/docker/files/etc/profile.d/login.sh @@ -0,0 +1,9 @@ +[[ "$-" != *i* ]] && return +export LS_OPTIONS='--color=auto' +export SHELL='linux' +eval "`dircolors`" +alias ls='ls -F -h --color=always -v --author --time-style=long-iso' +alias ll='ls -l' +alias l='ls -l -a' +alias h='history 25' +alias j='jobs -l' diff --git a/docker/files/opt/do.sh b/docker/files/opt/do.sh new file mode 100755 index 00000000..20840257 --- /dev/null +++ b/docker/files/opt/do.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash + +function compile-module { + # Validate + if [ -z "${1}" ]; then + echo "Use: compile-module " + exit 1 + fi + VALID=0 + while read PLATFORM KVER; do + if [ "${PLATFORM}" = "${1}" ]; then + VALID=1 + break + fi + done " + exit 1 + fi + cp -R /input /tmp + make -C "/tmp/input" LINUX_SRC="/opt/${PLATFORM}" dev-v7 + strip -g "/tmp/input/redpill.ko" + mv "/tmp/input/redpill.ko" "/output/redpill-dev.ko" + make -C "/tmp/input" LINUX_SRC="/opt/${PLATFORM}" clean + make -C "/tmp/input" LINUX_SRC="/opt/${PLATFORM}" prod-v7 + strip -g "/tmp/input/redpill.ko" + mv "/tmp/input/redpill.ko" "/output/redpill-prod.ko" +} + +if [ $# -lt 1 ]; then + echo "Use: ()" + exit 1 +fi +case $1 in + bash) bash -l ;; + compile-module) compile-module $2 ;; + compile-lkm) compile-lkm $2 ;; + *) echo "Command not recognized: $1" ;; +esac diff --git a/files/board/arpl/busybox_defconfig b/files/board/arpl/busybox_defconfig new file mode 100644 index 00000000..0df5e6a0 --- /dev/null +++ b/files/board/arpl/busybox_defconfig @@ -0,0 +1,1216 @@ +# +# Automatically generated make config: don't edit +# Busybox version: 1.35.0 +# Wed Jun 22 14:12:15 2022 +# +CONFIG_HAVE_DOT_CONFIG=y + +# +# Settings +# +CONFIG_DESKTOP=y +# CONFIG_EXTRA_COMPAT is not set +# CONFIG_FEDORA_COMPAT is not set +CONFIG_INCLUDE_SUSv2=y +CONFIG_LONG_OPTS=y +CONFIG_SHOW_USAGE=y +CONFIG_FEATURE_VERBOSE_USAGE=y +# CONFIG_FEATURE_COMPRESS_USAGE is not set +CONFIG_LFS=y +# CONFIG_PAM is not set +CONFIG_FEATURE_DEVPTS=y +CONFIG_FEATURE_UTMP=y +CONFIG_FEATURE_WTMP=y +# CONFIG_FEATURE_PIDFILE is not set +CONFIG_PID_FILE_PATH="" +CONFIG_BUSYBOX=y +CONFIG_FEATURE_SHOW_SCRIPT=y +CONFIG_FEATURE_INSTALLER=y +# CONFIG_INSTALL_NO_USR is not set +CONFIG_FEATURE_SUID=y +# CONFIG_FEATURE_SUID_CONFIG is not set +# CONFIG_FEATURE_SUID_CONFIG_QUIET is not set +# CONFIG_FEATURE_PREFER_APPLETS is not set +CONFIG_BUSYBOX_EXEC_PATH="/proc/self/exe" +# CONFIG_SELINUX is not set +# CONFIG_FEATURE_CLEAN_UP is not set +CONFIG_FEATURE_SYSLOG_INFO=y +CONFIG_FEATURE_SYSLOG=y + +# +# Build Options +# +# CONFIG_STATIC is not set +# CONFIG_PIE is not set +# CONFIG_NOMMU is not set +# 
CONFIG_BUILD_LIBBUSYBOX is not set +# CONFIG_FEATURE_LIBBUSYBOX_STATIC is not set +# CONFIG_FEATURE_INDIVIDUAL is not set +# CONFIG_FEATURE_SHARED_BUSYBOX is not set +CONFIG_CROSS_COMPILER_PREFIX="" +CONFIG_SYSROOT="" +CONFIG_EXTRA_CFLAGS="" +CONFIG_EXTRA_LDFLAGS="" +CONFIG_EXTRA_LDLIBS="" +# CONFIG_USE_PORTABLE_CODE is not set +CONFIG_STACK_OPTIMIZATION_386=y +CONFIG_STATIC_LIBGCC=y + +# +# Installation Options ("make install" behavior) +# +CONFIG_INSTALL_APPLET_SYMLINKS=y +# CONFIG_INSTALL_APPLET_HARDLINKS is not set +# CONFIG_INSTALL_APPLET_SCRIPT_WRAPPERS is not set +# CONFIG_INSTALL_APPLET_DONT is not set +# CONFIG_INSTALL_SH_APPLET_SYMLINK is not set +# CONFIG_INSTALL_SH_APPLET_HARDLINK is not set +# CONFIG_INSTALL_SH_APPLET_SCRIPT_WRAPPER is not set +CONFIG_PREFIX="./_install" + +# +# Debugging Options +# +# CONFIG_DEBUG is not set +# CONFIG_DEBUG_PESSIMIZE is not set +# CONFIG_DEBUG_SANITIZE is not set +# CONFIG_UNIT_TEST is not set +# CONFIG_WERROR is not set +# CONFIG_WARN_SIMPLE_MSG is not set +CONFIG_NO_DEBUG_LIB=y +# CONFIG_DMALLOC is not set +# CONFIG_EFENCE is not set + +# +# Library Tuning +# +# CONFIG_FEATURE_USE_BSS_TAIL is not set +CONFIG_FLOAT_DURATION=y +CONFIG_FEATURE_RTMINMAX=y +CONFIG_FEATURE_RTMINMAX_USE_LIBC_DEFINITIONS=y +CONFIG_FEATURE_BUFFERS_USE_MALLOC=y +# CONFIG_FEATURE_BUFFERS_GO_ON_STACK is not set +# CONFIG_FEATURE_BUFFERS_GO_IN_BSS is not set +CONFIG_PASSWORD_MINLEN=6 +CONFIG_MD5_SMALL=1 +CONFIG_SHA3_SMALL=1 +CONFIG_FEATURE_NON_POSIX_CP=y +# CONFIG_FEATURE_VERBOSE_CP_MESSAGE is not set +CONFIG_FEATURE_USE_SENDFILE=y +CONFIG_FEATURE_COPYBUF_KB=4 +CONFIG_MONOTONIC_SYSCALL=y +CONFIG_IOCTL_HEX2STR_ERROR=y +CONFIG_FEATURE_EDITING=y +CONFIG_FEATURE_EDITING_MAX_LEN=1024 +CONFIG_FEATURE_EDITING_VI=y +CONFIG_FEATURE_EDITING_HISTORY=999 +CONFIG_FEATURE_EDITING_SAVEHISTORY=y +# CONFIG_FEATURE_EDITING_SAVE_ON_EXIT is not set +CONFIG_FEATURE_REVERSE_SEARCH=y +CONFIG_FEATURE_TAB_COMPLETION=y +# CONFIG_FEATURE_USERNAME_COMPLETION is not set +CONFIG_FEATURE_EDITING_FANCY_PROMPT=y +CONFIG_FEATURE_EDITING_WINCH=y +# CONFIG_FEATURE_EDITING_ASK_TERMINAL is not set +# CONFIG_LOCALE_SUPPORT is not set +# CONFIG_UNICODE_SUPPORT is not set +# CONFIG_UNICODE_USING_LOCALE is not set +# CONFIG_FEATURE_CHECK_UNICODE_IN_ENV is not set +CONFIG_SUBST_WCHAR=0 +CONFIG_LAST_SUPPORTED_WCHAR=0 +# CONFIG_UNICODE_COMBINING_WCHARS is not set +# CONFIG_UNICODE_WIDE_WCHARS is not set +# CONFIG_UNICODE_BIDI_SUPPORT is not set +# CONFIG_UNICODE_NEUTRAL_TABLE is not set +# CONFIG_UNICODE_PRESERVE_BROKEN is not set + +# +# Applets +# + +# +# Archival Utilities +# +# CONFIG_FEATURE_SEAMLESS_XZ is not set +# CONFIG_FEATURE_SEAMLESS_LZMA is not set +# CONFIG_FEATURE_SEAMLESS_BZ2 is not set +# CONFIG_FEATURE_SEAMLESS_GZ is not set +# CONFIG_FEATURE_SEAMLESS_Z is not set +CONFIG_AR=y +# CONFIG_FEATURE_AR_LONG_FILENAMES is not set +CONFIG_FEATURE_AR_CREATE=y +# CONFIG_UNCOMPRESS is not set +CONFIG_GUNZIP=y +CONFIG_ZCAT=y +CONFIG_FEATURE_GUNZIP_LONG_OPTIONS=y +CONFIG_BUNZIP2=y +CONFIG_BZCAT=y +CONFIG_UNLZMA=y +CONFIG_LZCAT=y +CONFIG_LZMA=y +CONFIG_UNXZ=y +CONFIG_XZCAT=y +CONFIG_XZ=y +# CONFIG_BZIP2 is not set +CONFIG_BZIP2_SMALL=0 +CONFIG_FEATURE_BZIP2_DECOMPRESS=y +CONFIG_CPIO=y +# CONFIG_FEATURE_CPIO_O is not set +# CONFIG_FEATURE_CPIO_P is not set +# CONFIG_FEATURE_CPIO_IGNORE_DEVNO is not set +# CONFIG_FEATURE_CPIO_RENUMBER_INODES is not set +# CONFIG_DPKG is not set +# CONFIG_DPKG_DEB is not set +CONFIG_GZIP=y +# CONFIG_FEATURE_GZIP_LONG_OPTIONS is not set +CONFIG_GZIP_FAST=0 +# 
CONFIG_FEATURE_GZIP_LEVELS is not set +CONFIG_FEATURE_GZIP_DECOMPRESS=y +# CONFIG_LZOP is not set +CONFIG_UNLZOP=y +CONFIG_LZOPCAT=y +# CONFIG_LZOP_COMPR_HIGH is not set +# CONFIG_RPM is not set +# CONFIG_RPM2CPIO is not set +CONFIG_TAR=y +CONFIG_FEATURE_TAR_LONG_OPTIONS=y +CONFIG_FEATURE_TAR_CREATE=y +# CONFIG_FEATURE_TAR_AUTODETECT is not set +CONFIG_FEATURE_TAR_FROM=y +# CONFIG_FEATURE_TAR_OLDGNU_COMPATIBILITY is not set +# CONFIG_FEATURE_TAR_OLDSUN_COMPATIBILITY is not set +CONFIG_FEATURE_TAR_GNU_EXTENSIONS=y +CONFIG_FEATURE_TAR_TO_COMMAND=y +# CONFIG_FEATURE_TAR_UNAME_GNAME is not set +# CONFIG_FEATURE_TAR_NOPRESERVE_TIME is not set +# CONFIG_FEATURE_TAR_SELINUX is not set +CONFIG_UNZIP=y +CONFIG_FEATURE_UNZIP_CDF=y +CONFIG_FEATURE_UNZIP_BZIP2=y +CONFIG_FEATURE_UNZIP_LZMA=y +CONFIG_FEATURE_UNZIP_XZ=y +# CONFIG_FEATURE_LZMA_FAST is not set + +# +# Coreutils +# +CONFIG_FEATURE_VERBOSE=y + +# +# Common options for date and touch +# +CONFIG_FEATURE_TIMEZONE=y + +# +# Common options for cp and mv +# +CONFIG_FEATURE_PRESERVE_HARDLINKS=y + +# +# Common options for df, du, ls +# +CONFIG_FEATURE_HUMAN_READABLE=y +CONFIG_BASENAME=y +CONFIG_CAT=y +CONFIG_FEATURE_CATN=y +CONFIG_FEATURE_CATV=y +CONFIG_CHGRP=y +CONFIG_CHMOD=y +CONFIG_CHOWN=y +# CONFIG_FEATURE_CHOWN_LONG_OPTIONS is not set +CONFIG_CHROOT=y +CONFIG_CKSUM=y +CONFIG_CRC32=y +# CONFIG_COMM is not set +CONFIG_CP=y +# CONFIG_FEATURE_CP_LONG_OPTIONS is not set +# CONFIG_FEATURE_CP_REFLINK is not set +CONFIG_CUT=y +CONFIG_FEATURE_CUT_REGEX=y +CONFIG_DATE=y +CONFIG_FEATURE_DATE_ISOFMT=y +# CONFIG_FEATURE_DATE_NANO is not set +CONFIG_FEATURE_DATE_COMPAT=y +CONFIG_DD=y +CONFIG_FEATURE_DD_SIGNAL_HANDLING=y +# CONFIG_FEATURE_DD_THIRD_STATUS_LINE is not set +CONFIG_FEATURE_DD_IBS_OBS=y +CONFIG_FEATURE_DD_STATUS=y +CONFIG_DF=y +# CONFIG_FEATURE_DF_FANCY is not set +CONFIG_FEATURE_SKIP_ROOTFS=y +CONFIG_DIRNAME=y +CONFIG_DOS2UNIX=y +CONFIG_UNIX2DOS=y +CONFIG_DU=y +CONFIG_FEATURE_DU_DEFAULT_BLOCKSIZE_1K=y +CONFIG_ECHO=y +CONFIG_FEATURE_FANCY_ECHO=y +CONFIG_ENV=y +# CONFIG_EXPAND is not set +# CONFIG_UNEXPAND is not set +CONFIG_EXPR=y +CONFIG_EXPR_MATH_SUPPORT_64=y +CONFIG_FACTOR=y +CONFIG_FALSE=y +CONFIG_FOLD=y +CONFIG_HEAD=y +CONFIG_FEATURE_FANCY_HEAD=y +CONFIG_HOSTID=y +CONFIG_ID=y +# CONFIG_GROUPS is not set +CONFIG_INSTALL=y +CONFIG_FEATURE_INSTALL_LONG_OPTIONS=y +CONFIG_LINK=y +CONFIG_LN=y +CONFIG_LOGNAME=y +CONFIG_LS=y +CONFIG_FEATURE_LS_FILETYPES=y +CONFIG_FEATURE_LS_FOLLOWLINKS=y +CONFIG_FEATURE_LS_RECURSIVE=y +CONFIG_FEATURE_LS_WIDTH=y +CONFIG_FEATURE_LS_SORTFILES=y +CONFIG_FEATURE_LS_TIMESTAMPS=y +CONFIG_FEATURE_LS_USERNAME=y +CONFIG_FEATURE_LS_COLOR=y +CONFIG_FEATURE_LS_COLOR_IS_DEFAULT=y +CONFIG_MD5SUM=y +CONFIG_SHA1SUM=y +CONFIG_SHA256SUM=y +CONFIG_SHA512SUM=y +CONFIG_SHA3SUM=y + +# +# Common options for md5sum, sha1sum, sha256sum, sha512sum, sha3sum +# +CONFIG_FEATURE_MD5_SHA1_SUM_CHECK=y +CONFIG_MKDIR=y +CONFIG_MKFIFO=y +CONFIG_MKNOD=y +CONFIG_MKTEMP=y +CONFIG_MV=y +CONFIG_NICE=y +CONFIG_NL=y +CONFIG_NOHUP=y +CONFIG_NPROC=y +CONFIG_OD=y +CONFIG_PASTE=y +CONFIG_PRINTENV=y +CONFIG_PRINTF=y +CONFIG_PWD=y +CONFIG_READLINK=y +CONFIG_FEATURE_READLINK_FOLLOW=y +CONFIG_REALPATH=y +CONFIG_RM=y +CONFIG_RMDIR=y +CONFIG_SEQ=y +CONFIG_SHRED=y +# CONFIG_SHUF is not set +CONFIG_SLEEP=y +CONFIG_FEATURE_FANCY_SLEEP=y +CONFIG_SORT=y +CONFIG_FEATURE_SORT_BIG=y +# CONFIG_FEATURE_SORT_OPTIMIZE_MEMORY is not set +# CONFIG_SPLIT is not set +# CONFIG_FEATURE_SPLIT_FANCY is not set +# CONFIG_STAT is not set +# CONFIG_FEATURE_STAT_FORMAT is not set +# 
CONFIG_FEATURE_STAT_FILESYSTEM is not set +CONFIG_STTY=y +# CONFIG_SUM is not set +CONFIG_SYNC=y +# CONFIG_FEATURE_SYNC_FANCY is not set +# CONFIG_FSYNC is not set +# CONFIG_TAC is not set +CONFIG_TAIL=y +CONFIG_FEATURE_FANCY_TAIL=y +CONFIG_TEE=y +CONFIG_FEATURE_TEE_USE_BLOCK_IO=y +CONFIG_TEST=y +CONFIG_TEST1=y +CONFIG_TEST2=y +CONFIG_FEATURE_TEST_64=y +# CONFIG_TIMEOUT is not set +CONFIG_TOUCH=y +CONFIG_FEATURE_TOUCH_SUSV3=y +CONFIG_TR=y +CONFIG_FEATURE_TR_CLASSES=y +CONFIG_FEATURE_TR_EQUIV=y +CONFIG_TRUE=y +CONFIG_TRUNCATE=y +CONFIG_TTY=y +CONFIG_UNAME=y +CONFIG_UNAME_OSNAME="GNU/Linux" +CONFIG_BB_ARCH=y +CONFIG_UNIQ=y +CONFIG_UNLINK=y +CONFIG_USLEEP=y +CONFIG_UUDECODE=y +CONFIG_BASE32=y +CONFIG_BASE64=y +CONFIG_UUENCODE=y +CONFIG_WC=y +# CONFIG_FEATURE_WC_LARGE is not set +CONFIG_WHO=y +CONFIG_W=y +# CONFIG_USERS is not set +CONFIG_WHOAMI=y +CONFIG_YES=y + +# +# Console Utilities +# +CONFIG_CHVT=y +CONFIG_CLEAR=y +CONFIG_DEALLOCVT=y +CONFIG_DUMPKMAP=y +# CONFIG_FGCONSOLE is not set +# CONFIG_KBD_MODE is not set +CONFIG_LOADFONT=y +# CONFIG_SETFONT is not set +# CONFIG_FEATURE_SETFONT_TEXTUAL_MAP is not set +CONFIG_DEFAULT_SETFONT_DIR="" + +# +# Common options for loadfont and setfont +# +CONFIG_FEATURE_LOADFONT_PSF2=y +CONFIG_FEATURE_LOADFONT_RAW=y +CONFIG_LOADKMAP=y +CONFIG_OPENVT=y +CONFIG_RESET=y +CONFIG_RESIZE=y +CONFIG_FEATURE_RESIZE_PRINT=y +CONFIG_SETCONSOLE=y +# CONFIG_FEATURE_SETCONSOLE_LONG_OPTIONS is not set +CONFIG_SETKEYCODES=y +CONFIG_SETLOGCONS=y +# CONFIG_SHOWKEY is not set + +# +# Debian Utilities +# +CONFIG_PIPE_PROGRESS=y +CONFIG_RUN_PARTS=y +CONFIG_FEATURE_RUN_PARTS_LONG_OPTIONS=y +# CONFIG_FEATURE_RUN_PARTS_FANCY is not set +CONFIG_START_STOP_DAEMON=y +CONFIG_FEATURE_START_STOP_DAEMON_LONG_OPTIONS=y +CONFIG_FEATURE_START_STOP_DAEMON_FANCY=y +CONFIG_WHICH=y + +# +# klibc-utils +# +# CONFIG_MINIPS is not set +CONFIG_NUKE=y +CONFIG_RESUME=y +CONFIG_RUN_INIT=y + +# +# Editors +# +CONFIG_AWK=y +# CONFIG_FEATURE_AWK_LIBM is not set +CONFIG_FEATURE_AWK_GNU_EXTENSIONS=y +CONFIG_CMP=y +CONFIG_DIFF=y +# CONFIG_FEATURE_DIFF_LONG_OPTIONS is not set +CONFIG_FEATURE_DIFF_DIR=y +# CONFIG_ED is not set +CONFIG_PATCH=y +CONFIG_SED=y +CONFIG_VI=y +CONFIG_FEATURE_VI_MAX_LEN=4096 +CONFIG_FEATURE_VI_8BIT=y +CONFIG_FEATURE_VI_COLON=y +CONFIG_FEATURE_VI_COLON_EXPAND=y +CONFIG_FEATURE_VI_YANKMARK=y +CONFIG_FEATURE_VI_SEARCH=y +# CONFIG_FEATURE_VI_REGEX_SEARCH is not set +CONFIG_FEATURE_VI_USE_SIGNALS=y +CONFIG_FEATURE_VI_DOT_CMD=y +CONFIG_FEATURE_VI_READONLY=y +CONFIG_FEATURE_VI_SETOPTS=y +CONFIG_FEATURE_VI_SET=y +CONFIG_FEATURE_VI_WIN_RESIZE=y +CONFIG_FEATURE_VI_ASK_TERMINAL=y +CONFIG_FEATURE_VI_UNDO=y +CONFIG_FEATURE_VI_UNDO_QUEUE=y +CONFIG_FEATURE_VI_UNDO_QUEUE_MAX=256 +CONFIG_FEATURE_VI_VERBOSE_STATUS=y +CONFIG_FEATURE_ALLOW_EXEC=y + +# +# Finding Utilities +# +CONFIG_FIND=y +CONFIG_FEATURE_FIND_PRINT0=y +CONFIG_FEATURE_FIND_MTIME=y +CONFIG_FEATURE_FIND_ATIME=y +CONFIG_FEATURE_FIND_CTIME=y +CONFIG_FEATURE_FIND_MMIN=y +CONFIG_FEATURE_FIND_AMIN=y +CONFIG_FEATURE_FIND_CMIN=y +CONFIG_FEATURE_FIND_PERM=y +CONFIG_FEATURE_FIND_TYPE=y +CONFIG_FEATURE_FIND_EXECUTABLE=y +CONFIG_FEATURE_FIND_XDEV=y +CONFIG_FEATURE_FIND_MAXDEPTH=y +CONFIG_FEATURE_FIND_NEWER=y +# CONFIG_FEATURE_FIND_INUM is not set +CONFIG_FEATURE_FIND_SAMEFILE=y +CONFIG_FEATURE_FIND_EXEC=y +CONFIG_FEATURE_FIND_EXEC_PLUS=y +CONFIG_FEATURE_FIND_USER=y +CONFIG_FEATURE_FIND_GROUP=y +CONFIG_FEATURE_FIND_NOT=y +CONFIG_FEATURE_FIND_DEPTH=y +CONFIG_FEATURE_FIND_PAREN=y +CONFIG_FEATURE_FIND_SIZE=y +CONFIG_FEATURE_FIND_PRUNE=y 
+CONFIG_FEATURE_FIND_QUIT=y +# CONFIG_FEATURE_FIND_DELETE is not set +CONFIG_FEATURE_FIND_EMPTY=y +CONFIG_FEATURE_FIND_PATH=y +CONFIG_FEATURE_FIND_REGEX=y +# CONFIG_FEATURE_FIND_CONTEXT is not set +# CONFIG_FEATURE_FIND_LINKS is not set +CONFIG_GREP=y +CONFIG_EGREP=y +CONFIG_FGREP=y +CONFIG_FEATURE_GREP_CONTEXT=y +CONFIG_XARGS=y +# CONFIG_FEATURE_XARGS_SUPPORT_CONFIRMATION is not set +CONFIG_FEATURE_XARGS_SUPPORT_QUOTES=y +CONFIG_FEATURE_XARGS_SUPPORT_TERMOPT=y +CONFIG_FEATURE_XARGS_SUPPORT_ZERO_TERM=y +CONFIG_FEATURE_XARGS_SUPPORT_REPL_STR=y +CONFIG_FEATURE_XARGS_SUPPORT_PARALLEL=y +CONFIG_FEATURE_XARGS_SUPPORT_ARGS_FILE=y + +# +# Init Utilities +# +# CONFIG_BOOTCHARTD is not set +# CONFIG_FEATURE_BOOTCHARTD_BLOATED_HEADER is not set +# CONFIG_FEATURE_BOOTCHARTD_CONFIG_FILE is not set +CONFIG_HALT=y +CONFIG_POWEROFF=y +CONFIG_REBOOT=y +CONFIG_FEATURE_WAIT_FOR_INIT=y +# CONFIG_FEATURE_CALL_TELINIT is not set +CONFIG_TELINIT_PATH="" +CONFIG_INIT=y +CONFIG_LINUXRC=y +CONFIG_FEATURE_USE_INITTAB=y +CONFIG_FEATURE_KILL_REMOVED=y +CONFIG_FEATURE_KILL_DELAY=0 +CONFIG_FEATURE_INIT_SCTTY=y +CONFIG_FEATURE_INIT_SYSLOG=y +CONFIG_FEATURE_INIT_QUIET=y +# CONFIG_FEATURE_INIT_COREDUMPS is not set +CONFIG_INIT_TERMINAL_TYPE="linux" +CONFIG_FEATURE_INIT_MODIFY_CMDLINE=y + +# +# Login/Password Management Utilities +# +CONFIG_FEATURE_SHADOWPASSWDS=y +# CONFIG_USE_BB_PWD_GRP is not set +# CONFIG_USE_BB_SHADOW is not set +CONFIG_USE_BB_CRYPT=y +CONFIG_USE_BB_CRYPT_SHA=y +# CONFIG_ADD_SHELL is not set +# CONFIG_REMOVE_SHELL is not set +CONFIG_ADDGROUP=y +# CONFIG_FEATURE_ADDUSER_TO_GROUP is not set +CONFIG_ADDUSER=y +# CONFIG_FEATURE_CHECK_NAMES is not set +CONFIG_LAST_ID=60000 +CONFIG_FIRST_SYSTEM_ID=100 +CONFIG_LAST_SYSTEM_ID=999 +# CONFIG_CHPASSWD is not set +CONFIG_FEATURE_DEFAULT_PASSWD_ALGO="md5" +# CONFIG_CRYPTPW is not set +CONFIG_MKPASSWD=y +CONFIG_DELUSER=y +CONFIG_DELGROUP=y +# CONFIG_FEATURE_DEL_USER_FROM_GROUP is not set +CONFIG_GETTY=y +CONFIG_LOGIN=y +# CONFIG_LOGIN_SESSION_AS_CHILD is not set +# CONFIG_LOGIN_SCRIPTS is not set +CONFIG_FEATURE_NOLOGIN=y +CONFIG_FEATURE_SECURETTY=y +CONFIG_PASSWD=y +CONFIG_FEATURE_PASSWD_WEAK_CHECK=y +CONFIG_SU=y +CONFIG_FEATURE_SU_SYSLOG=y +CONFIG_FEATURE_SU_CHECKS_SHELLS=y +# CONFIG_FEATURE_SU_BLANK_PW_NEEDS_SECURE_TTY is not set +CONFIG_SULOGIN=y +CONFIG_VLOCK=y + +# +# Linux Ext2 FS Progs +# +CONFIG_CHATTR=y +CONFIG_FSCK=y +CONFIG_LSATTR=y +# CONFIG_TUNE2FS is not set + +# +# Linux Module Utilities +# +# CONFIG_MODPROBE_SMALL is not set +# CONFIG_DEPMOD is not set +CONFIG_INSMOD=y +CONFIG_LSMOD=y +CONFIG_FEATURE_LSMOD_PRETTY_2_6_OUTPUT=y +# CONFIG_MODINFO is not set +CONFIG_MODPROBE=y +# CONFIG_FEATURE_MODPROBE_BLACKLIST is not set +CONFIG_RMMOD=y + +# +# Options common to multiple modutils +# +CONFIG_FEATURE_CMDLINE_MODULE_OPTIONS=y +# CONFIG_FEATURE_MODPROBE_SMALL_CHECK_ALREADY_LOADED is not set +# CONFIG_FEATURE_2_4_MODULES is not set +# CONFIG_FEATURE_INSMOD_VERSION_CHECKING is not set +# CONFIG_FEATURE_INSMOD_KSYMOOPS_SYMBOLS is not set +# CONFIG_FEATURE_INSMOD_LOADINKMEM is not set +# CONFIG_FEATURE_INSMOD_LOAD_MAP is not set +# CONFIG_FEATURE_INSMOD_LOAD_MAP_FULL is not set +CONFIG_FEATURE_CHECK_TAINTED_MODULE=y +# CONFIG_FEATURE_INSMOD_TRY_MMAP is not set +CONFIG_FEATURE_MODUTILS_ALIAS=y +CONFIG_FEATURE_MODUTILS_SYMBOLS=y +CONFIG_DEFAULT_MODULES_DIR="/lib/modules" +CONFIG_DEFAULT_DEPMOD_FILE="modules.dep" + +# +# Linux System Utilities +# +# CONFIG_ACPID is not set +# CONFIG_FEATURE_ACPID_COMPAT is not set +# CONFIG_BLKDISCARD is not set 
+CONFIG_BLKID=y +# CONFIG_FEATURE_BLKID_TYPE is not set +# CONFIG_BLOCKDEV is not set +# CONFIG_CAL is not set +CONFIG_CHRT=y +CONFIG_DMESG=y +CONFIG_FEATURE_DMESG_PRETTY=y +CONFIG_EJECT=y +# CONFIG_FEATURE_EJECT_SCSI is not set +CONFIG_FALLOCATE=y +# CONFIG_FATATTR is not set +CONFIG_FBSET=y +CONFIG_FEATURE_FBSET_FANCY=y +CONFIG_FEATURE_FBSET_READMODE=y +CONFIG_FDFORMAT=y +CONFIG_FDISK=y +# CONFIG_FDISK_SUPPORT_LARGE_DISKS is not set +CONFIG_FEATURE_FDISK_WRITABLE=y +# CONFIG_FEATURE_AIX_LABEL is not set +# CONFIG_FEATURE_SGI_LABEL is not set +# CONFIG_FEATURE_SUN_LABEL is not set +# CONFIG_FEATURE_OSF_LABEL is not set +CONFIG_FEATURE_GPT_LABEL=y +CONFIG_FEATURE_FDISK_ADVANCED=y +# CONFIG_FINDFS is not set +CONFIG_FLOCK=y +CONFIG_FDFLUSH=y +CONFIG_FREERAMDISK=y +# CONFIG_FSCK_MINIX is not set +CONFIG_FSFREEZE=y +CONFIG_FSTRIM=y +CONFIG_GETOPT=y +CONFIG_FEATURE_GETOPT_LONG=y +CONFIG_HEXDUMP=y +# CONFIG_HD is not set +CONFIG_XXD=y +CONFIG_HWCLOCK=y +CONFIG_FEATURE_HWCLOCK_ADJTIME_FHS=y +# CONFIG_IONICE is not set +CONFIG_IPCRM=y +CONFIG_IPCS=y +CONFIG_LAST=y +# CONFIG_FEATURE_LAST_FANCY is not set +CONFIG_LOSETUP=y +CONFIG_LSPCI=y +CONFIG_LSUSB=y +CONFIG_MDEV=y +CONFIG_FEATURE_MDEV_CONF=y +CONFIG_FEATURE_MDEV_RENAME=y +# CONFIG_FEATURE_MDEV_RENAME_REGEXP is not set +CONFIG_FEATURE_MDEV_EXEC=y +# CONFIG_FEATURE_MDEV_LOAD_FIRMWARE is not set +CONFIG_FEATURE_MDEV_DAEMON=y +CONFIG_MESG=y +CONFIG_FEATURE_MESG_ENABLE_ONLY_GROUP=y +CONFIG_MKE2FS=y +# CONFIG_MKFS_EXT2 is not set +# CONFIG_MKFS_MINIX is not set +# CONFIG_FEATURE_MINIX2 is not set +# CONFIG_MKFS_REISER is not set +CONFIG_MKDOSFS=y +# CONFIG_MKFS_VFAT is not set +CONFIG_MKSWAP=y +# CONFIG_FEATURE_MKSWAP_UUID is not set +CONFIG_MORE=y +CONFIG_MOUNT=y +# CONFIG_FEATURE_MOUNT_FAKE is not set +# CONFIG_FEATURE_MOUNT_VERBOSE is not set +# CONFIG_FEATURE_MOUNT_HELPERS is not set +# CONFIG_FEATURE_MOUNT_LABEL is not set +# CONFIG_FEATURE_MOUNT_NFS is not set +CONFIG_FEATURE_MOUNT_CIFS=y +CONFIG_FEATURE_MOUNT_FLAGS=y +CONFIG_FEATURE_MOUNT_FSTAB=y +CONFIG_FEATURE_MOUNT_OTHERTAB=y +CONFIG_MOUNTPOINT=y +CONFIG_NOLOGIN=y +# CONFIG_NOLOGIN_DEPENDENCIES is not set +# CONFIG_NSENTER is not set +CONFIG_PIVOT_ROOT=y +CONFIG_RDATE=y +# CONFIG_RDEV is not set +CONFIG_READPROFILE=y +CONFIG_RENICE=y +# CONFIG_REV is not set +# CONFIG_RTCWAKE is not set +# CONFIG_SCRIPT is not set +# CONFIG_SCRIPTREPLAY is not set +CONFIG_SETARCH=y +CONFIG_LINUX32=y +CONFIG_LINUX64=y +CONFIG_SETPRIV=y +CONFIG_FEATURE_SETPRIV_DUMP=y +CONFIG_FEATURE_SETPRIV_CAPABILITIES=y +CONFIG_FEATURE_SETPRIV_CAPABILITY_NAMES=y +CONFIG_SETSID=y +CONFIG_SWAPON=y +# CONFIG_FEATURE_SWAPON_DISCARD is not set +# CONFIG_FEATURE_SWAPON_PRI is not set +CONFIG_SWAPOFF=y +CONFIG_FEATURE_SWAPONOFF_LABEL=y +CONFIG_SWITCH_ROOT=y +# CONFIG_TASKSET is not set +# CONFIG_FEATURE_TASKSET_FANCY is not set +# CONFIG_FEATURE_TASKSET_CPULIST is not set +CONFIG_UEVENT=y +CONFIG_UMOUNT=y +CONFIG_FEATURE_UMOUNT_ALL=y +# CONFIG_UNSHARE is not set +# CONFIG_WALL is not set + +# +# Common options for mount/umount +# +CONFIG_FEATURE_MOUNT_LOOP=y +CONFIG_FEATURE_MOUNT_LOOP_CREATE=y +# CONFIG_FEATURE_MTAB_SUPPORT is not set +CONFIG_VOLUMEID=y + +# +# Filesystem/Volume identification +# +# CONFIG_FEATURE_VOLUMEID_BCACHE is not set +# CONFIG_FEATURE_VOLUMEID_BTRFS is not set +# CONFIG_FEATURE_VOLUMEID_CRAMFS is not set +CONFIG_FEATURE_VOLUMEID_EROFS=y +CONFIG_FEATURE_VOLUMEID_EXFAT=y +CONFIG_FEATURE_VOLUMEID_EXT=y +CONFIG_FEATURE_VOLUMEID_F2FS=y +CONFIG_FEATURE_VOLUMEID_FAT=y +# CONFIG_FEATURE_VOLUMEID_HFS is not set +# 
CONFIG_FEATURE_VOLUMEID_ISO9660 is not set +# CONFIG_FEATURE_VOLUMEID_JFS is not set +# CONFIG_FEATURE_VOLUMEID_LFS is not set +# CONFIG_FEATURE_VOLUMEID_LINUXRAID is not set +# CONFIG_FEATURE_VOLUMEID_LINUXSWAP is not set +# CONFIG_FEATURE_VOLUMEID_LUKS is not set +CONFIG_FEATURE_VOLUMEID_MINIX=y +# CONFIG_FEATURE_VOLUMEID_NILFS is not set +# CONFIG_FEATURE_VOLUMEID_NTFS is not set +# CONFIG_FEATURE_VOLUMEID_OCFS2 is not set +# CONFIG_FEATURE_VOLUMEID_REISERFS is not set +# CONFIG_FEATURE_VOLUMEID_ROMFS is not set +# CONFIG_FEATURE_VOLUMEID_SQUASHFS is not set +# CONFIG_FEATURE_VOLUMEID_SYSV is not set +CONFIG_FEATURE_VOLUMEID_UBIFS=y +# CONFIG_FEATURE_VOLUMEID_UDF is not set +# CONFIG_FEATURE_VOLUMEID_XFS is not set + +# +# Miscellaneous Utilities +# +# CONFIG_ADJTIMEX is not set +CONFIG_ASCII=y +# CONFIG_BBCONFIG is not set +# CONFIG_FEATURE_COMPRESS_BBCONFIG is not set +CONFIG_BC=y +CONFIG_DC=y +CONFIG_FEATURE_DC_BIG=y +# CONFIG_FEATURE_DC_LIBM is not set +CONFIG_FEATURE_BC_INTERACTIVE=y +CONFIG_FEATURE_BC_LONG_OPTIONS=y +# CONFIG_BEEP is not set +CONFIG_FEATURE_BEEP_FREQ=0 +CONFIG_FEATURE_BEEP_LENGTH_MS=0 +# CONFIG_CHAT is not set +# CONFIG_FEATURE_CHAT_NOFAIL is not set +# CONFIG_FEATURE_CHAT_TTY_HIFI is not set +# CONFIG_FEATURE_CHAT_IMPLICIT_CR is not set +# CONFIG_FEATURE_CHAT_SWALLOW_OPTS is not set +# CONFIG_FEATURE_CHAT_SEND_ESCAPES is not set +# CONFIG_FEATURE_CHAT_VAR_ABORT_LEN is not set +# CONFIG_FEATURE_CHAT_CLR_ABORT is not set +# CONFIG_CONSPY is not set +CONFIG_CROND=y +# CONFIG_FEATURE_CROND_D is not set +# CONFIG_FEATURE_CROND_CALL_SENDMAIL is not set +CONFIG_FEATURE_CROND_SPECIAL_TIMES=y +CONFIG_FEATURE_CROND_DIR="/var/spool/cron" +CONFIG_CRONTAB=y +# CONFIG_DEVFSD is not set +# CONFIG_DEVFSD_MODLOAD is not set +# CONFIG_DEVFSD_FG_NP is not set +# CONFIG_DEVFSD_VERBOSE is not set +# CONFIG_FEATURE_DEVFS is not set +CONFIG_DEVMEM=y +# CONFIG_FBSPLASH is not set +# CONFIG_FLASH_ERASEALL is not set +# CONFIG_FLASH_LOCK is not set +# CONFIG_FLASH_UNLOCK is not set +# CONFIG_FLASHCP is not set +CONFIG_HDPARM=y +CONFIG_FEATURE_HDPARM_GET_IDENTITY=y +# CONFIG_FEATURE_HDPARM_HDIO_SCAN_HWIF is not set +# CONFIG_FEATURE_HDPARM_HDIO_UNREGISTER_HWIF is not set +# CONFIG_FEATURE_HDPARM_HDIO_DRIVE_RESET is not set +# CONFIG_FEATURE_HDPARM_HDIO_TRISTATE_HWIF is not set +# CONFIG_FEATURE_HDPARM_HDIO_GETSET_DMA is not set +CONFIG_HEXEDIT=y +CONFIG_I2CGET=y +CONFIG_I2CSET=y +CONFIG_I2CDUMP=y +CONFIG_I2CDETECT=y +CONFIG_I2CTRANSFER=y +# CONFIG_INOTIFYD is not set +CONFIG_LESS=y +CONFIG_FEATURE_LESS_MAXLINES=9999999 +CONFIG_FEATURE_LESS_BRACKETS=y +CONFIG_FEATURE_LESS_FLAGS=y +CONFIG_FEATURE_LESS_TRUNCATE=y +# CONFIG_FEATURE_LESS_MARKS is not set +CONFIG_FEATURE_LESS_REGEXP=y +# CONFIG_FEATURE_LESS_WINCH is not set +# CONFIG_FEATURE_LESS_ASK_TERMINAL is not set +# CONFIG_FEATURE_LESS_DASHCMD is not set +# CONFIG_FEATURE_LESS_LINENUMS is not set +# CONFIG_FEATURE_LESS_RAW is not set +# CONFIG_FEATURE_LESS_ENV is not set +CONFIG_LSSCSI=y +CONFIG_MAKEDEVS=y +# CONFIG_FEATURE_MAKEDEVS_LEAF is not set +CONFIG_FEATURE_MAKEDEVS_TABLE=y +# CONFIG_MAN is not set +CONFIG_MICROCOM=y +CONFIG_MIM=y +CONFIG_MT=y +# CONFIG_NANDWRITE is not set +# CONFIG_NANDDUMP is not set +CONFIG_PARTPROBE=y +# CONFIG_RAIDAUTORUN is not set +# CONFIG_READAHEAD is not set +# CONFIG_RFKILL is not set +CONFIG_RUNLEVEL=y +# CONFIG_RX is not set +CONFIG_SETFATTR=y +CONFIG_SETSERIAL=y +CONFIG_STRINGS=y +CONFIG_TIME=y +CONFIG_TS=y +# CONFIG_TTYSIZE is not set +# CONFIG_UBIATTACH is not set +# CONFIG_UBIDETACH is not set +# 
CONFIG_UBIMKVOL is not set +# CONFIG_UBIRMVOL is not set +# CONFIG_UBIRSVOL is not set +# CONFIG_UBIUPDATEVOL is not set +CONFIG_UBIRENAME=y +# CONFIG_VOLNAME is not set +CONFIG_WATCHDOG=y +# CONFIG_FEATURE_WATCHDOG_OPEN_TWICE is not set + +# +# Networking Utilities +# +CONFIG_FEATURE_IPV6=y +# CONFIG_FEATURE_UNIX_LOCAL is not set +CONFIG_FEATURE_PREFER_IPV4_ADDRESS=y +# CONFIG_VERBOSE_RESOLUTION_ERRORS is not set +# CONFIG_FEATURE_ETC_NETWORKS is not set +# CONFIG_FEATURE_ETC_SERVICES is not set +CONFIG_FEATURE_HWIB=y +# CONFIG_FEATURE_TLS_SHA1 is not set +CONFIG_ARP=y +CONFIG_ARPING=y +# CONFIG_BRCTL is not set +# CONFIG_FEATURE_BRCTL_FANCY is not set +# CONFIG_FEATURE_BRCTL_SHOW is not set +CONFIG_DNSD=y +CONFIG_ETHER_WAKE=y +# CONFIG_FTPD is not set +# CONFIG_FEATURE_FTPD_WRITE is not set +# CONFIG_FEATURE_FTPD_ACCEPT_BROKEN_LIST is not set +# CONFIG_FEATURE_FTPD_AUTHENTICATION is not set +# CONFIG_FTPGET is not set +# CONFIG_FTPPUT is not set +# CONFIG_FEATURE_FTPGETPUT_LONG_OPTIONS is not set +CONFIG_HOSTNAME=y +CONFIG_DNSDOMAINNAME=y +# CONFIG_HTTPD is not set +CONFIG_FEATURE_HTTPD_PORT_DEFAULT=0 +# CONFIG_FEATURE_HTTPD_RANGES is not set +# CONFIG_FEATURE_HTTPD_SETUID is not set +# CONFIG_FEATURE_HTTPD_BASIC_AUTH is not set +# CONFIG_FEATURE_HTTPD_AUTH_MD5 is not set +# CONFIG_FEATURE_HTTPD_CGI is not set +# CONFIG_FEATURE_HTTPD_CONFIG_WITH_SCRIPT_INTERPR is not set +# CONFIG_FEATURE_HTTPD_SET_REMOTE_PORT_TO_ENV is not set +# CONFIG_FEATURE_HTTPD_ENCODE_URL_STR is not set +# CONFIG_FEATURE_HTTPD_ERROR_PAGES is not set +# CONFIG_FEATURE_HTTPD_PROXY is not set +# CONFIG_FEATURE_HTTPD_GZIP is not set +# CONFIG_FEATURE_HTTPD_ETAG is not set +# CONFIG_FEATURE_HTTPD_LAST_MODIFIED is not set +# CONFIG_FEATURE_HTTPD_DATE is not set +# CONFIG_FEATURE_HTTPD_ACL_IP is not set +CONFIG_IFCONFIG=y +CONFIG_FEATURE_IFCONFIG_STATUS=y +CONFIG_FEATURE_IFCONFIG_SLIP=y +CONFIG_FEATURE_IFCONFIG_MEMSTART_IOADDR_IRQ=y +CONFIG_FEATURE_IFCONFIG_HW=y +# CONFIG_FEATURE_IFCONFIG_BROADCAST_PLUS is not set +# CONFIG_IFENSLAVE is not set +# CONFIG_IFPLUGD is not set +CONFIG_IFUP=y +CONFIG_IFDOWN=y +CONFIG_IFUPDOWN_IFSTATE_PATH="/var/run/ifstate" +CONFIG_FEATURE_IFUPDOWN_IP=y +CONFIG_FEATURE_IFUPDOWN_IPV4=y +CONFIG_FEATURE_IFUPDOWN_IPV6=y +CONFIG_FEATURE_IFUPDOWN_MAPPING=y +# CONFIG_FEATURE_IFUPDOWN_EXTERNAL_DHCP is not set +CONFIG_INETD=y +CONFIG_FEATURE_INETD_SUPPORT_BUILTIN_ECHO=y +CONFIG_FEATURE_INETD_SUPPORT_BUILTIN_DISCARD=y +CONFIG_FEATURE_INETD_SUPPORT_BUILTIN_TIME=y +CONFIG_FEATURE_INETD_SUPPORT_BUILTIN_DAYTIME=y +CONFIG_FEATURE_INETD_SUPPORT_BUILTIN_CHARGEN=y +# CONFIG_FEATURE_INETD_RPC is not set +CONFIG_IP=y +CONFIG_IPADDR=y +CONFIG_IPLINK=y +CONFIG_IPROUTE=y +CONFIG_IPTUNNEL=y +CONFIG_IPRULE=y +CONFIG_IPNEIGH=y +CONFIG_FEATURE_IP_ADDRESS=y +CONFIG_FEATURE_IP_LINK=y +CONFIG_FEATURE_IP_ROUTE=y +CONFIG_FEATURE_IP_ROUTE_DIR="/etc/iproute2" +CONFIG_FEATURE_IP_TUNNEL=y +CONFIG_FEATURE_IP_RULE=y +CONFIG_FEATURE_IP_NEIGH=y +# CONFIG_FEATURE_IP_RARE_PROTOCOLS is not set +# CONFIG_IPCALC is not set +# CONFIG_FEATURE_IPCALC_LONG_OPTIONS is not set +# CONFIG_FEATURE_IPCALC_FANCY is not set +# CONFIG_FAKEIDENTD is not set +CONFIG_NAMEIF=y +# CONFIG_FEATURE_NAMEIF_EXTENDED is not set +# CONFIG_NBDCLIENT is not set +# CONFIG_NC is not set +# CONFIG_NETCAT is not set +# CONFIG_NC_SERVER is not set +# CONFIG_NC_EXTRA is not set +# CONFIG_NC_110_COMPAT is not set +CONFIG_NETSTAT=y +# CONFIG_FEATURE_NETSTAT_WIDE is not set +# CONFIG_FEATURE_NETSTAT_PRG is not set +CONFIG_NSLOOKUP=y +CONFIG_FEATURE_NSLOOKUP_BIG=y 
+CONFIG_FEATURE_NSLOOKUP_LONG_OPTIONS=y +# CONFIG_NTPD is not set +# CONFIG_FEATURE_NTPD_SERVER is not set +# CONFIG_FEATURE_NTPD_CONF is not set +# CONFIG_FEATURE_NTP_AUTH is not set +CONFIG_PING=y +# CONFIG_PING6 is not set +CONFIG_FEATURE_FANCY_PING=y +# CONFIG_PSCAN is not set +CONFIG_ROUTE=y +# CONFIG_SLATTACH is not set +# CONFIG_SSL_CLIENT is not set +CONFIG_TC=y +CONFIG_FEATURE_TC_INGRESS=y +# CONFIG_TCPSVD is not set +# CONFIG_UDPSVD is not set +CONFIG_TELNET=y +CONFIG_FEATURE_TELNET_TTYPE=y +CONFIG_FEATURE_TELNET_AUTOLOGIN=y +CONFIG_FEATURE_TELNET_WIDTH=y +# CONFIG_TELNETD is not set +# CONFIG_FEATURE_TELNETD_STANDALONE is not set +CONFIG_FEATURE_TELNETD_PORT_DEFAULT=0 +# CONFIG_FEATURE_TELNETD_INETD_WAIT is not set +CONFIG_TFTP=y +# CONFIG_FEATURE_TFTP_PROGRESS_BAR is not set +CONFIG_FEATURE_TFTP_HPA_COMPAT=y +# CONFIG_TFTPD is not set +CONFIG_FEATURE_TFTP_GET=y +CONFIG_FEATURE_TFTP_PUT=y +CONFIG_FEATURE_TFTP_BLOCKSIZE=y +# CONFIG_TFTP_DEBUG is not set +# CONFIG_TLS is not set +CONFIG_TRACEROUTE=y +# CONFIG_TRACEROUTE6 is not set +# CONFIG_FEATURE_TRACEROUTE_VERBOSE is not set +# CONFIG_FEATURE_TRACEROUTE_USE_ICMP is not set +# CONFIG_TUNCTL is not set +# CONFIG_FEATURE_TUNCTL_UG is not set +CONFIG_VCONFIG=y +CONFIG_WGET=y +CONFIG_FEATURE_WGET_LONG_OPTIONS=y +CONFIG_FEATURE_WGET_STATUSBAR=y +CONFIG_FEATURE_WGET_FTP=y +CONFIG_FEATURE_WGET_AUTHENTICATION=y +CONFIG_FEATURE_WGET_TIMEOUT=y +# CONFIG_FEATURE_WGET_HTTPS is not set +# CONFIG_FEATURE_WGET_OPENSSL is not set +# CONFIG_WHOIS is not set +# CONFIG_ZCIP is not set +# CONFIG_UDHCPD is not set +# CONFIG_FEATURE_UDHCPD_BASE_IP_ON_MAC is not set +# CONFIG_FEATURE_UDHCPD_WRITE_LEASES_EARLY is not set +CONFIG_DHCPD_LEASES_FILE="" +# CONFIG_DUMPLEASES is not set +# CONFIG_DHCPRELAY is not set +CONFIG_UDHCPC=y +CONFIG_FEATURE_UDHCPC_ARPING=y +CONFIG_FEATURE_UDHCPC_SANITIZEOPT=y +CONFIG_UDHCPC_DEFAULT_SCRIPT="/usr/share/udhcpc/default.script" +# CONFIG_UDHCPC6 is not set +# CONFIG_FEATURE_UDHCPC6_RFC3646 is not set +# CONFIG_FEATURE_UDHCPC6_RFC4704 is not set +# CONFIG_FEATURE_UDHCPC6_RFC4833 is not set +# CONFIG_FEATURE_UDHCPC6_RFC5970 is not set + +# +# Common options for DHCP applets +# +CONFIG_UDHCPC_DEFAULT_INTERFACE="eth0" +# CONFIG_FEATURE_UDHCP_PORT is not set +CONFIG_UDHCP_DEBUG=0 +CONFIG_UDHCPC_SLACK_FOR_BUGGY_SERVERS=80 +CONFIG_FEATURE_UDHCP_RFC3397=y +CONFIG_FEATURE_UDHCP_8021Q=y +CONFIG_IFUPDOWN_UDHCPC_CMD_OPTIONS="-b -R -O search" + +# +# Print Utilities +# +# CONFIG_LPD is not set +# CONFIG_LPR is not set +# CONFIG_LPQ is not set + +# +# Mail Utilities +# +CONFIG_FEATURE_MIME_CHARSET="" +# CONFIG_MAKEMIME is not set +# CONFIG_POPMAILDIR is not set +# CONFIG_FEATURE_POPMAILDIR_DELIVERY is not set +# CONFIG_REFORMIME is not set +# CONFIG_FEATURE_REFORMIME_COMPAT is not set +# CONFIG_SENDMAIL is not set + +# +# Process Utilities +# +# CONFIG_FEATURE_FAST_TOP is not set +# CONFIG_FEATURE_SHOW_THREADS is not set +CONFIG_FREE=y +CONFIG_FUSER=y +# CONFIG_IOSTAT is not set +CONFIG_KILL=y +CONFIG_KILLALL=y +CONFIG_KILLALL5=y +CONFIG_LSOF=y +# CONFIG_MPSTAT is not set +# CONFIG_NMETER is not set +# CONFIG_PGREP is not set +# CONFIG_PKILL is not set +CONFIG_PIDOF=y +CONFIG_FEATURE_PIDOF_SINGLE=y +CONFIG_FEATURE_PIDOF_OMIT=y +# CONFIG_PMAP is not set +# CONFIG_POWERTOP is not set +# CONFIG_FEATURE_POWERTOP_INTERACTIVE is not set +CONFIG_PS=y +# CONFIG_FEATURE_PS_WIDE is not set +# CONFIG_FEATURE_PS_LONG is not set +# CONFIG_FEATURE_PS_TIME is not set +# CONFIG_FEATURE_PS_UNUSUAL_SYSTEMS is not set +# 
CONFIG_FEATURE_PS_ADDITIONAL_COLUMNS is not set +# CONFIG_PSTREE is not set +# CONFIG_PWDX is not set +# CONFIG_SMEMCAP is not set +CONFIG_BB_SYSCTL=y +CONFIG_TOP=y +CONFIG_FEATURE_TOP_INTERACTIVE=y +CONFIG_FEATURE_TOP_CPU_USAGE_PERCENTAGE=y +CONFIG_FEATURE_TOP_CPU_GLOBAL_PERCENTS=y +# CONFIG_FEATURE_TOP_SMP_CPU is not set +# CONFIG_FEATURE_TOP_DECIMALS is not set +# CONFIG_FEATURE_TOP_SMP_PROCESS is not set +# CONFIG_FEATURE_TOPMEM is not set +CONFIG_UPTIME=y +# CONFIG_FEATURE_UPTIME_UTMP_SUPPORT is not set +CONFIG_WATCH=y + +# +# Runit Utilities +# +# CONFIG_CHPST is not set +# CONFIG_SETUIDGID is not set +# CONFIG_ENVUIDGID is not set +# CONFIG_ENVDIR is not set +# CONFIG_SOFTLIMIT is not set +# CONFIG_RUNSV is not set +# CONFIG_RUNSVDIR is not set +# CONFIG_FEATURE_RUNSVDIR_LOG is not set +# CONFIG_SV is not set +CONFIG_SV_DEFAULT_SERVICE_DIR="" +CONFIG_SVC=y +CONFIG_SVOK=y +# CONFIG_SVLOGD is not set +# CONFIG_CHCON is not set +# CONFIG_GETENFORCE is not set +# CONFIG_GETSEBOOL is not set +# CONFIG_LOAD_POLICY is not set +# CONFIG_MATCHPATHCON is not set +# CONFIG_RUNCON is not set +# CONFIG_SELINUXENABLED is not set +# CONFIG_SESTATUS is not set +# CONFIG_SETENFORCE is not set +# CONFIG_SETFILES is not set +# CONFIG_FEATURE_SETFILES_CHECK_OPTION is not set +# CONFIG_RESTORECON is not set +# CONFIG_SETSEBOOL is not set + +# +# Shells +# +CONFIG_SH_IS_ASH=y +# CONFIG_SH_IS_HUSH is not set +# CONFIG_SH_IS_NONE is not set +# CONFIG_BASH_IS_ASH is not set +# CONFIG_BASH_IS_HUSH is not set +CONFIG_BASH_IS_NONE=y +CONFIG_SHELL_ASH=y +CONFIG_ASH=y +CONFIG_ASH_OPTIMIZE_FOR_SIZE=y +CONFIG_ASH_INTERNAL_GLOB=y +CONFIG_ASH_BASH_COMPAT=y +# CONFIG_ASH_BASH_SOURCE_CURDIR is not set +CONFIG_ASH_BASH_NOT_FOUND_HOOK=y +CONFIG_ASH_JOB_CONTROL=y +CONFIG_ASH_ALIAS=y +CONFIG_ASH_RANDOM_SUPPORT=y +CONFIG_ASH_EXPAND_PRMT=y +CONFIG_ASH_IDLE_TIMEOUT=y +# CONFIG_ASH_MAIL is not set +CONFIG_ASH_ECHO=y +CONFIG_ASH_PRINTF=y +CONFIG_ASH_TEST=y +CONFIG_ASH_HELP=y +CONFIG_ASH_GETOPTS=y +CONFIG_ASH_CMDCMD=y +# CONFIG_CTTYHACK is not set +# CONFIG_HUSH is not set +# CONFIG_SHELL_HUSH is not set +# CONFIG_HUSH_BASH_COMPAT is not set +# CONFIG_HUSH_BRACE_EXPANSION is not set +# CONFIG_HUSH_BASH_SOURCE_CURDIR is not set +# CONFIG_HUSH_LINENO_VAR is not set +# CONFIG_HUSH_INTERACTIVE is not set +# CONFIG_HUSH_SAVEHISTORY is not set +# CONFIG_HUSH_JOB is not set +# CONFIG_HUSH_TICK is not set +# CONFIG_HUSH_IF is not set +# CONFIG_HUSH_LOOPS is not set +# CONFIG_HUSH_CASE is not set +# CONFIG_HUSH_FUNCTIONS is not set +# CONFIG_HUSH_LOCAL is not set +# CONFIG_HUSH_RANDOM_SUPPORT is not set +# CONFIG_HUSH_MODE_X is not set +# CONFIG_HUSH_ECHO is not set +# CONFIG_HUSH_PRINTF is not set +# CONFIG_HUSH_TEST is not set +# CONFIG_HUSH_HELP is not set +# CONFIG_HUSH_EXPORT is not set +# CONFIG_HUSH_EXPORT_N is not set +# CONFIG_HUSH_READONLY is not set +# CONFIG_HUSH_KILL is not set +# CONFIG_HUSH_WAIT is not set +# CONFIG_HUSH_COMMAND is not set +# CONFIG_HUSH_TRAP is not set +# CONFIG_HUSH_TYPE is not set +# CONFIG_HUSH_TIMES is not set +# CONFIG_HUSH_READ is not set +# CONFIG_HUSH_SET is not set +# CONFIG_HUSH_UNSET is not set +# CONFIG_HUSH_ULIMIT is not set +# CONFIG_HUSH_UMASK is not set +# CONFIG_HUSH_GETOPTS is not set +# CONFIG_HUSH_MEMLEAK is not set + +# +# Options common to all shells +# +CONFIG_FEATURE_SH_MATH=y +CONFIG_FEATURE_SH_MATH_64=y +CONFIG_FEATURE_SH_MATH_BASE=y +CONFIG_FEATURE_SH_EXTRA_QUIET=y +# CONFIG_FEATURE_SH_STANDALONE is not set +# CONFIG_FEATURE_SH_NOFORK is not set +CONFIG_FEATURE_SH_READ_FRAC=y 
+# CONFIG_FEATURE_SH_HISTFILESIZE is not set +CONFIG_FEATURE_SH_EMBEDDED_SCRIPTS=y + +# +# System Logging Utilities +# +CONFIG_KLOGD=y +CONFIG_FEATURE_KLOGD_KLOGCTL=y +CONFIG_LOGGER=y +# CONFIG_LOGREAD is not set +# CONFIG_FEATURE_LOGREAD_REDUCED_LOCKING is not set +CONFIG_SYSLOGD=y +CONFIG_FEATURE_ROTATE_LOGFILE=y +CONFIG_FEATURE_REMOTE_LOG=y +# CONFIG_FEATURE_SYSLOGD_DUP is not set +# CONFIG_FEATURE_SYSLOGD_CFG is not set +# CONFIG_FEATURE_SYSLOGD_PRECISE_TIMESTAMPS is not set +CONFIG_FEATURE_SYSLOGD_READ_BUFFER_SIZE=256 +# CONFIG_FEATURE_IPC_SYSLOG is not set +CONFIG_FEATURE_IPC_SYSLOG_BUFFER_SIZE=0 +# CONFIG_FEATURE_KMSG_SYSLOG is not set diff --git a/files/board/arpl/grub.bin b/files/board/arpl/grub.bin new file mode 100644 index 00000000..fc15bfe7 Binary files /dev/null and b/files/board/arpl/grub.bin differ diff --git a/files/board/arpl/kernel_defconfig b/files/board/arpl/kernel_defconfig new file mode 100644 index 00000000..bab5e451 --- /dev/null +++ b/files/board/arpl/kernel_defconfig @@ -0,0 +1,352 @@ +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_KERNEL_XZ=y +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_CGROUPS=y +CONFIG_CGROUP_SCHED=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_COMPAT_BRK is not set +CONFIG_PROFILING=y +CONFIG_SMP=y +CONFIG_IOSF_MBI=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_MICROCODE_AMD=y +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +CONFIG_NUMA=y +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +# CONFIG_MTRR_SANITIZER is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_MIXED=y +CONFIG_HZ_1000=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_HIBERNATION=y +CONFIG_PM_DEBUG=y +CONFIG_PM_TRACE_RTC=y +CONFIG_ACPI_VIDEO=y +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_BGRT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_X86_ACPI_CPUFREQ=y +CONFIG_IA32_EMULATION=y +# CONFIG_VIRTUALIZATION is not set +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_GCC_PLUGINS is not set +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_BINFMT_MISC=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +# CONFIG_INET_DIAG is not set +CONFIG_TCP_CONG_ADVANCED=y +# CONFIG_TCP_CONG_BIC is not set +# CONFIG_TCP_CONG_WESTWOOD is not set +# CONFIG_TCP_CONG_HTCP is not set +CONFIG_TCP_MD5SIG=y +# CONFIG_IPV6 is not set +CONFIG_NETLABEL=y +CONFIG_NETFILTER=y +# CONFIG_NETFILTER_ADVANCED is not set +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_FTP=y +CONFIG_NF_CONNTRACK_IRC=y +CONFIG_NF_CONNTRACK_SIP=y +CONFIG_NF_CT_NETLINK=y +CONFIG_NF_NAT=y +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y +CONFIG_NETFILTER_XT_TARGET_NFLOG=y +CONFIG_NETFILTER_XT_TARGET_SECMARK=y +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +CONFIG_NETFILTER_XT_MATCH_POLICY=y +CONFIG_NETFILTER_XT_MATCH_STATE=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y 
+CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_MANGLE=y +CONFIG_NET_SCHED=y +CONFIG_NET_EMATCH=y +CONFIG_NET_CLS_ACT=y +# CONFIG_WIRELESS is not set +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DEBUG_DEVRES=y +CONFIG_CONNECTOR=y +CONFIG_EFI_VARS=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_NVME=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_BLK_DEV_3W_XXXX_RAID=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_AIC79XX=m +# CONFIG_AIC79XX_DEBUG_ENABLE is not set +CONFIG_SCSI_AIC94XX=m +CONFIG_SCSI_MVSAS=m +CONFIG_SCSI_MVUMI=m +CONFIG_SCSI_DPT_I2O=m +CONFIG_MEGARAID_LEGACY=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_UFSHCD=m +CONFIG_SCSI_BUSLOGIC=m +CONFIG_VMWARE_PVSCSI=m +CONFIG_SCSI_ISCI=m +CONFIG_SCSI_IPS=m +CONFIG_SCSI_AM53C974=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_DH=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_ATA_PIIX=y +CONFIG_SATA_MV=m +CONFIG_SATA_NV=m +CONFIG_SATA_SIL=m +CONFIG_SATA_SIS=m +CONFIG_SATA_ULI=m +CONFIG_SATA_VIA=m +CONFIG_PATA_AMD=y +CONFIG_PATA_OLDPIIX=y +CONFIG_PATA_SCH=y +CONFIG_ATA_GENERIC=y +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_BLK_DEV_DM=y +CONFIG_DM_MIRROR=y +CONFIG_DM_ZERO=y +CONFIG_NETDEVICES=y +CONFIG_NETCONSOLE=y +CONFIG_VIRTIO_NET=m +CONFIG_ET131X=m +CONFIG_ACENIC=m +CONFIG_AMD_XGBE=m +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +CONFIG_CNIC=m +CONFIG_TIGON3=y +CONFIG_BNX2X=m +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T3=m +CONFIG_NET_TULIP=y +CONFIG_DL2K=m +CONFIG_BE2NET=m +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_IXGB=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_SKGE=m +CONFIG_SKY2=y +CONFIG_MLX4_EN=m +CONFIG_MYRI10GE=m +CONFIG_S2IO=m +CONFIG_VXGE=m +CONFIG_FORCEDETH=m +CONFIG_QLCNIC=m +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_BNA=m +CONFIG_QCOM_EMAC=m +CONFIG_R8169=m +CONFIG_SXGBE_ETH=m +CONFIG_SIS190=m +CONFIG_STMMAC_ETH=m +CONFIG_XILINX_AXI_EMAC=m +CONFIG_REALTEK_PHY=y +# CONFIG_USB_NET_DRIVERS is not set +# CONFIG_WLAN is not set +CONFIG_VMXNET3=m +CONFIG_INPUT_EVDEV=y +CONFIG_INPUT_TABLET=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_DETECT_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_VIRTIO_CONSOLE=m +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_INTEL is not set +# CONFIG_HW_RANDOM_AMD is not set +CONFIG_NVRAM=y +CONFIG_HPET=y +# CONFIG_HPET_MMAP is not set +CONFIG_I2C_I801=y +CONFIG_WATCHDOG=y +CONFIG_REGULATOR=y +CONFIG_AGP=y +CONFIG_AGP_AMD64=y +CONFIG_AGP_INTEL=y +CONFIG_DRM=y +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_FB=y +CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +CONFIG_HIDRAW=y +CONFIG_HID_GYRATION=y +CONFIG_LOGITECH_FF=y +CONFIG_HID_NTRIG=y +CONFIG_HID_PANTHERLORD=y +CONFIG_PANTHERLORD_FF=y +CONFIG_HID_PETALYNX=y +CONFIG_HID_SAMSUNG=y +CONFIG_HID_SONY=y +CONFIG_HID_SUNPLUS=y +CONFIG_HID_TOPSEED=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_MON=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_USB_STORAGE_REALTEK=y +CONFIG_USB_STORAGE_DATAFAB=y +CONFIG_USB_STORAGE_FREECOM=y +CONFIG_USB_STORAGE_ISD200=y +CONFIG_USB_STORAGE_USBAT=y +CONFIG_USB_STORAGE_SDDR09=y +CONFIG_USB_STORAGE_SDDR55=y +CONFIG_USB_STORAGE_JUMPSHOT=y 
+CONFIG_USB_STORAGE_ALAUDA=y +CONFIG_USB_STORAGE_ONETOUCH=y +CONFIG_USB_STORAGE_KARMA=y +CONFIG_USB_STORAGE_CYPRESS_ATACB=y +CONFIG_USB_STORAGE_ENE_UB6250=y +CONFIG_USB_UAS=y +CONFIG_USB_SERIAL=m +CONFIG_USB_ROLE_SWITCH=y +CONFIG_USB_ROLES_INTEL_XHCI=m +CONFIG_LEDS_TRIGGERS=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_HCTOSYS is not set +CONFIG_DMADEVICES=y +CONFIG_VIRT_DRIVERS=y +CONFIG_VBOXGUEST=m +CONFIG_VIRTIO_PCI=m +# CONFIG_VIRTIO_PCI_LEGACY is not set +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +CONFIG_VHOST_NET=m +CONFIG_ACPI_WMI=y +CONFIG_MXM_WMI=y +CONFIG_EEEPC_LAPTOP=y +# CONFIG_SURFACE_PLATFORMS is not set +CONFIG_AMD_IOMMU=y +CONFIG_INTEL_IOMMU=y +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_VIRTIO_IOMMU=m +CONFIG_MEMORY=y +CONFIG_VALIDATE_FS_PARSER=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QFMT_V2=y +CONFIG_AUTOFS4_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_INODE64=y +CONFIG_HUGETLBFS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_ROOT_NFS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DISABLE=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_AES=y +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_CRC_CCITT=y +CONFIG_PRINTK_TIME=y +CONFIG_MAGIC_SYSRQ=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_STACK_USAGE=y +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHEDSTATS=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_DEBUG_BOOT_PARAMS=y diff --git a/files/board/arpl/make-img.sh b/files/board/arpl/make-img.sh new file mode 100755 index 00000000..0eb7b563 --- /dev/null +++ b/files/board/arpl/make-img.sh @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +# CONFIG_DIR = . +# $1 = Target path = ./output/target +# BR2_DL_DIR = ./dl +# BINARIES_DIR = ./output/images +# BUILD_DIR = ./output/build +# BASE_DIR = ./output + +set -e + +# Define some constants +MY_ROOT="${CONFIG_DIR}/.." 
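+# Layout produced below: grub stage1 lands in the image's boot sector, p1 is
+# a 100M FAT32 boot partition (ARPL1, gets bzImage-arpl and initrd-arpl),
+# p2 is a 100M ext2 partition (ARPL2, formatted but not populated here) and
+# p3 takes the remaining space as ext4 (ARPL3) for the board's p3 payload.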
+IMAGE_FILE="${MY_ROOT}/arpl.img"
+BOARD_PATH="${CONFIG_DIR}/board/arpl"
+
+echo "Creating image file"
+# Create zeroed image
+dd if="/dev/zero" of="${IMAGE_FILE}" bs=1M count=250 conv=sync 2>/dev/null
+# Copy grub stage1 to image
+dd if="${BOARD_PATH}/grub.bin" of="${IMAGE_FILE}" conv=notrunc,sync 2>/dev/null
+# Create partitions on image
+echo -e "n\np\n\n\n+100M\nt\n\n0b\nn\np\n\n\n+100M\nn\np\n\n\n\nw" | fdisk "${IMAGE_FILE}" >/dev/null
+
+# Force umount, ignore errors
+sudo umount "${BINARIES_DIR}/p1" 2>/dev/null || true
+sudo umount "${BINARIES_DIR}/p3" 2>/dev/null || true
+# Force detach of loop device, ignore errors
+sudo losetup -d "/dev/loop8" 2>/dev/null || true
+# Setup the loop8 loop device
+sudo losetup -P "/dev/loop8" "${IMAGE_FILE}"
+# Format partitions
+sudo mkdosfs -F32 -n ARPL1 "/dev/loop8p1" >/dev/null 2>&1
+sudo mkfs.ext2 -F -F -L ARPL2 "/dev/loop8p2" >/dev/null 2>&1
+sudo mkfs.ext4 -F -F -L ARPL3 "/dev/loop8p3" >/dev/null 2>&1
+
+echo "Mounting image file"
+mkdir -p "${BINARIES_DIR}/p1"
+mkdir -p "${BINARIES_DIR}/p3"
+sudo mount /dev/loop8p1 "${BINARIES_DIR}/p1"
+sudo mount /dev/loop8p3 "${BINARIES_DIR}/p3"
+
+echo "Copying files"
+sudo cp "${BINARIES_DIR}/bzImage" "${BINARIES_DIR}/p1/bzImage-arpl"
+sudo cp "${BINARIES_DIR}/rootfs.cpio.xz" "${BINARIES_DIR}/p1/initrd-arpl"
+sudo cp -R "${BOARD_PATH}/p1/"* "${BINARIES_DIR}/p1"
+sudo cp -R "${BOARD_PATH}/p3/"* "${BINARIES_DIR}/p3"
+sync
+
+echo "Unmounting image file"
+sudo umount "${BINARIES_DIR}/p1"
+sudo umount "${BINARIES_DIR}/p3"
+rmdir "${BINARIES_DIR}/p1"
+rmdir "${BINARIES_DIR}/p3"
+
+sudo losetup --detach /dev/loop8
diff --git a/files/board/arpl/overlayfs/etc/inittab b/files/board/arpl/overlayfs/etc/inittab
new file mode 100644
index 00000000..940c03f6
--- /dev/null
+++ b/files/board/arpl/overlayfs/etc/inittab
@@ -0,0 +1,42 @@
+# /etc/inittab
+#
+# Copyright (C) 2001 Erik Andersen
+#
+# Note: BusyBox init doesn't support runlevels. The runlevels field is
+# completely ignored by BusyBox init. If you want runlevels, use
+# sysvinit.
+#
+# Format for each entry: <id>:<runlevels>:<action>:<process>
+#
+# id == tty to run on, or empty for /dev/console
+# runlevels == ignored
+# action == one of sysinit, respawn, askfirst, wait, and once
+# process == program to run
+
+# Startup the system
+::sysinit:/bin/mount -t proc proc /proc
+::sysinit:/bin/mount -o remount,rw /
+::sysinit:/bin/mkdir -p /dev/pts /dev/shm
+::sysinit:/bin/mount -a
+::sysinit:/bin/mkdir -p /run/lock/subsys
+::sysinit:/sbin/swapon -a
+null::sysinit:/bin/ln -sf /proc/self/fd /dev/fd
+null::sysinit:/bin/ln -sf /proc/self/fd/0 /dev/stdin
+null::sysinit:/bin/ln -sf /proc/self/fd/1 /dev/stdout
+null::sysinit:/bin/ln -sf /proc/self/fd/2 /dev/stderr
+::sysinit:/bin/hostname -F /etc/hostname
+# now run any rc scripts
+::sysinit:/etc/init.d/rcS
+
+# Login in terminals
+::respawn:/sbin/agetty -a root --noclear tty1
+ttyS0::askfirst:/sbin/agetty -a root ttyS0 115200 vt100
+::respawn:/usr/bin/ttyd login -f root
+
+# Stuff to do for the 3-finger salute
+::ctrlaltdel:/sbin/reboot
+
+# Stuff to do before rebooting
+::shutdown:/etc/init.d/rcK
+::shutdown:/sbin/swapoff -a
+::shutdown:/bin/umount -a -r
diff --git a/files/board/arpl/overlayfs/etc/ssh/sshd_config b/files/board/arpl/overlayfs/etc/ssh/sshd_config
new file mode 100644
index 00000000..d9204d68
--- /dev/null
+++ b/files/board/arpl/overlayfs/etc/ssh/sshd_config
@@ -0,0 +1,116 @@
+# $OpenBSD: sshd_config,v 1.104 2021/07/02 05:11:21 dtucker Exp $
+
+# This is the sshd server system-wide configuration file.  See
+# sshd_config(5) for more information.
+ +# This sshd was compiled with PATH=/bin:/sbin:/usr/bin:/usr/sbin + +# The strategy used for options in the default sshd_config shipped with +# OpenSSH is to specify options with their default value where +# possible, but leave them commented. Uncommented options override the +# default value. + +#Port 22 +#AddressFamily any +#ListenAddress 0.0.0.0 +#ListenAddress :: + +#HostKey /etc/ssh/ssh_host_rsa_key +#HostKey /etc/ssh/ssh_host_ecdsa_key +#HostKey /etc/ssh/ssh_host_ed25519_key + +# Ciphers and keying +#RekeyLimit default none + +# Logging +#SyslogFacility AUTH +#LogLevel INFO + +# Authentication: + +#LoginGraceTime 2m +PermitRootLogin yes +#StrictModes yes +#MaxAuthTries 6 +#MaxSessions 10 + +#PubkeyAuthentication yes + +# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2 +# but this is overridden so installations will only check .ssh/authorized_keys +AuthorizedKeysFile .ssh/authorized_keys + +#AuthorizedPrincipalsFile none + +#AuthorizedKeysCommand none +#AuthorizedKeysCommandUser nobody + +# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts +#HostbasedAuthentication no +# Change to yes if you don't trust ~/.ssh/known_hosts for +# HostbasedAuthentication +#IgnoreUserKnownHosts no +# Don't read the user's ~/.rhosts and ~/.shosts files +#IgnoreRhosts yes + +# To disable tunneled clear text passwords, change to no here! +#PasswordAuthentication yes +#PermitEmptyPasswords no + +# Change to no to disable s/key passwords +#KbdInteractiveAuthentication yes + +# Kerberos options +#KerberosAuthentication no +#KerberosOrLocalPasswd yes +#KerberosTicketCleanup yes +#KerberosGetAFSToken no + +# GSSAPI options +#GSSAPIAuthentication no +#GSSAPICleanupCredentials yes + +# Set this to 'yes' to enable PAM authentication, account processing, +# and session processing. If this is enabled, PAM authentication will +# be allowed through the KbdInteractiveAuthentication and +# PasswordAuthentication. Depending on your PAM configuration, +# PAM authentication via KbdInteractiveAuthentication may bypass +# the setting of "PermitRootLogin without-password". +# If you just want the PAM account and session checks to run without +# PAM authentication, then enable this but set PasswordAuthentication +# and KbdInteractiveAuthentication to 'no'. +#UsePAM no + +#AllowAgentForwarding yes +#AllowTcpForwarding yes +#GatewayPorts no +#X11Forwarding no +#X11DisplayOffset 10 +#X11UseLocalhost yes +#PermitTTY yes +#PrintMotd yes +#PrintLastLog yes +#TCPKeepAlive yes +#PermitUserEnvironment no +#Compression delayed +#ClientAliveInterval 0 +#ClientAliveCountMax 3 +#UseDNS no +#PidFile /var/run/sshd.pid +#MaxStartups 10:30:100 +#PermitTunnel no +#ChrootDirectory none +#VersionAddendum none + +# no default banner path +#Banner none + +# override default of no subsystems +Subsystem sftp /usr/libexec/sftp-server + +# Example of overriding settings on a per-user basis +#Match User anoncvs +# X11Forwarding no +# AllowTcpForwarding no +# PermitTTY no +# ForceCommand cvs server diff --git a/files/board/arpl/overlayfs/opt/arpl/boot.sh b/files/board/arpl/overlayfs/opt/arpl/boot.sh new file mode 100755 index 00000000..ae6c454e --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/boot.sh @@ -0,0 +1,109 @@ +#!/usr/bin/env bash + +set -e + +. /opt/arpl/include/functions.sh + +# Sanity check +loaderIsConfigured || die "Loader is not configured!" + +# Print text centralized, if variable ${COLUMNS} is defined +clear +TITLE="BOOTING..." 
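+# How the centering below works: printf's "%*s" pads the string to a
+# computed width, so padding TITLE to (len(TITLE)+COLUMNS)/2 columns puts
+# its midpoint at the middle of the screen. A minimal sketch (assuming an
+# 80-column terminal; the 10-char title above then pads to 45 columns):
+#   printf "%*s\n" $(((10+80)/2)) "BOOTING..."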
+if [ -z "${COLUMNS}" ]; then + echo -e "\033[1;33m${TITLE}\033[0m" +else + printf "\033[1;33m%*s\033[0m\n" $(((${#TITLE}+${COLUMNS})/2)) "${TITLE}" +fi + +# Check if DSM zImage changed, patch it if necessary +ZIMAGE_HASH="`readConfigKey "zimage-hash" "${USER_CONFIG_FILE}"`" +if [ "`sha256sum "${ORI_ZIMAGE_FILE}" | awk '{print$1}'`" != "${ZIMAGE_HASH}" ]; then + echo -e "\033[1;43mDSM zImage changed\033[0m" + /opt/arpl/zimage-patch.sh + if [ $? -ne 0 ]; then + dialog --backtitle "`backtitle`" --title "Error" \ + --msgbox "zImage not patched:\n`<"${LOG_FILE}"`" 12 70 + return 1 + fi +fi + +# Check if DSM ramdisk changed, patch it if necessary +RAMDISK_HASH="`readConfigKey "ramdisk-hash" "${USER_CONFIG_FILE}"`" +if [ "`sha256sum "${ORI_RDGZ_FILE}" | awk '{print$1}'`" != "${RAMDISK_HASH}" ]; then + echo -e "\033[1;43mDSM Ramdisk changed\033[0m" + /opt/arpl/ramdisk-patch.sh + if [ $? -ne 0 ]; then + dialog --backtitle "`backtitle`" --title "Error" \ + --msgbox "Ramdisk not patched:\n`<"${LOG_FILE}"`" 12 70 + return 1 + fi +fi + +# Load necessary variables +VID="`readConfigKey "vid" "${USER_CONFIG_FILE}"`" +PID="`readConfigKey "pid" "${USER_CONFIG_FILE}"`" +MODEL="`readConfigKey "model" "${USER_CONFIG_FILE}"`" +BUILD="`readConfigKey "build" "${USER_CONFIG_FILE}"`" +SN="`readConfigKey "sn" "${USER_CONFIG_FILE}"`" + +declare -A CMDLINE + +# Fixed values +CMDLINE['netif_num']=0 +# Automatic values +CMDLINE['syno_hw_version']="${MODEL}" +[ -z "${VID}" ] && VID="0x0000" # Sanity check +[ -z "${PID}" ] && PID="0x0000" # Sanity check +CMDLINE['vid']="${VID}" +CMDLINE['pid']="${PID}" +CMDLINE['sn']="${SN}" + +# Read cmdline +while IFS="=" read KEY VALUE; do + [ -n "${KEY}" ] && CMDLINE["${KEY}"]="${VALUE}" +done < <(readModelMap "${MODEL}" "builds.${BUILD}.cmdline") +while IFS="=" read KEY VALUE; do + [ -n "${KEY}" ] && CMDLINE["${KEY}"]="${VALUE}" +done < <(readConfigMap "cmdline" "${USER_CONFIG_FILE}") + +# Check if machine has EFI +[ -d /sys/firmware/efi ] && EFI=1 || EFI=0 +# Read EFI bug value +EFI_BUG="`readModelKey "${MODEL}" "builds.${BUILD}.efi-bug"`" + +LOADER_DISK="`blkid | grep 'LABEL="ARPL3"' | cut -d3 -f1`" +BUS=`udevadm info --query property --name ${LOADER_DISK} | grep ID_BUS | cut -d= -f2` + +# Prepare command line +CMDLINE_LINE="" +[ ${EFI} -eq 1 ] && CMDLINE_LINE+="withefi " +[ "${BUS}" = "ata" ] && CMDLINE_LINE+="synoboot_satadom=1 " +CMDLINE_LINE+="console=ttyS0,115200n8 earlyprintk log_buf_len=32M earlycon=uart8250,io,0x3f8,115200n8 elevator=elevator root=/dev/md0 loglevel=15" +for KEY in ${!CMDLINE[@]}; do + VALUE="${CMDLINE[${KEY}]}" + CMDLINE_LINE+=" ${KEY}" + [ -n "${VALUE}" ] && CMDLINE_LINE+="=${VALUE}" +done +# Escape special chars +CMDLINE_LINE=`echo ${CMDLINE_LINE} | sed 's/>/\\\\>/g'` + +# Inform user +echo -e "Model: \033[1;36m${MODEL}\033[0m" +echo -e "Build: \033[1;36m${BUILD}\033[0m" +echo -e "Cmdline:\n\033[1;36m${CMDLINE_LINE}\033[0m" +echo -e "\033[1;37mLoading DSM kernel...\033[0m" + +# Executes DSM kernel via KEXEC +history -a +sync +if [ "${EFI_BUG}" = "yes" -a ${EFI} -eq 1 ]; then + echo -e "\033[1;33mWarning, running kexec with --noefi param, strange things will happen!!\033[0m" + kexec --noefi -l "${MOD_ZIMAGE_FILE}" --initrd "${MOD_RDGZ_FILE}" --command-line="${CMDLINE_LINE}" >"${LOG_FILE}" 2>&1 || dieLog +else + kexec -l "${MOD_ZIMAGE_FILE}" --initrd "${MOD_RDGZ_FILE}" --command-line="${CMDLINE_LINE}" >"${LOG_FILE}" 2>&1 || dieLog +fi +/sbin/swapoff -a >/dev/null 2>&1 || true +/bin/umount -a -r >/dev/null 2>&1 || true +echo -e "\033[1;37mBooting...\033[0m" 
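+# "kexec -e" executes the kernel staged above with "kexec -l": the running
+# kernel jumps straight into the DSM kernel without a firmware reboot.
+# (The extra "-a" flag is assumed to be supported by the kexec-tools build
+# shipped in this image; it is not present in older kexec releases.)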
+kexec -e -a >"${LOG_FILE}" 2>&1 || dieLog diff --git a/files/board/arpl/overlayfs/opt/arpl/bzImage-to-vmlinux.sh b/files/board/arpl/overlayfs/opt/arpl/bzImage-to-vmlinux.sh new file mode 100755 index 00000000..b2096a6c --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/bzImage-to-vmlinux.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +read_u8() { + dd if=$1 bs=1 skip=$(( $2 )) count=1 2>/dev/null | od -An -tu1 | grep -Eo '[0-9]+' +} +read_u32() { + dd if=$1 bs=1 skip=$(( $2 )) count=4 2>/dev/null | od -An -tu4 | grep -Eo '[0-9]+' +} + +set -x +setup_size=$(read_u8 $1 0x1f1) +payload_offset=$(read_u32 $1 0x248) +payload_length=$(read_u32 $1 0x24c) +inner_pos=$(( ($setup_size + 1) * 512 )) + +tail -c+$(( $inner_pos + 1 )) $1 | tail -c+$(( $payload_offset + 1 )) | head -c $(( $payload_length )) | head -c $(($payload_length - 4)) | unlzma > $2 diff --git a/files/board/arpl/overlayfs/opt/arpl/calc_run_size.sh b/files/board/arpl/overlayfs/opt/arpl/calc_run_size.sh new file mode 100755 index 00000000..a1569de6 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/calc_run_size.sh @@ -0,0 +1,42 @@ +#!/bin/sh +# +# Calculate the amount of space needed to run the kernel, including room for +# the .bss and .brk sections. +# +# Usage: +# objdump -h a.out | sh calc_run_size.sh + +NUM='\([0-9a-fA-F]*[ \t]*\)' +OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"${NUM}${NUM}${NUM}${NUM}"'.*/\1\4/p') +if [ -z "$OUT" ] ; then + echo "Never found .bss or .brk file offset" >&2 + exit 1 +fi + +OUT=$(echo ${OUT# }) +sizeA=$(printf "%d" 0x${OUT%% *}) +OUT=${OUT#* } +offsetA=$(printf "%d" 0x${OUT%% *}) +OUT=${OUT#* } +sizeB=$(printf "%d" 0x${OUT%% *}) +OUT=${OUT#* } +offsetB=$(printf "%d" 0x${OUT%% *}) + +run_size=$(( ${offsetA} + ${sizeA} + ${sizeB} )) + +# BFD linker shows the same file offset in ELF. +if [ "${offsetA}" -ne "${offsetB}" ] ; then + # Gold linker shows them as consecutive. 
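+	# In other words, with gold offsetB == offsetA + sizeA, so the end of
+	# .brk (offsetB + sizeB) must equal run_size (offsetA + sizeA + sizeB)
+	# computed above; any other value means the layout was not understood.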
+ endB=$(( ${offsetB} + ${sizeB} )) + if [ "$endB" != "$run_size" ] ; then + printf "sizeA: 0x%x\n" ${sizeA} >&2 + printf "offsetA: 0x%x\n" ${offsetA} >&2 + printf "sizeB: 0x%x\n" ${sizeB} >&2 + printf "offsetB: 0x%x\n" ${offsetB} >&2 + echo ".bss and .brk are non-contiguous" >&2 + exit 1 + fi +fi + +printf "%d\n" ${run_size} +exit 0 diff --git a/files/board/arpl/overlayfs/opt/arpl/common.php b/files/board/arpl/overlayfs/opt/arpl/common.php new file mode 100755 index 00000000..8fcae197 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/common.php @@ -0,0 +1,245 @@ +#!/usr/bin/env php + $num) { + if ($num === null) { + $searchSeq[] = null; + } elseif (is_array($num) && count($num) == 2 && is_int($num[0]) && is_int($num[1]) && $num[0] >= 0 && + $num[0] <= 255 && $num[1] >= 0 && $num[1] <= 255 && $num[0] < $num[1]) { + $searchSeq[] = $num; //Leave them as numeric + } elseif (is_int($num) && $num >= 0 && $num <= 255) { + $searchSeq[] = chr($num); + } else { + perr("Found invalid search sequence at index $idx", true); + } + } + + //$pos denotes start position but it's also used to mark where start of a potential pattern match was found + fseek($fp, $pos); + do { //This loop is optimized for speed + $buf = fread($fp, $bufLen); + if (!isset($buf[$bufLen-1])) { + break; //Not enough data = no match + } + + $successfulLoops = 0; + foreach ($searchSeq as $byteIdx => $seekByte) { + if ($seekByte === null) { //any character + ++$successfulLoops; + continue; + } + + //element in the array can be a range [(int)from,(int)to] or a literal SINGLE byte + //if isset finds a second element it will mean for us that it's an array of 2 elements (as we don't expect + //a string longer than a single byte) + if (isset($seekByte[1])) { + $curByteNum = ord($buf[$byteIdx]); + if ($curByteNum < $seekByte[0] || $curByteNum > $seekByte[1]) { + break; + } + } elseif($buf[$byteIdx] !== $seekByte) { //If the byte doesn't match literally we know it's not a match + break; + } + + ++$successfulLoops; + } + if ($successfulLoops === $bufLen) { + return $pos; + } + + fseek($fp, ++$pos); + $maxToCheck--; + } while (!feof($fp) && $maxToCheck != 0); + + return -1; +} + +/** + * @return resource + */ +function getFileMemMapped(string $path) +{ + $fp = fopen('php://memory', 'r+'); + fwrite($fp, file_get_contents($path)); //poor man's mmap :D + + return $fp; +} + +function saveStreamToFile($fp, string $path) +{ + perr("Saving stream to $path ...\n"); + + $fp2 = fopen($path, 'w'); + fseek($fp, 0); + while (!feof($fp)) { + fwrite($fp2, fread($fp, 8192)); + } + fclose($fp2); + + perr("DONE!\n"); +} + +/** + * Do not call this in time-sensitive code... + */ +function readAt($fp, int $pos, int $len) +{ + fseek($fp, $pos); + return fread($fp, $len); +} diff --git a/files/board/arpl/overlayfs/opt/arpl/crc32.php b/files/board/arpl/overlayfs/opt/arpl/crc32.php new file mode 100755 index 00000000..1f2799c0 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/crc32.php @@ -0,0 +1,8 @@ +#!/usr/bin/env php + 2) { + fwrite(STDERR, "Usage: " . $argv[0] . 
" \n"); + die(); +} +echo hash_file('crc32b', $argv[1]); +?> diff --git a/files/board/arpl/overlayfs/opt/arpl/include/addons.sh b/files/board/arpl/overlayfs/opt/arpl/include/addons.sh new file mode 100755 index 00000000..4cb3b903 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/include/addons.sh @@ -0,0 +1,60 @@ + +############################################################################### +# Return list of available addons +# 1 - Platform +# 2 - Kernel Version +function availableAddons() { + while read D; do + [ ! -f "${D}/manifest.yml" ] && continue + ADDON=`basename ${D}` + checkAddonExist "${ADDON}" "${1}" "${2}" || continue + SYSTEM=`readConfigKey "system" "${D}/manifest.yml"` + [ "${SYSTEM}" = "true" ] && continue + DESC="`readConfigKey "description" "${D}/manifest.yml"`" + echo -e "${ADDON}\t${DESC}" + done < <(find "${ADDONS_PATH}" -maxdepth 1 -type d | sort) +} + +############################################################################### +# Check if addon exist +# 1 - Addon id +# 2 - Platform +# 3 - Kernel Version +# Return ERROR if not exists +function checkAddonExist() { + # First check generic files + if [ -f "${ADDONS_PATH}/${1}/all.tgz" ]; then + return 0 # OK + fi + # Now check specific platform file + if [ -f "${ADDONS_PATH}/${1}/${2}-${3}.tgz" ]; then + return 0 # OK + fi + return 1 # ERROR +} + +############################################################################### +# Install Addon into ramdisk image +# 1 - Addon id +function installAddon() { + ADDON="${1}" + mkdir -p "${TMP_PATH}/${ADDON}" + HAS_FILES=0 + # First check generic files + if [ -f "${ADDONS_PATH}/${ADDON}/all.tgz" ]; then + gzip -dc "${ADDONS_PATH}/${ADDON}/all.tgz" | tar xf - -C "${TMP_PATH}/${ADDON}" + HAS_FILES=1 + fi + # Now check specific platform files + if [ -f "${ADDONS_PATH}/${ADDON}/${PLATFORM}-${KVER}.tgz" ]; then + gzip -dc "${ADDONS_PATH}/${ADDON}/${PLATFORM}-${KVER}.tgz" | tar xf - -C "${TMP_PATH}/${ADDON}" + HAS_FILES=1 + fi + # If has files to copy, copy it, else return error + [ ${HAS_FILES} -ne 1 ] && return 1 + cp "${TMP_PATH}/${ADDON}/install.sh" "${RAMDISK_PATH}/addons/${ADDON}.sh" 2>"${LOG_FILE}" || dieLog + chmod +x "${RAMDISK_PATH}/addons/${ADDON}.sh" + [ -d ${TMP_PATH}/${ADDON}/root ] && (cp -R "${TMP_PATH}/${ADDON}/root/"* "${RAMDISK_PATH}/" 2>"${LOG_FILE}" || dieLog) + rm -rf "${TMP_PATH}/${ADDON}" + return 0 +} diff --git a/files/board/arpl/overlayfs/opt/arpl/include/configFile.sh b/files/board/arpl/overlayfs/opt/arpl/include/configFile.sh new file mode 100644 index 00000000..c6301d2b --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/include/configFile.sh @@ -0,0 +1,55 @@ + +############################################################################### +# Delete a key in config file +# 1 - Path of Key +# 2 - Path of yaml config file +function deleteConfigKey() { + yq eval 'del(.'${1}')' --inplace "${2}" +} + +############################################################################### +# Write to yaml config file +# 1 - Path of Key +# 2 - Value +# 3 - Path of yaml config file +function writeConfigKey() { + [ "${2}" = "{}" ] && yq eval '.'${1}' = {}' --inplace "${3}" || \ + yq eval '.'${1}' = "'${2}'"' --inplace "${3}" +} + +############################################################################### +# Read key value from yaml config file +# 1 - Path of key +# 2 - Path of yaml config file +# Return Value +function readConfigKey() { + RESULT=`yq eval '.'${1}' | explode(.)' "${2}"` + [ "${RESULT}" == "null" ] && echo "" || echo ${RESULT} +} + 
+############################################################################### +# Read Entries as map(key=value) from yaml config file +# 1 - Path of key +# 2 - Path of yaml config file +# Returns map of values +function readConfigMap() { + yq eval '.'${1}' | explode(.) | to_entries | map([.key, .value] | join("=")) | .[]' "${2}" +} + +############################################################################### +# Read an array from yaml config file +# 1 - Path of key +# 2 - Path of yaml config file +# Returns array/map of values +function readConfigArray() { + yq eval '.'${1}'[]' "${2}" +} + +############################################################################### +# Read Entries as array from yaml config file +# 1 - Path of key +# 2 - Path of yaml config file +# Returns array of values +function readConfigEntriesArray() { + yq eval '.'${1}' | explode(.) | to_entries | map([.key])[] | .[]' "${2}" +} diff --git a/files/board/arpl/overlayfs/opt/arpl/include/consts.sh b/files/board/arpl/overlayfs/opt/arpl/include/consts.sh new file mode 100644 index 00000000..0d4ea7ce --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/include/consts.sh @@ -0,0 +1,21 @@ + +ARPL_VERSION="0.1-alpha" + +# Define paths +TMP_PATH="/tmp" +RAMDISK_PATH="${TMP_PATH}/ramdisk" +LOG_FILE="${TMP_PATH}/log.txt" + +USER_CONFIG_FILE="${BOOTLOADER_PATH}/user-config.yml" +MOD_ZIMAGE_FILE="${BOOTLOADER_PATH}/zImage" +MOD_RDGZ_FILE="${BOOTLOADER_PATH}/rd.gz" + +ORI_ZIMAGE_FILE="${SLPART_PATH}/zImage" +ORI_RDGZ_FILE="${SLPART_PATH}/rd.gz" + +ADDONS_PATH="${CACHE_PATH}/addons" +LKM_PATH="${CACHE_PATH}/lkms" + +MODEL_CONFIG_PATH="/opt/arpl/model-configs" +INCLUDE_PATH="/opt/arpl/include" +PATCH_PATH="/opt/arpl/patch" diff --git a/files/board/arpl/overlayfs/opt/arpl/include/functions.sh b/files/board/arpl/overlayfs/opt/arpl/include/functions.sh new file mode 100644 index 00000000..b1c83ae1 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/include/functions.sh @@ -0,0 +1,148 @@ + +. /opt/arpl/include/consts.sh +. /opt/arpl/include/configFile.sh + +############################################################################### +# Read key value from model config file +# 1 - Model +# 2 - Key +# Return Value +function readModelKey() { + readConfigKey "${2}" "${MODEL_CONFIG_PATH}/${1}.yml" +} + +############################################################################### +# Read Entries as map(key=value) from model config +# 1 - Model +# 2 - Path of key +# Returns map of values +function readModelMap() { + readConfigMap "${2}" "${MODEL_CONFIG_PATH}/${1}.yml" +} + +############################################################################### +# Read an array from model config +# 1 - Model +# 2 - Path of key +# Returns array/map of values +function readModelArray() { + readConfigArray "${2}" "${MODEL_CONFIG_PATH}/${1}.yml" +} + +############################################################################### +# Check if loader is fully configured +# Returns 1 if not +function loaderIsConfigured() { + SN="`readConfigKey "sn" "${USER_CONFIG_FILE}"`" + [ -z "${SN}" ] && return 1 + [ ! -f "${MOD_ZIMAGE_FILE}" ] && return 1 + [ ! 
-f "${MOD_RDGZ_FILE}" ] && return 1 + return 0 # OK +} + +############################################################################### +# Just show error message and dies +function die() { + echo -e "\033[1;41m$@\033[0m" + exit 1 +} + +############################################################################### +# Show error message with log content and dies +function dieLog() { + echo -en "\n\033[1;41mUNRECOVERY ERROR: " + cat "${LOG_FILE}" + echo -e "\033[0m" + sleep 3 + exit 1 +} + +############################################################################### +# Generate a number with 6 digits from 1 to 30000 +function random() { + printf "%06d" $(($RANDOM %30000 +1 )) +} + +############################################################################### +# Generate a hexa number from 0x00 to 0xFF +function randomhex() { + printf "&02X" "$(( $RANDOM %255 +1 ))" +} + +############################################################################### +# Generate a random letter +function generateRandomLetter() { + for i in A B C D E F G H J K L M N P Q R S T V W X Y Z; do + echo $i + done | sort -R | tail -1 +} + +############################################################################### +# Generate a random digit (0-9A-Z) +function generateRandomValue() { + for i in 0 1 2 3 4 5 6 7 8 9 A B C D E F G H J K L M N P Q R S T V W X Y Z; do + echo $i + done | sort -R | tail -1 +} + +############################################################################### +# Generate a random serial number for a model +# 1 - Model +# Returns serial number +function generateSerial() { + SERIAL="`readModelArray "${1}" "serial.prefix" | sort -R | tail -1`" + SERIAL+=`readModelKey "${1}" "serial.middle"` + case "`readModelKey "${1}" "serial.suffix"`" in + numeric) + SERIAL+=$(random) + ;; + alpha) + SERIAL+=$(generateRandomLetter)$(generateRandomValue)$(generateRandomValue)$(generateRandomValue)$(generateRandomValue)$(generateRandomLetter) + ;; + esac + echo ${SERIAL} +} + +############################################################################### +# Validate a serial number for a model +# 1 - Model +# 2 - Serial number to test +# Returns 1 if serial number is valid +function validateSerial() { + PREFIX=`readModelArray "${1}" "serial.prefix"` + MIDDLE=`readModelKey "${1}" "serial.middle"` + S=${2:0:4} + P=${2:4:3} + L=${#2} + if [ ${L} -ne 13 ]; then + echo 0 + return + fi + echo ${PREFIX} | grep -q ${S} + if [ $? -eq 1 ]; then + echo 0 + return + fi + if [ "${MIDDLE}" != "${P}" ]; then + echo 0 + return + fi + echo 1 +} + +############################################################################### +# Check if a item exists into array +# 1 - Item +# 2.. - Array +# Return 0 if exists +function arrayExistItem() { + EXISTS=1 + ITEM="${1}" + shift + for i in "$@"; do + [ "${i}" = "${ITEM}" ] || continue + EXISTS=0 + break + done + return ${EXISTS} +} diff --git a/files/board/arpl/overlayfs/opt/arpl/init.sh b/files/board/arpl/overlayfs/opt/arpl/init.sh new file mode 100755 index 00000000..f017bf23 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/init.sh @@ -0,0 +1,148 @@ +#!/usr/bin/env bash + +. /opt/arpl/include/functions.sh + +set -e + +# Wait kernel enumerate the disks +CNT=3 +while true; do + [ ${CNT} -eq 0 ] && break + LOADER_DISK="`blkid | grep 'LABEL="ARPL3"' | cut -d3 -f1`" + [ -n "${LOADER_DISK}" ] && break + CNT=$((${CNT}-1)) + sleep 1 +done +if [ -z "${LOADER_DISK}" ]; then + die "Loader disk not found!" 
+fi +NUM_PARTITIONS=$(blkid | grep "${LOADER_DISK}" | cut -d: -f1 | wc -l) +if [ $NUM_PARTITIONS -ne 3 ]; then + die "Loader disk not found!" +fi + +# Check partitions and ignore errors +fsck.vfat -aw ${LOADER_DISK}1 >/dev/null 2>&1 || true +fsck.ext2 -p ${LOADER_DISK}2 >/dev/null 2>&1 || true +fsck.ext2 -p ${LOADER_DISK}3 >/dev/null 2>&1 || true +# Make folders to mount partitions +mkdir -p ${BOOTLOADER_PATH} +mkdir -p ${SLPART_PATH} +mkdir -p ${CACHE_PATH} +# Mount the partitions +mount ${LOADER_DISK}1 ${BOOTLOADER_PATH} || die "Can't mount ${BOOTLOADER_PATH}" +mount ${LOADER_DISK}2 ${SLPART_PATH} || die "Can't mount ${SLPART_PATH}" +mount ${LOADER_DISK}3 ${CACHE_PATH} || die "Can't mount ${CACHE_PATH}" + +mkdir -p "${ADDONS_PATH}" +# Move/link SSH machine keys to/from cache volume +[ ! -d "${CACHE_PATH}/ssh" ] && cp -R "/etc/ssh" "${CACHE_PATH}/ssh" +rm -rf "/etc/ssh" +ln -s "${CACHE_PATH}/ssh" "/etc/ssh" +# Link bash history to cache volume +rm -rf ~/.bash_history +ln -s ${CACHE_PATH}/.bash_history ~/.bash_history + +# If user config file not exists, initialize it +if [ ! -f "${USER_CONFIG_FILE}" ]; then + touch "${USER_CONFIG_FILE}" + writeConfigKey "lkm" "dev" "${USER_CONFIG_FILE}" + writeConfigKey "model" "" "${USER_CONFIG_FILE}" + writeConfigKey "build" "" "${USER_CONFIG_FILE}" + writeConfigKey "sn" "" "${USER_CONFIG_FILE}" + writeConfigKey "keymap" "" "${USER_CONFIG_FILE}" + writeConfigKey "zimage-hash" "" "${USER_CONFIG_FILE}" + writeConfigKey "ramdisk-hash" "" "${USER_CONFIG_FILE}" + writeConfigKey "cmdline" "{}" "${USER_CONFIG_FILE}" + writeConfigKey "synoinfo" "{}" "${USER_CONFIG_FILE}" + writeConfigKey "addons" "{}" "${USER_CONFIG_FILE}" +fi + +# Get the VID/PID if we are in USB +VID="0x0000" +PID="0x0000" +BUS=`udevadm info --query property --name ${LOADER_DISK} | grep BUS | cut -d= -f2` +if [ "${BUS}" = "usb" ]; then + VID="0x`udevadm info --query property --name ${LOADER_DISK} | grep ID_VENDOR_ID | cut -d= -f2`" + PID="0x`udevadm info --query property --name ${LOADER_DISK} | grep ID_MODEL_ID | cut -d= -f2`" +elif [ "${BUS}" != "ata" ]; then + die "Loader disk neither USB or DoM" +fi + +# Save variables to user config file +writeConfigKey "vid" ${VID} "${USER_CONFIG_FILE}" +writeConfigKey "pid" ${PID} "${USER_CONFIG_FILE}" + +# Shows title +clear +TITLE="Welcome to Automated Redpill Loader ${ARPL_VERSION}" +printf "\033[1;44m%*s\n" $COLUMNS "" +printf "\033[1;32m%*s%*s \033[0m\n" $(((${#TITLE}+$COLUMNS)/2)) "${TITLE}" $(((${#TITLE}+$COLUMNS)/2-${#TITLE})) "" +printf "\033[1;44m%*s\033[0m\n" $COLUMNS "" + +# Inform user +echo -en "Loader disk: \033[1;32m${LOADER_DISK}\033[0m (" +if [ "${BUS}" = "usb" ]; then + echo -en "\033[1;32mUSB flashdisk\033[0m" +else + echo -en "\033[1;32mSATA DoM\033[0m" +fi +echo ")" + +# Check if partition 3 occupies all free space, resize if needed +LOADER_DEVICE_NAME=`echo ${LOADER_DISK} | sed 's|/dev/||'` +SIZEOFDISK=`cat /sys/block/${LOADER_DEVICE_NAME}/size` +ENDSECTOR=$((`fdisk -l ${LOADER_DISK} | awk '/'${LOADER_DEVICE_NAME}3'/{print$3}'`+1)) +if [ ${SIZEOFDISK} -ne ${ENDSECTOR} ]; then + echo -e "\033[1;36mResizing ${LOADER_DISK}3\033[0m" + echo -e "d\n\nn\n\n\n\n\nn\nw" | fdisk "${LOADER_DISK}" >"${LOG_FILE}" 2>&1 || dieLog + resize2fs ${LOADER_DISK}3 >"${LOG_FILE}" 2>&1 || dieLog +fi + +# Load keymap name +KEYMAP="`readConfigKey "keymap" "${USER_CONFIG_FILE}"`" + +# Loads a keymap if is valid +if [ -f /usr/share/keymaps/i386/qwerty/${KEYMAP}.map.gz ]; then + echo -e "Loading keymap \033[1;32m${KEYMAP}\033[0m" + zcat 
/usr/share/keymaps/i386/qwerty/${KEYMAP}.map.gz | loadkeys +fi + +# Decide if boot automatically +BOOT=1 +if ! loaderIsConfigured; then + echo -e "\033[1;33mLoader is not configured!\033[0m" + BOOT=0 +elif grep -q "IWANTTOCHANGETHECONFIG" /proc/cmdline; then + echo -e "\033[1;33mUser requested edit settings.\033[0m" + BOOT=0 +fi + +# If is to boot automatically, do it +[ ${BOOT} -eq 1 ] && boot.sh + +# Wait for an IP +COUNT=0 +echo -n "Waiting IP." +while true; do + if [ ${COUNT} -eq 15 ]; then + echo "ERROR" + break + fi + COUNT=$((${COUNT}+1)) + IP=`ip route get 1.1.1.1 2>/dev/null | awk '{print$7}'` + if [ -n "${IP}" ]; then + echo -en "OK\nAccess \033[1;34mhttp://${IP}:7681\033[0m to configure the loader via web terminal" + break + fi + echo -n "." + sleep 1 +done + +# Inform user +echo +echo -e "Call \033[1;32mmenu.sh\033[0m to configure loader" +echo +echo -e "User config is on \033[1;32m${USER_CONFIG_FILE}\033[0m" +echo -e "Default SSH Root password is \033[1;31mRedp1lL-1s-4weSomE\033[0m" +echo diff --git a/files/board/arpl/overlayfs/opt/arpl/menu.sh b/files/board/arpl/overlayfs/opt/arpl/menu.sh new file mode 100755 index 00000000..698fc665 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/menu.sh @@ -0,0 +1,812 @@ +#!/usr/bin/env bash + +. /opt/arpl/include/functions.sh +. /opt/arpl/include/addons.sh + +# Check partition 3 space, if < 2GiB uses ramdisk +RAMCACHE=0 +LOADER_DISK="`blkid | grep 'LABEL="ARPL3"' | cut -d3 -f1`" +LOADER_DEVICE_NAME=`echo ${LOADER_DISK} | sed 's|/dev/||'` +if [ `cat /sys/block/${LOADER_DEVICE_NAME}/${LOADER_DEVICE_NAME}3/size` -lt 4194304 ]; then + RAMCACHE=1 +fi + +# Get actual IP +IP=`ip route get 1.1.1.1 2>/dev/null | awk '{print$7}'` + +# Define classes for hw detection +declare -A CLASSES +CLASSES["0100"]="SCSI" +CLASSES["0106"]="SATA" +CLASSES["0101"]="IDE" +CLASSES["0107"]="SAS" +CLASSES["0200"]="Ethernet" +CLASSES["0300"]="VGA" +CLASSES["0c03"]="USB Controller" +CLASSES["0c04"]="Fiber Channel" + +# Dirty flag +DIRTY=0 + +MODEL="`readConfigKey "model" "${USER_CONFIG_FILE}"`" +BUILD="`readConfigKey "build" "${USER_CONFIG_FILE}"`" +KEYMAP="`readConfigKey "keymap" "${USER_CONFIG_FILE}"`" +LKM="`readConfigKey "lkm" "${USER_CONFIG_FILE}"`" +SN="`readConfigKey "sn" "${USER_CONFIG_FILE}"`" + +############################################################################### +# Mounts backtitle dynamically +function backtitle() { + BACKTITLE="ARPL ${ARPL_VERSION}" + if [ -n "${MODEL}" ]; then + BACKTITLE+=" ${MODEL}" + else + BACKTITLE+=" (no model)" + fi + if [ -n "${BUILD}" ]; then + BACKTITLE+=" ${BUILD}" + else + BACKTITLE+=" (no build)" + fi + if [ -n "${SN}" ]; then + BACKTITLE+=" ${SN}" + else + BACKTITLE+=" (no SN)" + fi + if [ -n "${IP}" ]; then + BACKTITLE+=" ${IP}" + else + BACKTITLE+=" (no IP)" + fi + if [ -n "${KEYMAP}" ]; then + BACKTITLE+=" (${KEYMAP})" + else + BACKTITLE+=" (us)" + fi + echo ${BACKTITLE} +} + +############################################################################### +# Shows available models to user choose one +function modelMenu() { + ITEMS="" + while read M; do + M="`basename ${M}`" + M="${M::-4}" + # Check id model is compatible with CPU + COMPATIBLE=1 + for F in `readModelArray "${M}" "flags"`; do + if ! 
grep -q "^flags.*${F}.*" /proc/cpuinfo; then + COMPATIBLE=0 + break + fi + done + [ ${COMPATIBLE} -eq 1 ] && ITEMS+="${M} " + done < <(find "${MODEL_CONFIG_PATH}" -maxdepth 1 -name \*.yml | sort) + dialog --clear --no-items \ + --backtitle "`backtitle`" \ + --menu "Choose the model" 0 0 0 \ + ${ITEMS} \ + 2>${TMP_PATH}/resp + [ $? -ne 0 ] && return + resp=$(<${TMP_PATH}/resp) + [ -z "${resp}" ] && return + # If user change model, clean buildnumber and S/N + if [ "${MODEL}" != "${resp}" ]; then + MODEL=${resp} + writeConfigKey "model" "${MODEL}" "${USER_CONFIG_FILE}" + BUILD="" + writeConfigKey "build" "${BUILD}" "${USER_CONFIG_FILE}" + SN="" + writeConfigKey "sn" "${SN}" "${USER_CONFIG_FILE}" + # Delete old files + rm -f "${MOD_ZIMAGE_FILE}" + rm -f "${MOD_RDGZ_FILE}" + DIRTY=1 + fi +} + +############################################################################### +# Shows available buildnumbers from a model to user choose one +function buildMenu() { + ITEMS="`readConfigEntriesArray "builds" "${MODEL_CONFIG_PATH}/${MODEL}.yml"`" + dialog --clear --no-items --backtitle "`backtitle`" \ + --menu "Choose a build number" 0 0 0 ${ITEMS} 2>${TMP_PATH}/resp + [ $? -ne 0 ] && return + resp=$(<${TMP_PATH}/resp) + [ -z "${resp}" ] && return + if [ "${BUILD}" != "${resp}" ]; then + BUILD=${resp} + writeConfigKey "build" "${BUILD}" "${USER_CONFIG_FILE}" + DIRTY=1 + # Remove old files + rm -f "${MOD_ZIMAGE_FILE}" + rm -f "${MOD_RDGZ_FILE}" + fi +} + +############################################################################### +# Shows menu to user type one or generate randomly +function serialMenu() { + while true; do + dialog --clear --backtitle "`backtitle`" \ + --menu "Choose a option" 0 0 0 \ + a "Generate a random serial number" \ + m "Enter a serial number" \ + 2>${TMP_PATH}/resp + [ $? -ne 0 ] && return + resp=$(<${TMP_PATH}/resp) + [ -z "${resp}" ] && return + if [ "${resp}" = "m" ]; then + while true; do + dialog --backtitle "`backtitle`" \ + --inputbox "Please enter a serial number " 0 0 "" \ + 2>${TMP_PATH}/resp + [ $? -ne 0 ] && return + SERIAL=`cat ${TMP_PATH}/resp` + if [ -z "${SERIAL}" ]; then + return + elif [ `validateSerial ${MODEL} ${SERIAL}` -eq 1 ]; then + break + fi + dialog --backtitle "`backtitle`" \ + --msgbox "Invalid serial, please type a right one" 0 0 + done + break + elif [ "${resp}" = "a" ]; then + SERIAL=`generateSerial "${MODEL}"` + break + fi + done + SN="${SERIAL}" + writeConfigKey "sn" "${SN}" "${USER_CONFIG_FILE}" +} + +############################################################################### +# Where the magic happens :D +function make() { + clear + PLATFORM="`readModelKey "${MODEL}" "platform"`" + KVER="`readModelKey "${MODEL}" "builds.${BUILD}.kver"`" + PAT_URL="`readModelKey "${MODEL}" "builds.${BUILD}.pat.url"`" + PAT_HASH="`readModelKey "${MODEL}" "builds.${BUILD}.pat.hash"`" + RAMDISK_HASH="`readModelKey "${MODEL}" "builds.${BUILD}.pat.ramdisk-hash"`" + ZIMAGE_HASH="`readModelKey "${MODEL}" "builds.${BUILD}.pat.zimage-hash"`" + + # Check if all addon exists + while IFS="=" read ADDON PARAM; do + [ -z "${KEY}" ] && continue + if ! checkAddonExist "${ADDON}" "${PLATFORM}" "${KVER}"; then + echo "Addon ${ADDON} not found!" 
+ return 1 + fi + done < <(readConfigMap "addons" "${USER_CONFIG_FILE}") + + if [ ${RAMCACHE} -eq 0 ]; then + OUT_PATH="${CACHE_PATH}/dl" + echo "Cache in disk" + else + OUT_PATH="${TMP_PATH}/dl" + echo "Cache in ram" + fi + mkdir -p "${OUT_PATH}" + + UNTAR_PAT_PATH="${TMP_PATH}/pat" + PAT_FILE="${MODEL}-${BUILD}.pat" + PAT_PATH="${OUT_PATH}/${PAT_FILE}" + EXTRACTOR_PATH="${CACHE_PATH}/extractor" + EXTRACTOR_BIN="syno_extract_system_patch" + OLDPAT_URL="https://global.download.synology.com/download/DSM/release/7.0.1/42218/DSM_DS3622xs%2B_42218.pat" + OLDPAT_PATH="${OUT_PATH}/DS3622xs+-42218.pat" + + if [ -f "${PAT_PATH}" ]; then + echo "${PAT_FILE} cached." + else + echo "Downloading ${PAT_FILE}" + curl --insecure -L "${PAT_URL}" -o "${PAT_PATH}" --progress-bar + if [ $? -ne 0 ]; then + dialog --backtitle "`backtitle`" --title "Error downloading" --aspect 18 \ + --msgbox "Check internet or cache disk space" 0 0 + return 1 + fi + fi + + echo -n "Checking hash of ${PAT_FILE}: " + if [ "`sha256sum ${PAT_PATH} | awk '{print$1}'`" != "${PAT_HASH}" ]; then + dialog --backtitle "`backtitle`" --title "Error" --aspect 18 \ + --msgbox "Hash of pat not match, try again!" 0 0 + rm -f ${PAT_PATH} + return 1 + fi + echo "OK" + + rm -rf "${UNTAR_PAT_PATH}" + mkdir "${UNTAR_PAT_PATH}" + echo -n "Disassembling ${PAT_FILE}: " + + header="$(od -bcN2 ${PAT_PATH} | head -1 | awk '{print $3}')" + case ${header} in + 105) + echo "Uncompressed tar" + isencrypted="no" + ;; + 213) + echo "Compressed tar" + isencrypted="no" + ;; + 255) + echo "Encrypted" + isencrypted="yes" + ;; + *) + dialog --backtitle "`backtitle`" --title "Error" --aspect 18 \ + --msgbox "Could not determine if pat file is encrypted or not, maybe corrupted, try again!" \ + 0 0 + return 1 + ;; + esac + + if [ "${isencrypted}" = "yes" ]; then + # Check existance of extractor + if [ -f "${EXTRACTOR_PATH}/${EXTRACTOR_BIN}" ]; then + echo "Extractor cached." + else + # Extractor not exists, get it. + mkdir -p "${EXTRACTOR_PATH}" + # Check if old pat already downloaded + if [ ! -f "${OLDPAT_PATH}" ]; then + echo "Downloading old pat to extract synology .pat extractor..." + curl --insecure -L "${OLDPAT_URL}" \ + -o "${OLDPAT_PATH}" --progress-bar + if [ $? -ne 0 ]; then + dialog --backtitle "`backtitle`" --title "Error downloading" --aspect 18 \ + --msgbox "Check internet or cache disk space" 0 0 + return 1 + fi + fi + # Extract ramdisk from PAT + rm -rf "${RAMDISK_PATH}" + mkdir -p "${RAMDISK_PATH}" + tar -xf "${OLDPAT_PATH}" -C "${RAMDISK_PATH}" rd.gz >"${LOG_FILE}" 2>&1 + if [ $? -ne 0 ]; then + dialog --backtitle "`backtitle`" --title "Error extracting" --textbox "${LOG_FILE}" 0 0 + fi + + # Extract all files from rd.gz + (cd "${RAMDISK_PATH}"; xz -dc < rd.gz | cpio -idm) >/dev/null 2>&1 || true + # Copy only necessary files + for f in libcurl.so.4 libmbedcrypto.so.5 libmbedtls.so.13 libmbedx509.so.1 libmsgpackc.so.2 libsodium.so libsynocodesign-ng-virtual-junior-wins.so.7; do + cp "${RAMDISK_PATH}/usr/lib/${f}" "${EXTRACTOR_PATH}" + done + cp "${RAMDISK_PATH}/usr/syno/bin/scemd" "${EXTRACTOR_PATH}/${EXTRACTOR_BIN}" + rm -rf "${RAMDISK_PATH}" + fi + # Uses the extractor to untar pat file + echo "Extracting..." + LD_LIBRARY_PATH=${EXTRACTOR_PATH} "${EXTRACTOR_PATH}/${EXTRACTOR_BIN}" "${PAT_PATH}" "${UNTAR_PAT_PATH}" || true + else + echo "Extracting..." + tar -xf "${PAT_PATH}" -C "${UNTAR_PAT_PATH}" >"${LOG_FILE}" 2>&1 + if [ $? 
-ne 0 ]; then + dialog --backtitle "`backtitle`" --title "Error extracting" --textbox "${LOG_FILE}" 0 0 + fi + fi + + echo -n "Checking hash of zImage: " + HASH="`sha256sum ${UNTAR_PAT_PATH}/zImage | awk '{print$1}'`" + if [ "${HASH}" != "${ZIMAGE_HASH}" ]; then + sleep 1 + dialog --backtitle "`backtitle`" --title "Error" --aspect 18 \ + --msgbox "Hash of zImage not match, try again!" 0 0 + return 1 + fi + echo "OK" + writeConfigKey "zimage-hash" "${ZIMAGE_HASH}" "${USER_CONFIG_FILE}" + + echo -n "Checking hash of ramdisk: " + HASH="`sha256sum ${UNTAR_PAT_PATH}/rd.gz | awk '{print$1}'`" + if [ "${HASH}" != "${RAMDISK_HASH}" ]; then + sleep 1 + dialog --backtitle "`backtitle`" --title "Error" --aspect 18 \ + --msgbox "Hash of ramdisk not match, try again!" 0 0 + return 1 + fi + echo "OK" + writeConfigKey "ramdisk-hash" "${RAMDISK_HASH}" "${USER_CONFIG_FILE}" + + echo -n "Copying files: " + cp "${UNTAR_PAT_PATH}/grub_cksum.syno" "${BOOTLOADER_PATH}" + cp "${UNTAR_PAT_PATH}/GRUB_VER" "${BOOTLOADER_PATH}" + cp "${UNTAR_PAT_PATH}/grub_cksum.syno" "${SLPART_PATH}" + cp "${UNTAR_PAT_PATH}/GRUB_VER" "${SLPART_PATH}" + cp "${UNTAR_PAT_PATH}/zImage" "${ORI_ZIMAGE_FILE}" + cp "${UNTAR_PAT_PATH}/rd.gz" "${ORI_RDGZ_FILE}" + echo "OK" + + /opt/arpl/zimage-patch.sh + if [ $? -ne 0 ]; then + dialog --backtitle "`backtitle`" --title "Error" --aspect 18 \ + --msgbox "zImage not patched:\n`<"${LOG_FILE}"`" 0 0 + return 1 + fi + + /opt/arpl/ramdisk-patch.sh + if [ $? -ne 0 ]; then + dialog --backtitle "`backtitle`" --title "Error" --aspect 18 \ + --msgbox "Ramdisk not patched:\n`<"${LOG_FILE}"`" 0 0 + return 1 + fi + + echo "Cleaning" + rm -rf "${UNTAR_PAT_PATH}" + + echo "Ready!" + sleep 3 + DIRTY=0 + return 0 +} + +############################################################################### +# Detect hardware +function detectHw() { + PLATFORM="`readModelKey "${MODEL}" "platform"`" + KVER="`readModelKey "${MODEL}" "builds.${BUILD}.kver"`" + # Get modules not needed + unset NOTNEEDED + declare -A NOTNEEDED + while read M; do + NOTNEEDED[${M}]="1" + done < <(readModelArray "${MODEL}" "builds.${BUILD}.modules-notneeded") + unset DEVC DEVN + declare -A DEVC + declare -A DEVN + while read L; do + F=` sed -E 's/^([0-9a-z]{2}:[0-9a-z]{2}.[0-9a-z]{1})[^\[]*\[([0-9a-z]{4})\]: (.*)/\1|\2|\3/' <<<"${L}"` + PCI="`cut -d'|' -f1 <<<"${F}"`" + CLASS="`cut -d'|' -f2 <<<"${F}"`" + NAME="`cut -d'|' -f3 <<<"${F}"`" + MODULE="`lspci -ks "${PCI}" | awk '/Kernel driver in use/{print$5}'`" + [ -z "${MODULE}" ] && continue + # If is a virtio module, change id + if grep -q "virtio" <<<"$MODULE"; then + MODULE="virtio" + fi + CLASS=${CLASSES[${CLASS}]} # Get class name of module + [ -z "${CLASS}" ] && continue # If no class, skip + arrayExistItem "${MODULE}" "${!ADDONS[@]}" && continue # Check if module already added + [ -n "${NOTNEEDED[${MODULE}]}" ] && continue # Check if module is not necessary + # Add module to list + DEVC[${MODULE}]="${CLASS}" + DEVN[${MODULE}]="${NAME}" + done < <(lspci -nn) + if [ ${#DEVC[@]} -eq 0 ]; then + dialog --backtitle "`backtitle`" --aspect 18 \ + --msgbox "No device detected or already added!" 0 0 + return + fi + for MODULE in ${!DEVC[@]}; do + CLASS="${DEVC[${MODULE}]}" + NAME="${DEVN[${MODULE}]}" + TEXT="Found a ${NAME}\nClass ${CLASS}\nModule ${MODULE}\nAccept?" + checkAddonExist "${MODULE}" "${PLATFORM}" "${KVER}" || TEXT+="\n\n\Z1PS: Addon for this module not found\Zn" + dialog --backtitle "`backtitle`" --title "Found Hardware" \ + --colors --yesno "${TEXT}" 12 70 + [ $? 
-ne 0 ] && continue
+    dialog --backtitle "`backtitle`" --title "params" \
+      --inputbox "Type optional params for the module" 0 0 \
+      2>${TMP_PATH}/resp
+    [ $? -ne 0 ] && continue
+    VALUE="`<${TMP_PATH}/resp`"
+    ADDONS["${MODULE}"]="${VALUE}"
+    writeConfigKey "addons.${MODULE}" "${VALUE}" "${USER_CONFIG_FILE}"
+    DIRTY=1
+  done
+}
+
+###############################################################################
+# Manage addons/drivers
+function addonMenu() {
+  # Read 'platform' and kernel version to check if addon exists
+  PLATFORM="`readModelKey "${MODEL}" "platform"`"
+  KVER="`readModelKey "${MODEL}" "builds.${BUILD}.kver"`"
+  # Read addons from user config
+  unset ADDONS
+  declare -A ADDONS
+  while IFS="=" read KEY VALUE; do
+    [ -n "${KEY}" ] && ADDONS["${KEY}"]="${VALUE}"
+  done < <(readConfigMap "addons" "${USER_CONFIG_FILE}")
+  NEXT="h"
+  # Loop menu
+  while true; do
+    dialog --backtitle "`backtitle`" --default-item ${NEXT} \
+      --menu "Choose an option" 0 0 0 \
+      h "Detect hardware" \
+      a "Add an addon" \
+      d "Delete addon(s)" \
+      s "Show user addons" \
+      m "Show all available addons" \
+      e "Exit" \
+      2>${TMP_PATH}/resp
+    [ $? -ne 0 ] && return
+    case "`<${TMP_PATH}/resp`" in
+      h)
+        detectHw
+        NEXT='e'
+        ;;
+      a) NEXT='a'
+        rm -f "${TMP_PATH}/menu"
+        while read ADDON DESC; do
+          arrayExistItem "${ADDON}" "${!ADDONS[@]}" && continue # Check if addon has already been added
+          echo "${ADDON} \"${DESC}\"" >> "${TMP_PATH}/menu"
+        done < <(availableAddons "${PLATFORM}" "${KVER}")
+        if [ ! -f "${TMP_PATH}/menu" ] ; then
+          dialog --backtitle "`backtitle`" --msgbox "No available addons to add" 0 0
+          continue
+        fi
+        dialog --backtitle "`backtitle`" --menu "Select an addon" 0 0 0 \
+          --file "${TMP_PATH}/menu" 2>"${TMP_PATH}/resp"
+        [ $? -ne 0 ] && continue
+        ADDON="`<"${TMP_PATH}/resp"`"
+        [ -z "${ADDON}" ] && continue
+        dialog --backtitle "`backtitle`" --title "params" \
+          --inputbox "Type optional params for the addon" 0 0 \
+          2>${TMP_PATH}/resp
+        [ $? -ne 0 ] && continue
+        ADDONS[${ADDON}]="`<"${TMP_PATH}/resp"`"
+        writeConfigKey "addons.${ADDON}" "${ADDONS[${ADDON}]}" "${USER_CONFIG_FILE}"
+        DIRTY=1
+        ;;
+      d) NEXT='d'
+        if [ ${#ADDONS[@]} -eq 0 ]; then
+          dialog --backtitle "`backtitle`" --msgbox "No user addons to remove" 0 0
+          continue
+        fi
+        ITEMS=""
+        for I in "${!ADDONS[@]}"; do
+          ITEMS+="${I} ${I} off "
+        done
+        dialog --backtitle "`backtitle`" --no-tags \
+          --checklist "Select addon to remove" 0 0 0 ${ITEMS} \
+          2>"${TMP_PATH}/resp"
+        [ $? -ne 0 ] && continue
+        ADDON="`<"${TMP_PATH}/resp"`"
+        [ -z "${ADDON}" ] && continue
+        for I in ${ADDON}; do
+          unset ADDONS[${I}]
+          deleteConfigKey "addons.${I}" "${USER_CONFIG_FILE}"
+        done
+        DIRTY=1
+        ;;
+      s) NEXT='s'
+        ITEMS=""
+        for KEY in ${!ADDONS[@]}; do
+          ITEMS+="${KEY}: ${ADDONS[$KEY]}\n"
+        done
+        dialog --backtitle "`backtitle`" --title "User addons" \
+          --msgbox "${ITEMS}" 0 0
+        ;;
+      m) NEXT='m'
+        MSG=""
+        while read MODULE DESC; do
+          if arrayExistItem "${MODULE}" "${!ADDONS[@]}"; then
+            MSG+="\Z4${MODULE}\Zn"
+          else
+            MSG+="${MODULE}"
+          fi
+          MSG+=": \Z5${DESC}\Zn\n"
+        done < <(availableAddons "${PLATFORM}" "${KVER}")
+        dialog --backtitle "`backtitle`" --title "Available addons" \
+          --colors --msgbox "${MSG}" 0 0
+        ;;
+      e) return ;;
+    esac
+  done
+}
+
+###############################################################################
+function cmdlineMenu() {
+  # Read from user config
+  DT="`readModelKey "${MODEL}" "dt"`"
+  unset CMDLINE
+  declare -A CMDLINE
+  while IFS="=" read KEY VALUE; do
+    [ -n "${KEY}" ] && CMDLINE["${KEY}"]="${VALUE}"
+  done < <(readConfigMap "cmdline" "${USER_CONFIG_FILE}")
+  # Loop menu
+  while true; do
+
+    echo "a \"Add/edit a cmdline item\"" > "${TMP_PATH}/menu"
+    echo "d \"Delete cmdline item(s)\"" >> "${TMP_PATH}/menu"
+    echo "s \"Show user cmdline\"" >> "${TMP_PATH}/menu"
+    echo "m \"Show model/build cmdline\"" >> "${TMP_PATH}/menu"
+    if [ "${DT}" != "true" ]; then
+      echo "h \"Change maxdisks\"" >> "${TMP_PATH}/menu"
+      echo "u \"Show SATA(s) # ports and drives\"" >> "${TMP_PATH}/menu"
+    fi
+    echo "e \"Exit\"" >> "${TMP_PATH}/menu"
+
+    dialog --backtitle "`backtitle`" --menu "Choose an option" 0 0 0 \
+      --file "${TMP_PATH}/menu" 2>${TMP_PATH}/resp
+    [ $? -ne 0 ] && return
+    case "`<${TMP_PATH}/resp`" in
+      a)
+        dialog --backtitle "`backtitle`" --title "User cmdline" \
+          --inputbox "Type the name of the cmdline item" 0 0 \
+          2>${TMP_PATH}/resp
+        [ $? -ne 0 ] && continue
+        NAME="`sed 's/://g' <"${TMP_PATH}/resp"`"
+        [ -z "${NAME}" ] && continue
+        dialog --backtitle "`backtitle`" --title "User cmdline" \
+          --inputbox "Type a value for the '${NAME}' cmdline" 0 0 "${CMDLINE[${NAME}]}" \
+          2>${TMP_PATH}/resp
+        [ $? -ne 0 ] && continue
+        VALUE="`<"${TMP_PATH}/resp"`"
+        CMDLINE[${NAME}]="${VALUE}"
+        writeConfigKey "cmdline.${NAME}" "${VALUE}" "${USER_CONFIG_FILE}"
+        ;;
+      d)
+        if [ ${#CMDLINE[@]} -eq 0 ]; then
+          dialog --backtitle "`backtitle`" --msgbox "No user cmdline to remove" 0 0
+          continue
+        fi
+        ITEMS=""
+        for I in "${!CMDLINE[@]}"; do
+          ITEMS+="${I} ${CMDLINE[${I}]} off "
+        done
+        dialog --backtitle "`backtitle`" \
+          --checklist "Select cmdline to remove" 0 0 0 ${ITEMS} \
+          2>"${TMP_PATH}/resp"
+        [ $? -ne 0 ] && continue
+        RESP=`<"${TMP_PATH}/resp"`
+        [ -z "${RESP}" ] && continue
+        for I in ${RESP}; do
+          unset CMDLINE[${I}]
+          deleteConfigKey "cmdline.${I}" "${USER_CONFIG_FILE}"
+        done
+        ;;
+      s)
+        ITEMS=""
+        for KEY in ${!CMDLINE[@]}; do
+          ITEMS+="${KEY}: ${CMDLINE[$KEY]}\n"
+        done
+        dialog --backtitle "`backtitle`" --title "User cmdline" \
+          --aspect 18 --msgbox "${ITEMS}" 0 0
+        ;;
+      m)
+        ITEMS=""
+        while IFS="=" read KEY VALUE; do
+          ITEMS+="${KEY}: ${VALUE}\n"
+        done < <(readModelMap "${MODEL}" "builds.${BUILD}.cmdline")
+        dialog --backtitle "`backtitle`" --title "Model/build cmdline" \
+          --aspect 18 --msgbox "${ITEMS}" 0 0
+        ;;
+      h) MODEL_DISKS="`readModelKey "${MODEL}" "disks"`"
+        dialog --backtitle "`backtitle`" --title "Change max of disks" \
+          --inputbox "${MODEL} disks: ${MODEL_DISKS}\nType the desired number of disks (1-26)" 0 0 \
+          2>${TMP_PATH}/resp
+        [ $?
-ne 0 ] && continue + VALUE="`<"${TMP_PATH}/resp"`" + [ -z "${VALUE}" ] && continue + if [ ${VALUE} -ge 1 -a ${VALUE} -le 26 ]; then + CMDLINE['maxdisks']="${VALUE}" + writeConfigKey "cmdline.maxdisks" "${VALUE}" "${USER_CONFIG_FILE}" + INTPORTCFG="" + for I in `seq 1 ${VALUE}`; do INTPORTCFG+="1"; done + INTPORTCFG=`printf "%x" "$((2#${INTPORTCFG}))"` + CMDLINE['internalportcfg']="${INTPORTCFG}" + writeConfigKey "cmdline.internalportcfg" "${INTPORTCFG}" "${USER_CONFIG_FILE}" + else + dialog --backtitle "`backtitle`" --msgbox "Invalid number" 0 0 + fi + ;; + u) TEXT="" + for PCI in `lspci -d ::106 | awk '{print$1}'`; do + NAME=`lspci -s "${PCI}" | sed "s/\ .*://"` + TEXT+="\Zb${NAME}\Zn\nPorts: " + unset HOSTPORTS + declare -A HOSTPORTS + while read LINE; do + ATAPORT="`echo ${LINE} | grep -o 'ata[0-9]*'`" + PORT=`echo ${ATAPORT} | sed 's/ata//'` + HOSTPORTS[${PORT}]=`echo ${LINE} | grep -o 'host[0-9]*$'` + done < <(ls -l /sys/class/scsi_host | fgrep "${PCI}") + while read PORT; do + ls -l /sys/block | fgrep -q "${PCI}/ata${PORT}" && ATTACH=1 || ATTACH=0 + [ `cat /sys/class/scsi_host/${HOSTPORTS[${PORT}]}/ahci_port_cmd` -eq 0 ] && DUMMY=1 || DUMMY=0 + [ ${ATTACH} -eq 1 ] && TEXT+="\Z2\Zb" + [ ${DUMMY} -eq 1 ] && TEXT+="\Z1" + TEXT+="${PORT}\Zn " + done < <(echo ${!HOSTPORTS[@]} | tr ' ' '\n' | sort -n) + TEXT+="\n" + done + TEXT+="\nPorts with color \Z1red\Zn as DUMMY, color \Z2\Zbgreen\Zn has drive connected\n" + TEXT+="Use this information to mount SataPortMap, DiskIdxMap and sata_remap" + dialog --backtitle "`backtitle`" --colors --aspect 18 \ + --msgbox "${TEXT}" 0 0 + ;; + e) return ;; + esac + done +} + +############################################################################### +function synoinfoMenu() { + # Read synoinfo from user config + unset SYNOINFO + declare -A SYNOINFO + while IFS="=" read KEY VALUE; do + [ -n "${KEY}" ] && SYNOINFO["${KEY}"]="${VALUE}" + done < <(readConfigMap "synoinfo" "${USER_CONFIG_FILE}") + # menu loop + while true; do + dialog --backtitle "`backtitle`" \ + --menu "Choose a option" 0 0 0 \ + a "Add/edit an synoinfo item" \ + d "Delete synoinfo item(s)" \ + s "Show user synoinfo" \ + m "Show model/build synoinfo" \ + e "Exit" \ + 2>${TMP_PATH}/resp + [ $? -ne 0 ] && return + case "`<${TMP_PATH}/resp`" in + a) + dialog --backtitle "`backtitle`" --title "User synoinfo" \ + --inputbox "Type a name of synoinfo variable" 0 0 \ + 2>${TMP_PATH}/resp + [ $? -ne 0 ] && continue + NAME="`sed 's/://g' <"${TMP_PATH}/resp"`" + dialog --backtitle "`backtitle`" --title "User synoinfo" \ + --inputbox "Type a value of '${NAME}' variable" 0 0 "${SYNOINFO[${NAME}]}" \ + 2>${TMP_PATH}/resp + [ $? -ne 0 ] && continue + VALUE="`<"${TMP_PATH}/resp"`" + SYNOINFO[${NAME}]="${VALUE}" + writeConfigKey "synoinfo.${NAME}" "${VALUE}" "${USER_CONFIG_FILE}" + DIRTY=1 + ;; + d) + if [ ${#SYNOINFO[@]} -eq 0 ]; then + dialog --backtitle "`backtitle`" --msgbox "No user synoinfo to remove" 0 0 + continue + fi + ITEMS="" + for I in "${!SYNOINFO[@]}"; do + ITEMS+="${I} ${SYNOINFO[${I}]} off " + done + dialog --backtitle "`backtitle`" \ + --checklist "Select synoinfo to remove" 0 0 0 ${ITEMS} \ + 2>"${TMP_PATH}/resp" + [ $? 
-ne 0 ] && continue
+        RESP=`<"${TMP_PATH}/resp"`
+        [ -z "${RESP}" ] && continue
+        for I in ${RESP}; do
+          unset SYNOINFO[${I}]
+          deleteConfigKey "synoinfo.${I}" "${USER_CONFIG_FILE}"
+        done
+        DIRTY=1
+        ;;
+      s)
+        ITEMS=""
+        for KEY in ${!SYNOINFO[@]}; do
+          ITEMS+="${KEY}: ${SYNOINFO[$KEY]}\n"
+        done
+        dialog --backtitle "`backtitle`" --title "User synoinfo" \
+          --aspect 18 --msgbox "${ITEMS}" 0 0
+        ;;
+      m)
+        ITEMS=""
+        while IFS="=" read KEY VALUE; do
+          ITEMS+="${KEY}: ${VALUE}\n"
+        done < <(readModelMap "${MODEL}" "builds.${BUILD}.synoinfo")
+        dialog --backtitle "`backtitle`" --title "Model/build synoinfo" \
+          --aspect 18 --msgbox "${ITEMS}" 0 0
+        ;;
+      e) return ;;
+    esac
+  done
+}
+
+###############################################################################
+# Calls boot.sh to boot into DSM kernel/ramdisk
+function boot() {
+  [ ${DIRTY} -eq 1 ] && dialog --backtitle "`backtitle`" --title "Alert" \
+    --yesno "Config changed, would you like to rebuild the loader?" 0 0
+  if [ $? -eq 0 ]; then
+    make || return
+  fi
+  boot.sh
+}
+
+###############################################################################
+# Lets the user edit the user config file manually
+function editUserConfig() {
+  while true; do
+    dialog --backtitle "`backtitle`" --title "Edit with caution" \
+      --editbox "${USER_CONFIG_FILE}" 0 0 2>"${TMP_PATH}/userconfig"
+    [ $? -ne 0 ] && return
+    mv "${TMP_PATH}/userconfig" "${USER_CONFIG_FILE}"
+    ERRORS=`yq eval "${USER_CONFIG_FILE}" 2>&1`
+    [ $? -eq 0 ] && break
+    dialog --backtitle "`backtitle`" --title "Invalid YAML format" --msgbox "${ERRORS}" 0 0
+  done
+  OLDMODEL=${MODEL}
+  OLDBUILD=${BUILD}
+  MODEL="`readConfigKey "model" "${USER_CONFIG_FILE}"`"
+  BUILD="`readConfigKey "build" "${USER_CONFIG_FILE}"`"
+  SN="`readConfigKey "sn" "${USER_CONFIG_FILE}"`"
+
+  if [ "${MODEL}" != "${OLDMODEL}" -o "${BUILD}" != "${OLDBUILD}" ]; then
+    # Remove old files
+    rm -f "${MOD_ZIMAGE_FILE}"
+    rm -f "${MOD_RDGZ_FILE}"
+  fi
+  DIRTY=1
+}
+
+###############################################################################
+# Shows available keymaps for the user to choose one
+function keymapMenu() {
+  OPTIONS=""
+  while read KM; do
+    OPTIONS+="${KM::-7} "
+  done < <(cd /usr/share/keymaps/i386/qwerty; ls *.map.gz)
+  dialog --backtitle "`backtitle`" --no-items \
+    --menu "Choose a keymap" 0 0 0 ${OPTIONS} \
+    2>/tmp/resp
+  [ $?
-ne 0 ] && return + resp=`cat /tmp/resp 2>/dev/null` + [ -z "${resp}" ] && return + KEYMAP=${resp} + writeConfigKey "keymap" "${KEYMAP}" "${USER_CONFIG_FILE}" + zcat /usr/share/keymaps/i386/qwerty/${KEYMAP}.map.gz | loadkeys +} + +############################################################################### +############################################################################### + +# Main loop +NEXT="m" +while true; do + echo "m \"Choose a model\"" > "${TMP_PATH}/menu" + if [ -n "${MODEL}" ]; then + echo "n \"Choose a Build Number\"" >> "${TMP_PATH}/menu" + echo "s \"Choose a serial number\"" >> "${TMP_PATH}/menu" + if [ -n "${BUILD}" ]; then + echo "a \"Addons/drivers\"" >> "${TMP_PATH}/menu" + echo "x \"Cmdline menu\"" >> "${TMP_PATH}/menu" + echo "i \"Synoinfo menu\"" >> "${TMP_PATH}/menu" + echo "l \"Switch LKM version: \Z4${LKM}\Zn\"" >> "${TMP_PATH}/menu" + echo "d \"Build the loader\"" >> "${TMP_PATH}/menu" + fi + fi + loaderIsConfigured && echo "b \"Boot the loader\" " >> "${TMP_PATH}/menu" + echo "u \"Edit user config file manually\"" >> "${TMP_PATH}/menu" + echo "k \"Choose a keymap\" " >> "${TMP_PATH}/menu" + [ ${RAMCACHE} -eq 0 -a -d "${CACHE_PATH}/dl" ] && echo "c \"Clean disk cache\"" >> "${TMP_PATH}/menu" + echo "e \"Exit\"" >> "${TMP_PATH}/menu" + dialog --clear --default-item ${NEXT} --backtitle "`backtitle`" --colors \ + --menu "Choose the option" 0 0 0 --file "${TMP_PATH}/menu" \ + 2>${TMP_PATH}/resp + [ $? -ne 0 ] && break + case `<"${TMP_PATH}/resp"` in + m) modelMenu; NEXT="n" ;; + n) buildMenu; NEXT="s" ;; + s) serialMenu; NEXT="a" ;; + a) addonMenu; NEXT="x" ;; + x) cmdlineMenu; NEXT="i" ;; + i) synoinfoMenu; NEXT="l" ;; + l) [ "${LKM}" = "dev" ] && LKM='prod' || LKM='dev' + writeConfigKey "lkm" "${LKM}" "${USER_CONFIG_FILE}" + NEXT="d" + ;; + d) make; NEXT="b" ;; + b) boot ;; + u) editUserConfig; NEXT="u" ;; + k) keymapMenu ;; + c) dialog --backtitle "`backtitle`" --title "Cleaning" --aspect 18 \ + --prgbox "rm -rfv \"${CACHE_PATH}/dl\"" 0 0 ;; + e) break ;; + esac +done +clear +echo -e "Call \033[1;32mmenu.sh\033[0m to return to menu" diff --git a/files/board/arpl/overlayfs/opt/arpl/model-configs/DS1621+.yml b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS1621+.yml new file mode 100644 index 00000000..187d9d14 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS1621+.yml @@ -0,0 +1,82 @@ +id: "DS1621+" +modules-notneeded: &modules-notneeded + - ahci + - i40e + - ixgbe + - igb + - e1000e + - dca + - etxhci_hcd + - xhci_hcd + - ehci-pci + - uhci_hcd +synoinfo: &synoinfo + rss_server: "http://example.com/null.xml" + rss_server_ssl: "https://example.com/null.xml" + rss_server_v2: "https://example.com/autoupdate/v2/getList" + update_server: "http://example.com/" + update_server_ssl: "https://example.com/" + small_info_path: "https://example.com/null" + updateurl: "http://example.com/" + myds_region_api_base_url: "https://example.com" + security_version_server: "https://example.com/smallupdate" +cmdline: &cmdline + SMBusHddDynamicPower: 1 + syno_hdd_powerup_seq: 0 + HddHotplug: 0 + vender_format_version: 2 + syno_port_thaw: 1 + syno_hdd_detect: 0 + synoboot2: + syno_ttyS0: "serial,0x3f8" + syno_ttyS1: "serial,0x2f8" +platform: "v1000" +serial: + prefix: + - "2080" + middle: "S7R" + suffix: "alpha" +disks: 6 +dt: true +builds: + 42218: + ver: "7.0.1" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: 
"https://global.download.synology.com/download/DSM/release/7.0.1/42218/DSM_DS1621%2B_42218.pat" + hash: "19f56827ba8bf0397d42cd1d6f83c447f092c2c1bbb70d8a2ad3fbd427e866df" + ramdisk-hash: "73512c7bceb34cf7f7f93c2703db60496da0e27274fc45e5aefa0366c9734d6e" + zimage-hash: "f4648d0dd6b29ef6149b0ff46afe1fe32f81730aa79af72f37ffd3647c76f586" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-common-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + + 42661: + ver: "7.1.0" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.1/42661-1/DSM_DS1621%2B_42661.pat" + hash: "381077302a89398a9fb5ec516217578d6f33b0219fe95135e80fd93cddbf88c4" + ramdisk-hash: "8fd5eb40fb088af97d3beee85b6275c2ceb368b08453eb5a5d00d42cc7d578d1" + zimage-hash: "d939b5937be00a644aae64c33633619a7c310433e60a515c77bbef00b0a7e6b6" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-42661-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" diff --git a/files/board/arpl/overlayfs/opt/arpl/model-configs/DS2422+.yml b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS2422+.yml new file mode 100644 index 00000000..d7f96c1e --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS2422+.yml @@ -0,0 +1,60 @@ +id: "DS2422+" +modules-notneeded: &modules-notneeded + - ahci + - i40e + - ixgbe + - igb + - e1000e + - dca + - etxhci_hcd + - xhci_hcd + - ehci-pci + - uhci_hcd +synoinfo: &synoinfo + rss_server: "http://example.com/null.xml" + rss_server_ssl: "https://example.com/null.xml" + rss_server_v2: "https://example.com/autoupdate/v2/getList" + update_server: "http://example.com/" + update_server_ssl: "https://example.com/" + small_info_path: "https://example.com/null" + updateurl: "http://example.com/" + myds_region_api_base_url: "https://example.com" + security_version_server: "https://example.com/smallupdate" +cmdline: &cmdline + SMBusHddDynamicPower: 1 + vender_format_version: 2 + syno_port_thaw: 1 + syno_hdd_detect: 0 + synoboot2: + syno_ttyS0: "serial,0x3f8" + syno_ttyS1: "serial,0x2f8" +platform: "v1000" +serial: + prefix: + - "0000" + middle: "XXX" + suffix: "numeric" +disks: 12 +dt: true +builds: + 42218: + ver: "7.0.1" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.0.1/42218/DSM_DS2422%2B_42218.pat" + hash: "415c54934d483a2557500bc3a2e74588a0cec1266e1f0d9a82a7d3aace002471" + ramdisk-hash: "2b5b8dd90b2e6020ffccc2719d8bc16d9935421754a8c088d6b31dbca4e4ff7b" + zimage-hash: "38281a90036fffcb41cd17f05a6c7e9a1d5740a78c135980fb0c3a6d0ca1485f" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-common-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-common-disable-disabled-ports.patch" diff --git a/files/board/arpl/overlayfs/opt/arpl/model-configs/DS3615xs.yml b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS3615xs.yml new file mode 100644 index 00000000..00232ef8 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS3615xs.yml @@ -0,0 +1,81 @@ +id: "DS3615xs" +modules-notneeded: &modules-notneeded + - ahci + - i40e + - ixgbe + - igb + - e1000e + - 
dca + - etxhci_hcd + - xhci_hcd + - ehci-pci + - uhci_hcd +synoinfo: &synoinfo + esataportcfg: "0x0" + usbportcfg: "0x8700" + rss_server: "http://example.com/null.xml" + rss_server_ssl: "https://example.com/null.xml" + small_info_path: "https://example.com/null" + updateurl: "http://example.com/" + myds_region_api_base_url: "https://example.com" +cmdline: &cmdline + syno_hdd_powerup_seq: 0 + HddHotplug: 0 + vender_format_version: 2 + syno_port_thaw: 1 + syno_hdd_detect: 0 + +platform: "bromolow" +serial: + prefix: + - "1130" + - "1230" + - "1330" + - "1430" + middle: "LWN" + suffix: "numeric" +disks: 12 +builds: + 42218: + ver: "7.0.1" + kver: "3.10.108" + rd-compressed: false + efi-bug: yes + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.0.1/42218/DSM_DS3615xs_42218.pat" + hash: "dddd26891815ddca02d0d53c1d42e8b39058b398a4cc7b49b80c99f851cf0ef7" + ramdisk-hash: "4c90c3c7ee25b5fcc651552e80a9364d22823c863c834c5f43e3344a3a68af78" + zimage-hash: "d29b695612710376734cb5c5b5ae4f2d8afc49ffd640387e1c86010f6c7d2c8a" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-common-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-common-disable-disabled-ports.patch" + + 42661: + ver: "7.1.0" + kver: "3.10.108" + rd-compressed: false + efi-bug: yes + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.1/42661-1/DSM_DS3615xs_42661.pat" + hash: "1e95d8c63981bcf42ea2eaedfbc7acc4248ff16d129344453b7479953f9ad145" + ramdisk-hash: "8ee5df65bcfd25c3d1999262153dcff625714d98789bc8065e217773f8d070d8" + zimage-hash: "3017542c92232cb5477b0e11d82d708a9909320350b1740aab58359e85f82351" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-42661-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-42661-disable-disabled-ports.patch" diff --git a/files/board/arpl/overlayfs/opt/arpl/model-configs/DS3617xs.yml b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS3617xs.yml new file mode 100644 index 00000000..bd37acd2 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS3617xs.yml @@ -0,0 +1,86 @@ +id: "DS3617xs" +modules-notneeded: &modules-notneeded + - ahci + - i40e + - ixgbe + - igb + - e1000e + - dca + - xhci_pci + - xhci_hcd + - uhci_hcd +synoinfo: &synoinfo +# maxdisks: "15" +# internalportcfg: "0x78FF" + esataportcfg: "0x00" + usbportcfg: "0x8700" + rss_server: "http://example.com/null.xml" + rss_server_v2: "https://example.com/autoupdate/v2/getList" + rss_server_ssl: "https://example.com/null.xml" + update_server: "http://example.com/" + update_server_ssl: "https://example.com/" + small_info_path: "https://example.com/null" + updateurl: "http://example.com/" + myds_region_api_base_url: "https://example.com" + security_version_server: "https://example.com/smallupdate" +cmdline: &cmdline + syno_hdd_powerup_seq: 0 + HddHotplug: 0 + vender_format_version: 2 + syno_port_thaw: 1 + syno_hdd_detect: 0 + +platform: "broadwell" +serial: + prefix: + - "1130" + - "1230" + - "1330" + - "1430" + middle: "ODN" + suffix: "numeric" +disks: 12 +builds: + 42218: + ver: "7.0.1" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: 
+ url: "https://global.download.synology.com/download/DSM/release/7.0.1/42218/DSM_DS3617xs_42218.pat" + hash: "d65ee4ed5971e38f6cdab00e1548183435b53ba49a5dca7eaed6f56be939dcd2" + ramdisk-hash: "1b2e86fbf4006f6aa40dcd674ad449feed8b0b8317a71e2bb8bb986a74e08c57" + zimage-hash: "28a75e0b680517d39374260eb981b8ca9ace8810b121a30b8036fa09cfcb77fc" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-common-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-common-disable-disabled-ports.patch" + + 42661: + ver: "7.1.0" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.1/42661-1/DSM_DS3617xs_42661.pat" + hash: "0a5a243109098587569ab4153923f30025419740fb07d0ea856b06917247ab5c" + ramdisk-hash: "da3c2a170fea24052d817cbc4bb5b610a5b05288758d746b60a294ed614239fb" + zimage-hash: "9598f66b75e5b303e571241696e02fe7c0add80f13564f8c6b8c0daaf3cb3018" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-42661-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-42661-disable-disabled-ports.patch" diff --git a/files/board/arpl/overlayfs/opt/arpl/model-configs/DS3622xs+.yml b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS3622xs+.yml new file mode 100644 index 00000000..707abed6 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS3622xs+.yml @@ -0,0 +1,91 @@ +id: "DS3622xs+" +modules-notneeded: &modules-notneeded + - ahci + - i40e + - ixgbe + - igb + - e1000e + - r8168 + - r8169 + - dca + - etxhci_hcd + - xhci_hcd + - ehci-pci + - uhci_hcd +synoinfo: &synoinfo + esataportcfg: "0x00" + support_bde_internal_10g: "no" + support_disk_compatibility : "no" + support_memory_compatibility : "no" + supportraidgroup: "no" + supportssdcache: "no" + rss_server: "http://example.com/null.xml" + rss_server_ssl: "https://example.com/null.xml" + rss_server_v2: "https://example.com/autoupdate/v2/getList" + update_server: "http://example.com/" + update_server_ssl: "https://example.com/" + small_info_path: "https://example.com/null" + updateurl: "http://example.com/" + myds_region_api_base_url: "https://example.com" + security_version_server: "https://example.com/smallupdate" +cmdline: &cmdline + syno_hdd_powerup_seq: 0 + HddHotplug: 0 + vender_format_version: 2 + syno_port_thaw: 1 + syno_hdd_detect: 0 + +platform: "broadwellnk" +serial: + prefix: + - "2030" + - "2040" + - "20C0" + - "2150" + middle: "SQR" + suffix: "alpha" +disks: 12 +builds: + 42218: + ver: "7.0.1" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.0.1/42218/DSM_DS3622xs%2B_42218.pat" + hash: "f38329b8cdc5824a8f01fb1e377d3b1b6bd23da365142a01e2158beff5b8a424" + ramdisk-hash: "a95d4ab06189460f3b3d13a33e421887b5f3ea09a10535ae0d4c92beb7ff631d" + zimage-hash: "06964b68e5ccdedd4363dff3986f99686d3c9cb5225e8e4c3d840a1d9cd1330b" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-common-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-common-disable-disabled-ports.patch" + + 42661: + ver: "7.1.0" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: 
*modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.1/42661-1/DSM_DS3622xs%2B_42661.pat" + hash: "53d0a4f1667288b6e890c4fdc48422557ff26ea8a2caede0955c5f45b560cccd" + ramdisk-hash: "df8a055d6bc901229f0ba53ed5b4fe024bdf9a1b42f0c32483adefcdac14db4d" + zimage-hash: "e073dd84054f652811e0ae1932af2c7cdbd5fb6e5f18f265097072b8af4605e8" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-42661-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-42661-disable-disabled-ports.patch" diff --git a/files/board/arpl/overlayfs/opt/arpl/model-configs/DS918+.yml b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS918+.yml new file mode 100644 index 00000000..24ad5622 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS918+.yml @@ -0,0 +1,89 @@ +id: "DS918+" +modules-notneeded: &modules-notneeded + - ahci + - i40e + - ixgbe + - igb + - e1000e + - dca + - etxhci_hcd + - xhci_hcd + - ehci-pci + - uhci_hcd +synoinfo: &synoinfo + esataportcfg: "0x00" + support_led_brightness_adjustment: "" + support_leds_lp3943: "" + buzzeroffen: "0xffff" + rss_server: "http://example.com/null.xml" + rss_server_ssl: "https://example.com/null.xml" + rss_server_v2: "https://example.com/autoupdate/v2/getList" + update_server: "http://example.com/" + update_server_ssl: "https://example.com/" + small_info_path: "https://example.com/null" + updateurl: "http://example.com/" + myds_region_api_base_url: "https://example.com" + security_version_server: "https://example.com/smallupdate" +cmdline: &cmdline + syno_hdd_powerup_seq: 0 + HddHotplug: 0 + vender_format_version: 2 + syno_port_thaw: 1 + syno_hdd_detect: 0 + +platform: "apollolake" +serial: + prefix: + - "1780" + - "1790" + - "1860" + - "1980" + middle: "PDN" + suffix: "numeric" +disks: 4 +flags: + - "fma" +builds: + 42218: + ver: "7.0.1" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.0.1/42218/DSM_DS918%2B_42218.pat" + hash: "a403809ab2cd476c944fdfa18cae2c2833e4af36230fa63f0cdee31a92bebba2" + ramdisk-hash: "4b7a7a271a3b2158d9193a4f0e75c59590949ad7b4e26d546f46cc2ee8504d51" + zimage-hash: "338ba514066da01d0c1f770418916b9b96f5355d88a7b55b398d2726db591fdb" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-common-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-common-disable-disabled-ports.patch" + + 42661: + ver: "7.1.0" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.1/42661-1/DSM_DS918%2B_42661.pat" + hash: "4e8a9d82a8a1fde5af9a934391080b7bf6b91811d9583acb73b90fb6577e22d7" + ramdisk-hash: "ef8c87b6e68226339e5623d048252f5be3089c0831e41298a4695f2bfa65f00e" + zimage-hash: "9ce0d3452f08afaf95d52292ff20cbac6d69e17d5b9953377e4ac90c9ac7397d" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-42661-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-42661-disable-disabled-ports.patch" diff --git a/files/board/arpl/overlayfs/opt/arpl/model-configs/DS920+.yml 
b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS920+.yml new file mode 100644 index 00000000..10299598 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/model-configs/DS920+.yml @@ -0,0 +1,81 @@ +id: "DS920+" +modules-notneeded: &modules-notneeded + - ahci + - i40e + - ixgbe + - igb + - e1000e + - dca + - etxhci_hcd + - xhci_hcd + - ehci-pci + - uhci_hcd +synoinfo: &synoinfo + rss_server: "http://example.com/null.xml" + rss_server_ssl: "https://example.com/null.xml" + rss_server_v2: "https://example.com/autoupdate/v2/getList" + update_server: "http://example.com/" + update_server_ssl: "https://example.com/" + small_info_path: "https://example.com/null" + updateurl: "http://example.com/" + myds_region_api_base_url: "https://example.com" + security_version_server: "https://example.com/smallupdate" +cmdline: &cmdline + intel_iommu: "igfx_off" + HddEnableDynamicPower: 1 + synoboot2: + syno_ttyS0: "serial,0x3f8" + syno_ttyS1: "serial,0x2f8" + vender_format_version: 2 +platform: "geminilake" +serial: + prefix: + - "2030" + - "2040" + - "20C0" + - "2150" + middle: "SBR" + suffix: "alpha" +disks: 4 +builds: + 42218: + ver: "7.0.1" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.0.1/42218/DSM_DS920%2B_42218.pat" + hash: "73053911bd118b432d5a2036dc62d518eed83b78b32c1eb23696d59725a14892" + ramdisk-hash: "e39890f4bef2e5e4eea956996b0dd92d081c6d9e7c393331131e65bbad1a17a9" + zimage-hash: "74d513aaa3e30d8aa4f80e202d94a68a552e9c0472f8470e133ad29080556f55" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-common-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + + 42661: + ver: "7.1.0" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.1/42661-1/DSM_DS920%2B_42661.pat" + hash: "8076950fdad2ca58ea9b91a12584b9262830fe627794a0c4fc5861f819095261" + ramdisk-hash: "c8ad44826c87e065a3b05f354a639c0a86cb2fa47b88e11949604d53f3e80048" + zimage-hash: "1d0e5b76e08e3483f6bf06d23b5978ec498b855bde23db1f96f343db4c43337d" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-42661-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" diff --git a/files/board/arpl/overlayfs/opt/arpl/model-configs/DVA1622.yml b/files/board/arpl/overlayfs/opt/arpl/model-configs/DVA1622.yml new file mode 100644 index 00000000..364dd625 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/model-configs/DVA1622.yml @@ -0,0 +1,64 @@ +id: "DVA1622" +modules-notneeded: &modules-notneeded + - ahci + - i40e + - ixgbe + - igb + - e1000e + - dca + - etxhci_hcd + - xhci_hcd + - ehci-pci + - uhci_hcd + +synoinfo: &synoinfo + buzzeroffen: "0xffff" + rss_server: "http://example.com/null.xml" + rss_server_ssl: "https://example.com/null.xml" + rss_server_v2: "https://example.com/autoupdate/v2/getList" + update_server: "http://example.com/" + update_server_ssl: "https://example.com/" + small_info_path: "https://example.com/null" + updateurl: "http://example.com/" + myds_region_api_base_url: "https://example.com" + security_version_server: "https://example.com/smallupdate" +cmdline: &cmdline + intel_iommu: "igfx_off" + HddEnableDynamicPower: 1 + vender_format_version: 2 + 
syno_port_thaw: 1 + synoboot2: + syno_ttyS0: "serial,0x3f8" + syno_ttyS1: "serial,0x2f8" +platform: "geminilake" +serial: + prefix: + - "2030" + - "2040" + - "20C0" + - "2150" + middle: "SBR" + suffix: "alpha" +disks: 2 +dt: true +builds: + 42661: + ver: "7.1.0" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.1/42661-1/DSM_DVA1622_42661.pat" + hash: "f1484cf302627072ca393293cd73e61dc9e09d479ef028b216eae7c12f7b7825" + ramdisk-hash: "6290945ba61f652aec83725f81f5a47bd5e4cdbeb86241c33825154140e164ec" + zimage-hash: "1d0e5b76e08e3483f6bf06d23b5978ec498b855bde23db1f96f343db4c43337d" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-42661-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" diff --git a/files/board/arpl/overlayfs/opt/arpl/model-configs/DVA3221.yml b/files/board/arpl/overlayfs/opt/arpl/model-configs/DVA3221.yml new file mode 100644 index 00000000..75accd15 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/model-configs/DVA3221.yml @@ -0,0 +1,88 @@ +id: "DVA3221" +modules-notneeded: &modules-notneeded + - ahci + - i40e + - ixgbe + - igb + - e1000e + - dca + - etxhci_hcd + - xhci_hcd + - ehci-pci + - uhci_hcd +synoinfo: &synoinfo + esataportcfg: "0x00" + support_bde_internal_10g: "no" + support_disk_compatibility: "no" + rss_server: "http://example.com/null.xml" + rss_server_ssl: "https://example.com/null.xml" + rss_server_v2: "https://example.com/autoupdate/v2/getList" + update_server: "http://example.com/" + update_server_ssl: "https://example.com/" + small_info_path: "https://example.com/null" + updateurl: "http://example.com/" + myds_region_api_base_url: "https://example.com" + security_version_server: "https://example.com/smallupdate" +cmdline: &cmdline + syno_hdd_powerup_seq: 0 + HddHotplug: 0 + vender_format_version: 2 + syno_port_thaw: 1 + syno_hdd_detect: 0 + +platform: "denverton" +serial: + prefix: + - "2030" + - "2040" + - "20C0" + - "2150" + middle: "SJR" + suffix: "alpha" +flags: + - "fma" +disks: 4 +builds: + 42218: + ver: "7.0.1" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.0.1/42218/DSM_DVA3221_42218.pat" + hash: "01f101d7b310c857e54b0177068fb7250ff722dc9fa2472b1a48607ba40897ee" + ramdisk-hash: "0825958923a5e67d967389769cff5fb7a04a25b98a2826c4c1e8aa7b8146dc8b" + zimage-hash: "ef97f2d64f3f7f8c5e3f4e8fee613d385d7888826f56e119f1885a722c95c7cc" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-common-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-common-disable-disabled-ports.patch" + + 42661: + ver: "7.1.0" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.1/42661-1/DSM_DVA3221_42661.pat" + hash: "ed3207db40b7bac4d96411378558193b7747ebe88f0fc9c26c59c0b5c688c359" + ramdisk-hash: "0ff061f453bc9888b16c59baaf3617bfa6ee42044122eb1dd0eaaa18b3832381" + zimage-hash: "5222b5efaf7af28ff3833fd37f13100c30acba1ee201a15b2ee360e66e75b48e" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - 
"ramdisk-common-init-script.patch" + - "ramdisk-42661-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-42661-disable-disabled-ports.patch" diff --git a/files/board/arpl/overlayfs/opt/arpl/model-configs/FS6400.yml.disabled b/files/board/arpl/overlayfs/opt/arpl/model-configs/FS6400.yml.disabled new file mode 100644 index 00000000..7537e70a --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/model-configs/FS6400.yml.disabled @@ -0,0 +1,88 @@ +id: "FS6400" +modules-notneeded: &modules-notneeded + - ahci + - i40e + - ixgbe + - igb + - e1000e + - dca + - etxhci_hcd + - xhci_hcd + - ehci-pci + - uhci_hcd + - r8168 + - mpt3sas +synoinfo: &synoinfo +# support_bde_internal_10g: "no" +# support_disk_compatibility: "no" +# esataportcfg: "0x00" +# usbportcfg: "0x10000" + rss_server: "http://example.com/null.xml" + rss_server_ssl: "https://example.com/null.xml" + rss_server_v2: "https://example.com/autoupdate/v2/getList" + update_server: "http://example.com/" + update_server_ssl: "https://example.com/" + small_info_path: "https://example.com/null" + updateurl: "http://example.com/" + myds_region_api_base_url: "https://example.com" + security_version_server: "https://example.com/smallupdate" +cmdline: &cmdline +# SMBusHddDynamicPower: 1 + vender_format_version: 2 + syno_port_thaw: 1 + syno_hdd_detect: 0 + synoboot2: + syno_ttyS0: "serial,0x3f8" + syno_ttyS1: "serial,0x2f8" +platform: "purley" +serial: + prefix: + - "1960" + middle: "PSN" + suffix: "numeric" +disks: 24 +dt: true +builds: + 42218: + ver: "7.0.1" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.0.1/42218/DSM_FS6400_42218.pat" + hash: "0e5e15398fb50d21ac52e0fbae199d5bacebc52f04933be5825c710f9de874ea" + ramdisk-hash: "eebaf0236230956fc1a9d8ca8c8f86143da959b631cad9c311152a4e644d17a0" + zimage-hash: "cbed16da4970c41e9b9c6797c57c70b12f55ab497756cb050247d1c155c8a8f6" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-common-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-common-disable-disabled-ports.patch" + + 42661: + ver: "7.1.0" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.1/42661-1/DSM_FS6400_42661.pat" + hash: "94a84725e8b24dfb448429068c046e05084ead3a210f8710979e0992904673e7" + ramdisk-hash: "6ceff751e7132dd8cc80ff64ee23b7e3b3986d85d7d9132b4cb4b0d50f223b1f" + zimage-hash: "2326a556eec4c48b9887bad7eecab268e43dc0e036bed8ddf6fbba466d713cde" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-42661-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-42661-disable-disabled-ports.patch" diff --git a/files/board/arpl/overlayfs/opt/arpl/model-configs/RS4021xs+.yml.disabled b/files/board/arpl/overlayfs/opt/arpl/model-configs/RS4021xs+.yml.disabled new file mode 100644 index 00000000..a02e37a4 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/model-configs/RS4021xs+.yml.disabled @@ -0,0 +1,84 @@ +id: "RS4021xs+" +modules-notneeded: &modules-notneeded + - ahci + - i40e + - ixgbe + - igb + - e1000e + - dca + - etxhci_hcd + - xhci_hcd + - ehci-pci + - uhci_hcd +synoinfo: &synoinfo + support_bde_internal_10g: 
"no" + support_disk_compatibility: "no" + supportraidgroup: "no" + esataportcfg: "0x00" + rss_server: "http://example.com/null.xml" + rss_server_ssl: "https://example.com/null.xml" + rss_server_v2: "https://example.com/autoupdate/v2/getList" + update_server: "http://example.com/" + update_server_ssl: "https://example.com/" + small_info_path: "https://example.com/null" + updateurl: "http://example.com/" + myds_region_api_base_url: "https://example.com" + security_version_server: "https://example.com/smallupdate" +cmdline: &cmdline + syno_hdd_powerup_seq: 0 + HddHotplug: 0 + vender_format_version: 2 + syno_port_thaw: 1 + syno_hdd_detect: 0 + +platform: "broadwellnk" +serial: + prefix: + - "0000" + middle: "XXX" + suffix: "numeric" +disks: 16 +builds: + 42218: + ver: "7.0.1" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.0.1/42218/DSM_RS4021xs%2B_42218.pat" + hash: "7afca3970ac7324d7431c1484d4249939bedd4c18ac34187f894c43119edf3a1" + ramdisk-hash: "3510afe5b3dfe3662bfe054c1728c8794911da431718b533cd03d2a2c061ffd5" + zimage-hash: "b4cc62e9953f226960de98b65887e17dd6df5fa0ad28f665e0b4660dbd5f2fa8" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-common-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-common-disable-disabled-ports.patch" + + 42661: + ver: "7.1.0" + kver: "4.4.180" + rd-compressed: false + efi-bug: no + modules-notneeded: *modules-notneeded + cmdline: + <<: *cmdline + synoinfo: + <<: *synoinfo + pat: + url: "https://global.download.synology.com/download/DSM/release/7.1/42661-1/DSM_RS4021xs%2B_42661.pat" + hash: "496b64e431dafa34cdebb92da8ac736bf1610fe157f03df7e6d11152d60991f5" + ramdisk-hash: "143e475fe73c0adb3377361402b4baad21448476e844e55e16d1ed51ffc4c971" + zimage-hash: "e073dd84054f652811e0ae1932af2c7cdbd5fb6e5f18f265097072b8af4605e8" + patch: + - "ramdisk-common-disable-root-pwd.patch" + - "ramdisk-common-init-script.patch" + - "ramdisk-42661-post-init-script.patch" + - "ramdisk-common-network-hosts.patch" + - "ramdisk-42661-disable-disabled-ports.patch" diff --git a/files/board/arpl/overlayfs/opt/arpl/patch-boot_params-check.php b/files/board/arpl/overlayfs/opt/arpl/patch-boot_params-check.php new file mode 100755 index 00000000..4e88d88e --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/patch-boot_params-check.php @@ -0,0 +1,141 @@ +#!/usr/bin/env php + 3) { + perr("Usage: " . $argv[0] . " []\n", true); +} + +$file = getArgFilePath(1); +perr("\nGenerating patch for $file\n"); + +//The function will reside in init code part. We don't care we may potentially search beyond as we expect it to be found +$codeAddr = getELFSectionAddr($file, '.init.text', 3); + +//Finding a function boundary is non-trivial really as patters can vary, we can have multiple exit points, and in CISC +// there are many things which may match e.g. "PUSH EBP". Implementing even a rough disassembler is pointless. +//However, we can certainly cheat here as we know with CDECL a non-empty function will always contain one or more +// PUSH (0x41) R12-R15 (0x54-57) sequences. Then we can search like a 1K forward for these characteristic LOCK OR. 
+const PUSH_R12_R15_SEQ = [0x41, [0x54, 0x57]]; +const PUSH_R12_R15_SEQ_LEN = 2; +const LOCK_OR_LOOK_AHEAD = 1024; +const LOCK_OR_PTR_SEQs = [ + [0xF0, 0x80, null, null, null, null, null, 0x01], + [0xF0, 0x80, null, null, null, null, null, 0x02], + [0xF0, 0x80, null, null, null, null, null, 0x04], + [0xF0, 0x80, null, null, null, null, null, 0x08], +]; +const LOCK_OR_PTR_SEQs_NUM = 4; //how many sequences we are expecting +const LOCK_OR_PTR_SEQ_LEN = 8; //length of a single sequence + +$fp = getFileMemMapped($file); //Read the whole file to memory to make fseek/fread much faster +$pos = $codeAddr; //Start from where code starts +$orsPos = null; //When matched it will contain our resulting file offsets to LOCK(); OR BYTE PTR [rip+...],0x calls +perr("Looking for f() candidates...\n"); +do { + $find = findSequenceWithWildcard($fp, PUSH_R12_R15_SEQ, $pos, -1); + if ($find === -1) { + break; //no more "functions" left + } + + perr("\rAnalyzing f() candidate @ " . decTo32bUFhex($pos)); + + //we found something looking like PUSH R12-R15, now find the ORs + $orsPos = []; //file offsets where LOCK() calls should start + $orsPosNum = 0; //Number of LOCK(); OR ptr sequences found + $seqPos = $pos; + foreach (LOCK_OR_PTR_SEQs as $idx => $seq) { + $find = findSequenceWithWildcard($fp, $seq, $seqPos, LOCK_OR_LOOK_AHEAD); + if ($find === -1) { + break; //Seq not found - there's no point to look further + } + + $orsPos[] = $find; + ++$orsPosNum; + $seqPos = $find + LOCK_OR_PTR_SEQ_LEN; //Next search will start after the current sequence code + } + + //We can always move forward by the function token length (obvious) but if we couldn't find any LOCK-OR tokens + // we can skip the whole look ahead distance. We CANNOT do that if we found even a single token because the next one + // might have been just after the look ahead distance + if ($orsPosNum !== LOCK_OR_PTR_SEQs_NUM) { + $pos += PUSH_R12_R15_SEQ_LEN; + if ($orsPosNum === 0) { + $pos += LOCK_OR_LOOK_AHEAD; + } + continue; //Continue the main search loop to find next function candidate + } + + //We found LOCK(); OR ptr sequences so we can print some logs and collect ptrs (as this is quite expensive) + $seqPtrsDist = []; + perr("\n[?] Found possible f() @ " . decTo32bUFhex($pos) . "\n"); + $ptrOffset = null; + $equalJumps = 0; + foreach (LOCK_OR_PTR_SEQs as $idx => $seq) { + //data will have the following bytes: + // [0-LOCK()] [1-OR()] [2-BYTE-PTR] [3-OFFS-b3] [4-OFFS-b2] [5-OFFS-b1] [6-OFFS-b0] [7-NUMBER] + $seqData = readAt($fp, $orsPos[$idx], LOCK_OR_PTR_SEQ_LEN); + $newPtrOffset = //how far it "jumps" + $orsPos[$idx] + + (unpack('V', $seqData[3] . $seqData[4] . $seqData[5] . $seqData[6])[1]); //u32 bit LE + + if($ptrOffset === null) { + $ptrOffset = $newPtrOffset; //Save the first one to compare in the next loop + ++$equalJumps; + } elseif ($ptrOffset === $newPtrOffset) { + ++$equalJumps; + } + + perr( + "\t[+] Found LOCK-OR#$idx sequence @ " . decTo32bUFhex($orsPos[$idx]) . " => " . + rawToUFhex($seqData) . " [RIP+(dec)$newPtrOffset]\n" + ); + } + if ($equalJumps !== 4) { + perr("\t[-] LOCK-OR PTR offset mismatch - $equalJumps/" . LOCK_OR_PTR_SEQs_NUM . 
" matched\n"); + //If the pointer checking failed we can at least move beyond the last LOCK-OR found as we know there's no valid + // sequence of LOCK-ORs there + $pos = $orsPos[3]; + continue; + } + + perr("\t[+] All $equalJumps LOCK-OR PTR offsets equal - match found!\n"); + break; +} while(!feof($fp)); + +if ($orsPos === null) { //There's a small chance no candidates with LOCK ORs were found + perr("Failed to find matching sequences", true); +} + +//Patch offsets +foreach ($orsPos as $seqFileOffset) { + //The offset will point at LOCK(), we need to change the OR (0x80 0x0d) to AND (0x80 0x25) so the two bytes after + $seqFileOffset = $seqFileOffset+2; + + perr("Patching OR to AND @ file offset (dec)$seqFileOffset\n"); + fseek($fp, $seqFileOffset); + fwrite($fp, "\x25"); //0x0d is OR, 0x25 is AND +} + +if (!isset($argv[2])) { + perr("No output file specified - discarding data\n"); + exit; +} + +saveStreamToFile($fp, $argv[2]); +fclose($fp); diff --git a/files/board/arpl/overlayfs/opt/arpl/patch-ramdisk-check.php b/files/board/arpl/overlayfs/opt/arpl/patch-ramdisk-check.php new file mode 100755 index 00000000..0f464051 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/patch-ramdisk-check.php @@ -0,0 +1,85 @@ +#!/usr/bin/env php + 3) { + perr("Usage: " . $argv[0] . " []\n", true); +} + +$file = getArgFilePath(1); +perr("\nGenerating patch for $file\n"); + +//Strings (e.g. error for printk()) reside in .rodata - start searching there to save time +$rodataAddr = getELFSectionAddr($file, '.rodata', 2); + +//Locate the precise location of "ramdisk error" string +$rdErrAddr = getELFStringLoc($file, '3ramdisk corrupt'); + + +//offsets will be 32 bit in ASM and in LE +$errPrintAddr = $rodataAddr + $rdErrAddr; +$errPrintCAddrLEH = decTo32bLEhex($errPrintAddr - 1); //Somehow rodata contains off-by-one sometimes... +$errPrintAddrLEH = decTo32bLEhex($errPrintAddr); +perr("LE arg addr: " . $errPrintCAddrLEH . "\n"); + +$fp = getFileMemMapped($file); //Read the whole file to memory to make fseet/fread much faster + +//Find the printk() call argument +$printkPos = findSequence($fp, hex2raw($errPrintCAddrLEH), 0, DIR_FWD, -1); +if ($printkPos === -1) { + perr("printk pos not found!\n", true); +} +perr("Found printk arg @ " . decTo32bUFhex($printkPos) . "\n"); + +//double check if it's a MOV reg,VAL (where reg is EAX/ECX/EDX/EBX/ESP/EBP/ESI/EDI) +fseek($fp, $printkPos - 3); +$instr = fread($fp, 3); +if (strncmp($instr, "\x48\xc7", 2) !== 0) { + perr("Expected MOV=>reg before printk error, got " . bin2hex($instr) . "\n", true); +} +$dstReg = ord($instr[2]); +if ($dstReg < 192 || $dstReg > 199) { + perr("Expected MOV w/reg operand [C0-C7], got " . bin2hex($instr[2]) . "\n", true); +} +$movPos = $printkPos - 3; +perr("Found printk MOV @ " . decTo32bUFhex($movPos) . "\n"); + +//now we should seek a reasonable amount (say, up to 32 bytes) for a sequence of CALL x => TEST EAX,EAX => JZ +$testPos = findSequence($fp, "\x85\xc0", $movPos, DIR_RWD, 32); +if ($testPos === -1) { + perr("Failed to find TEST eax,eax\n", true); +} + +$jzPos = $testPos + 2; +fseek($fp, $jzPos); +$jz = fread($fp, 2); +if ($jz[0] !== "\x74") { + perr("Failed to find JZ\n", true); +} + +$jzp = "\xEB" . $jz[1]; +perr('OK - patching ' . bin2hex($jz) . " (JZ) to " . bin2hex($jzp) . " (JMP) @ $jzPos\n"); +fseek($fp, $jzPos); //we should be here already +perr("Patched " . fwrite($fp, $jzp) . 
" bytes in memory\n"); + +if (!isset($argv[2])) { + perr("No output file specified - discarding data\n"); + exit; +} + +if (!isset($argv[2])) { + perr("No output file specified - discarding data\n"); + exit; +} + +saveStreamToFile($fp, $argv[2]); +fclose($fp); diff --git a/files/board/arpl/overlayfs/opt/arpl/patch/config-manipulators.sh b/files/board/arpl/overlayfs/opt/arpl/patch/config-manipulators.sh new file mode 100644 index 00000000..05d34a7b --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/patch/config-manipulators.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env sh + +# +# WARNING: this file is also embedded in the post-init patcher, so don't go to crazy with the syntax/tools as it must +# be able to execute in the initramfs/preboot environment (so no bashism etc) +# All comments will be stripped, functions here should NOT start with brp_ as they're not part of the builder + + +if [ -z ${SED_PATH+x} ]; then + echo "Your SED_PATH variable is not set/is empty!" + exit 1 +fi + +##$1 from, $2 to, $3 file to path +_replace_in_file() +{ + if grep -q "$1" "$3"; then + $SED_PATH -i "$3" -e "s#$1#$2#" + fi +} + +# Replace/remove/add values in .conf K=V file +# +# Args: $1 name, $2 new_val, $3 path +_set_conf_kv() +{ + # Delete + if [ -z "$2" ]; then + $SED_PATH -i "$3" -e "s/^$1=.*$//" + return 0; + fi + + # Replace + if grep -q "^$1=" "$3"; then + $SED_PATH -i "$3" -e "s\"^$1=.*\"$1=\\\"$2\\\"\"" + return 0 + fi + + # Add if doesn't exist + echo "$1=\"$2\"" >> $3 +} diff --git a/files/board/arpl/overlayfs/opt/arpl/patch/iosched-trampoline.sh b/files/board/arpl/overlayfs/opt/arpl/patch/iosched-trampoline.sh new file mode 100755 index 00000000..69217918 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/patch/iosched-trampoline.sh @@ -0,0 +1,16 @@ +#!/usr/bin/sh +# This script is saved to /sbin/modprobe which is a so called UMH (user-mode-helper) for kmod (kernel/kmod.c) +# The kmod subsystem in the kernel is used to load modules from kernel. We exploit it a bit to load RP as soon as +# possible (which turns out to be via init/main.c => load_default_modules => load_default_elevator_module +# When the kernel is booted with "elevator=elevator" it will attempt to load a module "elevator-iosched"... and the rest +# should be obvious from the code below. 
DO NOT print anything here (kernel doesn't attach STDOUT) +for arg in "$@" +do + if [ "$arg" = "elevator-iosched" ]; then + insmod /usr/lib/modules/rp.ko + rm /usr/lib/modules/rp.ko + rm /sbin/modprobe + exit 0 + fi +done +exit 1 diff --git a/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-42661-disable-disabled-ports.patch b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-42661-disable-disabled-ports.patch new file mode 100644 index 00000000..5f7b2234 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-42661-disable-disabled-ports.patch @@ -0,0 +1,11 @@ +--- a/usr/syno/web/webman/get_state.cgi ++++ b/usr/syno/web/webman/get_state.cgi +@@ -1,7 +1,7 @@ + #!/bin/sh + + PATH="$PATH:/bin:/sbin:/usr/bin:/usr/sbin" +-DisabledPortDisks="$(/usr/syno/bin/synodiskport -portthawlist)" ++DisabledPortDisks="" + partition="$(/usr/syno/bin/synodiskport -installable_disk_list)" + + upnpmodelname="$(/bin/get_key_value /etc.defaults/synoinfo.conf upnpmodelname)" diff --git a/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-42661-post-init-script.patch b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-42661-post-init-script.patch new file mode 100644 index 00000000..87ba956b --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-42661-post-init-script.patch @@ -0,0 +1,31 @@ +--- a/usr/sbin/init.post ++++ b/usr/sbin/init.post +@@ -31,7 +31,28 @@ + OptPrjQuota="$(GetPQMountOpt "${RootMountPath}")" + # shellcheck disable=SC2046 + Mount "${RootMountPath}" /tmpRoot -o barrier=1 ${OptPrjQuota} ++############################################################################################ ++SED_PATH='/tmpRoot/usr/bin/sed' + ++@@@CONFIG-MANIPULATORS-TOOLS@@@ ++ ++@@@CONFIG-GENERATED@@@ ++ ++UPSTART="/tmpRoot/usr/share/init" ++ ++if ! echo; then ++ _replace_in_file '^start on' '#start on' $UPSTART/tty.conf ++ _replace_in_file "console output" "console none" $UPSTART/syno_poweroff_task.conf ++ _replace_in_file "console output" "console none" $UPSTART/burnin_loader.conf ++ _replace_in_file "console output" "console none" $UPSTART/udevtrigger.conf ++ _replace_in_file "console output" "console none" $UPSTART/bs-poweroff.conf ++ _replace_in_file "console output" "console none" $UPSTART/udevd.conf ++else ++ _replace_in_file '^#start on' 'start on' $UPSTART/tty.conf ++fi ++ ++/addons/addons.sh sys ++############################################################################################ + Mkdir -p /tmpRoot/initrd + + Umount /proc >/dev/null 2>&1 \ No newline at end of file diff --git a/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-disable-disabled-ports.patch b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-disable-disabled-ports.patch new file mode 100644 index 00000000..3ea5eb70 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-disable-disabled-ports.patch @@ -0,0 +1,11 @@ +--- a/usr/syno/web/webman/get_state.cgi ++++ b/usr/syno/web/webman/get_state.cgi +@@ -1,7 +1,7 @@ + #!/bin/sh + + PATH="$PATH:/bin:/sbin:/usr/bin:/usr/sbin" +-DisabledPortDisks=`/usr/syno/bin/synodiskport -portthawlist` ++DisabledPortDisks="" + partition=`/usr/syno/bin/synodiskport -installable_disk_list` + + product=`/bin/get_key_value /etc.defaults/synoinfo.conf product` diff --git a/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-disable-root-pwd.patch b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-disable-root-pwd.patch new file mode 100644 index 00000000..b76360f8 --- /dev/null +++ 
b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-disable-root-pwd.patch @@ -0,0 +1,10 @@ +diff --git a/etc/passwd b/etc/passwd +index 6a0344d..cf95fbd 100644 +--- a/etc/passwd ++++ b/etc/passwd +@@ -1,4 +1,4 @@ +-root:x:0:0::/root:/bin/ash ++root::0:0::/root:/bin/ash + system:x:1:1::/usr/syno/synoman:/usr/bin/nologin + daemon:x:2:2::/:/bin/sh + lp:x:7:7::/var/spool/lpd:/bin/sh diff --git a/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-init-script.patch b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-init-script.patch new file mode 100644 index 00000000..bd53da7b --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-init-script.patch @@ -0,0 +1,11 @@ +--- a/linuxrc.syno.impl ++++ b/linuxrc.syno.impl +@@ -155,6 +155,8 @@ fi + # insert basic USB modules for detect f401/FDT + echo "Insert basic USB modules..." + SYNOLoadModules $USB_MODULES ++SYNOLoadModules "usb-storage" ++/addons/addons.sh rd + + # insert Etron USB3.0 drivers + diff --git a/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-network-hosts.patch b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-network-hosts.patch new file mode 100644 index 00000000..b6c953b6 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-network-hosts.patch @@ -0,0 +1,8 @@ +--- /etc/hosts ++++ /etc/hosts +@@ -2,3 +2,5 @@ + # that require network functionality will fail. + 127.0.0.1 localhost + ::1 localhost ++127.0.0.1 update7.synology.com ++127.0.0.1 dataupdate7.synology.com diff --git a/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-post-init-script.patch b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-post-init-script.patch new file mode 100644 index 00000000..91af341a --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/patch/ramdisk-common-post-init-script.patch @@ -0,0 +1,32 @@ +--- a/usr/sbin/init.post ++++ b/usr/sbin/init.post +@@ -18,6 +18,29 @@ if [ "$UniqueRD" = "nextkvmx64" ]; then + fi + Mount "$RootDevice" /tmpRoot -o barrier=1 + ++############################################################################################ ++SED_PATH='/tmpRoot/usr/bin/sed' ++ ++@@@CONFIG-MANIPULATORS-TOOLS@@@ ++ ++@@@CONFIG-GENERATED@@@ ++ ++UPSTART="/tmpRoot/usr/share/init" ++ ++if ! echo; then ++ _replace_in_file '^start on' '#start on' $UPSTART/tty.conf ++ _replace_in_file "console output" "console none" $UPSTART/syno_poweroff_task.conf ++ _replace_in_file "console output" "console none" $UPSTART/burnin_loader.conf ++ _replace_in_file "console output" "console none" $UPSTART/udevtrigger.conf ++ _replace_in_file "console output" "console none" $UPSTART/bs-poweroff.conf ++ _replace_in_file "console output" "console none" $UPSTART/udevd.conf ++else ++ _replace_in_file '^#start on' 'start on' $UPSTART/tty.conf ++fi ++ ++/addons/addons.sh sys ++############################################################################################ ++ + Mkdir -p /tmpRoot/initrd + + Umount /proc >/dev/null 2>&1 diff --git a/files/board/arpl/overlayfs/opt/arpl/ramdisk-patch.sh b/files/board/arpl/overlayfs/opt/arpl/ramdisk-patch.sh new file mode 100755 index 00000000..54b21d56 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/ramdisk-patch.sh @@ -0,0 +1,126 @@ +#!/usr/bin/env bash + +. /opt/arpl/include/functions.sh +. /opt/arpl/include/addons.sh + +# Sanity check +[ -f "${ORI_RDGZ_FILE}" ] || die "${ORI_RDGZ_FILE} not found!" + +echo -n "Patching Ramdisk" + +# Remove old rd.gz patched +rm -f "${MOD_RDGZ_FILE}" + +# Unzipping ramdisk +echo -n "." 
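+# A short note on the formats involved (descriptive only, mirroring the commands below): the DSM rd.gz is an +# xz/lzma-compressed cpio archive, so it is unpacked with "xz -dc | cpio -idm" (extract, create directories, preserve +# modification times); the repack step at the end of this script reverses this with "cpio -o -H newc", plus xz when +# rd-compressed is true for the build.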
+rm -rf "${RAMDISK_PATH}" # Force clean +mkdir -p "${RAMDISK_PATH}" +(cd "${RAMDISK_PATH}"; xz -dc < "${ORI_RDGZ_FILE}" | cpio -idm) >/dev/null 2>&1 + +# Check if DSM buildnumber changed +. "${RAMDISK_PATH}/etc/VERSION" + +MODEL="`readConfigKey "model" "${USER_CONFIG_FILE}"`" +BUILD="`readConfigKey "build" "${USER_CONFIG_FILE}"`" +LKM="`readConfigKey "lkm" "${USER_CONFIG_FILE}"`" + +if [ ${BUILD} -ne ${buildnumber} ]; then + echo -e "\033[A\n\033[1;32mBuild number changed from \033[1;31m${BUILD}\033[1;32m to \033[1;31m${buildnumber}\033[0m" + echo -n "Patching Ramdisk." + # Update new buildnumber + BUILD=${buildnumber} + writeConfigKey "build" "${BUILD}" "${USER_CONFIG_FILE}" +fi + +echo -n "." +# Read model data +PLATFORM="`readModelKey "${MODEL}" "platform"`" +KVER="`readModelKey "${MODEL}" "builds.${BUILD}.kver"`" +RD_COMPRESSED="`readModelKey "${MODEL}" "builds.${BUILD}.rd-compressed"`" + +# Sanity check +[ -z "${PLATFORM}" -o -z "${KVER}" ] && die "ERROR: Configuration for model ${MODEL} and buildnumber ${BUILD} not found." + +declare -A SYNOINFO +declare -A ADDONS + +# Read more config +while IFS="=" read KEY VALUE; do + [ -n "${KEY}" ] && SYNOINFO["${KEY}"]="${VALUE}" +done < <(readModelMap "${MODEL}" "builds.${BUILD}.synoinfo") +while IFS="=" read KEY VALUE; do + [ -n "${KEY}" ] && SYNOINFO["${KEY}"]="${VALUE}" +done < <(readConfigMap "synoinfo" "${USER_CONFIG_FILE}") +while IFS="=" read KEY VALUE; do + [ -n "${KEY}" ] && ADDONS["${KEY}"]="${VALUE}" +done < <(readConfigMap "addons" "${USER_CONFIG_FILE}") + +# Patches +while read f; do + echo -n "." + echo "Patching with ${f}" >"${LOG_FILE}" 2>&1 + (cd "${RAMDISK_PATH}" && patch -p1 < "${PATCH_PATH}/${f}") >>"${LOG_FILE}" 2>&1 || dieLog +done < <(readModelArray "${MODEL}" "builds.${BUILD}.patch") + +# Patch /etc/synoinfo.conf +echo -n "." +for KEY in ${!SYNOINFO[@]}; do + sed -i "s|^${KEY}=.*|${KEY}=\"${SYNOINFO[${KEY}]}\"|" "${RAMDISK_PATH}/etc/synoinfo.conf" >"${LOG_FILE}" 2>&1 || dieLog +done + +# Patch /sbin/init.post +echo -n "." +grep -v -e '^[\t ]*#' -e '^$' "${PATCH_PATH}/config-manipulators.sh" > "${TMP_PATH}/rp.txt" +sed -e "/@@@CONFIG-MANIPULATORS-TOOLS@@@/ {" -e "r ${TMP_PATH}/rp.txt" -e 'd' -e '}' -i "${RAMDISK_PATH}/sbin/init.post" +rm "${TMP_PATH}/rp.txt" +touch "${TMP_PATH}/rp.txt" +for KEY in ${!SYNOINFO[@]}; do + echo "_set_conf_kv '${KEY}' '${SYNOINFO[${KEY}]}' '/tmpRoot/etc/synoinfo.conf'" >> "${TMP_PATH}/rp.txt" + echo "_set_conf_kv '${KEY}' '${SYNOINFO[${KEY}]}' '/tmpRoot/etc.defaults/synoinfo.conf'" >> "${TMP_PATH}/rp.txt" +done +sed -e "/@@@CONFIG-GENERATED@@@/ {" -e "r ${TMP_PATH}/rp.txt" -e 'd' -e '}' -i "${RAMDISK_PATH}/sbin/init.post" +rm "${TMP_PATH}/rp.txt" + +# Copying LKM to /usr/lib/modules/rp.ko +echo -n "." +cp "${LKM_PATH}/rp-${PLATFORM}-${KVER}-${LKM}.ko" "${RAMDISK_PATH}/usr/lib/modules/rp.ko" + +# Copying fake modprobe +echo -n "." +cp "${PATCH_PATH}/iosched-trampoline.sh" "${RAMDISK_PATH}/usr/sbin/modprobe" + +# Addons +# Check if model needs Device-tree dynamic patch +DT="`readModelKey "${MODEL}" "dt"`" +[ "${DT}" = "true" ] && ADDONS['qjs-dtb']="" # Add system addon "qjs-dtb" +mkdir -p "${RAMDISK_PATH}/addons" +echo -n "." +#/proc/sys/kernel/syno_install_flag +echo "#!/bin/sh" > "${RAMDISK_PATH}/addons/addons.sh" +echo 'export INSMOD="/sbin/insmod"' >> "${RAMDISK_PATH}/addons/addons.sh" +echo >> "${RAMDISK_PATH}/addons/addons.sh" +for ADDON in ${!ADDONS[@]}; do + PARAMS=${ADDONS[${ADDON}]} + if ! installAddon ${ADDON}; then + echo "ADDON ${ADDON} not found!" 
| tee "${LOG_FILE}" + exit 1 + fi + echo "/addons/${ADDON}.sh \${1} ${PARAMS}" >> "${RAMDISK_PATH}/addons/addons.sh" 2>"${LOG_FILE}" || dieLog +done +chmod +x "${RAMDISK_PATH}/addons/addons.sh" + +# Reassemble ramdisk +echo -n "." +if [ "${RD_COMPRESSED}" == "true" ]; then + (cd "${RAMDISK_PATH}" && find . | cpio -o -H newc -R root:root | xz -9 --format=lzma > "${MOD_RDGZ_FILE}") >"${LOG_FILE}" 2>&1 || dieLog +else + (cd "${RAMDISK_PATH}" && find . | cpio -o -H newc -R root:root > "${MOD_RDGZ_FILE}") >"${LOG_FILE}" 2>&1 || dieLog +fi + +# Clean +rm -rf "${RAMDISK_PATH}" + +# Update SHA256 hash +RAMDISK_HASH="`sha256sum ${ORI_RDGZ_FILE} | awk '{print$1}'`" +writeConfigKey "ramdisk-hash" "${RAMDISK_HASH}" "${USER_CONFIG_FILE}" +echo diff --git a/files/board/arpl/overlayfs/opt/arpl/vmlinux-to-bzImage.sh b/files/board/arpl/overlayfs/opt/arpl/vmlinux-to-bzImage.sh new file mode 100755 index 00000000..94978c45 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/vmlinux-to-bzImage.sh @@ -0,0 +1,63 @@ +#!/bin/sh + +. /opt/arpl/include/functions.sh + +#zImage_head 16494 +#payload( +# vmlinux.bin x +# padding 0xf00000-x +# vmlinux.bin size 4 +#) 0xf00004 +#zImage_tail( +# unknown 72 +# run_size 4 +# unknown 30 +# vmlinux.bin size 4 +# unknown 114460 +#) 114570 +#crc32 4 + +# Adapted from size_append in: scripts/Makefile.lib +# Usage: file_size_le FILE [FILE2] [FILEn]... +# Output: LE HEX with size of file in bytes (to STDOUT) +file_size_le () { + printf $( + dec_size=0; + for F in "${@}"; do + fsize=$(stat -c "%s" ${F}); + dec_size=$(expr ${dec_size} + ${fsize}); + done; + printf "%08x\n" ${dec_size} | + sed 's/\(..\)/\1 /g' | { + read ch0 ch1 ch2 ch3; + for ch in ${ch3} ${ch2} ${ch1} ${ch0}; do + printf '%s%03o' '\' $((0x${ch})); + done; + } + ) +} + +size_le () { + printf $( + printf "%08x\n" "${@}" | + sed 's/\(..\)/\1 /g' | { + read ch0 ch1 ch2 ch3; + for ch in ${ch3} ${ch2} ${ch1} ${ch0}; do + printf '%s%03o' '\' $((0x${ch})); + done; + } + ) +} +SCRIPT_DIR=`dirname $0` +VMLINUX_MOD=${1} +ZIMAGE_MOD=${2} +gzip -cd "${SCRIPT_DIR}/zImage_template.gz" > "${ZIMAGE_MOD}" + +dd if="${VMLINUX_MOD}" of="${ZIMAGE_MOD}" bs=16494 seek=1 conv=notrunc >"${LOG_FILE}" 2>&1 || dieLog +file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=15745134 seek=1 conv=notrunc >"${LOG_FILE}" 2>&1 || dieLog + +RUN_SIZE=$(objdump -h ${VMLINUX_MOD} | sh "${SCRIPT_DIR}/calc_run_size.sh") +size_le $RUN_SIZE | dd of=$ZIMAGE_MOD bs=15745210 seek=1 conv=notrunc >"${LOG_FILE}" 2>&1 || dieLog +file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=15745244 seek=1 conv=notrunc >"${LOG_FILE}" 2>&1 || dieLog +# cksum $ZIMAGE_MOD # https://blog.box.com/crc32-checksums-the-good-the-bad-and-the-ugly +size_le $(($((16#$(php "${SCRIPT_DIR}/crc32.php" "${ZIMAGE_MOD}"))) ^ 0xFFFFFFFF)) | dd of="${ZIMAGE_MOD}" conv=notrunc oflag=append >"${LOG_FILE}" 2>&1 || dieLog diff --git a/files/board/arpl/overlayfs/opt/arpl/zImage_template.gz b/files/board/arpl/overlayfs/opt/arpl/zImage_template.gz new file mode 100644 index 00000000..cb00ed1a Binary files /dev/null and b/files/board/arpl/overlayfs/opt/arpl/zImage_template.gz differ diff --git a/files/board/arpl/overlayfs/opt/arpl/zimage-patch.sh b/files/board/arpl/overlayfs/opt/arpl/zimage-patch.sh new file mode 100755 index 00000000..34e1c1b7 --- /dev/null +++ b/files/board/arpl/overlayfs/opt/arpl/zimage-patch.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +. /opt/arpl/include/functions.sh + +# Sanity check +[ -f "${ORI_ZIMAGE_FILE}" ] || die "${ORI_ZIMAGE_FILE} not found!"
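+# Overview (summarizing the steps below): unpack the compressed bzImage into a raw vmlinux, apply the two binary +# patches (patch-boot_params-check.php and patch-ramdisk-check.php), then wrap the patched vmlinux back into a +# bzImage via vmlinux-to-bzImage.sh; each helper logs to ${LOG_FILE} and dieLog aborts on the first failure.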
+ +echo -n "Patching zImage" + +rm -f "${MOD_ZIMAGE_FILE}" +echo -n "." +# Extract vmlinux +/opt/arpl/bzImage-to-vmlinux.sh "${ORI_ZIMAGE_FILE}" "${TMP_PATH}/vmlinux" >"${LOG_FILE}" 2>&1 || dieLog +echo -n "." +# Patch boot params +/opt/arpl/patch-boot_params-check.php "${TMP_PATH}/vmlinux" "${TMP_PATH}/vmlinux-mod1" >"${LOG_FILE}" 2>&1 || dieLog +echo -n "." +# Patch ramdisk check +/opt/arpl/patch-ramdisk-check.php "${TMP_PATH}/vmlinux-mod1" "${TMP_PATH}/vmlinux-mod2" >"${LOG_FILE}" 2>&1 || dieLog +echo -n "." +# Rebuild zImage +/opt/arpl/vmlinux-to-bzImage.sh "${TMP_PATH}/vmlinux-mod2" "${MOD_ZIMAGE_FILE}" >"${LOG_FILE}" 2>&1 || dieLog + +echo -n "." +# Update HASH of new DSM zImage +HASH="`sha256sum ${ORI_ZIMAGE_FILE} | awk '{print$1}'`" +writeConfigKey "zimage-hash" "${HASH}" "${USER_CONFIG_FILE}" +echo diff --git a/files/board/arpl/overlayfs/root/.bashrc b/files/board/arpl/overlayfs/root/.bashrc new file mode 100644 index 00000000..8d86203d --- /dev/null +++ b/files/board/arpl/overlayfs/root/.bashrc @@ -0,0 +1,31 @@ +# ~/.bashrc: executed by bash(1) for non-login shells. + +# Note: PS1 and umask are already set in /etc/profile. You should not +# need this unless you want different defaults for root. +PS1='\u@\h:\w# ' +# umask 022 + +# You may uncomment the following lines if you want `ls' to be colorized: +export LS_OPTIONS='--color=auto' +alias ls='ls $LS_OPTIONS' +alias ll='ls $LS_OPTIONS -l' + +# Save history in real time +shopt -s histappend +PROMPT_COMMAND="history -a;$PROMPT_COMMAND" + +export EDITOR="/bin/nano" +export BOOTLOADER_PATH="/mnt/p1" +export SLPART_PATH="/mnt/p2" # Synology partition +export CACHE_PATH="/mnt/p3" +export PATH="${PATH}:/opt/arpl" + +if [ ! -f ${HOME}/.initialized ]; then + touch ${HOME}/.initialized + /opt/arpl/init.sh +fi +cd /opt/arpl +if tty | grep -q "/dev/pts" && [ -z "${SSH_TTY}" ]; then + /opt/arpl/menu.sh + exit +fi diff --git a/files/board/arpl/overlayfs/root/.profile b/files/board/arpl/overlayfs/root/.profile new file mode 100644 index 00000000..bb632cf6 --- /dev/null +++ b/files/board/arpl/overlayfs/root/.profile @@ -0,0 +1,17 @@ +# ~/.profile: executed by the command interpreter for login shells. +# This file is not read by bash(1), if ~/.bash_profile or ~/.bash_login +# exists. +# see /usr/share/doc/bash/examples/startup-files for examples. +# the files are located in the bash-doc package. + +# the default umask is set in /etc/profile; for setting the umask +# for ssh logins, install and configure the libpam-umask package. +#umask 022 + +# if running bash +if [ -n "$BASH_VERSION" ]; then + # include .bashrc if it exists + if [ -f "$HOME/.bashrc" ]; then + . 
"$HOME/.bashrc" + fi +fi diff --git a/files/board/arpl/overlayfs/usr/bin/yq b/files/board/arpl/overlayfs/usr/bin/yq new file mode 100755 index 00000000..1481a7be Binary files /dev/null and b/files/board/arpl/overlayfs/usr/bin/yq differ diff --git a/files/board/arpl/p1/EFI/BOOT/BOOTX64.EFI b/files/board/arpl/p1/EFI/BOOT/BOOTX64.EFI new file mode 100644 index 00000000..b6d21d1f Binary files /dev/null and b/files/board/arpl/p1/EFI/BOOT/BOOTX64.EFI differ diff --git a/files/board/arpl/p1/EFI/BOOT/SynoBootLoader.conf b/files/board/arpl/p1/EFI/BOOT/SynoBootLoader.conf new file mode 100755 index 00000000..2fb0ba94 --- /dev/null +++ b/files/board/arpl/p1/EFI/BOOT/SynoBootLoader.conf @@ -0,0 +1,19 @@ +serial --unit=1 --speed=115200 +terminal serial +default 1 +timeout 3 +verbose +hiddenmenu +fallback 0 + +title SYNOLOGY_1 + root (hd0,0) + kernel /zImage root=/dev/md0 + initrd /rd.gz + +title SYNOLOGY_2 + root (hd0,1) + cksum /grub_cksum.syno + vender /vender show + kernel /zImage root=/dev/md0 + initrd /rd.gz diff --git a/files/board/arpl/p1/EFI/BOOT/SynoBootLoader.efi b/files/board/arpl/p1/EFI/BOOT/SynoBootLoader.efi new file mode 100755 index 00000000..e69de29b diff --git a/files/board/arpl/p1/EFI/BOOT/grub.cfg b/files/board/arpl/p1/EFI/BOOT/grub.cfg new file mode 100644 index 00000000..d1d7a92c --- /dev/null +++ b/files/board/arpl/p1/EFI/BOOT/grub.cfg @@ -0,0 +1,3 @@ +set root=(hd0,msdos1) +set prefix=($root)'/grub' +configfile $prefix/grub.cfg diff --git a/files/board/arpl/p1/grub/fonts/unicode.pf2 b/files/board/arpl/p1/grub/fonts/unicode.pf2 new file mode 100644 index 00000000..6c3b9d13 Binary files /dev/null and b/files/board/arpl/p1/grub/fonts/unicode.pf2 differ diff --git a/files/board/arpl/p1/grub/grub.cfg b/files/board/arpl/p1/grub/grub.cfg new file mode 100644 index 00000000..aee4a153 --- /dev/null +++ b/files/board/arpl/p1/grub/grub.cfg @@ -0,0 +1,73 @@ +insmod echo +insmod terminal +insmod test + +terminal_input console +terminal_output console + +set default="0" +set timeout="3" +set timeout_style="menu" + +insmod loadenv +if [ -s $prefix/grubenv ]; then + set have_grubenv=true + load_env +fi +if [ "${next_entry}" ] ; then + set default="${next_entry}" + set next_entry= + save_env next_entry +fi + +insmod usb_keyboard +insmod part_msdos +insmod ext2 +insmod fat +insmod linux +insmod gzio + +set gfxmode=auto +if [ "${grub_platform}" = "efi" ]; then + insmod efi_gop + insmod efi_uga +else + insmod vbe + insmod vga +fi + +insmod font +if loadfont ${prefix}/fonts/unicode.pf2; then + insmod gfxterm + set gfxmode=auto + set gfxpayload=keep + terminal_output gfxterm +fi + +insmod serial +if serial --unit=0 --speed=115200; then + terminal_input --append serial_com0 + terminal_output --append serial_com0 +fi + +insmod search +search --set=root --label "ARPL1" +if [ -s /zImage -a -s /rd.gz ]; then + menuentry 'Boot DSM' --id boot { + echo "Loading kernel..." + linux /bzImage-arpl console=ttyS0,115200n8 + echo "Loading initramfs..." + initrd /initrd-arpl + echo "Booting..." + } +else + set timeout="1" +fi + +menuentry 'Configure loader' --id config { + echo "Loading kernel..." + linux /bzImage-arpl console=ttyS0,115200n8 IWANTTOCHANGETHECONFIG + echo "Loading initramfs..." + initrd /initrd-arpl + echo "Booting..." 
+}
diff --git a/files/board/arpl/p1/grub/grubenv b/files/board/arpl/p1/grub/grubenv
new file mode 100755
index 00000000..f93ccbff
--- /dev/null
+++ b/files/board/arpl/p1/grub/grubenv
@@ -0,0 +1,2 @@
+# GRUB Environment Block
+#######################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################################
\ No newline at end of file
diff --git a/files/board/arpl/p1/grub/i386-pc/acpi.mod b/files/board/arpl/p1/grub/i386-pc/acpi.mod
new file mode 100644
index 00000000..35f7439c
Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/acpi.mod differ
diff --git a/files/board/arpl/p1/grub/i386-pc/adler32.mod b/files/board/arpl/p1/grub/i386-pc/adler32.mod
new file mode 100644
index 00000000..9188b181
Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/adler32.mod differ
diff --git a/files/board/arpl/p1/grub/i386-pc/affs.mod b/files/board/arpl/p1/grub/i386-pc/affs.mod
new file mode 100644
index 00000000..3a9e5a7c
Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/affs.mod differ
diff --git a/files/board/arpl/p1/grub/i386-pc/afs.mod b/files/board/arpl/p1/grub/i386-pc/afs.mod
new file mode 100644
index 00000000..863c5434
Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/afs.mod differ
diff --git a/files/board/arpl/p1/grub/i386-pc/ahci.mod b/files/board/arpl/p1/grub/i386-pc/ahci.mod
new file mode 100644
index 00000000..32c5375f
Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ahci.mod differ
diff --git a/files/board/arpl/p1/grub/i386-pc/all_video.mod b/files/board/arpl/p1/grub/i386-pc/all_video.mod
new file mode 100644
index 00000000..937ed6ed
Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/all_video.mod differ
diff --git a/files/board/arpl/p1/grub/i386-pc/aout.mod b/files/board/arpl/p1/grub/i386-pc/aout.mod
new file mode 100644
index 00000000..97398c70
Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/aout.mod differ
diff --git a/files/board/arpl/p1/grub/i386-pc/archelp.mod b/files/board/arpl/p1/grub/i386-pc/archelp.mod
new file mode 100644
index 00000000..391a409a
Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/archelp.mod differ
diff --git a/files/board/arpl/p1/grub/i386-pc/at_keyboard.mod b/files/board/arpl/p1/grub/i386-pc/at_keyboard.mod
new file mode 100644
index 00000000..66e7da3b
Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/at_keyboard.mod differ
diff --git a/files/board/arpl/p1/grub/i386-pc/ata.mod b/files/board/arpl/p1/grub/i386-pc/ata.mod
new file mode 100644
index 00000000..f7ba44c6
Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ata.mod differ
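Note on the zImage patch sequence above: it records the SHA-256 of the original DSM zImage in the user config via writeConfigKey. A minimal sketch of how that stored value can later detect that a DSM update replaced the kernel, assuming a readConfigKey counterpart to writeConfigKey exists in the loader's shell library:

# Sketch only, not the loader's actual update check: re-hash the original
# zImage and compare with the recorded value; a mismatch means DSM replaced
# the kernel and the patch steps above need to be rerun.
CUR_HASH="$(sha256sum "${ORI_ZIMAGE_FILE}" | awk '{print $1}')"
OLD_HASH="$(readConfigKey "zimage-hash" "${USER_CONFIG_FILE}")"  # assumed helper
if [ "${CUR_HASH}" != "${OLD_HASH}" ]; then
  echo "zImage changed (DSM update?); repatch required"
fi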
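In grub.cfg above, the only difference between the 'Boot DSM' and 'Configure loader' entries is the extra IWANTTOCHANGETHECONFIG token on the kernel command line; both boot the same bzImage-arpl/initrd-arpl pair. Presumably the initramfs init scripts branch on that token, along the lines of this illustrative sketch (script names are taken from elsewhere in this commit, the check itself is hypothetical):

# Illustrative sketch: route to the configuration menu when GRUB passed the
# config flag on the kernel command line, otherwise continue the DSM boot.
if grep -q "IWANTTOCHANGETHECONFIG" /proc/cmdline; then
  /opt/arpl/menu.sh   # interactive TUI configurator
else
  /opt/arpl/boot.sh   # normal DSM boot path
fi

diff --git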
a/files/board/arpl/p1/grub/i386-pc/backtrace.mod b/files/board/arpl/p1/grub/i386-pc/backtrace.mod new file mode 100644 index 00000000..933deecc Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/backtrace.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/bfs.mod b/files/board/arpl/p1/grub/i386-pc/bfs.mod new file mode 100644 index 00000000..23b67da9 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/bfs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/biosdisk.mod b/files/board/arpl/p1/grub/i386-pc/biosdisk.mod new file mode 100644 index 00000000..982b19d1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/biosdisk.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/bitmap.mod b/files/board/arpl/p1/grub/i386-pc/bitmap.mod new file mode 100644 index 00000000..4f15eb5f Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/bitmap.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/bitmap_scale.mod b/files/board/arpl/p1/grub/i386-pc/bitmap_scale.mod new file mode 100644 index 00000000..8abdb70a Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/bitmap_scale.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/blocklist.mod b/files/board/arpl/p1/grub/i386-pc/blocklist.mod new file mode 100644 index 00000000..537901ac Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/blocklist.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/boot.mod b/files/board/arpl/p1/grub/i386-pc/boot.mod new file mode 100644 index 00000000..0ee77ed3 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/boot.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/bsd.mod b/files/board/arpl/p1/grub/i386-pc/bsd.mod new file mode 100644 index 00000000..5adbc995 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/bsd.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/bswap_test.mod b/files/board/arpl/p1/grub/i386-pc/bswap_test.mod new file mode 100644 index 00000000..7be8907a Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/bswap_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/btrfs.mod b/files/board/arpl/p1/grub/i386-pc/btrfs.mod new file mode 100644 index 00000000..5b7a8cda Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/btrfs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/bufio.mod b/files/board/arpl/p1/grub/i386-pc/bufio.mod new file mode 100644 index 00000000..da0b55fd Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/bufio.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cat.mod b/files/board/arpl/p1/grub/i386-pc/cat.mod new file mode 100644 index 00000000..20c7225e Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cat.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cbfs.mod b/files/board/arpl/p1/grub/i386-pc/cbfs.mod new file mode 100644 index 00000000..bcb5094e Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cbfs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cbls.mod b/files/board/arpl/p1/grub/i386-pc/cbls.mod new file mode 100644 index 00000000..820929ed Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cbls.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cbmemc.mod b/files/board/arpl/p1/grub/i386-pc/cbmemc.mod new file mode 100644 index 00000000..60c50e1e Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cbmemc.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cbtable.mod b/files/board/arpl/p1/grub/i386-pc/cbtable.mod new file mode 100644 
index 00000000..8e788b1c Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cbtable.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cbtime.mod b/files/board/arpl/p1/grub/i386-pc/cbtime.mod new file mode 100644 index 00000000..a6300aee Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cbtime.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/chain.mod b/files/board/arpl/p1/grub/i386-pc/chain.mod new file mode 100644 index 00000000..7a46f83c Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/chain.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cmdline_cat_test.mod b/files/board/arpl/p1/grub/i386-pc/cmdline_cat_test.mod new file mode 100644 index 00000000..ece6e77a Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cmdline_cat_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cmosdump.mod b/files/board/arpl/p1/grub/i386-pc/cmosdump.mod new file mode 100644 index 00000000..49a8286d Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cmosdump.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cmostest.mod b/files/board/arpl/p1/grub/i386-pc/cmostest.mod new file mode 100644 index 00000000..23e0997e Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cmostest.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cmp.mod b/files/board/arpl/p1/grub/i386-pc/cmp.mod new file mode 100644 index 00000000..bb61db13 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cmp.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cmp_test.mod b/files/board/arpl/p1/grub/i386-pc/cmp_test.mod new file mode 100644 index 00000000..a271d724 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cmp_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/configfile.mod b/files/board/arpl/p1/grub/i386-pc/configfile.mod new file mode 100644 index 00000000..66ed4505 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/configfile.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cpio.mod b/files/board/arpl/p1/grub/i386-pc/cpio.mod new file mode 100644 index 00000000..8b96f897 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cpio.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cpio_be.mod b/files/board/arpl/p1/grub/i386-pc/cpio_be.mod new file mode 100644 index 00000000..7b7bcb8b Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cpio_be.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cpuid.mod b/files/board/arpl/p1/grub/i386-pc/cpuid.mod new file mode 100644 index 00000000..863bd1a1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cpuid.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/crc64.mod b/files/board/arpl/p1/grub/i386-pc/crc64.mod new file mode 100644 index 00000000..d044baa5 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/crc64.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/crypto.mod b/files/board/arpl/p1/grub/i386-pc/crypto.mod new file mode 100644 index 00000000..054a6740 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/crypto.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cryptodisk.mod b/files/board/arpl/p1/grub/i386-pc/cryptodisk.mod new file mode 100644 index 00000000..85d69439 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/cryptodisk.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/cs5536.mod b/files/board/arpl/p1/grub/i386-pc/cs5536.mod new file mode 100644 index 00000000..6a4c437e Binary files /dev/null and 
b/files/board/arpl/p1/grub/i386-pc/cs5536.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/ctz_test.mod b/files/board/arpl/p1/grub/i386-pc/ctz_test.mod new file mode 100644 index 00000000..bf031325 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ctz_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/date.mod b/files/board/arpl/p1/grub/i386-pc/date.mod new file mode 100644 index 00000000..29a5c248 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/date.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/datehook.mod b/files/board/arpl/p1/grub/i386-pc/datehook.mod new file mode 100644 index 00000000..1e598369 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/datehook.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/datetime.mod b/files/board/arpl/p1/grub/i386-pc/datetime.mod new file mode 100644 index 00000000..940a21e2 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/datetime.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/disk.mod b/files/board/arpl/p1/grub/i386-pc/disk.mod new file mode 100644 index 00000000..d0fb152b Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/disk.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/diskfilter.mod b/files/board/arpl/p1/grub/i386-pc/diskfilter.mod new file mode 100644 index 00000000..bf25b072 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/diskfilter.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/div.mod b/files/board/arpl/p1/grub/i386-pc/div.mod new file mode 100644 index 00000000..38d121f6 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/div.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/div_test.mod b/files/board/arpl/p1/grub/i386-pc/div_test.mod new file mode 100644 index 00000000..3fbaf39d Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/div_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/dm_nv.mod b/files/board/arpl/p1/grub/i386-pc/dm_nv.mod new file mode 100644 index 00000000..5c5e7e50 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/dm_nv.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/drivemap.mod b/files/board/arpl/p1/grub/i386-pc/drivemap.mod new file mode 100644 index 00000000..5b1b230a Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/drivemap.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/echo.mod b/files/board/arpl/p1/grub/i386-pc/echo.mod new file mode 100644 index 00000000..d56f6101 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/echo.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/efiemu.mod b/files/board/arpl/p1/grub/i386-pc/efiemu.mod new file mode 100644 index 00000000..658316d7 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/efiemu.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/ehci.mod b/files/board/arpl/p1/grub/i386-pc/ehci.mod new file mode 100644 index 00000000..b4d19c00 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ehci.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/elf.mod b/files/board/arpl/p1/grub/i386-pc/elf.mod new file mode 100644 index 00000000..a08a6a38 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/elf.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/eval.mod b/files/board/arpl/p1/grub/i386-pc/eval.mod new file mode 100644 index 00000000..ce0ff471 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/eval.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/exfat.mod 
b/files/board/arpl/p1/grub/i386-pc/exfat.mod new file mode 100644 index 00000000..7fe49188 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/exfat.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/exfctest.mod b/files/board/arpl/p1/grub/i386-pc/exfctest.mod new file mode 100644 index 00000000..2bf8a40d Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/exfctest.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/ext2.mod b/files/board/arpl/p1/grub/i386-pc/ext2.mod new file mode 100644 index 00000000..b678a43b Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ext2.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/extcmd.mod b/files/board/arpl/p1/grub/i386-pc/extcmd.mod new file mode 100644 index 00000000..cf73946c Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/extcmd.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/f2fs.mod b/files/board/arpl/p1/grub/i386-pc/f2fs.mod new file mode 100644 index 00000000..82786a6f Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/f2fs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/fat.mod b/files/board/arpl/p1/grub/i386-pc/fat.mod new file mode 100644 index 00000000..cb4fb8c8 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/fat.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/file.mod b/files/board/arpl/p1/grub/i386-pc/file.mod new file mode 100644 index 00000000..94cae269 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/file.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/font.mod b/files/board/arpl/p1/grub/i386-pc/font.mod new file mode 100644 index 00000000..61e6b958 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/font.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/freedos.mod b/files/board/arpl/p1/grub/i386-pc/freedos.mod new file mode 100644 index 00000000..ec655794 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/freedos.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/fshelp.mod b/files/board/arpl/p1/grub/i386-pc/fshelp.mod new file mode 100644 index 00000000..fb5b42e5 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/fshelp.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/functional_test.mod b/files/board/arpl/p1/grub/i386-pc/functional_test.mod new file mode 100644 index 00000000..2997819d Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/functional_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_arcfour.mod b/files/board/arpl/p1/grub/i386-pc/gcry_arcfour.mod new file mode 100644 index 00000000..d94b3a0a Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_arcfour.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_blowfish.mod b/files/board/arpl/p1/grub/i386-pc/gcry_blowfish.mod new file mode 100644 index 00000000..b86fdb83 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_blowfish.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_camellia.mod b/files/board/arpl/p1/grub/i386-pc/gcry_camellia.mod new file mode 100644 index 00000000..b512ee82 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_camellia.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_cast5.mod b/files/board/arpl/p1/grub/i386-pc/gcry_cast5.mod new file mode 100644 index 00000000..ad0ce0a1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_cast5.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_crc.mod 
b/files/board/arpl/p1/grub/i386-pc/gcry_crc.mod new file mode 100644 index 00000000..b6fa8f6d Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_crc.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_des.mod b/files/board/arpl/p1/grub/i386-pc/gcry_des.mod new file mode 100644 index 00000000..9c2e2b02 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_des.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_dsa.mod b/files/board/arpl/p1/grub/i386-pc/gcry_dsa.mod new file mode 100644 index 00000000..010e6bae Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_dsa.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_idea.mod b/files/board/arpl/p1/grub/i386-pc/gcry_idea.mod new file mode 100644 index 00000000..051b4ecc Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_idea.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_md4.mod b/files/board/arpl/p1/grub/i386-pc/gcry_md4.mod new file mode 100644 index 00000000..fd08dc7f Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_md4.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_md5.mod b/files/board/arpl/p1/grub/i386-pc/gcry_md5.mod new file mode 100644 index 00000000..15721e05 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_md5.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_rfc2268.mod b/files/board/arpl/p1/grub/i386-pc/gcry_rfc2268.mod new file mode 100644 index 00000000..6bf1a7c1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_rfc2268.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_rijndael.mod b/files/board/arpl/p1/grub/i386-pc/gcry_rijndael.mod new file mode 100644 index 00000000..282e589b Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_rijndael.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_rmd160.mod b/files/board/arpl/p1/grub/i386-pc/gcry_rmd160.mod new file mode 100644 index 00000000..d0fe35f8 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_rmd160.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_rsa.mod b/files/board/arpl/p1/grub/i386-pc/gcry_rsa.mod new file mode 100644 index 00000000..b637f275 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_rsa.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_seed.mod b/files/board/arpl/p1/grub/i386-pc/gcry_seed.mod new file mode 100644 index 00000000..30c1ad09 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_seed.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_serpent.mod b/files/board/arpl/p1/grub/i386-pc/gcry_serpent.mod new file mode 100644 index 00000000..eaba69a9 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_serpent.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_sha1.mod b/files/board/arpl/p1/grub/i386-pc/gcry_sha1.mod new file mode 100644 index 00000000..aa6eca41 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_sha1.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_sha256.mod b/files/board/arpl/p1/grub/i386-pc/gcry_sha256.mod new file mode 100644 index 00000000..706fccb4 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_sha256.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_sha512.mod b/files/board/arpl/p1/grub/i386-pc/gcry_sha512.mod new file mode 100644 index 00000000..dad0b4ca Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_sha512.mod differ diff --git 
a/files/board/arpl/p1/grub/i386-pc/gcry_tiger.mod b/files/board/arpl/p1/grub/i386-pc/gcry_tiger.mod new file mode 100644 index 00000000..e6d77246 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_tiger.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_twofish.mod b/files/board/arpl/p1/grub/i386-pc/gcry_twofish.mod new file mode 100644 index 00000000..36e9f5df Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_twofish.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gcry_whirlpool.mod b/files/board/arpl/p1/grub/i386-pc/gcry_whirlpool.mod new file mode 100644 index 00000000..c3f62d16 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gcry_whirlpool.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gdb.mod b/files/board/arpl/p1/grub/i386-pc/gdb.mod new file mode 100644 index 00000000..2f914025 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gdb.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/geli.mod b/files/board/arpl/p1/grub/i386-pc/geli.mod new file mode 100644 index 00000000..3f1eeb91 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/geli.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gettext.mod b/files/board/arpl/p1/grub/i386-pc/gettext.mod new file mode 100644 index 00000000..03b1a468 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gettext.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gfxmenu.mod b/files/board/arpl/p1/grub/i386-pc/gfxmenu.mod new file mode 100644 index 00000000..d88819a0 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gfxmenu.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gfxterm.mod b/files/board/arpl/p1/grub/i386-pc/gfxterm.mod new file mode 100644 index 00000000..a44eff31 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gfxterm.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gfxterm_background.mod b/files/board/arpl/p1/grub/i386-pc/gfxterm_background.mod new file mode 100644 index 00000000..b78e7de8 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gfxterm_background.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gfxterm_menu.mod b/files/board/arpl/p1/grub/i386-pc/gfxterm_menu.mod new file mode 100644 index 00000000..71818056 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gfxterm_menu.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gptsync.mod b/files/board/arpl/p1/grub/i386-pc/gptsync.mod new file mode 100644 index 00000000..4255280d Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gptsync.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/gzio.mod b/files/board/arpl/p1/grub/i386-pc/gzio.mod new file mode 100644 index 00000000..9f1ce2ca Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/gzio.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/halt.mod b/files/board/arpl/p1/grub/i386-pc/halt.mod new file mode 100644 index 00000000..96027211 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/halt.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/hashsum.mod b/files/board/arpl/p1/grub/i386-pc/hashsum.mod new file mode 100644 index 00000000..a7b2f8e9 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/hashsum.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/hdparm.mod b/files/board/arpl/p1/grub/i386-pc/hdparm.mod new file mode 100644 index 00000000..a0198129 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/hdparm.mod differ diff --git 
a/files/board/arpl/p1/grub/i386-pc/hello.mod b/files/board/arpl/p1/grub/i386-pc/hello.mod new file mode 100644 index 00000000..1ca320ae Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/hello.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/help.mod b/files/board/arpl/p1/grub/i386-pc/help.mod new file mode 100644 index 00000000..d7cd9947 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/help.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/hexdump.mod b/files/board/arpl/p1/grub/i386-pc/hexdump.mod new file mode 100644 index 00000000..1826b028 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/hexdump.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/hfs.mod b/files/board/arpl/p1/grub/i386-pc/hfs.mod new file mode 100644 index 00000000..198f6e9c Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/hfs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/hfsplus.mod b/files/board/arpl/p1/grub/i386-pc/hfsplus.mod new file mode 100644 index 00000000..467f0cb1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/hfsplus.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/hfspluscomp.mod b/files/board/arpl/p1/grub/i386-pc/hfspluscomp.mod new file mode 100644 index 00000000..fe632574 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/hfspluscomp.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/http.mod b/files/board/arpl/p1/grub/i386-pc/http.mod new file mode 100644 index 00000000..626d47b1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/http.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/iorw.mod b/files/board/arpl/p1/grub/i386-pc/iorw.mod new file mode 100644 index 00000000..b025d534 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/iorw.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/iso9660.mod b/files/board/arpl/p1/grub/i386-pc/iso9660.mod new file mode 100644 index 00000000..9124aa6d Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/iso9660.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/jfs.mod b/files/board/arpl/p1/grub/i386-pc/jfs.mod new file mode 100644 index 00000000..778998db Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/jfs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/jpeg.mod b/files/board/arpl/p1/grub/i386-pc/jpeg.mod new file mode 100644 index 00000000..5cdd4e07 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/jpeg.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/keylayouts.mod b/files/board/arpl/p1/grub/i386-pc/keylayouts.mod new file mode 100644 index 00000000..91ef8f32 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/keylayouts.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/keystatus.mod b/files/board/arpl/p1/grub/i386-pc/keystatus.mod new file mode 100644 index 00000000..645926d7 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/keystatus.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/ldm.mod b/files/board/arpl/p1/grub/i386-pc/ldm.mod new file mode 100644 index 00000000..b53c8bd5 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ldm.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/legacy_password_test.mod b/files/board/arpl/p1/grub/i386-pc/legacy_password_test.mod new file mode 100644 index 00000000..250b6929 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/legacy_password_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/legacycfg.mod 
b/files/board/arpl/p1/grub/i386-pc/legacycfg.mod new file mode 100644 index 00000000..f06edca4 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/legacycfg.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/linux.mod b/files/board/arpl/p1/grub/i386-pc/linux.mod new file mode 100644 index 00000000..6895472f Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/linux.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/linux16.mod b/files/board/arpl/p1/grub/i386-pc/linux16.mod new file mode 100644 index 00000000..e3f82c47 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/linux16.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/load.cfg b/files/board/arpl/p1/grub/i386-pc/load.cfg new file mode 100644 index 00000000..0de9e31b --- /dev/null +++ b/files/board/arpl/p1/grub/i386-pc/load.cfg @@ -0,0 +1,2 @@ +search.fs_uuid 391019b3-a8f7-405b-917c-69da59ccbc23 root hd0,gpt2 +set prefix=($root)'/tmp/grub' diff --git a/files/board/arpl/p1/grub/i386-pc/loadenv.mod b/files/board/arpl/p1/grub/i386-pc/loadenv.mod new file mode 100644 index 00000000..082823ea Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/loadenv.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/loopback.mod b/files/board/arpl/p1/grub/i386-pc/loopback.mod new file mode 100644 index 00000000..577293d4 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/loopback.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/ls.mod b/files/board/arpl/p1/grub/i386-pc/ls.mod new file mode 100644 index 00000000..1532200f Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ls.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/lsacpi.mod b/files/board/arpl/p1/grub/i386-pc/lsacpi.mod new file mode 100644 index 00000000..a38b4976 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/lsacpi.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/lsapm.mod b/files/board/arpl/p1/grub/i386-pc/lsapm.mod new file mode 100644 index 00000000..f8c94495 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/lsapm.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/lsmmap.mod b/files/board/arpl/p1/grub/i386-pc/lsmmap.mod new file mode 100644 index 00000000..c26fefe1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/lsmmap.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/lspci.mod b/files/board/arpl/p1/grub/i386-pc/lspci.mod new file mode 100644 index 00000000..da70e482 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/lspci.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/luks.mod b/files/board/arpl/p1/grub/i386-pc/luks.mod new file mode 100644 index 00000000..af343832 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/luks.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/lvm.mod b/files/board/arpl/p1/grub/i386-pc/lvm.mod new file mode 100644 index 00000000..6e2f7ede Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/lvm.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/lzopio.mod b/files/board/arpl/p1/grub/i386-pc/lzopio.mod new file mode 100644 index 00000000..c84e3c50 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/lzopio.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/macbless.mod b/files/board/arpl/p1/grub/i386-pc/macbless.mod new file mode 100644 index 00000000..939f2f2b Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/macbless.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/macho.mod 
b/files/board/arpl/p1/grub/i386-pc/macho.mod new file mode 100644 index 00000000..9136e54a Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/macho.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/mda_text.mod b/files/board/arpl/p1/grub/i386-pc/mda_text.mod new file mode 100644 index 00000000..c5a1db68 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/mda_text.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/mdraid09.mod b/files/board/arpl/p1/grub/i386-pc/mdraid09.mod new file mode 100644 index 00000000..cd6a8398 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/mdraid09.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/mdraid09_be.mod b/files/board/arpl/p1/grub/i386-pc/mdraid09_be.mod new file mode 100644 index 00000000..58cbab26 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/mdraid09_be.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/mdraid1x.mod b/files/board/arpl/p1/grub/i386-pc/mdraid1x.mod new file mode 100644 index 00000000..873b5f89 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/mdraid1x.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/memdisk.mod b/files/board/arpl/p1/grub/i386-pc/memdisk.mod new file mode 100644 index 00000000..441e4aec Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/memdisk.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/memrw.mod b/files/board/arpl/p1/grub/i386-pc/memrw.mod new file mode 100644 index 00000000..3e6e1640 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/memrw.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/minicmd.mod b/files/board/arpl/p1/grub/i386-pc/minicmd.mod new file mode 100644 index 00000000..7e53e0c7 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/minicmd.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/minix.mod b/files/board/arpl/p1/grub/i386-pc/minix.mod new file mode 100644 index 00000000..c3a89898 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/minix.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/minix2.mod b/files/board/arpl/p1/grub/i386-pc/minix2.mod new file mode 100644 index 00000000..7b44af07 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/minix2.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/minix2_be.mod b/files/board/arpl/p1/grub/i386-pc/minix2_be.mod new file mode 100644 index 00000000..3f8659c1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/minix2_be.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/minix3.mod b/files/board/arpl/p1/grub/i386-pc/minix3.mod new file mode 100644 index 00000000..370f40f7 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/minix3.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/minix3_be.mod b/files/board/arpl/p1/grub/i386-pc/minix3_be.mod new file mode 100644 index 00000000..d347d494 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/minix3_be.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/minix_be.mod b/files/board/arpl/p1/grub/i386-pc/minix_be.mod new file mode 100644 index 00000000..744eede1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/minix_be.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/mmap.mod b/files/board/arpl/p1/grub/i386-pc/mmap.mod new file mode 100644 index 00000000..85f63c7e Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/mmap.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/modinfo.sh b/files/board/arpl/p1/grub/i386-pc/modinfo.sh new file mode 100644 
index 00000000..84d8bc3a
--- /dev/null
+++ b/files/board/arpl/p1/grub/i386-pc/modinfo.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# User-controllable options
+grub_modinfo_target_cpu=i386
+grub_modinfo_platform=pc
+grub_disk_cache_stats=0
+grub_boot_time_stats=0
+grub_have_font_source=0
+
+# Autodetected config
+grub_have_asm_uscore=0
+grub_bss_start_symbol="__bss_start"
+grub_end_symbol="end"
+
+# Build environment
+grub_target_cc='/home/fabio/dirgit/arpl/buildroot-2022.02.2/output/host/bin/x86_64-buildroot-linux-gnu-gcc'
+grub_target_cc_version='x86_64-buildroot-linux-gnu-gcc.br_real (Buildroot -gf34edcf) 10.3.0'
+grub_target_cflags='-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Os -g0 -D_FORTIFY_SOURCE=1 -Os -Wall -W -Wshadow -Wpointer-arith -Wundef -Wchar-subscripts -Wcomment -Wdeprecated-declarations -Wdisabled-optimization -Wdiv-by-zero -Wfloat-equal -Wformat-extra-args -Wformat-security -Wformat-y2k -Wimplicit -Wimplicit-function-declaration -Wimplicit-int -Wmain -Wmissing-braces -Wmissing-format-attribute -Wmultichar -Wparentheses -Wreturn-type -Wsequence-point -Wshadow -Wsign-compare -Wswitch -Wtrigraphs -Wunknown-pragmas -Wunused -Wunused-function -Wunused-label -Wunused-parameter -Wunused-value -Wunused-variable -Wwrite-strings -Wnested-externs -Wstrict-prototypes -g -Wredundant-decls -Wmissing-prototypes -Wmissing-declarations -Wextra -Wattributes -Wendif-labels -Winit-self -Wint-to-pointer-cast -Winvalid-pch -Wmissing-field-initializers -Wnonnull -Woverflow -Wvla -Wpointer-to-int-cast -Wstrict-aliasing -Wvariadic-macros -Wvolatile-register-var -Wpointer-sign -Wmissing-include-dirs -Wmissing-prototypes -Wmissing-declarations -Wformat=2 -march=i386 -m32 -mrtd -mregparm=3 -falign-jumps=1 -falign-loops=1 -falign-functions=1 -freg-struct-return -mno-mmx -mno-sse -mno-sse2 -mno-sse3 -mno-3dnow -msoft-float -fno-dwarf2-cfi-asm -mno-stack-arg-probe -fno-asynchronous-unwind-tables -fno-unwind-tables -fno-PIE -fno-pie -fno-stack-protector -Wtrampolines'
+grub_target_cppflags='-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Os -fno-stack-protector -Wall -W -DGRUB_MACHINE_PCBIOS=1 -DGRUB_MACHINE=I386_PC -m32 -nostdinc -isystem /home/fabio/dirgit/arpl/buildroot-2022.02.2/output/host/lib/gcc/x86_64-buildroot-linux-gnu/10.3.0/include -I$(top_srcdir)/include -I$(top_builddir)/include'
+grub_target_ccasflags=' -g -m32 -msoft-float'
+grub_target_ldflags=' -Os -m32 -Wl,-melf_i386 -no-pie -Wl,--build-id=none'
+grub_cflags='-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Os -g0 -D_FORTIFY_SOURCE=1 -Os'
+grub_cppflags='-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Os -fno-stack-protector -D_FILE_OFFSET_BITS=64'
+grub_ccasflags='-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Os -g0 -D_FORTIFY_SOURCE=1 -Os'
+grub_ldflags=''
+grub_target_strip='/home/fabio/dirgit/arpl/buildroot-2022.02.2/output/host/bin/x86_64-buildroot-linux-gnu-strip'
+grub_target_nm='/home/fabio/dirgit/arpl/buildroot-2022.02.2/output/host/bin/x86_64-buildroot-linux-gnu-gcc-nm'
+grub_target_ranlib='ranlib'
+grub_target_objconf=''
+grub_target_obj2elf=''
+grub_target_img_base_ldopt='-Wl,-Ttext'
+grub_target_img_ldflags='@TARGET_IMG_BASE_LDFLAGS@'
+
+# Version
+grub_version="2.04"
+grub_package="grub"
+grub_package_string="GRUB 2.04"
+grub_package_version="2.04"
+grub_package_name="GRUB"
+grub_package_bugreport="bug-grub@gnu.org"
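modinfo.sh above is emitted by the GRUB 2.04 build to record the exact toolchain, flags, and version baked into these i386-pc modules (the /home/fabio/... paths are the author's buildroot host and will differ per build). Since it is plain shell, it can be sourced to query that build metadata; a small sketch, assuming the repository layout shown in this commit:

#!/bin/bash
# Sketch: read the recorded GRUB build configuration from this repo's tree.
. files/board/arpl/p1/grub/i386-pc/modinfo.sh
echo "${grub_package_string} (${grub_modinfo_target_cpu}-${grub_modinfo_platform})"
echo "compiler: ${grub_target_cc}"

diff --git a/files/board/arpl/p1/grub/i386-pc/morse.mod b/files/board/arpl/p1/grub/i386-pc/morse.mod
new file mode 100644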
index 00000000..e89353e8 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/morse.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/mpi.mod b/files/board/arpl/p1/grub/i386-pc/mpi.mod new file mode 100644 index 00000000..319fe937 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/mpi.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/msdospart.mod b/files/board/arpl/p1/grub/i386-pc/msdospart.mod new file mode 100644 index 00000000..320e0c06 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/msdospart.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/mul_test.mod b/files/board/arpl/p1/grub/i386-pc/mul_test.mod new file mode 100644 index 00000000..256410cc Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/mul_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/multiboot.mod b/files/board/arpl/p1/grub/i386-pc/multiboot.mod new file mode 100644 index 00000000..6dbfeb3c Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/multiboot.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/multiboot2.mod b/files/board/arpl/p1/grub/i386-pc/multiboot2.mod new file mode 100644 index 00000000..5f4c3a24 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/multiboot2.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/nativedisk.mod b/files/board/arpl/p1/grub/i386-pc/nativedisk.mod new file mode 100644 index 00000000..32ac2a6e Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/nativedisk.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/net.mod b/files/board/arpl/p1/grub/i386-pc/net.mod new file mode 100644 index 00000000..3120f2af Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/net.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/newc.mod b/files/board/arpl/p1/grub/i386-pc/newc.mod new file mode 100644 index 00000000..62a949eb Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/newc.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/nilfs2.mod b/files/board/arpl/p1/grub/i386-pc/nilfs2.mod new file mode 100644 index 00000000..5168bbd8 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/nilfs2.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/normal.mod b/files/board/arpl/p1/grub/i386-pc/normal.mod new file mode 100644 index 00000000..22f5b61e Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/normal.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/ntfs.mod b/files/board/arpl/p1/grub/i386-pc/ntfs.mod new file mode 100644 index 00000000..9f5b4301 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ntfs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/ntfscomp.mod b/files/board/arpl/p1/grub/i386-pc/ntfscomp.mod new file mode 100644 index 00000000..4cae2da1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ntfscomp.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/ntldr.mod b/files/board/arpl/p1/grub/i386-pc/ntldr.mod new file mode 100644 index 00000000..5ef0b0ed Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ntldr.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/odc.mod b/files/board/arpl/p1/grub/i386-pc/odc.mod new file mode 100644 index 00000000..3fba51fb Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/odc.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/offsetio.mod b/files/board/arpl/p1/grub/i386-pc/offsetio.mod new file mode 100644 index 00000000..57bc300a Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/offsetio.mod 
differ diff --git a/files/board/arpl/p1/grub/i386-pc/ohci.mod b/files/board/arpl/p1/grub/i386-pc/ohci.mod new file mode 100644 index 00000000..3153c941 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ohci.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/part_acorn.mod b/files/board/arpl/p1/grub/i386-pc/part_acorn.mod new file mode 100644 index 00000000..5d1075cc Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/part_acorn.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/part_amiga.mod b/files/board/arpl/p1/grub/i386-pc/part_amiga.mod new file mode 100644 index 00000000..f372ab82 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/part_amiga.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/part_apple.mod b/files/board/arpl/p1/grub/i386-pc/part_apple.mod new file mode 100644 index 00000000..a4d3f240 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/part_apple.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/part_bsd.mod b/files/board/arpl/p1/grub/i386-pc/part_bsd.mod new file mode 100644 index 00000000..7a4f0e83 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/part_bsd.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/part_dfly.mod b/files/board/arpl/p1/grub/i386-pc/part_dfly.mod new file mode 100644 index 00000000..a32c0011 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/part_dfly.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/part_dvh.mod b/files/board/arpl/p1/grub/i386-pc/part_dvh.mod new file mode 100644 index 00000000..de75cd0e Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/part_dvh.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/part_gpt.mod b/files/board/arpl/p1/grub/i386-pc/part_gpt.mod new file mode 100644 index 00000000..612b1149 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/part_gpt.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/part_msdos.mod b/files/board/arpl/p1/grub/i386-pc/part_msdos.mod new file mode 100644 index 00000000..f72a3a96 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/part_msdos.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/part_plan.mod b/files/board/arpl/p1/grub/i386-pc/part_plan.mod new file mode 100644 index 00000000..f799490b Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/part_plan.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/part_sun.mod b/files/board/arpl/p1/grub/i386-pc/part_sun.mod new file mode 100644 index 00000000..2ec2976c Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/part_sun.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/part_sunpc.mod b/files/board/arpl/p1/grub/i386-pc/part_sunpc.mod new file mode 100644 index 00000000..27e3050a Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/part_sunpc.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/parttool.mod b/files/board/arpl/p1/grub/i386-pc/parttool.mod new file mode 100644 index 00000000..827416f4 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/parttool.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/password.mod b/files/board/arpl/p1/grub/i386-pc/password.mod new file mode 100644 index 00000000..6d68a0a8 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/password.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/password_pbkdf2.mod b/files/board/arpl/p1/grub/i386-pc/password_pbkdf2.mod new file mode 100644 index 00000000..668556dc Binary files /dev/null and 
b/files/board/arpl/p1/grub/i386-pc/password_pbkdf2.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/pata.mod b/files/board/arpl/p1/grub/i386-pc/pata.mod new file mode 100644 index 00000000..5b3e1a07 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/pata.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/pbkdf2.mod b/files/board/arpl/p1/grub/i386-pc/pbkdf2.mod new file mode 100644 index 00000000..a6335aed Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/pbkdf2.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/pbkdf2_test.mod b/files/board/arpl/p1/grub/i386-pc/pbkdf2_test.mod new file mode 100644 index 00000000..87723f81 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/pbkdf2_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/pci.mod b/files/board/arpl/p1/grub/i386-pc/pci.mod new file mode 100644 index 00000000..ddae0e6a Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/pci.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/pcidump.mod b/files/board/arpl/p1/grub/i386-pc/pcidump.mod new file mode 100644 index 00000000..58c40452 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/pcidump.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/pgp.mod b/files/board/arpl/p1/grub/i386-pc/pgp.mod new file mode 100644 index 00000000..6030847d Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/pgp.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/plan9.mod b/files/board/arpl/p1/grub/i386-pc/plan9.mod new file mode 100644 index 00000000..a77a9ac2 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/plan9.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/play.mod b/files/board/arpl/p1/grub/i386-pc/play.mod new file mode 100644 index 00000000..ad11dd16 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/play.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/png.mod b/files/board/arpl/p1/grub/i386-pc/png.mod new file mode 100644 index 00000000..207e4c66 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/png.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/priority_queue.mod b/files/board/arpl/p1/grub/i386-pc/priority_queue.mod new file mode 100644 index 00000000..a59fbf1e Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/priority_queue.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/probe.mod b/files/board/arpl/p1/grub/i386-pc/probe.mod new file mode 100644 index 00000000..c42745b8 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/probe.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/procfs.mod b/files/board/arpl/p1/grub/i386-pc/procfs.mod new file mode 100644 index 00000000..be1668eb Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/procfs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/progress.mod b/files/board/arpl/p1/grub/i386-pc/progress.mod new file mode 100644 index 00000000..87ff026f Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/progress.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/pxe.mod b/files/board/arpl/p1/grub/i386-pc/pxe.mod new file mode 100644 index 00000000..8161a1ea Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/pxe.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/pxechain.mod b/files/board/arpl/p1/grub/i386-pc/pxechain.mod new file mode 100644 index 00000000..84608dd0 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/pxechain.mod differ diff --git 
a/files/board/arpl/p1/grub/i386-pc/raid5rec.mod b/files/board/arpl/p1/grub/i386-pc/raid5rec.mod new file mode 100644 index 00000000..4cf7149c Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/raid5rec.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/raid6rec.mod b/files/board/arpl/p1/grub/i386-pc/raid6rec.mod new file mode 100644 index 00000000..607b8638 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/raid6rec.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/random.mod b/files/board/arpl/p1/grub/i386-pc/random.mod new file mode 100644 index 00000000..82226e1f Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/random.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/rdmsr.mod b/files/board/arpl/p1/grub/i386-pc/rdmsr.mod new file mode 100644 index 00000000..6150a030 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/rdmsr.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/read.mod b/files/board/arpl/p1/grub/i386-pc/read.mod new file mode 100644 index 00000000..cf1436c3 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/read.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/reboot.mod b/files/board/arpl/p1/grub/i386-pc/reboot.mod new file mode 100644 index 00000000..62f1e654 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/reboot.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/regexp.mod b/files/board/arpl/p1/grub/i386-pc/regexp.mod new file mode 100644 index 00000000..afd34ede Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/regexp.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/reiserfs.mod b/files/board/arpl/p1/grub/i386-pc/reiserfs.mod new file mode 100644 index 00000000..01ea6d1d Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/reiserfs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/relocator.mod b/files/board/arpl/p1/grub/i386-pc/relocator.mod new file mode 100644 index 00000000..322a5bd2 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/relocator.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/romfs.mod b/files/board/arpl/p1/grub/i386-pc/romfs.mod new file mode 100644 index 00000000..3d143292 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/romfs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/scsi.mod b/files/board/arpl/p1/grub/i386-pc/scsi.mod new file mode 100644 index 00000000..4be0a8dc Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/scsi.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/search.mod b/files/board/arpl/p1/grub/i386-pc/search.mod new file mode 100644 index 00000000..bfa83200 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/search.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/search_fs_file.mod b/files/board/arpl/p1/grub/i386-pc/search_fs_file.mod new file mode 100644 index 00000000..2069c9fc Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/search_fs_file.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/search_fs_uuid.mod b/files/board/arpl/p1/grub/i386-pc/search_fs_uuid.mod new file mode 100644 index 00000000..4a73ae5c Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/search_fs_uuid.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/search_label.mod b/files/board/arpl/p1/grub/i386-pc/search_label.mod new file mode 100644 index 00000000..5f67a792 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/search_label.mod differ diff --git 
a/files/board/arpl/p1/grub/i386-pc/sendkey.mod b/files/board/arpl/p1/grub/i386-pc/sendkey.mod new file mode 100644 index 00000000..b4eff04d Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/sendkey.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/serial.mod b/files/board/arpl/p1/grub/i386-pc/serial.mod new file mode 100644 index 00000000..f33df995 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/serial.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/setjmp.mod b/files/board/arpl/p1/grub/i386-pc/setjmp.mod new file mode 100644 index 00000000..2eb2a431 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/setjmp.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/setjmp_test.mod b/files/board/arpl/p1/grub/i386-pc/setjmp_test.mod new file mode 100644 index 00000000..1711d314 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/setjmp_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/setpci.mod b/files/board/arpl/p1/grub/i386-pc/setpci.mod new file mode 100644 index 00000000..917561da Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/setpci.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/sfs.mod b/files/board/arpl/p1/grub/i386-pc/sfs.mod new file mode 100644 index 00000000..10447521 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/sfs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/shift_test.mod b/files/board/arpl/p1/grub/i386-pc/shift_test.mod new file mode 100644 index 00000000..1695a187 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/shift_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/signature_test.mod b/files/board/arpl/p1/grub/i386-pc/signature_test.mod new file mode 100644 index 00000000..19894a97 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/signature_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/sleep.mod b/files/board/arpl/p1/grub/i386-pc/sleep.mod new file mode 100644 index 00000000..31612225 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/sleep.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/sleep_test.mod b/files/board/arpl/p1/grub/i386-pc/sleep_test.mod new file mode 100644 index 00000000..2c537f06 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/sleep_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/spkmodem.mod b/files/board/arpl/p1/grub/i386-pc/spkmodem.mod new file mode 100644 index 00000000..a375f383 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/spkmodem.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/squash4.mod b/files/board/arpl/p1/grub/i386-pc/squash4.mod new file mode 100644 index 00000000..4c3d8611 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/squash4.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/strtoull_test.mod b/files/board/arpl/p1/grub/i386-pc/strtoull_test.mod new file mode 100644 index 00000000..00e320c1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/strtoull_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/syslinuxcfg.mod b/files/board/arpl/p1/grub/i386-pc/syslinuxcfg.mod new file mode 100644 index 00000000..e2f0b2d5 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/syslinuxcfg.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/tar.mod b/files/board/arpl/p1/grub/i386-pc/tar.mod new file mode 100644 index 00000000..c94a51e9 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/tar.mod differ diff --git 
a/files/board/arpl/p1/grub/i386-pc/terminal.mod b/files/board/arpl/p1/grub/i386-pc/terminal.mod new file mode 100644 index 00000000..4fc213ea Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/terminal.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/terminfo.mod b/files/board/arpl/p1/grub/i386-pc/terminfo.mod new file mode 100644 index 00000000..5e11f8c9 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/terminfo.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/test.mod b/files/board/arpl/p1/grub/i386-pc/test.mod new file mode 100644 index 00000000..52947185 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/test_blockarg.mod b/files/board/arpl/p1/grub/i386-pc/test_blockarg.mod new file mode 100644 index 00000000..e8d71e4c Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/test_blockarg.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/testload.mod b/files/board/arpl/p1/grub/i386-pc/testload.mod new file mode 100644 index 00000000..bfeb1803 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/testload.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/testspeed.mod b/files/board/arpl/p1/grub/i386-pc/testspeed.mod new file mode 100644 index 00000000..87096d09 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/testspeed.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/tftp.mod b/files/board/arpl/p1/grub/i386-pc/tftp.mod new file mode 100644 index 00000000..ab642975 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/tftp.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/tga.mod b/files/board/arpl/p1/grub/i386-pc/tga.mod new file mode 100644 index 00000000..f0445181 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/tga.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/time.mod b/files/board/arpl/p1/grub/i386-pc/time.mod new file mode 100644 index 00000000..14558ffc Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/time.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/tr.mod b/files/board/arpl/p1/grub/i386-pc/tr.mod new file mode 100644 index 00000000..afe9cab0 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/tr.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/trig.mod b/files/board/arpl/p1/grub/i386-pc/trig.mod new file mode 100644 index 00000000..7c9ffd58 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/trig.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/true.mod b/files/board/arpl/p1/grub/i386-pc/true.mod new file mode 100644 index 00000000..de1771a1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/true.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/truecrypt.mod b/files/board/arpl/p1/grub/i386-pc/truecrypt.mod new file mode 100644 index 00000000..73d1ee83 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/truecrypt.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/udf.mod b/files/board/arpl/p1/grub/i386-pc/udf.mod new file mode 100644 index 00000000..f65913b1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/udf.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/ufs1.mod b/files/board/arpl/p1/grub/i386-pc/ufs1.mod new file mode 100644 index 00000000..f085d73b Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ufs1.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/ufs1_be.mod b/files/board/arpl/p1/grub/i386-pc/ufs1_be.mod new file mode 100644 index 
00000000..894e755e Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ufs1_be.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/ufs2.mod b/files/board/arpl/p1/grub/i386-pc/ufs2.mod new file mode 100644 index 00000000..faaa687d Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/ufs2.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/uhci.mod b/files/board/arpl/p1/grub/i386-pc/uhci.mod new file mode 100644 index 00000000..f527cd1c Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/uhci.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/usb.mod b/files/board/arpl/p1/grub/i386-pc/usb.mod new file mode 100644 index 00000000..79a23a1c Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/usb.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/usb_keyboard.mod b/files/board/arpl/p1/grub/i386-pc/usb_keyboard.mod new file mode 100644 index 00000000..8ed3c8e2 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/usb_keyboard.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/usbms.mod b/files/board/arpl/p1/grub/i386-pc/usbms.mod new file mode 100644 index 00000000..f4239435 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/usbms.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/usbserial_common.mod b/files/board/arpl/p1/grub/i386-pc/usbserial_common.mod new file mode 100644 index 00000000..3fd65ce4 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/usbserial_common.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/usbserial_ftdi.mod b/files/board/arpl/p1/grub/i386-pc/usbserial_ftdi.mod new file mode 100644 index 00000000..c29767c1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/usbserial_ftdi.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/usbserial_pl2303.mod b/files/board/arpl/p1/grub/i386-pc/usbserial_pl2303.mod new file mode 100644 index 00000000..01cc89e4 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/usbserial_pl2303.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/usbserial_usbdebug.mod b/files/board/arpl/p1/grub/i386-pc/usbserial_usbdebug.mod new file mode 100644 index 00000000..854a9cb0 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/usbserial_usbdebug.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/usbtest.mod b/files/board/arpl/p1/grub/i386-pc/usbtest.mod new file mode 100644 index 00000000..f212901a Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/usbtest.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/vbe.mod b/files/board/arpl/p1/grub/i386-pc/vbe.mod new file mode 100644 index 00000000..055aea09 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/vbe.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/vga.mod b/files/board/arpl/p1/grub/i386-pc/vga.mod new file mode 100644 index 00000000..d5153c95 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/vga.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/vga_text.mod b/files/board/arpl/p1/grub/i386-pc/vga_text.mod new file mode 100644 index 00000000..a60d59b1 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/vga_text.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/video.mod b/files/board/arpl/p1/grub/i386-pc/video.mod new file mode 100644 index 00000000..68fa8821 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/video.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/video_bochs.mod b/files/board/arpl/p1/grub/i386-pc/video_bochs.mod new file mode 100644 index 
00000000..0d9e5d05 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/video_bochs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/video_cirrus.mod b/files/board/arpl/p1/grub/i386-pc/video_cirrus.mod new file mode 100644 index 00000000..4f1024bc Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/video_cirrus.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/video_colors.mod b/files/board/arpl/p1/grub/i386-pc/video_colors.mod new file mode 100644 index 00000000..01c0cdbe Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/video_colors.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/video_fb.mod b/files/board/arpl/p1/grub/i386-pc/video_fb.mod new file mode 100644 index 00000000..aadd30c8 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/video_fb.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/videoinfo.mod b/files/board/arpl/p1/grub/i386-pc/videoinfo.mod new file mode 100644 index 00000000..781417bd Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/videoinfo.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/videotest.mod b/files/board/arpl/p1/grub/i386-pc/videotest.mod new file mode 100644 index 00000000..52990445 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/videotest.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/videotest_checksum.mod b/files/board/arpl/p1/grub/i386-pc/videotest_checksum.mod new file mode 100644 index 00000000..3c2baef3 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/videotest_checksum.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/wrmsr.mod b/files/board/arpl/p1/grub/i386-pc/wrmsr.mod new file mode 100644 index 00000000..f6b046eb Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/wrmsr.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/xfs.mod b/files/board/arpl/p1/grub/i386-pc/xfs.mod new file mode 100644 index 00000000..260869f0 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/xfs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/xnu.mod b/files/board/arpl/p1/grub/i386-pc/xnu.mod new file mode 100644 index 00000000..b541e908 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/xnu.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/xnu_uuid.mod b/files/board/arpl/p1/grub/i386-pc/xnu_uuid.mod new file mode 100644 index 00000000..64372325 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/xnu_uuid.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/xnu_uuid_test.mod b/files/board/arpl/p1/grub/i386-pc/xnu_uuid_test.mod new file mode 100644 index 00000000..bdb1bfb7 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/xnu_uuid_test.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/xzio.mod b/files/board/arpl/p1/grub/i386-pc/xzio.mod new file mode 100644 index 00000000..f5828eef Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/xzio.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/zfs.mod b/files/board/arpl/p1/grub/i386-pc/zfs.mod new file mode 100644 index 00000000..e65f8710 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/zfs.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/zfscrypt.mod b/files/board/arpl/p1/grub/i386-pc/zfscrypt.mod new file mode 100644 index 00000000..94b1e8c8 Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/zfscrypt.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/zfsinfo.mod b/files/board/arpl/p1/grub/i386-pc/zfsinfo.mod new file mode 100644 index 00000000..d24228b2 
Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/zfsinfo.mod differ diff --git a/files/board/arpl/p1/grub/i386-pc/zstd.mod b/files/board/arpl/p1/grub/i386-pc/zstd.mod new file mode 100644 index 00000000..16a7e84b Binary files /dev/null and b/files/board/arpl/p1/grub/i386-pc/zstd.mod differ diff --git a/files/board/arpl/p1/grub/locale/ast.mo b/files/board/arpl/p1/grub/locale/ast.mo new file mode 100644 index 00000000..95aedbfb Binary files /dev/null and b/files/board/arpl/p1/grub/locale/ast.mo differ diff --git a/files/board/arpl/p1/grub/locale/ca.mo b/files/board/arpl/p1/grub/locale/ca.mo new file mode 100644 index 00000000..6994a493 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/ca.mo differ diff --git a/files/board/arpl/p1/grub/locale/da.mo b/files/board/arpl/p1/grub/locale/da.mo new file mode 100644 index 00000000..85d8d75f Binary files /dev/null and b/files/board/arpl/p1/grub/locale/da.mo differ diff --git a/files/board/arpl/p1/grub/locale/de.mo b/files/board/arpl/p1/grub/locale/de.mo new file mode 100644 index 00000000..98839da9 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/de.mo differ diff --git a/files/board/arpl/p1/grub/locale/de@hebrew.mo b/files/board/arpl/p1/grub/locale/de@hebrew.mo new file mode 100644 index 00000000..72cd4d17 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/de@hebrew.mo differ diff --git a/files/board/arpl/p1/grub/locale/de_CH.mo b/files/board/arpl/p1/grub/locale/de_CH.mo new file mode 100644 index 00000000..2aa731d9 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/de_CH.mo differ diff --git a/files/board/arpl/p1/grub/locale/en@arabic.mo b/files/board/arpl/p1/grub/locale/en@arabic.mo new file mode 100644 index 00000000..2b70806e Binary files /dev/null and b/files/board/arpl/p1/grub/locale/en@arabic.mo differ diff --git a/files/board/arpl/p1/grub/locale/en@cyrillic.mo b/files/board/arpl/p1/grub/locale/en@cyrillic.mo new file mode 100644 index 00000000..5ff30d2e Binary files /dev/null and b/files/board/arpl/p1/grub/locale/en@cyrillic.mo differ diff --git a/files/board/arpl/p1/grub/locale/en@greek.mo b/files/board/arpl/p1/grub/locale/en@greek.mo new file mode 100644 index 00000000..43870ac7 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/en@greek.mo differ diff --git a/files/board/arpl/p1/grub/locale/en@hebrew.mo b/files/board/arpl/p1/grub/locale/en@hebrew.mo new file mode 100644 index 00000000..9d7c384a Binary files /dev/null and b/files/board/arpl/p1/grub/locale/en@hebrew.mo differ diff --git a/files/board/arpl/p1/grub/locale/en@piglatin.mo b/files/board/arpl/p1/grub/locale/en@piglatin.mo new file mode 100644 index 00000000..ec681380 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/en@piglatin.mo differ diff --git a/files/board/arpl/p1/grub/locale/en@quot.mo b/files/board/arpl/p1/grub/locale/en@quot.mo new file mode 100644 index 00000000..572572d4 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/en@quot.mo differ diff --git a/files/board/arpl/p1/grub/locale/eo.mo b/files/board/arpl/p1/grub/locale/eo.mo new file mode 100644 index 00000000..39abc31c Binary files /dev/null and b/files/board/arpl/p1/grub/locale/eo.mo differ diff --git a/files/board/arpl/p1/grub/locale/es.mo b/files/board/arpl/p1/grub/locale/es.mo new file mode 100644 index 00000000..e9c77510 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/es.mo differ diff --git a/files/board/arpl/p1/grub/locale/fi.mo b/files/board/arpl/p1/grub/locale/fi.mo new file mode 100644 index 
00000000..126d6070 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/fi.mo differ diff --git a/files/board/arpl/p1/grub/locale/fr.mo b/files/board/arpl/p1/grub/locale/fr.mo new file mode 100644 index 00000000..116410b4 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/fr.mo differ diff --git a/files/board/arpl/p1/grub/locale/gl.mo b/files/board/arpl/p1/grub/locale/gl.mo new file mode 100644 index 00000000..219ef4c0 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/gl.mo differ diff --git a/files/board/arpl/p1/grub/locale/hr.mo b/files/board/arpl/p1/grub/locale/hr.mo new file mode 100644 index 00000000..fda50441 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/hr.mo differ diff --git a/files/board/arpl/p1/grub/locale/hu.mo b/files/board/arpl/p1/grub/locale/hu.mo new file mode 100644 index 00000000..5813ae24 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/hu.mo differ diff --git a/files/board/arpl/p1/grub/locale/id.mo b/files/board/arpl/p1/grub/locale/id.mo new file mode 100644 index 00000000..1a021c8a Binary files /dev/null and b/files/board/arpl/p1/grub/locale/id.mo differ diff --git a/files/board/arpl/p1/grub/locale/it.mo b/files/board/arpl/p1/grub/locale/it.mo new file mode 100644 index 00000000..9228f68a Binary files /dev/null and b/files/board/arpl/p1/grub/locale/it.mo differ diff --git a/files/board/arpl/p1/grub/locale/ja.mo b/files/board/arpl/p1/grub/locale/ja.mo new file mode 100644 index 00000000..0661a0a9 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/ja.mo differ diff --git a/files/board/arpl/p1/grub/locale/ko.mo b/files/board/arpl/p1/grub/locale/ko.mo new file mode 100644 index 00000000..85e13cb8 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/ko.mo differ diff --git a/files/board/arpl/p1/grub/locale/lg.mo b/files/board/arpl/p1/grub/locale/lg.mo new file mode 100644 index 00000000..1f0ff338 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/lg.mo differ diff --git a/files/board/arpl/p1/grub/locale/lt.mo b/files/board/arpl/p1/grub/locale/lt.mo new file mode 100644 index 00000000..4e95019b Binary files /dev/null and b/files/board/arpl/p1/grub/locale/lt.mo differ diff --git a/files/board/arpl/p1/grub/locale/nb.mo b/files/board/arpl/p1/grub/locale/nb.mo new file mode 100644 index 00000000..5fac596d Binary files /dev/null and b/files/board/arpl/p1/grub/locale/nb.mo differ diff --git a/files/board/arpl/p1/grub/locale/nl.mo b/files/board/arpl/p1/grub/locale/nl.mo new file mode 100644 index 00000000..110e7845 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/nl.mo differ diff --git a/files/board/arpl/p1/grub/locale/pa.mo b/files/board/arpl/p1/grub/locale/pa.mo new file mode 100644 index 00000000..3c13b0b8 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/pa.mo differ diff --git a/files/board/arpl/p1/grub/locale/pl.mo b/files/board/arpl/p1/grub/locale/pl.mo new file mode 100644 index 00000000..4e20ce98 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/pl.mo differ diff --git a/files/board/arpl/p1/grub/locale/pt.mo b/files/board/arpl/p1/grub/locale/pt.mo new file mode 100644 index 00000000..95a3eddd Binary files /dev/null and b/files/board/arpl/p1/grub/locale/pt.mo differ diff --git a/files/board/arpl/p1/grub/locale/pt_BR.mo b/files/board/arpl/p1/grub/locale/pt_BR.mo new file mode 100644 index 00000000..cbee334b Binary files /dev/null and b/files/board/arpl/p1/grub/locale/pt_BR.mo differ diff --git a/files/board/arpl/p1/grub/locale/ro.mo 
b/files/board/arpl/p1/grub/locale/ro.mo new file mode 100644 index 00000000..b939fc31 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/ro.mo differ diff --git a/files/board/arpl/p1/grub/locale/ru.mo b/files/board/arpl/p1/grub/locale/ru.mo new file mode 100644 index 00000000..086730e9 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/ru.mo differ diff --git a/files/board/arpl/p1/grub/locale/sl.mo b/files/board/arpl/p1/grub/locale/sl.mo new file mode 100644 index 00000000..30c1c527 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/sl.mo differ diff --git a/files/board/arpl/p1/grub/locale/sr.mo b/files/board/arpl/p1/grub/locale/sr.mo new file mode 100644 index 00000000..96e562d8 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/sr.mo differ diff --git a/files/board/arpl/p1/grub/locale/sv.mo b/files/board/arpl/p1/grub/locale/sv.mo new file mode 100644 index 00000000..58601dca Binary files /dev/null and b/files/board/arpl/p1/grub/locale/sv.mo differ diff --git a/files/board/arpl/p1/grub/locale/tr.mo b/files/board/arpl/p1/grub/locale/tr.mo new file mode 100644 index 00000000..7d7b98d3 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/tr.mo differ diff --git a/files/board/arpl/p1/grub/locale/uk.mo b/files/board/arpl/p1/grub/locale/uk.mo new file mode 100644 index 00000000..99a4ce27 Binary files /dev/null and b/files/board/arpl/p1/grub/locale/uk.mo differ diff --git a/files/board/arpl/p1/grub/locale/vi.mo b/files/board/arpl/p1/grub/locale/vi.mo new file mode 100644 index 00000000..be5b2cbb Binary files /dev/null and b/files/board/arpl/p1/grub/locale/vi.mo differ diff --git a/files/board/arpl/p1/grub/locale/zh_CN.mo b/files/board/arpl/p1/grub/locale/zh_CN.mo new file mode 100644 index 00000000..d687dd6b Binary files /dev/null and b/files/board/arpl/p1/grub/locale/zh_CN.mo differ diff --git a/files/board/arpl/p1/grub/locale/zh_TW.mo b/files/board/arpl/p1/grub/locale/zh_TW.mo new file mode 100644 index 00000000..fc15ebda Binary files /dev/null and b/files/board/arpl/p1/grub/locale/zh_TW.mo differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/acpi.mod b/files/board/arpl/p1/grub/x86_64-efi/acpi.mod new file mode 100644 index 00000000..bb3a3356 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/acpi.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/adler32.mod b/files/board/arpl/p1/grub/x86_64-efi/adler32.mod new file mode 100644 index 00000000..813900dd Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/adler32.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/affs.mod b/files/board/arpl/p1/grub/x86_64-efi/affs.mod new file mode 100644 index 00000000..7845b31a Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/affs.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/afs.mod b/files/board/arpl/p1/grub/x86_64-efi/afs.mod new file mode 100644 index 00000000..87e6f0f6 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/afs.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/ahci.mod b/files/board/arpl/p1/grub/x86_64-efi/ahci.mod new file mode 100644 index 00000000..ddf9534b Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/ahci.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/all_video.mod b/files/board/arpl/p1/grub/x86_64-efi/all_video.mod new file mode 100644 index 00000000..0839b743 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/all_video.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/aout.mod 
b/files/board/arpl/p1/grub/x86_64-efi/aout.mod new file mode 100644 index 00000000..19738e62 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/aout.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/appleldr.mod b/files/board/arpl/p1/grub/x86_64-efi/appleldr.mod new file mode 100644 index 00000000..49e03620 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/appleldr.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/archelp.mod b/files/board/arpl/p1/grub/x86_64-efi/archelp.mod new file mode 100644 index 00000000..1ff489bd Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/archelp.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/at_keyboard.mod b/files/board/arpl/p1/grub/x86_64-efi/at_keyboard.mod new file mode 100644 index 00000000..29dc4469 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/at_keyboard.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/ata.mod b/files/board/arpl/p1/grub/x86_64-efi/ata.mod new file mode 100644 index 00000000..1d994f82 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/ata.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/backtrace.mod b/files/board/arpl/p1/grub/x86_64-efi/backtrace.mod new file mode 100644 index 00000000..53caa6f1 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/backtrace.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/bfs.mod b/files/board/arpl/p1/grub/x86_64-efi/bfs.mod new file mode 100644 index 00000000..05dd6d99 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/bfs.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/bitmap.mod b/files/board/arpl/p1/grub/x86_64-efi/bitmap.mod new file mode 100644 index 00000000..9d1b98dd Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/bitmap.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/bitmap_scale.mod b/files/board/arpl/p1/grub/x86_64-efi/bitmap_scale.mod new file mode 100644 index 00000000..bb61844c Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/bitmap_scale.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/blocklist.mod b/files/board/arpl/p1/grub/x86_64-efi/blocklist.mod new file mode 100644 index 00000000..2731eabc Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/blocklist.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/boot.mod b/files/board/arpl/p1/grub/x86_64-efi/boot.mod new file mode 100644 index 00000000..346d1c88 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/boot.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/bsd.mod b/files/board/arpl/p1/grub/x86_64-efi/bsd.mod new file mode 100644 index 00000000..86cb350c Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/bsd.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/bswap_test.mod b/files/board/arpl/p1/grub/x86_64-efi/bswap_test.mod new file mode 100644 index 00000000..be7c1c71 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/bswap_test.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/btrfs.mod b/files/board/arpl/p1/grub/x86_64-efi/btrfs.mod new file mode 100644 index 00000000..e5d9572a Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/btrfs.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/bufio.mod b/files/board/arpl/p1/grub/x86_64-efi/bufio.mod new file mode 100644 index 00000000..5ba3f92e Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/bufio.mod differ diff --git 
a/files/board/arpl/p1/grub/x86_64-efi/cat.mod b/files/board/arpl/p1/grub/x86_64-efi/cat.mod new file mode 100644 index 00000000..23804b2c Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/cat.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/cbfs.mod b/files/board/arpl/p1/grub/x86_64-efi/cbfs.mod new file mode 100644 index 00000000..0fcf4092 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/cbfs.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/cbls.mod b/files/board/arpl/p1/grub/x86_64-efi/cbls.mod new file mode 100644 index 00000000..4c078f67 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/cbls.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/cbmemc.mod b/files/board/arpl/p1/grub/x86_64-efi/cbmemc.mod new file mode 100644 index 00000000..4e60cdc7 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/cbmemc.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/cbtable.mod b/files/board/arpl/p1/grub/x86_64-efi/cbtable.mod new file mode 100644 index 00000000..2396dfd5 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/cbtable.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/cbtime.mod b/files/board/arpl/p1/grub/x86_64-efi/cbtime.mod new file mode 100644 index 00000000..955be0ef Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/cbtime.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/chain.mod b/files/board/arpl/p1/grub/x86_64-efi/chain.mod new file mode 100644 index 00000000..9530095a Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/chain.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/cmdline_cat_test.mod b/files/board/arpl/p1/grub/x86_64-efi/cmdline_cat_test.mod new file mode 100644 index 00000000..4d6dbb57 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/cmdline_cat_test.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/cmp.mod b/files/board/arpl/p1/grub/x86_64-efi/cmp.mod new file mode 100644 index 00000000..d1513b39 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/cmp.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/cmp_test.mod b/files/board/arpl/p1/grub/x86_64-efi/cmp_test.mod new file mode 100644 index 00000000..1615da0e Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/cmp_test.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/command.lst b/files/board/arpl/p1/grub/x86_64-efi/command.lst new file mode 100644 index 00000000..7239dd69 --- /dev/null +++ b/files/board/arpl/p1/grub/x86_64-efi/command.lst @@ -0,0 +1,190 @@ +*acpi: acpi +*all_functional_test: functional_test +*background_image: gfxterm_background +*cat: cat +*cpuid: cpuid +*crc: hashsum +*cryptomount: cryptodisk +*echo: echo +*extract_syslinux_entries_configfile: syslinuxcfg +*extract_syslinux_entries_source: syslinuxcfg +*file: file +*functional_test: functional_test +*gettext: gettext +*hashsum: hashsum +*hdparm: hdparm +*hello: hello +*help: help +*hexdump: hexdump +*inb: iorw +*inl: iorw +*inw: iorw +*keystatus: keystatus +*kfreebsd: bsd +*knetbsd: bsd +*kopenbsd: bsd +*list_env: loadenv +*load_env: loadenv +*loopback: loopback +*ls: ls +*lsacpi: lsacpi +*lspci: lspci +*md5sum: hashsum +*menuentry: normal +*pcidump: pcidump +*probe: probe +*rdmsr: rdmsr +*read_byte: memrw +*read_dword: memrw +*read_word: memrw +*regexp: regexp +*save_env: loadenv +*search: search +*serial: serial +*setpci: setpci +*sha1sum: hashsum +*sha256sum: hashsum +*sha512sum: hashsum +*sleep: sleep 
+*submenu: normal +*syslinux_configfile: syslinuxcfg +*syslinux_source: syslinuxcfg +*terminfo: terminfo +*test_blockarg: test_blockarg +*testspeed: testspeed +*tr: tr +*trust: pgp +*verify_detached: pgp +*xnu_splash: xnu +*zfskey: zfscrypt +.: configfile +[: test +appleloader: appleldr +authenticate: normal +background_color: gfxterm_background +backtrace: backtrace +badram: mmap +blocklist: blocklist +boot: boot +break: normal +cat: minicmd +cbmemc: cbmemc +chainloader: chain +clear: normal +cmp: cmp +configfile: configfile +continue: normal +coreboot_boottime: cbtime +cutmem: mmap +date: date +distrust: pgp +dump: minicmd +eval: eval +exit: minicmd +export: normal +extract_entries_configfile: configfile +extract_entries_source: configfile +extract_legacy_entries_configfile: legacycfg +extract_legacy_entries_source: legacycfg +fakebios: loadbios +false: true +fix_video: fixvideo +fwsetup: efifwsetup +gptsync: gptsync +halt: halt +help: minicmd +hexdump_random: random +initrd16: linux16 +initrd: linux +keymap: keylayouts +kfreebsd_loadenv: bsd +kfreebsd_module: bsd +kfreebsd_module_elf: bsd +knetbsd_module: bsd +knetbsd_module_elf: bsd +kopenbsd_ramdisk: bsd +legacy_check_password: legacycfg +legacy_configfile: legacycfg +legacy_initrd: legacycfg +legacy_initrd_nounzip: legacycfg +legacy_kernel: legacycfg +legacy_password: legacycfg +legacy_source: legacycfg +linux16: linux16 +linux: linux +list_trusted: pgp +loadbios: loadbios +loadfont: font +lscoreboot: cbls +lsefi: lsefi +lsefimmap: lsefimmap +lsefisystab: lsefisystab +lsfonts: font +lsmmap: lsmmap +lsmod: minicmd +lssal: lssal +macppcbless: macbless +mactelbless: macbless +module2: multiboot2 +module: multiboot +multiboot2: multiboot2 +multiboot: multiboot +nativedisk: nativedisk +net_add_addr: net +net_add_dns: net +net_add_route: net +net_bootp: net +net_del_addr: net +net_del_dns: net +net_del_route: net +net_dhcp: net +net_get_dhcp_option: net +net_ipv6_autoconf: net +net_ls_addr: net +net_ls_cards: net +net_ls_dns: net +net_ls_routes: net +net_nslookup: net +normal: normal +normal_exit: normal +outb: iorw +outl: iorw +outw: iorw +parttool: parttool +password: password +password_pbkdf2: password_pbkdf2 +play: play +read: read +reboot: reboot +return: normal +rmmod: minicmd +search.file: search_fs_file +search.fs_label: search_label +search.fs_uuid: search_fs_uuid +setparams: normal +shift: normal +source: configfile +terminal_input: terminal +terminal_output: terminal +test: test +testload: testload +time: time +true: true +usb: usbtest +videoinfo: videoinfo +videotest: videotest +write_byte: memrw +write_dword: memrw +write_word: memrw +wrmsr: wrmsr +xnu_devprop_load: xnu +xnu_kernel64: xnu +xnu_kernel: xnu +xnu_kext: xnu +xnu_kextdir: xnu +xnu_mkext: xnu +xnu_ramdisk: xnu +xnu_resume: xnu +xnu_uuid: xnu_uuid +zfs-bootfs: zfsinfo +zfsinfo: zfsinfo diff --git a/files/board/arpl/p1/grub/x86_64-efi/configfile.mod b/files/board/arpl/p1/grub/x86_64-efi/configfile.mod new file mode 100644 index 00000000..ed4806b4 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/configfile.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/core.efi b/files/board/arpl/p1/grub/x86_64-efi/core.efi new file mode 100644 index 00000000..2208f3d0 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/core.efi differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/cpio.mod b/files/board/arpl/p1/grub/x86_64-efi/cpio.mod new file mode 100644 index 00000000..f9a7b910 Binary files /dev/null and 
b/files/board/arpl/p1/grub/x86_64-efi/cpio.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/cpio_be.mod b/files/board/arpl/p1/grub/x86_64-efi/cpio_be.mod new file mode 100644 index 00000000..a4e4b987 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/cpio_be.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/cpuid.mod b/files/board/arpl/p1/grub/x86_64-efi/cpuid.mod new file mode 100644 index 00000000..e81a5a95 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/cpuid.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/crc64.mod b/files/board/arpl/p1/grub/x86_64-efi/crc64.mod new file mode 100644 index 00000000..5604646b Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/crc64.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/crypto.lst b/files/board/arpl/p1/grub/x86_64-efi/crypto.lst new file mode 100644 index 00000000..77d9efc0 --- /dev/null +++ b/files/board/arpl/p1/grub/x86_64-efi/crypto.lst @@ -0,0 +1,45 @@ +RIJNDAEL: gcry_rijndael +RIJNDAEL192: gcry_rijndael +RIJNDAEL256: gcry_rijndael +AES128: gcry_rijndael +AES-128: gcry_rijndael +AES-192: gcry_rijndael +AES-256: gcry_rijndael +ADLER32: adler32 +CRC64: crc64 +ARCFOUR: gcry_arcfour +BLOWFISH: gcry_blowfish +CAMELLIA128: gcry_camellia +CAMELLIA192: gcry_camellia +CAMELLIA256: gcry_camellia +CAST5: gcry_cast5 +CRC32: gcry_crc +CRC32RFC1510: gcry_crc +CRC24RFC2440: gcry_crc +DES: gcry_des +3DES: gcry_des +DSA: gcry_dsa +IDEA: gcry_idea +MD4: gcry_md4 +MD5: gcry_md5 +RFC2268_40: gcry_rfc2268 +AES: gcry_rijndael +AES192: gcry_rijndael +AES256: gcry_rijndael +RIPEMD160: gcry_rmd160 +RSA: gcry_rsa +SEED: gcry_seed +SERPENT128: gcry_serpent +SERPENT192: gcry_serpent +SERPENT256: gcry_serpent +SHA1: gcry_sha1 +SHA224: gcry_sha256 +SHA256: gcry_sha256 +SHA512: gcry_sha512 +SHA384: gcry_sha512 +TIGER192: gcry_tiger +TIGER: gcry_tiger +TIGER2: gcry_tiger +TWOFISH: gcry_twofish +TWOFISH128: gcry_twofish +WHIRLPOOL: gcry_whirlpool diff --git a/files/board/arpl/p1/grub/x86_64-efi/crypto.mod b/files/board/arpl/p1/grub/x86_64-efi/crypto.mod new file mode 100644 index 00000000..01ccac17 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/crypto.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/cryptodisk.mod b/files/board/arpl/p1/grub/x86_64-efi/cryptodisk.mod new file mode 100644 index 00000000..633d939b Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/cryptodisk.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/cs5536.mod b/files/board/arpl/p1/grub/x86_64-efi/cs5536.mod new file mode 100644 index 00000000..3f00c6ec Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/cs5536.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/ctz_test.mod b/files/board/arpl/p1/grub/x86_64-efi/ctz_test.mod new file mode 100644 index 00000000..411ae618 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/ctz_test.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/date.mod b/files/board/arpl/p1/grub/x86_64-efi/date.mod new file mode 100644 index 00000000..2ead20f9 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/date.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/datehook.mod b/files/board/arpl/p1/grub/x86_64-efi/datehook.mod new file mode 100644 index 00000000..eac38acd Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/datehook.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/datetime.mod b/files/board/arpl/p1/grub/x86_64-efi/datetime.mod new file 
mode 100644 index 00000000..2283f2c3 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/datetime.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/disk.mod b/files/board/arpl/p1/grub/x86_64-efi/disk.mod new file mode 100644 index 00000000..eaf21a88 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/disk.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/diskfilter.mod b/files/board/arpl/p1/grub/x86_64-efi/diskfilter.mod new file mode 100644 index 00000000..162e5e64 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/diskfilter.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/div.mod b/files/board/arpl/p1/grub/x86_64-efi/div.mod new file mode 100644 index 00000000..cbbff0c3 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/div.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/div_test.mod b/files/board/arpl/p1/grub/x86_64-efi/div_test.mod new file mode 100644 index 00000000..01efbef5 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/div_test.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/dm_nv.mod b/files/board/arpl/p1/grub/x86_64-efi/dm_nv.mod new file mode 100644 index 00000000..2cf67403 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/dm_nv.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/echo.mod b/files/board/arpl/p1/grub/x86_64-efi/echo.mod new file mode 100644 index 00000000..50229a8f Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/echo.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/efi_gop.mod b/files/board/arpl/p1/grub/x86_64-efi/efi_gop.mod new file mode 100644 index 00000000..54affeae Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/efi_gop.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/efi_uga.mod b/files/board/arpl/p1/grub/x86_64-efi/efi_uga.mod new file mode 100644 index 00000000..9020af59 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/efi_uga.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/efifwsetup.mod b/files/board/arpl/p1/grub/x86_64-efi/efifwsetup.mod new file mode 100644 index 00000000..c6c6492d Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/efifwsetup.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/efinet.mod b/files/board/arpl/p1/grub/x86_64-efi/efinet.mod new file mode 100644 index 00000000..f4729fcc Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/efinet.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/ehci.mod b/files/board/arpl/p1/grub/x86_64-efi/ehci.mod new file mode 100644 index 00000000..4354d1c3 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/ehci.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/elf.mod b/files/board/arpl/p1/grub/x86_64-efi/elf.mod new file mode 100644 index 00000000..366e17d2 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/elf.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/eval.mod b/files/board/arpl/p1/grub/x86_64-efi/eval.mod new file mode 100644 index 00000000..9783ffde Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/eval.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/exfat.mod b/files/board/arpl/p1/grub/x86_64-efi/exfat.mod new file mode 100644 index 00000000..0d33d327 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/exfat.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/exfctest.mod b/files/board/arpl/p1/grub/x86_64-efi/exfctest.mod 
new file mode 100644 index 00000000..9f82d93a Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/exfctest.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/ext2.mod b/files/board/arpl/p1/grub/x86_64-efi/ext2.mod new file mode 100644 index 00000000..75f0faba Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/ext2.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/extcmd.mod b/files/board/arpl/p1/grub/x86_64-efi/extcmd.mod new file mode 100644 index 00000000..1f585bec Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/extcmd.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/f2fs.mod b/files/board/arpl/p1/grub/x86_64-efi/f2fs.mod new file mode 100644 index 00000000..6773f4c7 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/f2fs.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/fat.mod b/files/board/arpl/p1/grub/x86_64-efi/fat.mod new file mode 100644 index 00000000..17ef7e32 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/fat.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/file.mod b/files/board/arpl/p1/grub/x86_64-efi/file.mod new file mode 100644 index 00000000..8470677b Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/file.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/fixvideo.mod b/files/board/arpl/p1/grub/x86_64-efi/fixvideo.mod new file mode 100644 index 00000000..5d812bd0 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/fixvideo.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/font.mod b/files/board/arpl/p1/grub/x86_64-efi/font.mod new file mode 100644 index 00000000..25bb73bc Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/font.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/fs.lst b/files/board/arpl/p1/grub/x86_64-efi/fs.lst new file mode 100644 index 00000000..0acd240b --- /dev/null +++ b/files/board/arpl/p1/grub/x86_64-efi/fs.lst @@ -0,0 +1,37 @@ +affs +afs +bfs +btrfs +cbfs +cpio +cpio_be +exfat +ext2 +f2fs +fat +hfs +hfsplus +iso9660 +jfs +minix +minix2 +minix2_be +minix3 +minix3_be +minix_be +newc +nilfs2 +ntfs +odc +procfs +reiserfs +romfs +sfs +squash4 +tar +udf +ufs1 +ufs1_be +ufs2 +xfs +zfs diff --git a/files/board/arpl/p1/grub/x86_64-efi/fshelp.mod b/files/board/arpl/p1/grub/x86_64-efi/fshelp.mod new file mode 100644 index 00000000..27706346 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/fshelp.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/functional_test.mod b/files/board/arpl/p1/grub/x86_64-efi/functional_test.mod new file mode 100644 index 00000000..cc8a2b75 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/functional_test.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_arcfour.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_arcfour.mod new file mode 100644 index 00000000..a46c488e Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_arcfour.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_blowfish.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_blowfish.mod new file mode 100644 index 00000000..3d4b11c6 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_blowfish.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_camellia.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_camellia.mod new file mode 100644 index 00000000..3cd46fd0 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_camellia.mod differ diff --git 
a/files/board/arpl/p1/grub/x86_64-efi/gcry_cast5.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_cast5.mod new file mode 100644 index 00000000..c3c8feda Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_cast5.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_crc.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_crc.mod new file mode 100644 index 00000000..4b7ebf18 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_crc.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_des.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_des.mod new file mode 100644 index 00000000..b932601f Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_des.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_dsa.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_dsa.mod new file mode 100644 index 00000000..6842a3c4 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_dsa.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_idea.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_idea.mod new file mode 100644 index 00000000..25859220 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_idea.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_md4.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_md4.mod new file mode 100644 index 00000000..2bca09d6 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_md4.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_md5.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_md5.mod new file mode 100644 index 00000000..6cba6b74 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_md5.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_rfc2268.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_rfc2268.mod new file mode 100644 index 00000000..0d6dd46e Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_rfc2268.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_rijndael.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_rijndael.mod new file mode 100644 index 00000000..3d316644 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_rijndael.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_rmd160.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_rmd160.mod new file mode 100644 index 00000000..9d0b8d32 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_rmd160.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_rsa.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_rsa.mod new file mode 100644 index 00000000..e98967f2 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_rsa.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_seed.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_seed.mod new file mode 100644 index 00000000..fbcd2e1d Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_seed.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_serpent.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_serpent.mod new file mode 100644 index 00000000..2092ce5b Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_serpent.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_sha1.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_sha1.mod new file mode 100644 index 00000000..7670834c Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_sha1.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_sha256.mod 
b/files/board/arpl/p1/grub/x86_64-efi/gcry_sha256.mod new file mode 100644 index 00000000..99f8aaed Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_sha256.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_sha512.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_sha512.mod new file mode 100644 index 00000000..b936760c Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_sha512.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_tiger.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_tiger.mod new file mode 100644 index 00000000..789c3f0a Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_tiger.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_twofish.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_twofish.mod new file mode 100644 index 00000000..fe189182 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_twofish.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gcry_whirlpool.mod b/files/board/arpl/p1/grub/x86_64-efi/gcry_whirlpool.mod new file mode 100644 index 00000000..faa06f47 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gcry_whirlpool.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/geli.mod b/files/board/arpl/p1/grub/x86_64-efi/geli.mod new file mode 100644 index 00000000..16ad78e2 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/geli.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gettext.mod b/files/board/arpl/p1/grub/x86_64-efi/gettext.mod new file mode 100644 index 00000000..59fdb103 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gettext.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gfxmenu.mod b/files/board/arpl/p1/grub/x86_64-efi/gfxmenu.mod new file mode 100644 index 00000000..1af331fb Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gfxmenu.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gfxterm.mod b/files/board/arpl/p1/grub/x86_64-efi/gfxterm.mod new file mode 100644 index 00000000..7171e14b Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gfxterm.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gfxterm_background.mod b/files/board/arpl/p1/grub/x86_64-efi/gfxterm_background.mod new file mode 100644 index 00000000..6bfab3ca Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gfxterm_background.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gfxterm_menu.mod b/files/board/arpl/p1/grub/x86_64-efi/gfxterm_menu.mod new file mode 100644 index 00000000..61280b47 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gfxterm_menu.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gptsync.mod b/files/board/arpl/p1/grub/x86_64-efi/gptsync.mod new file mode 100644 index 00000000..342da46f Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gptsync.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/grub.efi b/files/board/arpl/p1/grub/x86_64-efi/grub.efi new file mode 100644 index 00000000..a17d6582 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/grub.efi differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/gzio.mod b/files/board/arpl/p1/grub/x86_64-efi/gzio.mod new file mode 100644 index 00000000..950d974c Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/gzio.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/halt.mod b/files/board/arpl/p1/grub/x86_64-efi/halt.mod new file mode 100644 index 00000000..3931e891 
Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/halt.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/hashsum.mod b/files/board/arpl/p1/grub/x86_64-efi/hashsum.mod new file mode 100644 index 00000000..0914145b Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/hashsum.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/hdparm.mod b/files/board/arpl/p1/grub/x86_64-efi/hdparm.mod new file mode 100644 index 00000000..6f1dd577 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/hdparm.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/hello.mod b/files/board/arpl/p1/grub/x86_64-efi/hello.mod new file mode 100644 index 00000000..c1183c44 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/hello.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/help.mod b/files/board/arpl/p1/grub/x86_64-efi/help.mod new file mode 100644 index 00000000..259b560b Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/help.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/hexdump.mod b/files/board/arpl/p1/grub/x86_64-efi/hexdump.mod new file mode 100644 index 00000000..fd1b4a24 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/hexdump.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/hfs.mod b/files/board/arpl/p1/grub/x86_64-efi/hfs.mod new file mode 100644 index 00000000..c1a616f8 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/hfs.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/hfsplus.mod b/files/board/arpl/p1/grub/x86_64-efi/hfsplus.mod new file mode 100644 index 00000000..a2924684 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/hfsplus.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/hfspluscomp.mod b/files/board/arpl/p1/grub/x86_64-efi/hfspluscomp.mod new file mode 100644 index 00000000..a76a63d0 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/hfspluscomp.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/http.mod b/files/board/arpl/p1/grub/x86_64-efi/http.mod new file mode 100644 index 00000000..19d5515f Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/http.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/iorw.mod b/files/board/arpl/p1/grub/x86_64-efi/iorw.mod new file mode 100644 index 00000000..2c194eaf Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/iorw.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/iso9660.mod b/files/board/arpl/p1/grub/x86_64-efi/iso9660.mod new file mode 100644 index 00000000..4c3a0489 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/iso9660.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/jfs.mod b/files/board/arpl/p1/grub/x86_64-efi/jfs.mod new file mode 100644 index 00000000..a2707d0a Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/jfs.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/jpeg.mod b/files/board/arpl/p1/grub/x86_64-efi/jpeg.mod new file mode 100644 index 00000000..cdeff7df Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/jpeg.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/keylayouts.mod b/files/board/arpl/p1/grub/x86_64-efi/keylayouts.mod new file mode 100644 index 00000000..9edb650e Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/keylayouts.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/keystatus.mod b/files/board/arpl/p1/grub/x86_64-efi/keystatus.mod new file mode 100644 index 
00000000..43763e3d Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/keystatus.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/ldm.mod b/files/board/arpl/p1/grub/x86_64-efi/ldm.mod new file mode 100644 index 00000000..cd9034c2 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/ldm.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/legacy_password_test.mod b/files/board/arpl/p1/grub/x86_64-efi/legacy_password_test.mod new file mode 100644 index 00000000..dfe77673 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/legacy_password_test.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/legacycfg.mod b/files/board/arpl/p1/grub/x86_64-efi/legacycfg.mod new file mode 100644 index 00000000..2488dc8d Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/legacycfg.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/linux.mod b/files/board/arpl/p1/grub/x86_64-efi/linux.mod new file mode 100644 index 00000000..b7bfd3d3 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/linux.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/linux16.mod b/files/board/arpl/p1/grub/x86_64-efi/linux16.mod new file mode 100644 index 00000000..e52bdd8e Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/linux16.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/load.cfg b/files/board/arpl/p1/grub/x86_64-efi/load.cfg new file mode 100644 index 00000000..0de9e31b --- /dev/null +++ b/files/board/arpl/p1/grub/x86_64-efi/load.cfg @@ -0,0 +1,2 @@ +search.fs_uuid 391019b3-a8f7-405b-917c-69da59ccbc23 root hd0,gpt2 +set prefix=($root)'/tmp/grub' diff --git a/files/board/arpl/p1/grub/x86_64-efi/loadbios.mod b/files/board/arpl/p1/grub/x86_64-efi/loadbios.mod new file mode 100644 index 00000000..6b6aa627 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/loadbios.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/loadenv.mod b/files/board/arpl/p1/grub/x86_64-efi/loadenv.mod new file mode 100644 index 00000000..56875968 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/loadenv.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/loopback.mod b/files/board/arpl/p1/grub/x86_64-efi/loopback.mod new file mode 100644 index 00000000..e7ea17f3 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/loopback.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/ls.mod b/files/board/arpl/p1/grub/x86_64-efi/ls.mod new file mode 100644 index 00000000..3548be7d Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/ls.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/lsacpi.mod b/files/board/arpl/p1/grub/x86_64-efi/lsacpi.mod new file mode 100644 index 00000000..f3fb8aad Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/lsacpi.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/lsefi.mod b/files/board/arpl/p1/grub/x86_64-efi/lsefi.mod new file mode 100644 index 00000000..c79172f6 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/lsefi.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/lsefimmap.mod b/files/board/arpl/p1/grub/x86_64-efi/lsefimmap.mod new file mode 100644 index 00000000..52aae667 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/lsefimmap.mod differ diff --git a/files/board/arpl/p1/grub/x86_64-efi/lsefisystab.mod b/files/board/arpl/p1/grub/x86_64-efi/lsefisystab.mod new file mode 100644 index 00000000..254abfd5 Binary files /dev/null and 
b/files/board/arpl/p1/grub/x86_64-efi/lsefisystab.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/lsmmap.mod b/files/board/arpl/p1/grub/x86_64-efi/lsmmap.mod new file mode 100644 index 00000000..9ea9c38f Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/lsmmap.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/lspci.mod b/files/board/arpl/p1/grub/x86_64-efi/lspci.mod new file mode 100644 index 00000000..ff3834c0 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/lspci.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/lssal.mod b/files/board/arpl/p1/grub/x86_64-efi/lssal.mod new file mode 100644 index 00000000..6896bc63 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/lssal.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/luks.mod b/files/board/arpl/p1/grub/x86_64-efi/luks.mod new file mode 100644 index 00000000..2fb54919 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/luks.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/lvm.mod b/files/board/arpl/p1/grub/x86_64-efi/lvm.mod new file mode 100644 index 00000000..ce5306c9 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/lvm.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/lzopio.mod b/files/board/arpl/p1/grub/x86_64-efi/lzopio.mod new file mode 100644 index 00000000..6f528e74 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/lzopio.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/macbless.mod b/files/board/arpl/p1/grub/x86_64-efi/macbless.mod new file mode 100644 index 00000000..a53681f4 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/macbless.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/macho.mod b/files/board/arpl/p1/grub/x86_64-efi/macho.mod new file mode 100644 index 00000000..5cc62a44 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/macho.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/mdraid09.mod b/files/board/arpl/p1/grub/x86_64-efi/mdraid09.mod new file mode 100644 index 00000000..892717a6 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/mdraid09.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/mdraid09_be.mod b/files/board/arpl/p1/grub/x86_64-efi/mdraid09_be.mod new file mode 100644 index 00000000..9d73f750 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/mdraid09_be.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/mdraid1x.mod b/files/board/arpl/p1/grub/x86_64-efi/mdraid1x.mod new file mode 100644 index 00000000..b3de711f Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/mdraid1x.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/memdisk.mod b/files/board/arpl/p1/grub/x86_64-efi/memdisk.mod new file mode 100644 index 00000000..9c77b90b Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/memdisk.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/memrw.mod b/files/board/arpl/p1/grub/x86_64-efi/memrw.mod new file mode 100644 index 00000000..51a9ac48 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/memrw.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/minicmd.mod b/files/board/arpl/p1/grub/x86_64-efi/minicmd.mod new file mode 100644 index 00000000..bd7c6c21 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/minicmd.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/minix.mod b/files/board/arpl/p1/grub/x86_64-efi/minix.mod new file mode 100644 index 00000000..b8b1c516 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/minix.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/minix2.mod b/files/board/arpl/p1/grub/x86_64-efi/minix2.mod new file mode 100644 index 00000000..97e19a09 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/minix2.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/minix2_be.mod b/files/board/arpl/p1/grub/x86_64-efi/minix2_be.mod new file mode 100644 index 00000000..ac9da6ac Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/minix2_be.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/minix3.mod b/files/board/arpl/p1/grub/x86_64-efi/minix3.mod new file mode 100644 index 00000000..c2243a50 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/minix3.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/minix3_be.mod b/files/board/arpl/p1/grub/x86_64-efi/minix3_be.mod new file mode 100644 index 00000000..cf2dacd1 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/minix3_be.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/minix_be.mod b/files/board/arpl/p1/grub/x86_64-efi/minix_be.mod new file mode 100644 index 00000000..7e532a1d Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/minix_be.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/mmap.mod b/files/board/arpl/p1/grub/x86_64-efi/mmap.mod new file mode 100644 index 00000000..150898f9 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/mmap.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/moddep.lst b/files/board/arpl/p1/grub/x86_64-efi/moddep.lst new file mode 100644 index 00000000..43043a31
--- /dev/null
+++ b/files/board/arpl/p1/grub/x86_64-efi/moddep.lst
@@ -0,0 +1,262 @@
+videotest: font video gfxmenu
+odc: archelp
+loopback: extcmd
+macho:
+gcry_des: crypto
+memrw: extcmd
+terminfo: extcmd
+f2fs: fshelp
+part_gpt:
+romfs: fshelp
+read:
+lsefimmap:
+aout:
+gcry_arcfour: crypto
+tftp: net
+newc: archelp
+minix2_be:
+elf:
+videotest_checksum: font functional_test video_fb
+password_pbkdf2: crypto gcry_sha512 pbkdf2 normal
+gcry_seed: crypto
+pcidump: extcmd
+bsd: elf serial crypto gcry_md5 extcmd video boot aout cpuid relocator mmap
+sfs: fshelp
+reiserfs: fshelp
+part_sunpc:
+zstd:
+gfxmenu: video_colors trig bitmap_scale gfxterm font normal video bitmap
+backtrace:
+jfs:
+help: extcmd normal
+configfile: normal
+cbls: cbtable
+gfxterm_menu: font functional_test procfs normal video_fb
+gcry_idea: crypto
+tr: extcmd
+shift_test: functional_test
+ohci: cs5536 usb boot
+afs: fshelp
+spkmodem: terminfo
+usb_keyboard: usb keylayouts
+xzio: crypto
+syslinuxcfg: extcmd normal
+search_fs_file:
+wrmsr:
+usbms: scsi usb
+test_blockarg: extcmd normal
+true:
+affs: fshelp
+iso9660: fshelp
+exfat: fshelp
+setjmp_test: functional_test setjmp
+gfxterm: font video
+efinet: net
+disk:
+appleldr: boot
+xfs: fshelp
+testspeed: extcmd normal
+cpio_be: archelp
+functional_test: btrfs extcmd video video_fb
+bswap_test: functional_test
+sleep: extcmd normal
+memdisk:
+gcry_rijndael: crypto
+mdraid09_be: diskfilter
+gettext:
+gcry_sha1: crypto
+hfspluscomp: gzio hfsplus
+cmp:
+random: hexdump
+offsetio:
+file: macho elf extcmd offsetio
+usbserial_usbdebug: serial usb usbserial_common
+video_colors:
+morse:
+hashsum: crypto extcmd normal
+usb:
+halt: acpi
+gfxterm_background: video_colors bitmap_scale gfxterm extcmd video bitmap
+search_fs_uuid:
+gcry_dsa: pgp mpi
+keystatus: extcmd
+linux: video boot relocator mmap
+geli: cryptodisk crypto gcry_sha512 pbkdf2 gcry_sha256
+cmdline_cat_test: font functional_test procfs normal video_fb
+rdmsr: extcmd
+part_sun:
+cbtable:
+pbkdf2_test: functional_test pbkdf2 gcry_sha1
+video_bochs: video video_fb
+bufio:
+usbserial_ftdi: serial usb usbserial_common
+legacy_password_test: functional_test legacycfg
+cpuid: extcmd
+hdparm: extcmd hexdump
+bfs: fshelp
+gcry_blowfish: crypto
+test:
+nilfs2: fshelp
+gcry_rsa: pgp mpi
+cryptodisk: crypto extcmd procfs
+nativedisk:
+minicmd:
+signature_test: functional_test procfs
+ata: scsi
+udf: fshelp
+gzio: gcry_crc
+xnu_uuid: gcry_md5
+uhci: usb
+pata: ata
+mul_test: functional_test
+adler32: crypto
+terminal:
+div:
+ehci: cs5536 usb boot
+crypto:
+part_bsd: part_msdos
+cs5536:
+gcry_sha512: crypto
+password: crypto normal
+fshelp:
+sleep_test: functional_test datetime
+iorw: extcmd
+xnu: macho bitmap_scale random extcmd video bitmap boot relocator mmap
+mmap:
+exfctest: functional_test
+zfsinfo: zfs
+ldm: part_gpt diskfilter part_msdos
+eval: normal
+part_dvh:
+lssal:
+ext2: fshelp
+blocklist:
+net: priority_queue bufio datetime boot
+part_acorn:
+videoinfo: video
+btrfs: zstd lzopio raid6rec gzio
+lsmmap: mmap
+strtoull_test: functional_test
+bitmap:
+ntfs: fshelp
+multiboot: net linux video boot relocator mmap
+tpm:
+gcry_crc: crypto
+png: bufio bitmap
+jpeg: bufio bitmap
+macbless: disk
+div_test: functional_test div
+regexp: extcmd normal
+parttool: normal
+usbserial_pl2303: serial usb usbserial_common
+cpio: archelp
+gcry_rmd160: crypto
+fat: fshelp
+ufs1_be:
+archelp:
+http: net
+zfs: gzio
+raid6rec: diskfilter
+lsefisystab:
+minix2:
+lsacpi: extcmd acpi
+datehook: datetime normal
+loadenv: disk extcmd
+bitmap_scale: bitmap
+probe: extcmd
+minix3:
+tar: archelp
+loadbios:
+hfs: fshelp
+procfs: archelp
+boot:
+keylayouts:
+progress: normal
+kernel:
+usbtest: usb
+relocator: mmap
+acpi: extcmd mmap
+tga: bufio bitmap
+reboot:
+serial: extcmd terminfo
+zfscrypt: crypto pbkdf2 zfs extcmd gcry_sha1 gcry_rijndael
+efi_uga: video video_fb
+dm_nv: diskfilter
+cmp_test: functional_test
+luks: cryptodisk crypto pbkdf2
+font: bufio video
+raid5rec: diskfilter
+crc64: crypto
+datetime:
+efifwsetup:
+ctz_test: functional_test
+video:
+cbmemc: normal cbtable terminfo
+hfsplus: fshelp
+gcry_cast5: crypto
+extcmd:
+squash4: fshelp lzopio xzio gzio
+part_plan:
+minix_be:
+gcry_whirlpool: crypto
+gcry_tiger: crypto
+fixvideo:
+search: search_fs_uuid search_fs_file extcmd search_label
+lspci: extcmd
+cbtime: cbtable
+video_fb:
+minix3_be:
+trig:
+msdospart: disk parttool
+priority_queue:
+gcry_twofish: crypto
+part_dfly:
+xnu_uuid_test: functional_test
+diskfilter:
+testload:
+part_apple:
+hexdump: extcmd
+date: datetime normal
+pbkdf2: crypto
+gcry_sha256: crypto
+ls: extcmd normal
+usbserial_common: serial usb
+ntfscomp: ntfs
+lzopio: crypto
+video_cirrus: video video_fb
+hello: extcmd
+scsi:
+linux16: linux video boot relocator mmap
+cat: extcmd
+ahci: ata boot
+pgp: crypto extcmd mpi gcry_sha1
+normal: terminal crypto bufio extcmd boot gettext
+ufs1:
+mdraid09: diskfilter
+lvm: diskfilter
+chain: net efinet boot
+cbfs: archelp
+ufs2:
+time:
+setpci: extcmd
+gptsync: disk
+search_label:
+setjmp:
+multiboot2: net linux video boot relocator mmap acpi
+gcry_rfc2268: crypto
+mdraid1x: diskfilter
+mpi: crypto
+legacycfg: password crypto gcry_md5 normal
+play:
+part_amiga:
+efi_gop: video video_fb
+minix:
+echo: extcmd
+lsefi:
+gcry_serpent: crypto
+gcry_md4: crypto
+gcry_md5: crypto
+part_msdos:
+gcry_camellia: crypto
+at_keyboard: keylayouts boot
+all_video: efi_gop efi_uga video_bochs video_cirrus
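Each line of moddep.lst above maps one GRUB module to the modules it depends on (an empty right-hand side means no dependencies), and GRUB resolves these transitively whenever a module is loaded. As a rough, hypothetical illustration only (resolve.sh and the moddep.lst path below are assumptions, not part of this commit), the transitive closure can be computed from the file with plain shell:

#!/bin/bash
# resolve.sh (illustrative): print the transitive dependency closure of a
# GRUB module, dependencies first, by reading "name: dep1 dep2 ..." lines.
MODDEP="files/board/arpl/p1/grub/x86_64-efi/moddep.lst"
declare -A SEEN
resolve() {
  local mod="$1" dep
  [ -n "${SEEN[$mod]}" ] && return   # already emitted, avoid cycles
  SEEN[$mod]=1
  # First field (before the colon) is the module, the rest are its deps.
  for dep in $(awk -F': *' -v m="$mod" '$1 == m { print $2 }' "$MODDEP"); do
    resolve "$dep"
  done
  echo "$mod"
}
resolve "${1:?usage: resolve.sh <module>}"

For example, "resolve.sh luks" would print crypto, extcmd, archelp, procfs, cryptodisk, pbkdf2 and finally luks, matching the luks, cryptodisk and pbkdf2 entries above; emitting dependencies before dependents gives a valid insmod order.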
diff --git a/files/board/arpl/p1/grub/x86_64-efi/modinfo.sh b/files/board/arpl/p1/grub/x86_64-efi/modinfo.sh new file mode 100644 index 00000000..22f6137d
--- /dev/null
+++ b/files/board/arpl/p1/grub/x86_64-efi/modinfo.sh
@@ -0,0 +1,40 @@
+#!/bin/bash
+
+# User-controllable options
+grub_modinfo_target_cpu=x86_64
+grub_modinfo_platform=efi
+grub_disk_cache_stats=0
+grub_boot_time_stats=0
+grub_have_font_source=0
+
+# Autodetected config
+grub_have_asm_uscore=0
+grub_bss_start_symbol=""
+grub_end_symbol=""
+
+# Build environment
+grub_target_cc='/home/fabio/dirgit/arpl/buildroot-2022.02.2/output/host/bin/x86_64-buildroot-linux-gnu-gcc'
+grub_target_cc_version='x86_64-buildroot-linux-gnu-gcc.br_real (Buildroot -gf34edcf) 10.3.0'
+grub_target_cflags='-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Os -g0 -D_FORTIFY_SOURCE=1 -Os -Wall -W -Wshadow -Wpointer-arith -Wundef -Wchar-subscripts -Wcomment -Wdeprecated-declarations -Wdisabled-optimization -Wdiv-by-zero -Wfloat-equal -Wformat-extra-args -Wformat-security -Wformat-y2k -Wimplicit -Wimplicit-function-declaration -Wimplicit-int -Wmain -Wmissing-braces -Wmissing-format-attribute -Wmultichar -Wparentheses -Wreturn-type -Wsequence-point -Wshadow -Wsign-compare -Wswitch -Wtrigraphs -Wunknown-pragmas -Wunused -Wunused-function -Wunused-label -Wunused-parameter -Wunused-value -Wunused-variable -Wwrite-strings -Wnested-externs -Wstrict-prototypes -g -Wredundant-decls -Wmissing-prototypes -Wmissing-declarations -Wextra -Wattributes -Wendif-labels -Winit-self -Wint-to-pointer-cast -Winvalid-pch -Wmissing-field-initializers -Wnonnull -Woverflow -Wvla -Wpointer-to-int-cast -Wstrict-aliasing -Wvariadic-macros -Wvolatile-register-var -Wpointer-sign -Wmissing-include-dirs -Wmissing-prototypes -Wmissing-declarations -Wformat=2 -m64 -freg-struct-return -mno-mmx -mno-sse -mno-sse2 -mno-sse3 -mno-3dnow -msoft-float -fno-dwarf2-cfi-asm -mno-stack-arg-probe -fno-asynchronous-unwind-tables -fno-unwind-tables -mcmodel=large -mno-red-zone -fno-PIE -fno-pie -fno-stack-protector -Wtrampolines'
+grub_target_cppflags='-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Os -fno-stack-protector -Wall -W -DGRUB_MACHINE_EFI=1 -DGRUB_MACHINE=X86_64_EFI -m64 -nostdinc -isystem /home/fabio/dirgit/arpl/buildroot-2022.02.2/output/host/lib/gcc/x86_64-buildroot-linux-gnu/10.3.0/include -I$(top_srcdir)/include -I$(top_builddir)/include'
+grub_target_ccasflags=' -g -m64 -msoft-float'
+grub_target_ldflags=' -Os -m64 -Wl,-melf_x86_64 -no-pie -Wl,--build-id=none'
+grub_cflags='-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Os -g0 -D_FORTIFY_SOURCE=1 -Os'
+grub_cppflags='-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Os -fno-stack-protector -D_FILE_OFFSET_BITS=64'
+grub_ccasflags='-D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -Os -g0 -D_FORTIFY_SOURCE=1 -Os'
+grub_ldflags=''
+grub_target_strip='/home/fabio/dirgit/arpl/buildroot-2022.02.2/output/host/bin/x86_64-buildroot-linux-gnu-strip'
+grub_target_nm='/home/fabio/dirgit/arpl/buildroot-2022.02.2/output/host/bin/x86_64-buildroot-linux-gnu-gcc-nm'
+grub_target_ranlib='ranlib'
+grub_target_objconf=''
+grub_target_obj2elf=''
+grub_target_img_base_ldopt='-Wl,-Ttext'
+grub_target_img_ldflags='@TARGET_IMG_BASE_LDFLAGS@'
+
+# Version
+grub_version="2.04"
+grub_package="grub"
+grub_package_string="GRUB 2.04"
+grub_package_version="2.04"
+grub_package_name="GRUB"
+grub_package_bugreport="bug-grub@gnu.org"
diff --git a/files/board/arpl/p1/grub/x86_64-efi/morse.mod b/files/board/arpl/p1/grub/x86_64-efi/morse.mod new file mode 100644 index 00000000..5f1e6858 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/morse.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/mpi.mod b/files/board/arpl/p1/grub/x86_64-efi/mpi.mod new file mode 100644 index 00000000..114209d1 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/mpi.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/msdospart.mod b/files/board/arpl/p1/grub/x86_64-efi/msdospart.mod new file mode 100644 index 00000000..6e46c439 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/msdospart.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/mul_test.mod b/files/board/arpl/p1/grub/x86_64-efi/mul_test.mod new file mode 100644 index 00000000..c4c6d2f8 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/mul_test.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/multiboot.mod b/files/board/arpl/p1/grub/x86_64-efi/multiboot.mod new file mode 100644 index 00000000..6e7a561e Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/multiboot.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/multiboot2.mod b/files/board/arpl/p1/grub/x86_64-efi/multiboot2.mod new file mode 100644 index 00000000..7a9023c7 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/multiboot2.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/nativedisk.mod b/files/board/arpl/p1/grub/x86_64-efi/nativedisk.mod new file mode 100644 index 00000000..47ad3746 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/nativedisk.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/net.mod b/files/board/arpl/p1/grub/x86_64-efi/net.mod new file mode 100644 index 00000000..b799dfd9 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/net.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/newc.mod b/files/board/arpl/p1/grub/x86_64-efi/newc.mod new file mode 100644 index 00000000..04d6396e Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/newc.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/nilfs2.mod b/files/board/arpl/p1/grub/x86_64-efi/nilfs2.mod new file mode 100644 index 00000000..a34ba904 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/nilfs2.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/normal.mod b/files/board/arpl/p1/grub/x86_64-efi/normal.mod new file mode 100644 index 00000000..6b882a2f Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/normal.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/ntfs.mod b/files/board/arpl/p1/grub/x86_64-efi/ntfs.mod new file mode 100644 index 00000000..64b553ff Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/ntfs.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/ntfscomp.mod b/files/board/arpl/p1/grub/x86_64-efi/ntfscomp.mod new file mode 100644 index 00000000..6361f55c Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/ntfscomp.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/odc.mod b/files/board/arpl/p1/grub/x86_64-efi/odc.mod new file mode 100644 index 00000000..c34ee9f7 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/odc.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/offsetio.mod b/files/board/arpl/p1/grub/x86_64-efi/offsetio.mod new file mode 100644 index 00000000..bc57ecd8 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/offsetio.mod differ
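Because modinfo.sh consists only of shell variable assignments, other scripts can source it to discover how this GRUB tree was built. A minimal, illustrative use (the relative path is an assumption about where the file sits in a checkout):

# Illustrative only: report the target this GRUB tree was built for.
. files/board/arpl/p1/grub/x86_64-efi/modinfo.sh
echo "GRUB ${grub_version} for ${grub_modinfo_target_cpu}-${grub_modinfo_platform}"
# prints: GRUB 2.04 for x86_64-efi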
diff --git a/files/board/arpl/p1/grub/x86_64-efi/ohci.mod b/files/board/arpl/p1/grub/x86_64-efi/ohci.mod new file mode 100644 index 00000000..5fca5985 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/ohci.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/part_acorn.mod b/files/board/arpl/p1/grub/x86_64-efi/part_acorn.mod new file mode 100644 index 00000000..42d25f55 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/part_acorn.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/part_amiga.mod b/files/board/arpl/p1/grub/x86_64-efi/part_amiga.mod new file mode 100644 index 00000000..1e10fff2 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/part_amiga.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/part_apple.mod b/files/board/arpl/p1/grub/x86_64-efi/part_apple.mod new file mode 100644 index 00000000..a644f515 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/part_apple.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/part_bsd.mod b/files/board/arpl/p1/grub/x86_64-efi/part_bsd.mod new file mode 100644 index 00000000..719f3818 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/part_bsd.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/part_dfly.mod b/files/board/arpl/p1/grub/x86_64-efi/part_dfly.mod new file mode 100644 index 00000000..4dfc7a4f Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/part_dfly.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/part_dvh.mod b/files/board/arpl/p1/grub/x86_64-efi/part_dvh.mod new file mode 100644 index 00000000..9ced39a6 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/part_dvh.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/part_gpt.mod b/files/board/arpl/p1/grub/x86_64-efi/part_gpt.mod new file mode 100644 index 00000000..d9a568fb Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/part_gpt.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/part_msdos.mod b/files/board/arpl/p1/grub/x86_64-efi/part_msdos.mod new file mode 100644 index 00000000..e3535ce7 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/part_msdos.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/part_plan.mod b/files/board/arpl/p1/grub/x86_64-efi/part_plan.mod new file mode 100644 index 00000000..d1ffb7b3 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/part_plan.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/part_sun.mod b/files/board/arpl/p1/grub/x86_64-efi/part_sun.mod new file mode 100644 index 00000000..c736f5f9 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/part_sun.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/part_sunpc.mod b/files/board/arpl/p1/grub/x86_64-efi/part_sunpc.mod new file mode 100644 index 00000000..32d51bdc Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/part_sunpc.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/partmap.lst b/files/board/arpl/p1/grub/x86_64-efi/partmap.lst new file mode 100644 index 00000000..761233aa
--- /dev/null
+++ b/files/board/arpl/p1/grub/x86_64-efi/partmap.lst
@@ -0,0 +1,11 @@
+part_acorn
+part_amiga
+part_apple
+part_bsd
+part_dfly
+part_dvh
+part_gpt
+part_msdos
+part_plan
+part_sun
+part_sunpc
diff --git a/files/board/arpl/p1/grub/x86_64-efi/parttool.lst b/files/board/arpl/p1/grub/x86_64-efi/parttool.lst new file mode 100644 index 00000000..68b4b5c4
--- /dev/null
+++ b/files/board/arpl/p1/grub/x86_64-efi/parttool.lst
@@ -0,0 +1 @@
+msdos: msdospart
diff --git a/files/board/arpl/p1/grub/x86_64-efi/parttool.mod b/files/board/arpl/p1/grub/x86_64-efi/parttool.mod new file mode 100644 index 00000000..f8ec300a Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/parttool.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/password.mod b/files/board/arpl/p1/grub/x86_64-efi/password.mod new file mode 100644 index 00000000..71d471f5 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/password.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/password_pbkdf2.mod b/files/board/arpl/p1/grub/x86_64-efi/password_pbkdf2.mod new file mode 100644 index 00000000..cd622bc4 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/password_pbkdf2.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/pata.mod b/files/board/arpl/p1/grub/x86_64-efi/pata.mod new file mode 100644 index 00000000..25701b75 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/pata.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/pbkdf2.mod b/files/board/arpl/p1/grub/x86_64-efi/pbkdf2.mod new file mode 100644 index 00000000..4bde9924 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/pbkdf2.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/pbkdf2_test.mod b/files/board/arpl/p1/grub/x86_64-efi/pbkdf2_test.mod new file mode 100644 index 00000000..036b450d Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/pbkdf2_test.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/pcidump.mod b/files/board/arpl/p1/grub/x86_64-efi/pcidump.mod new file mode 100644 index 00000000..5b4acb67 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/pcidump.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/pgp.mod b/files/board/arpl/p1/grub/x86_64-efi/pgp.mod new file mode 100644 index 00000000..962a2979 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/pgp.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/play.mod b/files/board/arpl/p1/grub/x86_64-efi/play.mod new file mode 100644 index 00000000..2873a8aa Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/play.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/png.mod b/files/board/arpl/p1/grub/x86_64-efi/png.mod new file mode 100644 index 00000000..ad8ffbc3 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/png.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/priority_queue.mod b/files/board/arpl/p1/grub/x86_64-efi/priority_queue.mod new file mode 100644 index 00000000..b87bcb6a Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/priority_queue.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/probe.mod b/files/board/arpl/p1/grub/x86_64-efi/probe.mod new file mode 100644 index 00000000..6ed23f56 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/probe.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/procfs.mod b/files/board/arpl/p1/grub/x86_64-efi/procfs.mod new file mode 100644 index 00000000..40994edf Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/procfs.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/progress.mod b/files/board/arpl/p1/grub/x86_64-efi/progress.mod new file mode 100644 index 00000000..13eec163 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/progress.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/raid5rec.mod b/files/board/arpl/p1/grub/x86_64-efi/raid5rec.mod new file mode 100644 index 00000000..1c0bc7a4 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/raid5rec.mod differ
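partmap.lst simply enumerates the partition-map modules present in this tree (GRUB tooling consults such lists to know which part_* modules exist), while parttool.lst maps a partition-map name to the module implementing its parttool backend (here, msdos to msdospart). Purely as a sketch, a throwaway loop that turns the list into insmod commands:

# Illustrative only: emit an "insmod" command per available partition map.
LST="files/board/arpl/p1/grub/x86_64-efi/partmap.lst"
while read -r map; do
  echo "insmod ${map}"
done < "${LST}"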
diff --git a/files/board/arpl/p1/grub/x86_64-efi/raid6rec.mod b/files/board/arpl/p1/grub/x86_64-efi/raid6rec.mod new file mode 100644 index 00000000..3c31a68f Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/raid6rec.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/random.mod b/files/board/arpl/p1/grub/x86_64-efi/random.mod new file mode 100644 index 00000000..f9160b6e Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/random.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/rdmsr.mod b/files/board/arpl/p1/grub/x86_64-efi/rdmsr.mod new file mode 100644 index 00000000..5d13b03e Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/rdmsr.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/read.mod b/files/board/arpl/p1/grub/x86_64-efi/read.mod new file mode 100644 index 00000000..1aa9781e Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/read.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/reboot.mod b/files/board/arpl/p1/grub/x86_64-efi/reboot.mod new file mode 100644 index 00000000..5250e53a Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/reboot.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/regexp.mod b/files/board/arpl/p1/grub/x86_64-efi/regexp.mod new file mode 100644 index 00000000..cd01b31c Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/regexp.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/reiserfs.mod b/files/board/arpl/p1/grub/x86_64-efi/reiserfs.mod new file mode 100644 index 00000000..28e31536 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/reiserfs.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/relocator.mod b/files/board/arpl/p1/grub/x86_64-efi/relocator.mod new file mode 100644 index 00000000..8a07fe67 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/relocator.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/romfs.mod b/files/board/arpl/p1/grub/x86_64-efi/romfs.mod new file mode 100644 index 00000000..4bb7f806 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/romfs.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/scsi.mod b/files/board/arpl/p1/grub/x86_64-efi/scsi.mod new file mode 100644 index 00000000..9d57f852 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/scsi.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/search.mod b/files/board/arpl/p1/grub/x86_64-efi/search.mod new file mode 100644 index 00000000..8e695eb6 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/search.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/search_fs_file.mod b/files/board/arpl/p1/grub/x86_64-efi/search_fs_file.mod new file mode 100644 index 00000000..7f17f9d1 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/search_fs_file.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/search_fs_uuid.mod b/files/board/arpl/p1/grub/x86_64-efi/search_fs_uuid.mod new file mode 100644 index 00000000..1777afa0 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/search_fs_uuid.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/search_label.mod b/files/board/arpl/p1/grub/x86_64-efi/search_label.mod new file mode 100644 index 00000000..5116b07c Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/search_label.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/serial.mod b/files/board/arpl/p1/grub/x86_64-efi/serial.mod new file mode 100644 index 00000000..4a958ae1 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/serial.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/setjmp.mod b/files/board/arpl/p1/grub/x86_64-efi/setjmp.mod new file mode 100644 index 00000000..681ec8d9 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/setjmp.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/setjmp_test.mod b/files/board/arpl/p1/grub/x86_64-efi/setjmp_test.mod new file mode 100644 index 00000000..46ddfaf7 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/setjmp_test.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/setpci.mod b/files/board/arpl/p1/grub/x86_64-efi/setpci.mod new file mode 100644 index 00000000..a0f1abbe Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/setpci.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/sfs.mod b/files/board/arpl/p1/grub/x86_64-efi/sfs.mod new file mode 100644 index 00000000..c900986a Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/sfs.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/shift_test.mod b/files/board/arpl/p1/grub/x86_64-efi/shift_test.mod new file mode 100644 index 00000000..27037cb3 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/shift_test.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/signature_test.mod b/files/board/arpl/p1/grub/x86_64-efi/signature_test.mod new file mode 100644 index 00000000..4b604e70 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/signature_test.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/sleep.mod b/files/board/arpl/p1/grub/x86_64-efi/sleep.mod new file mode 100644 index 00000000..5500a077 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/sleep.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/sleep_test.mod b/files/board/arpl/p1/grub/x86_64-efi/sleep_test.mod new file mode 100644 index 00000000..779fedbd Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/sleep_test.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/spkmodem.mod b/files/board/arpl/p1/grub/x86_64-efi/spkmodem.mod new file mode 100644 index 00000000..81d01fcb Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/spkmodem.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/squash4.mod b/files/board/arpl/p1/grub/x86_64-efi/squash4.mod new file mode 100644 index 00000000..ad6639ae Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/squash4.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/strtoull_test.mod b/files/board/arpl/p1/grub/x86_64-efi/strtoull_test.mod new file mode 100644 index 00000000..b91751c7 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/strtoull_test.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/syslinuxcfg.mod b/files/board/arpl/p1/grub/x86_64-efi/syslinuxcfg.mod new file mode 100644 index 00000000..3c090987 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/syslinuxcfg.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/tar.mod b/files/board/arpl/p1/grub/x86_64-efi/tar.mod new file mode 100644 index 00000000..a34ba677 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/tar.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/terminal.lst b/files/board/arpl/p1/grub/x86_64-efi/terminal.lst new file mode 100644 index 00000000..3c9a5a34
--- /dev/null
+++ b/files/board/arpl/p1/grub/x86_64-efi/terminal.lst
@@ -0,0 +1,9 @@
+iat_keyboard: at_keyboard
+iserial: serial
+iserial_*: serial
+oaudio: morse
+ocbmemc: cbmemc
+ogfxterm: gfxterm
+oserial: serial
+oserial_*: serial
+ospkmodem: spkmodem
diff --git a/files/board/arpl/p1/grub/x86_64-efi/terminal.mod b/files/board/arpl/p1/grub/x86_64-efi/terminal.mod new file mode 100644 index 00000000..231b1769 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/terminal.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/terminfo.mod b/files/board/arpl/p1/grub/x86_64-efi/terminfo.mod new file mode 100644 index 00000000..ba314586 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/terminfo.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/test.mod b/files/board/arpl/p1/grub/x86_64-efi/test.mod new file mode 100644 index 00000000..4ee54991 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/test.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/test_blockarg.mod b/files/board/arpl/p1/grub/x86_64-efi/test_blockarg.mod new file mode 100644 index 00000000..ac2f6f0b Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/test_blockarg.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/testload.mod b/files/board/arpl/p1/grub/x86_64-efi/testload.mod new file mode 100644 index 00000000..bfb6d464 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/testload.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/testspeed.mod b/files/board/arpl/p1/grub/x86_64-efi/testspeed.mod new file mode 100644 index 00000000..c36dfb27 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/testspeed.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/tftp.mod b/files/board/arpl/p1/grub/x86_64-efi/tftp.mod new file mode 100644 index 00000000..e8b708fe Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/tftp.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/tga.mod b/files/board/arpl/p1/grub/x86_64-efi/tga.mod new file mode 100644 index 00000000..622bd60a Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/tga.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/time.mod b/files/board/arpl/p1/grub/x86_64-efi/time.mod new file mode 100644 index 00000000..64ac4e4c Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/time.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/tpm.mod b/files/board/arpl/p1/grub/x86_64-efi/tpm.mod new file mode 100644 index 00000000..defb3770 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/tpm.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/tr.mod b/files/board/arpl/p1/grub/x86_64-efi/tr.mod new file mode 100644 index 00000000..10f1e550 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/tr.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/trig.mod b/files/board/arpl/p1/grub/x86_64-efi/trig.mod new file mode 100644 index 00000000..74a0464f Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/trig.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/true.mod b/files/board/arpl/p1/grub/x86_64-efi/true.mod new file mode 100644 index 00000000..99bd5704 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/true.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/udf.mod b/files/board/arpl/p1/grub/x86_64-efi/udf.mod new file mode 100644 index 00000000..bb2e59aa Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/udf.mod differ
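In terminal.lst the leading letter marks each entry as an input (i) or output (o) terminal, and the right-hand side names the module providing it; patterns such as iserial_*: serial cover dynamically numbered terminals. A quick, illustrative lookup of which module backs a given terminal name:

# Illustrative only: find the module behind a terminal name (input or output).
term="gfxterm"   # hypothetical example value
grep -E "^[io]${term}:" files/board/arpl/p1/grub/x86_64-efi/terminal.lst
# prints: ogfxterm: gfxterm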
diff --git a/files/board/arpl/p1/grub/x86_64-efi/ufs1.mod b/files/board/arpl/p1/grub/x86_64-efi/ufs1.mod new file mode 100644 index 00000000..e35d8521 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/ufs1.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/ufs1_be.mod b/files/board/arpl/p1/grub/x86_64-efi/ufs1_be.mod new file mode 100644 index 00000000..49620db6 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/ufs1_be.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/ufs2.mod b/files/board/arpl/p1/grub/x86_64-efi/ufs2.mod new file mode 100644 index 00000000..98d8ca29 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/ufs2.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/uhci.mod b/files/board/arpl/p1/grub/x86_64-efi/uhci.mod new file mode 100644 index 00000000..7342e68f Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/uhci.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/usb.mod b/files/board/arpl/p1/grub/x86_64-efi/usb.mod new file mode 100644 index 00000000..a9eda33b Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/usb.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/usb_keyboard.mod b/files/board/arpl/p1/grub/x86_64-efi/usb_keyboard.mod new file mode 100644 index 00000000..34ffe382 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/usb_keyboard.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/usbms.mod b/files/board/arpl/p1/grub/x86_64-efi/usbms.mod new file mode 100644 index 00000000..798d5a28 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/usbms.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/usbserial_common.mod b/files/board/arpl/p1/grub/x86_64-efi/usbserial_common.mod new file mode 100644 index 00000000..2a74ae7d Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/usbserial_common.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/usbserial_ftdi.mod b/files/board/arpl/p1/grub/x86_64-efi/usbserial_ftdi.mod new file mode 100644 index 00000000..2dbd5d8f Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/usbserial_ftdi.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/usbserial_pl2303.mod b/files/board/arpl/p1/grub/x86_64-efi/usbserial_pl2303.mod new file mode 100644 index 00000000..6f29066d Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/usbserial_pl2303.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/usbserial_usbdebug.mod b/files/board/arpl/p1/grub/x86_64-efi/usbserial_usbdebug.mod new file mode 100644 index 00000000..78528a2c Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/usbserial_usbdebug.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/usbtest.mod b/files/board/arpl/p1/grub/x86_64-efi/usbtest.mod new file mode 100644 index 00000000..449d316c Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/usbtest.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/video.lst b/files/board/arpl/p1/grub/x86_64-efi/video.lst new file mode 100644 index 00000000..ae9ba23e
--- /dev/null
+++ b/files/board/arpl/p1/grub/x86_64-efi/video.lst
@@ -0,0 +1,4 @@
+efi_gop
+efi_uga
+video_bochs
+video_cirrus
diff --git a/files/board/arpl/p1/grub/x86_64-efi/video.mod b/files/board/arpl/p1/grub/x86_64-efi/video.mod new file mode 100644 index 00000000..76f22c13 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/video.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/video_bochs.mod b/files/board/arpl/p1/grub/x86_64-efi/video_bochs.mod new file mode 100644 index 00000000..1bbb3c33 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/video_bochs.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/video_cirrus.mod b/files/board/arpl/p1/grub/x86_64-efi/video_cirrus.mod new file mode 100644 index 00000000..8cdf6533 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/video_cirrus.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/video_colors.mod b/files/board/arpl/p1/grub/x86_64-efi/video_colors.mod new file mode 100644 index 00000000..c6cfe046 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/video_colors.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/video_fb.mod b/files/board/arpl/p1/grub/x86_64-efi/video_fb.mod new file mode 100644 index 00000000..f0163c62 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/video_fb.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/videoinfo.mod b/files/board/arpl/p1/grub/x86_64-efi/videoinfo.mod new file mode 100644 index 00000000..5f1ceb72 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/videoinfo.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/videotest.mod b/files/board/arpl/p1/grub/x86_64-efi/videotest.mod new file mode 100644 index 00000000..969177d9 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/videotest.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/videotest_checksum.mod b/files/board/arpl/p1/grub/x86_64-efi/videotest_checksum.mod new file mode 100644 index 00000000..3a72e4d8 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/videotest_checksum.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/wrmsr.mod b/files/board/arpl/p1/grub/x86_64-efi/wrmsr.mod new file mode 100644 index 00000000..fc8b0267 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/wrmsr.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/xfs.mod b/files/board/arpl/p1/grub/x86_64-efi/xfs.mod new file mode 100644 index 00000000..e489650f Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/xfs.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/xnu.mod b/files/board/arpl/p1/grub/x86_64-efi/xnu.mod new file mode 100644 index 00000000..441dc9c4 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/xnu.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/xnu_uuid.mod b/files/board/arpl/p1/grub/x86_64-efi/xnu_uuid.mod new file mode 100644 index 00000000..f7f6cbba Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/xnu_uuid.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/xnu_uuid_test.mod b/files/board/arpl/p1/grub/x86_64-efi/xnu_uuid_test.mod new file mode 100644 index 00000000..0c40ba16 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/xnu_uuid_test.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/xzio.mod b/files/board/arpl/p1/grub/x86_64-efi/xzio.mod new file mode 100644 index 00000000..e2509d56 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/xzio.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/zfs.mod b/files/board/arpl/p1/grub/x86_64-efi/zfs.mod new file mode 100644 index 00000000..0a13f69e Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/zfs.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/zfscrypt.mod b/files/board/arpl/p1/grub/x86_64-efi/zfscrypt.mod new file mode 100644 index 00000000..b3d46dd9 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/zfscrypt.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/zfsinfo.mod b/files/board/arpl/p1/grub/x86_64-efi/zfsinfo.mod new file mode 100644 index 00000000..ad1129c5 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/zfsinfo.mod differ
diff --git a/files/board/arpl/p1/grub/x86_64-efi/zstd.mod b/files/board/arpl/p1/grub/x86_64-efi/zstd.mod new file mode 100644 index 00000000..fc251098 Binary files /dev/null and b/files/board/arpl/p1/grub/x86_64-efi/zstd.mod differ
diff --git a/files/board/arpl/p3/addons/.gitkeep b/files/board/arpl/p3/addons/.gitkeep new file mode 100644 index 00000000..e69de29b
diff --git a/files/board/arpl/p3/addons/9p/apollolake-4.4.180.tgz b/files/board/arpl/p3/addons/9p/apollolake-4.4.180.tgz new file mode 100644 index 00000000..300c00e4 Binary files /dev/null and b/files/board/arpl/p3/addons/9p/apollolake-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/9p/broadwell-4.4.180.tgz b/files/board/arpl/p3/addons/9p/broadwell-4.4.180.tgz new file mode 100644 index 00000000..e4bb3f84 Binary files /dev/null and b/files/board/arpl/p3/addons/9p/broadwell-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/9p/broadwellnk-4.4.180.tgz b/files/board/arpl/p3/addons/9p/broadwellnk-4.4.180.tgz new file mode 100644 index 00000000..3b775883 Binary files /dev/null and b/files/board/arpl/p3/addons/9p/broadwellnk-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/9p/bromolow-3.10.108.tgz b/files/board/arpl/p3/addons/9p/bromolow-3.10.108.tgz new file mode 100644 index 00000000..24d94b12 Binary files /dev/null and b/files/board/arpl/p3/addons/9p/bromolow-3.10.108.tgz differ
diff --git a/files/board/arpl/p3/addons/9p/denverton-4.4.180.tgz b/files/board/arpl/p3/addons/9p/denverton-4.4.180.tgz new file mode 100644 index 00000000..a7f168c8 Binary files /dev/null and b/files/board/arpl/p3/addons/9p/denverton-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/9p/geminilake-4.4.180.tgz b/files/board/arpl/p3/addons/9p/geminilake-4.4.180.tgz new file mode 100644 index 00000000..8b7b4de6 Binary files /dev/null and b/files/board/arpl/p3/addons/9p/geminilake-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/9p/manifest.yml b/files/board/arpl/p3/addons/9p/manifest.yml new file mode 100644 index 00000000..cd4c5807
--- /dev/null
+++ b/files/board/arpl/p3/addons/9p/manifest.yml
@@ -0,0 +1,28 @@
+version: 1
+name: 9p
+description: "Driver for Plan 9 Resource Sharing Support (9P2000)"
+available-for:
+  bromolow-3.10.108:
+    install-script: &script "install.sh"
+    modules: true
+  apollolake-4.4.180:
+    install-script: *script
+    modules: true
+  broadwell-4.4.180:
+    install-script: *script
+    modules: true
+  broadwellnk-4.4.180:
+    install-script: *script
+    modules: true
+  denverton-4.4.180:
+    install-script: *script
+    modules: true
+  geminilake-4.4.180:
+    install-script: *script
+    modules: true
+  v1000-4.4.180:
+    install-script: *script
+    modules: true
+  purley-4.4.180:
+    install-script: *script
+    modules: true
diff --git a/files/board/arpl/p3/addons/9p/purley-4.4.180.tgz b/files/board/arpl/p3/addons/9p/purley-4.4.180.tgz new file mode 100644 index 00000000..9ac5de2d Binary files /dev/null and b/files/board/arpl/p3/addons/9p/purley-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/9p/v1000-4.4.180.tgz b/files/board/arpl/p3/addons/9p/v1000-4.4.180.tgz new file mode 100644 index 00000000..06a6566c Binary files /dev/null and b/files/board/arpl/p3/addons/9p/v1000-4.4.180.tgz differ
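Each addon directory under p3/addons pairs one <platform>-<kernelversion>.tgz per supported platform with a manifest.yml whose available-for keys use the same <platform>-<kernelversion> naming, so matching an archive to the running target is a plain string lookup. A minimal sketch of that selection, under assumed PLATFORM/KVER values like those listed in the PLATFORMS file (this is not the loader this commit ships):

# Illustrative only: unpack the addon archive matching the current platform.
ADDON="9p"; PLATFORM="apollolake"; KVER="4.4.180"   # assumed example values
TGZ="files/board/arpl/p3/addons/${ADDON}/${PLATFORM}-${KVER}.tgz"
if [ -f "${TGZ}" ]; then
  mkdir -p "/tmp/${ADDON}"
  tar -xzf "${TGZ}" -C "/tmp/${ADDON}"   # kernel modules for this platform
else
  echo "addon ${ADDON} has no build for ${PLATFORM}-${KVER}" >&2
fi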
diff --git a/files/board/arpl/p3/addons/acpid/apollolake-4.4.180.tgz b/files/board/arpl/p3/addons/acpid/apollolake-4.4.180.tgz new file mode 100644 index 00000000..26cc3ee0 Binary files /dev/null and b/files/board/arpl/p3/addons/acpid/apollolake-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/acpid/broadwell-4.4.180.tgz b/files/board/arpl/p3/addons/acpid/broadwell-4.4.180.tgz new file mode 100644 index 00000000..e1691895 Binary files /dev/null and b/files/board/arpl/p3/addons/acpid/broadwell-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/acpid/broadwellnk-4.4.180.tgz b/files/board/arpl/p3/addons/acpid/broadwellnk-4.4.180.tgz new file mode 100644 index 00000000..accb81c2 Binary files /dev/null and b/files/board/arpl/p3/addons/acpid/broadwellnk-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/acpid/bromolow-3.10.108.tgz b/files/board/arpl/p3/addons/acpid/bromolow-3.10.108.tgz new file mode 100644 index 00000000..613e670c Binary files /dev/null and b/files/board/arpl/p3/addons/acpid/bromolow-3.10.108.tgz differ
diff --git a/files/board/arpl/p3/addons/acpid/denverton-4.4.180.tgz b/files/board/arpl/p3/addons/acpid/denverton-4.4.180.tgz new file mode 100644 index 00000000..cfc8d9ba Binary files /dev/null and b/files/board/arpl/p3/addons/acpid/denverton-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/acpid/geminilake-4.4.180.tgz b/files/board/arpl/p3/addons/acpid/geminilake-4.4.180.tgz new file mode 100644 index 00000000..4ff01489 Binary files /dev/null and b/files/board/arpl/p3/addons/acpid/geminilake-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/acpid/manifest.yml b/files/board/arpl/p3/addons/acpid/manifest.yml new file mode 100644 index 00000000..e22a99a1
--- /dev/null
+++ b/files/board/arpl/p3/addons/acpid/manifest.yml
@@ -0,0 +1,36 @@
+version: 1
+name: acpid
+description: "Flexible and extensible daemon for delivering ACPI events"
+available-for:
+  bromolow-3.10.108:
+    install-script: "install.sh"
+    copy: "all"
+    modules: true
+  apollolake-4.4.180:
+    install-script: "install.sh"
+    copy: "all"
+    modules: true
+  broadwell-4.4.180:
+    install-script: "install.sh"
+    copy: "all"
+    modules: true
+  broadwellnk-4.4.180:
+    install-script: "install.sh"
+    copy: "all"
+    modules: true
+  denverton-4.4.180:
+    install-script: "install.sh"
+    copy: "all"
+    modules: true
+  geminilake-4.4.180:
+    install-script: "install.sh"
+    copy: "all"
+    modules: true
+  v1000-4.4.180:
+    install-script: "install.sh"
+    copy: "all"
+    modules: true
+  purley-4.4.180:
+    install-script: "install.sh"
+    copy: "all"
+    modules: false
diff --git a/files/board/arpl/p3/addons/acpid/purley-4.4.180.tgz b/files/board/arpl/p3/addons/acpid/purley-4.4.180.tgz new file mode 100644 index 00000000..89df72a7 Binary files /dev/null and b/files/board/arpl/p3/addons/acpid/purley-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/acpid/v1000-4.4.180.tgz b/files/board/arpl/p3/addons/acpid/v1000-4.4.180.tgz new file mode 100644 index 00000000..7ad69203 Binary files /dev/null and b/files/board/arpl/p3/addons/acpid/v1000-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/atl1c/apollolake-4.4.180.tgz b/files/board/arpl/p3/addons/atl1c/apollolake-4.4.180.tgz new file mode 100644 index 00000000..a0c55b1c Binary files /dev/null and b/files/board/arpl/p3/addons/atl1c/apollolake-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/atl1c/broadwell-4.4.180.tgz b/files/board/arpl/p3/addons/atl1c/broadwell-4.4.180.tgz new file mode 100644 index 00000000..98271a24 Binary files /dev/null and b/files/board/arpl/p3/addons/atl1c/broadwell-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/atl1c/broadwellnk-4.4.180.tgz b/files/board/arpl/p3/addons/atl1c/broadwellnk-4.4.180.tgz new file mode 100644 index 00000000..808c065d Binary files /dev/null and b/files/board/arpl/p3/addons/atl1c/broadwellnk-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/atl1c/bromolow-3.10.108.tgz b/files/board/arpl/p3/addons/atl1c/bromolow-3.10.108.tgz new file mode 100644 index 00000000..be850fae Binary files /dev/null and b/files/board/arpl/p3/addons/atl1c/bromolow-3.10.108.tgz differ
diff --git a/files/board/arpl/p3/addons/atl1c/denverton-4.4.180.tgz b/files/board/arpl/p3/addons/atl1c/denverton-4.4.180.tgz new file mode 100644 index 00000000..4a26b529 Binary files /dev/null and b/files/board/arpl/p3/addons/atl1c/denverton-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/atl1c/geminilake-4.4.180.tgz b/files/board/arpl/p3/addons/atl1c/geminilake-4.4.180.tgz new file mode 100644 index 00000000..3011f37f Binary files /dev/null and b/files/board/arpl/p3/addons/atl1c/geminilake-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/atl1c/manifest.yml b/files/board/arpl/p3/addons/atl1c/manifest.yml new file mode 100644 index 00000000..d2a309c2
--- /dev/null
+++ b/files/board/arpl/p3/addons/atl1c/manifest.yml
@@ -0,0 +1,28 @@
+version: 1
+name: atl1c
+description: "Driver for Atheros L1C Gigabit Ethernet adapters"
+available-for:
+  bromolow-3.10.108:
+    install-script: &script "install.sh"
+    modules: true
+  apollolake-4.4.180:
+    install-script: *script
+    modules: true
+  broadwell-4.4.180:
+    install-script: *script
+    modules: true
+  broadwellnk-4.4.180:
+    install-script: *script
+    modules: true
+  denverton-4.4.180:
+    install-script: *script
+    modules: true
+  geminilake-4.4.180:
+    install-script: *script
+    modules: true
+  v1000-4.4.180:
+    install-script: *script
+    modules: true
+  purley-4.4.180:
+    install-script: *script
+    modules: true
diff --git a/files/board/arpl/p3/addons/atl1c/purley-4.4.180.tgz b/files/board/arpl/p3/addons/atl1c/purley-4.4.180.tgz new file mode 100644 index 00000000..b11af2c3 Binary files /dev/null and b/files/board/arpl/p3/addons/atl1c/purley-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/atl1c/v1000-4.4.180.tgz b/files/board/arpl/p3/addons/atl1c/v1000-4.4.180.tgz new file mode 100644 index 00000000..4afd7857 Binary files /dev/null and b/files/board/arpl/p3/addons/atl1c/v1000-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/atl1e/apollolake-4.4.180.tgz b/files/board/arpl/p3/addons/atl1e/apollolake-4.4.180.tgz new file mode 100644 index 00000000..f1ad4386 Binary files /dev/null and b/files/board/arpl/p3/addons/atl1e/apollolake-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/atl1e/broadwell-4.4.180.tgz b/files/board/arpl/p3/addons/atl1e/broadwell-4.4.180.tgz new file mode 100644 index 00000000..58621852 Binary files /dev/null and b/files/board/arpl/p3/addons/atl1e/broadwell-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/atl1e/broadwellnk-4.4.180.tgz b/files/board/arpl/p3/addons/atl1e/broadwellnk-4.4.180.tgz new file mode 100644 index 00000000..cb4bc815 Binary files /dev/null and b/files/board/arpl/p3/addons/atl1e/broadwellnk-4.4.180.tgz differ
diff --git a/files/board/arpl/p3/addons/atl1e/bromolow-3.10.108.tgz b/files/board/arpl/p3/addons/atl1e/bromolow-3.10.108.tgz new file mode 100644 index 00000000..87e5a48c Binary files /dev/null and b/files/board/arpl/p3/addons/atl1e/bromolow-3.10.108.tgz differ
index 00000000..87e5a48c Binary files /dev/null and b/files/board/arpl/p3/addons/atl1e/bromolow-3.10.108.tgz differ diff --git a/files/board/arpl/p3/addons/atl1e/denverton-4.4.180.tgz b/files/board/arpl/p3/addons/atl1e/denverton-4.4.180.tgz new file mode 100644 index 00000000..35e656bb Binary files /dev/null and b/files/board/arpl/p3/addons/atl1e/denverton-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/atl1e/geminilake-4.4.180.tgz b/files/board/arpl/p3/addons/atl1e/geminilake-4.4.180.tgz new file mode 100644 index 00000000..616f76a2 Binary files /dev/null and b/files/board/arpl/p3/addons/atl1e/geminilake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/atl1e/manifest.yml b/files/board/arpl/p3/addons/atl1e/manifest.yml new file mode 100644 index 00000000..77a403b3 --- /dev/null +++ b/files/board/arpl/p3/addons/atl1e/manifest.yml @@ -0,0 +1,28 @@ +version: 1 +name: atl1e +description: "Driver for Atheros L1E Gigabit Ethernet adapters" +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true diff --git a/files/board/arpl/p3/addons/atl1e/purley-4.4.180.tgz b/files/board/arpl/p3/addons/atl1e/purley-4.4.180.tgz new file mode 100644 index 00000000..fbb1ad2d Binary files /dev/null and b/files/board/arpl/p3/addons/atl1e/purley-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/atl1e/v1000-4.4.180.tgz b/files/board/arpl/p3/addons/atl1e/v1000-4.4.180.tgz new file mode 100644 index 00000000..f559d1f2 Binary files /dev/null and b/files/board/arpl/p3/addons/atl1e/v1000-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000/apollolake-4.4.180.tgz b/files/board/arpl/p3/addons/e1000/apollolake-4.4.180.tgz new file mode 100644 index 00000000..39f360ea Binary files /dev/null and b/files/board/arpl/p3/addons/e1000/apollolake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000/broadwell-4.4.180.tgz b/files/board/arpl/p3/addons/e1000/broadwell-4.4.180.tgz new file mode 100644 index 00000000..c467799a Binary files /dev/null and b/files/board/arpl/p3/addons/e1000/broadwell-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000/broadwellnk-4.4.180.tgz b/files/board/arpl/p3/addons/e1000/broadwellnk-4.4.180.tgz new file mode 100644 index 00000000..0c229c3b Binary files /dev/null and b/files/board/arpl/p3/addons/e1000/broadwellnk-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000/bromolow-3.10.108.tgz b/files/board/arpl/p3/addons/e1000/bromolow-3.10.108.tgz new file mode 100644 index 00000000..c52cc4b9 Binary files /dev/null and b/files/board/arpl/p3/addons/e1000/bromolow-3.10.108.tgz differ diff --git a/files/board/arpl/p3/addons/e1000/denverton-4.4.180.tgz b/files/board/arpl/p3/addons/e1000/denverton-4.4.180.tgz new file mode 100644 index 00000000..f21abc7f Binary files /dev/null and b/files/board/arpl/p3/addons/e1000/denverton-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000/geminilake-4.4.180.tgz b/files/board/arpl/p3/addons/e1000/geminilake-4.4.180.tgz new file mode 100644 index 00000000..3cd2ddfe Binary files /dev/null and 
b/files/board/arpl/p3/addons/e1000/geminilake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000/manifest.yml b/files/board/arpl/p3/addons/e1000/manifest.yml new file mode 100644 index 00000000..017e1f50 --- /dev/null +++ b/files/board/arpl/p3/addons/e1000/manifest.yml @@ -0,0 +1,29 @@ +version: 1 +name: e1000 +description: "Driver for Intel(R) PRO/1000 Gigabit Ethernet adapters" +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true + diff --git a/files/board/arpl/p3/addons/e1000/purley-4.4.180.tgz b/files/board/arpl/p3/addons/e1000/purley-4.4.180.tgz new file mode 100644 index 00000000..56f5668d Binary files /dev/null and b/files/board/arpl/p3/addons/e1000/purley-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000/v1000-4.4.180.tgz b/files/board/arpl/p3/addons/e1000/v1000-4.4.180.tgz new file mode 100644 index 00000000..4f99ff52 Binary files /dev/null and b/files/board/arpl/p3/addons/e1000/v1000-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000e/apollolake-4.4.180.tgz b/files/board/arpl/p3/addons/e1000e/apollolake-4.4.180.tgz new file mode 100644 index 00000000..05bc0b25 Binary files /dev/null and b/files/board/arpl/p3/addons/e1000e/apollolake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000e/broadwell-4.4.180.tgz b/files/board/arpl/p3/addons/e1000e/broadwell-4.4.180.tgz new file mode 100644 index 00000000..484f198b Binary files /dev/null and b/files/board/arpl/p3/addons/e1000e/broadwell-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000e/broadwellnk-4.4.180.tgz b/files/board/arpl/p3/addons/e1000e/broadwellnk-4.4.180.tgz new file mode 100644 index 00000000..20c3ac0b Binary files /dev/null and b/files/board/arpl/p3/addons/e1000e/broadwellnk-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000e/bromolow-3.10.108.tgz b/files/board/arpl/p3/addons/e1000e/bromolow-3.10.108.tgz new file mode 100644 index 00000000..fe010ca4 Binary files /dev/null and b/files/board/arpl/p3/addons/e1000e/bromolow-3.10.108.tgz differ diff --git a/files/board/arpl/p3/addons/e1000e/denverton-4.4.180.tgz b/files/board/arpl/p3/addons/e1000e/denverton-4.4.180.tgz new file mode 100644 index 00000000..02eb442d Binary files /dev/null and b/files/board/arpl/p3/addons/e1000e/denverton-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000e/geminilake-4.4.180.tgz b/files/board/arpl/p3/addons/e1000e/geminilake-4.4.180.tgz new file mode 100644 index 00000000..9e2f484f Binary files /dev/null and b/files/board/arpl/p3/addons/e1000e/geminilake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000e/manifest.yml b/files/board/arpl/p3/addons/e1000e/manifest.yml new file mode 100644 index 00000000..fe6ad631 --- /dev/null +++ b/files/board/arpl/p3/addons/e1000e/manifest.yml @@ -0,0 +1,29 @@ +version: 1 +name: e1000e +description: "Driver for Intel(R) PRO/1000 Gigabit Ethernet PCI-e adapters" +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + 
install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true + diff --git a/files/board/arpl/p3/addons/e1000e/purley-4.4.180.tgz b/files/board/arpl/p3/addons/e1000e/purley-4.4.180.tgz new file mode 100644 index 00000000..4c6f2c39 Binary files /dev/null and b/files/board/arpl/p3/addons/e1000e/purley-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/e1000e/v1000-4.4.180.tgz b/files/board/arpl/p3/addons/e1000e/v1000-4.4.180.tgz new file mode 100644 index 00000000..17398c04 Binary files /dev/null and b/files/board/arpl/p3/addons/e1000e/v1000-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/ehci-pci/broadwell-4.4.180.tgz b/files/board/arpl/p3/addons/ehci-pci/broadwell-4.4.180.tgz new file mode 100644 index 00000000..0d977488 Binary files /dev/null and b/files/board/arpl/p3/addons/ehci-pci/broadwell-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/ehci-pci/manifest.yml b/files/board/arpl/p3/addons/ehci-pci/manifest.yml new file mode 100644 index 00000000..dd29813d --- /dev/null +++ b/files/board/arpl/p3/addons/ehci-pci/manifest.yml @@ -0,0 +1,7 @@ +version: 1 +name: ehci-pci +description: "Driver for USB 2.0 Host Controller" +available-for: + broadwell-4.4.180: + install-script: &script "install.sh" + modules: true diff --git a/files/board/arpl/p3/addons/mpt3sas/apollolake-4.4.180.tgz b/files/board/arpl/p3/addons/mpt3sas/apollolake-4.4.180.tgz new file mode 100644 index 00000000..aaeb31dc Binary files /dev/null and b/files/board/arpl/p3/addons/mpt3sas/apollolake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/mpt3sas/broadwell-4.4.180.tgz b/files/board/arpl/p3/addons/mpt3sas/broadwell-4.4.180.tgz new file mode 100644 index 00000000..a3fc0fab Binary files /dev/null and b/files/board/arpl/p3/addons/mpt3sas/broadwell-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/mpt3sas/broadwellnk-4.4.180.tgz b/files/board/arpl/p3/addons/mpt3sas/broadwellnk-4.4.180.tgz new file mode 100644 index 00000000..65ad33cb Binary files /dev/null and b/files/board/arpl/p3/addons/mpt3sas/broadwellnk-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/mpt3sas/bromolow-3.10.108.tgz b/files/board/arpl/p3/addons/mpt3sas/bromolow-3.10.108.tgz new file mode 100644 index 00000000..39cf7064 Binary files /dev/null and b/files/board/arpl/p3/addons/mpt3sas/bromolow-3.10.108.tgz differ diff --git a/files/board/arpl/p3/addons/mpt3sas/denverton-4.4.180.tgz b/files/board/arpl/p3/addons/mpt3sas/denverton-4.4.180.tgz new file mode 100644 index 00000000..e7b5df25 Binary files /dev/null and b/files/board/arpl/p3/addons/mpt3sas/denverton-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/mpt3sas/geminilake-4.4.180.tgz b/files/board/arpl/p3/addons/mpt3sas/geminilake-4.4.180.tgz new file mode 100644 index 00000000..acb78337 Binary files /dev/null and b/files/board/arpl/p3/addons/mpt3sas/geminilake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/mpt3sas/manifest.yml b/files/board/arpl/p3/addons/mpt3sas/manifest.yml new file mode 100644 index 00000000..5fae0962 --- /dev/null +++ b/files/board/arpl/p3/addons/mpt3sas/manifest.yml @@ -0,0 +1,29 @@ +version: 1 +name: mpt3sas +description: "Driver for LSI MPT Fusion SAS 3.0 Device" +available-for: + bromolow-3.10.108: + install-script: 
&script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true + diff --git a/files/board/arpl/p3/addons/mpt3sas/purley-4.4.180.tgz b/files/board/arpl/p3/addons/mpt3sas/purley-4.4.180.tgz new file mode 100644 index 00000000..749c7733 Binary files /dev/null and b/files/board/arpl/p3/addons/mpt3sas/purley-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/mpt3sas/v1000-4.4.180.tgz b/files/board/arpl/p3/addons/mpt3sas/v1000-4.4.180.tgz new file mode 100644 index 00000000..b0adbdd1 Binary files /dev/null and b/files/board/arpl/p3/addons/mpt3sas/v1000-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/qjs-dtb/all.tgz b/files/board/arpl/p3/addons/qjs-dtb/all.tgz new file mode 100644 index 00000000..fb6e3d49 Binary files /dev/null and b/files/board/arpl/p3/addons/qjs-dtb/all.tgz differ diff --git a/files/board/arpl/p3/addons/qjs-dtb/manifest.yml b/files/board/arpl/p3/addons/qjs-dtb/manifest.yml new file mode 100644 index 00000000..7fc1726b --- /dev/null +++ b/files/board/arpl/p3/addons/qjs-dtb/manifest.yml @@ -0,0 +1,16 @@ +version: 1 +name: qjs-dtb +system: true +description: "Jumkey's script to change model.dtb dynamically" +all: + install-script: "install.sh" + copy: "all" +available-for: + bromolow-3.10.108: + apollolake-4.4.180: + broadwell-4.4.180: + broadwellnk-4.4.180: + denverton-4.4.180: + geminilake-4.4.180: + v1000-4.4.180: + purley-4.4.180: diff --git a/files/board/arpl/p3/addons/r8168/apollolake-4.4.180.tgz b/files/board/arpl/p3/addons/r8168/apollolake-4.4.180.tgz new file mode 100644 index 00000000..dc9a1c0b Binary files /dev/null and b/files/board/arpl/p3/addons/r8168/apollolake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/r8168/broadwell-4.4.180.tgz b/files/board/arpl/p3/addons/r8168/broadwell-4.4.180.tgz new file mode 100644 index 00000000..3ef75d50 Binary files /dev/null and b/files/board/arpl/p3/addons/r8168/broadwell-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/r8168/broadwellnk-4.4.180.tgz b/files/board/arpl/p3/addons/r8168/broadwellnk-4.4.180.tgz new file mode 100644 index 00000000..430641e9 Binary files /dev/null and b/files/board/arpl/p3/addons/r8168/broadwellnk-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/r8168/bromolow-3.10.108.tgz b/files/board/arpl/p3/addons/r8168/bromolow-3.10.108.tgz new file mode 100644 index 00000000..d54dc8eb Binary files /dev/null and b/files/board/arpl/p3/addons/r8168/bromolow-3.10.108.tgz differ diff --git a/files/board/arpl/p3/addons/r8168/denverton-4.4.180.tgz b/files/board/arpl/p3/addons/r8168/denverton-4.4.180.tgz new file mode 100644 index 00000000..511ac8a7 Binary files /dev/null and b/files/board/arpl/p3/addons/r8168/denverton-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/r8168/geminilake-4.4.180.tgz b/files/board/arpl/p3/addons/r8168/geminilake-4.4.180.tgz new file mode 100644 index 00000000..a8f8a072 Binary files /dev/null and b/files/board/arpl/p3/addons/r8168/geminilake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/r8168/manifest.yml b/files/board/arpl/p3/addons/r8168/manifest.yml new file mode 100644 index 00000000..53e5d9bf --- /dev/null +++ 
b/files/board/arpl/p3/addons/r8168/manifest.yml @@ -0,0 +1,30 @@ +version: 1 +name: r8168 +description: "Driver for Realtek R8168/8111 Ethernet adapters" +conflicts: + - r8169 +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true diff --git a/files/board/arpl/p3/addons/r8168/purley-4.4.180.tgz b/files/board/arpl/p3/addons/r8168/purley-4.4.180.tgz new file mode 100644 index 00000000..a6697eeb Binary files /dev/null and b/files/board/arpl/p3/addons/r8168/purley-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/r8168/v1000-4.4.180.tgz b/files/board/arpl/p3/addons/r8168/v1000-4.4.180.tgz new file mode 100644 index 00000000..9d905034 Binary files /dev/null and b/files/board/arpl/p3/addons/r8168/v1000-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/r8169/apollolake-4.4.180.tgz b/files/board/arpl/p3/addons/r8169/apollolake-4.4.180.tgz new file mode 100644 index 00000000..389f2783 Binary files /dev/null and b/files/board/arpl/p3/addons/r8169/apollolake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/r8169/broadwell-4.4.180.tgz b/files/board/arpl/p3/addons/r8169/broadwell-4.4.180.tgz new file mode 100644 index 00000000..5abf2a89 Binary files /dev/null and b/files/board/arpl/p3/addons/r8169/broadwell-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/r8169/broadwellnk-4.4.180.tgz b/files/board/arpl/p3/addons/r8169/broadwellnk-4.4.180.tgz new file mode 100644 index 00000000..082ea922 Binary files /dev/null and b/files/board/arpl/p3/addons/r8169/broadwellnk-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/r8169/bromolow-3.10.108.tgz b/files/board/arpl/p3/addons/r8169/bromolow-3.10.108.tgz new file mode 100644 index 00000000..3fd97663 Binary files /dev/null and b/files/board/arpl/p3/addons/r8169/bromolow-3.10.108.tgz differ diff --git a/files/board/arpl/p3/addons/r8169/denverton-4.4.180.tgz b/files/board/arpl/p3/addons/r8169/denverton-4.4.180.tgz new file mode 100644 index 00000000..f58465d5 Binary files /dev/null and b/files/board/arpl/p3/addons/r8169/denverton-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/r8169/geminilake-4.4.180.tgz b/files/board/arpl/p3/addons/r8169/geminilake-4.4.180.tgz new file mode 100644 index 00000000..4e60cbd6 Binary files /dev/null and b/files/board/arpl/p3/addons/r8169/geminilake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/r8169/manifest.yml b/files/board/arpl/p3/addons/r8169/manifest.yml new file mode 100644 index 00000000..d09dc879 --- /dev/null +++ b/files/board/arpl/p3/addons/r8169/manifest.yml @@ -0,0 +1,30 @@ +version: 1 +name: r8169 +description: "Driver for Realtek R8169 Ethernet adapters" +conflicts: + - r8168 +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true +
v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true diff --git a/files/board/arpl/p3/addons/r8169/purley-4.4.180.tgz b/files/board/arpl/p3/addons/r8169/purley-4.4.180.tgz new file mode 100644 index 00000000..4cd14716 Binary files /dev/null and b/files/board/arpl/p3/addons/r8169/purley-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/r8169/v1000-4.4.180.tgz b/files/board/arpl/p3/addons/r8169/v1000-4.4.180.tgz new file mode 100644 index 00000000..42576d13 Binary files /dev/null and b/files/board/arpl/p3/addons/r8169/v1000-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/skge/apollolake-4.4.180.tgz b/files/board/arpl/p3/addons/skge/apollolake-4.4.180.tgz new file mode 100644 index 00000000..d7eccd60 Binary files /dev/null and b/files/board/arpl/p3/addons/skge/apollolake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/skge/broadwell-4.4.180.tgz b/files/board/arpl/p3/addons/skge/broadwell-4.4.180.tgz new file mode 100644 index 00000000..934d1eab Binary files /dev/null and b/files/board/arpl/p3/addons/skge/broadwell-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/skge/broadwellnk-4.4.180.tgz b/files/board/arpl/p3/addons/skge/broadwellnk-4.4.180.tgz new file mode 100644 index 00000000..caaee5db Binary files /dev/null and b/files/board/arpl/p3/addons/skge/broadwellnk-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/skge/bromolow-3.10.108.tgz b/files/board/arpl/p3/addons/skge/bromolow-3.10.108.tgz new file mode 100644 index 00000000..d291af6b Binary files /dev/null and b/files/board/arpl/p3/addons/skge/bromolow-3.10.108.tgz differ diff --git a/files/board/arpl/p3/addons/skge/denverton-4.4.180.tgz b/files/board/arpl/p3/addons/skge/denverton-4.4.180.tgz new file mode 100644 index 00000000..9483f3c5 Binary files /dev/null and b/files/board/arpl/p3/addons/skge/denverton-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/skge/geminilake-4.4.180.tgz b/files/board/arpl/p3/addons/skge/geminilake-4.4.180.tgz new file mode 100644 index 00000000..599ce7e5 Binary files /dev/null and b/files/board/arpl/p3/addons/skge/geminilake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/skge/manifest.yml b/files/board/arpl/p3/addons/skge/manifest.yml new file mode 100644 index 00000000..1f11e717 --- /dev/null +++ b/files/board/arpl/p3/addons/skge/manifest.yml @@ -0,0 +1,28 @@ +version: 1 +name: skge +description: "Driver for Marvell Yukon Gigabit Ethernet adapters" +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true diff --git a/files/board/arpl/p3/addons/skge/purley-4.4.180.tgz b/files/board/arpl/p3/addons/skge/purley-4.4.180.tgz new file mode 100644 index 00000000..6c59cda7 Binary files /dev/null and b/files/board/arpl/p3/addons/skge/purley-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/skge/v1000-4.4.180.tgz b/files/board/arpl/p3/addons/skge/v1000-4.4.180.tgz new file mode 100644 index 00000000..70cf1418 Binary files /dev/null and b/files/board/arpl/p3/addons/skge/v1000-4.4.180.tgz differ diff --git 
a/files/board/arpl/p3/addons/virtio/apollolake-4.4.180.tgz b/files/board/arpl/p3/addons/virtio/apollolake-4.4.180.tgz new file mode 100644 index 00000000..4de4fb0a Binary files /dev/null and b/files/board/arpl/p3/addons/virtio/apollolake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/virtio/broadwell-4.4.180.tgz b/files/board/arpl/p3/addons/virtio/broadwell-4.4.180.tgz new file mode 100644 index 00000000..7e62aaf6 Binary files /dev/null and b/files/board/arpl/p3/addons/virtio/broadwell-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/virtio/broadwellnk-4.4.180.tgz b/files/board/arpl/p3/addons/virtio/broadwellnk-4.4.180.tgz new file mode 100644 index 00000000..63154550 Binary files /dev/null and b/files/board/arpl/p3/addons/virtio/broadwellnk-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/virtio/bromolow-3.10.108.tgz b/files/board/arpl/p3/addons/virtio/bromolow-3.10.108.tgz new file mode 100644 index 00000000..aeae5ac0 Binary files /dev/null and b/files/board/arpl/p3/addons/virtio/bromolow-3.10.108.tgz differ diff --git a/files/board/arpl/p3/addons/virtio/denverton-4.4.180.tgz b/files/board/arpl/p3/addons/virtio/denverton-4.4.180.tgz new file mode 100644 index 00000000..38b696fc Binary files /dev/null and b/files/board/arpl/p3/addons/virtio/denverton-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/virtio/geminilake-4.4.180.tgz b/files/board/arpl/p3/addons/virtio/geminilake-4.4.180.tgz new file mode 100644 index 00000000..9420e656 Binary files /dev/null and b/files/board/arpl/p3/addons/virtio/geminilake-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/virtio/manifest.yml b/files/board/arpl/p3/addons/virtio/manifest.yml new file mode 100644 index 00000000..f67a8123 --- /dev/null +++ b/files/board/arpl/p3/addons/virtio/manifest.yml @@ -0,0 +1,28 @@ +version: 1 +name: virtio +description: "Drivers for QEMU/VirtualBox hypervisors" +available-for: + bromolow-3.10.108: + install-script: &script "install.sh" + modules: true + apollolake-4.4.180: + install-script: *script + modules: true + broadwell-4.4.180: + install-script: *script + modules: true + broadwellnk-4.4.180: + install-script: *script + modules: true + denverton-4.4.180: + install-script: *script + modules: true + geminilake-4.4.180: + install-script: *script + modules: true + v1000-4.4.180: + install-script: *script + modules: true + purley-4.4.180: + install-script: *script + modules: true diff --git a/files/board/arpl/p3/addons/virtio/purley-4.4.180.tgz b/files/board/arpl/p3/addons/virtio/purley-4.4.180.tgz new file mode 100644 index 00000000..c502a383 Binary files /dev/null and b/files/board/arpl/p3/addons/virtio/purley-4.4.180.tgz differ diff --git a/files/board/arpl/p3/addons/virtio/v1000-4.4.180.tgz b/files/board/arpl/p3/addons/virtio/v1000-4.4.180.tgz new file mode 100644 index 00000000..fdbf3f8c Binary files /dev/null and b/files/board/arpl/p3/addons/virtio/v1000-4.4.180.tgz differ diff --git a/files/board/arpl/p3/lkms/rp-apollolake-4.4.180-dev.ko b/files/board/arpl/p3/lkms/rp-apollolake-4.4.180-dev.ko new file mode 100644 index 00000000..c3a99fa5 Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-apollolake-4.4.180-dev.ko differ diff --git a/files/board/arpl/p3/lkms/rp-apollolake-4.4.180-prod.ko b/files/board/arpl/p3/lkms/rp-apollolake-4.4.180-prod.ko new file mode 100644 index 00000000..d1a15ed3 Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-apollolake-4.4.180-prod.ko differ diff --git a/files/board/arpl/p3/lkms/rp-broadwell-4.4.180-dev.ko
b/files/board/arpl/p3/lkms/rp-broadwell-4.4.180-dev.ko new file mode 100644 index 00000000..ad6de4e5 Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-broadwell-4.4.180-dev.ko differ diff --git a/files/board/arpl/p3/lkms/rp-broadwell-4.4.180-prod.ko b/files/board/arpl/p3/lkms/rp-broadwell-4.4.180-prod.ko new file mode 100644 index 00000000..85570813 Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-broadwell-4.4.180-prod.ko differ diff --git a/files/board/arpl/p3/lkms/rp-broadwellnk-4.4.180-dev.ko b/files/board/arpl/p3/lkms/rp-broadwellnk-4.4.180-dev.ko new file mode 100644 index 00000000..47d05af2 Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-broadwellnk-4.4.180-dev.ko differ diff --git a/files/board/arpl/p3/lkms/rp-broadwellnk-4.4.180-prod.ko b/files/board/arpl/p3/lkms/rp-broadwellnk-4.4.180-prod.ko new file mode 100644 index 00000000..fcd261e5 Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-broadwellnk-4.4.180-prod.ko differ diff --git a/files/board/arpl/p3/lkms/rp-bromolow-3.10.108-dev.ko b/files/board/arpl/p3/lkms/rp-bromolow-3.10.108-dev.ko new file mode 100644 index 00000000..5204d14c Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-bromolow-3.10.108-dev.ko differ diff --git a/files/board/arpl/p3/lkms/rp-bromolow-3.10.108-prod.ko b/files/board/arpl/p3/lkms/rp-bromolow-3.10.108-prod.ko new file mode 100644 index 00000000..466b808a Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-bromolow-3.10.108-prod.ko differ diff --git a/files/board/arpl/p3/lkms/rp-denverton-4.4.180-dev.ko b/files/board/arpl/p3/lkms/rp-denverton-4.4.180-dev.ko new file mode 100644 index 00000000..5ac76762 Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-denverton-4.4.180-dev.ko differ diff --git a/files/board/arpl/p3/lkms/rp-denverton-4.4.180-prod.ko b/files/board/arpl/p3/lkms/rp-denverton-4.4.180-prod.ko new file mode 100644 index 00000000..039b1f49 Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-denverton-4.4.180-prod.ko differ diff --git a/files/board/arpl/p3/lkms/rp-geminilake-4.4.180-dev.ko b/files/board/arpl/p3/lkms/rp-geminilake-4.4.180-dev.ko new file mode 100644 index 00000000..80ac7ac0 Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-geminilake-4.4.180-dev.ko differ diff --git a/files/board/arpl/p3/lkms/rp-geminilake-4.4.180-prod.ko b/files/board/arpl/p3/lkms/rp-geminilake-4.4.180-prod.ko new file mode 100644 index 00000000..d40f3748 Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-geminilake-4.4.180-prod.ko differ diff --git a/files/board/arpl/p3/lkms/rp-purley-4.4.180-dev.ko b/files/board/arpl/p3/lkms/rp-purley-4.4.180-dev.ko new file mode 100644 index 00000000..633928d7 Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-purley-4.4.180-dev.ko differ diff --git a/files/board/arpl/p3/lkms/rp-purley-4.4.180-prod.ko b/files/board/arpl/p3/lkms/rp-purley-4.4.180-prod.ko new file mode 100644 index 00000000..7050307e Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-purley-4.4.180-prod.ko differ diff --git a/files/board/arpl/p3/lkms/rp-v1000-4.4.180-dev.ko b/files/board/arpl/p3/lkms/rp-v1000-4.4.180-dev.ko new file mode 100644 index 00000000..ec956204 Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-v1000-4.4.180-dev.ko differ diff --git a/files/board/arpl/p3/lkms/rp-v1000-4.4.180-prod.ko b/files/board/arpl/p3/lkms/rp-v1000-4.4.180-prod.ko new file mode 100644 index 00000000..f6ed2987 Binary files /dev/null and b/files/board/arpl/p3/lkms/rp-v1000-4.4.180-prod.ko differ diff --git 
a/files/board/arpl/post-build.sh b/files/board/arpl/post-build.sh new file mode 100755 index 00000000..b319f3e5 --- /dev/null +++ b/files/board/arpl/post-build.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +# CONFIG_DIR = . +# $1 = Target path = ./output/target +# BR2_DL_DIR = ./dl +# BINARIES_DIR = ./output/images +# BUILD_DIR = ./output/build +# BASE_DIR = ./output + +set -e + +# Fix DHCPCD client id +sed -i 's|#clientid|clientid|' "${1}/etc/dhcpcd.conf" +sed -i 's|duid|#duid|' "${1}/etc/dhcpcd.conf" diff --git a/files/configs/arpl_defconfig b/files/configs/arpl_defconfig new file mode 100644 index 00000000..aa0b0a29 --- /dev/null +++ b/files/configs/arpl_defconfig @@ -0,0 +1,62 @@ +BR2_x86_64=y +BR2_TOOLCHAIN_BUILDROOT_GLIBC=y +BR2_TOOLCHAIN_BUILDROOT_CXX=y +BR2_TARGET_GENERIC_HOSTNAME="arpl" +BR2_TARGET_GENERIC_ISSUE="Automated RedPill loader" +BR2_ROOTFS_DEVICE_CREATION_DYNAMIC_EUDEV=y +BR2_ROOTFS_MERGED_USR=y +BR2_TARGET_GENERIC_ROOT_PASSWD="Redp1lL-1s-4weSomE" +BR2_SYSTEM_BIN_SH_BASH=y +BR2_TARGET_TZ_INFO=y +BR2_ROOTFS_OVERLAY="board/arpl/overlayfs" +BR2_ROOTFS_POST_BUILD_SCRIPT="board/arpl/post-build.sh" +BR2_ROOTFS_POST_IMAGE_SCRIPT="board/arpl/make-img.sh" +BR2_LINUX_KERNEL=y +BR2_LINUX_KERNEL_USE_CUSTOM_CONFIG=y +BR2_LINUX_KERNEL_CUSTOM_CONFIG_FILE="board/arpl/kernel_defconfig" +BR2_LINUX_KERNEL_XZ=y +BR2_PACKAGE_BUSYBOX_CONFIG="board/arpl/busybox_defconfig" +BR2_PACKAGE_BUSYBOX_SHOW_OTHERS=y +BR2_PACKAGE_BZIP2=y +BR2_PACKAGE_XZ=y +BR2_PACKAGE_KEXEC=y +BR2_PACKAGE_KEXEC_ZLIB=y +BR2_PACKAGE_BINUTILS=y +BR2_PACKAGE_BINUTILS_TARGET=y +BR2_PACKAGE_CPIO=y +BR2_PACKAGE_DOSFSTOOLS=y +BR2_PACKAGE_DOSFSTOOLS_FSCK_FAT=y +BR2_PACKAGE_E2FSPROGS=y +BR2_PACKAGE_E2FSPROGS_RESIZE2FS=y +BR2_PACKAGE_ACPID=y +BR2_PACKAGE_KBD=y +BR2_PACKAGE_LSHW=y +BR2_PACKAGE_LSSCSI=y +BR2_PACKAGE_MDADM=y +BR2_PACKAGE_NVME=y +BR2_PACKAGE_PCIUTILS=y +BR2_PACKAGE_USBUTILS=y +BR2_PACKAGE_PHP=y +BR2_PACKAGE_PHP_SAPI_CLI=y +BR2_PACKAGE_DTC=y +BR2_PACKAGE_DTC_PROGRAMS=y +BR2_PACKAGE_LIBCURL=y +BR2_PACKAGE_LIBCURL_CURL=y +BR2_PACKAGE_DHCPCD=y +BR2_PACKAGE_OPENSSH=y +# BR2_PACKAGE_OPENSSH_CLIENT is not set +BR2_PACKAGE_BASH_COMPLETION=y +BR2_PACKAGE_DIALOG=y +BR2_PACKAGE_TTYD=y +BR2_PACKAGE_XXHASH=y +BR2_PACKAGE_ACL=y +BR2_PACKAGE_COREUTILS=y +BR2_PACKAGE_COREUTILS_INDIVIDUAL_BINARIES=y +BR2_PACKAGE_UTIL_LINUX_BINARIES=y +BR2_PACKAGE_UTIL_LINUX_AGETTY=y +BR2_PACKAGE_NANO=y +BR2_TARGET_ROOTFS_CPIO=y +BR2_TARGET_ROOTFS_CPIO_XZ=y +# BR2_TARGET_ROOTFS_TAR is not set +BR2_PACKAGE_HOST_DOSFSTOOLS=y +BR2_PACKAGE_HOST_E2FSPROGS=y diff --git a/img-gen.sh b/img-gen.sh new file mode 100755 index 00000000..bc5578c3 --- /dev/null +++ b/img-gen.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash + +set -e + +BR_VER="buildroot-2022.02.2" +if [ ! -d "${BR_VER}" ]; then + echo "Downloading buildroot" + curl -LO "https://buildroot.org/downloads/${BR_VER}.tar.gz" + echo "Extracting buildroot" + tar xf "${BR_VER}.tar.gz" + rm "${BR_VER}.tar.gz" +fi +# Remove old files +rm -rf "${BR_VER}/output/target/opt/arpl" +rm -rf "${BR_VER}/board/arpl/overlayfs" +rm -rf "${BR_VER}/board/arpl/p1" +rm -rf "${BR_VER}/board/arpl/p3" +echo "Copying files" +cp -Ru files/* "${BR_VER}/" + +cd "${BR_VER}" +echo "Generating default config" +make arpl_defconfig +echo "Building... Drink a coffee and wait!" +make +cd - +rm -f arpl.img.zip +zip -9 arpl.img.zip arpl.img
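The r8168 and r8169 manifests above mark each other under a "conflicts:" key, since only one of the two Realtek drivers should be loaded at a time. As a minimal sketch of how that metadata could be consumed from a shell; the helper below is illustrative, not part of this commit, and assumes the flat "conflicts:" list format shown in the manifests:

#!/usr/bin/env bash
# Illustrative helper: report the conflicts declared in each addon manifest.
for MANIFEST in files/board/arpl/p3/addons/*/manifest.yml; do
  ADDON="$(basename "$(dirname "${MANIFEST}")")"
  # Collect "- name" items between the "conflicts:" key and the next top-level key
  CONFLICTS="$(sed -n '/^conflicts:/,/^[a-z]/s/^ *- *//p' "${MANIFEST}" | xargs)"
  if [ -n "${CONFLICTS}" ]; then
    echo "${ADDON} conflicts with: ${CONFLICTS}"
  fi
done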
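The dhcpcd tweak in post-build.sh switches the client identifier from a DUID to a hardware-address-based client ID (the standard clientid/duid directives of dhcpcd.conf), so DHCP leases follow the NIC's MAC address. A quick way to confirm the edit landed in the built rootfs, assuming the default buildroot output layout the script comments describe:

# Run from the buildroot tree after a build; both directives should match
grep -E '^(clientid|#duid)' output/target/etc/dhcpcd.conf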
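For reference, a typical end-to-end run of img-gen.sh on a Linux host, assuming the buildroot prerequisites and zip are installed; /dev/sdX below is a placeholder for whatever USB device the loader will boot from, not a value taken from this commit:

./img-gen.sh                                      # fetch buildroot, build, produce arpl.img and arpl.img.zip
sudo dd if=arpl.img of=/dev/sdX bs=1M conv=fsync  # write the image to the USB stick (replace /dev/sdX)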